From f1703570692144b2dbb99577a690d4b28e684496 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Thu, 23 Jun 2022 19:00:54 +0300 Subject: [PATCH 001/112] feat: migrating GNS signal to L2 (early WIP) --- .../libraries/MerklePatriciaProofVerifier.sol | 271 +++++++++++++ contracts/libraries/RLPReader.sol | 362 ++++++++++++++++++ contracts/libraries/StateProofVerifier.sol | 146 +++++++ 3 files changed, 779 insertions(+) create mode 100644 contracts/libraries/MerklePatriciaProofVerifier.sol create mode 100644 contracts/libraries/RLPReader.sol create mode 100644 contracts/libraries/StateProofVerifier.sol diff --git a/contracts/libraries/MerklePatriciaProofVerifier.sol b/contracts/libraries/MerklePatriciaProofVerifier.sol new file mode 100644 index 000000000..7a6e02162 --- /dev/null +++ b/contracts/libraries/MerklePatriciaProofVerifier.sol @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: MIT + +/* + * Copied from: + * https://github.com/lidofinance/curve-merkle-oracle/blob/1033b3e84142317ffd8f366b52e489d5eb49c73f/contracts/MerklePatriciaProofVerifier.sol + * + * MODIFIED from lidofinance's implementation: + * - Changed solidity version to 0.7.6 (pablo@edgeandnode.com) + * - Using local copy of the RLPReader library instead of using the package + */ + +/** + * Copied from https://github.com/lorenzb/proveth/blob/c74b20e/onchain/ProvethVerifier.sol + * with minor performance and code style-related modifications. + */ +pragma solidity 0.7.6; + +import { RLPReader } from "./RLPReader.sol"; + +library MerklePatriciaProofVerifier { + using RLPReader for RLPReader.RLPItem; + using RLPReader for bytes; + + /// @dev Validates a Merkle-Patricia-Trie proof. + /// If the proof proves the inclusion of some key-value pair in the + /// trie, the value is returned. Otherwise, i.e. if the proof proves + /// the exclusion of a key from the trie, an empty byte array is + /// returned. 
+ /// @param rootHash is the Keccak-256 hash of the root node of the MPT. + /// @param path is the key of the node whose inclusion/exclusion we are + /// proving. + /// @param stack is the stack of MPT nodes (starting with the root) that + /// need to be traversed during verification. + /// @return value whose inclusion is proved or an empty byte array for + /// a proof of exclusion + function extractProofValue( + bytes32 rootHash, + bytes memory path, + RLPReader.RLPItem[] memory stack + ) internal pure returns (bytes memory value) { + bytes memory mptKey = _decodeNibbles(path, 0); + uint256 mptKeyOffset = 0; + + bytes32 nodeHashHash; + RLPReader.RLPItem[] memory node; + + RLPReader.RLPItem memory rlpValue; + + if (stack.length == 0) { + // Root hash of empty Merkle-Patricia-Trie + require(rootHash == 0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421); + return new bytes(0); + } + + // Traverse stack of nodes starting at root. + for (uint256 i = 0; i < stack.length; i++) { + // We use the fact that an rlp encoded list consists of some + // encoding of its length plus the concatenation of its + // *rlp-encoded* items. + + // The root node is hashed with Keccak-256 ... + if (i == 0 && rootHash != stack[i].rlpBytesKeccak256()) { + revert(); + } + // ... whereas all other nodes are hashed with the MPT + // hash function. + if (i != 0 && nodeHashHash != _mptHashHash(stack[i])) { + revert(); + } + // We verified that stack[i] has the correct hash, so we + // may safely decode it. + node = stack[i].toList(); + + if (node.length == 2) { + // Extension or Leaf node + + bool isLeaf; + bytes memory nodeKey; + (isLeaf, nodeKey) = _merklePatriciaCompactDecode(node[0].toBytes()); + + uint256 prefixLength = _sharedPrefixLength(mptKeyOffset, mptKey, nodeKey); + mptKeyOffset += prefixLength; + + if (prefixLength < nodeKey.length) { + // Proof claims divergent extension or leaf. (Only + // relevant for proofs of exclusion.) 
+ // An Extension/Leaf node is divergent iff it "skips" over + // the point at which a Branch node should have been had the + // excluded key been included in the trie. + // Example: Imagine a proof of exclusion for path [1, 4], + // where the current node is a Leaf node with + // path [1, 3, 3, 7]. For [1, 4] to be included, there + // should have been a Branch node at [1] with a child + // at 3 and a child at 4. + + // Sanity check + if (i < stack.length - 1) { + // divergent node must come last in proof + revert(); + } + + return new bytes(0); + } + + if (isLeaf) { + // Sanity check + if (i < stack.length - 1) { + // leaf node must come last in proof + revert(); + } + + if (mptKeyOffset < mptKey.length) { + return new bytes(0); + } + + rlpValue = node[1]; + return rlpValue.toBytes(); + } else { + // extension + // Sanity check + if (i == stack.length - 1) { + // shouldn't be at last level + revert(); + } + + if (!node[1].isList()) { + // rlp(child) was at least 32 bytes. node[1] contains + // Keccak256(rlp(child)). + nodeHashHash = node[1].payloadKeccak256(); + } else { + // rlp(child) was less than 32 bytes. node[1] contains + // rlp(child). + nodeHashHash = node[1].rlpBytesKeccak256(); + } + } + } else if (node.length == 17) { + // Branch node + + if (mptKeyOffset != mptKey.length) { + // we haven't consumed the entire path, so we need to look at a child + uint8 nibble = uint8(mptKey[mptKeyOffset]); + mptKeyOffset += 1; + if (nibble >= 16) { + // each element of the path has to be a nibble + revert(); + } + + if (_isEmptyBytesequence(node[nibble])) { + // Sanity + if (i != stack.length - 1) { + // leaf node should be at last level + revert(); + } + + return new bytes(0); + } else if (!node[nibble].isList()) { + nodeHashHash = node[nibble].payloadKeccak256(); + } else { + nodeHashHash = node[nibble].rlpBytesKeccak256(); + } + } else { + // we have consumed the entire mptKey, so we need to look at what's contained in this node. 
+ + // Sanity + if (i != stack.length - 1) { + // should be at last level + revert(); + } + + return node[16].toBytes(); + } + } + } + } + + /// @dev Computes the hash of the Merkle-Patricia-Trie hash of the RLP item. + /// Merkle-Patricia-Tries use a weird "hash function" that outputs + /// *variable-length* hashes: If the item is shorter than 32 bytes, + /// the MPT hash is the item. Otherwise, the MPT hash is the + /// Keccak-256 hash of the item. + /// The easiest way to compare variable-length byte sequences is + /// to compare their Keccak-256 hashes. + /// @param item The RLP item to be hashed. + /// @return Keccak-256(MPT-hash(item)) + function _mptHashHash(RLPReader.RLPItem memory item) private pure returns (bytes32) { + if (item.len < 32) { + return item.rlpBytesKeccak256(); + } else { + return keccak256(abi.encodePacked(item.rlpBytesKeccak256())); + } + } + + function _isEmptyBytesequence(RLPReader.RLPItem memory item) private pure returns (bool) { + if (item.len != 1) { + return false; + } + uint8 b; + uint256 memPtr = item.memPtr; + assembly { + b := byte(0, mload(memPtr)) + } + return b == 0x80; /* empty byte string */ + } + + function _merklePatriciaCompactDecode(bytes memory compact) + private + pure + returns (bool isLeaf, bytes memory nibbles) + { + require(compact.length > 0); + uint256 first_nibble = (uint8(compact[0]) >> 4) & 0xF; + uint256 skipNibbles; + if (first_nibble == 0) { + skipNibbles = 2; + isLeaf = false; + } else if (first_nibble == 1) { + skipNibbles = 1; + isLeaf = false; + } else if (first_nibble == 2) { + skipNibbles = 2; + isLeaf = true; + } else if (first_nibble == 3) { + skipNibbles = 1; + isLeaf = true; + } else { + // Not supposed to happen! 
+ revert(); + } + return (isLeaf, _decodeNibbles(compact, skipNibbles)); + } + + function _decodeNibbles(bytes memory compact, uint256 skipNibbles) + private + pure + returns (bytes memory nibbles) + { + require(compact.length > 0); + + uint256 length = compact.length * 2; + require(skipNibbles <= length); + length -= skipNibbles; + + nibbles = new bytes(length); + uint256 nibblesLength = 0; + + for (uint256 i = skipNibbles; i < skipNibbles + length; i += 1) { + if (i % 2 == 0) { + nibbles[nibblesLength] = bytes1((uint8(compact[i / 2]) >> 4) & 0xF); + } else { + nibbles[nibblesLength] = bytes1((uint8(compact[i / 2]) >> 0) & 0xF); + } + nibblesLength += 1; + } + + assert(nibblesLength == nibbles.length); + } + + function _sharedPrefixLength( + uint256 xsOffset, + bytes memory xs, + bytes memory ys + ) private pure returns (uint256) { + uint256 i; + for (i = 0; i + xsOffset < xs.length && i < ys.length; i++) { + if (xs[i + xsOffset] != ys[i]) { + return i; + } + } + return i; + } +} diff --git a/contracts/libraries/RLPReader.sol b/contracts/libraries/RLPReader.sol new file mode 100644 index 000000000..6b0c2a1f9 --- /dev/null +++ b/contracts/libraries/RLPReader.sol @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: Apache-2.0 + +/* + * Copied from: + * https://github.com/edumar111/Solidity-RLP/blob/2e48c3004b7c70e1a1dfdd04b5b9761c28da9cc4/contracts/RLPReader.sol + * + * (using edumar111's fork as it includes Solidity 0.7+ support, + * which we need, and the PR is still open on hamdiallam's original repo) + */ + +/* + * @author Hamdi Allam hamdi.allam97@gmail.com + * Please reach out with any questions or concerns + */ +pragma solidity >=0.5.0 <0.9.0; + +library RLPReader { + uint8 constant STRING_SHORT_START = 0x80; + uint8 constant STRING_LONG_START = 0xb8; + uint8 constant LIST_SHORT_START = 0xc0; + uint8 constant LIST_LONG_START = 0xf8; + uint8 constant WORD_SIZE = 32; + + struct RLPItem { + uint256 len; + uint256 memPtr; + } + + struct Iterator { + 
RLPItem item; // Item that's being iterated over. + uint256 nextPtr; // Position of the next item in the list. + } + + /* + * @dev Returns the next element in the iteration. Reverts if it has not next element. + * @param self The iterator. + * @return The next element in the iteration. + */ + function next(Iterator memory self) internal pure returns (RLPItem memory) { + require(hasNext(self)); + + uint256 ptr = self.nextPtr; + uint256 itemLength = _itemLength(ptr); + self.nextPtr = ptr + itemLength; + + return RLPItem(itemLength, ptr); + } + + /* + * @dev Returns true if the iteration has more elements. + * @param self The iterator. + * @return true if the iteration has more elements. + */ + function hasNext(Iterator memory self) internal pure returns (bool) { + RLPItem memory item = self.item; + return self.nextPtr < item.memPtr + item.len; + } + + /* + * @param item RLP encoded bytes + */ + function toRlpItem(bytes memory item) internal pure returns (RLPItem memory) { + uint256 memPtr; + assembly { + memPtr := add(item, 0x20) + } + + return RLPItem(item.length, memPtr); + } + + /* + * @dev Create an iterator. Reverts if item is not a list. + * @param self The RLP item. + * @return An 'Iterator' over the item. + */ + function iterator(RLPItem memory self) internal pure returns (Iterator memory) { + require(isList(self)); + + uint256 ptr = self.memPtr + _payloadOffset(self.memPtr); + return Iterator(self, ptr); + } + + /* + * @param the RLP item. + */ + function rlpLen(RLPItem memory item) internal pure returns (uint256) { + return item.len; + } + + /* + * @param the RLP item. + * @return (memPtr, len) pair: location of the item's payload in memory. + */ + function payloadLocation(RLPItem memory item) internal pure returns (uint256, uint256) { + uint256 offset = _payloadOffset(item.memPtr); + uint256 memPtr = item.memPtr + offset; + uint256 len = item.len - offset; // data length + return (memPtr, len); + } + + /* + * @param the RLP item. 
+ */ + function payloadLen(RLPItem memory item) internal pure returns (uint256) { + (, uint256 len) = payloadLocation(item); + return len; + } + + /* + * @param the RLP item containing the encoded list. + */ + function toList(RLPItem memory item) internal pure returns (RLPItem[] memory) { + require(isList(item)); + + uint256 items = numItems(item); + RLPItem[] memory result = new RLPItem[](items); + + uint256 memPtr = item.memPtr + _payloadOffset(item.memPtr); + uint256 dataLen; + for (uint256 i = 0; i < items; i++) { + dataLen = _itemLength(memPtr); + result[i] = RLPItem(dataLen, memPtr); + memPtr = memPtr + dataLen; + } + + return result; + } + + // @return indicator whether encoded payload is a list. negate this function call for isData. + function isList(RLPItem memory item) internal pure returns (bool) { + if (item.len == 0) return false; + + uint8 byte0; + uint256 memPtr = item.memPtr; + assembly { + byte0 := byte(0, mload(memPtr)) + } + + if (byte0 < LIST_SHORT_START) return false; + return true; + } + + /* + * @dev A cheaper version of keccak256(toRlpBytes(item)) that avoids copying memory. + * @return keccak256 hash of RLP encoded bytes. + */ + function rlpBytesKeccak256(RLPItem memory item) internal pure returns (bytes32) { + uint256 ptr = item.memPtr; + uint256 len = item.len; + bytes32 result; + assembly { + result := keccak256(ptr, len) + } + return result; + } + + /* + * @dev A cheaper version of keccak256(toBytes(item)) that avoids copying memory. + * @return keccak256 hash of the item payload. 
+ */ + function payloadKeccak256(RLPItem memory item) internal pure returns (bytes32) { + (uint256 memPtr, uint256 len) = payloadLocation(item); + bytes32 result; + assembly { + result := keccak256(memPtr, len) + } + return result; + } + + /** RLPItem conversions into data types **/ + + // @returns raw rlp encoding in bytes + function toRlpBytes(RLPItem memory item) internal pure returns (bytes memory) { + bytes memory result = new bytes(item.len); + if (result.length == 0) return result; + + uint256 ptr; + assembly { + ptr := add(0x20, result) + } + + copy(item.memPtr, ptr, item.len); + return result; + } + + // any non-zero byte except "0x80" is considered true + function toBoolean(RLPItem memory item) internal pure returns (bool) { + require(item.len == 1); + uint256 result; + uint256 memPtr = item.memPtr; + assembly { + result := byte(0, mload(memPtr)) + } + + // SEE Github Issue #5. + // Summary: Most commonly used RLP libraries (i.e Geth) will encode + // "0" as "0x80" instead of as "0". We handle this edge case explicitly + // here. 
+ if (result == 0 || result == STRING_SHORT_START) { + return false; + } else { + return true; + } + } + + function toAddress(RLPItem memory item) internal pure returns (address) { + // 1 byte for the length prefix + require(item.len == 21); + + return address(uint160(toUint(item))); + } + + function toUint(RLPItem memory item) internal pure returns (uint256) { + require(item.len > 0 && item.len <= 33); + + (uint256 memPtr, uint256 len) = payloadLocation(item); + + uint256 result; + assembly { + result := mload(memPtr) + + // shfit to the correct location if neccesary + if lt(len, 32) { + result := div(result, exp(256, sub(32, len))) + } + } + + return result; + } + + // enforces 32 byte length + function toUintStrict(RLPItem memory item) internal pure returns (uint256) { + // one byte prefix + require(item.len == 33); + + uint256 result; + uint256 memPtr = item.memPtr + 1; + assembly { + result := mload(memPtr) + } + + return result; + } + + function toBytes(RLPItem memory item) internal pure returns (bytes memory) { + require(item.len > 0); + + (uint256 memPtr, uint256 len) = payloadLocation(item); + bytes memory result = new bytes(len); + + uint256 destPtr; + assembly { + destPtr := add(0x20, result) + } + + copy(memPtr, destPtr, len); + return result; + } + + /* + * Private Helpers + */ + + // @return number of payload items inside an encoded list. 
+ function numItems(RLPItem memory item) private pure returns (uint256) { + if (item.len == 0) return 0; + + uint256 count = 0; + uint256 currPtr = item.memPtr + _payloadOffset(item.memPtr); + uint256 endPtr = item.memPtr + item.len; + while (currPtr < endPtr) { + currPtr = currPtr + _itemLength(currPtr); // skip over an item + count++; + } + + return count; + } + + // @return entire rlp item byte length + function _itemLength(uint256 memPtr) private pure returns (uint256) { + uint256 itemLen; + uint256 byte0; + assembly { + byte0 := byte(0, mload(memPtr)) + } + + if (byte0 < STRING_SHORT_START) itemLen = 1; + else if (byte0 < STRING_LONG_START) itemLen = byte0 - STRING_SHORT_START + 1; + else if (byte0 < LIST_SHORT_START) { + assembly { + let byteLen := sub(byte0, 0xb7) // # of bytes the actual length is + memPtr := add(memPtr, 1) // skip over the first byte + + /* 32 byte word size */ + let dataLen := div(mload(memPtr), exp(256, sub(32, byteLen))) // right shifting to get the len + itemLen := add(dataLen, add(byteLen, 1)) + } + } else if (byte0 < LIST_LONG_START) { + itemLen = byte0 - LIST_SHORT_START + 1; + } else { + assembly { + let byteLen := sub(byte0, 0xf7) + memPtr := add(memPtr, 1) + + let dataLen := div(mload(memPtr), exp(256, sub(32, byteLen))) // right shifting to the correct length + itemLen := add(dataLen, add(byteLen, 1)) + } + } + + return itemLen; + } + + // @return number of bytes until the data + function _payloadOffset(uint256 memPtr) private pure returns (uint256) { + uint256 byte0; + assembly { + byte0 := byte(0, mload(memPtr)) + } + + if (byte0 < STRING_SHORT_START) return 0; + else if ( + byte0 < STRING_LONG_START || (byte0 >= LIST_SHORT_START && byte0 < LIST_LONG_START) + ) return 1; + else if (byte0 < LIST_SHORT_START) + // being explicit + return byte0 - (STRING_LONG_START - 1) + 1; + else return byte0 - (LIST_LONG_START - 1) + 1; + } + + /* + * @param src Pointer to source + * @param dest Pointer to destination + * @param len Amount of 
memory to copy from the source + */ + function copy( + uint256 src, + uint256 dest, + uint256 len + ) private pure { + if (len == 0) return; + + // copy as many word sizes as possible + for (; len >= WORD_SIZE; len -= WORD_SIZE) { + assembly { + mstore(dest, mload(src)) + } + + src += WORD_SIZE; + dest += WORD_SIZE; + } + + if (len > 0) { + // left over bytes. Mask is used to remove unwanted bytes from the word + uint256 mask = 256**(WORD_SIZE - len) - 1; + assembly { + let srcpart := and(mload(src), not(mask)) // zero out src + let destpart := and(mload(dest), mask) // retrieve the bytes + mstore(dest, or(destpart, srcpart)) + } + } + } +} diff --git a/contracts/libraries/StateProofVerifier.sol b/contracts/libraries/StateProofVerifier.sol new file mode 100644 index 000000000..f9b52c67b --- /dev/null +++ b/contracts/libraries/StateProofVerifier.sol @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: MIT + +/* + * Copied from: + * https://github.com/lidofinance/curve-merkle-oracle/blob/1033b3e84142317ffd8f366b52e489d5eb49c73f/contracts/StateProofVerifier.sol + * + * MODIFIED from lidofinance's implementation: + * - Changed solidity version to 0.7.6 (pablo@edgeandnode.com) + * - Using local copy of the RLPReader library instead of using the package + */ + +pragma solidity 0.7.6; + +import { RLPReader } from "./RLPReader.sol"; +import { MerklePatriciaProofVerifier } from "./MerklePatriciaProofVerifier.sol"; + +/** + * @title A helper library for verification of Merkle Patricia account and state proofs. 
+ */ +library StateProofVerifier { + using RLPReader for RLPReader.RLPItem; + using RLPReader for bytes; + + uint256 constant HEADER_STATE_ROOT_INDEX = 3; + uint256 constant HEADER_NUMBER_INDEX = 8; + uint256 constant HEADER_TIMESTAMP_INDEX = 11; + + struct BlockHeader { + bytes32 hash; + bytes32 stateRootHash; + uint256 number; + uint256 timestamp; + } + + struct Account { + bool exists; + uint256 nonce; + uint256 balance; + bytes32 storageRoot; + bytes32 codeHash; + } + + struct SlotValue { + bool exists; + uint256 value; + } + + /** + * @notice Parses block header and verifies its presence onchain within the latest 256 blocks. + * @param _headerRlpBytes RLP-encoded block header. + */ + function verifyBlockHeader(bytes memory _headerRlpBytes) + internal + view + returns (BlockHeader memory) + { + BlockHeader memory header = parseBlockHeader(_headerRlpBytes); + // ensure that the block is actually in the blockchain + require(header.hash == blockhash(header.number), "blockhash mismatch"); + return header; + } + + /** + * @notice Parses RLP-encoded block header. + * @param _headerRlpBytes RLP-encoded block header. + */ + function parseBlockHeader(bytes memory _headerRlpBytes) + internal + pure + returns (BlockHeader memory) + { + BlockHeader memory result; + RLPReader.RLPItem[] memory headerFields = _headerRlpBytes.toRlpItem().toList(); + + require(headerFields.length > HEADER_TIMESTAMP_INDEX); + + result.stateRootHash = bytes32(headerFields[HEADER_STATE_ROOT_INDEX].toUint()); + result.number = headerFields[HEADER_NUMBER_INDEX].toUint(); + result.timestamp = headerFields[HEADER_TIMESTAMP_INDEX].toUint(); + result.hash = keccak256(_headerRlpBytes); + + return result; + } + + /** + * @notice Verifies Merkle Patricia proof of an account and extracts the account fields. + * + * @param _addressHash Keccak256 hash of the address corresponding to the account. + * @param _stateRootHash MPT root hash of the Ethereum state trie. 
+ */ + function extractAccountFromProof( + bytes32 _addressHash, // keccak256(abi.encodePacked(address)) + bytes32 _stateRootHash, + RLPReader.RLPItem[] memory _proof + ) internal pure returns (Account memory) { + bytes memory acctRlpBytes = MerklePatriciaProofVerifier.extractProofValue( + _stateRootHash, + abi.encodePacked(_addressHash), + _proof + ); + + Account memory account; + + if (acctRlpBytes.length == 0) { + return account; + } + + RLPReader.RLPItem[] memory acctFields = acctRlpBytes.toRlpItem().toList(); + require(acctFields.length == 4); + + account.exists = true; + account.nonce = acctFields[0].toUint(); + account.balance = acctFields[1].toUint(); + account.storageRoot = bytes32(acctFields[2].toUint()); + account.codeHash = bytes32(acctFields[3].toUint()); + + return account; + } + + /** + * @notice Verifies Merkle Patricia proof of a slot and extracts the slot's value. + * + * @param _slotHash Keccak256 hash of the slot position. + * @param _storageRootHash MPT root hash of the account's storage trie. 
+ */ + function extractSlotValueFromProof( + bytes32 _slotHash, + bytes32 _storageRootHash, + RLPReader.RLPItem[] memory _proof + ) internal pure returns (SlotValue memory) { + bytes memory valueRlpBytes = MerklePatriciaProofVerifier.extractProofValue( + _storageRootHash, + abi.encodePacked(_slotHash), + _proof + ); + + SlotValue memory value; + + if (valueRlpBytes.length != 0) { + value.exists = true; + value.value = valueRlpBytes.toRlpItem().toUint(); + } + + return value; + } +} From 266fdefc8f6ade9f1a0ae690416a02f2bad44e3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Fri, 24 Jun 2022 15:20:42 +0300 Subject: [PATCH 002/112] feat: working on L1 side of GNS L2 migration --- contracts/discovery/GNS.sol | 4 +- contracts/discovery/GNSStorage.sol | 7 + contracts/discovery/IGNS.sol | 6 + contracts/discovery/L1GNS.sol | 131 ++++++++++++++++++ contracts/l2/discovery/L2GNS.sol | 39 ++++++ .../libraries/MerklePatriciaProofVerifier.sol | 13 +- contracts/libraries/RLPReader.sol | 15 +- contracts/libraries/StateProofVerifier.sol | 7 +- 8 files changed, 207 insertions(+), 15 deletions(-) create mode 100644 contracts/discovery/L1GNS.sol create mode 100644 contracts/l2/discovery/L2GNS.sol diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index fc8802df2..fd3234ec2 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -23,7 +23,7 @@ import "./GNSStorage.sol"; * The contract implements a multicall behaviour to support batching multiple calls in a single * transaction. 
*/ -contract GNS is GNSV2Storage, GraphUpgradeable, IGNS, Multicall { +contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { using SafeMath for uint256; // -- Constants -- @@ -785,7 +785,7 @@ contract GNS is GNSV2Storage, GraphUpgradeable, IGNS, Multicall { * @param _subgraphID Subgraph ID * @return Subgraph Data */ - function _getSubgraphData(uint256 _subgraphID) private view returns (SubgraphData storage) { + function _getSubgraphData(uint256 _subgraphID) internal view returns (SubgraphData storage) { // If there is a legacy subgraph created return it LegacySubgraphKey storage legacySubgraphKey = legacySubgraphKeys[_subgraphID]; if (legacySubgraphKey.account != address(0)) { diff --git a/contracts/discovery/GNSStorage.sol b/contracts/discovery/GNSStorage.sol index 50a480777..cdf82a806 100644 --- a/contracts/discovery/GNSStorage.sol +++ b/contracts/discovery/GNSStorage.sol @@ -49,3 +49,10 @@ abstract contract GNSV2Storage is GNSV1Storage { // Contract that represents subgraph ownership through an NFT ISubgraphNFT public subgraphNFT; } + +abstract contract GNSV3Storage is GNSV2Storage { + // Block numbers and tokens for subgraphs that are locked and ready to be sent to L2 + mapping(uint256 => IGNS.SubgraphL2MigrationData) public subgraphL2MigrationData; + // Address of the counterpart GNS contract (L1GNS/L2GNS) + address counterpartGNSAddress; +} diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index 13efa1b9d..2c6f0cd04 100644 --- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -15,6 +15,12 @@ interface IGNS { uint256 withdrawableGRT; } + struct SubgraphL2MigrationData { + uint256 lockedAtBlock; // Block at which the subgraph was locked for migration + uint256 tokens; // GRT that will be sent to L2 to mint signal + bool l1Done; // Migration finished on L1 side (or subgraph deprecated) + } + struct LegacySubgraphKey { address account; uint256 accountSeqID; diff --git a/contracts/discovery/L1GNS.sol 
b/contracts/discovery/L1GNS.sol new file mode 100644 index 000000000..dca13d1e4 --- /dev/null +++ b/contracts/discovery/L1GNS.sol @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +import "@openzeppelin/contracts/math/SafeMath.sol"; +import "@openzeppelin/contracts/utils/Address.sol"; + +import "./GNS.sol"; +import "./GNSStorage.sol"; + +import "../arbitrum/ITokenGateway.sol"; +import "../l2/discovery/L2GNS.sol"; + +/** + * @title GNS + * @dev The Graph Name System contract provides a decentralized naming system for subgraphs + * used in the scope of the Graph Network. It translates Subgraphs into Subgraph Versions. + * Each version is associated with a Subgraph Deployment. The contract has no knowledge of + * human-readable names. All human readable names emitted in events. + * The contract implements a multicall behaviour to support batching multiple calls in a single + * transaction. + */ +contract L1GNS is GNS { + using SafeMath for uint256; + + event SubgraphLockedForMigrationToL2(uint256 _subgraphID); + event SubgraphSentToL2(uint256 _subgraphID); + + function lockSubgraphForMigrationToL2(uint256 _subgraphID) + external + payable + notPaused + onlySubgraphAuth(_subgraphID) + { + // Subgraph check + SubgraphData storage subgraphData = _getSubgraphOrRevert(_subgraphID); + SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; + + // Move all signal from previous version to L2 + if (subgraphData.nSignal > 0) { + // Burn all version signal in the name pool for tokens (w/no slippage protection) + // Sell all signal from the old deployment + migrationData.tokens = curation().burn( + subgraphData.subgraphDeploymentID, + subgraphData.vSignal, + 0 + ); + } + + subgraphData.disabled = true; + subgraphData.vSignal = 0; + + migrationData.lockedAtBlock = block.number; + emit SubgraphLockedForMigrationToL2(_subgraphID); + } + + /** + * @dev Send a subgraph's data and tokens to 
L2. + * The subgraph must be locked using lockSubgraphForMigrationToL2 in a previous block + * (less than 256 blocks ago). + */ + function sendSubgraphToL2( + uint256 _subgraphID, + uint256 maxGas, + uint256 gasPriceBid, + uint256 maxSubmissionCost + ) external payable notPaused onlySubgraphAuth(_subgraphID) { + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; + require( + migrationData.lockedAtBlock > 0 && + migrationData.lockedAtBlock > block.number.sub(255) && + migrationData.lockedAtBlock < block.number, + "!LOCKED" + ); + require(!migrationData.l1Done, "ALREADY_DONE"); + migrationData.l1Done = true; + + bytes memory extraData = abi.encodeWithSelector( + L2GNS.receiveSubgraphFromL1.selector, + _subgraphID, + ownerOf(_subgraphID), + migrationData.tokens, + blockhash(migrationData.lockedAtBlock), + subgraphData.nSignal, + subgraphData.reserveRatio + ); + + bytes memory data = abi.encode(maxSubmissionCost, extraData); + IGraphToken grt = graphToken(); + ITokenGateway gateway = ITokenGateway(_resolveContract(keccak256("GraphTokenGateway"))); + grt.approve(address(gateway), migrationData.tokens); + gateway.outboundTransfer{ value: msg.value }( + address(grt), + counterpartGNSAddress, + migrationData.tokens, + maxGas, + gasPriceBid, + data + ); + + subgraphData.reserveRatio = 0; + _burnNFT(_subgraphID); + emit SubgraphSentToL2(_subgraphID); + } + + /** + * @dev Deprecate a subgraph locked more than 256 blocks ago. + * This allows curators to recover their funds if the subgraph was locked + * for a migration to L2 but the subgraph was never actually sent to L2. 
+ */ + function deprecateLockedSubgraph(uint256 _subgraphID) external notPaused { + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; + require( + migrationData.lockedAtBlock > 0 && migrationData.lockedAtBlock < block.number.sub(256), + "!LOCKED" + ); + require(!migrationData.l1Done, "ALREADY_DONE"); + migrationData.l1Done = true; + + subgraphData.withdrawableGRT = migrationData.tokens; + subgraphData.reserveRatio = 0; + + // Burn the NFT + _burnNFT(_subgraphID); + + emit SubgraphDeprecated(_subgraphID, subgraphData.withdrawableGRT); + } +} diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol new file mode 100644 index 000000000..f803dc9af --- /dev/null +++ b/contracts/l2/discovery/L2GNS.sol @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +import "@openzeppelin/contracts/math/SafeMath.sol"; +import "@openzeppelin/contracts/utils/Address.sol"; + +import "../../discovery/GNS.sol"; + +/** + * @title GNS + * @dev The Graph Name System contract provides a decentralized naming system for subgraphs + * used in the scope of the Graph Network. It translates Subgraphs into Subgraph Versions. + * Each version is associated with a Subgraph Deployment. The contract has no knowledge of + * human-readable names. All human readable names emitted in events. + * The contract implements a multicall behaviour to support batching multiple calls in a single + * transaction. + */ +contract L2GNS is GNS { + /** + * @dev Checks that the sender is the L2GraphTokenGateway as configured on the Controller. 
+ */ + modifier onlyL2Gateway() { + require(msg.sender == _resolveContract(keccak256("GraphTokenGateway")), "ONLY_GATEWAY"); + _; + } + + function receiveSubgraphFromL1( + uint256 _subgraphID, + address owner, + uint256 tokens, + bytes32 lockedAtBlockhash, + uint256 nSignal, + uint32 reserveRatio + ) external onlyL2Gateway { + // TODO + } +} diff --git a/contracts/libraries/MerklePatriciaProofVerifier.sol b/contracts/libraries/MerklePatriciaProofVerifier.sol index 7a6e02162..c2704b67b 100644 --- a/contracts/libraries/MerklePatriciaProofVerifier.sol +++ b/contracts/libraries/MerklePatriciaProofVerifier.sol @@ -7,6 +7,8 @@ * MODIFIED from lidofinance's implementation: * - Changed solidity version to 0.7.6 (pablo@edgeandnode.com) * - Using local copy of the RLPReader library instead of using the package + * - Silenced linter warnings about inline assembly + * - Renamed a variable for mixedCase consistency */ /** @@ -196,6 +198,7 @@ library MerklePatriciaProofVerifier { } uint8 b; uint256 memPtr = item.memPtr; + // solhint-disable-next-line no-inline-assembly assembly { b := byte(0, mload(memPtr)) } @@ -208,18 +211,18 @@ library MerklePatriciaProofVerifier { returns (bool isLeaf, bytes memory nibbles) { require(compact.length > 0); - uint256 first_nibble = (uint8(compact[0]) >> 4) & 0xF; + uint256 firstNibble = (uint8(compact[0]) >> 4) & 0xF; uint256 skipNibbles; - if (first_nibble == 0) { + if (firstNibble == 0) { skipNibbles = 2; isLeaf = false; - } else if (first_nibble == 1) { + } else if (firstNibble == 1) { skipNibbles = 1; isLeaf = false; - } else if (first_nibble == 2) { + } else if (firstNibble == 2) { skipNibbles = 2; isLeaf = true; - } else if (first_nibble == 3) { + } else if (firstNibble == 3) { skipNibbles = 1; isLeaf = true; } else { diff --git a/contracts/libraries/RLPReader.sol b/contracts/libraries/RLPReader.sol index 6b0c2a1f9..4c3d177a3 100644 --- a/contracts/libraries/RLPReader.sol +++ b/contracts/libraries/RLPReader.sol @@ -6,6 +6,10 @@ * * (using 
edumar111's fork as it includes Solidity 0.7+ support, * which we need, and the PR is still open on hamdiallam's original repo) + * + * MODIFIED from hamdiallam's implementation: + * - Explicitly marked visibility of constants + * - Silenced linter warnings about inline assembly */ /* @@ -14,12 +18,13 @@ */ pragma solidity >=0.5.0 <0.9.0; +// solhint-disable no-inline-assembly library RLPReader { - uint8 constant STRING_SHORT_START = 0x80; - uint8 constant STRING_LONG_START = 0xb8; - uint8 constant LIST_SHORT_START = 0xc0; - uint8 constant LIST_LONG_START = 0xf8; - uint8 constant WORD_SIZE = 32; + uint8 public constant STRING_SHORT_START = 0x80; + uint8 public constant STRING_LONG_START = 0xb8; + uint8 public constant LIST_SHORT_START = 0xc0; + uint8 public constant LIST_LONG_START = 0xf8; + uint8 public constant WORD_SIZE = 32; struct RLPItem { uint256 len; diff --git a/contracts/libraries/StateProofVerifier.sol b/contracts/libraries/StateProofVerifier.sol index f9b52c67b..efacf94c3 100644 --- a/contracts/libraries/StateProofVerifier.sol +++ b/contracts/libraries/StateProofVerifier.sol @@ -7,6 +7,7 @@ * MODIFIED from lidofinance's implementation: * - Changed solidity version to 0.7.6 (pablo@edgeandnode.com) * - Using local copy of the RLPReader library instead of using the package + * - Explicitly marked visibility of constants */ pragma solidity 0.7.6; @@ -21,9 +22,9 @@ library StateProofVerifier { using RLPReader for RLPReader.RLPItem; using RLPReader for bytes; - uint256 constant HEADER_STATE_ROOT_INDEX = 3; - uint256 constant HEADER_NUMBER_INDEX = 8; - uint256 constant HEADER_TIMESTAMP_INDEX = 11; + uint256 public constant HEADER_STATE_ROOT_INDEX = 3; + uint256 public constant HEADER_NUMBER_INDEX = 8; + uint256 public constant HEADER_TIMESTAMP_INDEX = 11; struct BlockHeader { bytes32 hash; From 498cd38f568efe2bbf605f4d0b87a9e44df1f3d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Mon, 27 Jun 2022 14:45:25 +0300 Subject: [PATCH 
003/112] feat: still more WIP on GNS migration --- cli/commands/migrate.ts | 4 +- cli/contracts.ts | 12 ++- config/graph.arbitrum-one.yml | 6 +- config/graph.mainnet.yml | 6 +- contracts/curation/Curation.sol | 62 +++++++++++++ contracts/curation/ICuration.sol | 7 ++ contracts/discovery/GNS.sol | 24 ++++- contracts/discovery/GNSStorage.sol | 2 +- contracts/discovery/IGNS.sol | 7 ++ contracts/discovery/ISubgraphNFT.sol | 2 + contracts/discovery/L1GNS.sol | 51 +++++++---- contracts/discovery/SubgraphNFT.sol | 11 +++ contracts/governance/Managed.sol | 1 + contracts/l2/discovery/L2GNS.sol | 112 ++++++++++++++++++++++-- contracts/l2/discovery/L2GNSStorage.sol | 13 +++ test/lib/deployment.ts | 37 +++++++- test/lib/fixtures.ts | 14 ++- 17 files changed, 327 insertions(+), 44 deletions(-) create mode 100644 contracts/l2/discovery/L2GNSStorage.sol diff --git a/cli/commands/migrate.ts b/cli/commands/migrate.ts index 335b6fa32..3756f2b69 100644 --- a/cli/commands/migrate.ts +++ b/cli/commands/migrate.ts @@ -29,7 +29,7 @@ let allContracts = [ 'Curation', 'SubgraphNFTDescriptor', 'SubgraphNFT', - 'GNS', + 'L1GNS', 'Staking', 'RewardsManager', 'DisputeManager', @@ -49,7 +49,7 @@ const l2Contracts = [ 'Curation', 'SubgraphNFTDescriptor', 'SubgraphNFT', - 'GNS', + 'L2GNS', 'Staking', 'RewardsManager', 'DisputeManager', diff --git a/cli/contracts.ts b/cli/contracts.ts index c36e83567..883473252 100644 --- a/cli/contracts.ts +++ b/cli/contracts.ts @@ -23,6 +23,8 @@ import { ServiceRegistry } from '../build/types/ServiceRegistry' import { Curation } from '../build/types/Curation' import { RewardsManager } from '../build/types/RewardsManager' import { GNS } from '../build/types/GNS' +import { L1GNS } from '../build/types/L1GNS' +import { L2GNS } from '../build/types/L2GNS' import { GraphProxyAdmin } from '../build/types/GraphProxyAdmin' import { GraphToken } from '../build/types/GraphToken' import { Controller } from '../build/types/Controller' @@ -45,7 +47,7 @@ export interface 
NetworkContracts { ServiceRegistry: ServiceRegistry Curation: Curation RewardsManager: RewardsManager - GNS: GNS + GNS: GNS | L1GNS | L2GNS GraphProxyAdmin: GraphProxyAdmin GraphToken: GraphToken Controller: Controller @@ -60,6 +62,8 @@ export interface NetworkContracts { BridgeEscrow: BridgeEscrow L2GraphToken: L2GraphToken L2GraphTokenGateway: L2GraphTokenGateway + L1GNS: L1GNS + L2GNS: L2GNS } export const loadAddressBookContract = ( @@ -101,6 +105,12 @@ export const loadContracts = ( if (chainIdIsL2(chainId) && contractName == 'L2GraphToken') { contracts['GraphToken'] = contracts[contractName] } + if (signerOrProvider && chainIdIsL2(chainId) && contractName == 'L2GNS') { + contracts['GNS'] = contracts[contractName] + } + if (signerOrProvider && !chainIdIsL2(chainId) && contractName == 'L1GNS') { + contracts['GNS'] = contracts[contractName] + } } catch (err) { logger.warn(`Could not load contract ${contractName} - ${err.message}`) } diff --git a/config/graph.arbitrum-one.yml b/config/graph.arbitrum-one.yml index 49c7b0b13..1e2750b50 100644 --- a/config/graph.arbitrum-one.yml +++ b/config/graph.arbitrum-one.yml @@ -14,7 +14,7 @@ contracts: contractAddress: "${{Curation.address}}" - fn: "setContractProxy" id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') - contractAddress: "${{GNS.address}}" + contractAddress: "${{L2GNS.address}}" - fn: "setContractProxy" id: "0xf942813d07d17b56de9a9afc8de0ced6e8c053bbfdcc87b7badea4ddcf27c307" # keccak256('DisputeManager') contractAddress: "${{DisputeManager.address}}" @@ -82,7 +82,7 @@ contracts: qrySlashingPercentage: 25000 # in parts per million calls: - fn: "syncAllContracts" - GNS: + L2GNS: proxy: true init: controller: "${{Controller.address}}" @@ -98,7 +98,7 @@ contracts: - fn: "setTokenDescriptor" tokenDescriptor: "${{SubgraphNFTDescriptor.address}}" - fn: "setMinter" - minter: "${{GNS.address}}" + minter: "${{L2GNS.address}}" - fn: "transferOwnership" owner: *governor Staking: 
diff --git a/config/graph.mainnet.yml b/config/graph.mainnet.yml index 6eb08b233..36b1f49c3 100644 --- a/config/graph.mainnet.yml +++ b/config/graph.mainnet.yml @@ -14,7 +14,7 @@ contracts: contractAddress: "${{Curation.address}}" - fn: "setContractProxy" id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') - contractAddress: "${{GNS.address}}" + contractAddress: "${{L1GNS.address}}" - fn: "setContractProxy" id: "0xf942813d07d17b56de9a9afc8de0ced6e8c053bbfdcc87b7badea4ddcf27c307" # keccak256('DisputeManager') contractAddress: "${{DisputeManager.address}}" @@ -83,7 +83,7 @@ contracts: qrySlashingPercentage: 25000 # in parts per million calls: - fn: "syncAllContracts" - GNS: + L1GNS: proxy: true init: controller: "${{Controller.address}}" @@ -99,7 +99,7 @@ contracts: - fn: "setTokenDescriptor" tokenDescriptor: "${{SubgraphNFTDescriptor.address}}" - fn: "setMinter" - minter: "${{GNS.address}}" + minter: "${{L1GNS.address}}" - fn: "transferOwnership" owner: *governor Staking: diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index 565a51a89..f73e3bcfd 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -70,6 +70,11 @@ contract Curation is CurationV1Storage, GraphUpgradeable { */ event Collected(bytes32 indexed subgraphDeploymentID, uint256 tokens); + modifier onlyGNS() { + require(msg.sender == _resolveContract(keccak256("GNS")), "Only the GNS can call this"); + _; + } + /** * @dev Initialize this contract. */ @@ -267,6 +272,63 @@ contract Curation is CurationV1Storage, GraphUpgradeable { return (signalOut, curationTax); } + /** + * @dev Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. + * This function charges no tax and can only be called by GNS in specific scenarios (for now + * only during an L1-L2 migration). 
+ * @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal + * @param _tokensIn Amount of Graph Tokens to deposit + * @param _signalOutMin Expected minimum amount of signal to receive + * @return Signal minted + */ + function mintTaxFree( + bytes32 _subgraphDeploymentID, + uint256 _tokensIn, + uint256 _signalOutMin + ) external override notPartialPaused onlyGNS returns (uint256) { + // Need to deposit some funds + require(_tokensIn > 0, "Cannot deposit zero tokens"); + + // Exchange GRT tokens for GCS of the subgraph pool (no tax) + uint256 signalOut = _tokensToSignal(_subgraphDeploymentID, _tokensIn); + + // Slippage protection + require(signalOut >= _signalOutMin, "Slippage protection"); + + address curator = msg.sender; + CurationPool storage curationPool = pools[_subgraphDeploymentID]; + + // If it hasn't been curated before then initialize the curve + if (!isCurated(_subgraphDeploymentID)) { + curationPool.reserveRatio = defaultReserveRatio; + + // If no signal token for the pool - create one + if (address(curationPool.gcs) == address(0)) { + // Use a minimal proxy to reduce gas cost + IGraphCurationToken gcs = IGraphCurationToken(Clones.clone(curationTokenMaster)); + gcs.initialize(address(this)); + curationPool.gcs = gcs; + } + } + + // Trigger update rewards calculation snapshot + _updateRewards(_subgraphDeploymentID); + + // Transfer tokens from the curator to this contract + // NOTE: This needs to happen after _updateRewards snapshot as that function + // is using balanceOf(curation) + IGraphToken _graphToken = graphToken(); + TokenUtils.pullTokens(_graphToken, curator, _tokensIn); + + // Update curation pool + curationPool.tokens = curationPool.tokens.add(_tokensIn); + curationPool.gcs.mint(curator, signalOut); + + emit Signalled(curator, _subgraphDeploymentID, _tokensIn, signalOut, 0); + + return signalOut; + } + /** * @dev Return an amount of signal to get tokens back. 
* @notice Burn _signal from the SubgraphDeployment curation pool diff --git a/contracts/curation/ICuration.sol b/contracts/curation/ICuration.sol index 9e1701aaf..244b098de 100644 --- a/contracts/curation/ICuration.sol +++ b/contracts/curation/ICuration.sol @@ -23,6 +23,13 @@ interface ICuration { uint256 _signalOutMin ) external returns (uint256, uint256); + // Callable only by GNS in specific scenarios + function mintTaxFree( + bytes32 _subgraphDeploymentID, + uint256 _tokensIn, + uint256 _signalOutMin + ) external returns (uint256); + function burn( bytes32 _subgraphDeploymentID, uint256 _signalIn, diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index fd3234ec2..f7f79aec1 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -717,7 +717,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { require(legacySubgraphExists == true, "GNS: Subgraph does not exist"); // Must not be a claimed subgraph - uint256 subgraphID = _buildSubgraphID(_graphAccount, _subgraphNumber); + uint256 subgraphID = _buildLegacySubgraphID(_graphAccount, _subgraphNumber); require( legacySubgraphKeys[subgraphID].account == address(0), "GNS: Subgraph was already claimed" @@ -752,13 +752,33 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { /** * @dev Build a subgraph ID based on the account creating it and a sequence number for that account. + * Only used for legacy subgraphs being migrated, as new ones will also use the chainid. * Subgraph ID is the keccak hash of account+seqID * @return Subgraph ID */ - function _buildSubgraphID(address _account, uint256 _seqID) internal pure returns (uint256) { + function _buildLegacySubgraphID(address _account, uint256 _seqID) + internal + pure + returns (uint256) + { return uint256(keccak256(abi.encodePacked(_account, _seqID))); } + /** + * @dev Build a subgraph ID based on the account creating it and a sequence number for that account. 
+ * Subgraph ID is the keccak hash of account+seqID + * @return Subgraph ID + */ + function _buildSubgraphID(address _account, uint256 _seqID) internal pure returns (uint256) { + uint256 chainId; + // Too bad solidity 0.7.6 still doesn't have block.chainid + // solhint-disable-next-line no-inline-assembly + assembly { + chainId := chainid() + } + return uint256(keccak256(abi.encodePacked(_account, _seqID, chainId))); + } + /** * @dev Return the next subgraphID given the account that is creating the subgraph. * NOTE: This function updates the sequence ID for the account diff --git a/contracts/discovery/GNSStorage.sol b/contracts/discovery/GNSStorage.sol index cdf82a806..1f0229cd4 100644 --- a/contracts/discovery/GNSStorage.sol +++ b/contracts/discovery/GNSStorage.sol @@ -54,5 +54,5 @@ abstract contract GNSV3Storage is GNSV2Storage { // Block numbers and tokens for subgraphs that are locked and ready to be sent to L2 mapping(uint256 => IGNS.SubgraphL2MigrationData) public subgraphL2MigrationData; // Address of the counterpart GNS contract (L1GNS/L2GNS) - address counterpartGNSAddress; + address public counterpartGNSAddress; } diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index 2c6f0cd04..568190107 100644 --- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -21,6 +21,13 @@ interface IGNS { bool l1Done; // Migration finished on L1 side (or subgraph deprecated) } + struct MigratedSubgraphData { + bytes32 lockedAtBlockHash; + mapping(address => bool) curatorBalanceClaimed; + uint256 tokens; + bool l2Done; + } + struct LegacySubgraphKey { address account; uint256 accountSeqID; diff --git a/contracts/discovery/ISubgraphNFT.sol b/contracts/discovery/ISubgraphNFT.sol index 4b0495a28..bf0cb2bfe 100644 --- a/contracts/discovery/ISubgraphNFT.sol +++ b/contracts/discovery/ISubgraphNFT.sol @@ -22,4 +22,6 @@ interface ISubgraphNFT is IERC721 { function setSubgraphMetadata(uint256 _tokenId, bytes32 _subgraphMetadata) external; function 
tokenURI(uint256 _tokenId) external view returns (string memory); + + function getSubgraphMetadata(uint256 _tokenId) external view returns (bytes32); } diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index dca13d1e4..a4539d0b3 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -37,16 +37,17 @@ contract L1GNS is GNS { SubgraphData storage subgraphData = _getSubgraphOrRevert(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; - // Move all signal from previous version to L2 - if (subgraphData.nSignal > 0) { - // Burn all version signal in the name pool for tokens (w/no slippage protection) - // Sell all signal from the old deployment - migrationData.tokens = curation().burn( - subgraphData.subgraphDeploymentID, - subgraphData.vSignal, - 0 - ); - } + // Lock the subgraph so no more signal can be minted or burned. + // This can only be done for subgraphs that have nonzero signal. + require(subgraphData.nSignal > 0, "!SIGNAL"); + + // Burn all version signal in the name pool for tokens (w/no slippage protection) + // Sell all signal from the old deployment + migrationData.tokens = curation().burn( + subgraphData.subgraphDeploymentID, + subgraphData.vSignal, + 1 // We do check that the output must be nonzero... 
+ ); subgraphData.disabled = true; subgraphData.vSignal = 0; @@ -70,21 +71,17 @@ contract L1GNS is GNS { SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; require( migrationData.lockedAtBlock > 0 && - migrationData.lockedAtBlock > block.number.sub(255) && + migrationData.lockedAtBlock >= block.number.sub(255) && migrationData.lockedAtBlock < block.number, "!LOCKED" ); require(!migrationData.l1Done, "ALREADY_DONE"); migrationData.l1Done = true; - bytes memory extraData = abi.encodeWithSelector( - L2GNS.receiveSubgraphFromL1.selector, + bytes memory extraData = encodeSubgraphMetadataForL2( _subgraphID, - ownerOf(_subgraphID), - migrationData.tokens, - blockhash(migrationData.lockedAtBlock), - subgraphData.nSignal, - subgraphData.reserveRatio + migrationData, + subgraphData ); bytes memory data = abi.encode(maxSubmissionCost, extraData); @@ -105,6 +102,24 @@ contract L1GNS is GNS { emit SubgraphSentToL2(_subgraphID); } + function encodeSubgraphMetadataForL2( + uint256 _subgraphID, + SubgraphL2MigrationData storage migrationData, + SubgraphData storage subgraphData + ) internal view returns (bytes memory) { + return + abi.encodeWithSelector( + L2GNS.receiveSubgraphFromL1.selector, + _subgraphID, + ownerOf(_subgraphID), + migrationData.tokens, + blockhash(migrationData.lockedAtBlock), + subgraphData.nSignal, + subgraphData.reserveRatio, + subgraphNFT.getSubgraphMetadata(_subgraphID) + ); + } + /** * @dev Deprecate a subgraph locked more than 256 blocks ago. * This allows curators to recover their funds if the subgraph was locked diff --git a/contracts/discovery/SubgraphNFT.sol b/contracts/discovery/SubgraphNFT.sol index c6dadaa81..8da3e9e61 100644 --- a/contracts/discovery/SubgraphNFT.sol +++ b/contracts/discovery/SubgraphNFT.sol @@ -164,4 +164,15 @@ contract SubgraphNFT is Governed, ERC721, ISubgraphNFT { // If there is a baseURI but no tokenURI, concatenate the tokenID to the baseURI. 
return string(abi.encodePacked(base, HexStrings.toString(_tokenId))); } + + /** + * @notice Get the metadata for a subgraph represented by `_tokenId`. + * @dev `_tokenId` must exist. + * @param _tokenId ID of the NFT + * @return IPFS hash for the metadata + */ + function getSubgraphMetadata(uint256 _tokenId) external view override returns (bytes32) { + require(_exists(_tokenId), "ERC721Metadata: URI set of nonexistent token"); + return _subgraphMetadataHashes[_tokenId]; + } } diff --git a/contracts/governance/Managed.sol b/contracts/governance/Managed.sol index 6b8fe624e..568acd0e8 100644 --- a/contracts/governance/Managed.sol +++ b/contracts/governance/Managed.sol @@ -228,5 +228,6 @@ abstract contract Managed is IManaged { _syncContract("Staking"); _syncContract("GraphToken"); _syncContract("GraphTokenGateway"); + _syncContract("GNS"); } } diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index f803dc9af..36e4bfc4a 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -7,6 +7,7 @@ import "@openzeppelin/contracts/math/SafeMath.sol"; import "@openzeppelin/contracts/utils/Address.sol"; import "../../discovery/GNS.sol"; +import "./L2GNSStorage.sol"; /** * @title GNS @@ -17,7 +18,10 @@ import "../../discovery/GNS.sol"; * The contract implements a multicall behaviour to support batching multiple calls in a single * transaction. */ -contract L2GNS is GNS { +contract L2GNS is GNS, L2GNSV1Storage { + event SubgraphReceivedFromL1(uint256 _subgraphID); + event SubgraphMigrationFinalized(uint256 _subgraphID); + /** * @dev Checks that the sender is the L2GraphTokenGateway as configured on the Controller. 
*/ @@ -27,13 +31,109 @@ contract L2GNS is GNS { } function receiveSubgraphFromL1( - uint256 _subgraphID, - address owner, + uint256 subgraphID, + address subgraphOwner, uint256 tokens, - bytes32 lockedAtBlockhash, + bytes32 lockedAtBlockHash, uint256 nSignal, - uint32 reserveRatio - ) external onlyL2Gateway { + uint32 reserveRatio, + bytes32 subgraphMetadata + ) external notPartialPaused onlyL2Gateway { + IGNS.MigratedSubgraphData storage migratedData = migratedSubgraphData[subgraphID]; + SubgraphData storage subgraphData = subgraphs[subgraphID]; + + subgraphData.reserveRatio = reserveRatio; + // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called + subgraphData.disabled = true; + subgraphData.nSignal = nSignal; + + migratedData.tokens = tokens; + migratedData.lockedAtBlockHash = lockedAtBlockHash; + + // Mint the NFT. Use the subgraphID as tokenID. + // This function will check the if tokenID already exists. + _mintNFT(subgraphOwner, subgraphID); + + // Set the token metadata + _setSubgraphMetadata(subgraphID, subgraphMetadata); + emit SubgraphReceivedFromL1(subgraphID); + } + + function finishSubgraphMigrationFromL1( + uint256 _subgraphID, + bytes32 _subgraphDeploymentID, + bytes32 _versionMetadata + ) external notPartialPaused onlySubgraphAuth(_subgraphID) { + IGNS.MigratedSubgraphData storage migratedData = migratedSubgraphData[_subgraphID]; + SubgraphData storage subgraphData = subgraphs[_subgraphID]; + // A subgraph + require(migratedData.tokens > 0, "INVALID_SUBGRAPH"); + require(!migratedData.l2Done, "ALREADY_DONE"); + migratedData.l2Done = true; + + // New subgraph deployment must be non-empty + require(_subgraphDeploymentID != 0, "GNS: Cannot set deploymentID to 0"); + + // This is to prevent the owner from front running its name curators signal by posting + // its own signal ahead, bringing the name curators in, and dumping on them + ICuration curation = curation(); + require( + !curation.isCurated(_subgraphDeploymentID), + "GNS: 
Owner cannot point to a subgraphID that has been pre-curated" + ); + + // Update pool: constant nSignal, vSignal can change (w/no slippage protection) + // Buy all signal from the new deployment + subgraphData.vSignal = curation.mintTaxFree(_subgraphDeploymentID, migratedData.tokens, 0); + + emit SubgraphPublished(_subgraphID, _subgraphDeploymentID, subgraphData.reserveRatio); + emit SubgraphUpgraded( + _subgraphID, + subgraphData.vSignal, + migratedData.tokens, + _subgraphDeploymentID + ); + // Update target deployment + subgraphData.subgraphDeploymentID = _subgraphDeploymentID; + emit SubgraphVersionUpdated(_subgraphID, _subgraphDeploymentID, _versionMetadata); + emit SubgraphMigrationFinalized(_subgraphID); + } + + /** + * @dev Claim curator balance belonging to a curator from L1. + * This will be credited to the same curator's balance on L2. + * This can only be called by the corresponding curator. + * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 + * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance + */ + function claimL1CuratorBalance(bytes memory _blockHeaderRlpBytes, bytes memory _proofRlpBytes) + external + notPartialPaused + { + // TODO + } + + /** + * @dev Claim curator balance belonging to a curator from L1. + * This will be credited to the a beneficiary on L2, and a signature must be provided + * to prove the L1 curator permits this assignment. 
+ * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 + * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance + * @param _beneficiary Address of a beneficiary for the balance + * @param _deadline Expiration time of the signed permit + * @param _v Signature version + * @param _r Signature r value + * @param _s Signature s value + */ + function claimL1CuratorBalanceToBeneficiary( + bytes memory _blockHeaderRlpBytes, + bytes memory _proofRlpBytes, + address _beneficiary, + uint256 _deadline, + uint8 _v, + bytes32 _r, + bytes32 _s + ) external notPartialPaused { // TODO } } diff --git a/contracts/l2/discovery/L2GNSStorage.sol b/contracts/l2/discovery/L2GNSStorage.sol new file mode 100644 index 000000000..006f34636 --- /dev/null +++ b/contracts/l2/discovery/L2GNSStorage.sol @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; + +import "../../discovery/IGNS.sol"; + +/** + * @dev Storage variables for the L2GNS + */ +contract L2GNSV1Storage { + // Subgraph data incoming from L1 + mapping(uint256 => IGNS.MigratedSubgraphData) public migratedSubgraphData; +} diff --git a/test/lib/deployment.ts b/test/lib/deployment.ts index a6afe9dbb..6b667a12f 100644 --- a/test/lib/deployment.ts +++ b/test/lib/deployment.ts @@ -22,6 +22,8 @@ import { L1GraphTokenGateway } from '../../build/types/L1GraphTokenGateway' import { L2GraphTokenGateway } from '../../build/types/L2GraphTokenGateway' import { L2GraphToken } from '../../build/types/L2GraphToken' import { BridgeEscrow } from '../../build/types/BridgeEscrow' +import { L2GNS } from '../../build/types/L2GNS' +import { L1GNS } from '../../build/types/L1GNS' // Disable logging for tests logger.pause() @@ -155,11 +157,12 @@ export async function deployEpochManager( ) as unknown as EpochManager } -export async function deployGNS( +async function deployL1OrL2GNS( deployer: 
Signer, controller: string, proxyAdmin: GraphProxyAdmin, -): Promise { + isL2: boolean, +): Promise { // Dependency const bondingCurve = (await deployContract('BancorFormula', deployer)) as unknown as BancorFormula const subgraphDescriptor = await deployContract('SubgraphNFTDescriptor', deployer) @@ -169,10 +172,16 @@ export async function deployGNS( await deployer.getAddress(), )) as SubgraphNFT + let name: string + if (isL2) { + name = 'L2GNS' + } else { + name = 'L1GNS' + } // Deploy const proxy = (await network.deployContractWithProxy( proxyAdmin, - 'GNS', + name, [controller, bondingCurve.address, subgraphNFT.address], deployer, )) as unknown as GNS @@ -181,7 +190,27 @@ export async function deployGNS( await subgraphNFT.connect(deployer).setMinter(proxy.address) await subgraphNFT.connect(deployer).setTokenDescriptor(subgraphDescriptor.address) - return proxy + if (isL2) { + return proxy as L2GNS + } else { + return proxy as L1GNS + } +} + +export async function deployL1GNS( + deployer: Signer, + controller: string, + proxyAdmin: GraphProxyAdmin, +): Promise { + return deployL1OrL2GNS(deployer, controller, proxyAdmin, false) as unknown as L1GNS +} + +export async function deployL2GNS( + deployer: Signer, + controller: string, + proxyAdmin: GraphProxyAdmin, +): Promise { + return deployL1OrL2GNS(deployer, controller, proxyAdmin, true) as unknown as L2GNS } export async function deployServiceRegistry( diff --git a/test/lib/fixtures.ts b/test/lib/fixtures.ts index 8375a86a4..e063afa8d 100644 --- a/test/lib/fixtures.ts +++ b/test/lib/fixtures.ts @@ -12,7 +12,8 @@ import { DisputeManager } from '../../build/types/DisputeManager' import { EpochManager } from '../../build/types/EpochManager' import { GraphToken } from '../../build/types/GraphToken' import { Curation } from '../../build/types/Curation' -import { GNS } from '../../build/types/GNS' +import { L1GNS } from '../../build/types/L1GNS' +import { L2GNS } from '../../build/types/L2GNS' import { Staking } from 
'../../build/types/Staking' import { RewardsManager } from '../../build/types/RewardsManager' import { ServiceRegistry } from '../../build/types/ServiceRegistry' @@ -28,7 +29,7 @@ export interface L1FixtureContracts { epochManager: EpochManager grt: GraphToken curation: Curation - gns: GNS + gns: L1GNS staking: Staking rewardsManager: RewardsManager serviceRegistry: ServiceRegistry @@ -43,7 +44,7 @@ export interface L2FixtureContracts { epochManager: EpochManager grt: L2GraphToken curation: Curation - gns: GNS + gns: L2GNS staking: Staking rewardsManager: RewardsManager serviceRegistry: ServiceRegistry @@ -92,7 +93,12 @@ export class NetworkFixture { } const curation = await deployment.deployCuration(deployer, controller.address, proxyAdmin) - const gns = await deployment.deployGNS(deployer, controller.address, proxyAdmin) + let gns: L1GNS | L2GNS + if (isL2) { + gns = await deployment.deployL2GNS(deployer, controller.address, proxyAdmin) + } else { + gns = await deployment.deployL1GNS(deployer, controller.address, proxyAdmin) + } const staking = await deployment.deployStaking(deployer, controller.address, proxyAdmin) const disputeManager = await deployment.deployDisputeManager( deployer, From a0e90b2f7388291d4ac9ec5f9d7c6cecadd8e3c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Mon, 27 Jun 2022 19:49:43 +0300 Subject: [PATCH 004/112] test: fix subgraph IDs now we add chainID --- test/gns.test.ts | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/test/gns.test.ts b/test/gns.test.ts index 1ee46002a..fe83a6f54 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -8,7 +8,7 @@ import { GraphToken } from '../build/types/GraphToken' import { Curation } from '../build/types/Curation' import { SubgraphNFT } from '../build/types/SubgraphNFT' -import { getAccounts, randomHexBytes, Account, toGRT } from './lib/testHelpers' +import { getAccounts, randomHexBytes, Account, toGRT, getChainID } from 
'./lib/testHelpers' import { NetworkFixture } from './lib/fixtures' import { toBN, formatGRT } from './lib/testHelpers' import { getContractAt } from '../cli/network' @@ -42,8 +42,8 @@ interface AccountDefaultName { const DEFAULT_RESERVE_RATIO = 1000000 const toFloat = (n: BigNumber) => parseFloat(formatGRT(n)) const toRound = (n: number) => n.toFixed(12) -const buildSubgraphID = (account: string, seqID: BigNumber): string => - solidityKeccak256(['address', 'uint256'], [account, seqID]) +const buildSubgraphID = async (account: string, seqID: BigNumber): Promise => + solidityKeccak256(['address', 'uint256', 'uint256'], [account, seqID, await getChainID()]) describe('GNS', () => { let me: Account @@ -146,7 +146,10 @@ describe('GNS', () => { account: Account, newSubgraph: PublishSubgraph, // Defaults to subgraph created in before() ): Promise => { - const subgraphID = buildSubgraphID(account.address, await gns.nextAccountSeqID(account.address)) + const subgraphID = await buildSubgraphID( + account.address, + await gns.nextAccountSeqID(account.address), + ) // Send tx const tx = gns @@ -615,7 +618,7 @@ describe('GNS', () => { describe('isPublished', function () { it('should return if the subgraph is published', async function () { - const subgraphID = buildSubgraphID(me.address, toBN(0)) + const subgraphID = await buildSubgraphID(me.address, toBN(0)) expect(await gns.isPublished(subgraphID)).eq(false) await publishNewSubgraph(me, newSubgraph0) expect(await gns.isPublished(subgraphID)).eq(true) @@ -1014,7 +1017,7 @@ describe('GNS', () => { newSubgraph0.subgraphMetadata, ) // Curate on the subgraph - const subgraphID = buildSubgraphID(me.address, await gns.nextAccountSeqID(me.address)) + const subgraphID = await buildSubgraphID(me.address, await gns.nextAccountSeqID(me.address)) const tx2 = await gns.populateTransaction.mintSignal(subgraphID, toGRT('90000'), 0) // Batch send transaction From 9efb72cf2f66d1bcb095d5fa19a478f6a5e169b0 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Wed, 29 Jun 2022 13:44:41 +0300 Subject: [PATCH 005/112] feat: functions to claim curator balance --- contracts/discovery/GNS.sol | 14 ++++ contracts/discovery/L1GNS.sol | 59 +++++++++++++- contracts/l2/discovery/L2GNS.sol | 136 +++++++++++++++++++++++++------ 3 files changed, 185 insertions(+), 24 deletions(-) diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index f7f79aec1..073537a2d 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -132,6 +132,11 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { uint256 withdrawnGRT ); + /** + * @dev Emitted when the counterpart (L1/L2) GNS address is updated + */ + event CounterpartGNSAddressUpdated(address _counterpart); + // -- Modifiers -- /** @@ -219,6 +224,15 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { emit SubgraphNFTUpdated(_subgraphNFT); } + /** + * @dev Set the counterpart (L1/L2) GNS address + * @param _counterpart Owner tax percentage + */ + function setCounterpartGNSAddress(address _counterpart) external onlyGovernor { + counterpartGNSAddress = _counterpart; + emit CounterpartGNSAddressUpdated(_counterpart); + } + // -- Actions -- /** diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index a4539d0b3..1fa97ad1f 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -8,8 +8,10 @@ import "@openzeppelin/contracts/utils/Address.sol"; import "./GNS.sol"; import "./GNSStorage.sol"; +import "./L1GNSStorage.sol"; import "../arbitrum/ITokenGateway.sol"; +import "../arbitrum/L1ArbitrumMessenger.sol"; import "../l2/discovery/L2GNS.sol"; /** @@ -21,11 +23,21 @@ import "../l2/discovery/L2GNS.sol"; * The contract implements a multicall behaviour to support batching multiple calls in a single * transaction. 
*/ -contract L1GNS is GNS { +contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { using SafeMath for uint256; event SubgraphLockedForMigrationToL2(uint256 _subgraphID); event SubgraphSentToL2(uint256 _subgraphID); + event ArbitrumInboxAddressSet(address _inbox); + + /** + * @dev sets the addresses for L1 inbox provided by Arbitrum + * @param _inbox Address of the Inbox that is part of the Arbitrum Bridge + */ + function setArbitrumInboxAddress(address _inbox) external onlyGovernor { + arbitrumInboxAddress = _inbox; + emit ArbitrumInboxAddressSet(_inbox); + } function lockSubgraphForMigrationToL2(uint256 _subgraphID) external @@ -143,4 +155,49 @@ contract L1GNS is GNS { emit SubgraphDeprecated(_subgraphID, subgraphData.withdrawableGRT); } + + function claimCuratorBalanceToBeneficiaryOnL2( + uint256 _subgraphID, + address _beneficiary, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost + ) external payable notPaused returns (bytes memory) { + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; + + require(migrationData.l1Done, "!MIGRATED"); + require(subgraphData.withdrawableGRT == 0, "DEPRECATED"); + + require(_maxSubmissionCost > 0, "NO_SUBMISSION_COST"); + + { + // makes sure only sufficient ETH is supplied required for successful redemption on L2 + // if a user does not desire immediate redemption they should provide + // a msg.value of AT LEAST maxSubmissionCost + uint256 expectedEth = _maxSubmissionCost + (_maxGas * _gasPriceBid); + require(msg.value == expectedEth, "WRONG_ETH_VALUE"); + } + L2GasParams memory gasParams = L2GasParams(_maxSubmissionCost, _maxGas, _gasPriceBid); + + bytes memory outboundCalldata = abi.encodeWithSelector( + L2GNS.claimL1CuratorBalanceToBeneficiary.selector, + _subgraphID, + msg.sender, + subgraphData.curatorNSignal[msg.sender], + _beneficiary + ); + + uint256 seqNum = sendTxToL2( + 
arbitrumInboxAddress, + counterpartGNSAddress, + msg.sender, + msg.value, + 0, + gasParams, + outboundCalldata + ); + + return abi.encode(seqNum); + } } diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 36e4bfc4a..8a52246d2 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -9,6 +9,9 @@ import "@openzeppelin/contracts/utils/Address.sol"; import "../../discovery/GNS.sol"; import "./L2GNSStorage.sol"; +import { RLPReader } from "../../libraries/RLPReader.sol"; +import { StateProofVerifier as Verifier } from "../../libraries/StateProofVerifier.sol"; + /** * @title GNS * @dev The Graph Name System contract provides a decentralized naming system for subgraphs @@ -19,8 +22,22 @@ import "./L2GNSStorage.sol"; * transaction. */ contract L2GNS is GNS, L2GNSV1Storage { + using RLPReader for bytes; + using RLPReader for RLPReader.RLPItem; + using SafeMath for uint256; + + // Offset applied by the bridge to L1 addresses sending messages to L2 + uint160 internal constant L2_ADDRESS_OFFSET = + uint160(0x1111000000000000000000000000000000001111); + event SubgraphReceivedFromL1(uint256 _subgraphID); event SubgraphMigrationFinalized(uint256 _subgraphID); + event CuratorBalanceClaimed( + uint256 _subgraphID, + address _l1Curator, + address _l2Curator, + uint256 _nSignalClaimed + ); /** * @dev Checks that the sender is the L2GraphTokenGateway as configured on the Controller. @@ -30,6 +47,15 @@ contract L2GNS is GNS, L2GNSV1Storage { _; } + /** + * @dev Checks that the sender is the L2 alias of the counterpart + * GNS on L1. + */ + modifier onlyL1Counterpart() { + require(msg.sender == l1ToL2Alias(counterpartGNSAddress), "ONLY_COUNTERPART_GNS"); + _; + } + function receiveSubgraphFromL1( uint256 subgraphID, address subgraphOwner, @@ -103,37 +129,101 @@ contract L2GNS is GNS, L2GNSV1Storage { * @dev Claim curator balance belonging to a curator from L1. * This will be credited to the same curator's balance on L2. 
* This can only be called by the corresponding curator. + * @param _subgraphID Subgraph for which to claim a balance * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance */ - function claimL1CuratorBalance(bytes memory _blockHeaderRlpBytes, bytes memory _proofRlpBytes) - external - notPartialPaused - { - // TODO + function claimL1CuratorBalance( + uint256 _subgraphID, + bytes memory _blockHeaderRlpBytes, + bytes memory _proofRlpBytes + ) external notPartialPaused { + Verifier.BlockHeader memory blockHeader = Verifier.parseBlockHeader(_blockHeaderRlpBytes); + IGNS.MigratedSubgraphData storage migratedData = migratedSubgraphData[_subgraphID]; + + require(migratedData.l2Done, "!MIGRATED"); + require(blockHeader.hash == migratedData.lockedAtBlockHash, "!BLOCKHASH"); + require(!migratedData.curatorBalanceClaimed[msg.sender], "ALREADY_CLAIMED"); + + RLPReader.RLPItem[] memory proofs = _proofRlpBytes.toRlpItem().toList(); + require(proofs.length == 2, "!N_PROOFS"); + + Verifier.Account memory l1GNSAccount = Verifier.extractAccountFromProof( + keccak256(abi.encodePacked(counterpartGNSAddress)), + blockHeader.stateRootHash, + proofs[0].toList() + ); + + require(l1GNSAccount.exists, "!ACCOUNT"); + + // subgraphs mapping at slot 7. 
+ // So our subgraph is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(7))) + // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, + // So the mapping is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(7))) + 2 + // Therefore the nSignal value for msg.sender should be at slot: + uint256 curatorSlot = uint256( + keccak256( + abi.encodePacked( + uint256(msg.sender), + uint256( + uint256(keccak256(abi.encodePacked(uint256(_subgraphID), uint256(7)))) + 2 + ) + ) + ) + ); + + Verifier.SlotValue memory curatorNSignalSlot = Verifier.extractSlotValueFromProof( + keccak256(abi.encodePacked(curatorSlot)), + l1GNSAccount.storageRoot, + proofs[1].toList() + ); + + require(curatorNSignalSlot.exists, "!CURATOR_SLOT"); + + SubgraphData storage subgraphData = subgraphs[_subgraphID]; + subgraphData.curatorNSignal[msg.sender] = subgraphData.curatorNSignal[msg.sender].add( + curatorNSignalSlot.value + ); + migratedData.curatorBalanceClaimed[msg.sender] = true; + + emit CuratorBalanceClaimed(_subgraphID, msg.sender, msg.sender, curatorNSignalSlot.value); } /** * @dev Claim curator balance belonging to a curator from L1. - * This will be credited to the a beneficiary on L2, and a signature must be provided - * to prove the L1 curator permits this assignment. - * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 - * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance - * @param _beneficiary Address of a beneficiary for the balance - * @param _deadline Expiration time of the signed permit - * @param _v Signature version - * @param _r Signature r value - * @param _s Signature s value + * This will be credited to the a beneficiary on L2, and can only be called + * from the GNS on L1 through a retryable ticket. 
+ * @param _subgraphID Subgraph on which to claim the balance + * @param _curator Curator who owns the balance on L1 + * @param _balance Balance of the curator from L1 + * @param _beneficiary Address of an L2 beneficiary for the balance */ function claimL1CuratorBalanceToBeneficiary( - bytes memory _blockHeaderRlpBytes, - bytes memory _proofRlpBytes, - address _beneficiary, - uint256 _deadline, - uint8 _v, - bytes32 _r, - bytes32 _s - ) external notPartialPaused { - // TODO + uint256 _subgraphID, + address _curator, + uint256 _balance, + address _beneficiary + ) external notPartialPaused onlyL1Counterpart { + GNS.MigratedSubgraphData storage migratedData = migratedSubgraphData[_subgraphID]; + + require(migratedData.l2Done, "!MIGRATED"); + require(!migratedData.curatorBalanceClaimed[_curator], "ALREADY_CLAIMED"); + + SubgraphData storage subgraphData = subgraphs[_subgraphID]; + subgraphData.curatorNSignal[_beneficiary] = subgraphData.curatorNSignal[_beneficiary].add( + _balance + ); + migratedData.curatorBalanceClaimed[_curator] = true; + } + + /** + * @notice Converts L1 address to its L2 alias used when sending messages + * @dev The Arbitrum bridge adds an offset to addresses when sending messages, + * so we need to apply it to check any L1 address from a message in L2 + * @param _l1Address The L1 address + * @return _l2Address the L2 alias of _l1Address + */ + function l1ToL2Alias(address _l1Address) internal pure returns (address _l2Address) { + _l2Address = address(uint160(_l1Address) + L2_ADDRESS_OFFSET); } } From 55fe96afdd13cccf83b3d9d08ea90184337217cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Wed, 29 Jun 2022 14:25:45 +0300 Subject: [PATCH 006/112] test: add tests for mintTaxFree --- contracts/curation/Curation.sol | 16 ++++ contracts/curation/ICuration.sol | 5 ++ contracts/discovery/L1GNSStorage.sol | 8 ++ test/curation/curation.test.ts | 116 ++++++++++++++++++++++++++- test/lib/testHelpers.ts | 10 ++- 5 files changed, 
148 insertions(+), 7 deletions(-) create mode 100644 contracts/discovery/L1GNSStorage.sol diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index f73e3bcfd..7efef53e4 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -452,6 +452,22 @@ contract Curation is CurationV1Storage, GraphUpgradeable { return (signalOut, curationTax); } + /** + * @dev Calculate amount of signal that can be bought with tokens in a curation pool, + * without accounting for curation tax. + * @param _subgraphDeploymentID Subgraph deployment to mint signal + * @param _tokensIn Amount of tokens used to mint signal + * @return Amount of signal that can be bought and tokens subtracted for the tax + */ + function tokensToSignalNoTax(bytes32 _subgraphDeploymentID, uint256 _tokensIn) + public + view + override + returns (uint256) + { + return _tokensToSignal(_subgraphDeploymentID, _tokensIn); + } + /** * @dev Calculate amount of signal that can be bought with tokens in a curation pool. 
* @param _subgraphDeploymentID Subgraph deployment to mint signal diff --git a/contracts/curation/ICuration.sol b/contracts/curation/ICuration.sol index 244b098de..1da4252cc 100644 --- a/contracts/curation/ICuration.sol +++ b/contracts/curation/ICuration.sol @@ -56,6 +56,11 @@ interface ICuration { view returns (uint256, uint256); + function tokensToSignalNoTax(bytes32 _subgraphDeploymentID, uint256 _tokensIn) + external + view + returns (uint256); + function signalToTokens(bytes32 _subgraphDeploymentID, uint256 _signalIn) external view diff --git a/contracts/discovery/L1GNSStorage.sol b/contracts/discovery/L1GNSStorage.sol new file mode 100644 index 000000000..b55a70f8c --- /dev/null +++ b/contracts/discovery/L1GNSStorage.sol @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +abstract contract L1GNSV1Storage { + address public arbitrumInboxAddress; +} diff --git a/test/curation/curation.test.ts b/test/curation/curation.test.ts index 897ef8da3..b570a20e8 100644 --- a/test/curation/curation.test.ts +++ b/test/curation/curation.test.ts @@ -6,7 +6,15 @@ import { GraphToken } from '../../build/types/GraphToken' import { Controller } from '../../build/types/Controller' import { NetworkFixture } from '../lib/fixtures' -import { getAccounts, randomHexBytes, toBN, toGRT, formatGRT, Account } from '../lib/testHelpers' +import { + getAccounts, + randomHexBytes, + toBN, + toGRT, + formatGRT, + Account, + impersonateAccount, +} from '../lib/testHelpers' const MAX_PPM = 1000000 @@ -34,6 +42,7 @@ describe('Curation', () => { let governor: Account let curator: Account let stakingMock: Account + let gnsImpersonator: Account let fixture: NetworkFixture @@ -124,6 +133,50 @@ describe('Curation', () => { expect(afterTokenTotalSupply).eq(beforeTokenTotalSupply.sub(curationTax)) } + const shouldMintTaxFree = async (tokensToDeposit: BigNumber, expectedSignal: BigNumber) => { + // Before state + const beforeTokenTotalSupply = 
await grt.totalSupply() + const beforeCuratorTokens = await grt.balanceOf(gnsImpersonator.address) + const beforeCuratorSignal = await curation.getCuratorSignal( + gnsImpersonator.address, + subgraphDeploymentID, + ) + const beforePool = await curation.pools(subgraphDeploymentID) + const beforePoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) + const beforeTotalTokens = await grt.balanceOf(curation.address) + + // Curate + const tx = curation + .connect(gnsImpersonator.signer) + .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) + await expect(tx) + .emit(curation, 'Signalled') + .withArgs(gnsImpersonator.address, subgraphDeploymentID, tokensToDeposit, expectedSignal, 0) + + // After state + const afterTokenTotalSupply = await grt.totalSupply() + const afterCuratorTokens = await grt.balanceOf(gnsImpersonator.address) + const afterCuratorSignal = await curation.getCuratorSignal( + gnsImpersonator.address, + subgraphDeploymentID, + ) + const afterPool = await curation.pools(subgraphDeploymentID) + const afterPoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) + const afterTotalTokens = await grt.balanceOf(curation.address) + + // Curator balance updated + expect(afterCuratorTokens).eq(beforeCuratorTokens.sub(tokensToDeposit)) + expect(afterCuratorSignal).eq(beforeCuratorSignal.add(expectedSignal)) + // Allocated and balance updated + expect(afterPool.tokens).eq(beforePool.tokens.add(tokensToDeposit)) + expect(afterPoolSignal).eq(beforePoolSignal.add(expectedSignal)) + expect(afterPool.reserveRatio).eq(await curation.defaultReserveRatio()) + // Contract balance updated + expect(afterTotalTokens).eq(beforeTotalTokens.add(tokensToDeposit)) + // Total supply is reduced to curation tax burning + expect(afterTokenTotalSupply).eq(beforeTokenTotalSupply) + } + const shouldBurn = async (signalToRedeem: BigNumber, expectedTokens: BigNumber) => { // Before balances const beforeTokenTotalSupply = await grt.totalSupply() @@ -186,14 
+239,16 @@ describe('Curation', () => { before(async function () { // Use stakingMock so we can call collect - ;[me, governor, curator, stakingMock] = await getAccounts() + ;[me, governor, curator, stakingMock, gnsImpersonator] = await getAccounts() fixture = new NetworkFixture() ;({ controller, curation, grt } = await fixture.load(governor.signer)) - // Give some funds to the curator and approve the curation contract + // Give some funds to the curator and GNS impersonator and approve the curation contract await grt.connect(governor.signer).mint(curator.address, curatorTokens) await grt.connect(curator.signer).approve(curation.address, curatorTokens) + await grt.connect(governor.signer).mint(gnsImpersonator.address, curatorTokens) + await grt.connect(gnsImpersonator.signer).approve(curation.address, curatorTokens) // Give some funds to the staking contract and approve the curation contract await grt.connect(governor.signer).mint(stakingMock.address, tokensToCollect) @@ -304,6 +359,61 @@ describe('Curation', () => { }) }) + describe('curate tax free (from GNS)', async function () { + beforeEach(async function () { + await controller.setContractProxy(utils.id('GNS'), gnsImpersonator.address) + }) + it('can not be called by anyone other than GNS', async function () { + const tokensToDeposit = await curation.minimumCurationDeposit() + const tx = curation + .connect(curator.signer) + .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) + await expect(tx).revertedWith('Only the GNS can call this') + }) + + it('reject deposit below minimum tokens required', async function () { + const tokensToDeposit = (await curation.minimumCurationDeposit()).sub(toBN(1)) + const tx = curation + .connect(gnsImpersonator.signer) + .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) + await expect(tx).revertedWith('Curation deposit is below minimum required') + }) + + it('should deposit on a subgraph deployment', async function () { + const tokensToDeposit = await 
curation.minimumCurationDeposit() + const expectedSignal = toGRT('1') + await shouldMintTaxFree(tokensToDeposit, expectedSignal) + }) + + it('should get signal according to bonding curve', async function () { + const tokensToDeposit = toGRT('1000') + const expectedSignal = signalAmountFor1000Tokens + await shouldMintTaxFree(tokensToDeposit, expectedSignal) + }) + + it('should get signal according to bonding curve (and with zero tax)', async function () { + // Set curation tax + await curation.connect(governor.signer).setCurationTaxPercentage(50000) // 5% + + // Mint + const tokensToDeposit = toGRT('1000') + const expectedSignal = await curation.tokensToSignalNoTax( + subgraphDeploymentID, + tokensToDeposit, + ) + await shouldMintTaxFree(tokensToDeposit, expectedSignal) + }) + + it('should revert curate if over slippage', async function () { + const tokensToDeposit = toGRT('1000') + const expectedSignal = signalAmountFor1000Tokens + const tx = curation + .connect(gnsImpersonator.signer) + .mintTaxFree(subgraphDeploymentID, tokensToDeposit, expectedSignal.add(1)) + await expect(tx).revertedWith('Slippage protection') + }) + }) + describe('collect', async function () { context('> not curated', async function () { it('reject collect tokens distributed to the curation pool', async function () { diff --git a/test/lib/testHelpers.ts b/test/lib/testHelpers.ts index 87e04beb1..d3cab0849 100644 --- a/test/lib/testHelpers.ts +++ b/test/lib/testHelpers.ts @@ -133,12 +133,14 @@ export const applyL1ToL2Alias = (l1Address: string): string => { return l2AddressAsNumber.mod(mask).toHexString() } +export async function impersonateAccount(address: string): Promise { + await provider().send('hardhat_impersonateAccount', [address]) + return hre.ethers.getSigner(address) +} + // Adapted from: // https://github.com/livepeer/arbitrum-lpt-bridge/blob/e1a81edda3594e434dbcaa4f1ebc95b7e67ecf2a/test/utils/messaging.ts#L5 export async function getL2SignerFromL1(l1Address: 
string): Promise { const l2Address = applyL1ToL2Alias(l1Address) - await provider().send('hardhat_impersonateAccount', [l2Address]) - const l2Signer = await hre.ethers.getSigner(l2Address) - - return l2Signer + return impersonateAccount(l2Address) } From 88443f9bcd1d6ddb0f9bf17fb523f2dec5aa25da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Wed, 29 Jun 2022 18:08:15 +0300 Subject: [PATCH 007/112] test: add some tests for legacy GNS migration --- contracts/discovery/L1GNS.sol | 4 +- contracts/discovery/SubgraphNFT.sol | 2 +- contracts/l2/discovery/L2GNS.sol | 8 +- contracts/tests/LegacyGNSMock.sol | 32 ++++++++ test/gns.test.ts | 116 +++++++++++++++++++++++++++- 5 files changed, 151 insertions(+), 11 deletions(-) create mode 100644 contracts/tests/LegacyGNSMock.sol diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 1fa97ad1f..a83af48fc 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -28,7 +28,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { event SubgraphLockedForMigrationToL2(uint256 _subgraphID); event SubgraphSentToL2(uint256 _subgraphID); - event ArbitrumInboxAddressSet(address _inbox); + event ArbitrumInboxAddressUpdated(address _inbox); /** * @dev sets the addresses for L1 inbox provided by Arbitrum @@ -36,7 +36,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { */ function setArbitrumInboxAddress(address _inbox) external onlyGovernor { arbitrumInboxAddress = _inbox; - emit ArbitrumInboxAddressSet(_inbox); + emit ArbitrumInboxAddressUpdated(_inbox); } function lockSubgraphForMigrationToL2(uint256 _subgraphID) diff --git a/contracts/discovery/SubgraphNFT.sol b/contracts/discovery/SubgraphNFT.sol index 8da3e9e61..307b089c7 100644 --- a/contracts/discovery/SubgraphNFT.sol +++ b/contracts/discovery/SubgraphNFT.sol @@ -172,7 +172,7 @@ contract SubgraphNFT is Governed, ERC721, ISubgraphNFT { * @return IPFS hash for the metadata */ function 
getSubgraphMetadata(uint256 _tokenId) external view override returns (bytes32) { - require(_exists(_tokenId), "ERC721Metadata: URI set of nonexistent token"); + require(_exists(_tokenId), "ERC721Metadata: metadata query of nonexistent token"); return _subgraphMetadataHashes[_tokenId]; } } diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 8a52246d2..f881787f1 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -66,7 +66,7 @@ contract L2GNS is GNS, L2GNSV1Storage { bytes32 subgraphMetadata ) external notPartialPaused onlyL2Gateway { IGNS.MigratedSubgraphData storage migratedData = migratedSubgraphData[subgraphID]; - SubgraphData storage subgraphData = subgraphs[subgraphID]; + SubgraphData storage subgraphData = _getSubgraphData(subgraphID); subgraphData.reserveRatio = reserveRatio; // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called @@ -91,7 +91,7 @@ contract L2GNS is GNS, L2GNSV1Storage { bytes32 _versionMetadata ) external notPartialPaused onlySubgraphAuth(_subgraphID) { IGNS.MigratedSubgraphData storage migratedData = migratedSubgraphData[_subgraphID]; - SubgraphData storage subgraphData = subgraphs[_subgraphID]; + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); // A subgraph require(migratedData.tokens > 0, "INVALID_SUBGRAPH"); require(!migratedData.l2Done, "ALREADY_DONE"); @@ -180,7 +180,7 @@ contract L2GNS is GNS, L2GNSV1Storage { require(curatorNSignalSlot.exists, "!CURATOR_SLOT"); - SubgraphData storage subgraphData = subgraphs[_subgraphID]; + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); subgraphData.curatorNSignal[msg.sender] = subgraphData.curatorNSignal[msg.sender].add( curatorNSignalSlot.value ); @@ -209,7 +209,7 @@ contract L2GNS is GNS, L2GNSV1Storage { require(migratedData.l2Done, "!MIGRATED"); require(!migratedData.curatorBalanceClaimed[_curator], "ALREADY_CLAIMED"); - SubgraphData storage subgraphData = 
subgraphs[_subgraphID]; + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); subgraphData.curatorNSignal[_beneficiary] = subgraphData.curatorNSignal[_beneficiary].add( _balance ); diff --git a/contracts/tests/LegacyGNSMock.sol b/contracts/tests/LegacyGNSMock.sol new file mode 100644 index 000000000..87696412c --- /dev/null +++ b/contracts/tests/LegacyGNSMock.sol @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +import "../discovery/GNS.sol"; + +/** + * @title LegacyGNSMock contract + */ +contract LegacyGNSMock is GNS { + function createLegacySubgraph(uint256 subgraphNumber, bytes32 subgraphDeploymentID) external { + SubgraphData storage subgraphData = legacySubgraphData[msg.sender][subgraphNumber]; + legacySubgraphs[msg.sender][subgraphNumber] = subgraphDeploymentID; + subgraphData.subgraphDeploymentID = subgraphDeploymentID; + subgraphData.nSignal = 1000; // Mock value + } + + function getSubgraphDeploymentID(uint256 subgraphID) + external + view + returns (bytes32 subgraphDeploymentID) + { + IGNS.SubgraphData storage subgraph = _getSubgraphData(subgraphID); + subgraphDeploymentID = subgraph.subgraphDeploymentID; + } + + function getSubgraphNSignal(uint256 subgraphID) external view returns (uint256 nSignal) { + IGNS.SubgraphData storage subgraph = _getSubgraphData(subgraphID); + nSignal = subgraph.nSignal; + } +} diff --git a/test/gns.test.ts b/test/gns.test.ts index fe83a6f54..b98a7efdf 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -3,7 +3,7 @@ import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' import { solidityKeccak256 } from 'ethers/lib/utils' import { SubgraphDeploymentID } from '@graphprotocol/common-ts' -import { GNS } from '../build/types/GNS' +import { LegacyGNSMock } from '../build/types/LegacyGNSMock' import { GraphToken } from '../build/types/GraphToken' import { Curation } from '../build/types/Curation' import { SubgraphNFT } from 
'../build/types/SubgraphNFT' @@ -12,6 +12,12 @@ import { getAccounts, randomHexBytes, Account, toGRT, getChainID } from './lib/t import { NetworkFixture } from './lib/fixtures' import { toBN, formatGRT } from './lib/testHelpers' import { getContractAt } from '../cli/network' +import { deployContract } from './lib/deployment' +import { BancorFormula } from '../build/types/BancorFormula' +import { network } from '../cli' +import { Controller } from '../build/types/Controller' +import { GraphProxyAdmin } from '../build/types/GraphProxyAdmin' +import { L1GNS } from '../build/types/L1GNS' const { AddressZero, HashZero } = ethers.constants @@ -45,7 +51,10 @@ const toRound = (n: number) => n.toFixed(12) const buildSubgraphID = async (account: string, seqID: BigNumber): Promise => solidityKeccak256(['address', 'uint256', 'uint256'], [account, seqID, await getChainID()]) -describe('GNS', () => { +const buildLegacySubgraphID = (account: string, seqID: BigNumber): string => + solidityKeccak256(['address', 'uint256'], [account, seqID]) + +describe('GNS (L1)', () => { let me: Account let other: Account let another: Account @@ -53,9 +62,12 @@ describe('GNS', () => { let fixture: NetworkFixture - let gns: GNS + let gns: L1GNS + let legacyGNSMock: LegacyGNSMock let grt: GraphToken let curation: Curation + let controller: Controller + let proxyAdmin: GraphProxyAdmin const tokens1000 = toGRT('1000') const tokens10000 = toGRT('10000') @@ -509,7 +521,7 @@ describe('GNS', () => { before(async function () { ;[me, other, governor, another] = await getAccounts() fixture = new NetworkFixture() - ;({ grt, curation, gns } = await fixture.load(governor.signer)) + ;({ grt, curation, gns, controller, proxyAdmin } = await fixture.load(governor.signer)) newSubgraph0 = buildSubgraph() newSubgraph1 = buildSubgraph() defaultName = createDefaultName('graph') @@ -553,6 +565,38 @@ describe('GNS', () => { }) }) + describe('setCounterpartGNSAddress', function () { + it('should set 
`counterpartGNSAddress`', async function () { + // Can set if allowed + const newValue = other.address + const tx = gns.connect(governor.signer).setCounterpartGNSAddress(newValue) + await expect(tx).emit(gns, 'CounterpartGNSAddressUpdated').withArgs(newValue) + expect(await gns.counterpartGNSAddress()).eq(newValue) + }) + + it('reject set `counterpartGNSAddress` if not allowed', async function () { + const newValue = other.address + const tx = gns.connect(me.signer).setCounterpartGNSAddress(newValue) + await expect(tx).revertedWith('Caller must be Controller governor') + }) + }) + + describe('setArbitrumInboxAddress', function () { + it('should set `arbitrumInboxAddress`', async function () { + // Can set if allowed + const newValue = other.address + const tx = gns.connect(governor.signer).setArbitrumInboxAddress(newValue) + await expect(tx).emit(gns, 'ArbitrumInboxAddressUpdated').withArgs(newValue) + expect(await gns.arbitrumInboxAddress()).eq(newValue) + }) + + it('reject set `arbitrumInboxAddress` if not allowed', async function () { + const newValue = other.address + const tx = gns.connect(me.signer).setArbitrumInboxAddress(newValue) + await expect(tx).revertedWith('Caller must be Controller governor') + }) + }) + describe('setSubgraphNFT', function () { it('should set `setSubgraphNFT`', async function () { const newValue = gns.address // I just use any contract address @@ -1140,4 +1184,68 @@ describe('GNS', () => { expect('ipfs://' + subgraph0.id).eq(tokenURI) }) }) + describe('Legacy subgraph migration', function () { + beforeEach(async function () { + const bondingCurve = (await deployContract( + 'BancorFormula', + governor.signer, + )) as unknown as BancorFormula + const subgraphDescriptor = await deployContract('SubgraphNFTDescriptor', governor.signer) + const subgraphNFT = (await deployContract( + 'SubgraphNFT', + governor.signer, + governor.address, + )) as SubgraphNFT + + // Deploy + legacyGNSMock = (await network.deployContractWithProxy( + proxyAdmin, 
+ 'LegacyGNSMock', + [controller.address, bondingCurve.address, subgraphNFT.address], + governor.signer, + )) as unknown as LegacyGNSMock + + // Post-config + await subgraphNFT.connect(governor.signer).setMinter(legacyGNSMock.address) + await subgraphNFT.connect(governor.signer).setTokenDescriptor(subgraphDescriptor.address) + }) + it('migrates a legacy subgraph', async function () { + const seqID = toBN('2') + await legacyGNSMock + .connect(me.signer) + .createLegacySubgraph(seqID, newSubgraph0.subgraphDeploymentID) + const tx = legacyGNSMock + .connect(me.signer) + .migrateLegacySubgraph(me.address, seqID, newSubgraph0.subgraphMetadata) + await expect(tx).emit(legacyGNSMock, ' LegacySubgraphClaimed').withArgs(me.address, seqID) + const expectedSubgraphID = buildLegacySubgraphID(me.address, seqID) + const migratedSubgraphDeploymentID = await legacyGNSMock.getSubgraphDeploymentID( + expectedSubgraphID, + ) + const migratedNSignal = await legacyGNSMock.getSubgraphNSignal(expectedSubgraphID) + expect(migratedSubgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) + expect(migratedNSignal).eq(toBN('1000')) + + const subgraphNFTAddress = await legacyGNSMock.subgraphNFT() + const subgraphNFT = getContractAt('SubgraphNFT', subgraphNFTAddress) as SubgraphNFT + const tokenURI = await subgraphNFT.connect(me.signer).tokenURI(expectedSubgraphID) + + const sub = new SubgraphDeploymentID(newSubgraph0.subgraphMetadata) + expect(sub.ipfsHash).eq(tokenURI) + }) + it('refuses to migrate an already migrated subgraph', async function () { + const seqID = toBN('2') + await legacyGNSMock + .connect(me.signer) + .createLegacySubgraph(seqID, newSubgraph0.subgraphDeploymentID) + let tx = legacyGNSMock + .connect(me.signer) + .migrateLegacySubgraph(me.address, seqID, newSubgraph0.subgraphMetadata) + await expect(tx).emit(legacyGNSMock, ' LegacySubgraphClaimed').withArgs(me.address, seqID) + tx = legacyGNSMock + .connect(me.signer) + .migrateLegacySubgraph(me.address, seqID, 
newSubgraph0.subgraphMetadata) + await expect(tx).revertedWith('GNS: Subgraph was already claimed') + }) + }) }) From 02b7c7da0c95f5a0e3a47ddb5e8912f7a5f9b871 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Thu, 30 Jun 2022 14:10:03 +0300 Subject: [PATCH 008/112] fix: use L1GNS as key in the address book --- addresses.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/addresses.json b/addresses.json index 24307ea8d..df2e2c762 100644 --- a/addresses.json +++ b/addresses.json @@ -119,7 +119,7 @@ "txHash": "0x2e44da799ad8866ac49aae2e40a16c57784ed4b1e9343daa4f764c39a05e0826" } }, - "GNS": { + "L1GNS": { "address": "0xaDcA0dd4729c8BA3aCf3E99F3A9f471EF37b6825", "initArgs": [ { @@ -601,7 +601,7 @@ "txHash": "0xc93d39f849b249792924ee973c022aea2445c6662ce26f450d324b1c721c25a7" } }, - "GNS": { + "L1GNS": { "address": "0xA94B7f0465E98609391C623d0560C5720a3f2D33", "initArgs": [ "0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B", From f76daec871d22139bc4846a54edf4e34427b08c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Thu, 30 Jun 2022 20:26:46 +0300 Subject: [PATCH 009/112] feat: tests outline, and realized we need to handle legacy subgraphs --- contracts/discovery/L1GNS.sol | 8 ++--- contracts/l2/discovery/L2GNS.sol | 31 +++++++++++++++--- test/gns.test.ts | 34 ++++++++++++++++++- test/l2/l2GNS.test.ts | 56 ++++++++++++++++++++++++++++++++ 4 files changed, 120 insertions(+), 9 deletions(-) create mode 100644 test/l2/l2GNS.test.ts diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index a83af48fc..7b8416ff2 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -42,7 +42,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { function lockSubgraphForMigrationToL2(uint256 _subgraphID) external payable - notPaused + notPartialPaused onlySubgraphAuth(_subgraphID) { // Subgraph check @@ -78,7 +78,7 @@ contract L1GNS is GNS, L1GNSV1Storage, 
L1ArbitrumMessenger { uint256 maxGas, uint256 gasPriceBid, uint256 maxSubmissionCost - ) external payable notPaused onlySubgraphAuth(_subgraphID) { + ) external payable notPartialPaused onlySubgraphAuth(_subgraphID) { SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; require( @@ -137,7 +137,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { * This allows curators to recover their funds if the subgraph was locked * for a migration to L2 but the subgraph was never actually sent to L2. */ - function deprecateLockedSubgraph(uint256 _subgraphID) external notPaused { + function deprecateLockedSubgraph(uint256 _subgraphID) external notPartialPaused { SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; require( @@ -162,7 +162,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { uint256 _maxGas, uint256 _gasPriceBid, uint256 _maxSubmissionCost - ) external payable notPaused returns (bytes memory) { + ) external payable notPartialPaused returns (bytes memory) { SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index f881787f1..7cb24a861 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -29,6 +29,10 @@ contract L2GNS is GNS, L2GNSV1Storage { // Offset applied by the bridge to L1 addresses sending messages to L2 uint160 internal constant L2_ADDRESS_OFFSET = uint160(0x1111000000000000000000000000000000001111); + // Storage slot where the subgraphs mapping is stored + uint256 internal constant SUBGRAPH_MAPPING_SLOT = 18; + // Storage slot where the legacy subgraphs mapping is stored + uint256 internal constant 
LEGACY_SUBGRAPH_MAPPING_SLOT = 17; event SubgraphReceivedFromL1(uint256 _subgraphID); event SubgraphMigrationFinalized(uint256 _subgraphID); @@ -156,17 +160,24 @@ contract L2GNS is GNS, L2GNSV1Storage { require(l1GNSAccount.exists, "!ACCOUNT"); - // subgraphs mapping at slot 7. - // So our subgraph is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(7))) + // subgraphs mapping is stored at slot SUBGRAPH_MAPPING_SLOT. + // So our subgraph is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, - // So the mapping is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(7))) + 2 + // So the mapping is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) + 2 // Therefore the nSignal value for msg.sender should be at slot: uint256 curatorSlot = uint256( keccak256( abi.encodePacked( uint256(msg.sender), uint256( - uint256(keccak256(abi.encodePacked(uint256(_subgraphID), uint256(7)))) + 2 + uint256( + keccak256( + abi.encodePacked( + uint256(_subgraphID), + uint256(SUBGRAPH_MAPPING_SLOT) + ) + ) + ) + 2 ) ) ) @@ -189,6 +200,18 @@ contract L2GNS is GNS, L2GNSV1Storage { emit CuratorBalanceClaimed(_subgraphID, msg.sender, msg.sender, curatorNSignalSlot.value); } + /** + * @dev Claim curator balance belonging to a curator from L1 on a legacy subgraph. + * This will be credited to the same curator's balance on L2. + * This can only be called by the corresponding curator. 
+ * @param _subgraphID Subgraph for which to claim a balance + * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 + * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance + */ + function claimL1CuratorBalanceForLegacySubgraph() external { + // TODO + } + /** * @dev Claim curator balance belonging to a curator from L1. * This will be credited to the a beneficiary on L2, and can only be called diff --git a/test/gns.test.ts b/test/gns.test.ts index b98a7efdf..db122b08f 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -54,7 +54,7 @@ const buildSubgraphID = async (account: string, seqID: BigNumber): Promise solidityKeccak256(['address', 'uint256'], [account, seqID]) -describe('GNS (L1)', () => { +describe('L1GNS', () => { let me: Account let other: Account let another: Account @@ -1248,4 +1248,36 @@ describe('GNS (L1)', () => { await expect(tx).revertedWith('GNS: Subgraph was already claimed') }) }) + describe('Subgraph migration to L2', function () { + describe('lockSubgraphForMigrationToL2', function () { + it('locks and disables a subgraph, burning the signal and storing the block number') + it('locks and disables a legacy subgraph, burning the signal and storing the block number') + it('rejects calls from someone who is not the subgraph owner') + it('rejects a call for a non-existent subgraph') + it('rejects a call for a subgraph with no signal') + }) + describe('sendSubgraphToL2', function () { + it('sends tokens and calldata to L2 through the GRT bridge') + it('sends tokens and calldata for a legacy subgraph to L2 through the GRT bridge') + it('rejects calls from someone who is not the subgraph owner') + it('rejects calls for a subgraph that is not locked') + it('rejects calls for a subgraph that was already sent') + it('rejects calls after too many blocks have passed') + }) + describe('deprecateLockedSubgraph', function () 
{ + it('makes the GRT from the subgraph withdrawable') + it('rejects calls for a subgraph that was not locked') + it('rejects calls if not enough blocks have passed') + it('rejects calls for a subgraph that was sent to L2') + it('rejects calls for a subgraph that was already deprecated') + }) + describe('claimCuratorBalanceToBeneficiaryOnL2', function () { + it('sends a transaction to the L2GNS using the Arbitrum inbox') + it('rejects calls for a subgraph that was locked but not sent to L2') + it('rejects calls for a subgraph that was not locked') + it('rejects calls for a subgraph that was locked but deprecated') + it('rejects calls with an incorrect eth value') + it('rejects calls with zero maxSubmissionCost') + }) + }) }) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts new file mode 100644 index 000000000..c03646234 --- /dev/null +++ b/test/l2/l2GNS.test.ts @@ -0,0 +1,56 @@ +import { expect } from 'chai' +import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' +import { solidityKeccak256 } from 'ethers/lib/utils' +import { SubgraphDeploymentID } from '@graphprotocol/common-ts' + +import { LegacyGNSMock } from '../build/types/LegacyGNSMock' +import { GraphToken } from '../build/types/GraphToken' +import { Curation } from '../build/types/Curation' +import { SubgraphNFT } from '../build/types/SubgraphNFT' + +import { getAccounts, randomHexBytes, Account, toGRT, getChainID } from './lib/testHelpers' +import { NetworkFixture } from './lib/fixtures' +import { toBN, formatGRT } from './lib/testHelpers' +import { getContractAt } from '../cli/network' +import { deployContract } from './lib/deployment' +import { BancorFormula } from '../build/types/BancorFormula' +import { network } from '../cli' +import { Controller } from '../build/types/Controller' +import { GraphProxyAdmin } from '../build/types/GraphProxyAdmin' +import { L2GNS } from '../build/types/L1GNS' + +const { AddressZero, HashZero } = ethers.constants + +describe('L2GNS', () => { + 
describe('receiving a subgraph from L1', function () { + it('cannot be called by someone other than the L2GraphTokenGateway') + it('creates a subgraph in a disabled state') + it('does not conflict with a locally created subgraph') + }) + + describe('finishing a subgraph migration from L1', function () { + it('publishes the migrated subgraph and mints signal with no tax') + it('cannot be called by someone other than the subgraph owner') + it('rejects calls for a subgraph that was not migrated') + it('rejects calls to a pre-curated subgraph deployment') + it('rejects calls if the subgraph deployment ID is zero') + }) + + describe('claiming a curator balance using a proof', function () { + it('verifies a proof and assigns a curator balance') + it('adds the balance to any existing balance for the curator') + it('rejects calls with an invalid proof') + it('rejects calls for a subgraph that was not migrated') + it('rejects calls if the balance was already claimed') + it('rejects calls with proof from a different curator') + it('rejects calls with proof from a different contract') + it('rejects calls with a proof from a different block') + }) + describe('claiming a curator balance with a message from L1', function () { + it('assigns a curator balance to a beneficiary') + it('adds the balance to any existing balance for the beneficiary') + it('can only be called from the gateway') + it('rejects calls for a subgraph that was not migrated') + it('rejects calls if the balance was already claimed') + }) +}) From 844bdc71ad2815b0326b2d5f1bb5f2e77f9f1044 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Fri, 1 Jul 2022 14:18:18 +0300 Subject: [PATCH 010/112] feat: claiming balance on legacy subgraphs is a bit different --- contracts/discovery/GNS.sol | 27 +++++++ contracts/discovery/GNSStorage.sol | 2 +- contracts/discovery/IGNS.sol | 17 ++-- contracts/discovery/L1GNS.sol | 4 +- contracts/l2/discovery/L2GNS.sol | 102 +++++++++++++++++++----- 
contracts/l2/discovery/L2GNSStorage.sol | 13 --- 6 files changed, 119 insertions(+), 46 deletions(-) delete mode 100644 contracts/l2/discovery/L2GNSStorage.sol diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index 073537a2d..b53027502 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -764,6 +764,33 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { return _isPublished(_getSubgraphData(_subgraphID)); } + /** + * @dev Return whether a subgraph is a legacy subgraph (created before subgraph NFTs). + * @param _subgraphID Subgraph ID + * @return Return true if subgraph is a legacy subgraph + */ + function isLegacySubgraph(uint256 _subgraphID) public view override returns (bool) { + (address account, ) = getLegacySubgraphKey(_subgraphID); + return account != address(0); + } + + /** + * @dev Returns account and sequence ID for a legacy subgraph (created before subgraph NFTs). + * @param _subgraphID Subgraph ID + * @return account Account that created the subgraph (or 0 if it's not a legacy subgraph) + * @return seqID Sequence number for the subgraph + */ + function getLegacySubgraphKey(uint256 _subgraphID) + public + view + override + returns (address account, uint256 seqID) + { + LegacySubgraphKey storage legacySubgraphKey = legacySubgraphKeys[_subgraphID]; + account = legacySubgraphKey.account; + seqID = legacySubgraphKey.accountSeqID; + } + /** * @dev Build a subgraph ID based on the account creating it and a sequence number for that account. * Only used for legacy subgraphs being migrated, as new ones will also use the chainid. 
diff --git a/contracts/discovery/GNSStorage.sol b/contracts/discovery/GNSStorage.sol index 1f0229cd4..aeb98a460 100644 --- a/contracts/discovery/GNSStorage.sol +++ b/contracts/discovery/GNSStorage.sol @@ -51,7 +51,7 @@ abstract contract GNSV2Storage is GNSV1Storage { } abstract contract GNSV3Storage is GNSV2Storage { - // Block numbers and tokens for subgraphs that are locked and ready to be sent to L2 + // Data for subgraph migration from L1 to L2, some fields will be empty or set differently on each layer mapping(uint256 => IGNS.SubgraphL2MigrationData) public subgraphL2MigrationData; // Address of the counterpart GNS contract (L1GNS/L2GNS) address public counterpartGNSAddress; diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index 568190107..7e58badf0 100644 --- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -19,13 +19,9 @@ interface IGNS { uint256 lockedAtBlock; // Block at which the subgraph was locked for migration uint256 tokens; // GRT that will be sent to L2 to mint signal bool l1Done; // Migration finished on L1 side (or subgraph deprecated) - } - - struct MigratedSubgraphData { - bytes32 lockedAtBlockHash; - mapping(address => bool) curatorBalanceClaimed; - uint256 tokens; - bool l2Done; + bytes32 lockedAtBlockHash; // Blockhash from block at which the subgraph was locked for migration + mapping(address => bool) curatorBalanceClaimed; // True for curators whose balance has been claimed in L2 + bool l2Done; // Migration finished on L2 side } struct LegacySubgraphKey { @@ -124,4 +120,11 @@ interface IGNS { returns (uint256); function isPublished(uint256 _subgraphID) external view returns (bool); + + function isLegacySubgraph(uint256 _subgraphID) external view returns (bool); + + function getLegacySubgraphKey(uint256 _subgraphID) + external + view + returns (address account, uint256 seqID); } diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 7b8416ff2..c45df5672 100644 --- 
a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -50,15 +50,13 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; // Lock the subgraph so no more signal can be minted or burned. - // This can only be done for subgraphs that have nonzero signal. - require(subgraphData.nSignal > 0, "!SIGNAL"); // Burn all version signal in the name pool for tokens (w/no slippage protection) // Sell all signal from the old deployment migrationData.tokens = curation().burn( subgraphData.subgraphDeploymentID, subgraphData.vSignal, - 1 // We do check that the output must be nonzero... + 0 ); subgraphData.disabled = true; diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 7cb24a861..3d541fa77 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -7,7 +7,6 @@ import "@openzeppelin/contracts/math/SafeMath.sol"; import "@openzeppelin/contracts/utils/Address.sol"; import "../../discovery/GNS.sol"; -import "./L2GNSStorage.sol"; import { RLPReader } from "../../libraries/RLPReader.sol"; import { StateProofVerifier as Verifier } from "../../libraries/StateProofVerifier.sol"; @@ -21,7 +20,7 @@ import { StateProofVerifier as Verifier } from "../../libraries/StateProofVerifi * The contract implements a multicall behaviour to support batching multiple calls in a single * transaction. 
*/ -contract L2GNS is GNS, L2GNSV1Storage { +contract L2GNS is GNS { using RLPReader for bytes; using RLPReader for RLPReader.RLPItem; using SafeMath for uint256; @@ -29,10 +28,10 @@ contract L2GNS is GNS, L2GNSV1Storage { // Offset applied by the bridge to L1 addresses sending messages to L2 uint160 internal constant L2_ADDRESS_OFFSET = uint160(0x1111000000000000000000000000000000001111); - // Storage slot where the subgraphs mapping is stored + // Storage slot where the subgraphs mapping is stored on L1GNS uint256 internal constant SUBGRAPH_MAPPING_SLOT = 18; - // Storage slot where the legacy subgraphs mapping is stored - uint256 internal constant LEGACY_SUBGRAPH_MAPPING_SLOT = 17; + // Storage slot where the legacy subgraphs mapping is stored on L1GNS + uint256 internal constant LEGACY_SUBGRAPH_MAPPING_SLOT = 15; event SubgraphReceivedFromL1(uint256 _subgraphID); event SubgraphMigrationFinalized(uint256 _subgraphID); @@ -69,7 +68,7 @@ contract L2GNS is GNS, L2GNSV1Storage { uint32 reserveRatio, bytes32 subgraphMetadata ) external notPartialPaused onlyL2Gateway { - IGNS.MigratedSubgraphData storage migratedData = migratedSubgraphData[subgraphID]; + IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[subgraphID]; SubgraphData storage subgraphData = _getSubgraphData(subgraphID); subgraphData.reserveRatio = reserveRatio; @@ -79,6 +78,7 @@ contract L2GNS is GNS, L2GNSV1Storage { migratedData.tokens = tokens; migratedData.lockedAtBlockHash = lockedAtBlockHash; + migratedData.l1Done = true; // Mint the NFT. Use the subgraphID as tokenID. // This function will check the if tokenID already exists. 
@@ -94,10 +94,10 @@ contract L2GNS is GNS, L2GNSV1Storage { bytes32 _subgraphDeploymentID, bytes32 _versionMetadata ) external notPartialPaused onlySubgraphAuth(_subgraphID) { - IGNS.MigratedSubgraphData storage migratedData = migratedSubgraphData[_subgraphID]; + IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); // A subgraph - require(migratedData.tokens > 0, "INVALID_SUBGRAPH"); + require(migratedData.l1Done, "INVALID_SUBGRAPH"); require(!migratedData.l2Done, "ALREADY_DONE"); migratedData.l2Done = true; @@ -143,7 +143,7 @@ contract L2GNS is GNS, L2GNSV1Storage { bytes memory _proofRlpBytes ) external notPartialPaused { Verifier.BlockHeader memory blockHeader = Verifier.parseBlockHeader(_blockHeaderRlpBytes); - IGNS.MigratedSubgraphData storage migratedData = migratedSubgraphData[_subgraphID]; + IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; require(migratedData.l2Done, "!MIGRATED"); require(blockHeader.hash == migratedData.lockedAtBlockHash, "!BLOCKHASH"); @@ -170,15 +170,10 @@ contract L2GNS is GNS, L2GNSV1Storage { abi.encodePacked( uint256(msg.sender), uint256( - uint256( - keccak256( - abi.encodePacked( - uint256(_subgraphID), - uint256(SUBGRAPH_MAPPING_SLOT) - ) - ) - ) + 2 - ) + keccak256( + abi.encodePacked(uint256(_subgraphID), uint256(SUBGRAPH_MAPPING_SLOT)) + ) + ).add(2) ) ) ); @@ -204,12 +199,75 @@ contract L2GNS is GNS, L2GNSV1Storage { * @dev Claim curator balance belonging to a curator from L1 on a legacy subgraph. * This will be credited to the same curator's balance on L2. * This can only be called by the corresponding curator. - * @param _subgraphID Subgraph for which to claim a balance + * Users can query getLegacySubgraphKey on L1 to get the _subgraphCreatorAccount and _seqID. 
+ * @param _subgraphCreatorAccount Account that created the subgraph in L1 + * @param _seqID Sequence number for the subgraph * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance */ - function claimL1CuratorBalanceForLegacySubgraph() external { - // TODO + function claimL1CuratorBalanceForLegacySubgraph( + address _subgraphCreatorAccount, + uint256 _seqID, + bytes memory _blockHeaderRlpBytes, + bytes memory _proofRlpBytes + ) external { + uint256 _subgraphID = _buildLegacySubgraphID(_subgraphCreatorAccount, _seqID); + + Verifier.BlockHeader memory blockHeader = Verifier.parseBlockHeader(_blockHeaderRlpBytes); + IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + + require(migratedData.l2Done, "!MIGRATED"); + require(blockHeader.hash == migratedData.lockedAtBlockHash, "!BLOCKHASH"); + require(!migratedData.curatorBalanceClaimed[msg.sender], "ALREADY_CLAIMED"); + + RLPReader.RLPItem[] memory proofs = _proofRlpBytes.toRlpItem().toList(); + require(proofs.length == 2, "!N_PROOFS"); + + Verifier.Account memory l1GNSAccount = Verifier.extractAccountFromProof( + keccak256(abi.encodePacked(counterpartGNSAddress)), + blockHeader.stateRootHash, + proofs[0].toList() + ); + + require(l1GNSAccount.exists, "!ACCOUNT"); + + uint256 curatorSlot; + { + // legacy subgraphs mapping is stored at slot LEGACY_SUBGRAPH_MAPPING_SLOT. 
+ // So the subgraphs for the account are at slot keccak256(abi.encodePacked(uint256(_subgraphCreatorAccount), uint256(SUBGRAPH_MAPPING_SLOT))) + uint256 accountSlot = uint256( + keccak256( + abi.encodePacked( + uint256(_subgraphCreatorAccount), + uint256(LEGACY_SUBGRAPH_MAPPING_SLOT) + ) + ) + ); + // Then the subgraph for this _seqID should be at: + uint256 subgraphSlot = uint256(keccak256(abi.encodePacked(_seqID, accountSlot))); + // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, + // So the mapping is at slot subgraphSlot + 2 + // Therefore the nSignal value for msg.sender should be at slot: + curatorSlot = uint256( + keccak256(abi.encodePacked(uint256(msg.sender), uint256(subgraphSlot).add(2))) + ); + } + + Verifier.SlotValue memory curatorNSignalSlot = Verifier.extractSlotValueFromProof( + keccak256(abi.encodePacked(curatorSlot)), + l1GNSAccount.storageRoot, + proofs[1].toList() + ); + + require(curatorNSignalSlot.exists, "!CURATOR_SLOT"); + + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + subgraphData.curatorNSignal[msg.sender] = subgraphData.curatorNSignal[msg.sender].add( + curatorNSignalSlot.value + ); + migratedData.curatorBalanceClaimed[msg.sender] = true; + + emit CuratorBalanceClaimed(_subgraphID, msg.sender, msg.sender, curatorNSignalSlot.value); } /** @@ -227,7 +285,7 @@ contract L2GNS is GNS, L2GNSV1Storage { uint256 _balance, address _beneficiary ) external notPartialPaused onlyL1Counterpart { - GNS.MigratedSubgraphData storage migratedData = migratedSubgraphData[_subgraphID]; + GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; require(migratedData.l2Done, "!MIGRATED"); require(!migratedData.curatorBalanceClaimed[_curator], "ALREADY_CLAIMED"); diff --git a/contracts/l2/discovery/L2GNSStorage.sol b/contracts/l2/discovery/L2GNSStorage.sol deleted file mode 100644 index 006f34636..000000000 --- a/contracts/l2/discovery/L2GNSStorage.sol +++ /dev/null @@ -1,13 +0,0 @@ 
-// SPDX-License-Identifier: GPL-2.0-or-later - -pragma solidity ^0.7.6; - -import "../../discovery/IGNS.sol"; - -/** - * @dev Storage variables for the L2GNS - */ -contract L2GNSV1Storage { - // Subgraph data incoming from L1 - mapping(uint256 => IGNS.MigratedSubgraphData) public migratedSubgraphData; -} From c0e74858046cf88cdc1243f13783cea0c0f142f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Fri, 1 Jul 2022 18:50:36 +0300 Subject: [PATCH 011/112] test: more WIP on tests for GNS migration --- contracts/tests/LegacyGNSMock.sol | 4 +- test/gns.test.ts | 268 ++++++++++++++++++++++++++---- test/l2/l2GNS.test.ts | 28 ++-- test/lib/fixtures.ts | 5 + 4 files changed, 254 insertions(+), 51 deletions(-) diff --git a/contracts/tests/LegacyGNSMock.sol b/contracts/tests/LegacyGNSMock.sol index 87696412c..8fd622882 100644 --- a/contracts/tests/LegacyGNSMock.sol +++ b/contracts/tests/LegacyGNSMock.sol @@ -3,12 +3,12 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import "../discovery/GNS.sol"; +import "../discovery/L1GNS.sol"; /** * @title LegacyGNSMock contract */ -contract LegacyGNSMock is GNS { +contract LegacyGNSMock is L1GNS { function createLegacySubgraph(uint256 subgraphNumber, bytes32 subgraphDeploymentID) external { SubgraphData storage subgraphData = legacySubgraphData[msg.sender][subgraphNumber]; legacySubgraphs[msg.sender][subgraphNumber] = subgraphDeploymentID; diff --git a/test/gns.test.ts b/test/gns.test.ts index db122b08f..78cc8d1af 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai' import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' -import { solidityKeccak256 } from 'ethers/lib/utils' +import { Interface, solidityKeccak256 } from 'ethers/lib/utils' import { SubgraphDeploymentID } from '@graphprotocol/common-ts' import { LegacyGNSMock } from '../build/types/LegacyGNSMock' @@ -8,7 +8,15 @@ import { GraphToken } from '../build/types/GraphToken' import { Curation 
} from '../build/types/Curation' import { SubgraphNFT } from '../build/types/SubgraphNFT' -import { getAccounts, randomHexBytes, Account, toGRT, getChainID } from './lib/testHelpers' +import { + getAccounts, + randomHexBytes, + Account, + toGRT, + getChainID, + latestBlock, + advanceBlocks, +} from './lib/testHelpers' import { NetworkFixture } from './lib/fixtures' import { toBN, formatGRT } from './lib/testHelpers' import { getContractAt } from '../cli/network' @@ -18,9 +26,17 @@ import { network } from '../cli' import { Controller } from '../build/types/Controller' import { GraphProxyAdmin } from '../build/types/GraphProxyAdmin' import { L1GNS } from '../build/types/L1GNS' +import path from 'path' +import { Artifacts } from 'hardhat/internal/artifacts' +import { L1GraphTokenGateway } from '../build/types/L1GraphTokenGateway' const { AddressZero, HashZero } = ethers.constants +const ARTIFACTS_PATH = path.resolve('build/contracts') +const artifacts = new Artifacts(ARTIFACTS_PATH) +const l2GNSabi = artifacts.readArtifactSync('L2GNS').abi +const l2GNSIface = new Interface(l2GNSabi) + // Entities interface PublishSubgraph { subgraphDeploymentID: string @@ -59,6 +75,10 @@ describe('L1GNS', () => { let other: Account let another: Account let governor: Account + let mockRouter: Account + let mockL2GRT: Account + let mockL2Gateway: Account + let mockL2GNS: Account let fixture: NetworkFixture @@ -68,6 +88,7 @@ describe('L1GNS', () => { let curation: Curation let controller: Controller let proxyAdmin: GraphProxyAdmin + let l1GraphTokenGateway: L1GraphTokenGateway const tokens1000 = toGRT('1000') const tokens10000 = toGRT('10000') @@ -518,10 +539,39 @@ describe('L1GNS', () => { return tx } + const deployLegacyGNSMock = async (): Promise => { + const bondingCurve = (await deployContract( + 'BancorFormula', + governor.signer, + )) as unknown as BancorFormula + const subgraphDescriptor = await deployContract('SubgraphNFTDescriptor', governor.signer) + const subgraphNFT = (await 
deployContract( + 'SubgraphNFT', + governor.signer, + governor.address, + )) as SubgraphNFT + + // Deploy + legacyGNSMock = (await network.deployContractWithProxy( + proxyAdmin, + 'LegacyGNSMock', + [controller.address, bondingCurve.address, subgraphNFT.address], + governor.signer, + )) as unknown as LegacyGNSMock + + // Post-config + await subgraphNFT.connect(governor.signer).setMinter(legacyGNSMock.address) + await subgraphNFT.connect(governor.signer).setTokenDescriptor(subgraphDescriptor.address) + await legacyGNSMock.connect(governor.signer).syncAllContracts() + await legacyGNSMock.connect(governor.signer).approveAll() + } + before(async function () { - ;[me, other, governor, another] = await getAccounts() + ;[me, other, governor, another, mockRouter, mockL2GRT, mockL2Gateway, mockL2GNS] = + await getAccounts() fixture = new NetworkFixture() - ;({ grt, curation, gns, controller, proxyAdmin } = await fixture.load(governor.signer)) + const fixtureContracts = await fixture.load(governor.signer) + ;({ grt, curation, gns, controller, proxyAdmin, l1GraphTokenGateway } = fixtureContracts) newSubgraph0 = buildSubgraph() newSubgraph1 = buildSubgraph() defaultName = createDefaultName('graph') @@ -534,6 +584,21 @@ describe('L1GNS', () => { await grt.connect(other.signer).approve(curation.address, tokens100000) // Update curation tax to test the functionality of it in disableNameSignal() await curation.connect(governor.signer).setCurationTaxPercentage(curationTaxPercentage) + + // Deploying a GNS mock with support for legacy subgraphs + await deployLegacyGNSMock() + await grt.connect(me.signer).approve(legacyGNSMock.address, tokens100000) + + const arbitrumMocks = await fixture.loadArbitrumL1Mocks(governor.signer) + await fixture.configureL1Bridge( + governor.signer, + arbitrumMocks, + fixtureContracts, + mockRouter.address, + mockL2GRT.address, + mockL2Gateway.address, + mockL2GNS.address, + ) }) beforeEach(async function () { @@ -1185,30 +1250,6 @@ describe('L1GNS', () 
=> { }) }) describe('Legacy subgraph migration', function () { - beforeEach(async function () { - const bondingCurve = (await deployContract( - 'BancorFormula', - governor.signer, - )) as unknown as BancorFormula - const subgraphDescriptor = await deployContract('SubgraphNFTDescriptor', governor.signer) - const subgraphNFT = (await deployContract( - 'SubgraphNFT', - governor.signer, - governor.address, - )) as SubgraphNFT - - // Deploy - legacyGNSMock = (await network.deployContractWithProxy( - proxyAdmin, - 'LegacyGNSMock', - [controller.address, bondingCurve.address, subgraphNFT.address], - governor.signer, - )) as unknown as LegacyGNSMock - - // Post-config - await subgraphNFT.connect(governor.signer).setMinter(legacyGNSMock.address) - await subgraphNFT.connect(governor.signer).setTokenDescriptor(subgraphDescriptor.address) - }) it('migrates a legacy subgraph', async function () { const seqID = toBN('2') await legacyGNSMock @@ -1250,14 +1291,170 @@ describe('L1GNS', () => { }) describe('Subgraph migration to L2', function () { describe('lockSubgraphForMigrationToL2', function () { - it('locks and disables a subgraph, burning the signal and storing the block number') - it('locks and disables a legacy subgraph, burning the signal and storing the block number') - it('rejects calls from someone who is not the subgraph owner') - it('rejects a call for a non-existent subgraph') - it('rejects a call for a subgraph with no signal') + it('locks and disables a subgraph, burning the signal and storing the block number', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + const curatedTokens = await grt.balanceOf(curation.address) + const subgraphBefore = await gns.subgraphs(subgraph0.id) + expect(subgraphBefore.vSignal).not.eq(0) + const tx = 
gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + await expect(tx).emit(gns, 'SubgraphLockedForMigrationToL2').withArgs(subgraph0.id) + + const subgraphAfter = await gns.subgraphs(subgraph0.id) + expect(subgraphAfter.vSignal).eq(0) + expect(subgraphAfter.nSignal).eq(subgraphBefore.nSignal) + expect(await grt.balanceOf(gns.address)).eq(curatedTokens) + expect(subgraphAfter.disabled).eq(true) + expect(subgraphAfter.withdrawableGRT).eq(0) + + const migrationData = await gns.subgraphL2MigrationData(subgraph0.id) + expect(migrationData.lockedAtBlock).eq(await latestBlock()) + expect(migrationData.l1Done).eq(false) + + let invalidTx = gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + await expect(invalidTx).revertedWith('GNS: Must be active') + invalidTx = gns.connect(me.signer).burnSignal(subgraph0.id, toGRT('90000'), 0) + await expect(invalidTx).revertedWith('GNS: Must be active') + }) + it('locks and disables a legacy subgraph, burning the signal and storing the block number', async function () { + const seqID = toBN('2') + await legacyGNSMock + .connect(me.signer) + .createLegacySubgraph(seqID, newSubgraph0.subgraphDeploymentID) + // The legacy subgraph must be claimed + const migrateTx = legacyGNSMock + .connect(me.signer) + .migrateLegacySubgraph(me.address, seqID, newSubgraph0.subgraphMetadata) + await expect(migrateTx) + .emit(legacyGNSMock, ' LegacySubgraphClaimed') + .withArgs(me.address, seqID) + const subgraphID = buildLegacySubgraphID(me.address, seqID) + + // Curate on the subgraph + await legacyGNSMock.connect(me.signer).mintSignal(subgraphID, toGRT('10000'), 0) + + const curatedTokens = await grt.balanceOf(curation.address) + const subgraphBefore = await legacyGNSMock.legacySubgraphData(me.address, seqID) + expect(subgraphBefore.vSignal).not.eq(0) + const tx = legacyGNSMock.connect(me.signer).lockSubgraphForMigrationToL2(subgraphID) + await expect(tx).emit(legacyGNSMock, 
'SubgraphLockedForMigrationToL2').withArgs(subgraphID) + + const subgraphAfter = await legacyGNSMock.legacySubgraphData(me.address, seqID) + expect(subgraphAfter.vSignal).eq(0) + expect(subgraphAfter.nSignal).eq(subgraphBefore.nSignal) + expect(await grt.balanceOf(legacyGNSMock.address)).eq(curatedTokens) + expect(subgraphAfter.disabled).eq(true) + expect(subgraphAfter.withdrawableGRT).eq(0) + + const migrationData = await legacyGNSMock.subgraphL2MigrationData(subgraphID) + expect(migrationData.lockedAtBlock).eq(await latestBlock()) + expect(migrationData.l1Done).eq(false) + + let invalidTx = legacyGNSMock.connect(me.signer).mintSignal(subgraphID, toGRT('90000'), 0) + await expect(invalidTx).revertedWith('GNS: Must be active') + invalidTx = legacyGNSMock.connect(me.signer).burnSignal(subgraphID, toGRT('90000'), 0) + await expect(invalidTx).revertedWith('GNS: Must be active') + }) + it('rejects calls from someone who is not the subgraph owner', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + const tx = gns.connect(other.signer).lockSubgraphForMigrationToL2(subgraph0.id) + await expect(tx).revertedWith('GNS: Must be authorized') + }) + it('rejects a call for a non-existent subgraph', async function () { + const subgraphID = buildLegacySubgraphID(me.address, toBN('0')) + + const tx = gns.connect(other.signer).lockSubgraphForMigrationToL2(subgraphID) + await expect(tx).revertedWith('ERC721: owner query for nonexistent token') + }) + it('rejects a call for a subgraph that is already locked', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + const tx = 
gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + await expect(tx).emit(gns, 'SubgraphLockedForMigrationToL2').withArgs(subgraph0.id) + + const tx2 = gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + await expect(tx2).revertedWith('GNS: Must be active') + }) + it('rejects a call for a subgraph that is deprecated', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + await gns.connect(me.signer).deprecateSubgraph(subgraph0.id) + + const tx2 = gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + // Deprecating the subgraph burns the NFT + await expect(tx2).revertedWith('ERC721: owner query for nonexistent token') + }) }) describe('sendSubgraphToL2', function () { - it('sends tokens and calldata to L2 through the GRT bridge') + it('sends tokens and calldata to L2 through the GRT bridge', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + + // We need the block number to be > 256 to avoid underflows... 
+ await advanceBlocks(256) + + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + const curatedTokens = await grt.balanceOf(curation.address) + const lockTx = await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + const lockReceipt = await lockTx.wait() + const lockBlockhash = lockReceipt.blockHash + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id) + + const subgraphAfter = await gns.subgraphs(subgraph0.id) + expect(subgraphAfter.vSignal).eq(0) + expect(await grt.balanceOf(gns.address)).eq(0) + expect(subgraphAfter.disabled).eq(true) + expect(subgraphAfter.withdrawableGRT).eq(0) + + const migrationData = await gns.subgraphL2MigrationData(subgraph0.id) + expect(migrationData.lockedAtBlock).eq((await latestBlock()).sub(1)) + expect(migrationData.l1Done).eq(true) + + const expectedCallhookData = l2GNSIface.encodeFunctionData('receiveSubgraphFromL1', [ + subgraph0.id, + me.address, + curatedTokens, + lockBlockhash, + subgraphAfter.nSignal, + subgraphAfter.reserveRatio, + newSubgraph0.subgraphMetadata, + ]) + + const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( + grt.address, + gns.address, + mockL2GNS.address, + curatedTokens, + expectedCallhookData, + ) + await expect(tx) + .emit(l1GraphTokenGateway, 'TxToL2') + .withArgs(gns.address, mockL2GNS.address, toBN(1), expectedL2Data) + }) it('sends tokens and calldata for a legacy subgraph to L2 through the GRT bridge') it('rejects calls from someone who is not the subgraph owner') it('rejects calls for a subgraph that is not locked') @@ -1272,7 +1469,8 @@ describe('L1GNS', () => { it('rejects calls for a subgraph that was already 
deprecated') }) describe('claimCuratorBalanceToBeneficiaryOnL2', function () { - it('sends a transaction to the L2GNS using the Arbitrum inbox') + it('sends a transaction with a curator balance to the L2GNS using the Arbitrum inbox') + it('sends a transaction with a curator balance from a legacy subgraph to the L2GNS') it('rejects calls for a subgraph that was locked but not sent to L2') it('rejects calls for a subgraph that was not locked') it('rejects calls for a subgraph that was locked but deprecated') diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index c03646234..5e2330a07 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -3,21 +3,21 @@ import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' import { solidityKeccak256 } from 'ethers/lib/utils' import { SubgraphDeploymentID } from '@graphprotocol/common-ts' -import { LegacyGNSMock } from '../build/types/LegacyGNSMock' -import { GraphToken } from '../build/types/GraphToken' -import { Curation } from '../build/types/Curation' -import { SubgraphNFT } from '../build/types/SubgraphNFT' +import { LegacyGNSMock } from '../../build/types/LegacyGNSMock' +import { GraphToken } from '../../build/types/GraphToken' +import { Curation } from '../../build/types/Curation' +import { SubgraphNFT } from '../../build/types/SubgraphNFT' -import { getAccounts, randomHexBytes, Account, toGRT, getChainID } from './lib/testHelpers' -import { NetworkFixture } from './lib/fixtures' -import { toBN, formatGRT } from './lib/testHelpers' -import { getContractAt } from '../cli/network' -import { deployContract } from './lib/deployment' -import { BancorFormula } from '../build/types/BancorFormula' -import { network } from '../cli' -import { Controller } from '../build/types/Controller' -import { GraphProxyAdmin } from '../build/types/GraphProxyAdmin' -import { L2GNS } from '../build/types/L1GNS' +import { getAccounts, randomHexBytes, Account, toGRT, getChainID } from '../lib/testHelpers' +import { 
NetworkFixture } from '../lib/fixtures' +import { toBN, formatGRT } from '../lib/testHelpers' +import { getContractAt } from '../../cli/network' +import { deployContract } from '../lib/deployment' +import { BancorFormula } from '../../build/types/BancorFormula' +import { network } from '../../cli' +import { Controller } from '../../build/types/Controller' +import { GraphProxyAdmin } from '../../build/types/GraphProxyAdmin' +import { L2GNS } from '../../build/types/L2GNS' const { AddressZero, HashZero } = ethers.constants diff --git a/test/lib/fixtures.ts b/test/lib/fixtures.ts index e063afa8d..874297f45 100644 --- a/test/lib/fixtures.ts +++ b/test/lib/fixtures.ts @@ -238,6 +238,7 @@ export class NetworkFixture { mockRouterAddress: string, mockL2GRTAddress: string, mockL2GatewayAddress: string, + mockL2GNSAddress: string, ): Promise { // First configure the Arbitrum bridge mocks await arbitrumMocks.bridgeMock.connect(deployer).setInbox(arbitrumMocks.inboxMock.address, true) @@ -263,6 +264,10 @@ export class NetworkFixture { await l1FixtureContracts.bridgeEscrow .connect(deployer) .approveAll(l1FixtureContracts.l1GraphTokenGateway.address) + await l1FixtureContracts.gns.connect(deployer).setCounterpartGNSAddress(mockL2GNSAddress) + await l1FixtureContracts.l1GraphTokenGateway + .connect(deployer) + .addToCallhookWhitelist(l1FixtureContracts.gns.address) await l1FixtureContracts.l1GraphTokenGateway.connect(deployer).setPaused(false) } From 303199cacedf0e338259c460c4cc97431930fb28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Fri, 1 Jul 2022 18:52:38 +0300 Subject: [PATCH 012/112] test: fix destination address --- test/gns.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/gns.test.ts b/test/gns.test.ts index 78cc8d1af..7ed6ec23c 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1453,7 +1453,7 @@ describe('L1GNS', () => { ) await expect(tx) .emit(l1GraphTokenGateway, 'TxToL2') - .withArgs(gns.address, 
mockL2GNS.address, toBN(1), expectedL2Data) + .withArgs(gns.address, mockL2Gateway.address, toBN(1), expectedL2Data) }) it('sends tokens and calldata for a legacy subgraph to L2 through the GRT bridge') it('rejects calls from someone who is not the subgraph owner') From 8e8cb8e828f469a27e07d1e50dd161f3d59a7a8c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Fri, 15 Jul 2022 14:59:41 +0200 Subject: [PATCH 013/112] test: fix gateway configuration --- test/gateway/l1GraphTokenGateway.test.ts | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/test/gateway/l1GraphTokenGateway.test.ts b/test/gateway/l1GraphTokenGateway.test.ts index 76b00ee6b..c3e3cdd7c 100644 --- a/test/gateway/l1GraphTokenGateway.test.ts +++ b/test/gateway/l1GraphTokenGateway.test.ts @@ -30,6 +30,7 @@ describe('L1GraphTokenGateway', () => { let mockL2GRT: Account let mockL2Gateway: Account let pauseGuardian: Account + let mockL2GNS: Account let fixture: NetworkFixture let grt: GraphToken @@ -63,8 +64,16 @@ describe('L1GraphTokenGateway', () => { ) before(async function () { - ;[governor, tokenSender, l2Receiver, mockRouter, mockL2GRT, mockL2Gateway, pauseGuardian] = - await getAccounts() + ;[ + governor, + tokenSender, + l2Receiver, + mockRouter, + mockL2GRT, + mockL2Gateway, + pauseGuardian, + mockL2GNS, + ] = await getAccounts() // Dummy code on the mock router so that it appears as a contract await provider().send('hardhat_setCode', [mockRouter.address, '0x1234']) @@ -427,6 +436,7 @@ describe('L1GraphTokenGateway', () => { mockRouter.address, mockL2GRT.address, mockL2Gateway.address, + mockL2GNS.address, ) }) From 1721ba336d1a0b7314058256deef7cdcd4b00516 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Fri, 15 Jul 2022 15:36:30 +0200 Subject: [PATCH 014/112] test: fix check for reserveRatio in migrated subgraph --- test/gns.test.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
a/test/gns.test.ts b/test/gns.test.ts index 7ed6ec23c..0ea391de2 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1410,6 +1410,7 @@ describe('L1GNS', () => { await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) const curatedTokens = await grt.balanceOf(curation.address) + const subgraphBefore = await gns.subgraphs(subgraph0.id) const lockTx = await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) const lockReceipt = await lockTx.wait() const lockBlockhash = lockReceipt.blockHash @@ -1439,8 +1440,8 @@ describe('L1GNS', () => { me.address, curatedTokens, lockBlockhash, - subgraphAfter.nSignal, - subgraphAfter.reserveRatio, + subgraphBefore.nSignal, + subgraphBefore.reserveRatio, newSubgraph0.subgraphMetadata, ]) From 7b2e7151e40c6e2794a7b4088417508aeaa7078c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Tue, 19 Jul 2022 17:18:04 +0200 Subject: [PATCH 015/112] test: more tests for sending to L2, plus optimize advanceBlocks --- test/gns.test.ts | 93 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 91 insertions(+), 2 deletions(-) diff --git a/test/gns.test.ts b/test/gns.test.ts index 0ea391de2..6d56ca1da 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -564,6 +564,8 @@ describe('L1GNS', () => { await subgraphNFT.connect(governor.signer).setTokenDescriptor(subgraphDescriptor.address) await legacyGNSMock.connect(governor.signer).syncAllContracts() await legacyGNSMock.connect(governor.signer).approveAll() + await l1GraphTokenGateway.connect(governor.signer).addToCallhookWhitelist(legacyGNSMock.address) + await legacyGNSMock.connect(governor.signer).setCounterpartGNSAddress(mockL2GNS.address) } before(async function () { @@ -1456,8 +1458,95 @@ describe('L1GNS', () => { .emit(l1GraphTokenGateway, 'TxToL2') .withArgs(gns.address, mockL2Gateway.address, toBN(1), expectedL2Data) }) - it('sends tokens and calldata for a legacy subgraph to L2 through the GRT bridge') - it('rejects 
calls from someone who is not the subgraph owner') + it('sends tokens and calldata for a legacy subgraph to L2 through the GRT bridge', async function () { + const seqID = toBN('2') + await legacyGNSMock + .connect(me.signer) + .createLegacySubgraph(seqID, newSubgraph0.subgraphDeploymentID) + const migrateTx = legacyGNSMock + .connect(me.signer) + .migrateLegacySubgraph(me.address, seqID, newSubgraph0.subgraphMetadata) + await expect(migrateTx) + .emit(legacyGNSMock, ' LegacySubgraphClaimed') + .withArgs(me.address, seqID) + const subgraphID = buildLegacySubgraphID(me.address, seqID) + + // We need the block number to be > 256 to avoid underflows... + await advanceBlocks(256) + + // Curate on the subgraph + await legacyGNSMock.connect(me.signer).mintSignal(subgraphID, toGRT('90000'), 0) + + const curatedTokens = await grt.balanceOf(curation.address) + const subgraphBefore = await legacyGNSMock.legacySubgraphData(me.address, seqID) + const lockTx = await legacyGNSMock + .connect(me.signer) + .lockSubgraphForMigrationToL2(subgraphID) + const lockReceipt = await lockTx.wait() + const lockBlockhash = lockReceipt.blockHash + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = legacyGNSMock + .connect(me.signer) + .sendSubgraphToL2(subgraphID, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).emit(legacyGNSMock, 'SubgraphSentToL2').withArgs(subgraphID) + + const subgraphAfter = await legacyGNSMock.legacySubgraphData(me.address, seqID) + expect(subgraphAfter.vSignal).eq(0) + expect(await grt.balanceOf(legacyGNSMock.address)).eq(0) + expect(subgraphAfter.disabled).eq(true) + expect(subgraphAfter.withdrawableGRT).eq(0) + + const migrationData = await legacyGNSMock.subgraphL2MigrationData(subgraphID) + expect(migrationData.lockedAtBlock).eq((await latestBlock()).sub(1)) + expect(migrationData.l1Done).eq(true) + + const expectedCallhookData = 
l2GNSIface.encodeFunctionData('receiveSubgraphFromL1', [ + subgraphID, + me.address, + curatedTokens, + lockBlockhash, + subgraphBefore.nSignal, + subgraphBefore.reserveRatio, + newSubgraph0.subgraphMetadata, + ]) + + const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( + grt.address, + legacyGNSMock.address, + mockL2GNS.address, + curatedTokens, + expectedCallhookData, + ) + await expect(tx) + .emit(l1GraphTokenGateway, 'TxToL2') + .withArgs(legacyGNSMock.address, mockL2Gateway.address, toBN(1), expectedL2Data) + }) + it('rejects calls from someone who is not the subgraph owner', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + + // We need the block number to be > 256 to avoid underflows... + await advanceBlocks(256) + + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(other.signer) + .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).revertedWith('GNS: Must be authorized') + }) it('rejects calls for a subgraph that is not locked') it('rejects calls for a subgraph that was already sent') it('rejects calls after too many blocks have passed') From 0bed94da6d9a4444133d823d01425817b474af07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Fri, 26 Aug 2022 18:14:16 +0200 Subject: [PATCH 016/112] chore: use AddressAliasHelper in L2GNS --- contracts/l2/discovery/L2GNS.sol | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 3d541fa77..accf29b0d 100644 --- 
a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -3,10 +3,13 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import "@openzeppelin/contracts/math/SafeMath.sol"; -import "@openzeppelin/contracts/utils/Address.sol"; +import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; +import { Address } from "@openzeppelin/contracts/utils/Address.sol"; -import "../../discovery/GNS.sol"; +import { AddressAliasHelper } from "../../arbitrum/AddressAliasHelper.sol"; +import { GNS } from "../../discovery/GNS.sol"; +import { IGNS } from "../../discovery/IGNS.sol"; +import { ICuration } from "../../curation/ICuration.sol"; import { RLPReader } from "../../libraries/RLPReader.sol"; import { StateProofVerifier as Verifier } from "../../libraries/StateProofVerifier.sol"; @@ -25,9 +28,6 @@ contract L2GNS is GNS { using RLPReader for RLPReader.RLPItem; using SafeMath for uint256; - // Offset applied by the bridge to L1 addresses sending messages to L2 - uint160 internal constant L2_ADDRESS_OFFSET = - uint160(0x1111000000000000000000000000000000001111); // Storage slot where the subgraphs mapping is stored on L1GNS uint256 internal constant SUBGRAPH_MAPPING_SLOT = 18; // Storage slot where the legacy subgraphs mapping is stored on L1GNS @@ -55,7 +55,10 @@ contract L2GNS is GNS { * GNS on L1. 
*/ modifier onlyL1Counterpart() { - require(msg.sender == l1ToL2Alias(counterpartGNSAddress), "ONLY_COUNTERPART_GNS"); + require( + msg.sender == AddressAliasHelper.applyL1ToL2Alias(counterpartGNSAddress), + "ONLY_COUNTERPART_GNS" + ); _; } @@ -296,15 +299,4 @@ contract L2GNS is GNS { ); migratedData.curatorBalanceClaimed[_curator] = true; } - - /** - * @notice Converts L1 address to its L2 alias used when sending messages - * @dev The Arbitrum bridge adds an offset to addresses when sending messages, - * so we need to apply it to check any L1 address from a message in L2 - * @param _l1Address The L1 address - * @return _l2Address the L2 alias of _l1Address - */ - function l1ToL2Alias(address _l1Address) internal pure returns (address _l2Address) { - _l2Address = address(uint160(_l1Address) + L2_ADDRESS_OFFSET); - } } From 9a636f242b23bd073d83fcc0642f5f71163b86f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Tue, 30 Aug 2022 10:45:06 -0300 Subject: [PATCH 017/112] chore: use separate functions to compute slots --- contracts/l2/discovery/L2GNS.sol | 82 +++++++++++++++++--------------- 1 file changed, 44 insertions(+), 38 deletions(-) diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index accf29b0d..8c151f731 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -163,23 +163,7 @@ contract L2GNS is GNS { require(l1GNSAccount.exists, "!ACCOUNT"); - // subgraphs mapping is stored at slot SUBGRAPH_MAPPING_SLOT. 
- // So our subgraph is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) - // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, - // So the mapping is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) + 2 - // Therefore the nSignal value for msg.sender should be at slot: - uint256 curatorSlot = uint256( - keccak256( - abi.encodePacked( - uint256(msg.sender), - uint256( - keccak256( - abi.encodePacked(uint256(_subgraphID), uint256(SUBGRAPH_MAPPING_SLOT)) - ) - ).add(2) - ) - ) - ); + uint256 curatorSlot = _getCuratorSlot(msg.sender, _subgraphID); Verifier.SlotValue memory curatorNSignalSlot = Verifier.extractSlotValueFromProof( keccak256(abi.encodePacked(curatorSlot)), @@ -234,27 +218,7 @@ contract L2GNS is GNS { require(l1GNSAccount.exists, "!ACCOUNT"); - uint256 curatorSlot; - { - // legacy subgraphs mapping is stored at slot LEGACY_SUBGRAPH_MAPPING_SLOT. - // So the subgraphs for the account are at slot keccak256(abi.encodePacked(uint256(_subgraphCreatorAccount), uint256(SUBGRAPH_MAPPING_SLOT))) - uint256 accountSlot = uint256( - keccak256( - abi.encodePacked( - uint256(_subgraphCreatorAccount), - uint256(LEGACY_SUBGRAPH_MAPPING_SLOT) - ) - ) - ); - // Then the subgraph for this _seqID should be at: - uint256 subgraphSlot = uint256(keccak256(abi.encodePacked(_seqID, accountSlot))); - // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, - // So the mapping is at slot subgraphSlot + 2 - // Therefore the nSignal value for msg.sender should be at slot: - curatorSlot = uint256( - keccak256(abi.encodePacked(uint256(msg.sender), uint256(subgraphSlot).add(2))) - ); - } + uint256 curatorSlot = _getLegacyCuratorSlot(msg.sender, _subgraphCreatorAccount, _seqID); Verifier.SlotValue memory curatorNSignalSlot = Verifier.extractSlotValueFromProof( keccak256(abi.encodePacked(curatorSlot)), @@ -299,4 +263,46 @@ contract L2GNS is GNS { ); 
migratedData.curatorBalanceClaimed[_curator] = true; } + + function _getCuratorSlot(address _curator, uint256 _subgraphID) + internal + pure + returns (uint256) + { + // subgraphs mapping is stored at slot SUBGRAPH_MAPPING_SLOT. + // So our subgraph is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) + // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, + // So the mapping is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) + 2 + // Therefore the nSignal value for msg.sender should be at slot: + return + uint256( + keccak256( + abi.encodePacked( + uint256(_curator), + uint256(keccak256(abi.encodePacked(_subgraphID, SUBGRAPH_MAPPING_SLOT))) + .add(2) + ) + ) + ); + } + + function _getLegacyCuratorSlot( + address _curator, + address _subgraphCreatorAccount, + uint256 _seqID + ) internal pure returns (uint256) { + // legacy subgraphs mapping is stored at slot LEGACY_SUBGRAPH_MAPPING_SLOT. 
+ // So the subgraphs for the account are at slot keccak256(abi.encodePacked(uint256(_subgraphCreatorAccount), uint256(SUBGRAPH_MAPPING_SLOT))) + uint256 accountSlot = uint256( + keccak256( + abi.encodePacked(uint256(_subgraphCreatorAccount), LEGACY_SUBGRAPH_MAPPING_SLOT) + ) + ); + // Then the subgraph for this _seqID should be at: + uint256 subgraphSlot = uint256(keccak256(abi.encodePacked(_seqID, accountSlot))); + // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, + // So the mapping is at slot subgraphSlot + 2 + // Therefore the nSignal value for msg.sender should be at slot: + return uint256(keccak256(abi.encodePacked(uint256(_curator), subgraphSlot.add(2)))); + } } From f1037c9c00d48e665bb79ccf0bc2228c2accf65b Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 16 Sep 2022 19:31:52 -0300 Subject: [PATCH 018/112] fix(L1GNS): better revert messages, and add more tests --- contracts/discovery/L1GNS.sol | 16 ++-- test/gns.test.ts | 176 ++++++++++++++++++++++++++++++---- 2 files changed, 167 insertions(+), 25 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index c45df5672..1762e667b 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -76,16 +76,18 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { uint256 maxGas, uint256 gasPriceBid, uint256 maxSubmissionCost - ) external payable notPartialPaused onlySubgraphAuth(_subgraphID) { + ) external payable notPartialPaused { SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; require( - migrationData.lockedAtBlock > 0 && - migrationData.lockedAtBlock >= block.number.sub(255) && - migrationData.lockedAtBlock < block.number, + migrationData.lockedAtBlock > 0 && migrationData.lockedAtBlock < block.number, "!LOCKED" ); + require(migrationData.lockedAtBlock.add(255) >= block.number, "TOO_LATE"); 
require(!migrationData.l1Done, "ALREADY_DONE"); + // This is just like onlySubgraphAuth, but we want it to run after the other checks + // to revert with a nicer message in those cases: + require(ownerOf(_subgraphID) == msg.sender, "GNS: Must be authorized"); migrationData.l1Done = true; bytes memory extraData = encodeSubgraphMetadataForL2( @@ -138,10 +140,8 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { function deprecateLockedSubgraph(uint256 _subgraphID) external notPartialPaused { SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; - require( - migrationData.lockedAtBlock > 0 && migrationData.lockedAtBlock < block.number.sub(256), - "!LOCKED" - ); + require(migrationData.lockedAtBlock > 0, "!LOCKED"); + require(migrationData.lockedAtBlock.add(256) < block.number, "TOO_EARLY"); require(!migrationData.l1Done, "ALREADY_DONE"); migrationData.l1Done = true; diff --git a/test/gns.test.ts b/test/gns.test.ts index 6d56ca1da..60af3ba1e 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -16,6 +16,7 @@ import { getChainID, latestBlock, advanceBlocks, + advanceBlock, } from './lib/testHelpers' import { NetworkFixture } from './lib/fixtures' import { toBN, formatGRT } from './lib/testHelpers' @@ -1405,9 +1406,6 @@ describe('L1GNS', () => { // Publish a named subgraph-0 -> subgraphDeployment0 const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - // We need the block number to be > 256 to avoid underflows... - await advanceBlocks(256) - // Curate on the subgraph await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) @@ -1471,9 +1469,6 @@ describe('L1GNS', () => { .withArgs(me.address, seqID) const subgraphID = buildLegacySubgraphID(me.address, seqID) - // We need the block number to be > 256 to avoid underflows... 
- await advanceBlocks(256) - // Curate on the subgraph await legacyGNSMock.connect(me.signer).mintSignal(subgraphID, toGRT('90000'), 0) @@ -1530,9 +1525,6 @@ describe('L1GNS', () => { // Publish a named subgraph-0 -> subgraphDeployment0 const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - // We need the block number to be > 256 to avoid underflows... - await advanceBlocks(256) - // Curate on the subgraph await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) @@ -1547,16 +1539,166 @@ describe('L1GNS', () => { }) await expect(tx).revertedWith('GNS: Must be authorized') }) - it('rejects calls for a subgraph that is not locked') - it('rejects calls for a subgraph that was already sent') - it('rejects calls after too many blocks have passed') + it('rejects calls for a subgraph that is not locked', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).revertedWith('!LOCKED') + }) + it('rejects calls for a subgraph that was already sent', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + 
.sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id) + + const tx2 = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx2).revertedWith('ALREADY_DONE') + }) + it('rejects calls after too many blocks have passed', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + + await advanceBlocks(256) + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).revertedWith('TOO_LATE') + }) }) describe('deprecateLockedSubgraph', function () { - it('makes the GRT from the subgraph withdrawable') - it('rejects calls for a subgraph that was not locked') - it('rejects calls if not enough blocks have passed') - it('rejects calls for a subgraph that was sent to L2') - it('rejects calls for a subgraph that was already deprecated') + it('can be called by anyone, and makes the GRT from the subgraph withdrawable', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + const [beforeTokens] = await getTokensAndVSignal(newSubgraph0.subgraphDeploymentID) + await 
gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + + await advanceBlocks(256) + + // Now the subgraph can be deprecated (by someone else!) + const tx = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) + await expect(tx).emit(gns, 'SubgraphDeprecated').withArgs(subgraph0.id, beforeTokens) + // After state, same as with deprecateSubgraph + const afterSubgraph = await gns.subgraphs(subgraph0.id) + // Check marked as deprecated + expect(afterSubgraph.disabled).eq(true) + // Signal for the deployment must be all burned + expect(afterSubgraph.vSignal.eq(toBN('0'))) + // Cleanup reserve ratio + expect(afterSubgraph.reserveRatio).eq(0) + // Should be equal since owner pays curation tax + expect(afterSubgraph.withdrawableGRT).eq(beforeTokens) + }) + it('rejects calls for a subgraph that was not locked', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + await advanceBlocks(256) + + const tx = gns.connect(me.signer).deprecateLockedSubgraph(subgraph0.id) + await expect(tx).revertedWith('!LOCKED') + }) + it('rejects calls if not enough blocks have passed', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + + await advanceBlocks(255) // Not enough! 
+ + const tx = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) + await expect(tx).revertedWith('TOO_EARLY') + }) + it('rejects calls for a subgraph that was sent to L2', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + + await advanceBlocks(254) + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id) + + await advanceBlock() + const tx2 = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) + await expect(tx2).revertedWith('ALREADY_DONE') + }) + it('rejects calls for a subgraph that was already deprecated', async function () { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + const [beforeTokens] = await getTokensAndVSignal(newSubgraph0.subgraphDeploymentID) + await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + + await advanceBlocks(256) + + const tx = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) + await expect(tx).emit(gns, 'SubgraphDeprecated').withArgs(subgraph0.id, beforeTokens) + const tx2 = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) + await expect(tx2).revertedWith('ALREADY_DONE') + }) }) describe('claimCuratorBalanceToBeneficiaryOnL2', function () { it('sends a transaction with a curator balance to the L2GNS using the Arbitrum inbox') From 
cd427cceed48f9dd684b18706c688d55deb47277 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Sep 2022 14:17:57 -0300 Subject: [PATCH 019/112] fix: use a single storage contract to prevent upgrade issues --- contracts/discovery/GNSStorage.sol | 2 + contracts/discovery/L1GNS.sol | 21 ++++----- contracts/discovery/L1GNSStorage.sol | 8 ---- contracts/l2/discovery/IL2GNS.sol | 68 ++++++++++++++++++++++++++++ contracts/l2/discovery/L2GNS.sol | 47 +++++++++---------- contracts/tests/LegacyGNSMock.sol | 3 +- 6 files changed, 106 insertions(+), 43 deletions(-) delete mode 100644 contracts/discovery/L1GNSStorage.sol create mode 100644 contracts/l2/discovery/IL2GNS.sol diff --git a/contracts/discovery/GNSStorage.sol b/contracts/discovery/GNSStorage.sol index aeb98a460..f79ecb477 100644 --- a/contracts/discovery/GNSStorage.sol +++ b/contracts/discovery/GNSStorage.sol @@ -55,4 +55,6 @@ abstract contract GNSV3Storage is GNSV2Storage { mapping(uint256 => IGNS.SubgraphL2MigrationData) public subgraphL2MigrationData; // Address of the counterpart GNS contract (L1GNS/L2GNS) address public counterpartGNSAddress; + // Address of the Arbitrum DelayedInbox - only used by L1GNS + address public arbitrumInboxAddress; } diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 1762e667b..04e9e273f 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -3,16 +3,15 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import "@openzeppelin/contracts/math/SafeMath.sol"; -import "@openzeppelin/contracts/utils/Address.sol"; +import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; +import { Address } from "@openzeppelin/contracts/utils/Address.sol"; -import "./GNS.sol"; -import "./GNSStorage.sol"; -import "./L1GNSStorage.sol"; +import { GNS } from "./GNS.sol"; -import "../arbitrum/ITokenGateway.sol"; -import "../arbitrum/L1ArbitrumMessenger.sol"; -import "../l2/discovery/L2GNS.sol"; +import { ITokenGateway } from 
"../arbitrum/ITokenGateway.sol"; +import { L1ArbitrumMessenger } from "../arbitrum/L1ArbitrumMessenger.sol"; +import { IL2GNS } from "../l2/discovery/IL2GNS.sol"; +import { IGraphToken } from "../token/IGraphToken.sol"; /** * @title GNS @@ -23,7 +22,7 @@ import "../l2/discovery/L2GNS.sol"; * The contract implements a multicall behaviour to support batching multiple calls in a single * transaction. */ -contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { +contract L1GNS is GNS, L1ArbitrumMessenger { using SafeMath for uint256; event SubgraphLockedForMigrationToL2(uint256 _subgraphID); @@ -121,7 +120,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { ) internal view returns (bytes memory) { return abi.encodeWithSelector( - L2GNS.receiveSubgraphFromL1.selector, + IL2GNS.receiveSubgraphFromL1.selector, _subgraphID, ownerOf(_subgraphID), migrationData.tokens, @@ -179,7 +178,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { L2GasParams memory gasParams = L2GasParams(_maxSubmissionCost, _maxGas, _gasPriceBid); bytes memory outboundCalldata = abi.encodeWithSelector( - L2GNS.claimL1CuratorBalanceToBeneficiary.selector, + IL2GNS.claimL1CuratorBalanceToBeneficiary.selector, _subgraphID, msg.sender, subgraphData.curatorNSignal[msg.sender], diff --git a/contracts/discovery/L1GNSStorage.sol b/contracts/discovery/L1GNSStorage.sol deleted file mode 100644 index b55a70f8c..000000000 --- a/contracts/discovery/L1GNSStorage.sol +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -pragma solidity ^0.7.6; -pragma abicoder v2; - -abstract contract L1GNSV1Storage { - address public arbitrumInboxAddress; -} diff --git a/contracts/l2/discovery/IL2GNS.sol b/contracts/l2/discovery/IL2GNS.sol new file mode 100644 index 000000000..6fa338567 --- /dev/null +++ b/contracts/l2/discovery/IL2GNS.sol @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; + +interface IL2GNS { + function 
receiveSubgraphFromL1( + uint256 _subgraphID, + address _subgraphOwner, + uint256 _tokens, + bytes32 _lockedAtBlockHash, + uint256 _nSignal, + uint32 _reserveRatio, + bytes32 _subgraphMetadata + ) external; + + function finishSubgraphMigrationFromL1( + uint256 _subgraphID, + bytes32 _subgraphDeploymentID, + bytes32 _versionMetadata + ) external; + + /** + * @dev Claim curator balance belonging to a curator from L1. + * This will be credited to the same curator's balance on L2. + * This can only be called by the corresponding curator. + * @param _subgraphID Subgraph for which to claim a balance + * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 + * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance + */ + function claimL1CuratorBalance( + uint256 _subgraphID, + bytes memory _blockHeaderRlpBytes, + bytes memory _proofRlpBytes + ) external; + + /** + * @dev Claim curator balance belonging to a curator from L1 on a legacy subgraph. + * This will be credited to the same curator's balance on L2. + * This can only be called by the corresponding curator. + * Users can query getLegacySubgraphKey on L1 to get the _subgraphCreatorAccount and _seqID. + * @param _subgraphCreatorAccount Account that created the subgraph in L1 + * @param _seqID Sequence number for the subgraph + * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 + * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance + */ + function claimL1CuratorBalanceForLegacySubgraph( + address _subgraphCreatorAccount, + uint256 _seqID, + bytes memory _blockHeaderRlpBytes, + bytes memory _proofRlpBytes + ) external; + + /** + * @dev Claim curator balance belonging to a curator from L1. 
+ * This will be credited to the a beneficiary on L2, and can only be called + * from the GNS on L1 through a retryable ticket. + * @param _subgraphID Subgraph on which to claim the balance + * @param _curator Curator who owns the balance on L1 + * @param _balance Balance of the curator from L1 + * @param _beneficiary Address of an L2 beneficiary for the balance + */ + function claimL1CuratorBalanceToBeneficiary( + uint256 _subgraphID, + address _curator, + uint256 _balance, + address _beneficiary + ) external; +} diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 8c151f731..ed6ed74cb 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -10,6 +10,7 @@ import { AddressAliasHelper } from "../../arbitrum/AddressAliasHelper.sol"; import { GNS } from "../../discovery/GNS.sol"; import { IGNS } from "../../discovery/IGNS.sol"; import { ICuration } from "../../curation/ICuration.sol"; +import { IL2GNS } from "./IL2GNS.sol"; import { RLPReader } from "../../libraries/RLPReader.sol"; import { StateProofVerifier as Verifier } from "../../libraries/StateProofVerifier.sol"; @@ -23,7 +24,7 @@ import { StateProofVerifier as Verifier } from "../../libraries/StateProofVerifi * The contract implements a multicall behaviour to support batching multiple calls in a single * transaction. 
*/ -contract L2GNS is GNS { +contract L2GNS is GNS, IL2GNS { using RLPReader for bytes; using RLPReader for RLPReader.RLPItem; using SafeMath for uint256; @@ -63,40 +64,40 @@ contract L2GNS is GNS { } function receiveSubgraphFromL1( - uint256 subgraphID, - address subgraphOwner, - uint256 tokens, - bytes32 lockedAtBlockHash, - uint256 nSignal, - uint32 reserveRatio, - bytes32 subgraphMetadata - ) external notPartialPaused onlyL2Gateway { - IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[subgraphID]; - SubgraphData storage subgraphData = _getSubgraphData(subgraphID); - - subgraphData.reserveRatio = reserveRatio; + uint256 _subgraphID, + address _subgraphOwner, + uint256 _tokens, + bytes32 _lockedAtBlockHash, + uint256 _nSignal, + uint32 _reserveRatio, + bytes32 _subgraphMetadata + ) external override notPartialPaused onlyL2Gateway { + IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + + subgraphData.reserveRatio = _reserveRatio; // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called subgraphData.disabled = true; - subgraphData.nSignal = nSignal; + subgraphData.nSignal = _nSignal; - migratedData.tokens = tokens; - migratedData.lockedAtBlockHash = lockedAtBlockHash; + migratedData.tokens = _tokens; + migratedData.lockedAtBlockHash = _lockedAtBlockHash; migratedData.l1Done = true; // Mint the NFT. Use the subgraphID as tokenID. // This function will check the if tokenID already exists. 
- _mintNFT(subgraphOwner, subgraphID); + _mintNFT(_subgraphOwner, _subgraphID); // Set the token metadata - _setSubgraphMetadata(subgraphID, subgraphMetadata); - emit SubgraphReceivedFromL1(subgraphID); + _setSubgraphMetadata(_subgraphID, _subgraphMetadata); + emit SubgraphReceivedFromL1(_subgraphID); } function finishSubgraphMigrationFromL1( uint256 _subgraphID, bytes32 _subgraphDeploymentID, bytes32 _versionMetadata - ) external notPartialPaused onlySubgraphAuth(_subgraphID) { + ) external override notPartialPaused onlySubgraphAuth(_subgraphID) { IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); // A subgraph @@ -144,7 +145,7 @@ contract L2GNS is GNS { uint256 _subgraphID, bytes memory _blockHeaderRlpBytes, bytes memory _proofRlpBytes - ) external notPartialPaused { + ) external override notPartialPaused { Verifier.BlockHeader memory blockHeader = Verifier.parseBlockHeader(_blockHeaderRlpBytes); IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; @@ -197,7 +198,7 @@ contract L2GNS is GNS { uint256 _seqID, bytes memory _blockHeaderRlpBytes, bytes memory _proofRlpBytes - ) external { + ) external override notPartialPaused { uint256 _subgraphID = _buildLegacySubgraphID(_subgraphCreatorAccount, _seqID); Verifier.BlockHeader memory blockHeader = Verifier.parseBlockHeader(_blockHeaderRlpBytes); @@ -251,7 +252,7 @@ contract L2GNS is GNS { address _curator, uint256 _balance, address _beneficiary - ) external notPartialPaused onlyL1Counterpart { + ) external override notPartialPaused onlyL1Counterpart { GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; require(migratedData.l2Done, "!MIGRATED"); diff --git a/contracts/tests/LegacyGNSMock.sol b/contracts/tests/LegacyGNSMock.sol index 8fd622882..a256d3c39 100644 --- a/contracts/tests/LegacyGNSMock.sol +++ b/contracts/tests/LegacyGNSMock.sol @@ 
-3,7 +3,8 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import "../discovery/L1GNS.sol"; +import { L1GNS } from "../discovery/L1GNS.sol"; +import { IGNS } from "../discovery/IGNS.sol"; /** * @title LegacyGNSMock contract From 825ad700dfa85151f25c7a9a7dcdd46d780dd3d5 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Sep 2022 14:29:41 -0300 Subject: [PATCH 020/112] test(e2e): fix L1GNS in config and use new subgraph id format --- addresses.json | 76 ++++++++++++++++++++--------------- config/graph.goerli.yml | 6 +-- config/graph.localhost.yml | 6 +-- e2e/scenarios/lib/subgraph.ts | 3 +- 4 files changed, 52 insertions(+), 39 deletions(-) diff --git a/addresses.json b/addresses.json index df2e2c762..098db3003 100644 --- a/addresses.json +++ b/addresses.json @@ -397,7 +397,7 @@ "runtimeCodeHash": "0x6a7751298d6ffdbcf421a3b72faab5b7d425884b04757303123758dbcfb21dfa", "txHash": "0x8884b65a236c188e4c61cf9593be2f67b27e4f80785939336d659866cfd97aec" }, - "GNS": { + "L1GNS": { "address": "0x065611D3515325aE6fe14f09AEe5Aa2C0a1f0CA7", "initArgs": [ "0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B", @@ -554,9 +554,9 @@ "proxy": true, "implementation": { "address": "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550", - "creationCodeHash": "0x729aca90fcffdeede93bc42a6e089a93085ec04133e965cf0291cf6245880595", - "runtimeCodeHash": "0xce525d338b6ed471eeb36d2927a26608cca2d5cfe52bd0585945eacc55b525cf", - "txHash": "0x376a2ebfe783246d83c3cbbdd95e20daa3cd3695a52d8e38699a7fa487d8ed64" + "creationCodeHash": "0x492b44ca23b4728151292c5a7a731da511619bbf4fc0194cb3158fde2a0794ed", + "runtimeCodeHash": "0x73009e4f97f097e7b5d67e1e1b6dd41ecc8f5363eb15484019b8000a6d0cb95c", + "txHash": "0xa9e5c9e3585bb68dc538062ca4c2dbfb58c3fc80523ca97c7d0d27f4a7ca1a09" } }, "GraphToken": { @@ -575,9 +575,9 @@ "proxy": true, "implementation": { "address": "0x0290FB167208Af455bB137780163b7B7a9a10C16", - "creationCodeHash": "0x45f56a7ad420cd11a8585594fb29121747d87d412161c8779ea36dfd34a48e88", - "runtimeCodeHash": 
"0x26aceabe83e2b757b2f000e185017528cdde2323c2129fd612180ac3192adfda", - "txHash": "0x567c09fd4920dd8ec2e359bd3b2a77aa69658af1ff515fe6d687007967229bee" + "creationCodeHash": "0xad443a9c9bf6a6049265e253dc99658bf99e4091c939f68972c5298926d7689d", + "runtimeCodeHash": "0x495a9a8de4aed745b0521e8b24661cf26ff12a9993a0ec5ef17728271a6f8629", + "txHash": "0x69a51f8846d42a786314d56ce00b7321a6576cd8bdc0d5898dd6f3ccb1c63c87" } }, "Curation": { @@ -596,9 +596,9 @@ "proxy": true, "implementation": { "address": "0x2612Af3A521c2df9EAF28422Ca335b04AdF3ac66", - "creationCodeHash": "0x022576ab4b739ee17dab126ea7e5a6814bda724aa0e4c6735a051b38a76bd597", - "runtimeCodeHash": "0xc7b1f9bef01ef92779aab0ae9be86376c47584118c508f5b4e612a694a4aab93", - "txHash": "0xc93d39f849b249792924ee973c022aea2445c6662ce26f450d324b1c721c25a7" + "creationCodeHash": "0xe69ca2e0119fb769311ecd3d4de6b12fd0cedfb56eeb4c537bd3defa2adcca43", + "runtimeCodeHash": "0x364e9b3216fa3a571e8be3cdb757fa007ee8a2afe384396e4a7cda3de79ce4d9", + "txHash": "0xc3278c3fae8f2cfab00755537c9a8d6712e1e8027f46a9ef99eb3b9231620ab2" } }, "L1GNS": { @@ -614,9 +614,9 @@ "proxy": true, "implementation": { "address": "0xDb56f2e9369E0D7bD191099125a3f6C370F8ed15", - "creationCodeHash": "0x2e71e4aefc1e678cb9c71882c1da67fc640389337a7d6ae43f78d0f13294594a", - "runtimeCodeHash": "0xde0e02c6a36a90e11c768f40a81430b7e9cda261aa6dada14eaad392d42efc21", - "txHash": "0x4032407d59e4ac88868270bb8d920bfcc8fe6572a22ad4f3be9c64da5a8f926e" + "creationCodeHash": "0xfbdc6caf28aa09493e0e0032aac06cdb2be8c5f62b8c839876d62d2bb2977e3d", + "runtimeCodeHash": "0x106af7614bdb7cdf60a6a93e5c92dbee03e36c799880d9ee8e8e9585fc077f72", + "txHash": "0xb1e63211ea7b036bf35423034bc60490b3b35b199bddc85200ea926b76e16a4e" } }, "Staking": { @@ -640,9 +640,9 @@ "proxy": true, "implementation": { "address": "0xFC628dd79137395F3C9744e33b1c5DE554D94882", - "creationCodeHash": "0x55e99794a19a3fea4152ac8cbbec6ed93e88fa0b09e21ac6fbf00f39bfa928f6", - "runtimeCodeHash": 
"0xef297f45b62801f615d3271bb40f07a30a9906be0f70f2d57dbf6a44408191d3", - "txHash": "0x149846f986f24a619f1137be908ed2cf82dce52c18bcbeacefdb663b1a6dd765", + "creationCodeHash": "0xa6ad6904fe70424527494f1401d71972967da4e35dea7ca01858063a56550e42", + "runtimeCodeHash": "0x3146935df7968ca2b32b0610ddb25e40148a9c007d3e81b367a10342b6eed13b", + "txHash": "0xb37e221c74a2237c0d63cc61242106c426b1b46041e6e0e27467f90c4e01da88", "libraries": { "LibCobbDouglas": "0xb09bCc172050fBd4562da8b229Cf3E45Dc3045A6" } @@ -650,23 +650,23 @@ }, "RewardsManager": { "address": "0x4bf749ec68270027C5910220CEAB30Cc284c7BA2", - "initArgs": ["0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B", "1000000012184945188"], + "initArgs": ["0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B"], "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", "txHash": "0xf4a13ad82b067ffb59e689df3cb828a5cd4eac7e316323dfbb4b05b191127ce5", "proxy": true, "implementation": { "address": "0xD86C8F0327494034F60e25074420BcCF560D5610", - "creationCodeHash": "0xfec6d35d9de8a7234e77484ee4fa5d6867697d607b591ed5a8e01679fa1f0394", - "runtimeCodeHash": "0x4595f2b6c37d65ad1deed2497054b2319fb0c6419439e2e374b29a29aa9fcb81", - "txHash": "0xeea5271a0af6be6cc23e7a98fa84343d6b2c2aefaf1a80be63e945be332b5b0e" + "creationCodeHash": "0x5579914062ff21ef47c68cfab1eddbef1c320ae50e769dc430b73fcb995c5095", + "runtimeCodeHash": "0xae6b7b7a6f02d5964d4a35d66c906dd0fb5a5fb00549e646a586465e97218402", + "txHash": "0x8f411197f5b89b40fd61e2a8d35a9740279cff4fb2a7c2231f3faba1b8d4f581" } }, "DisputeManager": { "address": "0x5017A545b09ab9a30499DE7F431DF0855bCb7275", "initArgs": [ "0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B", - "0xFD01aa87BeB04D0ac764FC298aCFd05FfC5439cD", + "0xFFcf8FDEE72ac11b5c542428B35EEF5769C409f0", "10000000000000000000000", "500000", "25000", @@ -678,9 +678,9 @@ "proxy": true, "implementation": { "address": 
"0x7C728214be9A0049e6a86f2137ec61030D0AA964", - "creationCodeHash": "0x5b73c9b910d66426fd965ac3110e9debda1d81134c0354a7af8ec1f2ebd765f6", - "runtimeCodeHash": "0xcaf3547f0d675a1e1d2f887cf4666410bc3b084e65ad283ed3f1ff2b1bccc113", - "txHash": "0x29822affa517965e1995fc4e777cd709daf9df8f16f13e08b3829bba5c50bf90" + "creationCodeHash": "0xbcdd3847552c8819e7b65d77b6929f2b61cd5a7522d1355f2bb1a0c2a099f713", + "runtimeCodeHash": "0x2c0589b92badf53b7cb8a0570e4d28ceefff0add59eb2e75e59e4ae4f76592ff", + "txHash": "0x6773e7db3d0991ad4541cdceb64c035e3c0cc7f5e9ecf7749ba2e699b8793bcf" } }, "EthereumDIDRegistry": { @@ -704,21 +704,33 @@ "SubgraphNFT": { "address": "0x0E696947A06550DEf604e82C26fd9E493e576337", "constructorArgs": ["0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1"], - "creationCodeHash": "0x5de044b15df24beb8781d1ebe71f01301a6b8985183f37eb8d599aa4059a1d3e", - "runtimeCodeHash": "0x6a7751298d6ffdbcf421a3b72faab5b7d425884b04757303123758dbcfb21dfa", - "txHash": "0xcb40328bd03b6b25e74203e10f9ce17a131aa514f6ba9156aa8fcb81fe5f8cc2" + "creationCodeHash": "0xc3559f8ffca442b8a3706003d3c89d11bc918398551a197bbbd66ae649cc14c4", + "runtimeCodeHash": "0x16c4bfbb2374879d3f9373178fe14170332e274a3a4e6a07f7ffc5194420584d", + "txHash": "0xa03c6e4755494c8334fa9175941cb5655be943a930950312a6e3572204d6259f" }, "AllocationExchange": { "address": "0xFF6049B87215476aBf744eaA3a476cBAd46fB1cA", "constructorArgs": [ "0xe982E462b094850F12AF94d21D470e21bE9D0E9C", "0x5f8e26fAcC23FA4cbd87b8d9Dbbd33D5047abDE1", - "0xf1135bFF22512FF2A585b8d4489426CE660f204c", - "0x52e498aE9B8A5eE2A5Cd26805F06A9f29A7F489F" + "0x3E5e9111Ae8eB78Fe1CC3bb8915d5D461F3Ef9A9", + "0xE11BA2b4D45Eaed5996Cd0823791E0C93114882d" ], - "creationCodeHash": "0x97714e1a80674ab0af90a10f2c7156cc92794ef81565fe9c7c35ecbe0025cc08", - "runtimeCodeHash": "0x07012b5692ec6cbeb7a6986e061fc5026a2f76545b07bfd9182985de002fa281", - "txHash": "0xe3d870434e38ee37142a86e0fc54063df59c02c3b70135f070c3a1025c5e8246" + "creationCodeHash": 
"0xe7db7b38369ff61ea6cb2abdaf64f94deb88703faec5fa7a33866d1144a7da5f", + "runtimeCodeHash": "0x0792084bfc42580dc14eff231a75eab772eca117894dca8f1544cf0d38df219c", + "txHash": "0xeb2ac7e11256e10591b396fff48d0526c6bab20f9d45036ba07b8e32238d8397" + }, + "L1GraphTokenGateway": { + "address": "0xA586074FA4Fe3E546A132a16238abe37951D41fE", + "creationCodeHash": "0x506b750ce67ef926070c8918e372003d0cd9d21f8198a1e5447ff65a8ca8759e", + "runtimeCodeHash": "0x6cc716875c9de6a3bdc8b53366cb7adf83f96f2254b1f3171c996ac99449bc8c", + "txHash": "0x5f49cd4389f3c59b18bf1bcc7f5bf6feaa4a5e1e3f08b66805b4e1b7329a991c" + }, + "BridgeEscrow": { + "address": "0x2D8BE6BF0baA74e0A907016679CaE9190e80dD0A", + "creationCodeHash": "0x09b0de6d1f3afb28f3008befbc5c6303a8d510c31a4364483c009f3446082175", + "runtimeCodeHash": "0x7c242c472191805935e451bae6aaf0417ff7192d0b2a76422bc1c93b2284e2d4", + "txHash": "0x1881f59227e8f77a4b28c1877d5a4b08df576e1a22785800e35aeacfb3f6958e" } }, "421613": { diff --git a/config/graph.goerli.yml b/config/graph.goerli.yml index b4b735b4e..a0d4353f3 100644 --- a/config/graph.goerli.yml +++ b/config/graph.goerli.yml @@ -14,7 +14,7 @@ contracts: contractAddress: "${{Curation.address}}" - fn: "setContractProxy" id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') - contractAddress: "${{GNS.address}}" + contractAddress: "${{L1GNS.address}}" - fn: "setContractProxy" id: "0xf942813d07d17b56de9a9afc8de0ced6e8c053bbfdcc87b7badea4ddcf27c307" # keccak256('DisputeManager') contractAddress: "${{DisputeManager.address}}" @@ -83,7 +83,7 @@ contracts: qrySlashingPercentage: 25000 # in parts per million calls: - fn: "syncAllContracts" - GNS: + L1GNS: proxy: true init: controller: "${{Controller.address}}" @@ -99,7 +99,7 @@ contracts: - fn: "setTokenDescriptor" tokenDescriptor: "${{SubgraphNFTDescriptor.address}}" - fn: "setMinter" - minter: "${{GNS.address}}" + minter: "${{L1GNS.address}}" - fn: "transferOwnership" owner: *governor Staking: diff --git 
a/config/graph.localhost.yml b/config/graph.localhost.yml index 4a90dfd20..1e82db61c 100644 --- a/config/graph.localhost.yml +++ b/config/graph.localhost.yml @@ -14,7 +14,7 @@ contracts: contractAddress: "${{Curation.address}}" - fn: "setContractProxy" id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') - contractAddress: "${{GNS.address}}" + contractAddress: "${{L1GNS.address}}" - fn: "setContractProxy" id: "0xf942813d07d17b56de9a9afc8de0ced6e8c053bbfdcc87b7badea4ddcf27c307" # keccak256('DisputeManager') contractAddress: "${{DisputeManager.address}}" @@ -83,7 +83,7 @@ contracts: qrySlashingPercentage: 25000 # in parts per million calls: - fn: "syncAllContracts" - GNS: + L1GNS: proxy: true init: controller: "${{Controller.address}}" @@ -99,7 +99,7 @@ contracts: - fn: "setTokenDescriptor" tokenDescriptor: "${{SubgraphNFTDescriptor.address}}" - fn: "setMinter" - minter: "${{GNS.address}}" + minter: "${{L1GNS.address}}" - fn: "transferOwnership" owner: *governor Staking: diff --git a/e2e/scenarios/lib/subgraph.ts b/e2e/scenarios/lib/subgraph.ts index f97f1d76b..24d523d45 100644 --- a/e2e/scenarios/lib/subgraph.ts +++ b/e2e/scenarios/lib/subgraph.ts @@ -3,6 +3,7 @@ import { BigNumber } from 'ethers' import { solidityKeccak256 } from 'ethers/lib/utils' import { NetworkContracts } from '../../../cli/contracts' import { randomHexBytes, sendTransaction } from '../../../cli/network' +import hre from 'hardhat' export const recreatePreviousSubgraphId = async ( contracts: NetworkContracts, @@ -14,7 +15,7 @@ export const recreatePreviousSubgraphId = async ( } export const buildSubgraphID = (account: string, seqID: BigNumber): string => - solidityKeccak256(['address', 'uint256'], [account, seqID]) + solidityKeccak256(['address', 'uint256', 'uint256'], [account, seqID, hre.network.config.chainId]) export const publishNewSubgraph = async ( contracts: NetworkContracts, From 9510d274c5e159c081918e40ee6c2831925feb28 Mon Sep 17 00:00:00 2001 
From: Pablo Carranza Velez Date: Tue, 20 Sep 2022 17:25:17 -0300 Subject: [PATCH 021/112] fix: bool for deprecated, and more tests --- contracts/discovery/IGNS.sol | 1 + contracts/discovery/L1GNS.sol | 7 +- test/gns.test.ts | 393 +++++++++++++++++++++++----------- 3 files changed, 271 insertions(+), 130 deletions(-) diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index 7e58badf0..470a6191e 100644 --- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -22,6 +22,7 @@ interface IGNS { bytes32 lockedAtBlockHash; // Blockhash from block at which the subgraph was locked for migration mapping(address => bool) curatorBalanceClaimed; // True for curators whose balance has been claimed in L2 bool l2Done; // Migration finished on L2 side + bool deprecated; // Subgraph was deprecated instead of sent to L2 } struct LegacySubgraphKey { diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 04e9e273f..ad58b14a1 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -143,7 +143,7 @@ contract L1GNS is GNS, L1ArbitrumMessenger { require(migrationData.lockedAtBlock.add(256) < block.number, "TOO_EARLY"); require(!migrationData.l1Done, "ALREADY_DONE"); migrationData.l1Done = true; - + migrationData.deprecated = true; subgraphData.withdrawableGRT = migrationData.tokens; subgraphData.reserveRatio = 0; @@ -160,11 +160,10 @@ contract L1GNS is GNS, L1ArbitrumMessenger { uint256 _gasPriceBid, uint256 _maxSubmissionCost ) external payable notPartialPaused returns (bytes memory) { - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; require(migrationData.l1Done, "!MIGRATED"); - require(subgraphData.withdrawableGRT == 0, "DEPRECATED"); + require(!migrationData.deprecated, "SUBGRAPH_DEPRECATED"); require(_maxSubmissionCost > 0, "NO_SUBMISSION_COST"); @@ -181,7 +180,7 @@ contract L1GNS is GNS, 
L1ArbitrumMessenger { IL2GNS.claimL1CuratorBalanceToBeneficiary.selector, _subgraphID, msg.sender, - subgraphData.curatorNSignal[msg.sender], + getCuratorSignal(_subgraphID, msg.sender), _beneficiary ); diff --git a/test/gns.test.ts b/test/gns.test.ts index 60af3ba1e..b44bff619 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -18,7 +18,7 @@ import { advanceBlocks, advanceBlock, } from './lib/testHelpers' -import { NetworkFixture } from './lib/fixtures' +import { ArbitrumL1Mocks, NetworkFixture } from './lib/fixtures' import { toBN, formatGRT } from './lib/testHelpers' import { getContractAt } from '../cli/network' import { deployContract } from './lib/deployment' @@ -90,6 +90,7 @@ describe('L1GNS', () => { let controller: Controller let proxyAdmin: GraphProxyAdmin let l1GraphTokenGateway: L1GraphTokenGateway + let arbitrumMocks: ArbitrumL1Mocks const tokens1000 = toGRT('1000') const tokens10000 = toGRT('10000') @@ -592,7 +593,7 @@ describe('L1GNS', () => { await deployLegacyGNSMock() await grt.connect(me.signer).approve(legacyGNSMock.address, tokens100000) - const arbitrumMocks = await fixture.loadArbitrumL1Mocks(governor.signer) + arbitrumMocks = await fixture.loadArbitrumL1Mocks(governor.signer) await fixture.configureL1Bridge( governor.signer, arbitrumMocks, @@ -1293,12 +1294,62 @@ describe('L1GNS', () => { }) }) describe('Subgraph migration to L2', function () { + const publishAndCurateOnSubgraph = async function (): Promise { + // Publish a named subgraph-0 -> subgraphDeployment0 + const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + // Curate on the subgraph + await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + + return subgraph0 + } + const publishCurateAndLockSubgraph = async function (): Promise { + const subgraph0 = await publishAndCurateOnSubgraph() + await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + return subgraph0 + } + const publishCurateLockAndSendSubgraph = async function ( + 
beforeMigrationCallback?: (subgraphID: string) => Promise, + ): Promise { + const subgraph0 = await publishAndCurateOnSubgraph() + + if (beforeMigrationCallback != null) { + await beforeMigrationCallback(subgraph0.id) + } + + await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id) + return subgraph0 + } + const publishAndCurateOnLegacySubgraph = async function (seqID: BigNumber): Promise { + await legacyGNSMock + .connect(me.signer) + .createLegacySubgraph(seqID, newSubgraph0.subgraphDeploymentID) + // The legacy subgraph must be claimed + const migrateTx = legacyGNSMock + .connect(me.signer) + .migrateLegacySubgraph(me.address, seqID, newSubgraph0.subgraphMetadata) + await expect(migrateTx) + .emit(legacyGNSMock, ' LegacySubgraphClaimed') + .withArgs(me.address, seqID) + const subgraphID = buildLegacySubgraphID(me.address, seqID) + + // Curate on the subgraph + await legacyGNSMock.connect(me.signer).mintSignal(subgraphID, toGRT('10000'), 0) + + return subgraphID + } describe('lockSubgraphForMigrationToL2', function () { it('locks and disables a subgraph, burning the signal and storing the block number', async function () { // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + const subgraph0 = await publishAndCurateOnSubgraph() const curatedTokens = await grt.balanceOf(curation.address) const subgraphBefore = await gns.subgraphs(subgraph0.id) @@ -1324,21 +1375,7 @@ describe('L1GNS', () => { }) it('locks and disables a legacy subgraph, 
burning the signal and storing the block number', async function () { const seqID = toBN('2') - await legacyGNSMock - .connect(me.signer) - .createLegacySubgraph(seqID, newSubgraph0.subgraphDeploymentID) - // The legacy subgraph must be claimed - const migrateTx = legacyGNSMock - .connect(me.signer) - .migrateLegacySubgraph(me.address, seqID, newSubgraph0.subgraphMetadata) - await expect(migrateTx) - .emit(legacyGNSMock, ' LegacySubgraphClaimed') - .withArgs(me.address, seqID) - const subgraphID = buildLegacySubgraphID(me.address, seqID) - - // Curate on the subgraph - await legacyGNSMock.connect(me.signer).mintSignal(subgraphID, toGRT('10000'), 0) - + const subgraphID = await publishAndCurateOnLegacySubgraph(seqID) const curatedTokens = await grt.balanceOf(curation.address) const subgraphBefore = await legacyGNSMock.legacySubgraphData(me.address, seqID) expect(subgraphBefore.vSignal).not.eq(0) @@ -1362,10 +1399,7 @@ describe('L1GNS', () => { await expect(invalidTx).revertedWith('GNS: Must be active') }) it('rejects calls from someone who is not the subgraph owner', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + const subgraph0 = await publishAndCurateOnSubgraph() const tx = gns.connect(other.signer).lockSubgraphForMigrationToL2(subgraph0.id) await expect(tx).revertedWith('GNS: Must be authorized') @@ -1377,22 +1411,13 @@ describe('L1GNS', () => { await expect(tx).revertedWith('ERC721: owner query for nonexistent token') }) it('rejects a call for a subgraph that is already locked', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) - - const tx = 
gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) - await expect(tx).emit(gns, 'SubgraphLockedForMigrationToL2').withArgs(subgraph0.id) + const subgraph0 = await publishCurateAndLockSubgraph() const tx2 = gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) await expect(tx2).revertedWith('GNS: Must be active') }) it('rejects a call for a subgraph that is deprecated', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + const subgraph0 = await publishAndCurateOnSubgraph() await gns.connect(me.signer).deprecateSubgraph(subgraph0.id) @@ -1403,11 +1428,7 @@ describe('L1GNS', () => { }) describe('sendSubgraphToL2', function () { it('sends tokens and calldata to L2 through the GRT bridge', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + const subgraph0 = await publishAndCurateOnSubgraph() const curatedTokens = await grt.balanceOf(curation.address) const subgraphBefore = await gns.subgraphs(subgraph0.id) @@ -1458,20 +1479,7 @@ describe('L1GNS', () => { }) it('sends tokens and calldata for a legacy subgraph to L2 through the GRT bridge', async function () { const seqID = toBN('2') - await legacyGNSMock - .connect(me.signer) - .createLegacySubgraph(seqID, newSubgraph0.subgraphDeploymentID) - const migrateTx = legacyGNSMock - .connect(me.signer) - .migrateLegacySubgraph(me.address, seqID, newSubgraph0.subgraphMetadata) - await expect(migrateTx) - .emit(legacyGNSMock, ' LegacySubgraphClaimed') - .withArgs(me.address, seqID) - const subgraphID = buildLegacySubgraphID(me.address, seqID) - - // Curate on the subgraph - await 
legacyGNSMock.connect(me.signer).mintSignal(subgraphID, toGRT('90000'), 0) - + const subgraphID = await publishAndCurateOnLegacySubgraph(seqID) const curatedTokens = await grt.balanceOf(curation.address) const subgraphBefore = await legacyGNSMock.legacySubgraphData(me.address, seqID) const lockTx = await legacyGNSMock @@ -1522,12 +1530,7 @@ describe('L1GNS', () => { .withArgs(legacyGNSMock.address, mockL2Gateway.address, toBN(1), expectedL2Data) }) it('rejects calls from someone who is not the subgraph owner', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) - await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + const subgraph0 = await publishCurateAndLockSubgraph() const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1540,11 +1543,7 @@ describe('L1GNS', () => { await expect(tx).revertedWith('GNS: Must be authorized') }) it('rejects calls for a subgraph that is not locked', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + const subgraph0 = await publishAndCurateOnSubgraph() const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1557,13 +1556,7 @@ describe('L1GNS', () => { await expect(tx).revertedWith('!LOCKED') }) it('rejects calls for a subgraph that was already sent', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) - - await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + const subgraph0 = await 
publishCurateAndLockSubgraph() const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1583,13 +1576,7 @@ describe('L1GNS', () => { await expect(tx2).revertedWith('ALREADY_DONE') }) it('rejects calls after too many blocks have passed', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) - - await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + const subgraph0 = await publishCurateAndLockSubgraph() await advanceBlocks(256) @@ -1606,11 +1593,7 @@ describe('L1GNS', () => { }) describe('deprecateLockedSubgraph', function () { it('can be called by anyone, and makes the GRT from the subgraph withdrawable', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + const subgraph0 = await publishAndCurateOnSubgraph() const [beforeTokens] = await getTokensAndVSignal(newSubgraph0.subgraphDeploymentID) await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) @@ -1630,13 +1613,10 @@ describe('L1GNS', () => { expect(afterSubgraph.reserveRatio).eq(0) // Should be equal since owner pays curation tax expect(afterSubgraph.withdrawableGRT).eq(beforeTokens) + expect((await gns.subgraphL2MigrationData(subgraph0.id)).deprecated).to.eq(true) }) it('rejects calls for a subgraph that was not locked', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + const subgraph0 = await publishAndCurateOnSubgraph() await advanceBlocks(256) @@ -1644,13 +1624,7 @@ describe('L1GNS', () => { await 
expect(tx).revertedWith('!LOCKED') }) it('rejects calls if not enough blocks have passed', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) - - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) - - await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + const subgraph0 = await publishCurateAndLockSubgraph() await advanceBlocks(255) // Not enough! @@ -1658,56 +1632,223 @@ describe('L1GNS', () => { await expect(tx).revertedWith('TOO_EARLY') }) it('rejects calls for a subgraph that was sent to L2', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + const subgraph0 = await publishCurateLockAndSendSubgraph() - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + await advanceBlocks(255) + const tx2 = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) + await expect(tx2).revertedWith('ALREADY_DONE') + }) + it('rejects calls for a subgraph that was already deprecated', async function () { + const subgraph0 = await publishAndCurateOnSubgraph() + const [beforeTokens] = await getTokensAndVSignal(newSubgraph0.subgraphDeploymentID) await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) - await advanceBlocks(254) + await advanceBlocks(256) + const tx = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) + await expect(tx).emit(gns, 'SubgraphDeprecated').withArgs(subgraph0.id, beforeTokens) + const tx2 = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) + await expect(tx2).revertedWith('ALREADY_DONE') + }) + }) + describe('claimCuratorBalanceToBeneficiaryOnL2', function () { + beforeEach(async function () { + await gns.connect(governor.signer).setArbitrumInboxAddress(arbitrumMocks.inboxMock.address) + await legacyGNSMock + 
.connect(governor.signer) + .setArbitrumInboxAddress(arbitrumMocks.inboxMock.address) + }) + it('sends a transaction with a curator balance to the L2GNS using the Arbitrum inbox', async function () { + let beforeCuratorNSignal: BigNumber + const subgraph0 = await publishCurateLockAndSendSubgraph(async (subgraphID) => { + beforeCuratorNSignal = await gns.getCuratorSignal(subgraphID, me.address) + }) + + const expectedCalldata = l2GNSIface.encodeFunctionData( + 'claimL1CuratorBalanceToBeneficiary', + [subgraph0.id, me.address, beforeCuratorNSignal, other.address], + ) const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') + const tx = gns .connect(me.signer) - .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + .claimCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + + // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 + await expect(tx) + .emit(gns, 'TxToL2') + .withArgs(me.address, mockL2GNS.address, toBN('2'), expectedCalldata) + }) + it('sends a transaction with a curator balance from a legacy subgraph to the L2GNS', async function () { + const subgraphID = await publishAndCurateOnLegacySubgraph(toBN('2')) + + const beforeCuratorNSignal = await legacyGNSMock.getCuratorSignal(subgraphID, me.address) + + await legacyGNSMock.connect(me.signer).lockSubgraphForMigrationToL2(subgraphID) + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = legacyGNSMock + .connect(me.signer) + .sendSubgraphToL2(subgraphID, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id) + await expect(tx).emit(legacyGNSMock, 'SubgraphSentToL2').withArgs(subgraphID) - await 
advanceBlock() - const tx2 = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) - await expect(tx2).revertedWith('ALREADY_DONE') + const expectedCalldata = l2GNSIface.encodeFunctionData( + 'claimL1CuratorBalanceToBeneficiary', + [subgraphID, me.address, beforeCuratorNSignal, other.address], + ) + + const tx2 = legacyGNSMock + .connect(me.signer) + .claimCuratorBalanceToBeneficiaryOnL2( + subgraphID, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + + // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 + await expect(tx2) + .emit(legacyGNSMock, 'TxToL2') + .withArgs(me.address, mockL2GNS.address, toBN('2'), expectedCalldata) }) - it('rejects calls for a subgraph that was already deprecated', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + it('rejects calls for a subgraph that was locked but not sent to L2', async function () { + const subgraph0 = await publishCurateAndLockSubgraph() - // Curate on the subgraph - await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') - const [beforeTokens] = await getTokensAndVSignal(newSubgraph0.subgraphDeploymentID) - await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + const tx = gns + .connect(me.signer) + .claimCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + + // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 + await expect(tx).revertedWith('!MIGRATED') + }) + it('rejects calls for a subgraph that was not locked', async function () { + const subgraph0 = await 
publishAndCurateOnSubgraph() + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + + const tx = gns + .connect(me.signer) + .claimCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + + // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 + await expect(tx).revertedWith('!MIGRATED') + }) + it('rejects calls for a subgraph that was locked but deprecated', async function () { + const subgraph0 = await publishCurateAndLockSubgraph() await advanceBlocks(256) + await gns.connect(me.signer).deprecateLockedSubgraph(subgraph0.id) - const tx = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) - await expect(tx).emit(gns, 'SubgraphDeprecated').withArgs(subgraph0.id, beforeTokens) - const tx2 = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) - await expect(tx2).revertedWith('ALREADY_DONE') + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + + const tx = gns + .connect(me.signer) + .claimCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + + // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 + await expect(tx).revertedWith('SUBGRAPH_DEPRECATED') + }) + it('rejects calls with an incorrect eth value', async function () { + const subgraph0 = await publishCurateLockAndSendSubgraph() + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + + const tx = gns + .connect(me.signer) + .claimCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: 
maxSubmissionCost.add(maxGas.mul(gasPriceBid)).sub(1), + }, + ) + + // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 + await expect(tx).revertedWith('WRONG_ETH_VALUE') + }) + it('rejects calls with zero maxSubmissionCost', async function () { + const subgraph0 = await publishCurateLockAndSendSubgraph() + + const maxSubmissionCost = toBN('0') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + + const tx = gns + .connect(me.signer) + .claimCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + + // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 + await expect(tx).revertedWith('NO_SUBMISSION_COST') }) - }) - describe('claimCuratorBalanceToBeneficiaryOnL2', function () { - it('sends a transaction with a curator balance to the L2GNS using the Arbitrum inbox') - it('sends a transaction with a curator balance from a legacy subgraph to the L2GNS') - it('rejects calls for a subgraph that was locked but not sent to L2') - it('rejects calls for a subgraph that was not locked') - it('rejects calls for a subgraph that was locked but deprecated') - it('rejects calls with an incorrect eth value') - it('rejects calls with zero maxSubmissionCost') }) }) }) From ea375bc9551c4192c3b545cc3385d190f8fabb00 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Sep 2022 18:45:16 -0300 Subject: [PATCH 022/112] test: wip on initial tests for L2GNS --- contracts/discovery/IGNS.sol | 4 +- test/gns.test.ts | 337 ++++++----------------------------- test/l2/l2GNS.test.ts | 228 +++++++++++++++++++++--- test/lib/gnsUtils.ts | 195 ++++++++++++++++++++ 4 files changed, 459 insertions(+), 305 deletions(-) create mode 100644 test/lib/gnsUtils.ts diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index 470a6191e..987c1eb50 100644 
--- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -16,13 +16,13 @@ interface IGNS { } struct SubgraphL2MigrationData { - uint256 lockedAtBlock; // Block at which the subgraph was locked for migration + uint256 lockedAtBlock; // Block at which the subgraph was locked for migration. L1 only uint256 tokens; // GRT that will be sent to L2 to mint signal bool l1Done; // Migration finished on L1 side (or subgraph deprecated) bytes32 lockedAtBlockHash; // Blockhash from block at which the subgraph was locked for migration mapping(address => bool) curatorBalanceClaimed; // True for curators whose balance has been claimed in L2 bool l2Done; // Migration finished on L2 side - bool deprecated; // Subgraph was deprecated instead of sent to L2 + bool deprecated; // Subgraph was deprecated instead of sent. L1 only } struct LegacySubgraphKey { diff --git a/test/gns.test.ts b/test/gns.test.ts index b44bff619..beee4ebf3 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai' import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' -import { Interface, solidityKeccak256 } from 'ethers/lib/utils' +import { Interface } from 'ethers/lib/utils' import { SubgraphDeploymentID } from '@graphprotocol/common-ts' import { LegacyGNSMock } from '../build/types/LegacyGNSMock' @@ -13,10 +13,8 @@ import { randomHexBytes, Account, toGRT, - getChainID, latestBlock, advanceBlocks, - advanceBlock, } from './lib/testHelpers' import { ArbitrumL1Mocks, NetworkFixture } from './lib/fixtures' import { toBN, formatGRT } from './lib/testHelpers' @@ -30,6 +28,19 @@ import { L1GNS } from '../build/types/L1GNS' import path from 'path' import { Artifacts } from 'hardhat/internal/artifacts' import { L1GraphTokenGateway } from '../build/types/L1GraphTokenGateway' +import { + AccountDefaultName, + buildLegacySubgraphID, + buildSubgraph, + buildSubgraphID, + createDefaultName, + PublishSubgraph, + Subgraph, + DEFAULT_RESERVE_RATIO, + 
getTokensAndVSignal, + publishNewSubgraph, + publishNewVersion, +} from './lib/gnsUtils' const { AddressZero, HashZero } = ethers.constants @@ -38,38 +49,9 @@ const artifacts = new Artifacts(ARTIFACTS_PATH) const l2GNSabi = artifacts.readArtifactSync('L2GNS').abi const l2GNSIface = new Interface(l2GNSabi) -// Entities -interface PublishSubgraph { - subgraphDeploymentID: string - versionMetadata: string - subgraphMetadata: string -} - -interface Subgraph { - vSignal: BigNumber - nSignal: BigNumber - subgraphDeploymentID: string - reserveRatio: number - disabled: boolean - withdrawableGRT: BigNumber - id?: string -} - -interface AccountDefaultName { - name: string - nameIdentifier: string -} - // Utils - -const DEFAULT_RESERVE_RATIO = 1000000 const toFloat = (n: BigNumber) => parseFloat(formatGRT(n)) const toRound = (n: number) => n.toFixed(12) -const buildSubgraphID = async (account: string, seqID: BigNumber): Promise => - solidityKeccak256(['address', 'uint256', 'uint256'], [account, seqID, await getChainID()]) - -const buildLegacySubgraphID = (account: string, seqID: BigNumber): string => - solidityKeccak256(['address', 'uint256'], [account, seqID]) describe('L1GNS', () => { let me: Account @@ -101,27 +83,6 @@ describe('L1GNS', () => { let newSubgraph1: PublishSubgraph let defaultName: AccountDefaultName - const buildSubgraph = (): PublishSubgraph => { - return { - subgraphDeploymentID: randomHexBytes(), - versionMetadata: randomHexBytes(), - subgraphMetadata: randomHexBytes(), - } - } - - const createDefaultName = (name: string): AccountDefaultName => { - return { - name: name, - nameIdentifier: ethers.utils.namehash(name), - } - } - - const getTokensAndVSignal = async (subgraphDeploymentID: string): Promise> => { - const curationPool = await curation.pools(subgraphDeploymentID) - const vSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) - return [curationPool.tokens, vSignal] - } - async function calcGNSBondingCurve( gnsSupply: BigNumber, // 
nSignal gnsReserveBalance: BigNumber, // vSignal @@ -177,133 +138,10 @@ describe('L1GNS', () => { ) } - const publishNewSubgraph = async ( - account: Account, - newSubgraph: PublishSubgraph, // Defaults to subgraph created in before() - ): Promise => { - const subgraphID = await buildSubgraphID( - account.address, - await gns.nextAccountSeqID(account.address), - ) - - // Send tx - const tx = gns - .connect(account.signer) - .publishNewSubgraph( - newSubgraph.subgraphDeploymentID, - newSubgraph.versionMetadata, - newSubgraph.subgraphMetadata, - ) - - // Check events - await expect(tx) - .emit(gns, 'SubgraphPublished') - .withArgs(subgraphID, newSubgraph.subgraphDeploymentID, DEFAULT_RESERVE_RATIO) - .emit(gns, 'SubgraphMetadataUpdated') - .withArgs(subgraphID, newSubgraph.subgraphMetadata) - .emit(gns, 'SubgraphVersionUpdated') - .withArgs(subgraphID, newSubgraph.subgraphDeploymentID, newSubgraph.versionMetadata) - - // Check state - const subgraph = await gns.subgraphs(subgraphID) - expect(subgraph.vSignal).eq(0) - expect(subgraph.nSignal).eq(0) - expect(subgraph.subgraphDeploymentID).eq(newSubgraph.subgraphDeploymentID) - expect(subgraph.reserveRatio).eq(DEFAULT_RESERVE_RATIO) - expect(subgraph.disabled).eq(false) - expect(subgraph.withdrawableGRT).eq(0) - - // Check NFT issuance - const owner = await gns.ownerOf(subgraphID) - expect(owner).eq(account.address) - - return { ...subgraph, id: subgraphID } - } - - const publishNewVersion = async ( - account: Account, - subgraphID: string, - newSubgraph: PublishSubgraph, - ) => { - // Before state - const ownerTaxPercentage = await gns.ownerTaxPercentage() - const curationTaxPercentage = await curation.curationTaxPercentage() - const beforeSubgraph = await gns.subgraphs(subgraphID) - - // Check what selling all nSignal, which == selling all vSignal, should return for tokens - // NOTE - no tax on burning on nSignal - const tokensReceivedEstimate = beforeSubgraph.nSignal.gt(0) - ? 
(await gns.nSignalToTokens(subgraphID, beforeSubgraph.nSignal))[1] - : toBN(0) - // Example: - // Deposit 100, 5 is taxed, 95 GRT in curve - // Upgrade - calculate 5% tax on 95 --> 4.75 GRT - // Multiple by ownerPercentage --> 50% * 4.75 = 2.375 GRT - // Owner adds 2.375 to 90.25, we deposit 92.625 GRT into the curve - // Divide this by 0.95 to get exactly 97.5 total tokens to be deposited - - // nSignalToTokens returns the amount of tokens with tax removed - // already. So we must add in the tokens removed - const MAX_PPM = 1000000 - const taxOnOriginal = tokensReceivedEstimate.mul(curationTaxPercentage).div(MAX_PPM) - const totalWithoutOwnerTax = tokensReceivedEstimate.sub(taxOnOriginal) - const ownerTax = taxOnOriginal.mul(ownerTaxPercentage).div(MAX_PPM) - const totalWithOwnerTax = totalWithoutOwnerTax.add(ownerTax) - const totalAdjustedUp = totalWithOwnerTax.mul(MAX_PPM).div(MAX_PPM - curationTaxPercentage) - - // Re-estimate amount of signal to get considering the owner tax paid by the owner - - const { 0: newVSignalEstimate, 1: newCurationTaxEstimate } = beforeSubgraph.nSignal.gt(0) - ? 
await curation.tokensToSignal(newSubgraph.subgraphDeploymentID, totalAdjustedUp) - : [toBN(0), toBN(0)] - - // Send tx - const tx = gns - .connect(account.signer) - .publishNewVersion(subgraphID, newSubgraph.subgraphDeploymentID, newSubgraph.versionMetadata) - const txResult = expect(tx) - .emit(gns, 'SubgraphVersionUpdated') - .withArgs(subgraphID, newSubgraph.subgraphDeploymentID, newSubgraph.versionMetadata) - - // Only emits this event if there was actual signal to upgrade - if (beforeSubgraph.nSignal.gt(0)) { - txResult - .emit(gns, 'SubgraphUpgraded') - .withArgs(subgraphID, newVSignalEstimate, totalAdjustedUp, newSubgraph.subgraphDeploymentID) - } - await txResult - - // Check curation vSignal old are set to zero - const [afterTokensOldCuration, afterVSignalOldCuration] = await getTokensAndVSignal( - beforeSubgraph.subgraphDeploymentID, - ) - expect(afterTokensOldCuration).eq(0) - expect(afterVSignalOldCuration).eq(0) - - // Check the vSignal of the new curation curve, and tokens - const [afterTokensNewCurve, afterVSignalNewCurve] = await getTokensAndVSignal( - newSubgraph.subgraphDeploymentID, - ) - expect(afterTokensNewCurve).eq(totalAdjustedUp.sub(newCurationTaxEstimate)) - expect(afterVSignalNewCurve).eq(newVSignalEstimate) - - // Check the nSignal pool - const afterSubgraph = await gns.subgraphs(subgraphID) - expect(afterSubgraph.vSignal).eq(afterVSignalNewCurve).eq(newVSignalEstimate) - expect(afterSubgraph.nSignal).eq(beforeSubgraph.nSignal) // should not change - expect(afterSubgraph.subgraphDeploymentID).eq(newSubgraph.subgraphDeploymentID) - - // Check NFT should not change owner - const owner = await gns.ownerOf(subgraphID) - expect(owner).eq(account.address) - - return tx - } - const deprecateSubgraph = async (account: Account, subgraphID: string) => { // Before state const beforeSubgraph = await gns.subgraphs(subgraphID) - const [beforeTokens] = await getTokensAndVSignal(beforeSubgraph.subgraphDeploymentID) + const [beforeTokens] = await 
getTokensAndVSignal(beforeSubgraph.subgraphDeploymentID, curation) // We can use the whole amount, since in this test suite all vSignal is used to be staked on nSignal const ownerBalanceBefore = await grt.balanceOf(account.address) @@ -336,82 +174,6 @@ describe('L1GNS', () => { return tx } - /* - const upgradeNameSignal = async ( - account: Account, - graphAccount: string, - subgraphNumber0: number, - newSubgraphDeplyomentID: string, - ): Promise => { - // Before stats for the old vSignal curve - const beforeTokensVSigOldCuration = await getTokensAndVSignal(subgraph0.subgraphDeploymentID) - const beforeTokensOldCuration = beforeTokensVSigOldCuration[0] - const beforeVSignalOldCuration = beforeTokensVSigOldCuration[1] - - // Before stats for the name curve - const poolBefore = await gns.nameSignals(graphAccount, subgraphNumber0) - const nSigBefore = poolBefore[1] - - // Check what selling all nSignal, which == selling all vSignal, should return for tokens - const nSignalToTokensResult = await gns.nSignalToTokens( - graphAccount, - subgraphNumber0, - nSigBefore, - ) - const vSignalBurnEstimate = nSignalToTokensResult[0] - const tokensReceivedEstimate = nSignalToTokensResult[1] - - // since in upgrade, owner must refund fees, we need to actually add this back in - const feesToAddBackEstimate = nSignalToTokensResult[2] - const upgradeTokenReturn = tokensReceivedEstimate.add(feesToAddBackEstimate) - - // Get the value for new vSignal that should be created on the new curve - const newVSignalEstimate = await curation.tokensToSignal( - newSubgraphDeplyomentID, - upgradeTokenReturn, - ) - - // Do the upgrade - const tx = gns - .connect(account.signer) - .upgradeNameSignal(graphAccount, subgraphNumber0, newSubgraphDeplyomentID) - await expect(tx) - .emit(gns, 'NameSignalUpgrade') - .withArgs( - graphAccount, - subgraphNumber0, - newVSignalEstimate, - upgradeTokenReturn, - newSubgraphDeplyomentID, - ) - - // Check curation vSignal old was lowered and tokens too - const 
[afterTokensOldCuration, vSigAfterOldCuration] = await getTokensAndVSignal( - subgraph0.subgraphDeploymentID, - ) - expect(afterTokensOldCuration).eq(beforeTokensOldCuration.sub(upgradeTokenReturn)) - expect(vSigAfterOldCuration).eq(beforeVSignalOldCuration.sub(vSignalBurnEstimate)) - - // Check the vSignal of the new curation curve, amd tokens - const [afterTokensNewCurve, vSigAfterNewCurve] = await getTokensAndVSignal( - newSubgraphDeplyomentID, - ) - expect(afterTokensNewCurve).eq(upgradeTokenReturn) - expect(vSigAfterNewCurve).eq(newVSignalEstimate) - - // Check the nSignal pool - const pool = await gns.nameSignals(graphAccount, subgraphNumber0) - const vSigPool = pool[0] - const nSigAfter = pool[1] - const deploymentID = pool[2] - expect(vSigAfterNewCurve).eq(vSigPool).eq(newVSignalEstimate) - expect(nSigBefore).eq(nSigAfter) // should not change - expect(deploymentID).eq(newSubgraphDeplyomentID) - - return tx - } - */ - const mintSignal = async ( account: Account, subgraphID: string, @@ -421,6 +183,7 @@ describe('L1GNS', () => { const beforeSubgraph = await gns.subgraphs(subgraphID) const [beforeTokens, beforeVSignal] = await getTokensAndVSignal( beforeSubgraph.subgraphDeploymentID, + curation, ) // Deposit @@ -438,6 +201,7 @@ describe('L1GNS', () => { const afterSubgraph = await gns.subgraphs(subgraphID) const [afterTokens, afterVSignal] = await getTokensAndVSignal( afterSubgraph.subgraphDeploymentID, + curation, ) // Check state @@ -454,6 +218,7 @@ describe('L1GNS', () => { const beforeSubgraph = await gns.subgraphs(subgraphID) const [beforeTokens, beforeVSignal] = await getTokensAndVSignal( beforeSubgraph.subgraphDeploymentID, + curation, ) const beforeUsersNSignal = await gns.getCuratorSignal(subgraphID, account.address) @@ -473,6 +238,7 @@ describe('L1GNS', () => { const afterSubgraph = await gns.subgraphs(subgraphID) const [afterTokens, afterVSignalCuration] = await getTokensAndVSignal( afterSubgraph.subgraphDeploymentID, + curation, ) // Check state @@ 
-709,7 +475,7 @@ describe('L1GNS', () => { let subgraph: Subgraph beforeEach(async function () { - subgraph = await publishNewSubgraph(me, newSubgraph0) + subgraph = await publishNewSubgraph(me, newSubgraph0, gns) }) it('updateSubgraphMetadata emits the event', async function () { @@ -733,19 +499,19 @@ describe('L1GNS', () => { it('should return if the subgraph is published', async function () { const subgraphID = await buildSubgraphID(me.address, toBN(0)) expect(await gns.isPublished(subgraphID)).eq(false) - await publishNewSubgraph(me, newSubgraph0) + await publishNewSubgraph(me, newSubgraph0, gns) expect(await gns.isPublished(subgraphID)).eq(true) }) }) describe('publishNewSubgraph', async function () { it('should publish a new subgraph and first version with it', async function () { - await publishNewSubgraph(me, newSubgraph0) + await publishNewSubgraph(me, newSubgraph0, gns) }) it('should publish a new subgraph with an incremented value', async function () { - const subgraph1 = await publishNewSubgraph(me, newSubgraph0) - const subgraph2 = await publishNewSubgraph(me, newSubgraph1) + const subgraph1 = await publishNewSubgraph(me, newSubgraph0, gns) + const subgraph2 = await publishNewSubgraph(me, newSubgraph1, gns) expect(subgraph1.id).not.eq(subgraph2.id) }) @@ -761,17 +527,17 @@ describe('L1GNS', () => { let subgraph: Subgraph beforeEach(async () => { - subgraph = await publishNewSubgraph(me, newSubgraph0) + subgraph = await publishNewSubgraph(me, newSubgraph0, gns) await mintSignal(me, subgraph.id, tokens10000) }) it('should publish a new version on an existing subgraph', async function () { - await publishNewVersion(me, subgraph.id, newSubgraph1) + await publishNewVersion(me, subgraph.id, newSubgraph1, gns, curation) }) it('should publish a new version on an existing subgraph with no current signal', async function () { - const emptySignalSubgraph = await publishNewSubgraph(me, buildSubgraph()) - await publishNewVersion(me, emptySignalSubgraph.id, 
newSubgraph1) + const emptySignalSubgraph = await publishNewSubgraph(me, buildSubgraph(), gns) + await publishNewVersion(me, emptySignalSubgraph.id, newSubgraph1, gns, curation) }) it('should reject a new version with the same subgraph deployment ID', async function () { @@ -858,7 +624,7 @@ describe('L1GNS', () => { let subgraph: Subgraph beforeEach(async () => { - subgraph = await publishNewSubgraph(me, newSubgraph0) + subgraph = await publishNewSubgraph(me, newSubgraph0, gns) await mintSignal(me, subgraph.id, tokens10000) }) @@ -895,12 +661,12 @@ describe('L1GNS', () => { describe('Curating on names', async function () { describe('mintSignal()', async function () { it('should deposit into the name signal curve', async function () { - const subgraph = await publishNewSubgraph(me, newSubgraph0) + const subgraph = await publishNewSubgraph(me, newSubgraph0, gns) await mintSignal(other, subgraph.id, tokens10000) }) it('should fail when name signal is disabled', async function () { - const subgraph = await publishNewSubgraph(me, newSubgraph0) + const subgraph = await publishNewSubgraph(me, newSubgraph0, gns) await deprecateSubgraph(me, subgraph.id) const tx = gns.connect(me.signer).mintSignal(subgraph.id, tokens1000, 0) await expect(tx).revertedWith('GNS: Must be active') @@ -914,7 +680,7 @@ describe('L1GNS', () => { it('reject minting if under slippage', async function () { // First publish the subgraph - const subgraph = await publishNewSubgraph(me, newSubgraph0) + const subgraph = await publishNewSubgraph(me, newSubgraph0, gns) // Set slippage to be 1 less than expected result to force reverting const { 1: expectedNSignal } = await gns.tokensToNSignal(subgraph.id, tokens1000) @@ -929,7 +695,7 @@ describe('L1GNS', () => { let subgraph: Subgraph beforeEach(async () => { - subgraph = await publishNewSubgraph(me, newSubgraph0) + subgraph = await publishNewSubgraph(me, newSubgraph0, gns) await mintSignal(other, subgraph.id, tokens10000) }) @@ -974,7 +740,7 @@ 
describe('L1GNS', () => { let otherNSignal: BigNumber beforeEach(async () => { - subgraph = await publishNewSubgraph(me, newSubgraph0) + subgraph = await publishNewSubgraph(me, newSubgraph0, gns) await mintSignal(other, subgraph.id, tokens10000) otherNSignal = await gns.getCuratorSignal(subgraph.id, other.address) }) @@ -1013,7 +779,7 @@ describe('L1GNS', () => { let subgraph: Subgraph beforeEach(async () => { - subgraph = await publishNewSubgraph(me, newSubgraph0) + subgraph = await publishNewSubgraph(me, newSubgraph0, gns) await mintSignal(other, subgraph.id, tokens10000) }) @@ -1053,7 +819,7 @@ describe('L1GNS', () => { toGRT('2000'), toGRT('123'), ] - const subgraph = await publishNewSubgraph(me, newSubgraph0) + const subgraph = await publishNewSubgraph(me, newSubgraph0, gns) // State updated const curationTaxPercentage = await curation.curationTaxPercentage() @@ -1095,7 +861,7 @@ describe('L1GNS', () => { toGRT('1'), // should mint below minimum deposit ] - const subgraph = await publishNewSubgraph(me, newSubgraph0) + const subgraph = await publishNewSubgraph(me, newSubgraph0, gns) // State updated for (const tokensToDeposit of tokensToDepositMany) { @@ -1110,12 +876,12 @@ describe('L1GNS', () => { await curation.setMinimumCurationDeposit(toGRT('1')) // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + const subgraph0 = await publishNewSubgraph(me, newSubgraph0, gns) // Curate on the first subgraph await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) // Publish a named subgraph-1 -> subgraphDeployment0 - const subgraph1 = await publishNewSubgraph(me, newSubgraph0) + const subgraph1 = await publishNewSubgraph(me, newSubgraph0, gns) // Curate on the second subgraph should work await gns.connect(me.signer).mintSignal(subgraph1.id, toGRT('10'), 0) }) @@ -1193,7 +959,7 @@ describe('L1GNS', () => { describe('NFT descriptor', function () { it('with token descriptor', async function 
() { - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + const subgraph0 = await publishNewSubgraph(me, newSubgraph0, gns) const subgraphNFTAddress = await gns.subgraphNFT() const subgraphNFT = getContractAt('SubgraphNFT', subgraphNFTAddress) as SubgraphNFT @@ -1204,7 +970,7 @@ describe('L1GNS', () => { }) it('with token descriptor and baseURI', async function () { - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + const subgraph0 = await publishNewSubgraph(me, newSubgraph0, gns) const subgraphNFTAddress = await gns.subgraphNFT() const subgraphNFT = getContractAt('SubgraphNFT', subgraphNFTAddress) as SubgraphNFT @@ -1216,7 +982,7 @@ describe('L1GNS', () => { }) it('without token descriptor', async function () { - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + const subgraph0 = await publishNewSubgraph(me, newSubgraph0, gns) const subgraphNFTAddress = await gns.subgraphNFT() const subgraphNFT = getContractAt('SubgraphNFT', subgraphNFTAddress) as SubgraphNFT @@ -1228,7 +994,7 @@ describe('L1GNS', () => { }) it('without token descriptor and baseURI', async function () { - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + const subgraph0 = await publishNewSubgraph(me, newSubgraph0, gns) const subgraphNFTAddress = await gns.subgraphNFT() const subgraphNFT = getContractAt('SubgraphNFT', subgraphNFTAddress) as SubgraphNFT @@ -1243,7 +1009,7 @@ describe('L1GNS', () => { it('without token descriptor and 0x0 metadata', async function () { const newSubgraphNoMetadata = buildSubgraph() newSubgraphNoMetadata.subgraphMetadata = HashZero - const subgraph0 = await publishNewSubgraph(me, newSubgraphNoMetadata) + const subgraph0 = await publishNewSubgraph(me, newSubgraphNoMetadata, gns) const subgraphNFTAddress = await gns.subgraphNFT() const subgraphNFT = getContractAt('SubgraphNFT', subgraphNFTAddress) as SubgraphNFT @@ -1296,7 +1062,7 @@ describe('L1GNS', () => { describe('Subgraph migration to L2', function () { const 
publishAndCurateOnSubgraph = async function (): Promise { // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishNewSubgraph(me, newSubgraph0) + const subgraph0 = await publishNewSubgraph(me, newSubgraph0, gns) // Curate on the subgraph await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) @@ -1595,7 +1361,10 @@ describe('L1GNS', () => { it('can be called by anyone, and makes the GRT from the subgraph withdrawable', async function () { const subgraph0 = await publishAndCurateOnSubgraph() - const [beforeTokens] = await getTokensAndVSignal(newSubgraph0.subgraphDeploymentID) + const [beforeTokens] = await getTokensAndVSignal( + newSubgraph0.subgraphDeploymentID, + curation, + ) await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) await advanceBlocks(256) @@ -1613,7 +1382,8 @@ describe('L1GNS', () => { expect(afterSubgraph.reserveRatio).eq(0) // Should be equal since owner pays curation tax expect(afterSubgraph.withdrawableGRT).eq(beforeTokens) - expect((await gns.subgraphL2MigrationData(subgraph0.id)).deprecated).to.eq(true) + const migrationData = await gns.subgraphL2MigrationData(subgraph0.id) + expect(migrationData.deprecated).to.eq(true) }) it('rejects calls for a subgraph that was not locked', async function () { const subgraph0 = await publishAndCurateOnSubgraph() @@ -1641,7 +1411,10 @@ describe('L1GNS', () => { it('rejects calls for a subgraph that was already deprecated', async function () { const subgraph0 = await publishAndCurateOnSubgraph() - const [beforeTokens] = await getTokensAndVSignal(newSubgraph0.subgraphDeploymentID) + const [beforeTokens] = await getTokensAndVSignal( + newSubgraph0.subgraphDeploymentID, + curation, + ) await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) await advanceBlocks(256) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 5e2330a07..25114fe19 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -1,31 +1,217 @@ 
import { expect } from 'chai' import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' -import { solidityKeccak256 } from 'ethers/lib/utils' -import { SubgraphDeploymentID } from '@graphprotocol/common-ts' - -import { LegacyGNSMock } from '../../build/types/LegacyGNSMock' -import { GraphToken } from '../../build/types/GraphToken' -import { Curation } from '../../build/types/Curation' -import { SubgraphNFT } from '../../build/types/SubgraphNFT' - -import { getAccounts, randomHexBytes, Account, toGRT, getChainID } from '../lib/testHelpers' -import { NetworkFixture } from '../lib/fixtures' -import { toBN, formatGRT } from '../lib/testHelpers' -import { getContractAt } from '../../cli/network' -import { deployContract } from '../lib/deployment' -import { BancorFormula } from '../../build/types/BancorFormula' -import { network } from '../../cli' -import { Controller } from '../../build/types/Controller' -import { GraphProxyAdmin } from '../../build/types/GraphProxyAdmin' +import { defaultAbiCoder, parseUnits } from 'ethers/lib/utils' + +import { getAccounts, randomHexBytes, Account, toGRT, getL2SignerFromL1 } from '../lib/testHelpers' +import { L2FixtureContracts, NetworkFixture } from '../lib/fixtures' +import { toBN } from '../lib/testHelpers' + import { L2GNS } from '../../build/types/L2GNS' +import { L2GraphToken } from '../../build/types/L2GraphToken' +import { L2GraphTokenGateway } from '../../build/types/L2GraphTokenGateway' +import { + buildSubgraph, + buildSubgraphID, + DEFAULT_RESERVE_RATIO, + publishNewSubgraph, + PublishSubgraph, +} from '../lib/gnsUtils' -const { AddressZero, HashZero } = ethers.constants +const { HashZero } = ethers.constants describe('L2GNS', () => { + let me: Account + let other: Account + let governor: Account + let tokenSender: Account + let l1Receiver: Account + let l2Receiver: Account + let mockRouter: Account + let mockL1GRT: Account + let mockL1Gateway: Account + let mockL1GNS: Account + let pauseGuardian: Account + 
let fixture: NetworkFixture + + let fixtureContracts: L2FixtureContracts + let l2GraphTokenGateway: L2GraphTokenGateway + let gns: L2GNS + + let newSubgraph0: PublishSubgraph + + const gatewayFinalizeTransfer = async function ( + from: string, + to: string, + amount: BigNumber, + callhookData: string, + ): Promise { + const mockL1GatewayL2Alias = await getL2SignerFromL1(mockL1Gateway.address) + // Eth for gas: + await me.signer.sendTransaction({ + to: await mockL1GatewayL2Alias.getAddress(), + value: parseUnits('1', 'ether'), + }) + const data = defaultAbiCoder.encode(['bytes', 'bytes'], ['0x', callhookData]) + const tx = l2GraphTokenGateway + .connect(mockL1GatewayL2Alias) + .finalizeInboundTransfer(mockL1GRT.address, from, to, amount, data) + return tx + } + + before(async function () { + newSubgraph0 = buildSubgraph() + ;[ + me, + other, + governor, + tokenSender, + l1Receiver, + mockRouter, + mockL1GRT, + mockL1Gateway, + l2Receiver, + pauseGuardian, + mockL1GNS, + ] = await getAccounts() + + fixture = new NetworkFixture() + fixtureContracts = await fixture.loadL2(governor.signer) + ;({ l2GraphTokenGateway, gns } = fixtureContracts) + + await fixture.configureL2Bridge( + governor.signer, + fixtureContracts, + mockRouter.address, + mockL1GRT.address, + mockL1Gateway.address, + ) + }) + describe('receiving a subgraph from L1', function () { - it('cannot be called by someone other than the L2GraphTokenGateway') - it('creates a subgraph in a disabled state') - it('does not conflict with a locally created subgraph') + it('cannot be called by someone other than the L2GraphTokenGateway', async function () { + const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) + const curatedTokens = toGRT('1337') + const lockBlockhash = randomHexBytes(32) + const metadata = randomHexBytes() + const nSignal = toBN('4567') + const tx = gns + .connect(me.signer) + .receiveSubgraphFromL1( + l1SubgraphId, + me.address, + curatedTokens, + lockBlockhash, + nSignal, + 
DEFAULT_RESERVE_RATIO, + metadata, + ) + await expect(tx).revertedWith('ONLY_GATEWAY') + }) + it('creates a subgraph in a disabled state', async function () { + const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) + const curatedTokens = toGRT('1337') + const lockBlockhash = randomHexBytes(32) + const metadata = randomHexBytes() + const nSignal = toBN('4567') + const callhookData = gns.interface.encodeFunctionData('receiveSubgraphFromL1', [ + l1SubgraphId, + me.address, + curatedTokens, + lockBlockhash, + nSignal, + DEFAULT_RESERVE_RATIO, + metadata, + ]) + const tx = gatewayFinalizeTransfer( + mockL1GNS.address, + gns.address, + curatedTokens, + callhookData, + ) + + await expect(tx) + .emit(l2GraphTokenGateway, 'DepositFinalized') + .withArgs(mockL1GRT.address, mockL1GNS.address, gns.address, curatedTokens) + await expect(tx).emit(gns, 'SubgraphReceivedFromL1').withArgs(l1SubgraphId) + await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l1SubgraphId, metadata) + + const migrationData = await gns.subgraphL2MigrationData(l1SubgraphId) + const subgraphData = await gns.subgraphs(l1SubgraphId) + + expect(migrationData.lockedAtBlock).eq(0) // We don't use this in L2 + expect(migrationData.tokens).eq(curatedTokens) + expect(migrationData.lockedAtBlockHash).eq(lockBlockhash) + expect(migrationData.l1Done).eq(true) // We don't use this in L2 + expect(migrationData.l2Done).eq(false) + expect(migrationData.deprecated).eq(false) // We don't use this in L2 + + expect(subgraphData.vSignal).eq(0) + expect(subgraphData.nSignal).eq(nSignal) + expect(subgraphData.subgraphDeploymentID).eq(HashZero) + expect(subgraphData.reserveRatio).eq(DEFAULT_RESERVE_RATIO) + expect(subgraphData.disabled).eq(true) + expect(subgraphData.withdrawableGRT).eq(0) // Important so that it's not the same as a deprecated subgraph! 
+ + expect(await gns.ownerOf(l1SubgraphId)).eq(me.address) + }) + it('does not conflict with a locally created subgraph', async function () { + const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) + + const l1SubgraphId = await buildSubgraphID(me.address, toBN('0'), 1) + const curatedTokens = toGRT('1337') + const lockBlockhash = randomHexBytes(32) + const metadata = randomHexBytes() + const nSignal = toBN('4567') + const callhookData = gns.interface.encodeFunctionData('receiveSubgraphFromL1', [ + l1SubgraphId, + me.address, + curatedTokens, + lockBlockhash, + nSignal, + DEFAULT_RESERVE_RATIO, + metadata, + ]) + const tx = gatewayFinalizeTransfer( + mockL1GNS.address, + gns.address, + curatedTokens, + callhookData, + ) + + await expect(tx) + .emit(l2GraphTokenGateway, 'DepositFinalized') + .withArgs(mockL1GRT.address, mockL1GNS.address, gns.address, curatedTokens) + await expect(tx).emit(gns, 'SubgraphReceivedFromL1').withArgs(l1SubgraphId) + await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l1SubgraphId, metadata) + + const migrationData = await gns.subgraphL2MigrationData(l1SubgraphId) + const subgraphData = await gns.subgraphs(l1SubgraphId) + + expect(migrationData.lockedAtBlock).eq(0) // We don't use this in L2 + expect(migrationData.tokens).eq(curatedTokens) + expect(migrationData.lockedAtBlockHash).eq(lockBlockhash) + expect(migrationData.l1Done).eq(true) // We don't use this in L2 + expect(migrationData.l2Done).eq(false) + expect(migrationData.deprecated).eq(false) // We don't use this in L2 + + expect(subgraphData.vSignal).eq(0) + expect(subgraphData.nSignal).eq(nSignal) + expect(subgraphData.subgraphDeploymentID).eq(HashZero) + expect(subgraphData.reserveRatio).eq(DEFAULT_RESERVE_RATIO) + expect(subgraphData.disabled).eq(true) + expect(subgraphData.withdrawableGRT).eq(0) // Important so that it's not the same as a deprecated subgraph! 
+ + expect(await gns.ownerOf(l1SubgraphId)).eq(me.address) + + expect(l2Subgraph.id).not.eq(l1SubgraphId) + const l2SubgraphData = await gns.subgraphs(l2Subgraph.id) + expect(l2SubgraphData.vSignal).eq(0) + expect(l2SubgraphData.nSignal).eq(0) + expect(l2SubgraphData.subgraphDeploymentID).eq(l2Subgraph.subgraphDeploymentID) + expect(l2SubgraphData.reserveRatio).eq(DEFAULT_RESERVE_RATIO) + expect(l2SubgraphData.disabled).eq(false) + expect(l2SubgraphData.withdrawableGRT).eq(0) + }) }) describe('finishing a subgraph migration from L1', function () { diff --git a/test/lib/gnsUtils.ts b/test/lib/gnsUtils.ts new file mode 100644 index 000000000..32395957a --- /dev/null +++ b/test/lib/gnsUtils.ts @@ -0,0 +1,195 @@ +import { BigNumber } from 'ethers' +import { namehash, solidityKeccak256 } from 'ethers/lib/utils' +import { Curation } from '../../build/types/Curation' +import { L1GNS } from '../../build/types/L1GNS' +import { L2GNS } from '../../build/types/L2GNS' +import { Account, getChainID, randomHexBytes, toBN } from './testHelpers' +import { expect } from 'chai' + +// Entities +export interface PublishSubgraph { + subgraphDeploymentID: string + versionMetadata: string + subgraphMetadata: string +} + +export interface Subgraph { + vSignal: BigNumber + nSignal: BigNumber + subgraphDeploymentID: string + reserveRatio: number + disabled: boolean + withdrawableGRT: BigNumber + id?: string +} + +export interface AccountDefaultName { + name: string + nameIdentifier: string +} + +export const DEFAULT_RESERVE_RATIO = 1000000 + +export const buildSubgraphID = async ( + account: string, + seqID: BigNumber, + chainID?: number, +): Promise => { + chainID = chainID ?? 
(await getChainID()) + return solidityKeccak256(['address', 'uint256', 'uint256'], [account, seqID, chainID]) +} + +export const buildLegacySubgraphID = (account: string, seqID: BigNumber): string => + solidityKeccak256(['address', 'uint256'], [account, seqID]) + +export const buildSubgraph = (): PublishSubgraph => { + return { + subgraphDeploymentID: randomHexBytes(), + versionMetadata: randomHexBytes(), + subgraphMetadata: randomHexBytes(), + } +} + +export const createDefaultName = (name: string): AccountDefaultName => { + return { + name: name, + nameIdentifier: namehash(name), + } +} + +export const getTokensAndVSignal = async ( + subgraphDeploymentID: string, + curation: Curation, +): Promise> => { + const curationPool = await curation.pools(subgraphDeploymentID) + const vSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) + return [curationPool.tokens, vSignal] +} + +export const publishNewSubgraph = async ( + account: Account, + newSubgraph: PublishSubgraph, + gns: L1GNS | L2GNS, +): Promise => { + const subgraphID = await buildSubgraphID( + account.address, + await gns.nextAccountSeqID(account.address), + ) + + // Send tx + const tx = gns + .connect(account.signer) + .publishNewSubgraph( + newSubgraph.subgraphDeploymentID, + newSubgraph.versionMetadata, + newSubgraph.subgraphMetadata, + ) + + // Check events + await expect(tx) + .emit(gns, 'SubgraphPublished') + .withArgs(subgraphID, newSubgraph.subgraphDeploymentID, DEFAULT_RESERVE_RATIO) + .emit(gns, 'SubgraphMetadataUpdated') + .withArgs(subgraphID, newSubgraph.subgraphMetadata) + .emit(gns, 'SubgraphVersionUpdated') + .withArgs(subgraphID, newSubgraph.subgraphDeploymentID, newSubgraph.versionMetadata) + + // Check state + const subgraph = await gns.subgraphs(subgraphID) + expect(subgraph.vSignal).eq(0) + expect(subgraph.nSignal).eq(0) + expect(subgraph.subgraphDeploymentID).eq(newSubgraph.subgraphDeploymentID) + expect(subgraph.reserveRatio).eq(DEFAULT_RESERVE_RATIO) + 
expect(subgraph.disabled).eq(false) + expect(subgraph.withdrawableGRT).eq(0) + + // Check NFT issuance + const owner = await gns.ownerOf(subgraphID) + expect(owner).eq(account.address) + + return { ...subgraph, id: subgraphID } +} + +export const publishNewVersion = async ( + account: Account, + subgraphID: string, + newSubgraph: PublishSubgraph, + gns: L1GNS | L2GNS, + curation: Curation, +) => { + // Before state + const ownerTaxPercentage = await gns.ownerTaxPercentage() + const curationTaxPercentage = await curation.curationTaxPercentage() + const beforeSubgraph = await gns.subgraphs(subgraphID) + + // Check what selling all nSignal, which == selling all vSignal, should return for tokens + // NOTE - no tax on burning on nSignal + const tokensReceivedEstimate = beforeSubgraph.nSignal.gt(0) + ? (await gns.nSignalToTokens(subgraphID, beforeSubgraph.nSignal))[1] + : toBN(0) + // Example: + // Deposit 100, 5 is taxed, 95 GRT in curve + // Upgrade - calculate 5% tax on 95 --> 4.75 GRT + // Multiple by ownerPercentage --> 50% * 4.75 = 2.375 GRT + // Owner adds 2.375 to 90.25, we deposit 92.625 GRT into the curve + // Divide this by 0.95 to get exactly 97.5 total tokens to be deposited + + // nSignalToTokens returns the amount of tokens with tax removed + // already. So we must add in the tokens removed + const MAX_PPM = 1000000 + const taxOnOriginal = tokensReceivedEstimate.mul(curationTaxPercentage).div(MAX_PPM) + const totalWithoutOwnerTax = tokensReceivedEstimate.sub(taxOnOriginal) + const ownerTax = taxOnOriginal.mul(ownerTaxPercentage).div(MAX_PPM) + const totalWithOwnerTax = totalWithoutOwnerTax.add(ownerTax) + const totalAdjustedUp = totalWithOwnerTax.mul(MAX_PPM).div(MAX_PPM - curationTaxPercentage) + + // Re-estimate amount of signal to get considering the owner tax paid by the owner + + const { 0: newVSignalEstimate, 1: newCurationTaxEstimate } = beforeSubgraph.nSignal.gt(0) + ? 
await curation.tokensToSignal(newSubgraph.subgraphDeploymentID, totalAdjustedUp) + : [toBN(0), toBN(0)] + + // Send tx + const tx = gns + .connect(account.signer) + .publishNewVersion(subgraphID, newSubgraph.subgraphDeploymentID, newSubgraph.versionMetadata) + const txResult = expect(tx) + .emit(gns, 'SubgraphVersionUpdated') + .withArgs(subgraphID, newSubgraph.subgraphDeploymentID, newSubgraph.versionMetadata) + + // Only emits this event if there was actual signal to upgrade + if (beforeSubgraph.nSignal.gt(0)) { + txResult + .emit(gns, 'SubgraphUpgraded') + .withArgs(subgraphID, newVSignalEstimate, totalAdjustedUp, newSubgraph.subgraphDeploymentID) + } + await txResult + + // Check curation vSignal old are set to zero + const [afterTokensOldCuration, afterVSignalOldCuration] = await getTokensAndVSignal( + beforeSubgraph.subgraphDeploymentID, + curation, + ) + expect(afterTokensOldCuration).eq(0) + expect(afterVSignalOldCuration).eq(0) + + // Check the vSignal of the new curation curve, and tokens + const [afterTokensNewCurve, afterVSignalNewCurve] = await getTokensAndVSignal( + newSubgraph.subgraphDeploymentID, + curation, + ) + expect(afterTokensNewCurve).eq(totalAdjustedUp.sub(newCurationTaxEstimate)) + expect(afterVSignalNewCurve).eq(newVSignalEstimate) + + // Check the nSignal pool + const afterSubgraph = await gns.subgraphs(subgraphID) + expect(afterSubgraph.vSignal).eq(afterVSignalNewCurve).eq(newVSignalEstimate) + expect(afterSubgraph.nSignal).eq(beforeSubgraph.nSignal) // should not change + expect(afterSubgraph.subgraphDeploymentID).eq(newSubgraph.subgraphDeploymentID) + + // Check NFT should not change owner + const owner = await gns.ownerOf(subgraphID) + expect(owner).eq(account.address) + + return tx +} From 29445ce84b4a846e9d30bd9ac46cf6db64190fd6 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 10 Oct 2022 11:52:56 -0500 Subject: [PATCH 023/112] fix: allow larger msg.value when claiming curation --- contracts/discovery/L1GNS.sol | 2 +- 
1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index ad58b14a1..63e3a1709 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -172,7 +172,7 @@ contract L1GNS is GNS, L1ArbitrumMessenger { // if a user does not desire immediate redemption they should provide // a msg.value of AT LEAST maxSubmissionCost uint256 expectedEth = _maxSubmissionCost + (_maxGas * _gasPriceBid); - require(msg.value == expectedEth, "WRONG_ETH_VALUE"); + require(msg.value >= expectedEth, "WRONG_ETH_VALUE"); } L2GasParams memory gasParams = L2GasParams(_maxSubmissionCost, _maxGas, _gasPriceBid); From 5fa1e1cd32192b9616f2cb6ac70b69b254e09cd4 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 11 Oct 2022 12:46:39 -0500 Subject: [PATCH 024/112] fix: use the new onTokenTransfer interface --- contracts/discovery/L1GNS.sol | 4 +- contracts/l2/discovery/IL2GNS.sol | 12 +---- contracts/l2/discovery/L2GNS.sol | 87 +++++++++++++++++++++---------- test/gns.test.ts | 42 ++++++++------- test/l2/l2GNS.test.ts | 57 ++++++++++---------- test/lib/fixtures.ts | 2 + 6 files changed, 117 insertions(+), 87 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 63e3a1709..9321277fb 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -119,11 +119,9 @@ contract L1GNS is GNS, L1ArbitrumMessenger { SubgraphData storage subgraphData ) internal view returns (bytes memory) { return - abi.encodeWithSelector( - IL2GNS.receiveSubgraphFromL1.selector, + abi.encode( _subgraphID, ownerOf(_subgraphID), - migrationData.tokens, blockhash(migrationData.lockedAtBlock), subgraphData.nSignal, subgraphData.reserveRatio, diff --git a/contracts/l2/discovery/IL2GNS.sol b/contracts/l2/discovery/IL2GNS.sol index 6fa338567..9fe75dcf5 100644 --- a/contracts/l2/discovery/IL2GNS.sol +++ b/contracts/l2/discovery/IL2GNS.sol @@ -2,17 +2,9 @@ pragma solidity ^0.7.6; 
-interface IL2GNS { - function receiveSubgraphFromL1( - uint256 _subgraphID, - address _subgraphOwner, - uint256 _tokens, - bytes32 _lockedAtBlockHash, - uint256 _nSignal, - uint32 _reserveRatio, - bytes32 _subgraphMetadata - ) external; +import { ICallhookReceiver } from "../../gateway/ICallhookReceiver.sol"; +interface IL2GNS is ICallhookReceiver { function finishSubgraphMigrationFromL1( uint256 _subgraphID, bytes32 _subgraphDeploymentID, diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index ed6ed74cb..a75a53f4a 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -63,34 +63,37 @@ contract L2GNS is GNS, IL2GNS { _; } - function receiveSubgraphFromL1( - uint256 _subgraphID, - address _subgraphOwner, - uint256 _tokens, - bytes32 _lockedAtBlockHash, - uint256 _nSignal, - uint32 _reserveRatio, - bytes32 _subgraphMetadata + /** + * @dev Receive tokens with a callhook from the bridge. + * The callhook will receive a subgraph from L1 + * @param _from Token sender in L1 (must be the L1GNS) + * @param _amount Amount of tokens that were transferred + * @param _data ABI-encoded callhook data + */ + function onTokenTransfer( + address _from, + uint256 _amount, + bytes calldata _data ) external override notPartialPaused onlyL2Gateway { - IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - - subgraphData.reserveRatio = _reserveRatio; - // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called - subgraphData.disabled = true; - subgraphData.nSignal = _nSignal; - - migratedData.tokens = _tokens; - migratedData.lockedAtBlockHash = _lockedAtBlockHash; - migratedData.l1Done = true; - - // Mint the NFT. Use the subgraphID as tokenID. - // This function will check the if tokenID already exists. 
- _mintNFT(_subgraphOwner, _subgraphID); - - // Set the token metadata - _setSubgraphMetadata(_subgraphID, _subgraphMetadata); - emit SubgraphReceivedFromL1(_subgraphID); + require(_from == counterpartGNSAddress, "ONLY_L1_GNS_THROUGH_BRIDGE"); + ( + uint256 subgraphID, + address subgraphOwner, + bytes32 lockedAtBlockHash, + uint256 nSignal, + uint32 reserveRatio, + bytes32 subgraphMetadata + ) = abi.decode(_data, (uint256, address, bytes32, uint256, uint32, bytes32)); + + _receiveSubgraphFromL1( + subgraphID, + subgraphOwner, + _amount, + lockedAtBlockHash, + nSignal, + reserveRatio, + subgraphMetadata + ); } function finishSubgraphMigrationFromL1( @@ -265,6 +268,36 @@ contract L2GNS is GNS, IL2GNS { migratedData.curatorBalanceClaimed[_curator] = true; } + function _receiveSubgraphFromL1( + uint256 _subgraphID, + address _subgraphOwner, + uint256 _tokens, + bytes32 _lockedAtBlockHash, + uint256 _nSignal, + uint32 _reserveRatio, + bytes32 _subgraphMetadata + ) internal { + IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + + subgraphData.reserveRatio = _reserveRatio; + // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called + subgraphData.disabled = true; + subgraphData.nSignal = _nSignal; + + migratedData.tokens = _tokens; + migratedData.lockedAtBlockHash = _lockedAtBlockHash; + migratedData.l1Done = true; + + // Mint the NFT. Use the subgraphID as tokenID. + // This function will check the if tokenID already exists. 
+ _mintNFT(_subgraphOwner, _subgraphID); + + // Set the token metadata + _setSubgraphMetadata(_subgraphID, _subgraphMetadata); + emit SubgraphReceivedFromL1(_subgraphID); + } + function _getCuratorSlot(address _curator, uint256 _subgraphID) internal pure diff --git a/test/gns.test.ts b/test/gns.test.ts index beee4ebf3..11ba7ca4f 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai' import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' -import { Interface } from 'ethers/lib/utils' +import { defaultAbiCoder, Interface } from 'ethers/lib/utils' import { SubgraphDeploymentID } from '@graphprotocol/common-ts' import { LegacyGNSMock } from '../build/types/LegacyGNSMock' @@ -1222,15 +1222,17 @@ describe('L1GNS', () => { expect(migrationData.lockedAtBlock).eq((await latestBlock()).sub(1)) expect(migrationData.l1Done).eq(true) - const expectedCallhookData = l2GNSIface.encodeFunctionData('receiveSubgraphFromL1', [ - subgraph0.id, - me.address, - curatedTokens, - lockBlockhash, - subgraphBefore.nSignal, - subgraphBefore.reserveRatio, - newSubgraph0.subgraphMetadata, - ]) + const expectedCallhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + [ + subgraph0.id, + me.address, + lockBlockhash, + subgraphBefore.nSignal, + subgraphBefore.reserveRatio, + newSubgraph0.subgraphMetadata, + ], + ) const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( grt.address, @@ -1274,15 +1276,17 @@ describe('L1GNS', () => { expect(migrationData.lockedAtBlock).eq((await latestBlock()).sub(1)) expect(migrationData.l1Done).eq(true) - const expectedCallhookData = l2GNSIface.encodeFunctionData('receiveSubgraphFromL1', [ - subgraphID, - me.address, - curatedTokens, - lockBlockhash, - subgraphBefore.nSignal, - subgraphBefore.reserveRatio, - newSubgraph0.subgraphMetadata, - ]) + const expectedCallhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 
'uint32', 'bytes32'], + [ + subgraphID, + me.address, + lockBlockhash, + subgraphBefore.nSignal, + subgraphBefore.reserveRatio, + newSubgraph0.subgraphMetadata, + ], + ) const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( grt.address, diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 25114fe19..5e6b17de1 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -84,44 +84,50 @@ describe('L2GNS', () => { mockRouter.address, mockL1GRT.address, mockL1Gateway.address, + mockL1GNS.address, ) }) - describe('receiving a subgraph from L1', function () { + describe('receiving a subgraph from L1 (onTokenTransfer)', function () { it('cannot be called by someone other than the L2GraphTokenGateway', async function () { const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) const curatedTokens = toGRT('1337') const lockBlockhash = randomHexBytes(32) const metadata = randomHexBytes() const nSignal = toBN('4567') + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ) const tx = gns .connect(me.signer) - .receiveSubgraphFromL1( - l1SubgraphId, - me.address, - curatedTokens, - lockBlockhash, - nSignal, - DEFAULT_RESERVE_RATIO, - metadata, - ) + .onTokenTransfer(mockL1GNS.address, curatedTokens, callhookData) await expect(tx).revertedWith('ONLY_GATEWAY') }) + it('rejects calls if the L1 sender is not the L1GNS', async function () { + const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) + const curatedTokens = toGRT('1337') + const lockBlockhash = randomHexBytes(32) + const metadata = randomHexBytes() + const nSignal = toBN('4567') + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ) + const tx = 
gatewayFinalizeTransfer(me.address, gns.address, curatedTokens, callhookData) + + await expect(tx).revertedWith('ONLY_L1_GNS_THROUGH_BRIDGE') + }) it('creates a subgraph in a disabled state', async function () { const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) const curatedTokens = toGRT('1337') const lockBlockhash = randomHexBytes(32) const metadata = randomHexBytes() const nSignal = toBN('4567') - const callhookData = gns.interface.encodeFunctionData('receiveSubgraphFromL1', [ - l1SubgraphId, - me.address, - curatedTokens, - lockBlockhash, - nSignal, - DEFAULT_RESERVE_RATIO, - metadata, - ]) + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ) const tx = gatewayFinalizeTransfer( mockL1GNS.address, gns.address, @@ -162,15 +168,10 @@ describe('L2GNS', () => { const lockBlockhash = randomHexBytes(32) const metadata = randomHexBytes() const nSignal = toBN('4567') - const callhookData = gns.interface.encodeFunctionData('receiveSubgraphFromL1', [ - l1SubgraphId, - me.address, - curatedTokens, - lockBlockhash, - nSignal, - DEFAULT_RESERVE_RATIO, - metadata, - ]) + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ) const tx = gatewayFinalizeTransfer( mockL1GNS.address, gns.address, diff --git a/test/lib/fixtures.ts b/test/lib/fixtures.ts index 874297f45..0ff9d03ef 100644 --- a/test/lib/fixtures.ts +++ b/test/lib/fixtures.ts @@ -277,6 +277,7 @@ export class NetworkFixture { mockRouterAddress: string, mockL1GRTAddress: string, mockL1GatewayAddress: string, + mockL1GNSAddress: string, ): Promise { // Configure the L2 GRT // Configure the gateway @@ -292,6 +293,7 @@ export class NetworkFixture { await l2FixtureContracts.l2GraphTokenGateway 
.connect(deployer) .setL1CounterpartAddress(mockL1GatewayAddress) + await l2FixtureContracts.gns.connect(deployer).setCounterpartGNSAddress(mockL1GNSAddress) await l2FixtureContracts.l2GraphTokenGateway.connect(deployer).setPaused(false) } From 79185cea73c1e8a30d5b5bb613c44592492116aa Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 21 Oct 2022 16:50:21 -0300 Subject: [PATCH 025/112] test: more WIP on tests for L2GNS --- contracts/curation/Curation.sol | 2 +- contracts/governance/Managed.sol | 11 +++++++- test/l2/l2GNS.test.ts | 44 ++++++++++++++++++++++++++++++-- test/lib/fixtures.ts | 1 + 4 files changed, 54 insertions(+), 4 deletions(-) diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index 7efef53e4..d3b6c1f1c 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -71,7 +71,7 @@ contract Curation is CurationV1Storage, GraphUpgradeable { event Collected(bytes32 indexed subgraphDeploymentID, uint256 tokens); modifier onlyGNS() { - require(msg.sender == _resolveContract(keccak256("GNS")), "Only the GNS can call this"); + require(msg.sender == address(gns()), "Only the GNS can call this"); _; } diff --git a/contracts/governance/Managed.sol b/contracts/governance/Managed.sol index 568acd0e8..fde7d7954 100644 --- a/contracts/governance/Managed.sol +++ b/contracts/governance/Managed.sol @@ -10,6 +10,7 @@ import { IRewardsManager } from "../rewards/IRewardsManager.sol"; import { IStaking } from "../staking/IStaking.sol"; import { IGraphToken } from "../token/IGraphToken.sol"; import { ITokenGateway } from "../arbitrum/ITokenGateway.sol"; +import { IGNS } from "../discovery/IGNS.sol"; import { IManaged } from "./IManaged.sol"; @@ -190,7 +191,15 @@ abstract contract Managed is IManaged { } /** - * @dev Resolve a contract address from the cache or the Controller if not found + * @dev Return GNS (L1 or L2) interface. 
+ * @return Address of the GNS contract registered with Controller, as an IGNS interface. + */ + function gns() internal view returns (IGNS) { + return IGNS(_resolveContract(keccak256("GNS"))); + } + + /** + * @dev Resolve a contract address from the cache or the Controller if not found. * @param _nameHash keccak256 hash of the contract name * @return Address of the contract */ diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 5e6b17de1..58761b735 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -16,6 +16,7 @@ import { publishNewSubgraph, PublishSubgraph, } from '../lib/gnsUtils' +import { Curation } from '../../build/types/Curation' const { HashZero } = ethers.constants @@ -36,6 +37,7 @@ describe('L2GNS', () => { let fixtureContracts: L2FixtureContracts let l2GraphTokenGateway: L2GraphTokenGateway let gns: L2GNS + let curation: Curation let newSubgraph0: PublishSubgraph @@ -76,7 +78,7 @@ describe('L2GNS', () => { fixture = new NetworkFixture() fixtureContracts = await fixture.loadL2(governor.signer) - ;({ l2GraphTokenGateway, gns } = fixtureContracts) + ;({ l2GraphTokenGateway, gns, curation } = fixtureContracts) await fixture.configureL2Bridge( governor.signer, @@ -88,6 +90,14 @@ describe('L2GNS', () => { ) }) + beforeEach(async function () { + await fixture.setUp() + }) + + afterEach(async function () { + await fixture.tearDown() + }) + describe('receiving a subgraph from L1 (onTokenTransfer)', function () { it('cannot be called by someone other than the L2GraphTokenGateway', async function () { const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) @@ -216,7 +226,37 @@ describe('L2GNS', () => { }) describe('finishing a subgraph migration from L1', function () { - it('publishes the migrated subgraph and mints signal with no tax') + it('publishes the migrated subgraph and mints signal with no tax', async function () { + const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) + const curatedTokens = 
toGRT('1337') + const lockBlockhash = randomHexBytes(32) + const metadata = randomHexBytes() + const nSignal = toBN('4567') + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ) + await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + // Calculate expected signal before minting, which changes the price + const expectedSignal = await curation.tokensToSignalNoTax( + newSubgraph0.subgraphDeploymentID, + curatedTokens, + ) + + const tx = gns + .connect(me.signer) + .finishSubgraphMigrationFromL1(l1SubgraphId, newSubgraph0.subgraphDeploymentID, metadata) + await expect(tx) + .emit(gns, 'SubgraphPublished') + .withArgs(l1SubgraphId, newSubgraph0.subgraphDeploymentID, DEFAULT_RESERVE_RATIO) + + const subgraphAfter = await gns.subgraphs(l1SubgraphId) + const migrationDataAfter = await gns.subgraphL2MigrationData(l1SubgraphId) + expect(subgraphAfter.vSignal).eq(expectedSignal) + expect(migrationDataAfter.l2Done).eq(true) + expect(migrationDataAfter.deprecated).eq(false) + expect(subgraphAfter.subgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) + }) it('cannot be called by someone other than the subgraph owner') it('rejects calls for a subgraph that was not migrated') it('rejects calls to a pre-curated subgraph deployment') diff --git a/test/lib/fixtures.ts b/test/lib/fixtures.ts index 0ff9d03ef..4fc8a3be5 100644 --- a/test/lib/fixtures.ts +++ b/test/lib/fixtures.ts @@ -143,6 +143,7 @@ export class NetworkFixture { await controller.setContractProxy(utils.id('DisputeManager'), staking.address) await controller.setContractProxy(utils.id('RewardsManager'), rewardsManager.address) await controller.setContractProxy(utils.id('ServiceRegistry'), serviceRegistry.address) + await controller.setContractProxy(utils.id('GNS'), gns.address) if (isL2) { await 
controller.setContractProxy(utils.id('GraphTokenGateway'), l2GraphTokenGateway.address) } else { From 421396cd0b70d403411f4ef5217abd0d8e1a432a Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 27 Oct 2022 17:40:45 -0300 Subject: [PATCH 026/112] test: fix GNS tests, add some more L2GNS tests --- contracts/l2/discovery/L2GNS.sol | 7 +-- test/gns.test.ts | 9 ++-- test/l2/l2GNS.test.ts | 84 +++++++++++++++++++++++++++++--- test/lib/fixtures.ts | 2 +- 4 files changed, 87 insertions(+), 15 deletions(-) diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index a75a53f4a..f1bbc2b9f 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -109,15 +109,12 @@ contract L2GNS is GNS, IL2GNS { migratedData.l2Done = true; // New subgraph deployment must be non-empty - require(_subgraphDeploymentID != 0, "GNS: Cannot set deploymentID to 0"); + require(_subgraphDeploymentID != 0, "GNS: deploymentID != 0"); // This is to prevent the owner from front running its name curators signal by posting // its own signal ahead, bringing the name curators in, and dumping on them ICuration curation = curation(); - require( - !curation.isCurated(_subgraphDeploymentID), - "GNS: Owner cannot point to a subgraphID that has been pre-curated" - ); + require(!curation.isCurated(_subgraphDeploymentID), "GNS: Deployment pre-curated"); // Update pool: constant nSignal, vSignal can change (w/no slippage protection) // Buy all signal from the new deployment diff --git a/test/gns.test.ts b/test/gns.test.ts index 11ba7ca4f..cd68afdc2 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -15,6 +15,7 @@ import { toGRT, latestBlock, advanceBlocks, + provider, } from './lib/testHelpers' import { ArbitrumL1Mocks, NetworkFixture } from './lib/fixtures' import { toBN, formatGRT } from './lib/testHelpers' @@ -332,13 +333,15 @@ describe('L1GNS', () => { await subgraphNFT.connect(governor.signer).setTokenDescriptor(subgraphDescriptor.address) 
await legacyGNSMock.connect(governor.signer).syncAllContracts() await legacyGNSMock.connect(governor.signer).approveAll() - await l1GraphTokenGateway.connect(governor.signer).addToCallhookWhitelist(legacyGNSMock.address) + await l1GraphTokenGateway.connect(governor.signer).addToCallhookAllowlist(legacyGNSMock.address) await legacyGNSMock.connect(governor.signer).setCounterpartGNSAddress(mockL2GNS.address) } before(async function () { ;[me, other, governor, another, mockRouter, mockL2GRT, mockL2Gateway, mockL2GNS] = await getAccounts() + // Dummy code on the mock router so that it appears as a contract + await provider().send('hardhat_setCode', [mockRouter.address, '0x1234']) fixture = new NetworkFixture() const fixtureContracts = await fixture.load(governor.signer) ;({ grt, curation, gns, controller, proxyAdmin, l1GraphTokenGateway } = fixtureContracts) @@ -412,7 +415,7 @@ describe('L1GNS', () => { it('reject set `counterpartGNSAddress` if not allowed', async function () { const newValue = other.address const tx = gns.connect(me.signer).setCounterpartGNSAddress(newValue) - await expect(tx).revertedWith('Caller must be Controller governor') + await expect(tx).revertedWith('Only Controller governor') }) }) @@ -428,7 +431,7 @@ describe('L1GNS', () => { it('reject set `arbitrumInboxAddress` if not allowed', async function () { const newValue = other.address const tx = gns.connect(me.signer).setArbitrumInboxAddress(newValue) - await expect(tx).revertedWith('Caller must be Controller governor') + await expect(tx).revertedWith('Only Controller governor') }) }) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 58761b735..5bb58a003 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai' import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' -import { defaultAbiCoder, parseUnits } from 'ethers/lib/utils' +import { arrayify, defaultAbiCoder, hexlify, parseUnits } from 'ethers/lib/utils' import 
{ getAccounts, randomHexBytes, Account, toGRT, getL2SignerFromL1 } from '../lib/testHelpers' import { L2FixtureContracts, NetworkFixture } from '../lib/fixtures' @@ -17,6 +17,7 @@ import { PublishSubgraph, } from '../lib/gnsUtils' import { Curation } from '../../build/types/Curation' +import { GraphToken } from '../../build/types/GraphToken' const { HashZero } = ethers.constants @@ -38,6 +39,7 @@ describe('L2GNS', () => { let l2GraphTokenGateway: L2GraphTokenGateway let gns: L2GNS let curation: Curation + let grt: GraphToken let newSubgraph0: PublishSubgraph @@ -78,8 +80,9 @@ describe('L2GNS', () => { fixture = new NetworkFixture() fixtureContracts = await fixture.loadL2(governor.signer) - ;({ l2GraphTokenGateway, gns, curation } = fixtureContracts) + ;({ l2GraphTokenGateway, gns, curation, grt } = fixtureContracts) + await grt.connect(governor.signer).mint(me.address, toGRT('10000')) await fixture.configureL2Bridge( governor.signer, fixtureContracts, @@ -257,10 +260,79 @@ describe('L2GNS', () => { expect(migrationDataAfter.deprecated).eq(false) expect(subgraphAfter.subgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) }) - it('cannot be called by someone other than the subgraph owner') - it('rejects calls for a subgraph that was not migrated') - it('rejects calls to a pre-curated subgraph deployment') - it('rejects calls if the subgraph deployment ID is zero') + it('cannot be called by someone other than the subgraph owner', async function () { + const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) + const curatedTokens = toGRT('1337') + const lockBlockhash = randomHexBytes(32) + const metadata = randomHexBytes() + const nSignal = toBN('4567') + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ) + await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + + 
const tx = gns + .connect(other.signer) + .finishSubgraphMigrationFromL1(l1SubgraphId, newSubgraph0.subgraphDeploymentID, metadata) + await expect(tx).revertedWith('GNS: Must be authorized') + }) + it('rejects calls for a subgraph that does not exist', async function () { + const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) + const metadata = randomHexBytes() + + const tx = gns + .connect(me.signer) + .finishSubgraphMigrationFromL1(l1SubgraphId, newSubgraph0.subgraphDeploymentID, metadata) + await expect(tx).revertedWith('ERC721: owner query for nonexistent token') + }) + it('rejects calls for a subgraph that was not migrated', async function () { + const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) + const metadata = randomHexBytes() + + const tx = gns + .connect(me.signer) + .finishSubgraphMigrationFromL1(l2Subgraph.id, newSubgraph0.subgraphDeploymentID, metadata) + await expect(tx).revertedWith('INVALID_SUBGRAPH') + }) + it('rejects calls to a pre-curated subgraph deployment', async function () { + const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) + const curatedTokens = toGRT('1337') + const lockBlockhash = randomHexBytes(32) + const metadata = randomHexBytes() + const nSignal = toBN('4567') + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ) + await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + + await grt.connect(me.signer).approve(curation.address, toGRT('100')) + await curation + .connect(me.signer) + .mint(newSubgraph0.subgraphDeploymentID, toGRT('100'), toBN('0')) + const tx = gns + .connect(me.signer) + .finishSubgraphMigrationFromL1(l1SubgraphId, newSubgraph0.subgraphDeploymentID, metadata) + await expect(tx).revertedWith('GNS: Deployment pre-curated') + }) + it('rejects calls if the subgraph deployment ID is 
zero', async function () { + const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) + const curatedTokens = toGRT('1337') + const lockBlockhash = randomHexBytes(32) + const metadata = randomHexBytes() + const nSignal = toBN('4567') + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ) + await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + + const tx = gns + .connect(me.signer) + .finishSubgraphMigrationFromL1(l1SubgraphId, HashZero, metadata) + await expect(tx).revertedWith('GNS: deploymentID != 0') + }) }) describe('claiming a curator balance using a proof', function () { diff --git a/test/lib/fixtures.ts b/test/lib/fixtures.ts index 4fc8a3be5..69d9ec896 100644 --- a/test/lib/fixtures.ts +++ b/test/lib/fixtures.ts @@ -268,7 +268,7 @@ export class NetworkFixture { await l1FixtureContracts.gns.connect(deployer).setCounterpartGNSAddress(mockL2GNSAddress) await l1FixtureContracts.l1GraphTokenGateway .connect(deployer) - .addToCallhookWhitelist(l1FixtureContracts.gns.address) + .addToCallhookAllowlist(l1FixtureContracts.gns.address) await l1FixtureContracts.l1GraphTokenGateway.connect(deployer).setPaused(false) } From 61575ac391805fbef909346823a447b6a175747d Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 31 Oct 2022 14:24:01 -0300 Subject: [PATCH 027/112] test: more fixes in curation and gns tests --- test/curation/curation.test.ts | 43 +++++++++++------------- test/gateway/l1GraphTokenGateway.test.ts | 2 ++ test/l2/l2GraphTokenGateway.test.ts | 5 +++ test/lib/testHelpers.ts | 4 +++ 4 files changed, 31 insertions(+), 23 deletions(-) diff --git a/test/curation/curation.test.ts b/test/curation/curation.test.ts index b570a20e8..3e9d374a7 100644 --- a/test/curation/curation.test.ts +++ b/test/curation/curation.test.ts @@ -1,5 +1,5 @@ import { expect } 
from 'chai' -import { utils, BigNumber, Event } from 'ethers' +import { utils, BigNumber, Event, Signer } from 'ethers' import { Curation } from '../../build/types/Curation' import { GraphToken } from '../../build/types/GraphToken' @@ -14,7 +14,10 @@ import { formatGRT, Account, impersonateAccount, + setAccountBalance, } from '../lib/testHelpers' +import { GNS } from '../../build/types/GNS' +import { parseEther } from 'ethers/lib/utils' const MAX_PPM = 1000000 @@ -42,13 +45,14 @@ describe('Curation', () => { let governor: Account let curator: Account let stakingMock: Account - let gnsImpersonator: Account + let gnsImpersonator: Signer let fixture: NetworkFixture let curation: Curation let grt: GraphToken let controller: Controller + let gns: GNS // Test values const signalAmountFor1000Tokens = toGRT('3.162277660168379331') @@ -136,30 +140,24 @@ describe('Curation', () => { const shouldMintTaxFree = async (tokensToDeposit: BigNumber, expectedSignal: BigNumber) => { // Before state const beforeTokenTotalSupply = await grt.totalSupply() - const beforeCuratorTokens = await grt.balanceOf(gnsImpersonator.address) - const beforeCuratorSignal = await curation.getCuratorSignal( - gnsImpersonator.address, - subgraphDeploymentID, - ) + const beforeCuratorTokens = await grt.balanceOf(gns.address) + const beforeCuratorSignal = await curation.getCuratorSignal(gns.address, subgraphDeploymentID) const beforePool = await curation.pools(subgraphDeploymentID) const beforePoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) const beforeTotalTokens = await grt.balanceOf(curation.address) // Curate const tx = curation - .connect(gnsImpersonator.signer) + .connect(gnsImpersonator) .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) await expect(tx) .emit(curation, 'Signalled') - .withArgs(gnsImpersonator.address, subgraphDeploymentID, tokensToDeposit, expectedSignal, 0) + .withArgs(gns.address, subgraphDeploymentID, tokensToDeposit, expectedSignal, 0) // After state 
const afterTokenTotalSupply = await grt.totalSupply() - const afterCuratorTokens = await grt.balanceOf(gnsImpersonator.address) - const afterCuratorSignal = await curation.getCuratorSignal( - gnsImpersonator.address, - subgraphDeploymentID, - ) + const afterCuratorTokens = await grt.balanceOf(gns.address) + const afterCuratorSignal = await curation.getCuratorSignal(gns.address, subgraphDeploymentID) const afterPool = await curation.pools(subgraphDeploymentID) const afterPoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) const afterTotalTokens = await grt.balanceOf(curation.address) @@ -239,16 +237,18 @@ describe('Curation', () => { before(async function () { // Use stakingMock so we can call collect - ;[me, governor, curator, stakingMock, gnsImpersonator] = await getAccounts() + ;[me, governor, curator, stakingMock] = await getAccounts() fixture = new NetworkFixture() - ;({ controller, curation, grt } = await fixture.load(governor.signer)) + ;({ controller, curation, grt, gns } = await fixture.load(governor.signer)) + gnsImpersonator = await impersonateAccount(gns.address) + await setAccountBalance(gns.address, parseEther('1')) // Give some funds to the curator and GNS impersonator and approve the curation contract await grt.connect(governor.signer).mint(curator.address, curatorTokens) await grt.connect(curator.signer).approve(curation.address, curatorTokens) - await grt.connect(governor.signer).mint(gnsImpersonator.address, curatorTokens) - await grt.connect(gnsImpersonator.signer).approve(curation.address, curatorTokens) + await grt.connect(governor.signer).mint(gns.address, curatorTokens) + await grt.connect(gnsImpersonator).approve(curation.address, curatorTokens) // Give some funds to the staking contract and approve the curation contract await grt.connect(governor.signer).mint(stakingMock.address, tokensToCollect) @@ -360,9 +360,6 @@ describe('Curation', () => { }) describe('curate tax free (from GNS)', async function () { - 
beforeEach(async function () { - await controller.setContractProxy(utils.id('GNS'), gnsImpersonator.address) - }) it('can not be called by anyone other than GNS', async function () { const tokensToDeposit = await curation.minimumCurationDeposit() const tx = curation @@ -374,7 +371,7 @@ describe('Curation', () => { it('reject deposit below minimum tokens required', async function () { const tokensToDeposit = (await curation.minimumCurationDeposit()).sub(toBN(1)) const tx = curation - .connect(gnsImpersonator.signer) + .connect(gnsImpersonator) .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) await expect(tx).revertedWith('Curation deposit is below minimum required') }) @@ -408,7 +405,7 @@ describe('Curation', () => { const tokensToDeposit = toGRT('1000') const expectedSignal = signalAmountFor1000Tokens const tx = curation - .connect(gnsImpersonator.signer) + .connect(gnsImpersonator) .mintTaxFree(subgraphDeploymentID, tokensToDeposit, expectedSignal.add(1)) await expect(tx).revertedWith('Slippage protection') }) diff --git a/test/gateway/l1GraphTokenGateway.test.ts b/test/gateway/l1GraphTokenGateway.test.ts index c3e3cdd7c..ac9aec4c9 100644 --- a/test/gateway/l1GraphTokenGateway.test.ts +++ b/test/gateway/l1GraphTokenGateway.test.ts @@ -300,6 +300,7 @@ describe('L1GraphTokenGateway', () => { mockRouter.address, mockL2GRT.address, mockL2Gateway.address, + mockL2GNS.address, ) let tx = l1GraphTokenGateway.connect(governor.signer).setPaused(true) await expect(tx).emit(l1GraphTokenGateway, 'PauseChanged').withArgs(true) @@ -331,6 +332,7 @@ describe('L1GraphTokenGateway', () => { mockRouter.address, mockL2GRT.address, mockL2Gateway.address, + mockL2GNS.address, ) await l1GraphTokenGateway.connect(governor.signer).setPauseGuardian(pauseGuardian.address) let tx = l1GraphTokenGateway.connect(pauseGuardian.signer).setPaused(true) diff --git a/test/l2/l2GraphTokenGateway.test.ts b/test/l2/l2GraphTokenGateway.test.ts index 236817afd..2a2595419 100644 --- 
a/test/l2/l2GraphTokenGateway.test.ts +++ b/test/l2/l2GraphTokenGateway.test.ts @@ -28,6 +28,7 @@ describe('L2GraphTokenGateway', () => { let mockL1GRT: Account let mockL1Gateway: Account let pauseGuardian: Account + let mockL1GNS: Account let fixture: NetworkFixture let arbSysMock: FakeContract @@ -55,6 +56,7 @@ describe('L2GraphTokenGateway', () => { mockL1Gateway, l2Receiver, pauseGuardian, + mockL1GNS, ] = await getAccounts() fixture = new NetworkFixture() @@ -188,6 +190,7 @@ describe('L2GraphTokenGateway', () => { mockRouter.address, mockL1GRT.address, mockL1Gateway.address, + mockL1GNS.address, ) let tx = l2GraphTokenGateway.connect(governor.signer).setPaused(true) await expect(tx).emit(l2GraphTokenGateway, 'PauseChanged').withArgs(true) @@ -218,6 +221,7 @@ describe('L2GraphTokenGateway', () => { mockRouter.address, mockL1GRT.address, mockL1Gateway.address, + mockL1GNS.address, ) await l2GraphTokenGateway.connect(governor.signer).setPauseGuardian(pauseGuardian.address) let tx = l2GraphTokenGateway.connect(pauseGuardian.signer).setPaused(true) @@ -280,6 +284,7 @@ describe('L2GraphTokenGateway', () => { mockRouter.address, mockL1GRT.address, mockL1Gateway.address, + mockL1GNS.address, ) }) diff --git a/test/lib/testHelpers.ts b/test/lib/testHelpers.ts index d3cab0849..266c6d628 100644 --- a/test/lib/testHelpers.ts +++ b/test/lib/testHelpers.ts @@ -138,6 +138,10 @@ export async function impersonateAccount(address: string): Promise { return hre.ethers.getSigner(address) } +export async function setAccountBalance(address: string, newBalance: BigNumber): Promise { + await provider().send('hardhat_setBalance', [address, hexValue(newBalance)]) +} + // Adapted from: // https://github.com/livepeer/arbitrum-lpt-bridge/blob/e1a81edda3594e434dbcaa4f1ebc95b7e67ecf2a/test/utils/messaging.ts#L5 export async function getL2SignerFromL1(l1Address: string): Promise { From 50624ae05693c2b0848b81c6a5850298407b4556 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez 
Date: Mon, 31 Oct 2022 14:40:47 -0300 Subject: [PATCH 028/112] test: align to epoch to prevent issues in allocation tests --- test/staking/allocation.test.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/test/staking/allocation.test.ts b/test/staking/allocation.test.ts index 4d6a9150c..790c528d1 100644 --- a/test/staking/allocation.test.ts +++ b/test/staking/allocation.test.ts @@ -582,6 +582,7 @@ describe('Staking:Allocation', () => { for (const tokensToAllocate of [toBN(100), toBN(0)]) { context(`> with ${tokensToAllocate} allocated tokens`, async function () { beforeEach(async function () { + await advanceToNextEpoch(epochManager) await allocate(tokensToAllocate) }) From 9b2b54d154f09d20f18f49f313ff641581f8ba3b Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Wed, 2 Nov 2022 18:36:56 -0300 Subject: [PATCH 029/112] fix: make slot functions public and part of base GNS --- contracts/discovery/GNS.sol | 46 +++++++++++++++++++++++++ contracts/l2/discovery/L2GNS.sol | 58 ++++---------------------------- 2 files changed, 52 insertions(+), 52 deletions(-) diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index b53027502..f8e9d30c9 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -36,6 +36,12 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { // Equates to Connector weight on bancor formula to be CW = 1 uint32 private constant defaultReserveRatio = 1000000; + // Storage slot where the subgraphs mapping is stored on L1GNS + uint256 internal constant SUBGRAPH_MAPPING_SLOT = 18; + + // Storage slot where the legacy subgraphs mapping is stored on L1GNS + uint256 internal constant LEGACY_SUBGRAPH_MAPPING_SLOT = 15; + // -- Events -- event SubgraphNFTUpdated(address subgraphNFT); @@ -791,6 +797,46 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { seqID = legacySubgraphKey.accountSeqID; } + // TODO add NatSpec + function getCuratorSlot(address _curator, uint256 _subgraphID) public 
pure returns (uint256) { + // subgraphs mapping is stored at slot SUBGRAPH_MAPPING_SLOT. + // So our subgraph is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) + // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, + // So the mapping is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) + 2 + // Therefore the nSignal value for msg.sender should be at slot: + return + uint256( + keccak256( + abi.encodePacked( + uint256(_curator), + uint256(keccak256(abi.encodePacked(_subgraphID, SUBGRAPH_MAPPING_SLOT))) + .add(2) + ) + ) + ); + } + + // TODO add NatSpec + function getLegacyCuratorSlot( + address _curator, + address _subgraphCreatorAccount, + uint256 _seqID + ) public pure returns (uint256) { + // legacy subgraphs mapping is stored at slot LEGACY_SUBGRAPH_MAPPING_SLOT. + // So the subgraphs for the account are at slot keccak256(abi.encodePacked(uint256(_subgraphCreatorAccount), uint256(SUBGRAPH_MAPPING_SLOT))) + uint256 accountSlot = uint256( + keccak256( + abi.encodePacked(uint256(_subgraphCreatorAccount), LEGACY_SUBGRAPH_MAPPING_SLOT) + ) + ); + // Then the subgraph for this _seqID should be at: + uint256 subgraphSlot = uint256(keccak256(abi.encodePacked(_seqID, accountSlot))); + // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, + // So the mapping is at slot subgraphSlot + 2 + // Therefore the nSignal value for msg.sender should be at slot: + return uint256(keccak256(abi.encodePacked(uint256(_curator), subgraphSlot.add(2)))); + } + /** * @dev Build a subgraph ID based on the account creating it and a sequence number for that account. * Only used for legacy subgraphs being migrated, as new ones will also use the chainid. 
diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index f1bbc2b9f..0288e9b88 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -29,11 +29,6 @@ contract L2GNS is GNS, IL2GNS { using RLPReader for RLPReader.RLPItem; using SafeMath for uint256; - // Storage slot where the subgraphs mapping is stored on L1GNS - uint256 internal constant SUBGRAPH_MAPPING_SLOT = 18; - // Storage slot where the legacy subgraphs mapping is stored on L1GNS - uint256 internal constant LEGACY_SUBGRAPH_MAPPING_SLOT = 15; - event SubgraphReceivedFromL1(uint256 _subgraphID); event SubgraphMigrationFinalized(uint256 _subgraphID); event CuratorBalanceClaimed( @@ -146,13 +141,13 @@ contract L2GNS is GNS, IL2GNS { bytes memory _blockHeaderRlpBytes, bytes memory _proofRlpBytes ) external override notPartialPaused { - Verifier.BlockHeader memory blockHeader = Verifier.parseBlockHeader(_blockHeaderRlpBytes); IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; - require(migratedData.l2Done, "!MIGRATED"); - require(blockHeader.hash == migratedData.lockedAtBlockHash, "!BLOCKHASH"); require(!migratedData.curatorBalanceClaimed[msg.sender], "ALREADY_CLAIMED"); + Verifier.BlockHeader memory blockHeader = Verifier.parseBlockHeader(_blockHeaderRlpBytes); + require(blockHeader.hash == migratedData.lockedAtBlockHash, "!BLOCKHASH"); + RLPReader.RLPItem[] memory proofs = _proofRlpBytes.toRlpItem().toList(); require(proofs.length == 2, "!N_PROOFS"); @@ -164,7 +159,7 @@ contract L2GNS is GNS, IL2GNS { require(l1GNSAccount.exists, "!ACCOUNT"); - uint256 curatorSlot = _getCuratorSlot(msg.sender, _subgraphID); + uint256 curatorSlot = getCuratorSlot(msg.sender, _subgraphID); Verifier.SlotValue memory curatorNSignalSlot = Verifier.extractSlotValueFromProof( keccak256(abi.encodePacked(curatorSlot)), @@ -219,7 +214,7 @@ contract L2GNS is GNS, IL2GNS { require(l1GNSAccount.exists, "!ACCOUNT"); - uint256 curatorSlot = 
_getLegacyCuratorSlot(msg.sender, _subgraphCreatorAccount, _seqID); + uint256 curatorSlot = getLegacyCuratorSlot(msg.sender, _subgraphCreatorAccount, _seqID); Verifier.SlotValue memory curatorNSignalSlot = Verifier.extractSlotValueFromProof( keccak256(abi.encodePacked(curatorSlot)), @@ -265,6 +260,7 @@ contract L2GNS is GNS, IL2GNS { migratedData.curatorBalanceClaimed[_curator] = true; } + // TODO add NatSpec function _receiveSubgraphFromL1( uint256 _subgraphID, address _subgraphOwner, @@ -294,46 +290,4 @@ contract L2GNS is GNS, IL2GNS { _setSubgraphMetadata(_subgraphID, _subgraphMetadata); emit SubgraphReceivedFromL1(_subgraphID); } - - function _getCuratorSlot(address _curator, uint256 _subgraphID) - internal - pure - returns (uint256) - { - // subgraphs mapping is stored at slot SUBGRAPH_MAPPING_SLOT. - // So our subgraph is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) - // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, - // So the mapping is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) + 2 - // Therefore the nSignal value for msg.sender should be at slot: - return - uint256( - keccak256( - abi.encodePacked( - uint256(_curator), - uint256(keccak256(abi.encodePacked(_subgraphID, SUBGRAPH_MAPPING_SLOT))) - .add(2) - ) - ) - ); - } - - function _getLegacyCuratorSlot( - address _curator, - address _subgraphCreatorAccount, - uint256 _seqID - ) internal pure returns (uint256) { - // legacy subgraphs mapping is stored at slot LEGACY_SUBGRAPH_MAPPING_SLOT. 
- // So the subgraphs for the account are at slot keccak256(abi.encodePacked(uint256(_subgraphCreatorAccount), uint256(SUBGRAPH_MAPPING_SLOT))) - uint256 accountSlot = uint256( - keccak256( - abi.encodePacked(uint256(_subgraphCreatorAccount), LEGACY_SUBGRAPH_MAPPING_SLOT) - ) - ); - // Then the subgraph for this _seqID should be at: - uint256 subgraphSlot = uint256(keccak256(abi.encodePacked(_seqID, accountSlot))); - // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, - // So the mapping is at slot subgraphSlot + 2 - // Therefore the nSignal value for msg.sender should be at slot: - return uint256(keccak256(abi.encodePacked(uint256(_curator), subgraphSlot.add(2)))); - } } From a3fb8ca979af92f60d62a4534de70e9e8a068200 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Wed, 2 Nov 2022 18:39:24 -0300 Subject: [PATCH 030/112] test: progress on setting up the test for the MPT proofs --- test/l2/l2GNS.test.ts | 174 +++++++++++++++++++++++++++++++++++------- 1 file changed, 145 insertions(+), 29 deletions(-) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 5bb58a003..17727f502 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -2,7 +2,14 @@ import { expect } from 'chai' import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' import { arrayify, defaultAbiCoder, hexlify, parseUnits } from 'ethers/lib/utils' -import { getAccounts, randomHexBytes, Account, toGRT, getL2SignerFromL1 } from '../lib/testHelpers' +import { + getAccounts, + randomHexBytes, + Account, + toGRT, + getL2SignerFromL1, + provider, +} from '../lib/testHelpers' import { L2FixtureContracts, NetworkFixture } from '../lib/fixtures' import { toBN } from '../lib/testHelpers' @@ -21,6 +28,83 @@ import { GraphToken } from '../../build/types/GraphToken' const { HashZero } = ethers.constants +interface L1SubgraphParams { + l1SubgraphId: string + curatedTokens: BigNumber + lockBlockhash: string + metadata: string + nSignal: BigNumber +} + 
+// Subgraph values taken from a mainnet subgraph, including a proof +// for a specific curator's balance, obtained using eth_getProof: +// await provider.send('eth_getProof', [ g.contracts.GNS.address, [ '0x2757396e3ce68a9104b5d84b5b0988e37067e780df1ad018184da3616033f432' ], '0x82e59e8ef5e6c4352d363fc5b6ea64d6f605d47ff0c454ea1133be6bacaff487']) +// Where the curator slot is 0x2757396e3ce68a9104b5d84b5b0988e37067e780df1ad018184da3616033f432, +// which was obtained by calling this in a localhost hardhat console: +// await g.contracts.GNS.getCuratorSlot('0xE99bD186DBdA4Dc0A499b158E9E8eA7a628EDD14', '0x715f5c54c9a35783823650c340586f43acede4a907726e4e6499abde90331184') +const mainnetSubgraphWithProof = { + subgraphId: '0x715f5c54c9a35783823650c340586f43acede4a907726e4e6499abde90331184', + curator: '0xE99bD186DBdA4Dc0A499b158E9E8eA7a628EDD14', + blockhash: '0x82e59e8ef5e6c4352d363fc5b6ea64d6f605d47ff0c454ea1133be6bacaff487', + blockNumber: 15884906, + nSignal: BigNumber.from('36740350312298917761'), + curatedTokens: BigNumber.from('1349853341070443183932'), + metadata: '0x7c0b534d4a5ee2a14b3209e678671ad7db2aa23d741a27ad4573daa5da4a67bb', // Obtained with a SubgraphMetadataUpdated event filter + getProofResponse: { + accountProof: [ + 
'0xf90211a08a9701cbb65b3ebd5ffd5d0c4e959a01f0f5777b60a7d3069d560aae9ced519fa05c14f1e3eb1aa27b98c5421813cd0a2ccd607f338aa5c6e51b01b5bbae9b7a22a0a8ef688324a1830e5052802e44e76122378468f08085b74584aab3dd7d655dfca0460ef2adac161e0a86112a2a9246e1d36e8006f344c146b211ec6985f371282fa077fee3062bfd699d695542b880f7cdf1f469500b2b6385cf8fe266bcb619f16ca0799795d800b383e54b1b70b89a462510a26f702e55d6e234ae599885cba183a4a0c21957e0a6895f39ee67c0db2bb2eb732b821fe034549d0f7e68db05fb434db4a0a71cd96e8ef9233fbe6ec72dae6208e06875bc3a2d7aeffc5a68e65a3edd353ca0549db853704cb95f28e3081c3ea5ea9953d6716e5ed1e85f1f07ca06cf3562cca0eb12b05a20566fdc91ff6e87344cb27a7739e2869978592081b3ee5da20e2a72a05cf1f39fc25860045fc1d5b12645d47eda0988b2f847d26bb871dd98f25ef608a05f56eb881b3957f3b0d27463f5db8dc0aa467fcc07420b38e7824e779099c78aa0167782c6e8c2a5c63f823f9a80749dc42807677cdf1baa489b6b3fd29913f66ea092c32da10ee6754d7864639ddd7bc849029bb789a0ac60624c06d54c0c4dea2da04753ee0c68d9ea737a61737780889d3c70853b02c42bdce010141e8974865049a06c66113c6c605086e103ec918a6ac51c0807f1475a8947174c4e7ca0b77d1ab980', + 
'0xf90211a092b4f87a7a56eb1b0cc4e37b1a470983de47b6e59bb9f001713eceeaf1e1b778a0570de7dce4feeb8714bfb203a85b6baaa6e828e4de6cef1b03a2214982523c1ea01366fb14fa2dcc99de2a1a32454d26a1f36c4d16d07dd693e33f7a5227dfd260a0aa87fd12b8b39ec060335012e43f95fb6c3eac97557d7ca8e75219be8f3b7da8a02dd06fd857e864e4f451c07c1b8fcbd01826ae296a943bcd1754baf28dfe1fc1a0844c26cacd9dda7a88d11c2fbc60773c7f6260df5c6cfba0204e666ea0dee13ba03bae90508ad2ca51f8e41ae91a7efdef4eb1894e7aa52b2e6d55b36e3621e484a00e85200c5a56f6a221eb10c4497b4a8dcdaf143fc02c84511d99eb51e1714bfca0dcd8e4198135ff184e437dc7f7be85f77c0b22cd5e2a682bea72d34b1732dba5a01d3f9883287cfbf33961c4700b91d31a5c103246302422f7f670ffcdd0d6da9aa02cb5f762b4718da65563d25a86934ef30127b07980013973942ace532d4693fba056bd9dbc1eeedb8dd7f1bc7b6750a58d50ade9ebc4ea1e448f74d0d28c998190a07125ff6fbc2aa718ee13fa1e18e96df6e1e08e6308b41ace8ce2bfd8a76f5ccaa036328b9158819bc7538f16b3915e58c4b188a6c6022715d164a815715b7e3e83a0a60be8f4456b0fad56abe9e9e34b08a5e6aba3363fb7861a69ac2059503f452ba0da1999c819fd92e96c21aec4206d3b4dd7c3ac322c233a237e2be6837ab377b680', + 
'0xf90211a0a4ec77fb4bb0a98e8ad69a419e3b0d1250a9609955a6c9bf537ba99e0f20a691a06be377d2802e354d166a7352df70b7912452edc1abeb4b1d4c42273a43a901cda06cc656bcb5ed234290549f9fc0cf2ec31f8ab58d3366b0a55272d4b963d57e98a07af81904e659c472a5aecfbab5b1368504fd8686d6c407af0e4e6a4027cb4374a0f66d3d2df212e13913b17f9f744248253843a5106ac91a9a4ece07576e12cc76a02765d2d176513a83f8ce5b91289571ac61dc0b6af1fbca8de8f737f7c14cf2a9a05774d994c9f98969ed39fbc775e8afd7432148bb46e9fc9b2eb085a4f8737ac3a0d122da0dc7a5a62c1d1708e558b396d38630c1168729f82020dcc9fd1e44448da0b17ed04570d4f4da14053fb9384c7edc8f20c11e76c6fdf3364947005a1608ada0deca116b59ebfa7cd4fb5d869212a7c92af35a3b8ee077a23eb17e37fe98ca40a01209069e0803e14a97d9ca11e34179b8857469ddbd6c6703ba33ab6ade014ef6a004f174729c89807aabd2850d35ed48f594875de96d1f89d93249aa0728c5840aa04dd240d8db8127a59db6131e6d32053fbc1884a5a0438edac929d7838a7053dba0bedb75f907bb25814a45ef07364882910e9730ab535cfadf8278d66c0ed17afaa07c4367a2c963808f0722fe007587fd2031b369198ee0794a29a7938f62eac828a039523e340a8c2968ba22b611a694694d467bfc8e7f8a467cef610cc2e8774be980', + 
'0xf90211a07238565a4a96d9c37896f8f48b8daca4e74ea1d4b767d5476a1ca945fe8d9736a0751c83fcffa8f0747cbadb4425e2d83e7c181ba5ba19a6df60931a63546e87aca0f7d9281e8e6c375deea49b98f55f5eb08a9511412e381d7bd96a25a7fbc9ca86a0d7373d9df46a011025971a3be7884a179e5af6fe90868d4105404c06a5c2f908a03c8830d58461246211f9b13dd0afd3ac34e1dac1e55329785e79c1ae14845b6ca06f7454b021f29191f006457aecf4e4695dbd652a4443162cf69cde1845b85df6a08c334bff53b2ba1e8df6f6aee68045ab8ee9f02b38f9766b97de48dcc02edcaea061db2c2f8b55ac092b1e3eba4a1e82f677fa52e4f4095d3dba831cb89f0306c3a04293fdf7986e8a464cf5a976b6ddff82ded83f28eef942ff1d8418d2799b06bfa07505f623087c999f63b8b2407853438ea3f747c4103bacc5fc6c62b330314624a0a2b540fa6b0564f959d8ccdba3659a59a00494fdf9cd1d9f4ea9efbe78227f70a0f9cc8d6b4cf4cb3178733e1daf8dd4e86e8c65d5e153cdae77542fcabdfd75fca0beebf7560922a87838e1c2119dd5f11a23b2f9f492d3d34d6faa8f2052a64722a069a3753b6b036c372444940038e387a6d3f77383cb48a302d0d8742a607652b7a02a1ddc02796d842608f4a372f8cb3beb90996acf8288bbb22d50331b56979c5fa0a0a548553326e53e260ce87c4b0c8271724aacd0115b3d0d28ce43ca208883e380', + 
'0xf90211a0e7efc1ad587fb9ecc0c343d94c894146f9ac499ad3b250368c11d6f531354b8fa07237f64ded7d0941d59656e5b590d3e6fc61093cc1740ad209dd300ee9f0ca12a042ac0a64ac87b16ec296edb580ce0910083690d9d1ace367369351a6fbfe0882a05533447ef90d3623bceccef86860a029ea394aa8783ee6cf3e982bd47ff12c03a0df248d8095d09d69e25381eb1ee6a90407fba3fe1baae6fbd56c2660986573bfa0622e8063b57c51b19747bc851ae0d828d1cde0bbf46f8a5180102dd94459c802a0e800b6c40184f7b7fa683ae191bb4aac1ce585bb6791b99eb4244e351d02f1cba04df04e181c844dd951cb08153bbf92c456bdbc68891bee2b5699f7dfb55b90a7a0833a530c25ed992d20626c55af19c9abe4d1c7a07d5a058dde29907fe65fbcd1a0e133c4cd151948b47d986b93c3572b04098c5da3435c27a9c847c7d5f990bc9ea0f3d3855ffbcc3c26adbeb526fae48536f4dbc39b9bf24f7a17b76335f6b000eea0c7a4d3135faba63cd89f64b0fabf4d726f0543fa347e8cf44db30bfe6ea9e11da0c2e15f8f776d1e3d9cfd29ef9b1e1c5ee5d6334152f587d72ecb9eed5fc3193ea0481f3b80d234d30cd1294075e557549e908d8152903e7f65382a68fd4aa1c683a0a9ba4206ef4055b28d1126bd21afd4ab26898267d7334191a6cc7f8b07a54122a0715b72d6ed83a6da4e9d376f86690caf329adbc5dcda4cfd0839e3f02066e20a80', + 
'0xf90211a00cad8552ddac3a1aa1c598c4d43a80d5a6cac7e58b543c86d5920a78d5b0f0dea0aa5f5aa9836447977b447ef698df483b8e458106b3e64a87005300bf2008562ea0c5925754c6c72a7b07512ee07acdae077ee70e9d3ab04065360fdc4bebdb155fa045f1e4df1025988aa9d0ce23c03f4b366a99286de59d82f1eafdf9a3890905a3a07c86218196a9dea70252b56ee769c10514bbdf33aebcd41fc4392af63febd239a08e202445f7c2fa69da1f1492a1b0e46d8b66b0b7024c7cff23ed5c07191da66fa0b3c179e3f3b9b216e4b35174e4e4d119526af446fdf757ad95e02e49cac28565a0fd74d0a8922342560f6dd820cfa373ec7353c6c66d74bd43351ebb7d103d5ceaa04a8689c3cb5396ee5a99469957f1f0670b0024b2ea3b75e0455797a5175c72a3a085270faec5854bff806bb9951261092745f657e062ae1499d3c5fde81fe14713a07dd8daf759fa359c36c7afc9f7963a557088f5483a8c5d7a0866237fb5f055c5a0d3ec4525a4f0d209a566b07f46a91c609b9c7acbc427db1390485cf4b5105557a005983a192b1f780b095661d92ef4d4102ffd03aad9adb6f3084ba26a11cd0daaa0afd710661f91421da1ece5ea87acc4f76e8af3dad5fa14f0a4ba1ac1a7276449a0ba0374b7981b92f55525b830723b32dce4ebd3c6a13fd06f61b465728ca077c7a0349075b6ff5265073d6ec6676f9b82991159e0bd8170596bcd80573f95576b7380', + '0xf90131a000e3833f5535c6eae67533a61520c8a99ec1d617d8230498ea57aaac1080ebf880a0432d16911e0f89bb5b6faff16255b203ee2e80db68098f75aee4673d327346b680a04911cdce5361377651739ba44d7f0dcb98e7d22c18f51c955480fcfb5e59abd580a09dec563e0a5682d43213c9a511e954705231ebaee0c72f0aa4f95792823ca0e280a01560fe4a9d9af402122701cccc9d3a13f77747b965d5efe09d0dfce95f807dcca08b5cd207548549e40fd1658e38b5b4227f7f03d8dd112541461c50f3c3ff38a180a0fbf6596703d7037eb2cc332d54fdfcda8e95c23e7478cfe31f6c1da43e7222f78080a0a67c5dda3bd39b79b00911abebf9c976950393b186cb5377ea09536dc48a1ff7a016a9123689ca894c201645726ead95406839cf2f8004461c0bd529321165857180', + '0xf851808080808080808080a0600efc8e5996c533afd640c3448c198e1101fa32e5bd246f71dd99c7201575308080808080a0a489e21458e112f8f8336e3e90ce8668b0a07bfe7921696a3f0feb657d05a50a80', + 
'0xf8669d2004b4599193722f03c0e529c8aab049a7fe5ed19ea9c3fed8c9365470b846f8440180a0384c27b2da88cde93261056c98ced4e09bba7ba17ecbd2c37e9c2cf26f836a22a0db307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', + ], + address: '0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825', + balance: '0x0', + codeHash: '0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', + nonce: '0x1', + storageHash: '0x384c27b2da88cde93261056c98ced4e09bba7ba17ecbd2c37e9c2cf26f836a22', + storageProof: [ + { + key: '0x2757396e3ce68a9104b5d84b5b0988e37067e780df1ad018184da3616033f432', + proof: [ + '0xf90211a0a718fd4452e43b9e3d1e25974976f536a603dde7c12e51d8189b4e3ea6c8dd6aa0a71f668d3dba2a9f242174738ff3596c68a84eb9088fffb307f48e061fbdc667a0a89dbcb1109a64587fdcde7b4268af231c5f0d27e1b25062c6c0bf7b48124d67a0bedf16b76516325a66ac35545179a8dd15ee1c6cd11b2e4357d533b19acb4b26a08b9b03cc165363ebc8f9f0590e76f98fc8502810e4ea87700f41f75a7f6692d8a037444b4dc0ef44f017449fe3b9ce45d9193edbf5c88b6e7bc22884424bf10373a0ff5c4bbed0973d8a097d7d8aa9d4534945aeb03a5785ada86b3a0ae079318894a0711fe60589286b4c83daf48cfba53e3242360c18b59ff7d93c72ffc766ed0428a08ae789ec3e7cce80fafd53e3f0c36744e15d1b0f293f93f691e451faa76b9327a0ca40f7477aca5208d28a6f9a00e6f6ad4fc49ebf83f9344443f004ba2d26a8aaa0958fd01948214784c18bdca21ef8419f04e108ea09f06eaea285f64812b98bada0458b092fc9ba5453463ca558487c118d5f0493aa98c1eb8306722c6fdabc2c7fa02c7c57f079bd040ff813a0a74ac9e46beadd2960eb33a6cd311c6aef4514592da0c785693d9760e93b431bf4b1d5933373a2ef1fe20599a38f3ce7c9643c2e9f23a0bdbe251449087722a740e7bdc0801bf55f3849e23e63d9dda2a8409d5163cd01a03dcac75caeb76acf717184167b6b490a6b96b2f0024daaf13dd8390b5a7c1baf80', + 
'0xf90211a0ff5fdab83f7d1d54dfb1fecdd0eb714225aa2533e5e999836c77588671815475a0ee2f0d24e448f85fc8520cf2d98035b2263a8af1db5b837f3fca3124b7b91f48a0787350c2fece0e0b614a68bfb83c20526d19142641b0588005eafb5678599f9ca09fa4124da658c059955c51944334a7891d0c8805f114d0a857079e920cbe6f6ca0b19f68062d189e03ae068799e351f9e1a5927c567067563ccff2f597b8dfd45da05457b729e133026647b6d98180bbbc56076f454fb291879a0c16b22da2a335c5a072031df309f78657aee2acb7b43a486effb4ecd68707d8a438c113bfaf6f1913a0dc0fba7acc1c0a48fc5c978af68fb20c9acaafc7c7515040b1448f324da6050aa0295ff43c4950ab5dee47b2f9e8a04d6a80180643e96488b152ddbd71e25c3b45a0b435feea8e8a46b90fc0156339bce6a210a314694925976b5c82892e1befaaada087dbef5907ae3f99cbe9de597444d7cd15388ccbe88c1b36406f1dad4b0e10eca0f2f0da32846e51736baa70f8bb7922b9fe74002df69ae9d6af48115264b959e9a0462ec92782e4c8f04061daa351713be998149398a2934b4c816b2b9c54e7968da069d20640c46c43d8c5feb541fb0327481d985b623e4f91bea6109d66f486798ea0104e278ae371a220a0d56a72e70ee9657e333baae96329cc862d96eab978804fa06ad2bac3206493db0c51b790f25ecb10ac634112c188c12f5e65496fc14061d180', + 
'0xf901f1a01bce8a1cac817f9bd318953b01214d46d0c2ffcffe3f22c81901d9fb8aa55009a0b4880ebbfa94b50526e3de8b46ac96ea60dda4f4edcb1e0316b0299a5d30b04ca0e0d4603a3cd66de5abbe1bb435ed7c317b9edfdad08a0afe84eba49b9fcf088da0c78be3a18158fcef5f88ecd1044da21d03b37d91b906f1abf1ae4cc753088122a008bb32eda0081f564b3426a9ffdd06d9e2856b498b47315622058f176626ed1280a05f6af6349189ad63f9a3af757da563c33e42ffffe1f69a9d4855957920c583fca09c3789f507808280b4a7c4e6234d6582337a2aae5d394111efb07e55e3c1c448a0b7234c0127f2d87aa64f17f09d7d1d72f5701d5410459309da5d15979b6c8c9aa066aabcac035cc9a5fd651bd52328a36a37d4762a6491eb2808af5267acb3f775a0b2d7d676b32bcfd5e8df9cd7f95a9bb91eac071a5b881d9fbc4d9cee0fafedf6a0102c6f1a447995d714d64ab2729b4261df1226374c2f4844f29b2edc69a8b46ca0d03a7b0103fbcba49b8573b566d50d117b00b2c69c048148ef8795fa0a63c7efa0cf6ad8ab9618d75f6d00f49e7b496c77f4591869bc2d0a3ff65d503b2383cfa9a06488cd46027de9ede4d7a7e10327e673234273533310addef6dc3a969aad0bdea0225875ae810220c85166fe921555be9efacceae0aa4654e9fdc2df25cbd1642380', + '0xf891a01cc2e5507a5150448fe06d254adc104702198a9f8eb5afb15567e80282229e2f80808080808080a04ad7cdbaba63f4b3b9c397858d06888424b7a9aa49d59f9c24fe54211b11d1e68080a09af52c684dd75b985f4aed07ea00ca7ac18201d717064f657fb86f9427aded33808080a03e61dcabfaf134b2b84b92607a7d7abf5b7950f05129a63e77c1d97d7c5e411580', + '0xeb9f20cb3e0c7eaed59eb82ba9e6f55fbf77c28472e242e7bfa15f1e2c3305ef528a8901523b25a875df6c79', + ], + value: '0x1523b25a875df6c79', + }, + ], + }, +} + +// Data for the block we used to get the mainnet subgraph proof. +// This was obtained using eth_getBlockByNumber, and we only kept +// the fields we needed to reconstruct the block header. 
+const mainnetSubgraphBlockData = { + parentHash: '0x402376f31f89f631e5372b7f6522bc8465fa0e5eebf2eae46b8a7725c685cbd9', + sha3Uncles: '0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347', + miner: '0x95222290dd7278aa3ddd389cc1e1d165cc4bafe5', + stateRoot: '0x9d63f5e0289258a0566eaf260c79f152c1ddd624735f2698d9eac5106cfe7852', + transactionsRoot: '0x5d3fca3e5a32dfc190dce3412479e4f3ece7492d103e9eb80b74f3decfda2aa8', + receiptsRoot: '0x0bad122ad39e4b2affe59b70ac5e2062533d3ce61c7f2c077cdebb18d8dafbba', + logsBloom: + '0x3c247501c104808992481280850305232000084104000910020156c4d46009405409158e041824160e04180070010504020881580acc3c200300408001f01011400681100609042e28020188c030447204c46005204a4a2860c0c528b20030009e4a0880128ac0e1150564802c00aad000006308001906204200001000282008404585438303310385cc8780011840c61024008101009f4c832300406818c00c9a18414a000070430a0160b10940612c00c0020180132003c02f0242a0198000230aba568001a250920c19000c6310010e2702501086401840285917098160395239221c0c0288620001f140010588800310512110ec04c14004e840c88271d2', + difficulty: '0x0', + number: '0xf2626a', + gasLimit: '0x1c9c380', + gasUsed: '0x6ae2b2', + timestamp: '0x6362dbc3', + extraData: '0x6265617665726275696c642e6f7267', + mixHash: '0x1751b7bb3547c7f27cc383bd35dcbf06a24f9a7629a3c963f75029828fe0c67e', + nonce: '0x0000000000000000', +} + describe('L2GNS', () => { let me: Account let other: Account @@ -55,13 +139,39 @@ describe('L2GNS', () => { to: await mockL1GatewayL2Alias.getAddress(), value: parseUnits('1', 'ether'), }) - const data = defaultAbiCoder.encode(['bytes', 'bytes'], ['0x', callhookData]) const tx = l2GraphTokenGateway .connect(mockL1GatewayL2Alias) - .finalizeInboundTransfer(mockL1GRT.address, from, to, amount, data) + .finalizeInboundTransfer(mockL1GRT.address, from, to, amount, callhookData) return tx } + const defaultL1SubgraphParams = async function (): Promise { + return { + l1SubgraphId: await buildSubgraphID(me.address, toBN('1'), 1), + curatedTokens: toGRT('1337'), + 
lockBlockhash: randomHexBytes(32), + metadata: randomHexBytes(), + nSignal: toBN('4567'), + } + } + const migrateMockSubgraphFromL1 = async function ( + l1SubgraphId: string, + curatedTokens: BigNumber, + lockBlockhash: string, + metadata: string, + nSignal: BigNumber, + ) { + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ) + await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + + await gns + .connect(me.signer) + .finishSubgraphMigrationFromL1(l1SubgraphId, newSubgraph0.subgraphDeploymentID, metadata) + } + before(async function () { newSubgraph0 = buildSubgraph() ;[ @@ -103,11 +213,8 @@ describe('L2GNS', () => { describe('receiving a subgraph from L1 (onTokenTransfer)', function () { it('cannot be called by someone other than the L2GraphTokenGateway', async function () { - const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) - const curatedTokens = toGRT('1337') - const lockBlockhash = randomHexBytes(32) - const metadata = randomHexBytes() - const nSignal = toBN('4567') + const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], @@ -118,11 +225,8 @@ describe('L2GNS', () => { await expect(tx).revertedWith('ONLY_GATEWAY') }) it('rejects calls if the L1 sender is not the L1GNS', async function () { - const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) - const curatedTokens = toGRT('1337') - const lockBlockhash = randomHexBytes(32) - const metadata = randomHexBytes() - const nSignal = toBN('4567') + const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + await 
defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], @@ -230,11 +334,8 @@ describe('L2GNS', () => { describe('finishing a subgraph migration from L1', function () { it('publishes the migrated subgraph and mints signal with no tax', async function () { - const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) - const curatedTokens = toGRT('1337') - const lockBlockhash = randomHexBytes(32) - const metadata = randomHexBytes() - const nSignal = toBN('4567') + const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], @@ -261,11 +362,8 @@ describe('L2GNS', () => { expect(subgraphAfter.subgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) }) it('cannot be called by someone other than the subgraph owner', async function () { - const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) - const curatedTokens = toGRT('1337') - const lockBlockhash = randomHexBytes(32) - const metadata = randomHexBytes() - const nSignal = toBN('4567') + const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], @@ -296,11 +394,8 @@ describe('L2GNS', () => { await expect(tx).revertedWith('INVALID_SUBGRAPH') }) it('rejects calls to a pre-curated subgraph deployment', async function () { - const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) - const curatedTokens = toGRT('1337') - const lockBlockhash = 
randomHexBytes(32) - const metadata = randomHexBytes() - const nSignal = toBN('4567') + const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], @@ -336,7 +431,28 @@ describe('L2GNS', () => { }) describe('claiming a curator balance using a proof', function () { - it('verifies a proof and assigns a curator balance') + it('verifies a proof and assigns a curator balance (WIP)', async function () { + const l1Subgraph = mainnetSubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + // TODO: + // We need to construct the block header RLP + // Then we encode the proof into an RLP list as well + // And finally we verify the proof + }) it('adds the balance to any existing balance for the curator') it('rejects calls with an invalid proof') it('rejects calls for a subgraph that was not migrated') From 63ba5c08f070d0ece90b48f99c6627ba38efdcb5 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 3 Nov 2022 15:40:01 -0300 Subject: [PATCH 031/112] fix: revert messages, tests for MPT proofs --- contracts/l2/discovery/L2GNS.sol | 1 + .../libraries/MerklePatriciaProofVerifier.sol | 30 ++- contracts/libraries/RLPReader.sol | 16 +- contracts/libraries/StateProofVerifier.sol | 7 +- test/l2/l2GNS.test.ts | 255 +++++++++++++++++- test/lib/mptProofUtils.ts | 79 ++++++ 6 files changed, 351 insertions(+), 37 deletions(-) create mode 
100644 test/lib/mptProofUtils.ts diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 0288e9b88..64b8371ce 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -114,6 +114,7 @@ contract L2GNS is GNS, IL2GNS { // Update pool: constant nSignal, vSignal can change (w/no slippage protection) // Buy all signal from the new deployment subgraphData.vSignal = curation.mintTaxFree(_subgraphDeploymentID, migratedData.tokens, 0); + subgraphData.disabled = false; emit SubgraphPublished(_subgraphID, _subgraphDeploymentID, subgraphData.reserveRatio); emit SubgraphUpgraded( diff --git a/contracts/libraries/MerklePatriciaProofVerifier.sol b/contracts/libraries/MerklePatriciaProofVerifier.sol index c2704b67b..5b734454d 100644 --- a/contracts/libraries/MerklePatriciaProofVerifier.sol +++ b/contracts/libraries/MerklePatriciaProofVerifier.sol @@ -9,6 +9,7 @@ * - Using local copy of the RLPReader library instead of using the package * - Silenced linter warnings about inline assembly * - Renamed a variable for mixedCase consistency + * - Added clearer revert messages */ /** @@ -50,7 +51,10 @@ library MerklePatriciaProofVerifier { if (stack.length == 0) { // Root hash of empty Merkle-Patricia-Trie - require(rootHash == 0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421); + require( + rootHash == 0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421, + "MPT: invalid empty tree root" + ); return new bytes(0); } @@ -62,12 +66,12 @@ library MerklePatriciaProofVerifier { // The root node is hashed with Keccak-256 ... if (i == 0 && rootHash != stack[i].rlpBytesKeccak256()) { - revert(); + revert("MPT: invalid root hash"); } // ... whereas all other nodes are hashed with the MPT // hash function. if (i != 0 && nodeHashHash != _mptHashHash(stack[i])) { - revert(); + revert("MPT: invalid node hash"); } // We verified that stack[i] has the correct hash, so we // may safely decode it. 
@@ -98,7 +102,7 @@ library MerklePatriciaProofVerifier { // Sanity check if (i < stack.length - 1) { // divergent node must come last in proof - revert(); + revert("MPT: divergent node not last"); } return new bytes(0); @@ -108,7 +112,7 @@ library MerklePatriciaProofVerifier { // Sanity check if (i < stack.length - 1) { // leaf node must come last in proof - revert(); + revert("MPT: leaf node not last"); } if (mptKeyOffset < mptKey.length) { @@ -122,7 +126,7 @@ library MerklePatriciaProofVerifier { // Sanity check if (i == stack.length - 1) { // shouldn't be at last level - revert(); + revert("MPT: non-leaf node last"); } if (!node[1].isList()) { @@ -144,14 +148,14 @@ library MerklePatriciaProofVerifier { mptKeyOffset += 1; if (nibble >= 16) { // each element of the path has to be a nibble - revert(); + revert("MPT: element not nibble"); } if (_isEmptyBytesequence(node[nibble])) { // Sanity if (i != stack.length - 1) { // leaf node should be at last level - revert(); + revert("MPT: leaf not last"); } return new bytes(0); @@ -166,7 +170,7 @@ library MerklePatriciaProofVerifier { // Sanity if (i != stack.length - 1) { // should be at last level - revert(); + revert("MPT: end not last"); } return node[16].toBytes(); @@ -210,7 +214,7 @@ library MerklePatriciaProofVerifier { pure returns (bool isLeaf, bytes memory nibbles) { - require(compact.length > 0); + require(compact.length > 0, "MPT: invalid compact length"); uint256 firstNibble = (uint8(compact[0]) >> 4) & 0xF; uint256 skipNibbles; if (firstNibble == 0) { @@ -227,7 +231,7 @@ library MerklePatriciaProofVerifier { isLeaf = true; } else { // Not supposed to happen! 
- revert(); + revert("MPT: invalid first nibble"); } return (isLeaf, _decodeNibbles(compact, skipNibbles)); } @@ -237,10 +241,10 @@ library MerklePatriciaProofVerifier { pure returns (bytes memory nibbles) { - require(compact.length > 0); + require(compact.length > 0, "MPT: _dN invalid compact length"); uint256 length = compact.length * 2; - require(skipNibbles <= length); + require(skipNibbles <= length, "MPT: _dN invalid skipNibbles"); length -= skipNibbles; nibbles = new bytes(length); diff --git a/contracts/libraries/RLPReader.sol b/contracts/libraries/RLPReader.sol index 4c3d177a3..645776969 100644 --- a/contracts/libraries/RLPReader.sol +++ b/contracts/libraries/RLPReader.sol @@ -42,7 +42,7 @@ library RLPReader { * @return The next element in the iteration. */ function next(Iterator memory self) internal pure returns (RLPItem memory) { - require(hasNext(self)); + require(hasNext(self), "RLP: no next"); uint256 ptr = self.nextPtr; uint256 itemLength = _itemLength(ptr); @@ -79,7 +79,7 @@ library RLPReader { * @return An 'Iterator' over the item. */ function iterator(RLPItem memory self) internal pure returns (Iterator memory) { - require(isList(self)); + require(isList(self), "RLP: not list (iterator)"); uint256 ptr = self.memPtr + _payloadOffset(self.memPtr); return Iterator(self, ptr); @@ -115,7 +115,7 @@ library RLPReader { * @param the RLP item containing the encoded list. 
*/ function toList(RLPItem memory item) internal pure returns (RLPItem[] memory) { - require(isList(item)); + require(isList(item), "RLP: not list (toList)"); uint256 items = numItems(item); RLPItem[] memory result = new RLPItem[](items); @@ -190,7 +190,7 @@ library RLPReader { // any non-zero byte except "0x80" is considered true function toBoolean(RLPItem memory item) internal pure returns (bool) { - require(item.len == 1); + require(item.len == 1, "RLP: invalid boolean length"); uint256 result; uint256 memPtr = item.memPtr; assembly { @@ -210,13 +210,13 @@ library RLPReader { function toAddress(RLPItem memory item) internal pure returns (address) { // 1 byte for the length prefix - require(item.len == 21); + require(item.len == 21, "RLP: invalid addr length"); return address(uint160(toUint(item))); } function toUint(RLPItem memory item) internal pure returns (uint256) { - require(item.len > 0 && item.len <= 33); + require(item.len > 0 && item.len <= 33, "RLP: invalid uint length"); (uint256 memPtr, uint256 len) = payloadLocation(item); @@ -236,7 +236,7 @@ library RLPReader { // enforces 32 byte length function toUintStrict(RLPItem memory item) internal pure returns (uint256) { // one byte prefix - require(item.len == 33); + require(item.len == 33, "RLP: invalid uint strict length"); uint256 result; uint256 memPtr = item.memPtr + 1; @@ -248,7 +248,7 @@ library RLPReader { } function toBytes(RLPItem memory item) internal pure returns (bytes memory) { - require(item.len > 0); + require(item.len > 0, "RLP: invalid zero length bytes"); (uint256 memPtr, uint256 len) = payloadLocation(item); bytes memory result = new bytes(len); diff --git a/contracts/libraries/StateProofVerifier.sol b/contracts/libraries/StateProofVerifier.sol index efacf94c3..bcc5c0783 100644 --- a/contracts/libraries/StateProofVerifier.sol +++ b/contracts/libraries/StateProofVerifier.sol @@ -8,6 +8,7 @@ * - Changed solidity version to 0.7.6 (pablo@edgeandnode.com) * - Using local copy of the 
RLPReader library instead of using the package * - Explicitly marked visibility of constants + * - Added revert messages */ pragma solidity 0.7.6; @@ -57,7 +58,7 @@ library StateProofVerifier { { BlockHeader memory header = parseBlockHeader(_headerRlpBytes); // ensure that the block is actually in the blockchain - require(header.hash == blockhash(header.number), "blockhash mismatch"); + require(header.hash == blockhash(header.number), "SPV: blockhash mismatch"); return header; } @@ -73,7 +74,7 @@ library StateProofVerifier { BlockHeader memory result; RLPReader.RLPItem[] memory headerFields = _headerRlpBytes.toRlpItem().toList(); - require(headerFields.length > HEADER_TIMESTAMP_INDEX); + require(headerFields.length > HEADER_TIMESTAMP_INDEX, "SPV: invalid header length"); result.stateRootHash = bytes32(headerFields[HEADER_STATE_ROOT_INDEX].toUint()); result.number = headerFields[HEADER_NUMBER_INDEX].toUint(); @@ -107,7 +108,7 @@ library StateProofVerifier { } RLPReader.RLPItem[] memory acctFields = acctRlpBytes.toRlpItem().toList(); - require(acctFields.length == 4); + require(acctFields.length == 4, "SPV: invalid accFields length"); account.exists = true; account.nonce = acctFields[0].toUint(); diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 17727f502..7e6f5e6f9 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai' import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' -import { arrayify, defaultAbiCoder, hexlify, parseUnits } from 'ethers/lib/utils' +import { arrayify, defaultAbiCoder, hexlify, parseEther, parseUnits } from 'ethers/lib/utils' import { getAccounts, @@ -9,6 +9,8 @@ import { toGRT, getL2SignerFromL1, provider, + impersonateAccount, + setAccountBalance, } from '../lib/testHelpers' import { L2FixtureContracts, NetworkFixture } from '../lib/fixtures' import { toBN } from '../lib/testHelpers' @@ -25,6 +27,7 @@ import { } from '../lib/gnsUtils' import { Curation } from 
'../../build/types/Curation' import { GraphToken } from '../../build/types/GraphToken' +import { encodeMPTProofRLP, getBlockHeaderRLP } from '../lib/mptProofUtils' const { HashZero } = ethers.constants @@ -83,6 +86,38 @@ const mainnetSubgraphWithProof = { }, } +const mainnetProofForDifferentBlock = { + accountProof: [ + '0xf90211a008ba162be4a831acbdfe628aa1867ea899e724b78570d2e2e6a3389c4f51e7aaa0901aa8bef1925917994c6abcb439808bbfae39aae8623b255c3529a898c14e5ca05b3ff03602e8561e2f4fdaccf0daff0afd6c59dd6314a7d5754a8d3658f48864a06ea25db38ef4149ea9716d73996cc67806a9db5a244fbaedb353388b39cd31bfa0bfef765e7fe1f80cc810235ac303c4490fed198b7b7fff3923d1d0004d98a840a0e00f852dd111d919df6f03fa88815797b13909ead7175f731e8f58f8756c0105a0aafce80dc97c6059a771e475e4076e6abd5c447f7e04104fc9d0d6a6dfd0932da0e6b2f28ff41158e14d6b410e99511f6f7554e74f7762629dfb4ad179714b5ac7a0e83694d3f79b52db460b9cf1aba33cc008cd1e12de9bedb08308c260250555f4a0c9436bde76cf5e9712b2d9e03d394e9f779ab45b0f771c79f087d6b289887adca0bf80398498ecfbd534a5771cfc1f79ae5d494aab3faa83b4b7d5858ff0e58580a095118ba475cfd1c776b02ac34716a9bc1d52a00c56721d4ba725d3f876f5f315a0f0ea8039d2ccf1651fb7eb75134367d1ab2f1849b9ac702a924642a230c5bb51a038aaf7f55c78bb4933bd6cfb030e274a459e1fda0431d142505a4e6f6e3a5123a009c2d3201fd7d93a686677076fa92557a47c35bad888d856d9d7849a8ea01b61a0c10c88e88b8d77bcaa5e8f183fb0558ca98a38cebb60184c48645ddd4b38092c80', + 
'0xf90211a0a42a0ef19c23b780e03a3b5f9de863786af2169fa15b85d393fbae2052c07d57a0dfd8f4a92f62a08a297e2525f297a2a730a8edc8aa81cfa92a01dbecdfd16a79a00cfd319d602d6a17eaa69ac1ac48efb56867fc71fb55c24a17dc758492ef510aa01d8c4d2a39257a0f22c164e26504685a6d223a8482fa21f01168a8663573ce62a07f4c2fdf5f1b961b5762ce9d2bd729c33e0dfdc47a89127f61ec6589bf45d675a0c898c361c0affc958650814701aab3746a46e70379035783d95c159db1c09266a00f734e2c6cfca74f7946a5973f773d2ef50019619e5106608e304d5e6746a61ca03ca7b92c054c934f5a321784778475f3cf4356ebfe298a1b0633864c6e8f4c4ea0303e606e88bc5a64911e3fd2366c394cd95a0f7821b635c9dc1675aadd90b338a0fdc3d4895ccae7d5e643e2a556d4d0761756559ba6823e5b579e0eb0f7fab581a0724c78e570600ed9b63ef27f37c833dafff499b020e1ac8dcbe638bf400c0968a0baf64f7207dc9f24b0d6baf69cd2712ed11f5ca94c1b7f3d6a00e2b6e40c1d02a0074b2ce83ab279776f145d98420e421a7db0058a36cf901b7a2ec6b21bb740e4a07a6f49435408d90fca807ede88d4f980a55e9879b902139be8a0b7b4478a6a29a06fd16bc6196aec8f3a236551709c5d375c49b7185e1f98dedbb0ab49794659c6a0c442b425ea1bb4f4b1841468be4a1fd080fb67138439d68b91d235a7d0d8542c80', + 
'0xf90211a0efd2613d0a804f4fa546e7a064da4267b1b5ee413cc0eac950fa068d44d66d58a0ddbe7e522df08d935405a051f6a5ac4ece17b713078279a47c4cda03aa00a1e4a085f48e639de7e35a5929fc18a5283bd886f1db1daa111a2037f191642f813ff3a01b600c46499d6720e691006359324d39ec9dabdf285dd703cc1ac4c5d54eb33fa08f1e1bd5560548120655491e5184d090095a92f778db5884f984d822d0df587ea01f49ad2a577f00dd0e7eba492836f22a38e91acf463a0151d72f3018e1063fd9a09a1f1d77d752cf64d4a9808b881e7308dbd1cd9db6d5f43b5bf861ab23107ee1a0da17e1f1ee4f2d0ba1e86fae61f56fa6973512d3e67d2be803b87b0a708340ffa0d953d5d71f1da9bdf3b639eccdddcebb0b3f279e7a5c8b5a4c623ea9f64afb96a077c6c029a1bd6ae13e57204ad02435c9d16ac08991936b793bfa3a25c9bc6a22a0cb9737b08c26b3b367d27e25c89625a131833ffb6fb32752cede3774e65f0d15a0a41fbe982ce84f9a8c815c1b2624daf2dfee2722dc0e165499ac4715ab0ad6a0a038b116b0c61672e61e5245671ab797a9c5755100081782631a09a0ab7677e5a5a0ff94af9e2b34b8ae9f2bb0851800a8d79409f71ef92dd5ac76bb387fdc4bca17a03bf35ce5cd3f63e84b36e75ff858aaaa824ddb29c4d49e9caeaea9c5aff38d0ba03802c963326159a902c71e5627c44a4435831d126ea13c4457c980f8b456022f80', + 
'0xf90211a0750f9a5ef0d6aef805bc3542ea9e45dd1c1688e676bcfb962604e2f05a935afba0c974aae944f91467b5678fc1f39889b5a52d9013517aa79d1296a0f98d3608eea076670c0ae12a32aba44db37dd7f46015419ac8d4dcb5e7f11dfd0883c6a9a27ba04539dac694cf59b90c7146850d0e21ded661e02673d0066042281b935c83d166a0ddb0213975d2fe1d4266edbf9e5567fe9af3eb32a943dc6de60ab14fc62896e1a0a36ef0befab6acb3465e84e1424ebd0255fa7885765bfc82ebacb13b4c3f4bf2a0909850012d77c57ad74720c0944edcec60fd77cc91e1bf79cfbb8c278e73ca6ca0b843bb94c7543757b3818e585139cdff16e3dc3815943c08eda53c8d9e8153faa052da49f83ce02065944aad3b0df9b026cec65f1622a35e5162cc4f44e50f3da6a0c6d0966eb43a9d33ea326a8d6a1762efc886072e9314bfb93e6d9a81594ea852a0189167569b2e7eb59cae48e74f0b358c129d504c007eec2fda6f4b716149e1aea0d835433ad49cd8106ef8d03eb79a2e6bd9459da70411fe37983ef026c8236471a023e6a589a587d624703575127dbb3865f157fca76190fdc33f2a3f73c39105f0a0c998aa53170787e29bdc444989965032d4258da718175163368a306c04229431a0abb958a4cf70d39472163e1b2309888d510cc3e0445748bb127eb69e5d7c35aea09592f1f09c59b2289749038535defffa1b98bcf7344ad05b9d3cccd75110844a80', + 
'0xf90211a0e7efc1ad587fb9ecc0c343d94c894146f9ac499ad3b250368c11d6f531354b8fa07237f64ded7d0941d59656e5b590d3e6fc61093cc1740ad209dd300ee9f0ca12a042ac0a64ac87b16ec296edb580ce0910083690d9d1ace367369351a6fbfe0882a05533447ef90d3623bceccef86860a029ea394aa8783ee6cf3e982bd47ff12c03a0df248d8095d09d69e25381eb1ee6a90407fba3fe1baae6fbd56c2660986573bfa0622e8063b57c51b19747bc851ae0d828d1cde0bbf46f8a5180102dd94459c802a0e800b6c40184f7b7fa683ae191bb4aac1ce585bb6791b99eb4244e351d02f1cba03104783681ab55e0f05486fcdc8e2fcf784d5a52c78c32832d7ce4794524b824a0833a530c25ed992d20626c55af19c9abe4d1c7a07d5a058dde29907fe65fbcd1a0e133c4cd151948b47d986b93c3572b04098c5da3435c27a9c847c7d5f990bc9ea0f3d3855ffbcc3c26adbeb526fae48536f4dbc39b9bf24f7a17b76335f6b000eea0c7a4d3135faba63cd89f64b0fabf4d726f0543fa347e8cf44db30bfe6ea9e11da0c2e15f8f776d1e3d9cfd29ef9b1e1c5ee5d6334152f587d72ecb9eed5fc3193ea05606f5dc9f0d6d58473595cca2a3bfe3a58cfd9f6f530f52a40dfcf477428f22a0a9ba4206ef4055b28d1126bd21afd4ab26898267d7334191a6cc7f8b07a54122a0715b72d6ed83a6da4e9d376f86690caf329adbc5dcda4cfd0839e3f02066e20a80', + 
'0xf90211a00cad8552ddac3a1aa1c598c4d43a80d5a6cac7e58b543c86d5920a78d5b0f0dea0dd59269713fe63d6391c36afe5676c00a2077bd60482e391360af5c3771248eca0c5925754c6c72a7b07512ee07acdae077ee70e9d3ab04065360fdc4bebdb155fa045f1e4df1025988aa9d0ce23c03f4b366a99286de59d82f1eafdf9a3890905a3a082f4d71cb736ffdf729a683152c26b2f99c8dda4b28693dccd9853c58982a2c4a08e202445f7c2fa69da1f1492a1b0e46d8b66b0b7024c7cff23ed5c07191da66fa0b3c179e3f3b9b216e4b35174e4e4d119526af446fdf757ad95e02e49cac28565a0fd74d0a8922342560f6dd820cfa373ec7353c6c66d74bd43351ebb7d103d5ceaa04a8689c3cb5396ee5a99469957f1f0670b0024b2ea3b75e0455797a5175c72a3a085270faec5854bff806bb9951261092745f657e062ae1499d3c5fde81fe14713a07dd8daf759fa359c36c7afc9f7963a557088f5483a8c5d7a0866237fb5f055c5a0d3ec4525a4f0d209a566b07f46a91c609b9c7acbc427db1390485cf4b5105557a005983a192b1f780b095661d92ef4d4102ffd03aad9adb6f3084ba26a11cd0daaa0afd710661f91421da1ece5ea87acc4f76e8af3dad5fa14f0a4ba1ac1a7276449a0ba0374b7981b92f55525b830723b32dce4ebd3c6a13fd06f61b465728ca077c7a0349075b6ff5265073d6ec6676f9b82991159e0bd8170596bcd80573f95576b7380', + '0xf90131a000e3833f5535c6eae67533a61520c8a99ec1d617d8230498ea57aaac1080ebf880a0432d16911e0f89bb5b6faff16255b203ee2e80db68098f75aee4673d327346b680a0241e5caf848b74ce5efbaa4f83b7df94d3bf5ae87d8fa7f97aff4094b05459bb80a09dec563e0a5682d43213c9a511e954705231ebaee0c72f0aa4f95792823ca0e280a01560fe4a9d9af402122701cccc9d3a13f77747b965d5efe09d0dfce95f807dcca08b5cd207548549e40fd1658e38b5b4227f7f03d8dd112541461c50f3c3ff38a180a0fbf6596703d7037eb2cc332d54fdfcda8e95c23e7478cfe31f6c1da43e7222f78080a0a67c5dda3bd39b79b00911abebf9c976950393b186cb5377ea09536dc48a1ff7a016a9123689ca894c201645726ead95406839cf2f8004461c0bd529321165857180', + '0xf851808080808080808080a0600efc8e5996c533afd640c3448c198e1101fa32e5bd246f71dd99c7201575308080808080a02a55c146621228f2dcddd1135d942971c0ee296df5055f5dee8e92b9ab462c6380', + 
'0xf8669d2004b4599193722f03c0e529c8aab049a7fe5ed19ea9c3fed8c9365470b846f8440180a0a32e5d12226001f1f5f4a3d574ebf9487af319b24eb0f98f02e26dec3944c3f1a0db307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', + ], + address: '0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825', + balance: '0x0', + codeHash: '0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', + nonce: '0x1', + storageHash: '0xa32e5d12226001f1f5f4a3d574ebf9487af319b24eb0f98f02e26dec3944c3f1', + storageProof: [ + { + key: '0x2757396e3ce68a9104b5d84b5b0988e37067e780df1ad018184da3616033f432', + proof: [ + '0xf90211a0a8e75f540571eb3c42baaac34fc6cbf805bab88fc9b56a89d2f34cdb24501870a0a71f668d3dba2a9f242174738ff3596c68a84eb9088fffb307f48e061fbdc667a0885ca4c629f3924e02c8e45cf078e484257af19e1a4b58aee012147ae3a92b95a0bedf16b76516325a66ac35545179a8dd15ee1c6cd11b2e4357d533b19acb4b26a0582f96c7d74fe3db5e03f6bec8bd270851854184c0fe603818618cde931dd9f0a02cd0952b4eeac88968ee221063915ef781eaeabb03de5aa1004b793a4f718cf6a0fbef9a34532cfe338a73ccedd177eaf1499f4a2e64095f055ac7908290baf4f9a0eeba7e56f3973a00a3ff5539d81ffb84df02f3798aee2561c315a00ee4b47489a0daf1b46b0f454e044a2a79454f900e02846f7a83f68f9a24680cbea8b9f78890a0ca9205467afc9ca2b2e12de01bbd97271e34bd39df54319c1efa35fee3e5344ba0958fd01948214784c18bdca21ef8419f04e108ea09f06eaea285f64812b98bada045d19971e0a4e566bd5d8fcdfb0c0fd243e9efa3733fb4f80d77438bd1698577a00fac3ae214e57a589a3dc3d5e5249cb2ab1966f73d35fac13b448270827d1effa0c785693d9760e93b431bf4b1d5933373a2ef1fe20599a38f3ce7c9643c2e9f23a0bdbe251449087722a740e7bdc0801bf55f3849e23e63d9dda2a8409d5163cd01a00f6e4f80e267fafdd75194ca49ac0eb7144bb6dcbbe0d50e810c9386b876524580', + 
'0xf90211a0b719adad765af02b76641e4ac0a5eb918f5c52e9cf0f38f0f507e4e8d4bb1456a0488e936d22182c75c0ec64be2e1e5f0b2066890719376ea408560a182988425da06ee266499e1f3d0c3d3c82e2085fa76c42324298736566ed40059de26880e7a9a09fa4124da658c059955c51944334a7891d0c8805f114d0a857079e920cbe6f6ca074271a2e9c903cb19f1b1cd3ef7c2f8260968be6aaac50cc6d7f8370c225f390a05457b729e133026647b6d98180bbbc56076f454fb291879a0c16b22da2a335c5a072031df309f78657aee2acb7b43a486effb4ecd68707d8a438c113bfaf6f1913a0dc0fba7acc1c0a48fc5c978af68fb20c9acaafc7c7515040b1448f324da6050aa0295ff43c4950ab5dee47b2f9e8a04d6a80180643e96488b152ddbd71e25c3b45a0b435feea8e8a46b90fc0156339bce6a210a314694925976b5c82892e1befaaada087dbef5907ae3f99cbe9de597444d7cd15388ccbe88c1b36406f1dad4b0e10eca0f2f0da32846e51736baa70f8bb7922b9fe74002df69ae9d6af48115264b959e9a0462ec92782e4c8f04061daa351713be998149398a2934b4c816b2b9c54e7968da069d20640c46c43d8c5feb541fb0327481d985b623e4f91bea6109d66f486798ea0104e278ae371a220a0d56a72e70ee9657e333baae96329cc862d96eab978804fa06ad2bac3206493db0c51b790f25ecb10ac634112c188c12f5e65496fc14061d180', + 
'0xf901f1a01bce8a1cac817f9bd318953b01214d46d0c2ffcffe3f22c81901d9fb8aa55009a0b4880ebbfa94b50526e3de8b46ac96ea60dda4f4edcb1e0316b0299a5d30b04ca0e0d4603a3cd66de5abbe1bb435ed7c317b9edfdad08a0afe84eba49b9fcf088da0c78be3a18158fcef5f88ecd1044da21d03b37d91b906f1abf1ae4cc753088122a008bb32eda0081f564b3426a9ffdd06d9e2856b498b47315622058f176626ed1280a05f6af6349189ad63f9a3af757da563c33e42ffffe1f69a9d4855957920c583fca09c3789f507808280b4a7c4e6234d6582337a2aae5d394111efb07e55e3c1c448a0b7234c0127f2d87aa64f17f09d7d1d72f5701d5410459309da5d15979b6c8c9aa066aabcac035cc9a5fd651bd52328a36a37d4762a6491eb2808af5267acb3f775a0b2d7d676b32bcfd5e8df9cd7f95a9bb91eac071a5b881d9fbc4d9cee0fafedf6a0102c6f1a447995d714d64ab2729b4261df1226374c2f4844f29b2edc69a8b46ca0d03a7b0103fbcba49b8573b566d50d117b00b2c69c048148ef8795fa0a63c7efa0cf6ad8ab9618d75f6d00f49e7b496c77f4591869bc2d0a3ff65d503b2383cfa9a06488cd46027de9ede4d7a7e10327e673234273533310addef6dc3a969aad0bdea0225875ae810220c85166fe921555be9efacceae0aa4654e9fdc2df25cbd1642380', + '0xf891a01cc2e5507a5150448fe06d254adc104702198a9f8eb5afb15567e80282229e2f80808080808080a04ad7cdbaba63f4b3b9c397858d06888424b7a9aa49d59f9c24fe54211b11d1e68080a09af52c684dd75b985f4aed07ea00ca7ac18201d717064f657fb86f9427aded33808080a03e61dcabfaf134b2b84b92607a7d7abf5b7950f05129a63e77c1d97d7c5e411580', + '0xeb9f20cb3e0c7eaed59eb82ba9e6f55fbf77c28472e242e7bfa15f1e2c3305ef528a8901523b25a875df6c79', + ], + value: '0x1523b25a875df6c79', + }, + ], +} + // Data for the block we used to get the mainnet subgraph proof. // This was obtained using eth_getBlockByNumber, and we only kept // the fields we needed to reconstruct the block header. 
@@ -103,6 +138,7 @@ const mainnetSubgraphBlockData = { extraData: '0x6265617665726275696c642e6f7267', mixHash: '0x1751b7bb3547c7f27cc383bd35dcbf06a24f9a7629a3c963f75029828fe0c67e', nonce: '0x0000000000000000', + baseFeePerGas: '0x431ed95bc', } describe('L2GNS', () => { @@ -359,6 +395,7 @@ describe('L2GNS', () => { expect(subgraphAfter.vSignal).eq(expectedSignal) expect(migrationDataAfter.l2Done).eq(true) expect(migrationDataAfter.deprecated).eq(false) + expect(subgraphAfter.disabled).eq(false) expect(subgraphAfter.subgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) }) it('cannot be called by someone other than the subgraph owner', async function () { @@ -431,7 +468,7 @@ describe('L2GNS', () => { }) describe('claiming a curator balance using a proof', function () { - it('verifies a proof and assigns a curator balance (WIP)', async function () { + it('verifies a proof and assigns a curator balance', async function () { const l1Subgraph = mainnetSubgraphWithProof // Now we pretend the L1 subgraph was locked and migrated at the specified block @@ -448,18 +485,210 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - // TODO: - // We need to construct the block header RLP - // Then we encode the proof into an RLP list as well - // And finally we verify the proof + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) + await expect(tx) + .emit(gns, 'CuratorBalanceClaimed') + .withArgs( + l1Subgraph.subgraphId, + l1Subgraph.curator, + l1Subgraph.curator, + l1Subgraph.getProofResponse.storageProof[0].value, + ) + const curatorBalance = await 
gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) + expect(curatorBalance).eq(l1Subgraph.getProofResponse.storageProof[0].value) + }) + it('adds the balance to any existing balance for the curator', async function () { + const l1Subgraph = mainnetSubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + + // We add some pre-existing balance on L2 to the curator: + await grt.connect(governor.signer).mint(l1Subgraph.curator, toGRT('100')) + await grt.connect(curatorSigner).approve(gns.address, toGRT('100')) + await gns.connect(curatorSigner).mintSignal(l1Subgraph.subgraphId, toGRT('100'), toBN('0')) + const prevSignal = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) + expect(prevSignal).not.eq(toBN(0)) + + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) + const expectedClaimedSignal = l1Subgraph.getProofResponse.storageProof[0].value + await expect(tx) + .emit(gns, 'CuratorBalanceClaimed') + .withArgs( + l1Subgraph.subgraphId, + l1Subgraph.curator, + l1Subgraph.curator, + expectedClaimedSignal, + ) + const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) + expect(curatorBalance).eq(prevSignal.add(expectedClaimedSignal)) + }) + it('rejects calls with an invalid 
proof (e.g. from a different L1GNS address)', async function () { + const l1Subgraph = mainnetSubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We haven't updated the L1 counterpart address, so GNS will not accept the account proof as valid + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) + // The key for the L1 counterpart is not present in the proof, + // so the verifier will not be able to find a node for the expected path + await expect(tx).revertedWith('MPT: invalid node hash') + }) + it('rejects calls with an invalid proof (e.g. 
from a different curator)', async function () { + const l1Subgraph = mainnetSubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const tx = gns + .connect(me.signer) + .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) + // The curator slot we're looking for isn't present in the proof, + // so the verifier will fail when looking for it + await expect(tx).revertedWith('MPT: invalid node hash') + }) + it('rejects calls for a subgraph that was not migrated', async function () { + const l1Subgraph = mainnetSubgraphWithProof + const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const tx = gns + .connect(me.signer) + .claimL1CuratorBalance(l2Subgraph.id!, blockHeaderRLP, proofRLP) + await expect(tx).revertedWith('!MIGRATED') + }) + it('rejects calls if the balance was already claimed', async function () { + const l1Subgraph = mainnetSubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + 
l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) + await expect(tx) + .emit(gns, 'CuratorBalanceClaimed') + .withArgs( + l1Subgraph.subgraphId, + l1Subgraph.curator, + l1Subgraph.curator, + l1Subgraph.getProofResponse.storageProof[0].value, + ) + const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) + expect(curatorBalance).eq(l1Subgraph.getProofResponse.storageProof[0].value) + + // Now we try to double-claim + const tx2 = gns + .connect(curatorSigner) + .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) + await expect(tx2).revertedWith('ALREADY_CLAIMED') + }) + it('rejects calls with a proof from a different block', async function () { + const l1Subgraph = mainnetSubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(mainnetProofForDifferentBlock) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await 
setAccountBalance(l1Subgraph.curator, parseEther('1000')) + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) + // The root hash from the block header won't match the root hash from the proof + await expect(tx).revertedWith('MPT: invalid root hash') }) - it('adds the balance to any existing balance for the curator') - it('rejects calls with an invalid proof') - it('rejects calls for a subgraph that was not migrated') - it('rejects calls if the balance was already claimed') - it('rejects calls with proof from a different curator') - it('rejects calls with proof from a different contract') - it('rejects calls with a proof from a different block') }) describe('claiming a curator balance with a message from L1', function () { it('assigns a curator balance to a beneficiary') diff --git a/test/lib/mptProofUtils.ts b/test/lib/mptProofUtils.ts new file mode 100644 index 000000000..5fe76abb2 --- /dev/null +++ b/test/lib/mptProofUtils.ts @@ -0,0 +1,79 @@ +import { hexlify, hexZeroPad, RLP } from 'ethers/lib/utils' + +const BLOCK_HEADER_FIELDS = [ + 'parentHash', + 'sha3Uncles', + 'miner', + 'stateRoot', + 'transactionsRoot', + 'receiptsRoot', + 'logsBloom', + 'difficulty', + 'number', + 'gasLimit', + 'gasUsed', + 'timestamp', + 'extraData', + 'mixHash', + 'nonce', + 'baseFeePerGas', +] + +// Expected to come from an eth_getBlockByNumber call +interface GetBlockResponse { + parentHash: string + sha3Uncles: string + miner: string + stateRoot: string + transactionsRoot: string + receiptsRoot: string + logsBloom: string + difficulty: string + number: string + gasLimit: string + gasUsed: string + timestamp: string + extraData: string + mixHash: string + nonce: string + baseFeePerGas: string +} + +interface SlotProof { + key: string + proof: Array + value: string +} +interface GetProofResponse { + accountProof: Array + address: string + balance: string + codeHash: string + nonce: string + storageHash: string + 
storageProof: Array +} + +const toNonzeroEvenLengthHex = (hex: string): string => { + if (hex == '0x0') { + return '0x' + } else if (hex.length % 2 == 0) { + return hex + } else { + return hexZeroPad(hex, Math.floor(hex.length / 2)) + } +} + +export const getBlockHeaderRLP = (block: GetBlockResponse): string => { + const header = BLOCK_HEADER_FIELDS.map((field) => hexlify(toNonzeroEvenLengthHex(block[field]))) + return RLP.encode(header) +} + +export const encodeMPTProofRLP = (proof: GetProofResponse): string => { + if (proof.storageProof.length !== 1) { + throw new Error('Expected exactly one storage slot proof') + } + const accountProof = proof.accountProof.map((node) => RLP.decode(hexlify(node))) + const storageProof = proof.storageProof[0].proof.map((node) => RLP.decode(hexlify(node))) + return RLP.encode([accountProof, storageProof]) +} From 2b0a27ef3f596bb5b487fe4b60c0a9c5128caa90 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 4 Nov 2022 11:26:09 -0300 Subject: [PATCH 032/112] test: more tests for claiming L2 balances --- contracts/l2/discovery/L2GNS.sol | 1 + test/l2/l2GNS.test.ts | 133 +++++++++++++++++++++++++++++-- 2 files changed, 126 insertions(+), 8 deletions(-) diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 64b8371ce..173a60b84 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -259,6 +259,7 @@ contract L2GNS is GNS, IL2GNS { _balance ); migratedData.curatorBalanceClaimed[_curator] = true; + emit CuratorBalanceClaimed(_subgraphID, _curator, _beneficiary, _balance); } // TODO add NatSpec diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 7e6f5e6f9..c86631346 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -171,10 +171,8 @@ describe('L2GNS', () => { ): Promise { const mockL1GatewayL2Alias = await getL2SignerFromL1(mockL1Gateway.address) // Eth for gas: - await me.signer.sendTransaction({ - to: await 
mockL1GatewayL2Alias.getAddress(), - value: parseUnits('1', 'ether'), - }) + await setAccountBalance(await mockL1GatewayL2Alias.getAddress(), parseEther('1')) + const tx = l2GraphTokenGateway .connect(mockL1GatewayL2Alias) .finalizeInboundTransfer(mockL1GRT.address, from, to, amount, callhookData) @@ -690,11 +688,130 @@ describe('L2GNS', () => { await expect(tx).revertedWith('MPT: invalid root hash') }) }) - describe('claiming a curator balance with a message from L1', function () { - it('assigns a curator balance to a beneficiary') - it('adds the balance to any existing balance for the beneficiary') - it('can only be called from the gateway') + describe('claiming a curator balance for a legacy subgraph using a proof', function () { + it('verifies a proof and assigns a curator balance') + it('adds the balance to any existing balance for the curator') + it('rejects calls with an invalid proof (e.g. from a different L1GNS address)') + it('rejects calls with an invalid proof (e.g. from a different curator)') it('rejects calls for a subgraph that was not migrated') it('rejects calls if the balance was already claimed') + it('rejects calls with a proof from a different block') + }) + describe('claiming a curator balance with a message from L1', function () { + it('assigns a curator balance to a beneficiary', async function () { + const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) + // Eth for gas: + await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) + + const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + await defaultL1SubgraphParams() + await migrateMockSubgraphFromL1(l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal) + + const tx = gns + .connect(mockL1GNSL2Alias) + .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + await expect(tx) + .emit(gns, 'CuratorBalanceClaimed') + .withArgs(l1SubgraphId, me.address, other.address, toGRT('10')) + const 
l1CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, me.address) + const l2CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, other.address) + expect(l1CuratorBalance).eq(0) + expect(l2CuratorBalance).eq(toGRT('10')) + }) + it('adds the balance to any existing balance for the beneficiary', async function () { + const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) + // Eth for gas: + await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) + + const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + await defaultL1SubgraphParams() + await migrateMockSubgraphFromL1(l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal) + + await grt.connect(governor.signer).mint(other.address, toGRT('10')) + await grt.connect(other.signer).approve(gns.address, toGRT('10')) + await gns.connect(other.signer).mintSignal(l1SubgraphId, toGRT('10'), toBN(0)) + const prevSignal = await gns.getCuratorSignal(l1SubgraphId, other.address) + + const tx = gns + .connect(mockL1GNSL2Alias) + .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + await expect(tx) + .emit(gns, 'CuratorBalanceClaimed') + .withArgs(l1SubgraphId, me.address, other.address, toGRT('10')) + const l1CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, me.address) + const l2CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, other.address) + expect(l1CuratorBalance).eq(0) + expect(l2CuratorBalance).eq(prevSignal.add(toGRT('10'))) + }) + it('can only be called from the counterpart GNS L2 alias', async function () { + const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + await defaultL1SubgraphParams() + await migrateMockSubgraphFromL1(l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal) + + const tx = gns + .connect(governor.signer) + .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + await 
expect(tx).revertedWith('ONLY_COUNTERPART_GNS') + + const tx2 = gns + .connect(me.signer) + .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + await expect(tx2).revertedWith('ONLY_COUNTERPART_GNS') + + const tx3 = gns + .connect(mockL1GNS.signer) + .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + await expect(tx3).revertedWith('ONLY_COUNTERPART_GNS') + }) + it('rejects calls for a subgraph that does not exist', async function () { + const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) + // Eth for gas: + await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) + + const { l1SubgraphId } = await defaultL1SubgraphParams() + + const tx = gns + .connect(mockL1GNSL2Alias) + .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + await expect(tx).revertedWith('!MIGRATED') + }) + it('rejects calls for an L2-native subgraph', async function () { + const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) + // Eth for gas: + await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) + + const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) + + const tx = gns + .connect(mockL1GNSL2Alias) + .claimL1CuratorBalanceToBeneficiary(l2Subgraph.id!, me.address, toGRT('10'), other.address) + await expect(tx).revertedWith('!MIGRATED') + }) + it('rejects calls if the balance was already claimed', async function () { + const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) + // Eth for gas: + await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) + + const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + await defaultL1SubgraphParams() + await migrateMockSubgraphFromL1(l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal) + + const tx = gns + .connect(mockL1GNSL2Alias) + .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, 
me.address, toGRT('10'), other.address) + await expect(tx) + .emit(gns, 'CuratorBalanceClaimed') + .withArgs(l1SubgraphId, me.address, other.address, toGRT('10')) + const l1CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, me.address) + const l2CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, other.address) + expect(l1CuratorBalance).eq(0) + expect(l2CuratorBalance).eq(toGRT('10')) + + // Now trying again should revert + const tx2 = gns + .connect(mockL1GNSL2Alias) + .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + await expect(tx2).revertedWith('ALREADY_CLAIMED') + }) }) }) From 699a247adeb250d6617c62731201439de8688386 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 4 Nov 2022 12:29:52 -0300 Subject: [PATCH 033/112] test: add tests for claiming from legacy subgraphs --- test/l2/l2GNS.test.ts | 337 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 330 insertions(+), 7 deletions(-) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index c86631346..56975af2b 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -86,6 +86,51 @@ const mainnetSubgraphWithProof = { }, } +// Same but using the following slot for the getProof call: +// getLegacyCuratorSlot(curator, account, accountSeqId) => '0xbda2ea2df35ed9dad1726e4b7b20512302d0f12693c5cf63a4d778d0945b456b' +const mainnetLegacySubgraphWithProof = { + subgraphId: '0xb3424eb47c56b1cd4e82ab42f8a614d7fdc97c88a6887e0b51998968da8bca12', + account: '0x9EfbEA665B79F366fCBB390a55C617257E0C678c', + accountSeqId: 0, + curator: '0x9EfbEA665B79F366fCBB390a55C617257E0C678c', + blockhash: '0x82e59e8ef5e6c4352d363fc5b6ea64d6f605d47ff0c454ea1133be6bacaff487', + blockNumber: 15884906, + nSignal: BigNumber.from('409307499657003028320'), + curatedTokens: BigNumber.from('163746633794032920672522'), + metadata: '0xe559b3bce99b7e621504b23bb804a95daa48e2a8aacb7b836d64626d63b2b5c2', // Obtained with a SubgraphMetadataUpdated event filter + 
getProofResponse: { + accountProof: [ + '0xf90211a08a9701cbb65b3ebd5ffd5d0c4e959a01f0f5777b60a7d3069d560aae9ced519fa05c14f1e3eb1aa27b98c5421813cd0a2ccd607f338aa5c6e51b01b5bbae9b7a22a0a8ef688324a1830e5052802e44e76122378468f08085b74584aab3dd7d655dfca0460ef2adac161e0a86112a2a9246e1d36e8006f344c146b211ec6985f371282fa077fee3062bfd699d695542b880f7cdf1f469500b2b6385cf8fe266bcb619f16ca0799795d800b383e54b1b70b89a462510a26f702e55d6e234ae599885cba183a4a0c21957e0a6895f39ee67c0db2bb2eb732b821fe034549d0f7e68db05fb434db4a0a71cd96e8ef9233fbe6ec72dae6208e06875bc3a2d7aeffc5a68e65a3edd353ca0549db853704cb95f28e3081c3ea5ea9953d6716e5ed1e85f1f07ca06cf3562cca0eb12b05a20566fdc91ff6e87344cb27a7739e2869978592081b3ee5da20e2a72a05cf1f39fc25860045fc1d5b12645d47eda0988b2f847d26bb871dd98f25ef608a05f56eb881b3957f3b0d27463f5db8dc0aa467fcc07420b38e7824e779099c78aa0167782c6e8c2a5c63f823f9a80749dc42807677cdf1baa489b6b3fd29913f66ea092c32da10ee6754d7864639ddd7bc849029bb789a0ac60624c06d54c0c4dea2da04753ee0c68d9ea737a61737780889d3c70853b02c42bdce010141e8974865049a06c66113c6c605086e103ec918a6ac51c0807f1475a8947174c4e7ca0b77d1ab980', + 
'0xf90211a092b4f87a7a56eb1b0cc4e37b1a470983de47b6e59bb9f001713eceeaf1e1b778a0570de7dce4feeb8714bfb203a85b6baaa6e828e4de6cef1b03a2214982523c1ea01366fb14fa2dcc99de2a1a32454d26a1f36c4d16d07dd693e33f7a5227dfd260a0aa87fd12b8b39ec060335012e43f95fb6c3eac97557d7ca8e75219be8f3b7da8a02dd06fd857e864e4f451c07c1b8fcbd01826ae296a943bcd1754baf28dfe1fc1a0844c26cacd9dda7a88d11c2fbc60773c7f6260df5c6cfba0204e666ea0dee13ba03bae90508ad2ca51f8e41ae91a7efdef4eb1894e7aa52b2e6d55b36e3621e484a00e85200c5a56f6a221eb10c4497b4a8dcdaf143fc02c84511d99eb51e1714bfca0dcd8e4198135ff184e437dc7f7be85f77c0b22cd5e2a682bea72d34b1732dba5a01d3f9883287cfbf33961c4700b91d31a5c103246302422f7f670ffcdd0d6da9aa02cb5f762b4718da65563d25a86934ef30127b07980013973942ace532d4693fba056bd9dbc1eeedb8dd7f1bc7b6750a58d50ade9ebc4ea1e448f74d0d28c998190a07125ff6fbc2aa718ee13fa1e18e96df6e1e08e6308b41ace8ce2bfd8a76f5ccaa036328b9158819bc7538f16b3915e58c4b188a6c6022715d164a815715b7e3e83a0a60be8f4456b0fad56abe9e9e34b08a5e6aba3363fb7861a69ac2059503f452ba0da1999c819fd92e96c21aec4206d3b4dd7c3ac322c233a237e2be6837ab377b680', + 
'0xf90211a0a4ec77fb4bb0a98e8ad69a419e3b0d1250a9609955a6c9bf537ba99e0f20a691a06be377d2802e354d166a7352df70b7912452edc1abeb4b1d4c42273a43a901cda06cc656bcb5ed234290549f9fc0cf2ec31f8ab58d3366b0a55272d4b963d57e98a07af81904e659c472a5aecfbab5b1368504fd8686d6c407af0e4e6a4027cb4374a0f66d3d2df212e13913b17f9f744248253843a5106ac91a9a4ece07576e12cc76a02765d2d176513a83f8ce5b91289571ac61dc0b6af1fbca8de8f737f7c14cf2a9a05774d994c9f98969ed39fbc775e8afd7432148bb46e9fc9b2eb085a4f8737ac3a0d122da0dc7a5a62c1d1708e558b396d38630c1168729f82020dcc9fd1e44448da0b17ed04570d4f4da14053fb9384c7edc8f20c11e76c6fdf3364947005a1608ada0deca116b59ebfa7cd4fb5d869212a7c92af35a3b8ee077a23eb17e37fe98ca40a01209069e0803e14a97d9ca11e34179b8857469ddbd6c6703ba33ab6ade014ef6a004f174729c89807aabd2850d35ed48f594875de96d1f89d93249aa0728c5840aa04dd240d8db8127a59db6131e6d32053fbc1884a5a0438edac929d7838a7053dba0bedb75f907bb25814a45ef07364882910e9730ab535cfadf8278d66c0ed17afaa07c4367a2c963808f0722fe007587fd2031b369198ee0794a29a7938f62eac828a039523e340a8c2968ba22b611a694694d467bfc8e7f8a467cef610cc2e8774be980', + 
'0xf90211a07238565a4a96d9c37896f8f48b8daca4e74ea1d4b767d5476a1ca945fe8d9736a0751c83fcffa8f0747cbadb4425e2d83e7c181ba5ba19a6df60931a63546e87aca0f7d9281e8e6c375deea49b98f55f5eb08a9511412e381d7bd96a25a7fbc9ca86a0d7373d9df46a011025971a3be7884a179e5af6fe90868d4105404c06a5c2f908a03c8830d58461246211f9b13dd0afd3ac34e1dac1e55329785e79c1ae14845b6ca06f7454b021f29191f006457aecf4e4695dbd652a4443162cf69cde1845b85df6a08c334bff53b2ba1e8df6f6aee68045ab8ee9f02b38f9766b97de48dcc02edcaea061db2c2f8b55ac092b1e3eba4a1e82f677fa52e4f4095d3dba831cb89f0306c3a04293fdf7986e8a464cf5a976b6ddff82ded83f28eef942ff1d8418d2799b06bfa07505f623087c999f63b8b2407853438ea3f747c4103bacc5fc6c62b330314624a0a2b540fa6b0564f959d8ccdba3659a59a00494fdf9cd1d9f4ea9efbe78227f70a0f9cc8d6b4cf4cb3178733e1daf8dd4e86e8c65d5e153cdae77542fcabdfd75fca0beebf7560922a87838e1c2119dd5f11a23b2f9f492d3d34d6faa8f2052a64722a069a3753b6b036c372444940038e387a6d3f77383cb48a302d0d8742a607652b7a02a1ddc02796d842608f4a372f8cb3beb90996acf8288bbb22d50331b56979c5fa0a0a548553326e53e260ce87c4b0c8271724aacd0115b3d0d28ce43ca208883e380', + 
'0xf90211a0e7efc1ad587fb9ecc0c343d94c894146f9ac499ad3b250368c11d6f531354b8fa07237f64ded7d0941d59656e5b590d3e6fc61093cc1740ad209dd300ee9f0ca12a042ac0a64ac87b16ec296edb580ce0910083690d9d1ace367369351a6fbfe0882a05533447ef90d3623bceccef86860a029ea394aa8783ee6cf3e982bd47ff12c03a0df248d8095d09d69e25381eb1ee6a90407fba3fe1baae6fbd56c2660986573bfa0622e8063b57c51b19747bc851ae0d828d1cde0bbf46f8a5180102dd94459c802a0e800b6c40184f7b7fa683ae191bb4aac1ce585bb6791b99eb4244e351d02f1cba04df04e181c844dd951cb08153bbf92c456bdbc68891bee2b5699f7dfb55b90a7a0833a530c25ed992d20626c55af19c9abe4d1c7a07d5a058dde29907fe65fbcd1a0e133c4cd151948b47d986b93c3572b04098c5da3435c27a9c847c7d5f990bc9ea0f3d3855ffbcc3c26adbeb526fae48536f4dbc39b9bf24f7a17b76335f6b000eea0c7a4d3135faba63cd89f64b0fabf4d726f0543fa347e8cf44db30bfe6ea9e11da0c2e15f8f776d1e3d9cfd29ef9b1e1c5ee5d6334152f587d72ecb9eed5fc3193ea0481f3b80d234d30cd1294075e557549e908d8152903e7f65382a68fd4aa1c683a0a9ba4206ef4055b28d1126bd21afd4ab26898267d7334191a6cc7f8b07a54122a0715b72d6ed83a6da4e9d376f86690caf329adbc5dcda4cfd0839e3f02066e20a80', + 
'0xf90211a00cad8552ddac3a1aa1c598c4d43a80d5a6cac7e58b543c86d5920a78d5b0f0dea0aa5f5aa9836447977b447ef698df483b8e458106b3e64a87005300bf2008562ea0c5925754c6c72a7b07512ee07acdae077ee70e9d3ab04065360fdc4bebdb155fa045f1e4df1025988aa9d0ce23c03f4b366a99286de59d82f1eafdf9a3890905a3a07c86218196a9dea70252b56ee769c10514bbdf33aebcd41fc4392af63febd239a08e202445f7c2fa69da1f1492a1b0e46d8b66b0b7024c7cff23ed5c07191da66fa0b3c179e3f3b9b216e4b35174e4e4d119526af446fdf757ad95e02e49cac28565a0fd74d0a8922342560f6dd820cfa373ec7353c6c66d74bd43351ebb7d103d5ceaa04a8689c3cb5396ee5a99469957f1f0670b0024b2ea3b75e0455797a5175c72a3a085270faec5854bff806bb9951261092745f657e062ae1499d3c5fde81fe14713a07dd8daf759fa359c36c7afc9f7963a557088f5483a8c5d7a0866237fb5f055c5a0d3ec4525a4f0d209a566b07f46a91c609b9c7acbc427db1390485cf4b5105557a005983a192b1f780b095661d92ef4d4102ffd03aad9adb6f3084ba26a11cd0daaa0afd710661f91421da1ece5ea87acc4f76e8af3dad5fa14f0a4ba1ac1a7276449a0ba0374b7981b92f55525b830723b32dce4ebd3c6a13fd06f61b465728ca077c7a0349075b6ff5265073d6ec6676f9b82991159e0bd8170596bcd80573f95576b7380', + '0xf90131a000e3833f5535c6eae67533a61520c8a99ec1d617d8230498ea57aaac1080ebf880a0432d16911e0f89bb5b6faff16255b203ee2e80db68098f75aee4673d327346b680a04911cdce5361377651739ba44d7f0dcb98e7d22c18f51c955480fcfb5e59abd580a09dec563e0a5682d43213c9a511e954705231ebaee0c72f0aa4f95792823ca0e280a01560fe4a9d9af402122701cccc9d3a13f77747b965d5efe09d0dfce95f807dcca08b5cd207548549e40fd1658e38b5b4227f7f03d8dd112541461c50f3c3ff38a180a0fbf6596703d7037eb2cc332d54fdfcda8e95c23e7478cfe31f6c1da43e7222f78080a0a67c5dda3bd39b79b00911abebf9c976950393b186cb5377ea09536dc48a1ff7a016a9123689ca894c201645726ead95406839cf2f8004461c0bd529321165857180', + '0xf851808080808080808080a0600efc8e5996c533afd640c3448c198e1101fa32e5bd246f71dd99c7201575308080808080a0a489e21458e112f8f8336e3e90ce8668b0a07bfe7921696a3f0feb657d05a50a80', + 
'0xf8669d2004b4599193722f03c0e529c8aab049a7fe5ed19ea9c3fed8c9365470b846f8440180a0384c27b2da88cde93261056c98ced4e09bba7ba17ecbd2c37e9c2cf26f836a22a0db307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', + ], + address: '0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825', + balance: '0x0', + codeHash: '0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', + nonce: '0x1', + storageHash: '0x384c27b2da88cde93261056c98ced4e09bba7ba17ecbd2c37e9c2cf26f836a22', + storageProof: [ + { + key: '0xbda2ea2df35ed9dad1726e4b7b20512302d0f12693c5cf63a4d778d0945b456b', + proof: [ + '0xf90211a0a718fd4452e43b9e3d1e25974976f536a603dde7c12e51d8189b4e3ea6c8dd6aa0a71f668d3dba2a9f242174738ff3596c68a84eb9088fffb307f48e061fbdc667a0a89dbcb1109a64587fdcde7b4268af231c5f0d27e1b25062c6c0bf7b48124d67a0bedf16b76516325a66ac35545179a8dd15ee1c6cd11b2e4357d533b19acb4b26a08b9b03cc165363ebc8f9f0590e76f98fc8502810e4ea87700f41f75a7f6692d8a037444b4dc0ef44f017449fe3b9ce45d9193edbf5c88b6e7bc22884424bf10373a0ff5c4bbed0973d8a097d7d8aa9d4534945aeb03a5785ada86b3a0ae079318894a0711fe60589286b4c83daf48cfba53e3242360c18b59ff7d93c72ffc766ed0428a08ae789ec3e7cce80fafd53e3f0c36744e15d1b0f293f93f691e451faa76b9327a0ca40f7477aca5208d28a6f9a00e6f6ad4fc49ebf83f9344443f004ba2d26a8aaa0958fd01948214784c18bdca21ef8419f04e108ea09f06eaea285f64812b98bada0458b092fc9ba5453463ca558487c118d5f0493aa98c1eb8306722c6fdabc2c7fa02c7c57f079bd040ff813a0a74ac9e46beadd2960eb33a6cd311c6aef4514592da0c785693d9760e93b431bf4b1d5933373a2ef1fe20599a38f3ce7c9643c2e9f23a0bdbe251449087722a740e7bdc0801bf55f3849e23e63d9dda2a8409d5163cd01a03dcac75caeb76acf717184167b6b490a6b96b2f0024daaf13dd8390b5a7c1baf80', + 
'0xf90211a004e45dbe38ce18e4066d8ef3f3601a60e99bfe8108bdd33fc2a4412a871a92f2a0009a82307cb4be4409ea695c2a2615dcea6ac78723a41b3c66aaa582ec96c436a0cd3d3c05eaf081845f8d7129f3b95f2426d55049a339bc2cf7dd0c5e0440fcefa0dc43a9e95d04857aec0f278c210c323e2fa55b481e60e1c347d22a7f2e9cc044a0303cab7a9dae9b7e0306c3ac2adc6cca1af8319d4c70994ace25ae651121dcd6a092b1582b96d9f7201a6d77b520381d08dab1884b144ea6fa06a3c2c652e88fd7a0833bcc144833b489d85b9118dec149116ed0142d2b0ad6019ea93c58a523d4cea0d16f7a3d5e5a261eee467c374c4d59bedac32173d3cbc679c6cf6036cb4c67f9a01dc6c5e592451fccb000cbd36a4f111f0a6cd8af2ba300754d5ef34d9be9b9f3a05f5066b1000280d7d72fb9dc1dd388c87bfef059f4777580f33b13cb9a96e65ca0d2eb2645cc5c78883c221f844180b95fb888b4b56345b9fb9b9e2e3e9870d21fa0da3872e955330bcbbc3d89ba192b3e9a114039bd03dd2ce287a37b6e8119b83ea06b130d034f5ec06edefe0f5e75a03f3be8eb655e13d9f79918c19dc0661e29b6a026b2e7a3028a503b128b5eabe667b0b90add26dc0380525f25517195049971aca0be5369d1ed4a91ea36f43e9d9c26aa4e2f4f0f5755c481a9d57026147889c105a0ccd290357453ed6b5108b42ba17821172fcdd1fd985b0a20a72547222fec74ec80', + '0xf901b1a0a09eaceab2d25ef5a6488f0811ba68e754636aea2efe87d46bad185ee5cc353e80a00a13901525f5047839e8ccb6ef391d4d1ec24b5e5bcd4c428b5406116c67b21da05232c3a156b161974b4d4914fecd4962fd09d9c7059ca022ea0332f496f40bcda06e95132ede276dc9d77eea4ec90e7cedcd3343a96baa92d424da4559cb9542f6a068f6197670729e75f09f720167c9f5d15e09bff58c923e6eeef5e54d308cd299a090a495dd586ad6ce8922a7590183f6944d55082cfacfafbfd66935994878c609a01afc7d6bd7ed70e71a927fede6491cd04092c6bfc161bf1c954f73b921fd9de88080a0047707cacaff095fa4b8bfb01f79979023d27fa0510b23003cc4656bb5c771d1a0534d763003352d793214b28f3a07d0bf6be7a7f02d7411086d69a2d89680140ba0d19703221c00ad02c77ac9d97f56994a86831bb12a3921a189c287fef3e2275ca036f8494a2dacb798104d69e255203ff27aa5d767664a8cefc5eccb77886077f2a0d4687c24a348d07ea079c536c8c2f584eb518be959cfecbe6f480d0b7e735326a02093357f40f0bad6b00bf8f0282bfd34a0dd010b6dc52bfc9e1355992c3b1cd280', + 
'0xf851a06786edcc9c67f1504119c4d0e36ae6f9127266389d0ef5ee4afd4b5e50bf3f4a80808080808080808080a0d91ee0a4174d652a7c3c2a2db3d7e968cc3a84f5919e6b7af4fdd4d5a1acbebc8080808080', + '0xeb9f20ac8d80e0fe0db54a620deaa06305b68851845a4da688f862fb5fea1692a58a890876ab3ecd9b0cc6ca', + ], + value: '0x876ab3ecd9b0cc6ca', + }, + ], + }, +} + const mainnetProofForDifferentBlock = { accountProof: [ '0xf90211a008ba162be4a831acbdfe628aa1867ea899e724b78570d2e2e6a3389c4f51e7aaa0901aa8bef1925917994c6abcb439808bbfae39aae8623b255c3529a898c14e5ca05b3ff03602e8561e2f4fdaccf0daff0afd6c59dd6314a7d5754a8d3658f48864a06ea25db38ef4149ea9716d73996cc67806a9db5a244fbaedb353388b39cd31bfa0bfef765e7fe1f80cc810235ac303c4490fed198b7b7fff3923d1d0004d98a840a0e00f852dd111d919df6f03fa88815797b13909ead7175f731e8f58f8756c0105a0aafce80dc97c6059a771e475e4076e6abd5c447f7e04104fc9d0d6a6dfd0932da0e6b2f28ff41158e14d6b410e99511f6f7554e74f7762629dfb4ad179714b5ac7a0e83694d3f79b52db460b9cf1aba33cc008cd1e12de9bedb08308c260250555f4a0c9436bde76cf5e9712b2d9e03d394e9f779ab45b0f771c79f087d6b289887adca0bf80398498ecfbd534a5771cfc1f79ae5d494aab3faa83b4b7d5858ff0e58580a095118ba475cfd1c776b02ac34716a9bc1d52a00c56721d4ba725d3f876f5f315a0f0ea8039d2ccf1651fb7eb75134367d1ab2f1849b9ac702a924642a230c5bb51a038aaf7f55c78bb4933bd6cfb030e274a459e1fda0431d142505a4e6f6e3a5123a009c2d3201fd7d93a686677076fa92557a47c35bad888d856d9d7849a8ea01b61a0c10c88e88b8d77bcaa5e8f183fb0558ca98a38cebb60184c48645ddd4b38092c80', @@ -687,15 +732,293 @@ describe('L2GNS', () => { // The root hash from the block header won't match the root hash from the proof await expect(tx).revertedWith('MPT: invalid root hash') }) + it('rejects calls with a proof from a legacy subgraph', async function () { + const l1Subgraph = mainnetLegacySubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + 
l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) + + await expect(tx).revertedWith('MPT: invalid node hash') + }) }) describe('claiming a curator balance for a legacy subgraph using a proof', function () { - it('verifies a proof and assigns a curator balance') - it('adds the balance to any existing balance for the curator') - it('rejects calls with an invalid proof (e.g. from a different L1GNS address)') - it('rejects calls with an invalid proof (e.g. 
from a different curator)') - it('rejects calls for a subgraph that was not migrated') - it('rejects calls if the balance was already claimed') - it('rejects calls with a proof from a different block') + it('verifies a proof and assigns a curator balance', async function () { + const l1Subgraph = mainnetLegacySubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalanceForLegacySubgraph( + l1Subgraph.account, + l1Subgraph.accountSeqId, + blockHeaderRLP, + proofRLP, + ) + await expect(tx) + .emit(gns, 'CuratorBalanceClaimed') + .withArgs( + l1Subgraph.subgraphId, + l1Subgraph.curator, + l1Subgraph.curator, + l1Subgraph.getProofResponse.storageProof[0].value, + ) + const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) + expect(curatorBalance).eq(l1Subgraph.getProofResponse.storageProof[0].value) + }) + it('adds the balance to any existing balance for the curator', async function () { + const l1Subgraph = mainnetLegacySubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to 
think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + + // We add some pre-existing balance on L2 to the curator: + await grt.connect(governor.signer).mint(l1Subgraph.curator, toGRT('100')) + await grt.connect(curatorSigner).approve(gns.address, toGRT('100')) + await gns.connect(curatorSigner).mintSignal(l1Subgraph.subgraphId, toGRT('100'), toBN('0')) + const prevSignal = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) + expect(prevSignal).not.eq(toBN(0)) + + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalanceForLegacySubgraph( + l1Subgraph.account, + l1Subgraph.accountSeqId, + blockHeaderRLP, + proofRLP, + ) + await expect(tx) + .emit(gns, 'CuratorBalanceClaimed') + .withArgs( + l1Subgraph.subgraphId, + l1Subgraph.curator, + l1Subgraph.curator, + l1Subgraph.getProofResponse.storageProof[0].value, + ) + const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) + expect(curatorBalance).eq(prevSignal.add(l1Subgraph.getProofResponse.storageProof[0].value)) + }) + it('rejects calls with an invalid proof (e.g. 
from a different L1GNS address)', async function () { + const l1Subgraph = mainnetLegacySubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We haven't updated the L1 counterpart address, so GNS will not accept the account proof as valid + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalanceForLegacySubgraph( + l1Subgraph.account, + l1Subgraph.accountSeqId, + blockHeaderRLP, + proofRLP, + ) + // The key for the L1 counterpart is not present in the proof, + // so the verifier will not be able to find a node for the expected path + await expect(tx).revertedWith('MPT: invalid node hash') + }) + it('rejects calls with an invalid proof (e.g. 
from a different curator)', async function () { + const l1Subgraph = mainnetLegacySubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const tx = gns + .connect(me.signer) + .claimL1CuratorBalanceForLegacySubgraph( + l1Subgraph.account, + l1Subgraph.accountSeqId, + blockHeaderRLP, + proofRLP, + ) + // The curator slot we're looking for isn't present in the proof, + // so the verifier will fail when looking for it + await expect(tx).revertedWith('MPT: invalid node hash') + }) + it('rejects calls for a subgraph that was not migrated', async function () { + const l1Subgraph = mainnetLegacySubgraphWithProof + const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const tx = gns + .connect(me.signer) + .claimL1CuratorBalanceForLegacySubgraph(me.address, toBN(0), blockHeaderRLP, proofRLP) + await expect(tx).revertedWith('!MIGRATED') + }) + it('rejects calls if the balance was already claimed', async function () { + const l1Subgraph = mainnetLegacySubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + 
l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalanceForLegacySubgraph( + l1Subgraph.account, + l1Subgraph.accountSeqId, + blockHeaderRLP, + proofRLP, + ) + await expect(tx) + .emit(gns, 'CuratorBalanceClaimed') + .withArgs( + l1Subgraph.subgraphId, + l1Subgraph.curator, + l1Subgraph.curator, + l1Subgraph.getProofResponse.storageProof[0].value, + ) + const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) + expect(curatorBalance).eq(l1Subgraph.getProofResponse.storageProof[0].value) + + // Now we try to double-claim + const tx2 = gns + .connect(curatorSigner) + .claimL1CuratorBalanceForLegacySubgraph( + l1Subgraph.account, + l1Subgraph.accountSeqId, + blockHeaderRLP, + proofRLP, + ) + await expect(tx2).revertedWith('ALREADY_CLAIMED') + }) + it('rejects calls with a proof from a non-legacy subgraph', async function () { + const l1Subgraph = mainnetSubgraphWithProof + + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = 
getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + // For the mainnet subgraph we picked, the curator is also the owner, + // and it was their first subgraph, so the accountSeqId is 0 + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalanceForLegacySubgraph( + l1Subgraph.curator, + toBN('0'), + blockHeaderRLP, + proofRLP, + ) + + await expect(tx).revertedWith('MPT: invalid node hash') + }) }) describe('claiming a curator balance with a message from L1', function () { it('assigns a curator balance to a beneficiary', async function () { From 034049cd4c04686265856856c58a4db1ac61c464 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 4 Nov 2022 12:37:27 -0300 Subject: [PATCH 034/112] fix: config changes for L2GNS instead of GNS --- config/graph.arbitrum-goerli.yml | 6 +++--- config/graph.arbitrum-localhost.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/config/graph.arbitrum-goerli.yml b/config/graph.arbitrum-goerli.yml index 5b41e4382..ff58ff316 100644 --- a/config/graph.arbitrum-goerli.yml +++ b/config/graph.arbitrum-goerli.yml @@ -14,7 +14,7 @@ contracts: contractAddress: "${{Curation.address}}" - fn: "setContractProxy" id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') - contractAddress: "${{GNS.address}}" + contractAddress: "${{L2GNS.address}}" - fn: "setContractProxy" id: "0xf942813d07d17b56de9a9afc8de0ced6e8c053bbfdcc87b7badea4ddcf27c307" # keccak256('DisputeManager') contractAddress: "${{DisputeManager.address}}" @@ -82,7 +82,7 @@ contracts: qrySlashingPercentage: 25000 # in parts per million calls: - fn: "syncAllContracts" - GNS: + L2GNS: proxy: true init: controller: "${{Controller.address}}" @@ -98,7 +98,7 @@ contracts: - fn: "setTokenDescriptor" tokenDescriptor: 
"${{SubgraphNFTDescriptor.address}}" - fn: "setMinter" - minter: "${{GNS.address}}" + minter: "${{L2GNS.address}}" - fn: "transferOwnership" owner: *governor Staking: diff --git a/config/graph.arbitrum-localhost.yml b/config/graph.arbitrum-localhost.yml index a17755fbc..e24e421fe 100644 --- a/config/graph.arbitrum-localhost.yml +++ b/config/graph.arbitrum-localhost.yml @@ -14,7 +14,7 @@ contracts: contractAddress: "${{Curation.address}}" - fn: "setContractProxy" id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') - contractAddress: "${{GNS.address}}" + contractAddress: "${{L2GNS.address}}" - fn: "setContractProxy" id: "0xf942813d07d17b56de9a9afc8de0ced6e8c053bbfdcc87b7badea4ddcf27c307" # keccak256('DisputeManager') contractAddress: "${{DisputeManager.address}}" @@ -82,7 +82,7 @@ contracts: qrySlashingPercentage: 25000 # in parts per million calls: - fn: "syncAllContracts" - GNS: + L2GNS: proxy: true init: controller: "${{Controller.address}}" @@ -98,7 +98,7 @@ contracts: - fn: "setTokenDescriptor" tokenDescriptor: "${{SubgraphNFTDescriptor.address}}" - fn: "setMinter" - minter: "${{GNS.address}}" + minter: "${{L2GNS.address}}" - fn: "transferOwnership" owner: *governor Staking: From 00c16ee9978e1ea6c161cbb64eab8f312ce689fd Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 4 Nov 2022 13:48:08 -0300 Subject: [PATCH 035/112] fix: reverse order of arguments and add tests for legacy view functions --- contracts/discovery/GNS.sol | 25 +++++++++++++++++----- contracts/l2/discovery/L2GNS.sol | 4 ++-- test/gns.test.ts | 36 ++++++++++++++++++++++++++++++++ test/l2/l2GNS.test.ts | 4 ++-- 4 files changed, 60 insertions(+), 9 deletions(-) diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index f8e9d30c9..70d989ad5 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -797,8 +797,15 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { seqID = 
legacySubgraphKey.accountSeqID; } - // TODO add NatSpec - function getCuratorSlot(address _curator, uint256 _subgraphID) public pure returns (uint256) { + /** + * @notice Get the storage slot that corresponds to a curator's signal within a subgraph + * @dev This can be useful to produce proofs to claim balances in L2, as implemented + * in L2GNS. Note this only works with non-legacy subgraphs. + * @param _subgraphID Subgraph ID + * @param _curator Curator address + * @return Storage slot for the curator's signal in the specified subgraph + */ + function getCuratorSlot(uint256 _subgraphID, address _curator) public pure returns (uint256) { // subgraphs mapping is stored at slot SUBGRAPH_MAPPING_SLOT. // So our subgraph is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, @@ -816,11 +823,19 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { ); } - // TODO add NatSpec + /** + * @notice Get the storage slot that corresponds to a curator's signal within a legacy subgraph + * @dev This can be useful to produce proofs to claim balances in L2, as implemented + * in L2GNS. Note this only works with legacy subgraphs. + * @param _subgraphCreatorAccount Address of the account that created the account + * @param _seqID Sequence number for the subgraph + * @param _curator Curator address + * @return Storage slot for the curator's signal in the specified legacy subgraph + */ function getLegacyCuratorSlot( - address _curator, address _subgraphCreatorAccount, - uint256 _seqID + uint256 _seqID, + address _curator ) public pure returns (uint256) { // legacy subgraphs mapping is stored at slot LEGACY_SUBGRAPH_MAPPING_SLOT. 
// So the subgraphs for the account are at slot keccak256(abi.encodePacked(uint256(_subgraphCreatorAccount), uint256(SUBGRAPH_MAPPING_SLOT))) diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 173a60b84..4fb872b63 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -160,7 +160,7 @@ contract L2GNS is GNS, IL2GNS { require(l1GNSAccount.exists, "!ACCOUNT"); - uint256 curatorSlot = getCuratorSlot(msg.sender, _subgraphID); + uint256 curatorSlot = getCuratorSlot(_subgraphID, msg.sender); Verifier.SlotValue memory curatorNSignalSlot = Verifier.extractSlotValueFromProof( keccak256(abi.encodePacked(curatorSlot)), @@ -215,7 +215,7 @@ contract L2GNS is GNS, IL2GNS { require(l1GNSAccount.exists, "!ACCOUNT"); - uint256 curatorSlot = getLegacyCuratorSlot(msg.sender, _subgraphCreatorAccount, _seqID); + uint256 curatorSlot = getLegacyCuratorSlot(_subgraphCreatorAccount, _seqID, msg.sender); Verifier.SlotValue memory curatorNSignalSlot = Verifier.extractSlotValueFromProof( keccak256(abi.encodePacked(curatorSlot)), diff --git a/test/gns.test.ts b/test/gns.test.ts index cd68afdc2..32d24447b 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1062,6 +1062,42 @@ describe('L1GNS', () => { await expect(tx).revertedWith('GNS: Subgraph was already claimed') }) }) + describe('Legacy subgraph view functions', function () { + it('isLegacySubgraph returns whether a subgraph is legacy or not', async function () { + const seqID = toBN('2') + const subgraphId = buildLegacySubgraphID(me.address, seqID) + await legacyGNSMock + .connect(me.signer) + .createLegacySubgraph(seqID, newSubgraph0.subgraphDeploymentID) + await legacyGNSMock + .connect(me.signer) + .migrateLegacySubgraph(me.address, seqID, newSubgraph0.subgraphMetadata) + + expect(await legacyGNSMock.isLegacySubgraph(subgraphId)).eq(true) + + const subgraph0 = await publishNewSubgraph(me, newSubgraph0, legacyGNSMock) + expect(await 
legacyGNSMock.isLegacySubgraph(subgraph0.id)).eq(false) + }) + it('getLegacySubgraphKey returns the account and seqID for a legacy subgraph', async function () { + const seqID = toBN('2') + const subgraphId = buildLegacySubgraphID(me.address, seqID) + await legacyGNSMock + .connect(me.signer) + .createLegacySubgraph(seqID, newSubgraph0.subgraphDeploymentID) + await legacyGNSMock + .connect(me.signer) + .migrateLegacySubgraph(me.address, seqID, newSubgraph0.subgraphMetadata) + const [account, id] = await legacyGNSMock.getLegacySubgraphKey(subgraphId) + expect(account).eq(me.address) + expect(id).eq(seqID) + }) + it('getLegacySubgraphKey returns zero values for a non-legacy subgraph', async function () { + const subgraph0 = await publishNewSubgraph(me, newSubgraph0, legacyGNSMock) + const [account, id] = await legacyGNSMock.getLegacySubgraphKey(subgraph0.id) + expect(account).eq(AddressZero) + expect(id).eq(toBN('0')) + }) + }) describe('Subgraph migration to L2', function () { const publishAndCurateOnSubgraph = async function (): Promise { // Publish a named subgraph-0 -> subgraphDeployment0 diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 56975af2b..acbb6947d 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -44,7 +44,7 @@ interface L1SubgraphParams { // await provider.send('eth_getProof', [ g.contracts.GNS.address, [ '0x2757396e3ce68a9104b5d84b5b0988e37067e780df1ad018184da3616033f432' ], '0x82e59e8ef5e6c4352d363fc5b6ea64d6f605d47ff0c454ea1133be6bacaff487']) // Where the curator slot is 0x2757396e3ce68a9104b5d84b5b0988e37067e780df1ad018184da3616033f432, // which was obtained by calling this in a localhost hardhat console: -// await g.contracts.GNS.getCuratorSlot('0xE99bD186DBdA4Dc0A499b158E9E8eA7a628EDD14', '0x715f5c54c9a35783823650c340586f43acede4a907726e4e6499abde90331184') +// await g.contracts.GNS.getCuratorSlot('0x715f5c54c9a35783823650c340586f43acede4a907726e4e6499abde90331184', '0xE99bD186DBdA4Dc0A499b158E9E8eA7a628EDD14') 
const mainnetSubgraphWithProof = { subgraphId: '0x715f5c54c9a35783823650c340586f43acede4a907726e4e6499abde90331184', curator: '0xE99bD186DBdA4Dc0A499b158E9E8eA7a628EDD14', @@ -87,7 +87,7 @@ const mainnetSubgraphWithProof = { } // Same but using the following slot for the getProof call: -// getLegacyCuratorSlot(curator, account, accountSeqId) => '0xbda2ea2df35ed9dad1726e4b7b20512302d0f12693c5cf63a4d778d0945b456b' +// getLegacyCuratorSlot(account, accountSeqId, curator) => '0xbda2ea2df35ed9dad1726e4b7b20512302d0f12693c5cf63a4d778d0945b456b' const mainnetLegacySubgraphWithProof = { subgraphId: '0xb3424eb47c56b1cd4e82ab42f8a614d7fdc97c88a6887e0b51998968da8bca12', account: '0x9EfbEA665B79F366fCBB390a55C617257E0C678c', From 7ed00786a6a748ca92238878b10c55c5bf66f9ba Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 4 Nov 2022 19:08:35 -0300 Subject: [PATCH 036/112] test: initial structure to test the MPT verifier --- .../tests/MerklePatriciaProofVerifierMock.sol | 25 +++++++ contracts/tests/RLPReaderMock.sol | 0 contracts/tests/StateProofVerifierMock.sol | 0 package.json | 1 + test/l2/l2GNS.test.ts | 32 ++++----- test/lib/mptProofUtils.ts | 2 +- test/mpt.test.ts | 65 +++++++++++++++++++ 7 files changed, 108 insertions(+), 17 deletions(-) create mode 100644 contracts/tests/MerklePatriciaProofVerifierMock.sol create mode 100644 contracts/tests/RLPReaderMock.sol create mode 100644 contracts/tests/StateProofVerifierMock.sol create mode 100644 test/mpt.test.ts diff --git a/contracts/tests/MerklePatriciaProofVerifierMock.sol b/contracts/tests/MerklePatriciaProofVerifierMock.sol new file mode 100644 index 000000000..e1a4bbdec --- /dev/null +++ b/contracts/tests/MerklePatriciaProofVerifierMock.sol @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +import { MerklePatriciaProofVerifier } from "../libraries/MerklePatriciaProofVerifier.sol"; +import { RLPReader } from "../libraries/RLPReader.sol"; + +/** + * 
@title MerklePatriciaProofVerifierMock contract + * @dev This test contract is used to run unit tests on the MerklePatriciaProofVerifier library + */ +contract MerklePatriciaProofVerifierMock { + using RLPReader for RLPReader.RLPItem; + using RLPReader for bytes; + + function extractProofValue( + bytes32 rootHash, + bytes memory path, + bytes memory _proofRlpBytes + ) external pure returns (bytes memory) { + RLPReader.RLPItem[] memory stack = _proofRlpBytes.toRlpItem().toList(); + return MerklePatriciaProofVerifier.extractProofValue(rootHash, path, stack); + } +} diff --git a/contracts/tests/RLPReaderMock.sol b/contracts/tests/RLPReaderMock.sol new file mode 100644 index 000000000..e69de29bb diff --git a/contracts/tests/StateProofVerifierMock.sol b/contracts/tests/StateProofVerifierMock.sol new file mode 100644 index 000000000..e69de29bb diff --git a/package.json b/package.json index ef123981e..b89710a08 100644 --- a/package.json +++ b/package.json @@ -69,6 +69,7 @@ "ipfs-http-client": "47.0.1", "isomorphic-fetch": "^3.0.0", "lint-staged": "^10.5.4", + "merkle-patricia-tree": "^4.2.4", "p-queue": "^6.6.1", "prettier": "^2.2.1", "prettier-plugin-solidity": "^1.0.0-beta.9", diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index acbb6947d..11713f746 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -27,7 +27,7 @@ import { } from '../lib/gnsUtils' import { Curation } from '../../build/types/Curation' import { GraphToken } from '../../build/types/GraphToken' -import { encodeMPTProofRLP, getBlockHeaderRLP } from '../lib/mptProofUtils' +import { encodeMPTStorageProofRLP, getBlockHeaderRLP } from '../lib/mptProofUtils' const { HashZero } = ethers.constants @@ -529,7 +529,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = 
encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await setAccountBalance(l1Subgraph.curator, parseEther('1000')) @@ -565,7 +565,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await setAccountBalance(l1Subgraph.curator, parseEther('1000')) @@ -607,7 +607,7 @@ describe('L2GNS', () => { // We haven't updated the L1 counterpart address, so GNS will not accept the account proof as valid const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await setAccountBalance(l1Subgraph.curator, parseEther('1000')) @@ -636,7 +636,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const tx = gns .connect(me.signer) @@ -655,7 +655,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const tx = gns .connect(me.signer) @@ -680,7 +680,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const 
proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await setAccountBalance(l1Subgraph.curator, parseEther('1000')) @@ -722,7 +722,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(mainnetProofForDifferentBlock) + const proofRLP = encodeMPTStorageProofRLP(mainnetProofForDifferentBlock) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await setAccountBalance(l1Subgraph.curator, parseEther('1000')) @@ -750,7 +750,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await setAccountBalance(l1Subgraph.curator, parseEther('1000')) @@ -780,7 +780,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await setAccountBalance(l1Subgraph.curator, parseEther('1000')) @@ -821,7 +821,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await 
setAccountBalance(l1Subgraph.curator, parseEther('1000')) @@ -867,7 +867,7 @@ describe('L2GNS', () => { // We haven't updated the L1 counterpart address, so GNS will not accept the account proof as valid const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await setAccountBalance(l1Subgraph.curator, parseEther('1000')) @@ -901,7 +901,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const tx = gns .connect(me.signer) @@ -925,7 +925,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const tx = gns .connect(me.signer) @@ -950,7 +950,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await setAccountBalance(l1Subgraph.curator, parseEther('1000')) @@ -1002,7 +1002,7 @@ describe('L2GNS', () => { .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTProofRLP(l1Subgraph.getProofResponse) + const proofRLP = 
encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) const curatorSigner = await impersonateAccount(l1Subgraph.curator) await setAccountBalance(l1Subgraph.curator, parseEther('1000')) diff --git a/test/lib/mptProofUtils.ts b/test/lib/mptProofUtils.ts index 5fe76abb2..0de2a374c 100644 --- a/test/lib/mptProofUtils.ts +++ b/test/lib/mptProofUtils.ts @@ -69,7 +69,7 @@ export const getBlockHeaderRLP = (block: GetBlockResponse): string => { return RLP.encode(header) } -export const encodeMPTProofRLP = (proof: GetProofResponse): string => { +export const encodeMPTStorageProofRLP = (proof: GetProofResponse): string => { if (proof.storageProof.length !== 1) { throw new Error('Expected exactly one storage slot proof') } diff --git a/test/mpt.test.ts b/test/mpt.test.ts new file mode 100644 index 000000000..7869b813c --- /dev/null +++ b/test/mpt.test.ts @@ -0,0 +1,65 @@ +import { expect } from 'chai' +import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' +import { RLP } from 'ethers/lib/utils' +import { BaseTrie } from 'merkle-patricia-tree' + +import { MerklePatriciaProofVerifierMock } from '../build/types/MerklePatriciaProofVerifierMock' +import { deployContract } from './lib/deployment' +import { Account, getAccounts } from './lib/testHelpers' + +const bufferToHex = (buf: Buffer): string => { + return '0x' + buf.toString('hex') +} + +const encodeProofRLP = (proof: Array): string => { + const decodedArr = proof.map((v) => RLP.decode(bufferToHex(v))) + return RLP.encode(decodedArr) +} + +describe('MerklePatriciaProofVerifier', () => { + let me: Account + let mpt: MerklePatriciaProofVerifierMock + + before(async function () { + ;[me] = await getAccounts() + mpt = (await deployContract( + 'MerklePatriciaProofVerifierMock', + me.signer, + )) as unknown as MerklePatriciaProofVerifierMock + }) + + it('verifies a valid proof of inclusion', async function () { + const trie = new BaseTrie() + const key = Buffer.from('foo') + const value = Buffer.from('bar') + await 
trie.put(key, value) + + // We add a few more random values + await trie.put(Buffer.from('food'), Buffer.from('baz')) + await trie.put(Buffer.from('fob'), Buffer.from('bat')) + await trie.put(Buffer.from('zort'), Buffer.from('narf')) + + const proof = await BaseTrie.createProof(trie, key) + + const encodedProof = encodeProofRLP(proof) + + const val = await mpt.extractProofValue(bufferToHex(trie.root), bufferToHex(key), encodedProof) + expect(val).to.equal(bufferToHex(value)) + }) + it('verifies a valid proof of exclusion', async function () { + const trie = new BaseTrie() + const key = Buffer.from('foo') + + // We add a few more random values + await trie.put(Buffer.from('food'), Buffer.from('baz')) + await trie.put(Buffer.from('fob'), Buffer.from('bat')) + await trie.put(Buffer.from('zort'), Buffer.from('narf')) + + const proof = await BaseTrie.createProof(trie, key) + + const encodedProof = encodeProofRLP(proof) + + const val = await mpt.extractProofValue(bufferToHex(trie.root), bufferToHex(key), encodedProof) + expect(val).to.equal('0x') + }) +}) From 1412ca20e7c06dc61e0943aaa48d1b83a704934e Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 8 Nov 2022 10:53:33 -0300 Subject: [PATCH 037/112] test: use newer MPT library --- package.json | 2 +- test/mpt.test.ts | 22 +++++++++---- yarn.lock | 85 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 101 insertions(+), 8 deletions(-) diff --git a/package.json b/package.json index b89710a08..b3074359a 100644 --- a/package.json +++ b/package.json @@ -19,6 +19,7 @@ "@commitlint/cli": "^13.2.1", "@commitlint/config-conventional": "^13.2.0", "@defi-wonderland/smock": "^2.0.7", + "@ethereumjs/trie": "^5.0.1", "@ethersproject/experimental": "^5.6.0", "@graphprotocol/common-ts": "^1.8.3", "@nomiclabs/hardhat-ethers": "^2.0.2", @@ -69,7 +70,6 @@ "ipfs-http-client": "47.0.1", "isomorphic-fetch": "^3.0.0", "lint-staged": "^10.5.4", - "merkle-patricia-tree": "^4.2.4", "p-queue": "^6.6.1", "prettier": 
"^2.2.1", "prettier-plugin-solidity": "^1.0.0-beta.9", diff --git a/test/mpt.test.ts b/test/mpt.test.ts index 7869b813c..218e03c31 100644 --- a/test/mpt.test.ts +++ b/test/mpt.test.ts @@ -1,7 +1,7 @@ import { expect } from 'chai' import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' import { RLP } from 'ethers/lib/utils' -import { BaseTrie } from 'merkle-patricia-tree' +import { Trie } from '@ethereumjs/trie' import { MerklePatriciaProofVerifierMock } from '../build/types/MerklePatriciaProofVerifierMock' import { deployContract } from './lib/deployment' @@ -29,7 +29,7 @@ describe('MerklePatriciaProofVerifier', () => { }) it('verifies a valid proof of inclusion', async function () { - const trie = new BaseTrie() + const trie = new Trie() const key = Buffer.from('foo') const value = Buffer.from('bar') await trie.put(key, value) @@ -39,15 +39,19 @@ describe('MerklePatriciaProofVerifier', () => { await trie.put(Buffer.from('fob'), Buffer.from('bat')) await trie.put(Buffer.from('zort'), Buffer.from('narf')) - const proof = await BaseTrie.createProof(trie, key) + const proof = await trie.createProof(key) const encodedProof = encodeProofRLP(proof) - const val = await mpt.extractProofValue(bufferToHex(trie.root), bufferToHex(key), encodedProof) + const val = await mpt.extractProofValue( + bufferToHex(trie.root()), + bufferToHex(key), + encodedProof, + ) expect(val).to.equal(bufferToHex(value)) }) it('verifies a valid proof of exclusion', async function () { - const trie = new BaseTrie() + const trie = new Trie() const key = Buffer.from('foo') // We add a few more random values @@ -55,11 +59,15 @@ describe('MerklePatriciaProofVerifier', () => { await trie.put(Buffer.from('fob'), Buffer.from('bat')) await trie.put(Buffer.from('zort'), Buffer.from('narf')) - const proof = await BaseTrie.createProof(trie, key) + const proof = await trie.createProof(key) const encodedProof = encodeProofRLP(proof) - const val = await mpt.extractProofValue(bufferToHex(trie.root), 
bufferToHex(key), encodedProof) + const val = await mpt.extractProofValue( + bufferToHex(trie.root()), + bufferToHex(key), + encodedProof, + ) expect(val).to.equal('0x') }) }) diff --git a/yarn.lock b/yarn.lock index 42084d9cd..dbf97f84b 100644 --- a/yarn.lock +++ b/yarn.lock @@ -390,6 +390,22 @@ ethereumjs-util "^7.1.1" miller-rabin "^4.0.0" +"@ethereumjs/rlp@^4.0.0", "@ethereumjs/rlp@^4.0.0-beta.2": + version "4.0.0" + resolved "https://registry.yarnpkg.com/@ethereumjs/rlp/-/rlp-4.0.0.tgz#66719891bd727251a7f233f9ca80212d1994f8c8" + integrity sha512-LM4jS5n33bJN60fM5EC8VeyhUgga6/DjCPBV2vWjnfVtobqtOiNC4SQ1MRFqyBSmJGGdB533JZWewyvlcdJtkQ== + +"@ethereumjs/trie@^5.0.1": + version "5.0.1" + resolved "https://registry.yarnpkg.com/@ethereumjs/trie/-/trie-5.0.1.tgz#3cb5730450839c8a540ec48e16a7825a1d0897e5" + integrity sha512-MA8uXR2pa+r8+wBvlyeZoUewwbUZe4Iy4zKi38THP6/flYvIIY+bTVjf/EA3jMhH68j6nJH5SFu5wi5SBdS/2A== + dependencies: + "@ethereumjs/rlp" "^4.0.0" + "@ethereumjs/util" "^8.0.0" + "@types/readable-stream" "^2.3.13" + ethereum-cryptography "^1.1.2" + readable-stream "^3.6.0" + "@ethereumjs/tx@^3.2.1", "@ethereumjs/tx@^3.5.2": version "3.5.2" resolved "https://registry.yarnpkg.com/@ethereumjs/tx/-/tx-3.5.2.tgz#197b9b6299582ad84f9527ca961466fce2296c1c" @@ -406,6 +422,15 @@ "@ethereumjs/common" "^2.6.3" ethereumjs-util "^7.1.4" +"@ethereumjs/util@^8.0.0": + version "8.0.2" + resolved "https://registry.yarnpkg.com/@ethereumjs/util/-/util-8.0.2.tgz#b7348fc7253649b0f00685a94546c6eee1fad819" + integrity sha512-b1Fcxmq+ckCdoLPhVIBkTcH8szigMapPuEmD8EDakvtI5Na5rzmX1sBW73YQqaPc7iUxGCAzZP1LrFQ7aEMugA== + dependencies: + "@ethereumjs/rlp" "^4.0.0-beta.2" + async "^3.2.4" + ethereum-cryptography "^1.1.2" + "@ethereumjs/vm@^5.6.0": version "5.8.0" resolved "https://registry.yarnpkg.com/@ethereumjs/vm/-/vm-5.8.0.tgz#c9055f96afc13dd7b72893b57fa20027effea6fe" @@ -923,11 +948,26 @@ resolved 
"https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.0.0.tgz#d5e38bfbdaba174805a4e649f13be9a9ed3351ae" integrity sha512-DZVbtY62kc3kkBtMHqwCOfXrT/hnoORy5BJ4+HU1IR59X0KWAOqsfzQPcUl/lQLlG7qXbe/fZ3r/emxtAl+sqg== +"@noble/hashes@1.1.2": + version "1.1.2" + resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.1.2.tgz#e9e035b9b166ca0af657a7848eb2718f0f22f183" + integrity sha512-KYRCASVTv6aeUi1tsF8/vpyR7zpfs3FUzy2Jqm+MU+LmUKhQ0y2FpfwqkCcxSg2ua4GALJd8k2R76WxwZGbQpA== + +"@noble/hashes@~1.1.1": + version "1.1.3" + resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.1.3.tgz#360afc77610e0a61f3417e497dcf36862e4f8111" + integrity sha512-CE0FCR57H2acVI5UOzIGSSIYxZ6v/HOhDR0Ro9VLyhnzLwx0o8W1mmgaqlEUx4049qJDlIBRztv5k+MM8vbO3A== + "@noble/secp256k1@1.5.5", "@noble/secp256k1@~1.5.2": version "1.5.5" resolved "https://registry.yarnpkg.com/@noble/secp256k1/-/secp256k1-1.5.5.tgz#315ab5745509d1a8c8e90d0bdf59823ccf9bcfc3" integrity sha512-sZ1W6gQzYnu45wPrWx8D3kwI2/U29VYTx9OjbDAd7jwRItJ0cSTMPRL/C8AWZFn9kWFLQGqEXVEE86w4Z8LpIQ== +"@noble/secp256k1@1.6.3", "@noble/secp256k1@~1.6.0": + version "1.6.3" + resolved "https://registry.yarnpkg.com/@noble/secp256k1/-/secp256k1-1.6.3.tgz#7eed12d9f4404b416999d0c87686836c4c5c9b94" + integrity sha512-T04e4iTurVy7I8Sw4+c5OSN9/RkPlo1uKxAomtxQNLq8j1uPAqnsqG1bqvY3Jv7c13gyr6dui0zmh/I3+f/JaQ== + "@nodelib/fs.scandir@2.1.5": version "2.1.5" resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" @@ -1071,6 +1111,11 @@ resolved "https://registry.yarnpkg.com/@scure/base/-/base-1.0.0.tgz#109fb595021de285f05a7db6806f2f48296fcee7" integrity sha512-gIVaYhUsy+9s58m/ETjSJVKHhKTBMmcRb9cEV5/5dwvfDlfORjKrFsDeDHWRrm6RjcPvCLZFwGJjAjLj1gg4HA== +"@scure/base@~1.1.0": + version "1.1.1" + resolved "https://registry.yarnpkg.com/@scure/base/-/base-1.1.1.tgz#ebb651ee52ff84f420097055f4bf46cfba403938" + integrity 
sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA== + "@scure/bip32@1.0.1": version "1.0.1" resolved "https://registry.yarnpkg.com/@scure/bip32/-/bip32-1.0.1.tgz#1409bdf9f07f0aec99006bb0d5827693418d3aa5" @@ -1080,6 +1125,15 @@ "@noble/secp256k1" "~1.5.2" "@scure/base" "~1.0.0" +"@scure/bip32@1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@scure/bip32/-/bip32-1.1.0.tgz#dea45875e7fbc720c2b4560325f1cf5d2246d95b" + integrity sha512-ftTW3kKX54YXLCxH6BB7oEEoJfoE2pIgw7MINKAs5PsS6nqKPuKk1haTF/EuHmYqG330t5GSrdmtRuHaY1a62Q== + dependencies: + "@noble/hashes" "~1.1.1" + "@noble/secp256k1" "~1.6.0" + "@scure/base" "~1.1.0" + "@scure/bip39@1.0.0": version "1.0.0" resolved "https://registry.yarnpkg.com/@scure/bip39/-/bip39-1.0.0.tgz#47504e58de9a56a4bbed95159d2d6829fa491bb0" @@ -1088,6 +1142,14 @@ "@noble/hashes" "~1.0.0" "@scure/base" "~1.0.0" +"@scure/bip39@1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@scure/bip39/-/bip39-1.1.0.tgz#92f11d095bae025f166bef3defcc5bf4945d419a" + integrity sha512-pwrPOS16VeTKg98dYXQyIjJEcWfz7/1YJIwxUEPFfQPtc86Ym/1sVgQ2RLoD43AazMk2l/unK4ITySSpW2+82w== + dependencies: + "@noble/hashes" "~1.1.1" + "@scure/base" "~1.1.0" + "@sentry/core@5.30.0": version "5.30.0" resolved "https://registry.yarnpkg.com/@sentry/core/-/core-5.30.0.tgz#6b203664f69e75106ee8b5a2fe1d717379b331f3" @@ -1449,6 +1511,14 @@ resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.7.tgz#63bb7d067db107cc1e457c303bc25d511febf6cb" integrity sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw== +"@types/readable-stream@^2.3.13": + version "2.3.15" + resolved "https://registry.yarnpkg.com/@types/readable-stream/-/readable-stream-2.3.15.tgz#3d79c9ceb1b6a57d5f6e6976f489b9b5384321ae" + integrity sha512-oM5JSKQCcICF1wvGgmecmHldZ48OZamtMxcGGVICOJA8o8cahXC1zEVAif8iwoc5j8etxFaRFnf095+CDsuoFQ== + dependencies: + "@types/node" "*" + safe-buffer "~5.1.1" + 
"@types/resolve@^0.0.8": version "0.0.8" resolved "https://registry.yarnpkg.com/@types/resolve/-/resolve-0.0.8.tgz#f26074d238e02659e323ce1a13d041eee280e194" @@ -2066,6 +2136,11 @@ async@^3.2.3: resolved "https://registry.yarnpkg.com/async/-/async-3.2.3.tgz#ac53dafd3f4720ee9e8a160628f18ea91df196c9" integrity sha512-spZRyzKL5l5BZQrr/6m/SqFdBN0q3OCI0f9rjfBzCMBIP4p75P620rR3gTmaksNOhmzgdxcaxdNfMy6anrbM0g== +async@^3.2.4: + version "3.2.4" + resolved "https://registry.yarnpkg.com/async/-/async-3.2.4.tgz#2d22e00f8cddeb5fde5dd33522b56d1cf569a81c" + integrity sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ== + asynckit@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" @@ -4785,6 +4860,16 @@ ethereum-cryptography@^1.0.3: "@scure/bip32" "1.0.1" "@scure/bip39" "1.0.0" +ethereum-cryptography@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-1.1.2.tgz#74f2ac0f0f5fe79f012c889b3b8446a9a6264e6d" + integrity sha512-XDSJlg4BD+hq9N2FjvotwUET9Tfxpxc3kWGE2AqUG5vcbeunnbImVk3cj6e/xT3phdW21mE8R5IugU4fspQDcQ== + dependencies: + "@noble/hashes" "1.1.2" + "@noble/secp256k1" "1.6.3" + "@scure/bip32" "1.1.0" + "@scure/bip39" "1.1.0" + ethereum-waffle@^3.3.0: version "3.4.4" resolved "https://registry.yarnpkg.com/ethereum-waffle/-/ethereum-waffle-3.4.4.tgz#1378b72040697857b7f5e8f473ca8f97a37b5840" From 39493c8b4f1c22da3c9eadb6e32225d201945b93 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 8 Nov 2022 11:39:04 -0300 Subject: [PATCH 038/112] fix: can't upgrade subgraphNFT, so metadata must be added manually --- contracts/discovery/L1GNS.sol | 11 +- contracts/l2/discovery/IL2GNS.sol | 1 + contracts/l2/discovery/L2GNS.sol | 17 +- test/gns.test.ts | 6 +- test/l2/l2GNS.test.ts | 249 ++++++++++++++++++++++-------- 5 files changed, 199 insertions(+), 85 deletions(-) diff --git 
a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 9321277fb..122d36608 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -89,11 +89,7 @@ contract L1GNS is GNS, L1ArbitrumMessenger { require(ownerOf(_subgraphID) == msg.sender, "GNS: Must be authorized"); migrationData.l1Done = true; - bytes memory extraData = encodeSubgraphMetadataForL2( - _subgraphID, - migrationData, - subgraphData - ); + bytes memory extraData = encodeSubgraphDataForL2(_subgraphID, migrationData, subgraphData); bytes memory data = abi.encode(maxSubmissionCost, extraData); IGraphToken grt = graphToken(); @@ -113,7 +109,7 @@ contract L1GNS is GNS, L1ArbitrumMessenger { emit SubgraphSentToL2(_subgraphID); } - function encodeSubgraphMetadataForL2( + function encodeSubgraphDataForL2( uint256 _subgraphID, SubgraphL2MigrationData storage migrationData, SubgraphData storage subgraphData @@ -124,8 +120,7 @@ contract L1GNS is GNS, L1ArbitrumMessenger { ownerOf(_subgraphID), blockhash(migrationData.lockedAtBlock), subgraphData.nSignal, - subgraphData.reserveRatio, - subgraphNFT.getSubgraphMetadata(_subgraphID) + subgraphData.reserveRatio ); } diff --git a/contracts/l2/discovery/IL2GNS.sol b/contracts/l2/discovery/IL2GNS.sol index 9fe75dcf5..361637bc9 100644 --- a/contracts/l2/discovery/IL2GNS.sol +++ b/contracts/l2/discovery/IL2GNS.sol @@ -8,6 +8,7 @@ interface IL2GNS is ICallhookReceiver { function finishSubgraphMigrationFromL1( uint256 _subgraphID, bytes32 _subgraphDeploymentID, + bytes32 _subgraphMetadata, bytes32 _versionMetadata ) external; diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 4fb872b63..7d913f509 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -76,9 +76,8 @@ contract L2GNS is GNS, IL2GNS { address subgraphOwner, bytes32 lockedAtBlockHash, uint256 nSignal, - uint32 reserveRatio, - bytes32 subgraphMetadata - ) = abi.decode(_data, (uint256, address, bytes32, 
uint256, uint32, bytes32)); + uint32 reserveRatio + ) = abi.decode(_data, (uint256, address, bytes32, uint256, uint32)); _receiveSubgraphFromL1( subgraphID, @@ -86,14 +85,14 @@ contract L2GNS is GNS, IL2GNS { _amount, lockedAtBlockHash, nSignal, - reserveRatio, - subgraphMetadata + reserveRatio ); } function finishSubgraphMigrationFromL1( uint256 _subgraphID, bytes32 _subgraphDeploymentID, + bytes32 _subgraphMetadata, bytes32 _versionMetadata ) external override notPartialPaused onlySubgraphAuth(_subgraphID) { IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; @@ -116,6 +115,9 @@ contract L2GNS is GNS, IL2GNS { subgraphData.vSignal = curation.mintTaxFree(_subgraphDeploymentID, migratedData.tokens, 0); subgraphData.disabled = false; + // Set the token metadata + _setSubgraphMetadata(_subgraphID, _subgraphMetadata); + emit SubgraphPublished(_subgraphID, _subgraphDeploymentID, subgraphData.reserveRatio); emit SubgraphUpgraded( _subgraphID, @@ -269,8 +271,7 @@ contract L2GNS is GNS, IL2GNS { uint256 _tokens, bytes32 _lockedAtBlockHash, uint256 _nSignal, - uint32 _reserveRatio, - bytes32 _subgraphMetadata + uint32 _reserveRatio ) internal { IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); @@ -288,8 +289,6 @@ contract L2GNS is GNS, IL2GNS { // This function will check the if tokenID already exists. 
_mintNFT(_subgraphOwner, _subgraphID); - // Set the token metadata - _setSubgraphMetadata(_subgraphID, _subgraphMetadata); emit SubgraphReceivedFromL1(_subgraphID); } } diff --git a/test/gns.test.ts b/test/gns.test.ts index 32d24447b..f50c52c19 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1262,14 +1262,13 @@ describe('L1GNS', () => { expect(migrationData.l1Done).eq(true) const expectedCallhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], [ subgraph0.id, me.address, lockBlockhash, subgraphBefore.nSignal, subgraphBefore.reserveRatio, - newSubgraph0.subgraphMetadata, ], ) @@ -1316,14 +1315,13 @@ describe('L1GNS', () => { expect(migrationData.l1Done).eq(true) const expectedCallhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], + ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], [ subgraphID, me.address, lockBlockhash, subgraphBefore.nSignal, subgraphBefore.reserveRatio, - newSubgraph0.subgraphMetadata, ], ) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 11713f746..cf735eb72 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -35,7 +35,8 @@ interface L1SubgraphParams { l1SubgraphId: string curatedTokens: BigNumber lockBlockhash: string - metadata: string + subgraphMetadata: string + versionMetadata: string nSignal: BigNumber } @@ -229,7 +230,8 @@ describe('L2GNS', () => { l1SubgraphId: await buildSubgraphID(me.address, toBN('1'), 1), curatedTokens: toGRT('1337'), lockBlockhash: randomHexBytes(32), - metadata: randomHexBytes(), + subgraphMetadata: randomHexBytes(), + versionMetadata: randomHexBytes(), nSignal: toBN('4567'), } } @@ -237,18 +239,24 @@ describe('L2GNS', () => { l1SubgraphId: string, curatedTokens: BigNumber, lockBlockhash: string, - metadata: string, + subgraphMetadata: string, + versionMetadata: string, nSignal: BigNumber, ) { const callhookData 
= defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) await gns .connect(me.signer) - .finishSubgraphMigrationFromL1(l1SubgraphId, newSubgraph0.subgraphDeploymentID, metadata) + .finishSubgraphMigrationFromL1( + l1SubgraphId, + newSubgraph0.subgraphDeploymentID, + subgraphMetadata, + versionMetadata, + ) } before(async function () { @@ -292,11 +300,11 @@ describe('L2GNS', () => { describe('receiving a subgraph from L1 (onTokenTransfer)', function () { it('cannot be called by someone other than the L2GraphTokenGateway', async function () { - const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + const { l1SubgraphId, curatedTokens, lockBlockhash, nSignal } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], ) const tx = gns .connect(me.signer) @@ -304,11 +312,11 @@ describe('L2GNS', () => { await expect(tx).revertedWith('ONLY_GATEWAY') }) it('rejects calls if the L1 sender is not the L1GNS', async function () { - const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = + const { l1SubgraphId, curatedTokens, lockBlockhash, nSignal } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + 
['uint256', 'address', 'bytes32', 'uint256', 'uint32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], ) const tx = gatewayFinalizeTransfer(me.address, gns.address, curatedTokens, callhookData) @@ -318,11 +326,10 @@ describe('L2GNS', () => { const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) const curatedTokens = toGRT('1337') const lockBlockhash = randomHexBytes(32) - const metadata = randomHexBytes() const nSignal = toBN('4567') const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], ) const tx = gatewayFinalizeTransfer( mockL1GNS.address, @@ -335,7 +342,6 @@ describe('L2GNS', () => { .emit(l2GraphTokenGateway, 'DepositFinalized') .withArgs(mockL1GRT.address, mockL1GNS.address, gns.address, curatedTokens) await expect(tx).emit(gns, 'SubgraphReceivedFromL1').withArgs(l1SubgraphId) - await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l1SubgraphId, metadata) const migrationData = await gns.subgraphL2MigrationData(l1SubgraphId) const subgraphData = await gns.subgraphs(l1SubgraphId) @@ -362,11 +368,10 @@ describe('L2GNS', () => { const l1SubgraphId = await buildSubgraphID(me.address, toBN('0'), 1) const curatedTokens = toGRT('1337') const lockBlockhash = randomHexBytes(32) - const metadata = randomHexBytes() const nSignal = toBN('4567') const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], ) const tx = gatewayFinalizeTransfer( mockL1GNS.address, @@ -379,7 
+384,6 @@ describe('L2GNS', () => { .emit(l2GraphTokenGateway, 'DepositFinalized') .withArgs(mockL1GRT.address, mockL1GNS.address, gns.address, curatedTokens) await expect(tx).emit(gns, 'SubgraphReceivedFromL1').withArgs(l1SubgraphId) - await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l1SubgraphId, metadata) const migrationData = await gns.subgraphL2MigrationData(l1SubgraphId) const subgraphData = await gns.subgraphs(l1SubgraphId) @@ -413,11 +417,17 @@ describe('L2GNS', () => { describe('finishing a subgraph migration from L1', function () { it('publishes the migrated subgraph and mints signal with no tax', async function () { - const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = - await defaultL1SubgraphParams() + const { + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) // Calculate expected signal before minting, which changes the price @@ -428,10 +438,23 @@ describe('L2GNS', () => { const tx = gns .connect(me.signer) - .finishSubgraphMigrationFromL1(l1SubgraphId, newSubgraph0.subgraphDeploymentID, metadata) + .finishSubgraphMigrationFromL1( + l1SubgraphId, + newSubgraph0.subgraphDeploymentID, + subgraphMetadata, + versionMetadata, + ) await expect(tx) .emit(gns, 'SubgraphPublished') .withArgs(l1SubgraphId, newSubgraph0.subgraphDeploymentID, DEFAULT_RESERVE_RATIO) + await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l1SubgraphId, subgraphMetadata) + await expect(tx) + .emit(gns, 'SubgraphUpgraded') + 
.withArgs(l1SubgraphId, expectedSignal, curatedTokens, newSubgraph0.subgraphDeploymentID) + await expect(tx) + .emit(gns, 'SubgraphVersionUpdated') + .withArgs(l1SubgraphId, newSubgraph0.subgraphDeploymentID, versionMetadata) + await expect(tx).emit(gns, 'SubgraphMigrationFinalized').withArgs(l1SubgraphId) const subgraphAfter = await gns.subgraphs(l1SubgraphId) const migrationDataAfter = await gns.subgraphL2MigrationData(l1SubgraphId) @@ -442,17 +465,28 @@ describe('L2GNS', () => { expect(subgraphAfter.subgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) }) it('cannot be called by someone other than the subgraph owner', async function () { - const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = - await defaultL1SubgraphParams() + const { + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) const tx = gns .connect(other.signer) - .finishSubgraphMigrationFromL1(l1SubgraphId, newSubgraph0.subgraphDeploymentID, metadata) + .finishSubgraphMigrationFromL1( + l1SubgraphId, + newSubgraph0.subgraphDeploymentID, + subgraphMetadata, + versionMetadata, + ) await expect(tx).revertedWith('GNS: Must be authorized') }) it('rejects calls for a subgraph that does not exist', async function () { @@ -461,7 +495,12 @@ describe('L2GNS', () => { const tx = gns .connect(me.signer) - .finishSubgraphMigrationFromL1(l1SubgraphId, newSubgraph0.subgraphDeploymentID, metadata) + .finishSubgraphMigrationFromL1( + l1SubgraphId, + newSubgraph0.subgraphDeploymentID, + 
metadata, + metadata, + ) await expect(tx).revertedWith('ERC721: owner query for nonexistent token') }) it('rejects calls for a subgraph that was not migrated', async function () { @@ -470,15 +509,26 @@ describe('L2GNS', () => { const tx = gns .connect(me.signer) - .finishSubgraphMigrationFromL1(l2Subgraph.id, newSubgraph0.subgraphDeploymentID, metadata) + .finishSubgraphMigrationFromL1( + l2Subgraph.id, + newSubgraph0.subgraphDeploymentID, + metadata, + metadata, + ) await expect(tx).revertedWith('INVALID_SUBGRAPH') }) it('rejects calls to a pre-curated subgraph deployment', async function () { - const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = - await defaultL1SubgraphParams() + const { + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32', 'bytes32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -488,7 +538,12 @@ describe('L2GNS', () => { .mint(newSubgraph0.subgraphDeploymentID, toGRT('100'), toBN('0')) const tx = gns .connect(me.signer) - .finishSubgraphMigrationFromL1(l1SubgraphId, newSubgraph0.subgraphDeploymentID, metadata) + .finishSubgraphMigrationFromL1( + l1SubgraphId, + newSubgraph0.subgraphDeploymentID, + subgraphMetadata, + versionMetadata, + ) await expect(tx).revertedWith('GNS: Deployment pre-curated') }) it('rejects calls if the subgraph deployment ID is zero', async function () { @@ -498,14 +553,14 @@ describe('L2GNS', () => { const metadata = randomHexBytes() const nSignal = toBN('4567') const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 
'uint256', 'uint32', 'bytes32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO, metadata], + ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], + [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) const tx = gns .connect(me.signer) - .finishSubgraphMigrationFromL1(l1SubgraphId, HashZero, metadata) + .finishSubgraphMigrationFromL1(l1SubgraphId, HashZero, metadata, metadata) await expect(tx).revertedWith('GNS: deploymentID != 0') }) }) @@ -513,13 +568,14 @@ describe('L2GNS', () => { describe('claiming a curator balance using a proof', function () { it('verifies a proof and assigns a curator balance', async function () { const l1Subgraph = mainnetSubgraphWithProof - + const versionMetadata = randomHexBytes() // Dummy value // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -549,6 +605,7 @@ describe('L2GNS', () => { }) it('adds the balance to any existing balance for the curator', async function () { const l1Subgraph = mainnetSubgraphWithProof + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( @@ -556,6 +613,7 @@ describe('L2GNS', () => { l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -594,13 +652,14 @@ describe('L2GNS', () => { }) it('rejects calls with an invalid proof (e.g. 
from a different L1GNS address)', async function () { const l1Subgraph = mainnetSubgraphWithProof - + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -620,13 +679,14 @@ describe('L2GNS', () => { }) it('rejects calls with an invalid proof (e.g. from a different curator)', async function () { const l1Subgraph = mainnetSubgraphWithProof - + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -664,13 +724,14 @@ describe('L2GNS', () => { }) it('rejects calls if the balance was already claimed', async function () { const l1Subgraph = mainnetSubgraphWithProof - + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -706,13 +767,14 @@ describe('L2GNS', () => { }) it('rejects calls with a proof from a different block', async function () { const l1Subgraph = mainnetSubgraphWithProof - + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -734,13 +796,14 @@ describe('L2GNS', () => { }) it('rejects calls with a proof from a legacy subgraph', async function () { const l1Subgraph = mainnetLegacySubgraphWithProof - + const versionMetadata 
= randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -764,13 +827,14 @@ describe('L2GNS', () => { describe('claiming a curator balance for a legacy subgraph using a proof', function () { it('verifies a proof and assigns a curator balance', async function () { const l1Subgraph = mainnetLegacySubgraphWithProof - + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -805,13 +869,14 @@ describe('L2GNS', () => { }) it('adds the balance to any existing balance for the curator', async function () { const l1Subgraph = mainnetLegacySubgraphWithProof - + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -854,13 +919,14 @@ describe('L2GNS', () => { }) it('rejects calls with an invalid proof (e.g. from a different L1GNS address)', async function () { const l1Subgraph = mainnetLegacySubgraphWithProof - + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -885,13 +951,14 @@ describe('L2GNS', () => { }) it('rejects calls with an invalid proof (e.g. 
from a different curator)', async function () { const l1Subgraph = mainnetLegacySubgraphWithProof - + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -934,13 +1001,14 @@ describe('L2GNS', () => { }) it('rejects calls if the balance was already claimed', async function () { const l1Subgraph = mainnetLegacySubgraphWithProof - + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -986,13 +1054,14 @@ describe('L2GNS', () => { }) it('rejects calls with a proof from a non-legacy subgraph', async function () { const l1Subgraph = mainnetSubgraphWithProof - + const versionMetadata = randomHexBytes() // Now we pretend the L1 subgraph was locked and migrated at the specified block await migrateMockSubgraphFromL1( l1Subgraph.subgraphId, l1Subgraph.curatedTokens, l1Subgraph.blockhash, l1Subgraph.metadata, + versionMetadata, l1Subgraph.nSignal, ) @@ -1026,9 +1095,22 @@ describe('L2GNS', () => { // Eth for gas: await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) - const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = - await defaultL1SubgraphParams() - await migrateMockSubgraphFromL1(l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal) + const { + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + } = await defaultL1SubgraphParams() + await migrateMockSubgraphFromL1( + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + ) const tx = gns 
.connect(mockL1GNSL2Alias) @@ -1046,9 +1128,22 @@ describe('L2GNS', () => { // Eth for gas: await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) - const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = - await defaultL1SubgraphParams() - await migrateMockSubgraphFromL1(l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal) + const { + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + } = await defaultL1SubgraphParams() + await migrateMockSubgraphFromL1( + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + ) await grt.connect(governor.signer).mint(other.address, toGRT('10')) await grt.connect(other.signer).approve(gns.address, toGRT('10')) @@ -1067,9 +1162,22 @@ describe('L2GNS', () => { expect(l2CuratorBalance).eq(prevSignal.add(toGRT('10'))) }) it('can only be called from the counterpart GNS L2 alias', async function () { - const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = - await defaultL1SubgraphParams() - await migrateMockSubgraphFromL1(l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal) + const { + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + } = await defaultL1SubgraphParams() + await migrateMockSubgraphFromL1( + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + ) const tx = gns .connect(governor.signer) @@ -1115,9 +1223,22 @@ describe('L2GNS', () => { // Eth for gas: await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) - const { l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal } = - await defaultL1SubgraphParams() - await migrateMockSubgraphFromL1(l1SubgraphId, curatedTokens, lockBlockhash, metadata, nSignal) + const { + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + } = await 
defaultL1SubgraphParams() + await migrateMockSubgraphFromL1( + l1SubgraphId, + curatedTokens, + lockBlockhash, + subgraphMetadata, + versionMetadata, + nSignal, + ) const tx = gns .connect(mockL1GNSL2Alias) From 26be8ac8c78351568cef3df473be72266b0cd865 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 8 Nov 2022 14:36:33 -0300 Subject: [PATCH 039/112] fix: cleaner storage, and allow disabling MPT proofs --- contracts/discovery/GNS.sol | 23 +++--- contracts/discovery/GNSStorage.sol | 66 ++++++++++------- contracts/discovery/L1GNS.sol | 3 +- contracts/discovery/L1GNSStorage.sol | 16 ++++ contracts/l2/discovery/L2GNS.sol | 33 ++++++++- contracts/l2/discovery/L2GNSStorage.sol | 16 ++++ test/l2/l2GNS.test.ts | 99 +++++++++++++++++++++++++ 7 files changed, 216 insertions(+), 40 deletions(-) create mode 100644 contracts/discovery/L1GNSStorage.sol create mode 100644 contracts/l2/discovery/L2GNSStorage.sol diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index 70d989ad5..9fd962331 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -3,16 +3,19 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import "@openzeppelin/contracts/math/SafeMath.sol"; -import "@openzeppelin/contracts/utils/Address.sol"; - -import "../base/Multicall.sol"; -import "../bancor/BancorFormula.sol"; -import "../upgrades/GraphUpgradeable.sol"; -import "../utils/TokenUtils.sol"; - -import "./IGNS.sol"; -import "./GNSStorage.sol"; +import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; +import { Address } from "@openzeppelin/contracts/utils/Address.sol"; + +import { Multicall } from "../base/Multicall.sol"; +import { BancorFormula } from "../bancor/BancorFormula.sol"; +import { GraphUpgradeable } from "../upgrades/GraphUpgradeable.sol"; +import { TokenUtils } from "../utils/TokenUtils.sol"; +import { ICuration } from "../curation/ICuration.sol"; +import { Managed } from "../governance/Managed.sol"; +import { ISubgraphNFT } from 
"./ISubgraphNFT.sol"; + +import { IGNS } from "./IGNS.sol"; +import { GNSV3Storage } from "./GNSStorage.sol"; /** * @title GNS diff --git a/contracts/discovery/GNSStorage.sol b/contracts/discovery/GNSStorage.sol index f79ecb477..be7a6f0c7 100644 --- a/contracts/discovery/GNSStorage.sol +++ b/contracts/discovery/GNSStorage.sol @@ -3,58 +3,74 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import "../governance/Managed.sol"; +import { Managed } from "../governance/Managed.sol"; -import "./erc1056/IEthereumDIDRegistry.sol"; -import "./IGNS.sol"; -import "./ISubgraphNFT.sol"; +import { IEthereumDIDRegistry } from "./erc1056/IEthereumDIDRegistry.sol"; +import { IGNS } from "./IGNS.sol"; +import { ISubgraphNFT } from "./ISubgraphNFT.sol"; +/** + * @title GNSV1Storage + * @notice This contract holds all the storage variables for the GNS contract, version 1 + */ abstract contract GNSV1Storage is Managed { // -- State -- - // In parts per hundred + /// Percentage of curation tax that must be paid by the owner, in parts per hundred. uint32 public ownerTaxPercentage; - // Bonding curve formula + /// Bonding curve formula. address public bondingCurve; - // Stores what subgraph deployment a particular legacy subgraph targets - // A subgraph is defined by (graphAccountID, subgraphNumber) - // A subgraph can target one subgraph deployment (bytes32 hash) - // (graphAccountID, subgraphNumber) => subgraphDeploymentID + /// @dev Stores what subgraph deployment a particular legacy subgraph targets. + /// A subgraph is defined by (graphAccountID, subgraphNumber). + /// A subgraph can target one subgraph deployment (bytes32 hash). + /// (graphAccountID, subgraphNumber) => subgraphDeploymentID mapping(address => mapping(uint256 => bytes32)) internal legacySubgraphs; - // Every time an account creates a subgraph it increases a per-account sequence ID - // account => seqID + /// Every time an account creates a subgraph it increases a per-account sequence ID. 
+ /// account => seqID mapping(address => uint256) public nextAccountSeqID; - // Stores all the signal deposited on a legacy subgraph - // (graphAccountID, subgraphNumber) => SubgraphData + /// Stores all the signal deposited on a legacy subgraph. + /// (graphAccountID, subgraphNumber) => SubgraphData mapping(address => mapping(uint256 => IGNS.SubgraphData)) public legacySubgraphData; - // [DEPRECATED] ERC-1056 contract reference - // This contract is used for managing identities + /// @dev [DEPRECATED] ERC-1056 contract reference. + /// This contract was used for managing identities. IEthereumDIDRegistry private __DEPRECATED_erc1056Registry; } +/** + * @title GNSV2Storage + * @notice This contract holds all the storage variables for the GNS contract, version 2 + */ abstract contract GNSV2Storage is GNSV1Storage { - // Use it whenever a legacy (v1) subgraph NFT was claimed to maintain compatibility - // Keep a reference from subgraphID => (graphAccount, subgraphNumber) + /// Stores the account and seqID for a legacy subgraph that has been migrated. + /// Use it whenever a legacy (v1) subgraph NFT was claimed to maintain compatibility. + /// Keep a reference from subgraphID => (graphAccount, subgraphNumber) mapping(uint256 => IGNS.LegacySubgraphKey) public legacySubgraphKeys; - // Store data for all NFT-based (v2) subgraphs - // subgraphID => SubgraphData + /// Store data for all NFT-based (v2) subgraphs. + /// subgraphID => SubgraphData mapping(uint256 => IGNS.SubgraphData) public subgraphs; - // Contract that represents subgraph ownership through an NFT + /// Contract that represents subgraph ownership through an NFT ISubgraphNFT public subgraphNFT; } +/** + * @title GNSV3Storage + * @notice This contract holds all the storage variables for the base GNS contract, version 3. 
+ * @dev Note that this is the first version that includes a storage gap - if adding + * future versions, make sure to move the gap to the new version and + * reduce the size of the gap accordingly. + */ abstract contract GNSV3Storage is GNSV2Storage { - // Data for subgraph migration from L1 to L2, some fields will be empty or set differently on each layer + /// Data for subgraph migration from L1 to L2, some fields will be empty or set differently on each layer mapping(uint256 => IGNS.SubgraphL2MigrationData) public subgraphL2MigrationData; - // Address of the counterpart GNS contract (L1GNS/L2GNS) + /// Address of the counterpart GNS contract (L1GNS/L2GNS) address public counterpartGNSAddress; - // Address of the Arbitrum DelayedInbox - only used by L1GNS - address public arbitrumInboxAddress; + /// @dev Gap to allow adding variables in future upgrades (since L1GNS and L2GNS have their own storage as well) + uint256[50] private __gap; } diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 122d36608..4bf8d76a3 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -12,6 +12,7 @@ import { ITokenGateway } from "../arbitrum/ITokenGateway.sol"; import { L1ArbitrumMessenger } from "../arbitrum/L1ArbitrumMessenger.sol"; import { IL2GNS } from "../l2/discovery/IL2GNS.sol"; import { IGraphToken } from "../token/IGraphToken.sol"; +import { L1GNSV1Storage } from "./L1GNSStorage.sol"; /** * @title GNS @@ -22,7 +23,7 @@ import { IGraphToken } from "../token/IGraphToken.sol"; * The contract implements a multicall behaviour to support batching multiple calls in a single * transaction. 
*/ -contract L1GNS is GNS, L1ArbitrumMessenger { +contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { using SafeMath for uint256; event SubgraphLockedForMigrationToL2(uint256 _subgraphID); diff --git a/contracts/discovery/L1GNSStorage.sol b/contracts/discovery/L1GNSStorage.sol new file mode 100644 index 000000000..e591e2c81 --- /dev/null +++ b/contracts/discovery/L1GNSStorage.sol @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +/** + * @title L1GNSV1Storage + * @notice This contract holds all the L1-specific storage variables for the L1GNS contract, version 1 + * @dev + */ +abstract contract L1GNSV1Storage { + /// Address of the Arbitrum DelayedInbox + address public arbitrumInboxAddress; + /// @dev Storage gap to keep storage slots fixed in future versions + uint256[50] private __gap; +} diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 7d913f509..9d5cecd68 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -11,12 +11,13 @@ import { GNS } from "../../discovery/GNS.sol"; import { IGNS } from "../../discovery/IGNS.sol"; import { ICuration } from "../../curation/ICuration.sol"; import { IL2GNS } from "./IL2GNS.sol"; +import { L2GNSV1Storage } from "./L2GNSStorage.sol"; import { RLPReader } from "../../libraries/RLPReader.sol"; import { StateProofVerifier as Verifier } from "../../libraries/StateProofVerifier.sol"; /** - * @title GNS + * @title L2GNS * @dev The Graph Name System contract provides a decentralized naming system for subgraphs * used in the scope of the Graph Network. It translates Subgraphs into Subgraph Versions. * Each version is associated with a Subgraph Deployment. The contract has no knowledge of @@ -24,11 +25,12 @@ import { StateProofVerifier as Verifier } from "../../libraries/StateProofVerifi * The contract implements a multicall behaviour to support batching multiple calls in a single * transaction. 
*/ -contract L2GNS is GNS, IL2GNS { +contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { using RLPReader for bytes; using RLPReader for RLPReader.RLPItem; using SafeMath for uint256; + /// Emitted when a subgraph is received from L1 through the bridge event SubgraphReceivedFromL1(uint256 _subgraphID); event SubgraphMigrationFinalized(uint256 _subgraphID); event CuratorBalanceClaimed( @@ -37,6 +39,8 @@ contract L2GNS is GNS, IL2GNS { address _l2Curator, uint256 _nSignalClaimed ); + event MPTClaimingEnabled(); + event MPTClaimingDisabled(); /** * @dev Checks that the sender is the L2GraphTokenGateway as configured on the Controller. @@ -46,6 +50,14 @@ contract L2GNS is GNS, IL2GNS { _; } + /** + * @dev Checks that claiming balances using Merkle Patricia proofs is enabled. + */ + modifier ifMPTClaimingEnabled() { + require(mptClaimingEnabled, "MPT_CLAIMING_DISABLED"); + _; + } + /** * @dev Checks that the sender is the L2 alias of the counterpart * GNS on L1. @@ -58,6 +70,19 @@ contract L2GNS is GNS, IL2GNS { _; } + /** + * @notice Enables or disables claiming L1 balances using Merkle Patricia proofs + * @param _enabled If true, claiming MPT proofs will be enabled; if false, they will be disabled + */ + function setMPTClaimingEnabled(bool _enabled) external onlyGovernor { + mptClaimingEnabled = _enabled; + if (_enabled) { + emit MPTClaimingEnabled(); + } else { + emit MPTClaimingDisabled(); + } + } + /** * @dev Receive tokens with a callhook from the bridge. 
* The callhook will receive a subgraph from L1 @@ -143,7 +168,7 @@ contract L2GNS is GNS, IL2GNS { uint256 _subgraphID, bytes memory _blockHeaderRlpBytes, bytes memory _proofRlpBytes - ) external override notPartialPaused { + ) external override notPartialPaused ifMPTClaimingEnabled { IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; require(migratedData.l2Done, "!MIGRATED"); require(!migratedData.curatorBalanceClaimed[msg.sender], "ALREADY_CLAIMED"); @@ -196,7 +221,7 @@ contract L2GNS is GNS, IL2GNS { uint256 _seqID, bytes memory _blockHeaderRlpBytes, bytes memory _proofRlpBytes - ) external override notPartialPaused { + ) external override notPartialPaused ifMPTClaimingEnabled { uint256 _subgraphID = _buildLegacySubgraphID(_subgraphCreatorAccount, _seqID); Verifier.BlockHeader memory blockHeader = Verifier.parseBlockHeader(_blockHeaderRlpBytes); diff --git a/contracts/l2/discovery/L2GNSStorage.sol b/contracts/l2/discovery/L2GNSStorage.sol new file mode 100644 index 000000000..016f12adb --- /dev/null +++ b/contracts/l2/discovery/L2GNSStorage.sol @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +/** + * @title L2GNSV1Storage + * @notice This contract holds all the L2-specific storage variables for the L2GNS contract, version 1 + * @dev + */ +abstract contract L2GNSV1Storage { + /// Specifies whether claiming L1 balances using Merkle Patricia proofs is enabled + bool public mptClaimingEnabled; + /// @dev Storage gap to keep storage slots fixed in future versions + uint256[50] private __gap; +} diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index cf735eb72..09cf6d157 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -298,6 +298,22 @@ describe('L2GNS', () => { await fixture.tearDown() }) + describe('enabling and disabling claiming through proofs', function () { + it('enables and disables mptClaimingEnabled and emits an event', async 
function () { + expect(await gns.mptClaimingEnabled()).eq(false) + const tx = gns.connect(governor.signer).setMPTClaimingEnabled(true) + await expect(tx).emit(gns, 'MPTClaimingEnabled') + expect(await gns.mptClaimingEnabled()).eq(true) + + const tx2 = gns.connect(governor.signer).setMPTClaimingEnabled(false) + await expect(tx2).emit(gns, 'MPTClaimingDisabled') + expect(await gns.mptClaimingEnabled()).eq(false) + }) + it('can only be called by the governor', async function () { + const tx = gns.connect(me.signer).setMPTClaimingEnabled(true) + await expect(tx).revertedWith('Only Controller governor') + }) + }) describe('receiving a subgraph from L1 (onTokenTransfer)', function () { it('cannot be called by someone other than the L2GraphTokenGateway', async function () { const { l1SubgraphId, curatedTokens, lockBlockhash, nSignal } = @@ -584,6 +600,8 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -622,6 +640,8 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -663,6 +683,7 @@ describe('L2GNS', () => { l1Subgraph.nSignal, ) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) // We haven't updated the L1 counterpart address, so GNS will not accept the account proof as valid const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) @@ -695,6 +716,8 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await 
gns.connect(governor.signer).setMPTClaimingEnabled(true) + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -714,6 +737,7 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -740,6 +764,7 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -783,6 +808,7 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(mainnetProofForDifferentBlock) @@ -812,6 +838,8 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -823,6 +851,34 @@ describe('L2GNS', () => { await expect(tx).revertedWith('MPT: invalid node hash') }) + it('rejects calls if MPT claiming is not enabled', async function () { + const l1Subgraph = mainnetSubgraphWithProof + const versionMetadata = randomHexBytes() + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + 
l1Subgraph.blockhash, + l1Subgraph.metadata, + versionMetadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) + await expect(tx).revertedWith('MPT_CLAIMING_DISABLED') + }) }) describe('claiming a curator balance for a legacy subgraph using a proof', function () { it('verifies a proof and assigns a curator balance', async function () { @@ -843,6 +899,7 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -885,6 +942,7 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -930,6 +988,7 @@ describe('L2GNS', () => { l1Subgraph.nSignal, ) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) // We haven't updated the L1 counterpart address, so GNS will not accept the account proof as valid const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) @@ -967,6 +1026,7 @@ describe('L2GNS', () => { .connect(governor.signer) 
.setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -991,6 +1051,8 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -1017,6 +1079,8 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -1070,6 +1134,8 @@ describe('L2GNS', () => { .connect(governor.signer) .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + await gns.connect(governor.signer).setMPTClaimingEnabled(true) + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) @@ -1088,6 +1154,39 @@ describe('L2GNS', () => { await expect(tx).revertedWith('MPT: invalid node hash') }) + it('rejects calls if MPT claiming is not enabled', async function () { + const l1Subgraph = mainnetLegacySubgraphWithProof + const versionMetadata = randomHexBytes() + // Now we pretend the L1 subgraph was locked and migrated at the specified block + await migrateMockSubgraphFromL1( + l1Subgraph.subgraphId, + l1Subgraph.curatedTokens, + l1Subgraph.blockhash, + l1Subgraph.metadata, + versionMetadata, + l1Subgraph.nSignal, + ) + + // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid + await gns + .connect(governor.signer) + 
.setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) + + const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) + const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) + + const curatorSigner = await impersonateAccount(l1Subgraph.curator) + await setAccountBalance(l1Subgraph.curator, parseEther('1000')) + const tx = gns + .connect(curatorSigner) + .claimL1CuratorBalanceForLegacySubgraph( + l1Subgraph.account, + l1Subgraph.accountSeqId, + blockHeaderRLP, + proofRLP, + ) + await expect(tx).revertedWith('MPT_CLAIMING_DISABLED') + }) }) describe('claiming a curator balance with a message from L1', function () { it('assigns a curator balance to a beneficiary', async function () { From 602a9ff3e4837fccb27cdef4cbee5697061986dd Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 8 Nov 2022 15:08:39 -0300 Subject: [PATCH 040/112] test: a few more tests for MPT --- test/l2/l2GNS.test.ts | 5 ++--- test/mpt.test.ts | 49 ++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 09cf6d157..641fbf14c 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai' -import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' -import { arrayify, defaultAbiCoder, hexlify, parseEther, parseUnits } from 'ethers/lib/utils' +import { ethers, ContractTransaction, BigNumber } from 'ethers' +import { defaultAbiCoder, parseEther } from 'ethers/lib/utils' import { getAccounts, @@ -16,7 +16,6 @@ import { L2FixtureContracts, NetworkFixture } from '../lib/fixtures' import { toBN } from '../lib/testHelpers' import { L2GNS } from '../../build/types/L2GNS' -import { L2GraphToken } from '../../build/types/L2GraphToken' import { L2GraphTokenGateway } from '../../build/types/L2GraphTokenGateway' import { buildSubgraph, diff --git a/test/mpt.test.ts b/test/mpt.test.ts index 218e03c31..664911304 
100644 --- a/test/mpt.test.ts +++ b/test/mpt.test.ts @@ -5,7 +5,7 @@ import { Trie } from '@ethereumjs/trie' import { MerklePatriciaProofVerifierMock } from '../build/types/MerklePatriciaProofVerifierMock' import { deployContract } from './lib/deployment' -import { Account, getAccounts } from './lib/testHelpers' +import { Account, getAccounts, randomHexBytes } from './lib/testHelpers' const bufferToHex = (buf: Buffer): string => { return '0x' + buf.toString('hex') @@ -28,6 +28,31 @@ describe('MerklePatriciaProofVerifier', () => { )) as unknown as MerklePatriciaProofVerifierMock }) + it('verifies a valid proof of exclusion for the empty tree', async function () { + const trie = new Trie() + const key = Buffer.from('whatever') + const proof = await trie.createProof(key) + + const encodedProof = encodeProofRLP(proof) + + const val = await mpt.extractProofValue( + bufferToHex(trie.root()), + bufferToHex(key), + encodedProof, + ) + expect(val).to.equal('0x') + }) + + it('rejects an invalid root for the empty tree', async function () { + const trie = new Trie() + const key = Buffer.from('whatever') + const proof = await trie.createProof(key) + + const encodedProof = encodeProofRLP(proof) + + const call = mpt.extractProofValue(randomHexBytes(), bufferToHex(key), encodedProof) + await expect(call).revertedWith('MPT: invalid empty tree root') + }) it('verifies a valid proof of inclusion', async function () { const trie = new Trie() const key = Buffer.from('foo') @@ -70,4 +95,26 @@ describe('MerklePatriciaProofVerifier', () => { ) expect(val).to.equal('0x') }) + it('rejects a proof with an invalid value', async function () { + const trie = new Trie() + const key = Buffer.from('foo') + const value = Buffer.from('bar') + await trie.put(key, value) + + // We add a few more random values + await trie.put(Buffer.from('food'), Buffer.from('baz')) + await trie.put(Buffer.from('fob'), Buffer.from('bat')) + await trie.put(Buffer.from('zort'), Buffer.from('narf')) + + const proof = 
await trie.createProof(key) + + const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) + decodedProof[3][16] = bufferToHex(Buffer.from('wrong')) + const reEncodedProof = decodedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 'hex')) + + const encodedProof = encodeProofRLP(reEncodedProof) + + const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) + await expect(call).revertedWith('MPT: invalid node hash') + }) }) From 06534f03f22a70b02cf256a06c4dab80f7cdbcc0 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 8 Nov 2022 19:30:48 -0300 Subject: [PATCH 041/112] test: improve coverage for MPT verifier --- .../libraries/MerklePatriciaProofVerifier.sol | 30 ++-- test/mpt.test.ts | 144 +++++++++++++++++- 2 files changed, 160 insertions(+), 14 deletions(-) diff --git a/contracts/libraries/MerklePatriciaProofVerifier.sol b/contracts/libraries/MerklePatriciaProofVerifier.sol index 5b734454d..18c8c5f2d 100644 --- a/contracts/libraries/MerklePatriciaProofVerifier.sol +++ b/contracts/libraries/MerklePatriciaProofVerifier.sol @@ -10,6 +10,8 @@ * - Silenced linter warnings about inline assembly * - Renamed a variable for mixedCase consistency * - Added clearer revert messages + * - Use assert when checking for a condition that should be impossible (nibble >= 16) + * - Other minor QA changes */ /** @@ -42,7 +44,7 @@ library MerklePatriciaProofVerifier { RLPReader.RLPItem[] memory stack ) internal pure returns (bytes memory value) { bytes memory mptKey = _decodeNibbles(path, 0); - uint256 mptKeyOffset = 0; + uint256 mptKeyOffset; bytes32 nodeHashHash; RLPReader.RLPItem[] memory node; @@ -59,7 +61,7 @@ library MerklePatriciaProofVerifier { } // Traverse stack of nodes starting at root. - for (uint256 i = 0; i < stack.length; i++) { + for (uint256 i; i < stack.length; ++i) { // We use the fact that an rlp encoded list consists of some // encoding of its length plus the concatenation of its // *rlp-encoded* items. 
@@ -146,16 +148,18 @@ library MerklePatriciaProofVerifier { // we haven't consumed the entire path, so we need to look at a child uint8 nibble = uint8(mptKey[mptKeyOffset]); mptKeyOffset += 1; - if (nibble >= 16) { - // each element of the path has to be a nibble - revert("MPT: element not nibble"); - } - if (_isEmptyBytesequence(node[nibble])) { + // mptKey comes from _decodeNibbles which should never + // return a nibble >= 16, which is why we should never + // ever have a nibble >= 16 here. (This is a sanity check + // which is why we use assert and not require.) + assert(nibble < 16); + + if (_isEmptyByteSequence(node[nibble])) { // Sanity if (i != stack.length - 1) { // leaf node should be at last level - revert("MPT: leaf not last"); + revert("MPT: empty leaf not last"); } return new bytes(0); @@ -196,7 +200,7 @@ library MerklePatriciaProofVerifier { } } - function _isEmptyBytesequence(RLPReader.RLPItem memory item) private pure returns (bool) { + function _isEmptyByteSequence(RLPReader.RLPItem memory item) private pure returns (bool) { if (item.len != 1) { return false; } @@ -241,20 +245,20 @@ library MerklePatriciaProofVerifier { pure returns (bytes memory nibbles) { - require(compact.length > 0, "MPT: _dN invalid compact length"); + require(compact.length != 0, "MPT: _dN invalid compact length"); uint256 length = compact.length * 2; require(skipNibbles <= length, "MPT: _dN invalid skipNibbles"); length -= skipNibbles; nibbles = new bytes(length); - uint256 nibblesLength = 0; + uint256 nibblesLength; for (uint256 i = skipNibbles; i < skipNibbles + length; i += 1) { if (i % 2 == 0) { nibbles[nibblesLength] = bytes1((uint8(compact[i / 2]) >> 4) & 0xF); } else { - nibbles[nibblesLength] = bytes1((uint8(compact[i / 2]) >> 0) & 0xF); + nibbles[nibblesLength] = bytes1((uint8(compact[i / 2])) & 0xF); } nibblesLength += 1; } @@ -268,7 +272,7 @@ library MerklePatriciaProofVerifier { bytes memory ys ) private pure returns (uint256) { uint256 i; - for (i = 0; i + 
xsOffset < xs.length && i < ys.length; i++) { + for (; i + xsOffset < xs.length && i < ys.length; ++i) { if (xs[i + xsOffset] != ys[i]) { return i; } diff --git a/test/mpt.test.ts b/test/mpt.test.ts index 664911304..a930897cc 100644 --- a/test/mpt.test.ts +++ b/test/mpt.test.ts @@ -75,7 +75,7 @@ describe('MerklePatriciaProofVerifier', () => { ) expect(val).to.equal(bufferToHex(value)) }) - it('verifies a valid proof of exclusion', async function () { + it('verifies a valid proof of exclusion based on a divergent node', async function () { const trie = new Trie() const key = Buffer.from('foo') @@ -86,6 +86,7 @@ describe('MerklePatriciaProofVerifier', () => { const proof = await trie.createProof(key) + // The path for "food" should form a divergent path for "foo" const encodedProof = encodeProofRLP(proof) const val = await mpt.extractProofValue( @@ -95,6 +96,47 @@ describe('MerklePatriciaProofVerifier', () => { ) expect(val).to.equal('0x') }) + it('verifies a valid proof of exclusion based on a leaf node', async function () { + const trie = new Trie() + const key = Buffer.from('food') + + // We add a few more random values + await trie.put(Buffer.from('foo'), Buffer.from('baz')) + + const proof = await trie.createProof(key) + + // The path for "foo" should be a leaf node, which proofs "food" is excluded + const encodedProof = encodeProofRLP(proof) + + const val = await mpt.extractProofValue( + bufferToHex(trie.root()), + bufferToHex(key), + encodedProof, + ) + expect(val).to.equal('0x') + }) + it('verifies a valid proof of exclusion based on an empty leaf on a branch node', async function () { + const trie = new Trie() + const key = Buffer.from('zork') + + await trie.put(Buffer.from('zor'), Buffer.from('baz')) + + // The fact that we have two keys that only differ in the + // last nibble gives us a proof that ends with a branch node + // with an empty value for the last nibble. 
+ await trie.put(Buffer.from('zorl'), Buffer.from('bart')) + await trie.put(Buffer.from('zorm'), Buffer.from('bort')) + + const proof = await trie.createProof(key) + const encodedProof = encodeProofRLP(proof) + + const val = await mpt.extractProofValue( + bufferToHex(trie.root()), + bufferToHex(key), + encodedProof, + ) + expect(val).eq('0x') + }) it('rejects a proof with an invalid value', async function () { const trie = new Trie() const key = Buffer.from('foo') @@ -117,4 +159,104 @@ describe('MerklePatriciaProofVerifier', () => { const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) await expect(call).revertedWith('MPT: invalid node hash') }) + it('rejects a proof of exclusion where the divergent node is not last', async function () { + const trie = new Trie() + const key = Buffer.from('foo') + + // We add a few more random values + await trie.put(Buffer.from('food'), Buffer.from('baz')) + await trie.put(Buffer.from('fob'), Buffer.from('bat')) + await trie.put(Buffer.from('zort'), Buffer.from('narf')) + + const proof = await trie.createProof(key) + + const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) + // We add a random node to the end of the proof + decodedProof.push(bufferToHex(Buffer.from('wrong'))) + const reEncodedProof = decodedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 'hex')) + const encodedProof = encodeProofRLP(reEncodedProof) + + const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) + await expect(call).revertedWith('MPT: divergent node not last') + }) + it('rejects a proof of inclusion with garbage at the end', async function () { + const trie = new Trie() + const key = Buffer.from('foo') + const value = Buffer.from('bar') + await trie.put(key, value) + + // We add a few more random values + await trie.put(Buffer.from('food'), Buffer.from('baz')) + await trie.put(Buffer.from('fob'), Buffer.from('bat')) + await trie.put(Buffer.from('zort'), 
Buffer.from('narf')) + + const proof = await trie.createProof(key) + const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) + // We add a random node to the end of the proof + decodedProof.push(bufferToHex(Buffer.from('wrong'))) + const reEncodedProof = decodedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 'hex')) + const encodedProof = encodeProofRLP(reEncodedProof) + + const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) + await expect(call).revertedWith('MPT: end not last') + }) + it('rejects a proof of inclusion with garbage after a leaf node', async function () { + const trie = new Trie() + const key = Buffer.from('foo') + const value = Buffer.from('bar') + await trie.put(key, value) + + const proof = await trie.createProof(key) + const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) + // We add a random node to the end of the proof + decodedProof.push(bufferToHex(Buffer.from('wrong'))) + const reEncodedProof = decodedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 'hex')) + const encodedProof = encodeProofRLP(reEncodedProof) + + const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) + await expect(call).revertedWith('MPT: leaf node not last') + }) + it('rejects a truncated proof of inclusion', async function () { + const trie = new Trie() + const key = Buffer.from('foo') + const value = Buffer.from('bar') + await trie.put(key, value) + + // We add a few more random values + await trie.put(Buffer.from('food'), Buffer.from('baz')) + await trie.put(Buffer.from('fob'), Buffer.from('bat')) + await trie.put(Buffer.from('zort'), Buffer.from('narf')) + + const proof = await trie.createProof(key) + const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) + // We remove some nodes from the end, leaving a non-leaf node last + const truncatedProof = [decodedProof[0], decodedProof[1]] + const reEncodedProof = truncatedProof.map((v) => 
Buffer.from(RLP.encode(v).slice(2), 'hex')) + const encodedProof = encodeProofRLP(reEncodedProof) + + const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) + await expect(call).revertedWith('MPT: non-leaf node last') + }) + it('rejects a proof of exclusion with a non-last empty byte sequence', async function () { + const trie = new Trie() + const key = Buffer.from('zork') + + await trie.put(Buffer.from('zor'), Buffer.from('baz')) + + // The fact that we have two keys that only differ in the + // last nibble gives us a proof that ends with a branch node + // with an empty value for the last nibble. + await trie.put(Buffer.from('zorl'), Buffer.from('bart')) + await trie.put(Buffer.from('zorm'), Buffer.from('bort')) + + const proof = await trie.createProof(key) + const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) + // We add a random node to the end of the proof + decodedProof.push(bufferToHex(Buffer.from('wrong'))) + const reEncodedProof = decodedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 'hex')) + const encodedProof = encodeProofRLP(reEncodedProof) + + const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) + await expect(call).revertedWith('MPT: empty leaf not last') + }) }) From 5f2fdea0b5f13668395d97ea738245e2700fbbec Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Wed, 9 Nov 2022 02:31:34 -0300 Subject: [PATCH 042/112] test: add tests for MPT using hashed keys like Ethereum --- test/lib/mptProofUtils.ts | 4 ++++ test/mpt.test.ts | 34 +++++++++++++++++++++++++++++++++- 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/test/lib/mptProofUtils.ts b/test/lib/mptProofUtils.ts index 0de2a374c..db1fd3b9f 100644 --- a/test/lib/mptProofUtils.ts +++ b/test/lib/mptProofUtils.ts @@ -74,6 +74,10 @@ export const encodeMPTStorageProofRLP = (proof: GetProofResponse): string => { throw new Error('Expected exactly one storage slot proof') } const accountProof 
= proof.accountProof.map((node) => RLP.decode(hexlify(node))) + console.log('Account proof:') + console.log(accountProof) const storageProof = proof.storageProof[0].proof.map((node) => RLP.decode(hexlify(node))) + console.log('Storage proof:') + console.log(storageProof) return RLP.encode([accountProof, storageProof]) } diff --git a/test/mpt.test.ts b/test/mpt.test.ts index a930897cc..4773d846c 100644 --- a/test/mpt.test.ts +++ b/test/mpt.test.ts @@ -1,6 +1,6 @@ import { expect } from 'chai' import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' -import { RLP } from 'ethers/lib/utils' +import { keccak256, RLP } from 'ethers/lib/utils' import { Trie } from '@ethereumjs/trie' import { MerklePatriciaProofVerifierMock } from '../build/types/MerklePatriciaProofVerifierMock' @@ -259,4 +259,36 @@ describe('MerklePatriciaProofVerifier', () => { const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) await expect(call).revertedWith('MPT: empty leaf not last') }) + it('verifies an inclusion proof for a trie that uses hashed keys', async function () { + const trie = new Trie({ useKeyHashing: true }) + const key = Buffer.from('something') + const value = Buffer.from('a value') + await trie.put(key, value) + + // We add a few more random values + await trie.put(Buffer.from('something else'), Buffer.from('baz')) + await trie.put(Buffer.from('more stuff'), Buffer.from('bat')) + await trie.put(Buffer.from('zort'), Buffer.from('narf')) + + const proof = await trie.createProof(key) + + const encodedProof = encodeProofRLP(proof) + const val = await mpt.extractProofValue(bufferToHex(trie.root()), keccak256(key), encodedProof) + await expect(val).eq(bufferToHex(value)) + }) + it('verifies an exclusion proof for a trie that uses hashed keys', async function () { + const trie = new Trie({ useKeyHashing: true }) + const key = Buffer.from('something') + + // We add a few more random values + await trie.put(Buffer.from('something else'), 
Buffer.from('baz')) + await trie.put(Buffer.from('more stuff'), Buffer.from('bat')) + await trie.put(Buffer.from('zort'), Buffer.from('narf')) + + const proof = await trie.createProof(key) + + const encodedProof = encodeProofRLP(proof) + const val = await mpt.extractProofValue(bufferToHex(trie.root()), keccak256(key), encodedProof) + await expect(val).eq('0x') + }) }) From 18bc53b32e296e0ca5a173481cd6755f72c7ddc2 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 10 Nov 2022 12:09:56 -0300 Subject: [PATCH 043/112] feat: hardcode reserve ratio to 1 in L2, and allow pre-curated deployments --- cli/commands/migrate.ts | 2 +- cli/contracts.ts | 7 +- config/graph.arbitrum-goerli.yml | 5 +- config/graph.arbitrum-localhost.yml | 5 +- config/graph.arbitrum-one.yml | 5 +- contracts/curation/Curation.sol | 73 --- contracts/curation/CurationStorage.sol | 29 +- contracts/curation/ICuration.sol | 12 - contracts/discovery/GNS.sol | 15 +- contracts/l2/curation/IL2Curation.sol | 17 + contracts/l2/curation/L2Curation.sol | 537 +++++++++++++++++ contracts/l2/discovery/L2GNS.sol | 96 ++- test/curation/curation.test.ts | 90 --- test/l2/l2Curation.test.ts | 790 +++++++++++++++++++++++++ test/l2/l2GNS.test.ts | 40 +- test/lib/deployment.ts | 25 + test/lib/fixtures.ts | 10 +- test/lib/mptProofUtils.ts | 4 - 18 files changed, 1542 insertions(+), 220 deletions(-) create mode 100644 contracts/l2/curation/IL2Curation.sol create mode 100644 contracts/l2/curation/L2Curation.sol create mode 100644 test/l2/l2Curation.test.ts diff --git a/cli/commands/migrate.ts b/cli/commands/migrate.ts index 3756f2b69..18c66615c 100644 --- a/cli/commands/migrate.ts +++ b/cli/commands/migrate.ts @@ -46,7 +46,7 @@ const l2Contracts = [ 'L2GraphToken', 'GraphCurationToken', 'ServiceRegistry', - 'Curation', + 'L2Curation', 'SubgraphNFTDescriptor', 'SubgraphNFT', 'L2GNS', diff --git a/cli/contracts.ts b/cli/contracts.ts index 883473252..3a20bc727 100644 --- a/cli/contracts.ts +++ b/cli/contracts.ts @@ 
-39,13 +39,15 @@ import { L1GraphTokenGateway } from '../build/types/L1GraphTokenGateway' import { L2GraphToken } from '../build/types/L2GraphToken' import { L2GraphTokenGateway } from '../build/types/L2GraphTokenGateway' import { BridgeEscrow } from '../build/types/BridgeEscrow' +import { L2Curation } from '../build/types/L2Curation' export interface NetworkContracts { EpochManager: EpochManager DisputeManager: DisputeManager Staking: Staking ServiceRegistry: ServiceRegistry - Curation: Curation + Curation: Curation | L2Curation + L2Curation: L2Curation RewardsManager: RewardsManager GNS: GNS | L1GNS | L2GNS GraphProxyAdmin: GraphProxyAdmin @@ -108,6 +110,9 @@ export const loadContracts = ( if (signerOrProvider && chainIdIsL2(chainId) && contractName == 'L2GNS') { contracts['GNS'] = contracts[contractName] } + if (signerOrProvider && chainIdIsL2(chainId) && contractName == 'L2Curation') { + contracts['Curation'] = contracts[contractName] + } if (signerOrProvider && !chainIdIsL2(chainId) && contractName == 'L1GNS') { contracts['GNS'] = contracts[contractName] } diff --git a/config/graph.arbitrum-goerli.yml b/config/graph.arbitrum-goerli.yml index ff58ff316..7d313206d 100644 --- a/config/graph.arbitrum-goerli.yml +++ b/config/graph.arbitrum-goerli.yml @@ -11,7 +11,7 @@ contracts: calls: - fn: "setContractProxy" id: "0xe6876326c1291dfcbbd3864a6816d698cd591defc7aa2153d7f9c4c04016c89f" # keccak256('Curation') - contractAddress: "${{Curation.address}}" + contractAddress: "${{L2Curation.address}}" - fn: "setContractProxy" id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') contractAddress: "${{L2GNS.address}}" @@ -60,13 +60,12 @@ contracts: - fn: "renounceMinter" - fn: "transferOwnership" owner: *governor - Curation: + L2Curation: proxy: true init: controller: "${{Controller.address}}" bondingCurve: "${{BancorFormula.address}}" curationTokenMaster: "${{GraphCurationToken.address}}" - reserveRatio: 1000000 # in parts per million 
curationTaxPercentage: 10000 # in parts per million minimumCurationDeposit: "1000000000000000000" # in wei calls: diff --git a/config/graph.arbitrum-localhost.yml b/config/graph.arbitrum-localhost.yml index e24e421fe..6161bd13e 100644 --- a/config/graph.arbitrum-localhost.yml +++ b/config/graph.arbitrum-localhost.yml @@ -11,7 +11,7 @@ contracts: calls: - fn: "setContractProxy" id: "0xe6876326c1291dfcbbd3864a6816d698cd591defc7aa2153d7f9c4c04016c89f" # keccak256('Curation') - contractAddress: "${{Curation.address}}" + contractAddress: "${{L2Curation.address}}" - fn: "setContractProxy" id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') contractAddress: "${{L2GNS.address}}" @@ -60,13 +60,12 @@ contracts: - fn: "renounceMinter" - fn: "transferOwnership" owner: *governor - Curation: + L2Curation: proxy: true init: controller: "${{Controller.address}}" bondingCurve: "${{BancorFormula.address}}" curationTokenMaster: "${{GraphCurationToken.address}}" - reserveRatio: 1000000 # in parts per million curationTaxPercentage: 10000 # in parts per million minimumCurationDeposit: "1000000000000000000" # in wei calls: diff --git a/config/graph.arbitrum-one.yml b/config/graph.arbitrum-one.yml index 1e2750b50..d3f41863f 100644 --- a/config/graph.arbitrum-one.yml +++ b/config/graph.arbitrum-one.yml @@ -11,7 +11,7 @@ contracts: calls: - fn: "setContractProxy" id: "0xe6876326c1291dfcbbd3864a6816d698cd591defc7aa2153d7f9c4c04016c89f" # keccak256('Curation') - contractAddress: "${{Curation.address}}" + contractAddress: "${{L2Curation.address}}" - fn: "setContractProxy" id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') contractAddress: "${{L2GNS.address}}" @@ -60,13 +60,12 @@ contracts: - fn: "renounceMinter" - fn: "transferOwnership" owner: *governor - Curation: + L2Curation: proxy: true init: controller: "${{Controller.address}}" bondingCurve: "${{BancorFormula.address}}" curationTokenMaster: 
"${{GraphCurationToken.address}}" - reserveRatio: 1000000 # in parts per million curationTaxPercentage: 10000 # in parts per million minimumCurationDeposit: "1000000000000000000" # in wei calls: diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index d3b6c1f1c..a8ffd9890 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -272,63 +272,6 @@ contract Curation is CurationV1Storage, GraphUpgradeable { return (signalOut, curationTax); } - /** - * @dev Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. - * This function charges no tax and can only be called by GNS in specific scenarios (for now - * only during an L1-L2 migration). - * @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal - * @param _tokensIn Amount of Graph Tokens to deposit - * @param _signalOutMin Expected minimum amount of signal to receive - * @return Signal minted - */ - function mintTaxFree( - bytes32 _subgraphDeploymentID, - uint256 _tokensIn, - uint256 _signalOutMin - ) external override notPartialPaused onlyGNS returns (uint256) { - // Need to deposit some funds - require(_tokensIn > 0, "Cannot deposit zero tokens"); - - // Exchange GRT tokens for GCS of the subgraph pool (no tax) - uint256 signalOut = _tokensToSignal(_subgraphDeploymentID, _tokensIn); - - // Slippage protection - require(signalOut >= _signalOutMin, "Slippage protection"); - - address curator = msg.sender; - CurationPool storage curationPool = pools[_subgraphDeploymentID]; - - // If it hasn't been curated before then initialize the curve - if (!isCurated(_subgraphDeploymentID)) { - curationPool.reserveRatio = defaultReserveRatio; - - // If no signal token for the pool - create one - if (address(curationPool.gcs) == address(0)) { - // Use a minimal proxy to reduce gas cost - IGraphCurationToken gcs = IGraphCurationToken(Clones.clone(curationTokenMaster)); - gcs.initialize(address(this)); - curationPool.gcs = 
gcs; - } - } - - // Trigger update rewards calculation snapshot - _updateRewards(_subgraphDeploymentID); - - // Transfer tokens from the curator to this contract - // NOTE: This needs to happen after _updateRewards snapshot as that function - // is using balanceOf(curation) - IGraphToken _graphToken = graphToken(); - TokenUtils.pullTokens(_graphToken, curator, _tokensIn); - - // Update curation pool - curationPool.tokens = curationPool.tokens.add(_tokensIn); - curationPool.gcs.mint(curator, signalOut); - - emit Signalled(curator, _subgraphDeploymentID, _tokensIn, signalOut, 0); - - return signalOut; - } - /** * @dev Return an amount of signal to get tokens back. * @notice Burn _signal from the SubgraphDeployment curation pool @@ -452,22 +395,6 @@ contract Curation is CurationV1Storage, GraphUpgradeable { return (signalOut, curationTax); } - /** - * @dev Calculate amount of signal that can be bought with tokens in a curation pool, - * without accounting for curation tax. - * @param _subgraphDeploymentID Subgraph deployment to mint signal - * @param _tokensIn Amount of tokens used to mint signal - * @return Amount of signal that can be bought and tokens subtracted for the tax - */ - function tokensToSignalNoTax(bytes32 _subgraphDeploymentID, uint256 _tokensIn) - public - view - override - returns (uint256) - { - return _tokensToSignal(_subgraphDeploymentID, _tokensIn); - } - /** * @dev Calculate amount of signal that can be bought with tokens in a curation pool. 
* @param _subgraphDeploymentID Subgraph deployment to mint signal diff --git a/contracts/curation/CurationStorage.sol b/contracts/curation/CurationStorage.sol index a530d9199..dab28771a 100644 --- a/contracts/curation/CurationStorage.sol +++ b/contracts/curation/CurationStorage.sol @@ -9,34 +9,39 @@ import { Managed } from "../governance/Managed.sol"; abstract contract CurationV1Storage is Managed, ICuration { // -- Pool -- + /** + * @dev CurationPool structure that holds the pool's state + * for a particular subgraph deployment. + */ struct CurationPool { uint256 tokens; // GRT Tokens stored as reserves for the subgraph deployment - uint32 reserveRatio; // Ratio for the bonding curve + uint32 reserveRatio; // Ratio for the bonding curve, unused in L2 IGraphCurationToken gcs; // Curation token contract for this curation pool } // -- State -- - // Tax charged when curator deposit funds - // Parts per million. (Allows for 4 decimal points, 999,999 = 99.9999%) + /// Tax charged when curatos deposit funds. + /// Parts per million. (Allows for 4 decimal points, 999,999 = 99.9999%) uint32 public override curationTaxPercentage; - // Default reserve ratio to configure curator shares bonding curve - // Parts per million. (Allows for 4 decimal points, 999,999 = 99.9999%) + /// Default reserve ratio to configure curator shares bonding curve + /// Parts per million. (Allows for 4 decimal points, 999,999 = 99.9999%). + /// Unused in L2. uint32 public defaultReserveRatio; - // Master copy address that holds implementation of curation token - // This is used as the target for GraphCurationToken clones + /// Master copy address that holds implementation of curation token. + /// @dev This is used as the target for GraphCurationToken clones. 
address public curationTokenMaster; - // Minimum amount allowed to be deposited by curators to initialize a pool - // This is the `startPoolBalance` for the bonding curve + /// Minimum amount allowed to be deposited by curators to initialize a pool + /// @dev This is the `startPoolBalance` for the bonding curve uint256 public minimumCurationDeposit; - // Bonding curve library + /// Bonding curve library address public bondingCurve; - // Mapping of subgraphDeploymentID => CurationPool - // There is only one CurationPool per SubgraphDeploymentID + /// Mapping of subgraphDeploymentID => CurationPool + /// @dev There is only one CurationPool per SubgraphDeploymentID mapping(bytes32 => CurationPool) public pools; } diff --git a/contracts/curation/ICuration.sol b/contracts/curation/ICuration.sol index 1da4252cc..9e1701aaf 100644 --- a/contracts/curation/ICuration.sol +++ b/contracts/curation/ICuration.sol @@ -23,13 +23,6 @@ interface ICuration { uint256 _signalOutMin ) external returns (uint256, uint256); - // Callable only by GNS in specific scenarios - function mintTaxFree( - bytes32 _subgraphDeploymentID, - uint256 _tokensIn, - uint256 _signalOutMin - ) external returns (uint256); - function burn( bytes32 _subgraphDeploymentID, uint256 _signalIn, @@ -56,11 +49,6 @@ interface ICuration { view returns (uint256, uint256); - function tokensToSignalNoTax(bytes32 _subgraphDeploymentID, uint256 _tokensIn) - external - view - returns (uint256); - function signalToTokens(bytes32 _subgraphDeploymentID, uint256 _signalIn) external view diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index 9fd962331..207284cfe 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -31,8 +31,6 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { // -- Constants -- - uint256 private constant MAX_UINT256 = 2**256 - 1; - // 100% in parts per million uint32 private constant MAX_PPM = 1000000; @@ -185,7 +183,7 @@ contract GNS is 
GNSV3Storage, GraphUpgradeable, IGNS, Multicall { * @dev Approve curation contract to pull funds. */ function approveAll() external override { - graphToken().approve(address(curation()), MAX_UINT256); + graphToken().approve(address(curation()), type(uint256).max); } // -- Config -- @@ -316,7 +314,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { uint256 _subgraphID, bytes32 _subgraphDeploymentID, bytes32 _versionMetadata - ) external override notPaused onlySubgraphAuth(_subgraphID) { + ) external virtual override notPaused onlySubgraphAuth(_subgraphID) { // Perform the upgrade from the current subgraph deployment to the new one. // This involves burning all signal from the old deployment and using the funds to buy // from the new deployment. @@ -559,7 +557,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { uint256 _tokens, address _owner, uint32 _curationTaxPercentage - ) private returns (uint256) { + ) internal returns (uint256) { if (_curationTaxPercentage == 0 || ownerTaxPercentage == 0) { return 0; } @@ -910,7 +908,12 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { * @param _subgraphID Subgraph ID * @return Subgraph Data */ - function _getSubgraphData(uint256 _subgraphID) internal view returns (SubgraphData storage) { + function _getSubgraphData(uint256 _subgraphID) + internal + view + virtual + returns (SubgraphData storage) + { // If there is a legacy subgraph created return it LegacySubgraphKey storage legacySubgraphKey = legacySubgraphKeys[_subgraphID]; if (legacySubgraphKey.account != address(0)) { diff --git a/contracts/l2/curation/IL2Curation.sol b/contracts/l2/curation/IL2Curation.sol new file mode 100644 index 000000000..05f2a2775 --- /dev/null +++ b/contracts/l2/curation/IL2Curation.sol @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; + +interface IL2Curation { + // Callable only by GNS in specific scenarios + function mintTaxFree( + bytes32 
_subgraphDeploymentID, + uint256 _tokensIn, + uint256 _signalOutMin + ) external returns (uint256); + + function tokensToSignalNoTax(bytes32 _subgraphDeploymentID, uint256 _tokensIn) + external + view + returns (uint256); +} diff --git a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol new file mode 100644 index 000000000..86b349650 --- /dev/null +++ b/contracts/l2/curation/L2Curation.sol @@ -0,0 +1,537 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; + +import { Address } from "@openzeppelin/contracts/utils/Address.sol"; +import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; +import { Clones } from "@openzeppelin/contracts/proxy/Clones.sol"; + +import { BancorFormula } from "../../bancor/BancorFormula.sol"; +import { GraphUpgradeable } from "../../upgrades/GraphUpgradeable.sol"; +import { TokenUtils } from "../../utils/TokenUtils.sol"; +import { IRewardsManager } from "../../rewards/IRewardsManager.sol"; +import { Managed } from "../../governance/Managed.sol"; +import { IGraphToken } from "../../token/IGraphToken.sol"; +import { CurationV1Storage } from "../../curation/CurationStorage.sol"; +import { ICuration } from "../../curation/ICuration.sol"; +import { IGraphCurationToken } from "../../curation/IGraphCurationToken.sol"; +import { GraphCurationToken } from "../../curation/GraphCurationToken.sol"; +import { IL2Curation } from "./IL2Curation.sol"; + +/** + * @title Curation contract + * @dev Allows curators to signal on subgraph deployments that might be relevant to indexers by + * staking Graph Tokens (GRT). Additionally, curators earn fees from the Query Market related to the + * subgraph deployment they curate. + * A curators deposit goes to a curation pool along with the deposits of other curators, + * only one such pool exists for each subgraph deployment. 
+ * The contract mints Graph Curation Shares (GCS) according to a bonding curve for each individual + * curation pool where GRT is deposited. + * Holders can burn GCS using this contract to get GRT tokens back according to the + * bonding curve. + */ +contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { + using SafeMath for uint256; + + // 100% in parts per million + uint32 private constant MAX_PPM = 1000000; + + // Amount of signal you get with your minimum token deposit + uint256 private constant SIGNAL_PER_MINIMUM_DEPOSIT = 1e18; // 1 signal as 18 decimal number + + // Reserve ratio for all subgraphs set to 100% for a flat bonding curve + uint32 private immutable FIXED_RESERVE_RATIO = MAX_PPM; + + // -- Events -- + + /** + * @dev Emitted when `curator` deposited `tokens` on `subgraphDeploymentID` as curation signal. + * The `curator` receives `signal` amount according to the curation pool bonding curve. + * An amount of `curationTax` will be collected and burned. + */ + event Signalled( + address indexed curator, + bytes32 indexed subgraphDeploymentID, + uint256 tokens, + uint256 signal, + uint256 curationTax + ); + + /** + * @dev Emitted when `curator` burned `signal` for a `subgraphDeploymentID`. + * The curator will receive `tokens` according to the value of the bonding curve. + */ + event Burned( + address indexed curator, + bytes32 indexed subgraphDeploymentID, + uint256 tokens, + uint256 signal + ); + + /** + * @dev Emitted when `tokens` amount were collected for `subgraphDeploymentID` as part of fees + * distributed by an indexer from query fees received from state channels. + */ + event Collected(bytes32 indexed subgraphDeploymentID, uint256 tokens); + + modifier onlyGNS() { + require(msg.sender == address(gns()), "Only the GNS can call this"); + _; + } + + /** + * @dev Initialize this contract. 
+ */ + function initialize( + address _controller, + address _bondingCurve, + address _curationTokenMaster, + uint32 _curationTaxPercentage, + uint256 _minimumCurationDeposit + ) external onlyImpl { + Managed._initialize(_controller); + + require(_bondingCurve != address(0), "Bonding curve must be set"); + bondingCurve = _bondingCurve; + + // For backwards compatibility: + defaultReserveRatio = FIXED_RESERVE_RATIO; + emit ParameterUpdated("defaultReserveRatio"); + _setCurationTaxPercentage(_curationTaxPercentage); + _setMinimumCurationDeposit(_minimumCurationDeposit); + _setCurationTokenMaster(_curationTokenMaster); + } + + /** + * @notice Set the default reserve ratio - not implemented in L2 + */ + function setDefaultReserveRatio(uint32) external override onlyGovernor { + revert("Not implemented in L2"); + } + + /** + * @dev Set the minimum deposit amount for curators. + * @notice Update the minimum deposit amount to `_minimumCurationDeposit` + * @param _minimumCurationDeposit Minimum amount of tokens required deposit + */ + function setMinimumCurationDeposit(uint256 _minimumCurationDeposit) + external + override + onlyGovernor + { + _setMinimumCurationDeposit(_minimumCurationDeposit); + } + + /** + * @dev Internal: Set the minimum deposit amount for curators. + * @notice Update the minimum deposit amount to `_minimumCurationDeposit` + * @param _minimumCurationDeposit Minimum amount of tokens required deposit + */ + function _setMinimumCurationDeposit(uint256 _minimumCurationDeposit) private { + require(_minimumCurationDeposit > 0, "Minimum curation deposit cannot be 0"); + + minimumCurationDeposit = _minimumCurationDeposit; + emit ParameterUpdated("minimumCurationDeposit"); + } + + /** + * @dev Set the curation tax percentage to charge when a curator deposits GRT tokens. 
+ * @param _percentage Curation tax percentage charged when depositing GRT tokens + */ + function setCurationTaxPercentage(uint32 _percentage) external override onlyGovernor { + _setCurationTaxPercentage(_percentage); + } + + /** + * @dev Internal: Set the curation tax percentage to charge when a curator deposits GRT tokens. + * @param _percentage Curation tax percentage charged when depositing GRT tokens + */ + function _setCurationTaxPercentage(uint32 _percentage) private { + require( + _percentage <= MAX_PPM, + "Curation tax percentage must be below or equal to MAX_PPM" + ); + + curationTaxPercentage = _percentage; + emit ParameterUpdated("curationTaxPercentage"); + } + + /** + * @dev Set the master copy to use as clones for the curation token. + * @param _curationTokenMaster Address of implementation contract to use for curation tokens + */ + function setCurationTokenMaster(address _curationTokenMaster) external override onlyGovernor { + _setCurationTokenMaster(_curationTokenMaster); + } + + /** + * @dev Internal: Set the master copy to use as clones for the curation token. + * @param _curationTokenMaster Address of implementation contract to use for curation tokens + */ + function _setCurationTokenMaster(address _curationTokenMaster) private { + require(_curationTokenMaster != address(0), "Token master must be non-empty"); + require(Address.isContract(_curationTokenMaster), "Token master must be a contract"); + + curationTokenMaster = _curationTokenMaster; + emit ParameterUpdated("curationTokenMaster"); + } + + /** + * @dev Assign Graph Tokens collected as curation fees to the curation pool reserve. + * This function can only be called by the Staking contract and will do the bookeeping of + * transferred tokens into this contract. 
+ * @param _subgraphDeploymentID SubgraphDeployment where funds should be allocated as reserves + * @param _tokens Amount of Graph Tokens to add to reserves + */ + function collect(bytes32 _subgraphDeploymentID, uint256 _tokens) external override { + // Only Staking contract is authorized as caller + require(msg.sender == address(staking()), "Caller must be the staking contract"); + + // Must be curated to accept tokens + require( + isCurated(_subgraphDeploymentID), + "Subgraph deployment must be curated to collect fees" + ); + + // Collect new funds into reserve + CurationPool storage curationPool = pools[_subgraphDeploymentID]; + curationPool.tokens = curationPool.tokens.add(_tokens); + + emit Collected(_subgraphDeploymentID, _tokens); + } + + /** + * @dev Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. + * @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal + * @param _tokensIn Amount of Graph Tokens to deposit + * @param _signalOutMin Expected minimum amount of signal to receive + * @return Signal minted and deposit tax + */ + function mint( + bytes32 _subgraphDeploymentID, + uint256 _tokensIn, + uint256 _signalOutMin + ) external override notPartialPaused returns (uint256, uint256) { + // Need to deposit some funds + require(_tokensIn > 0, "Cannot deposit zero tokens"); + + // Exchange GRT tokens for GCS of the subgraph pool + (uint256 signalOut, uint256 curationTax) = tokensToSignal(_subgraphDeploymentID, _tokensIn); + + // Slippage protection + require(signalOut >= _signalOutMin, "Slippage protection"); + + address curator = msg.sender; + CurationPool storage curationPool = pools[_subgraphDeploymentID]; + + // If it hasn't been curated before then initialize the curve + if (!isCurated(_subgraphDeploymentID)) { + curationPool.reserveRatio = FIXED_RESERVE_RATIO; + + // If no signal token for the pool - create one + if (address(curationPool.gcs) == address(0)) { + // Use a minimal proxy to reduce 
gas cost + IGraphCurationToken gcs = IGraphCurationToken(Clones.clone(curationTokenMaster)); + gcs.initialize(address(this)); + curationPool.gcs = gcs; + } + } + + // Trigger update rewards calculation snapshot + _updateRewards(_subgraphDeploymentID); + + // Transfer tokens from the curator to this contract + // Burn the curation tax + // NOTE: This needs to happen after _updateRewards snapshot as that function + // is using balanceOf(curation) + IGraphToken _graphToken = graphToken(); + TokenUtils.pullTokens(_graphToken, curator, _tokensIn); + TokenUtils.burnTokens(_graphToken, curationTax); + + // Update curation pool + curationPool.tokens = curationPool.tokens.add(_tokensIn.sub(curationTax)); + curationPool.gcs.mint(curator, signalOut); + + emit Signalled(curator, _subgraphDeploymentID, _tokensIn, signalOut, curationTax); + + return (signalOut, curationTax); + } + + /** + * @dev Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. + * This function charges no tax and can only be called by GNS in specific scenarios (for now + * only during an L1-L2 migration). 
+ * @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal + * @param _tokensIn Amount of Graph Tokens to deposit + * @param _signalOutMin Expected minimum amount of signal to receive + * @return Signal minted + */ + function mintTaxFree( + bytes32 _subgraphDeploymentID, + uint256 _tokensIn, + uint256 _signalOutMin + ) external override notPartialPaused onlyGNS returns (uint256) { + // Need to deposit some funds + require(_tokensIn > 0, "Cannot deposit zero tokens"); + + // Exchange GRT tokens for GCS of the subgraph pool (no tax) + uint256 signalOut = _tokensToSignal(_subgraphDeploymentID, _tokensIn); + + // Slippage protection + require(signalOut >= _signalOutMin, "Slippage protection"); + + address curator = msg.sender; + CurationPool storage curationPool = pools[_subgraphDeploymentID]; + + // If it hasn't been curated before then initialize the curve + if (!isCurated(_subgraphDeploymentID)) { + curationPool.reserveRatio = FIXED_RESERVE_RATIO; + + // If no signal token for the pool - create one + if (address(curationPool.gcs) == address(0)) { + // Use a minimal proxy to reduce gas cost + IGraphCurationToken gcs = IGraphCurationToken(Clones.clone(curationTokenMaster)); + gcs.initialize(address(this)); + curationPool.gcs = gcs; + } + } + + // Trigger update rewards calculation snapshot + _updateRewards(_subgraphDeploymentID); + + // Transfer tokens from the curator to this contract + // NOTE: This needs to happen after _updateRewards snapshot as that function + // is using balanceOf(curation) + IGraphToken _graphToken = graphToken(); + TokenUtils.pullTokens(_graphToken, curator, _tokensIn); + + // Update curation pool + curationPool.tokens = curationPool.tokens.add(_tokensIn); + curationPool.gcs.mint(curator, signalOut); + + emit Signalled(curator, _subgraphDeploymentID, _tokensIn, signalOut, 0); + + return signalOut; + } + + /** + * @dev Return an amount of signal to get tokens back. 
+ * @notice Burn _signal from the SubgraphDeployment curation pool + * @param _subgraphDeploymentID SubgraphDeployment the curator is returning signal + * @param _signalIn Amount of signal to return + * @param _tokensOutMin Expected minimum amount of tokens to receive + * @return Tokens returned + */ + function burn( + bytes32 _subgraphDeploymentID, + uint256 _signalIn, + uint256 _tokensOutMin + ) external override notPartialPaused returns (uint256) { + address curator = msg.sender; + + // Validations + require(_signalIn > 0, "Cannot burn zero signal"); + require( + getCuratorSignal(curator, _subgraphDeploymentID) >= _signalIn, + "Cannot burn more signal than you own" + ); + + // Get the amount of tokens to refund based on returned signal + uint256 tokensOut = signalToTokens(_subgraphDeploymentID, _signalIn); + + // Slippage protection + require(tokensOut >= _tokensOutMin, "Slippage protection"); + + // Trigger update rewards calculation + _updateRewards(_subgraphDeploymentID); + + // Update curation pool + CurationPool storage curationPool = pools[_subgraphDeploymentID]; + curationPool.tokens = curationPool.tokens.sub(tokensOut); + curationPool.gcs.burnFrom(curator, _signalIn); + + // If all signal burnt delete the curation pool except for the + // curation token contract to avoid recreating it on a new mint + if (getCurationPoolSignal(_subgraphDeploymentID) == 0) { + curationPool.tokens = 0; + } + + // Return the tokens to the curator + TokenUtils.pushTokens(graphToken(), curator, tokensOut); + + emit Burned(curator, _subgraphDeploymentID, tokensOut, _signalIn); + + return tokensOut; + } + + /** + * @dev Check if any GRT tokens are deposited for a SubgraphDeployment. 
+ * @param _subgraphDeploymentID SubgraphDeployment to check if curated + * @return True if curated + */ + function isCurated(bytes32 _subgraphDeploymentID) public view override returns (bool) { + return pools[_subgraphDeploymentID].tokens > 0; + } + + /** + * @dev Get the amount of signal a curator has in a curation pool. + * @param _curator Curator owning the signal tokens + * @param _subgraphDeploymentID Subgraph deployment curation pool + * @return Amount of signal owned by a curator for the subgraph deployment + */ + function getCuratorSignal(address _curator, bytes32 _subgraphDeploymentID) + public + view + override + returns (uint256) + { + IGraphCurationToken gcs = pools[_subgraphDeploymentID].gcs; + return (address(gcs) == address(0)) ? 0 : gcs.balanceOf(_curator); + } + + /** + * @dev Get the amount of signal in a curation pool. + * @param _subgraphDeploymentID Subgraph deployment curation poool + * @return Amount of signal minted for the subgraph deployment + */ + function getCurationPoolSignal(bytes32 _subgraphDeploymentID) + public + view + override + returns (uint256) + { + IGraphCurationToken gcs = pools[_subgraphDeploymentID].gcs; + return (address(gcs) == address(0)) ? 0 : gcs.totalSupply(); + } + + /** + * @dev Get the amount of token reserves in a curation pool. + * @param _subgraphDeploymentID Subgraph deployment curation poool + * @return Amount of token reserves in the curation pool + */ + function getCurationPoolTokens(bytes32 _subgraphDeploymentID) + external + view + override + returns (uint256) + { + return pools[_subgraphDeploymentID].tokens; + } + + /** + * @dev Calculate amount of signal that can be bought with tokens in a curation pool. + * This function considers and excludes the deposit tax. 
+ * @param _subgraphDeploymentID Subgraph deployment to mint signal + * @param _tokensIn Amount of tokens used to mint signal + * @return Amount of signal that can be bought and tokens subtracted for the tax + */ + function tokensToSignal(bytes32 _subgraphDeploymentID, uint256 _tokensIn) + public + view + override + returns (uint256, uint256) + { + uint256 curationTax = _tokensIn.mul(uint256(curationTaxPercentage)).div(MAX_PPM); + uint256 signalOut = _tokensToSignal(_subgraphDeploymentID, _tokensIn.sub(curationTax)); + return (signalOut, curationTax); + } + + /** + * @dev Calculate amount of signal that can be bought with tokens in a curation pool, + * without accounting for curation tax. + * @param _subgraphDeploymentID Subgraph deployment to mint signal + * @param _tokensIn Amount of tokens used to mint signal + * @return Amount of signal that can be bought and tokens subtracted for the tax + */ + function tokensToSignalNoTax(bytes32 _subgraphDeploymentID, uint256 _tokensIn) + public + view + override + returns (uint256) + { + return _tokensToSignal(_subgraphDeploymentID, _tokensIn); + } + + /** + * @dev Calculate amount of signal that can be bought with tokens in a curation pool. 
+ * @param _subgraphDeploymentID Subgraph deployment to mint signal + * @param _tokensIn Amount of tokens used to mint signal + * @return Amount of signal that can be bought with tokens + */ + function _tokensToSignal(bytes32 _subgraphDeploymentID, uint256 _tokensIn) + private + view + returns (uint256) + { + // Get curation pool tokens and signal + CurationPool memory curationPool = pools[_subgraphDeploymentID]; + + // Init curation pool + if (curationPool.tokens == 0) { + require( + _tokensIn >= minimumCurationDeposit, + "Curation deposit is below minimum required" + ); + return + BancorFormula(bondingCurve) + .calculatePurchaseReturn( + SIGNAL_PER_MINIMUM_DEPOSIT, + minimumCurationDeposit, + FIXED_RESERVE_RATIO, + _tokensIn.sub(minimumCurationDeposit) + ) + .add(SIGNAL_PER_MINIMUM_DEPOSIT); + } + + return + BancorFormula(bondingCurve).calculatePurchaseReturn( + getCurationPoolSignal(_subgraphDeploymentID), + curationPool.tokens, + FIXED_RESERVE_RATIO, + _tokensIn + ); + } + + /** + * @dev Calculate number of tokens to get when burning signal from a curation pool. + * @param _subgraphDeploymentID Subgraph deployment to burn signal + * @param _signalIn Amount of signal to burn + * @return Amount of tokens to get for an amount of signal + */ + function signalToTokens(bytes32 _subgraphDeploymentID, uint256 _signalIn) + public + view + override + returns (uint256) + { + CurationPool memory curationPool = pools[_subgraphDeploymentID]; + uint256 curationPoolSignal = getCurationPoolSignal(_subgraphDeploymentID); + require( + curationPool.tokens > 0, + "Subgraph deployment must be curated to perform calculations" + ); + require( + curationPoolSignal >= _signalIn, + "Signal must be above or equal to signal issued in the curation pool" + ); + + return + BancorFormula(bondingCurve).calculateSaleReturn( + curationPoolSignal, + curationPool.tokens, + FIXED_RESERVE_RATIO, + _signalIn + ); + } + + /** + * @dev Triggers an update of rewards due to a change in signal. 
+ * @param _subgraphDeploymentID Subgraph deployment updated + */ + function _updateRewards(bytes32 _subgraphDeploymentID) private { + IRewardsManager rewardsManager = rewardsManager(); + if (address(rewardsManager) != address(0)) { + rewardsManager.onSubgraphSignalUpdate(_subgraphDeploymentID); + } + } +} diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 9d5cecd68..7dd03e604 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -16,6 +16,8 @@ import { L2GNSV1Storage } from "./L2GNSStorage.sol"; import { RLPReader } from "../../libraries/RLPReader.sol"; import { StateProofVerifier as Verifier } from "../../libraries/StateProofVerifier.sol"; +import { IL2Curation } from "../curation/IL2Curation.sol"; + /** * @title L2GNS * @dev The Graph Name System contract provides a decentralized naming system for subgraphs @@ -130,11 +132,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { // New subgraph deployment must be non-empty require(_subgraphDeploymentID != 0, "GNS: deploymentID != 0"); - // This is to prevent the owner from front running its name curators signal by posting - // its own signal ahead, bringing the name curators in, and dumping on them - ICuration curation = curation(); - require(!curation.isCurated(_subgraphDeploymentID), "GNS: Deployment pre-curated"); - + IL2Curation curation = IL2Curation(address(curation())); // Update pool: constant nSignal, vSignal can change (w/no slippage protection) // Buy all signal from the new deployment subgraphData.vSignal = curation.mintTaxFree(_subgraphDeploymentID, migratedData.tokens, 0); @@ -316,4 +314,92 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { emit SubgraphReceivedFromL1(_subgraphID); } + + /** + * @notice Publish a new version of an existing subgraph. 
+ * @dev This is the same as the one in the base GNS, but skips the check for + * a subgraph to not be pre-curated, as the reserve ration in L2 is set to 1, + * which prevents the risk of rug-pulling. + * @param _subgraphID Subgraph ID + * @param _subgraphDeploymentID Subgraph deployment ID of the new version + * @param _versionMetadata IPFS hash for the subgraph version metadata + */ + function publishNewVersion( + uint256 _subgraphID, + bytes32 _subgraphDeploymentID, + bytes32 _versionMetadata + ) external override notPaused onlySubgraphAuth(_subgraphID) { + // Perform the upgrade from the current subgraph deployment to the new one. + // This involves burning all signal from the old deployment and using the funds to buy + // from the new deployment. + // This will also make the change to target to the new deployment. + + // Subgraph check + SubgraphData storage subgraphData = _getSubgraphOrRevert(_subgraphID); + + // New subgraph deployment must be non-empty + require(_subgraphDeploymentID != 0, "GNS: Cannot set deploymentID to 0 in publish"); + + // New subgraph deployment must be different than current + require( + _subgraphDeploymentID != subgraphData.subgraphDeploymentID, + "GNS: Cannot publish a new version with the same subgraph deployment ID" + ); + + ICuration curation = curation(); + + // Move all signal from previous version to new version + // NOTE: We will only do this as long as there is signal on the subgraph + if (subgraphData.nSignal > 0) { + // Burn all version signal in the name pool for tokens (w/no slippage protection) + // Sell all signal from the old deployment + uint256 tokens = curation.burn( + subgraphData.subgraphDeploymentID, + subgraphData.vSignal, + 0 + ); + + // Take the owner cut of the curation tax, add it to the total + // Upgrade is only callable by the owner, we assume then that msg.sender = owner + address subgraphOwner = msg.sender; + uint256 tokensWithTax = _chargeOwnerTax( + tokens, + subgraphOwner, + 
curation.curationTaxPercentage() + ); + + // Update pool: constant nSignal, vSignal can change (w/no slippage protection) + // Buy all signal from the new deployment + (subgraphData.vSignal, ) = curation.mint(_subgraphDeploymentID, tokensWithTax, 0); + + emit SubgraphUpgraded( + _subgraphID, + subgraphData.vSignal, + tokensWithTax, + _subgraphDeploymentID + ); + } + + // Update target deployment + subgraphData.subgraphDeploymentID = _subgraphDeploymentID; + + emit SubgraphVersionUpdated(_subgraphID, _subgraphDeploymentID, _versionMetadata); + } + + /** + * @dev Get subgraph data. + * Since there are no legacy subgraphs in L2, we override the base + * GNS method to save us the step of checking for legacy subgraphs. + * @param _subgraphID Subgraph ID + * @return Subgraph Data + */ + function _getSubgraphData(uint256 _subgraphID) + internal + view + override + returns (SubgraphData storage) + { + // Return new subgraph type + return subgraphs[_subgraphID]; + } } diff --git a/test/curation/curation.test.ts b/test/curation/curation.test.ts index 3e9d374a7..51b739f44 100644 --- a/test/curation/curation.test.ts +++ b/test/curation/curation.test.ts @@ -137,44 +137,6 @@ describe('Curation', () => { expect(afterTokenTotalSupply).eq(beforeTokenTotalSupply.sub(curationTax)) } - const shouldMintTaxFree = async (tokensToDeposit: BigNumber, expectedSignal: BigNumber) => { - // Before state - const beforeTokenTotalSupply = await grt.totalSupply() - const beforeCuratorTokens = await grt.balanceOf(gns.address) - const beforeCuratorSignal = await curation.getCuratorSignal(gns.address, subgraphDeploymentID) - const beforePool = await curation.pools(subgraphDeploymentID) - const beforePoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) - const beforeTotalTokens = await grt.balanceOf(curation.address) - - // Curate - const tx = curation - .connect(gnsImpersonator) - .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) - await expect(tx) - .emit(curation, 
'Signalled') - .withArgs(gns.address, subgraphDeploymentID, tokensToDeposit, expectedSignal, 0) - - // After state - const afterTokenTotalSupply = await grt.totalSupply() - const afterCuratorTokens = await grt.balanceOf(gns.address) - const afterCuratorSignal = await curation.getCuratorSignal(gns.address, subgraphDeploymentID) - const afterPool = await curation.pools(subgraphDeploymentID) - const afterPoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) - const afterTotalTokens = await grt.balanceOf(curation.address) - - // Curator balance updated - expect(afterCuratorTokens).eq(beforeCuratorTokens.sub(tokensToDeposit)) - expect(afterCuratorSignal).eq(beforeCuratorSignal.add(expectedSignal)) - // Allocated and balance updated - expect(afterPool.tokens).eq(beforePool.tokens.add(tokensToDeposit)) - expect(afterPoolSignal).eq(beforePoolSignal.add(expectedSignal)) - expect(afterPool.reserveRatio).eq(await curation.defaultReserveRatio()) - // Contract balance updated - expect(afterTotalTokens).eq(beforeTotalTokens.add(tokensToDeposit)) - // Total supply is reduced to curation tax burning - expect(afterTokenTotalSupply).eq(beforeTokenTotalSupply) - } - const shouldBurn = async (signalToRedeem: BigNumber, expectedTokens: BigNumber) => { // Before balances const beforeTokenTotalSupply = await grt.totalSupply() @@ -359,58 +321,6 @@ describe('Curation', () => { }) }) - describe('curate tax free (from GNS)', async function () { - it('can not be called by anyone other than GNS', async function () { - const tokensToDeposit = await curation.minimumCurationDeposit() - const tx = curation - .connect(curator.signer) - .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) - await expect(tx).revertedWith('Only the GNS can call this') - }) - - it('reject deposit below minimum tokens required', async function () { - const tokensToDeposit = (await curation.minimumCurationDeposit()).sub(toBN(1)) - const tx = curation - .connect(gnsImpersonator) - 
.mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) - await expect(tx).revertedWith('Curation deposit is below minimum required') - }) - - it('should deposit on a subgraph deployment', async function () { - const tokensToDeposit = await curation.minimumCurationDeposit() - const expectedSignal = toGRT('1') - await shouldMintTaxFree(tokensToDeposit, expectedSignal) - }) - - it('should get signal according to bonding curve', async function () { - const tokensToDeposit = toGRT('1000') - const expectedSignal = signalAmountFor1000Tokens - await shouldMintTaxFree(tokensToDeposit, expectedSignal) - }) - - it('should get signal according to bonding curve (and with zero tax)', async function () { - // Set curation tax - await curation.connect(governor.signer).setCurationTaxPercentage(50000) // 5% - - // Mint - const tokensToDeposit = toGRT('1000') - const expectedSignal = await curation.tokensToSignalNoTax( - subgraphDeploymentID, - tokensToDeposit, - ) - await shouldMintTaxFree(tokensToDeposit, expectedSignal) - }) - - it('should revert curate if over slippage', async function () { - const tokensToDeposit = toGRT('1000') - const expectedSignal = signalAmountFor1000Tokens - const tx = curation - .connect(gnsImpersonator) - .mintTaxFree(subgraphDeploymentID, tokensToDeposit, expectedSignal.add(1)) - await expect(tx).revertedWith('Slippage protection') - }) - }) - describe('collect', async function () { context('> not curated', async function () { it('reject collect tokens distributed to the curation pool', async function () { diff --git a/test/l2/l2Curation.test.ts b/test/l2/l2Curation.test.ts new file mode 100644 index 000000000..ec54eb633 --- /dev/null +++ b/test/l2/l2Curation.test.ts @@ -0,0 +1,790 @@ +import { expect } from 'chai' +import { utils, BigNumber, Event, Signer, constants } from 'ethers' + +import { L2Curation } from '../../build/types/L2Curation' +import { GraphToken } from '../../build/types/GraphToken' +import { Controller } from 
'../../build/types/Controller' +import { defaults } from '../lib/deployment' + +import { NetworkFixture } from '../lib/fixtures' +import { + getAccounts, + randomHexBytes, + toBN, + toGRT, + formatGRT, + Account, + impersonateAccount, + setAccountBalance, + randomAddress, +} from '../lib/testHelpers' +import { GNS } from '../../build/types/GNS' +import { parseEther, toUtf8String } from 'ethers/lib/utils' + +const { AddressZero } = constants + +const MAX_PPM = 1000000 + +const chunkify = (total: BigNumber, maxChunks = 10): Array => { + const chunks = [] + while (total.gt(0) && maxChunks > 0) { + const m = 1000000 + const p = Math.floor(Math.random() * m) + const n = total.mul(p).div(m) + chunks.push(n) + total = total.sub(n) + maxChunks-- + } + if (total.gt(0)) { + chunks.push(total) + } + return chunks +} + +const toFloat = (n: BigNumber) => parseFloat(formatGRT(n)) +const toRound = (n: number) => n.toFixed(12) + +describe('L2Curation:Config', () => { + let me: Account + let governor: Account + + let fixture: NetworkFixture + + let curation: L2Curation + + before(async function () { + ;[me, governor] = await getAccounts() + + fixture = new NetworkFixture() + ;({ curation } = await fixture.loadL2(governor.signer)) + }) + + beforeEach(async function () { + await fixture.setUp() + }) + + afterEach(async function () { + await fixture.tearDown() + }) + + describe('defaultReserveRatio', function () { + it('should be fixed to MAX_PPM', async function () { + // Set right in the constructor + expect(await curation.defaultReserveRatio()).eq(MAX_PPM) + }) + }) + + describe('minimumCurationDeposit', function () { + it('should set `minimumCurationDeposit`', async function () { + // Set right in the constructor + expect(await curation.minimumCurationDeposit()).eq(defaults.curation.minimumCurationDeposit) + + // Can set if allowed + const newValue = toBN('100') + await curation.connect(governor.signer).setMinimumCurationDeposit(newValue) + expect(await 
curation.minimumCurationDeposit()).eq(newValue) + }) + + it('reject set `minimumCurationDeposit` if out of bounds', async function () { + const tx = curation.connect(governor.signer).setMinimumCurationDeposit(0) + await expect(tx).revertedWith('Minimum curation deposit cannot be 0') + }) + + it('reject set `minimumCurationDeposit` if not allowed', async function () { + const tx = curation + .connect(me.signer) + .setMinimumCurationDeposit(defaults.curation.minimumCurationDeposit) + await expect(tx).revertedWith('Only Controller governor') + }) + }) + + describe('curationTaxPercentage', function () { + it('should set `curationTaxPercentage`', async function () { + const curationTaxPercentage = defaults.curation.curationTaxPercentage + + // Set new value + await curation.connect(governor.signer).setCurationTaxPercentage(0) + await curation.connect(governor.signer).setCurationTaxPercentage(curationTaxPercentage) + }) + + it('reject set `curationTaxPercentage` if out of bounds', async function () { + const tx = curation.connect(governor.signer).setCurationTaxPercentage(MAX_PPM + 1) + await expect(tx).revertedWith('Curation tax percentage must be below or equal to MAX_PPM') + }) + + it('reject set `curationTaxPercentage` if not allowed', async function () { + const tx = curation.connect(me.signer).setCurationTaxPercentage(0) + await expect(tx).revertedWith('Only Controller governor') + }) + }) + + describe('curationTokenMaster', function () { + it('should set `curationTokenMaster`', async function () { + const newCurationTokenMaster = curation.address + await curation.connect(governor.signer).setCurationTokenMaster(newCurationTokenMaster) + }) + + it('reject set `curationTokenMaster` to empty value', async function () { + const newCurationTokenMaster = AddressZero + const tx = curation.connect(governor.signer).setCurationTokenMaster(newCurationTokenMaster) + await expect(tx).revertedWith('Token master must be non-empty') + }) + + it('reject set `curationTokenMaster` to 
non-contract', async function () { + const newCurationTokenMaster = randomAddress() + const tx = curation.connect(governor.signer).setCurationTokenMaster(newCurationTokenMaster) + await expect(tx).revertedWith('Token master must be a contract') + }) + + it('reject set `curationTokenMaster` if not allowed', async function () { + const newCurationTokenMaster = curation.address + const tx = curation.connect(me.signer).setCurationTokenMaster(newCurationTokenMaster) + await expect(tx).revertedWith('Only Controller governor') + }) + }) +}) + +describe('L2Curation', () => { + let me: Account + let governor: Account + let curator: Account + let stakingMock: Account + let gnsImpersonator: Signer + + let fixture: NetworkFixture + + let curation: L2Curation + let grt: GraphToken + let controller: Controller + let gns: GNS + + // Test values + const signalAmountFor1000Tokens = toGRT('10.0') + const subgraphDeploymentID = randomHexBytes() + const curatorTokens = toGRT('1000000000') + const tokensToDeposit = toGRT('1000') + const tokensToCollect = toGRT('2000') + + async function calcBondingCurve( + supply: BigNumber, + reserveBalance: BigNumber, + reserveRatio: number, + depositAmount: BigNumber, + ): Promise { + // Handle the initialization of the bonding curve + if (supply.eq(0)) { + const minDeposit = await curation.minimumCurationDeposit() + if (depositAmount.lt(minDeposit)) { + throw new Error('deposit must be above minimum') + } + const defaultReserveRatio = await curation.defaultReserveRatio() + const minSupply = toGRT('1') + return ( + (await calcBondingCurve( + minSupply, + minDeposit, + defaultReserveRatio, + depositAmount.sub(minDeposit), + )) + toFloat(minSupply) + ) + } + // Calculate bonding curve in the test + return ( + toFloat(supply) * + ((1 + toFloat(depositAmount) / toFloat(reserveBalance)) ** (reserveRatio / 1000000) - 1) + ) + } + + const shouldMint = async (tokensToDeposit: BigNumber, expectedSignal: BigNumber) => { + // Before state + const 
beforeTokenTotalSupply = await grt.totalSupply() + const beforeCuratorTokens = await grt.balanceOf(curator.address) + const beforeCuratorSignal = await curation.getCuratorSignal( + curator.address, + subgraphDeploymentID, + ) + const beforePool = await curation.pools(subgraphDeploymentID) + const beforePoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) + const beforeTotalTokens = await grt.balanceOf(curation.address) + + // Calculations + const curationTaxPercentage = await curation.curationTaxPercentage() + const curationTax = tokensToDeposit.mul(toBN(curationTaxPercentage)).div(toBN(MAX_PPM)) + + // Curate + const tx = curation.connect(curator.signer).mint(subgraphDeploymentID, tokensToDeposit, 0) + await expect(tx) + .emit(curation, 'Signalled') + .withArgs(curator.address, subgraphDeploymentID, tokensToDeposit, expectedSignal, curationTax) + + // After state + const afterTokenTotalSupply = await grt.totalSupply() + const afterCuratorTokens = await grt.balanceOf(curator.address) + const afterCuratorSignal = await curation.getCuratorSignal( + curator.address, + subgraphDeploymentID, + ) + const afterPool = await curation.pools(subgraphDeploymentID) + const afterPoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) + const afterTotalTokens = await grt.balanceOf(curation.address) + + // Curator balance updated + expect(afterCuratorTokens).eq(beforeCuratorTokens.sub(tokensToDeposit)) + expect(afterCuratorSignal).eq(beforeCuratorSignal.add(expectedSignal)) + // Allocated and balance updated + expect(afterPool.tokens).eq(beforePool.tokens.add(tokensToDeposit.sub(curationTax))) + expect(afterPoolSignal).eq(beforePoolSignal.add(expectedSignal)) + expect(afterPool.reserveRatio).eq(await curation.defaultReserveRatio()) + // Contract balance updated + expect(afterTotalTokens).eq(beforeTotalTokens.add(tokensToDeposit.sub(curationTax))) + // Total supply is reduced to curation tax burning + 
expect(afterTokenTotalSupply).eq(beforeTokenTotalSupply.sub(curationTax)) + } + + const shouldMintTaxFree = async (tokensToDeposit: BigNumber, expectedSignal: BigNumber) => { + // Before state + const beforeTokenTotalSupply = await grt.totalSupply() + const beforeCuratorTokens = await grt.balanceOf(gns.address) + const beforeCuratorSignal = await curation.getCuratorSignal(gns.address, subgraphDeploymentID) + const beforePool = await curation.pools(subgraphDeploymentID) + const beforePoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) + const beforeTotalTokens = await grt.balanceOf(curation.address) + + // Curate + const tx = curation + .connect(gnsImpersonator) + .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) + await expect(tx) + .emit(curation, 'Signalled') + .withArgs(gns.address, subgraphDeploymentID, tokensToDeposit, expectedSignal, 0) + + // After state + const afterTokenTotalSupply = await grt.totalSupply() + const afterCuratorTokens = await grt.balanceOf(gns.address) + const afterCuratorSignal = await curation.getCuratorSignal(gns.address, subgraphDeploymentID) + const afterPool = await curation.pools(subgraphDeploymentID) + const afterPoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) + const afterTotalTokens = await grt.balanceOf(curation.address) + + // Curator balance updated + expect(afterCuratorTokens).eq(beforeCuratorTokens.sub(tokensToDeposit)) + expect(afterCuratorSignal).eq(beforeCuratorSignal.add(expectedSignal)) + // Allocated and balance updated + expect(afterPool.tokens).eq(beforePool.tokens.add(tokensToDeposit)) + expect(afterPoolSignal).eq(beforePoolSignal.add(expectedSignal)) + expect(afterPool.reserveRatio).eq(await curation.defaultReserveRatio()) + // Contract balance updated + expect(afterTotalTokens).eq(beforeTotalTokens.add(tokensToDeposit)) + // Total supply is conserved since no curation tax is burned + expect(afterTokenTotalSupply).eq(beforeTokenTotalSupply) + } + + const shouldBurn = async 
(signalToRedeem: BigNumber, expectedTokens: BigNumber) => { + // Before balances + const beforeTokenTotalSupply = await grt.totalSupply() + const beforeCuratorTokens = await grt.balanceOf(curator.address) + const beforeCuratorSignal = await curation.getCuratorSignal( + curator.address, + subgraphDeploymentID, + ) + const beforePool = await curation.pools(subgraphDeploymentID) + const beforePoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) + const beforeTotalTokens = await grt.balanceOf(curation.address) + + // Redeem + const tx = curation.connect(curator.signer).burn(subgraphDeploymentID, signalToRedeem, 0) + await expect(tx) + .emit(curation, 'Burned') + .withArgs(curator.address, subgraphDeploymentID, expectedTokens, signalToRedeem) + + // After balances + const afterTokenTotalSupply = await grt.totalSupply() + const afterCuratorTokens = await grt.balanceOf(curator.address) + const afterCuratorSignal = await curation.getCuratorSignal( + curator.address, + subgraphDeploymentID, + ) + const afterPool = await curation.pools(subgraphDeploymentID) + const afterPoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) + const afterTotalTokens = await grt.balanceOf(curation.address) + + // Curator balance updated + expect(afterCuratorTokens).eq(beforeCuratorTokens.add(expectedTokens)) + expect(afterCuratorSignal).eq(beforeCuratorSignal.sub(signalToRedeem)) + // Curation balance updated + expect(afterPool.tokens).eq(beforePool.tokens.sub(expectedTokens)) + expect(afterPoolSignal).eq(beforePoolSignal.sub(signalToRedeem)) + // Contract balance updated + expect(afterTotalTokens).eq(beforeTotalTokens.sub(expectedTokens)) + // Total supply is conserved + expect(afterTokenTotalSupply).eq(beforeTokenTotalSupply) + } + + const shouldCollect = async (tokensToCollect: BigNumber) => { + // Before state + const beforePool = await curation.pools(subgraphDeploymentID) + const beforeTotalBalance = await grt.balanceOf(curation.address) + + // Source 
of tokens must be the staking for this to work + await grt.connect(stakingMock.signer).transfer(curation.address, tokensToCollect) + const tx = curation.connect(stakingMock.signer).collect(subgraphDeploymentID, tokensToCollect) + await expect(tx).emit(curation, 'Collected').withArgs(subgraphDeploymentID, tokensToCollect) + + // After state + const afterPool = await curation.pools(subgraphDeploymentID) + const afterTotalBalance = await grt.balanceOf(curation.address) + + // State updated + expect(afterPool.tokens).eq(beforePool.tokens.add(tokensToCollect)) + expect(afterTotalBalance).eq(beforeTotalBalance.add(tokensToCollect)) + } + + before(async function () { + // Use stakingMock so we can call collect + ;[me, governor, curator, stakingMock] = await getAccounts() + + fixture = new NetworkFixture() + ;({ controller, curation, grt, gns } = await fixture.loadL2(governor.signer)) + + gnsImpersonator = await impersonateAccount(gns.address) + await setAccountBalance(gns.address, parseEther('1')) + // Give some funds to the curator and GNS impersonator and approve the curation contract + await grt.connect(governor.signer).mint(curator.address, curatorTokens) + await grt.connect(curator.signer).approve(curation.address, curatorTokens) + await grt.connect(governor.signer).mint(gns.address, curatorTokens) + await grt.connect(gnsImpersonator).approve(curation.address, curatorTokens) + + // Give some funds to the staking contract and approve the curation contract + await grt.connect(governor.signer).mint(stakingMock.address, tokensToCollect) + await grt.connect(stakingMock.signer).approve(curation.address, tokensToCollect) + }) + + beforeEach(async function () { + await fixture.setUp() + }) + + afterEach(async function () { + await fixture.tearDown() + }) + + describe('bonding curve', function () { + const tokensToDeposit = curatorTokens + + it('reject convert signal to tokens if subgraph deployment not initted', async function () { + const tx = 
curation.signalToTokens(subgraphDeploymentID, toGRT('100')) + await expect(tx).revertedWith('Subgraph deployment must be curated to perform calculations') + }) + + it('convert signal to tokens', async function () { + // Curate + await curation.connect(curator.signer).mint(subgraphDeploymentID, tokensToDeposit, 0) + + // Conversion + const signal = await curation.getCurationPoolSignal(subgraphDeploymentID) + const expectedTokens = await curation.signalToTokens(subgraphDeploymentID, signal) + expect(expectedTokens).eq(tokensToDeposit) + }) + + it('convert signal to tokens (with curation tax)', async function () { + // Set curation tax + const curationTaxPercentage = 50000 // 5% + await curation.connect(governor.signer).setCurationTaxPercentage(curationTaxPercentage) + + // Curate + const expectedCurationTax = tokensToDeposit.mul(curationTaxPercentage).div(MAX_PPM) + const { 1: curationTax } = await curation.tokensToSignal( + subgraphDeploymentID, + tokensToDeposit, + ) + await curation.connect(curator.signer).mint(subgraphDeploymentID, tokensToDeposit, 0) + + // Conversion + const signal = await curation.getCurationPoolSignal(subgraphDeploymentID) + const tokens = await curation.signalToTokens(subgraphDeploymentID, signal) + expect(tokens).eq(tokensToDeposit.sub(expectedCurationTax)) + expect(expectedCurationTax).eq(curationTax) + }) + + it('convert tokens to signal', async function () { + // Conversion + const tokens = toGRT('1000') + const { 0: signal } = await curation.tokensToSignal(subgraphDeploymentID, tokens) + expect(signal).eq(signalAmountFor1000Tokens) + }) + + it('convert tokens to signal if non-curated subgraph', async function () { + // Conversion + const nonCuratedSubgraphDeploymentID = randomHexBytes() + const tokens = toGRT('1') + const tx = curation.tokensToSignal(nonCuratedSubgraphDeploymentID, tokens) + await expect(tx).revertedWith('Curation deposit is below minimum required') + }) + }) + + describe('curate', async function () { + it('reject 
deposit below minimum tokens required', async function () { + const tokensToDeposit = (await curation.minimumCurationDeposit()).sub(toBN(1)) + const tx = curation.connect(curator.signer).mint(subgraphDeploymentID, tokensToDeposit, 0) + await expect(tx).revertedWith('Curation deposit is below minimum required') + }) + + it('should deposit on a subgraph deployment', async function () { + const tokensToDeposit = await curation.minimumCurationDeposit() + const expectedSignal = toGRT('1') + await shouldMint(tokensToDeposit, expectedSignal) + }) + + it('should get signal according to bonding curve', async function () { + const tokensToDeposit = toGRT('1000') + const expectedSignal = signalAmountFor1000Tokens + await shouldMint(tokensToDeposit, expectedSignal) + }) + + it('should get signal according to bonding curve (and account for curation tax)', async function () { + // Set curation tax + await curation.connect(governor.signer).setCurationTaxPercentage(50000) // 5% + + // Mint + const tokensToDeposit = toGRT('1000') + const { 0: expectedSignal } = await curation.tokensToSignal( + subgraphDeploymentID, + tokensToDeposit, + ) + await shouldMint(tokensToDeposit, expectedSignal) + }) + + it('should revert curate if over slippage', async function () { + const tokensToDeposit = toGRT('1000') + const expectedSignal = signalAmountFor1000Tokens + const tx = curation + .connect(curator.signer) + .mint(subgraphDeploymentID, tokensToDeposit, expectedSignal.add(1)) + await expect(tx).revertedWith('Slippage protection') + }) + }) + + describe('curate tax free (from GNS)', async function () { + it('can not be called by anyone other than GNS', async function () { + const tokensToDeposit = await curation.minimumCurationDeposit() + const tx = curation + .connect(curator.signer) + .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) + await expect(tx).revertedWith('Only the GNS can call this') + }) + + it('reject deposit below minimum tokens required', async function () { + const 
tokensToDeposit = (await curation.minimumCurationDeposit()).sub(toBN(1)) + const tx = curation + .connect(gnsImpersonator) + .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) + await expect(tx).revertedWith('Curation deposit is below minimum required') + }) + + it('should deposit on a subgraph deployment', async function () { + const tokensToDeposit = await curation.minimumCurationDeposit() + const expectedSignal = toGRT('1') + await shouldMintTaxFree(tokensToDeposit, expectedSignal) + }) + + it('should get signal according to bonding curve', async function () { + const tokensToDeposit = toGRT('1000') + const expectedSignal = signalAmountFor1000Tokens + await shouldMintTaxFree(tokensToDeposit, expectedSignal) + }) + + it('should get signal according to bonding curve (and with zero tax)', async function () { + // Set curation tax + await curation.connect(governor.signer).setCurationTaxPercentage(50000) // 5% + + // Mint + const tokensToDeposit = toGRT('1000') + const expectedSignal = await curation.tokensToSignalNoTax( + subgraphDeploymentID, + tokensToDeposit, + ) + await shouldMintTaxFree(tokensToDeposit, expectedSignal) + }) + + it('should revert curate if over slippage', async function () { + const tokensToDeposit = toGRT('1000') + const expectedSignal = signalAmountFor1000Tokens + const tx = curation + .connect(gnsImpersonator) + .mintTaxFree(subgraphDeploymentID, tokensToDeposit, expectedSignal.add(1)) + await expect(tx).revertedWith('Slippage protection') + }) + }) + + describe('collect', async function () { + context('> not curated', async function () { + it('reject collect tokens distributed to the curation pool', async function () { + // Source of tokens must be the staking for this to work + await controller + .connect(governor.signer) + .setContractProxy(utils.id('Staking'), stakingMock.address) + await curation.syncAllContracts() // call sync because we change the proxy for staking + + const tx = curation + .connect(stakingMock.signer) + 
.collect(subgraphDeploymentID, tokensToCollect) + await expect(tx).revertedWith('Subgraph deployment must be curated to collect fees') + }) + }) + + context('> curated', async function () { + beforeEach(async function () { + await curation.connect(curator.signer).mint(subgraphDeploymentID, toGRT('1000'), 0) + }) + + it('reject collect tokens distributed from invalid address', async function () { + const tx = curation.connect(me.signer).collect(subgraphDeploymentID, tokensToCollect) + await expect(tx).revertedWith('Caller must be the staking contract') + }) + + it('should collect tokens distributed to the curation pool', async function () { + await controller + .connect(governor.signer) + .setContractProxy(utils.id('Staking'), stakingMock.address) + await curation.syncAllContracts() // call sync because we change the proxy for staking + + await shouldCollect(toGRT('1')) + await shouldCollect(toGRT('10')) + await shouldCollect(toGRT('100')) + await shouldCollect(toGRT('200')) + await shouldCollect(toGRT('500.25')) + }) + + it('should collect tokens and then unsignal all', async function () { + await controller + .connect(governor.signer) + .setContractProxy(utils.id('Staking'), stakingMock.address) + await curation.syncAllContracts() // call sync because we change the proxy for staking + + // Collect increase the pool reserves + await shouldCollect(toGRT('100')) + + // When we burn signal we should get more tokens than initially curated + const signalToRedeem = await curation.getCuratorSignal( + curator.address, + subgraphDeploymentID, + ) + await shouldBurn(signalToRedeem, toGRT('1100')) + }) + + it('should collect tokens and then unsignal multiple times', async function () { + await controller + .connect(governor.signer) + .setContractProxy(utils.id('Staking'), stakingMock.address) + await curation.syncAllContracts() // call sync because we change the proxy for staking + + // Collect increase the pool reserves + const tokensToCollect = toGRT('100') + await 
shouldCollect(tokensToCollect) + + // Unsignal partially + const signalOutRemainder = toGRT(1) + const signalOutPartial = ( + await curation.getCuratorSignal(curator.address, subgraphDeploymentID) + ).sub(signalOutRemainder) + const tx1 = await curation + .connect(curator.signer) + .burn(subgraphDeploymentID, signalOutPartial, 0) + const r1 = await tx1.wait() + const event1 = curation.interface.parseLog(r1.events[2]).args + const tokensOut1 = event1.tokens + + // Collect increase the pool reserves + await shouldCollect(tokensToCollect) + + // Unsignal the rest + const tx2 = await curation + .connect(curator.signer) + .burn(subgraphDeploymentID, signalOutRemainder, 0) + const r2 = await tx2.wait() + const event2 = curation.interface.parseLog(r2.events[2]).args + const tokensOut2 = event2.tokens + + expect(tokensOut1.add(tokensOut2)).eq(toGRT('1000').add(tokensToCollect.mul(2))) + }) + }) + }) + + describe('burn', async function () { + beforeEach(async function () { + await curation.connect(curator.signer).mint(subgraphDeploymentID, tokensToDeposit, 0) + }) + + it('reject redeem more than a curator owns', async function () { + const tx = curation.connect(me.signer).burn(subgraphDeploymentID, toGRT('1'), 0) + await expect(tx).revertedWith('Cannot burn more signal than you own') + }) + + it('reject redeem zero signal', async function () { + const tx = curation.connect(me.signer).burn(subgraphDeploymentID, toGRT('0'), 0) + await expect(tx).revertedWith('Cannot burn zero signal') + }) + + it('should allow to redeem *partially*', async function () { + // Redeem just one signal + const signalToRedeem = toGRT('1') + const expectedTokens = toGRT('100') + await shouldBurn(signalToRedeem, expectedTokens) + }) + + it('should allow to redeem *fully*', async function () { + // Get all signal of the curator + const signalToRedeem = await curation.getCuratorSignal(curator.address, subgraphDeploymentID) + const expectedTokens = tokensToDeposit + await shouldBurn(signalToRedeem, 
expectedTokens) + }) + + it('should allow to redeem back below minimum deposit', async function () { + // Redeem "almost" all signal + const signal = await curation.getCuratorSignal(curator.address, subgraphDeploymentID) + const signalToRedeem = signal.sub(toGRT('0.000001')) + const expectedTokens = await curation.signalToTokens(subgraphDeploymentID, signalToRedeem) + await shouldBurn(signalToRedeem, expectedTokens) + + // The pool should have less tokens that required by minimumCurationDeposit + const afterPool = await curation.pools(subgraphDeploymentID) + expect(afterPool.tokens).lt(await curation.minimumCurationDeposit()) + + // Should be able to deposit more after being under minimumCurationDeposit + const tokensToDeposit = toGRT('1') + const { 0: expectedSignal } = await curation.tokensToSignal( + subgraphDeploymentID, + tokensToDeposit, + ) + await shouldMint(tokensToDeposit, expectedSignal) + }) + + it('should revert redeem if over slippage', async function () { + const signalToRedeem = await curation.getCuratorSignal(curator.address, subgraphDeploymentID) + const expectedTokens = tokensToDeposit + + const tx = curation + .connect(curator.signer) + .burn(subgraphDeploymentID, signalToRedeem, expectedTokens.add(1)) + await expect(tx).revertedWith('Slippage protection') + }) + + it('should not re-deploy the curation token when signal is reset', async function () { + const beforeSubgraphPool = await curation.pools(subgraphDeploymentID) + + // Burn all the signal + const signalToRedeem = await curation.getCuratorSignal(curator.address, subgraphDeploymentID) + const expectedTokens = tokensToDeposit + await shouldBurn(signalToRedeem, expectedTokens) + + // Mint again on the same subgraph + await curation.connect(curator.signer).mint(subgraphDeploymentID, tokensToDeposit, 0) + + // Check state + const afterSubgraphPool = await curation.pools(subgraphDeploymentID) + expect(afterSubgraphPool.gcs).eq(beforeSubgraphPool.gcs) + }) + }) + + describe('conservation', 
async function () { + it('should match multiple deposits and redeems back to initial state', async function () { + this.timeout(60000) // increase timeout for test runner + + const totalDeposits = toGRT('1000000000') + + // Signal multiple times + let totalSignal = toGRT('0') + for (const tokensToDeposit of chunkify(totalDeposits, 10)) { + const tx = await curation + .connect(curator.signer) + .mint(subgraphDeploymentID, tokensToDeposit, 0) + const receipt = await tx.wait() + const event: Event = receipt.events.pop() + const signal = event.args['signal'] + totalSignal = totalSignal.add(signal) + } + + // Redeem signal multiple times + let totalTokens = toGRT('0') + for (const signalToRedeem of chunkify(totalSignal, 10)) { + const tx = await curation + .connect(curator.signer) + .burn(subgraphDeploymentID, signalToRedeem, 0) + const receipt = await tx.wait() + const event: Event = receipt.events.pop() + const tokens = event.args['tokens'] + totalTokens = totalTokens.add(tokens) + // console.log('<', formatEther(signalToRedeem), '=', formatEther(tokens)) + } + + // Conservation of work + const afterPool = await curation.pools(subgraphDeploymentID) + const afterPoolSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) + expect(afterPool.tokens).eq(toGRT('0')) + expect(afterPoolSignal).eq(toGRT('0')) + expect(await curation.isCurated(subgraphDeploymentID)).eq(false) + expect(totalDeposits).eq(totalTokens) + }) + }) + + describe('multiple minting', async function () { + it('should mint the same signal every time due to the linear bonding curve', async function () { + const tokensToDepositMany = [ + toGRT('1000'), // should mint if we start with number above minimum deposit + toGRT('1000'), // every time it should mint the same GCS due to bonding curve! 
+ toGRT('1000'), + toGRT('1000'), + toGRT('2000'), + toGRT('2000'), + toGRT('123'), + toGRT('1'), // should mint below minimum deposit + ] + for (const tokensToDeposit of tokensToDepositMany) { + const expectedSignal = await calcBondingCurve( + await curation.getCurationPoolSignal(subgraphDeploymentID), + await curation.getCurationPoolTokens(subgraphDeploymentID), + await curation.defaultReserveRatio(), + tokensToDeposit, + ) + // SIGNAL_PER_MINIMUM_DEPOSIT should always give the same ratio + expect(tokensToDeposit.div(toGRT(expectedSignal))).eq(100) + + const tx = await curation + .connect(curator.signer) + .mint(subgraphDeploymentID, tokensToDeposit, 0) + const receipt = await tx.wait() + const event: Event = receipt.events.pop() + const signal = event.args['signal'] + expect(toRound(expectedSignal)).eq(toRound(toFloat(signal))) + } + }) + + it('should mint when using the edge case of a 1:1 linear function', async function () { + this.timeout(60000) // increase timeout for test runner + + // Setup edge case like linear function: 1 GRT = 1 GCS + await curation.setMinimumCurationDeposit(toGRT('1')) + + const tokensToDepositMany = [ + toGRT('1000'), // should mint if we start with number above minimum deposit + toGRT('1000'), // every time it should mint the same GCS due to the 1:1 linear curve... 
+ toGRT('1000'), + toGRT('1000'), + toGRT('2000'), + toGRT('2000'), + toGRT('123'), + toGRT('1'), // should mint below minimum deposit + ] + + // Mint multiple times + for (const tokensToDeposit of tokensToDepositMany) { + const tx = await curation + .connect(curator.signer) + .mint(subgraphDeploymentID, tokensToDeposit, 0) + const receipt = await tx.wait() + const event: Event = receipt.events.pop() + const signal = event.args['signal'] + expect(tokensToDeposit).eq(signal) // we compare 1:1 ratio + } + }) + }) +}) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 641fbf14c..aaff73658 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -24,7 +24,7 @@ import { publishNewSubgraph, PublishSubgraph, } from '../lib/gnsUtils' -import { Curation } from '../../build/types/Curation' +import { L2Curation } from '../../build/types/L2Curation' import { GraphToken } from '../../build/types/GraphToken' import { encodeMPTStorageProofRLP, getBlockHeaderRLP } from '../lib/mptProofUtils' @@ -203,7 +203,7 @@ describe('L2GNS', () => { let fixtureContracts: L2FixtureContracts let l2GraphTokenGateway: L2GraphTokenGateway let gns: L2GNS - let curation: Curation + let curation: L2Curation let grt: GraphToken let newSubgraph0: PublishSubgraph @@ -445,7 +445,7 @@ describe('L2GNS', () => { [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) - // Calculate expected signal before minting, which changes the price + // Calculate expected signal before minting const expectedSignal = await curation.tokensToSignalNoTax( newSubgraph0.subgraphDeploymentID, curatedTokens, @@ -532,7 +532,7 @@ describe('L2GNS', () => { ) await expect(tx).revertedWith('INVALID_SUBGRAPH') }) - it('rejects calls to a pre-curated subgraph deployment', async function () { + it('accepts calls to a pre-curated subgraph deployment', async function () { const { l1SubgraphId, 
curatedTokens, @@ -547,10 +547,19 @@ describe('L2GNS', () => { ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + // Calculate expected signal before minting + const expectedSignal = await curation.tokensToSignalNoTax( + newSubgraph0.subgraphDeploymentID, + curatedTokens, + ) await grt.connect(me.signer).approve(curation.address, toGRT('100')) await curation .connect(me.signer) .mint(newSubgraph0.subgraphDeploymentID, toGRT('100'), toBN('0')) + + expect(await curation.getCurationPoolTokens(newSubgraph0.subgraphDeploymentID)).eq( + toGRT('100'), + ) const tx = gns .connect(me.signer) .finishSubgraphMigrationFromL1( @@ -559,7 +568,28 @@ describe('L2GNS', () => { subgraphMetadata, versionMetadata, ) - await expect(tx).revertedWith('GNS: Deployment pre-curated') + await expect(tx) + .emit(gns, 'SubgraphPublished') + .withArgs(l1SubgraphId, newSubgraph0.subgraphDeploymentID, DEFAULT_RESERVE_RATIO) + await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l1SubgraphId, subgraphMetadata) + await expect(tx) + .emit(gns, 'SubgraphUpgraded') + .withArgs(l1SubgraphId, expectedSignal, curatedTokens, newSubgraph0.subgraphDeploymentID) + await expect(tx) + .emit(gns, 'SubgraphVersionUpdated') + .withArgs(l1SubgraphId, newSubgraph0.subgraphDeploymentID, versionMetadata) + await expect(tx).emit(gns, 'SubgraphMigrationFinalized').withArgs(l1SubgraphId) + + const subgraphAfter = await gns.subgraphs(l1SubgraphId) + const migrationDataAfter = await gns.subgraphL2MigrationData(l1SubgraphId) + expect(subgraphAfter.vSignal).eq(expectedSignal) + expect(migrationDataAfter.l2Done).eq(true) + expect(migrationDataAfter.deprecated).eq(false) + expect(subgraphAfter.disabled).eq(false) + expect(subgraphAfter.subgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) + expect(await curation.getCurationPoolTokens(newSubgraph0.subgraphDeploymentID)).eq( + toGRT('100').add(curatedTokens), + ) }) it('rejects calls if the subgraph deployment ID is 
zero', async function () { const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) diff --git a/test/lib/deployment.ts b/test/lib/deployment.ts index 6b667a12f..1b469458b 100644 --- a/test/lib/deployment.ts +++ b/test/lib/deployment.ts @@ -9,6 +9,7 @@ import { BancorFormula } from '../../build/types/BancorFormula' import { Controller } from '../../build/types/Controller' import { GraphProxyAdmin } from '../../build/types/GraphProxyAdmin' import { Curation } from '../../build/types/Curation' +import { L2Curation } from '../../build/types/L2Curation' import { DisputeManager } from '../../build/types/DisputeManager' import { EpochManager } from '../../build/types/EpochManager' import { GNS } from '../../build/types/GNS' @@ -122,6 +123,30 @@ export async function deployCuration( ) as unknown as Curation } +export async function deployL2Curation( + deployer: Signer, + controller: string, + proxyAdmin: GraphProxyAdmin, +): Promise { + // Dependency + const bondingCurve = (await deployContract('BancorFormula', deployer)) as unknown as BancorFormula + const curationTokenMaster = await deployContract('GraphCurationToken', deployer) + + // Deploy + return network.deployContractWithProxy( + proxyAdmin, + 'L2Curation', + [ + controller, + bondingCurve.address, + curationTokenMaster.address, + defaults.curation.curationTaxPercentage, + defaults.curation.minimumCurationDeposit, + ], + deployer, + ) as unknown as L2Curation +} + export async function deployDisputeManager( deployer: Signer, controller: string, diff --git a/test/lib/fixtures.ts b/test/lib/fixtures.ts index 69d9ec896..505cf973d 100644 --- a/test/lib/fixtures.ts +++ b/test/lib/fixtures.ts @@ -12,6 +12,7 @@ import { DisputeManager } from '../../build/types/DisputeManager' import { EpochManager } from '../../build/types/EpochManager' import { GraphToken } from '../../build/types/GraphToken' import { Curation } from '../../build/types/Curation' +import { L2Curation } from '../../build/types/L2Curation' 
import { L1GNS } from '../../build/types/L1GNS' import { L2GNS } from '../../build/types/L2GNS' import { Staking } from '../../build/types/Staking' @@ -43,7 +44,7 @@ export interface L2FixtureContracts { disputeManager: DisputeManager epochManager: EpochManager grt: L2GraphToken - curation: Curation + curation: L2Curation gns: L2GNS staking: Staking rewardsManager: RewardsManager @@ -92,7 +93,12 @@ export class NetworkFixture { grt = await deployment.deployGRT(deployer) } - const curation = await deployment.deployCuration(deployer, controller.address, proxyAdmin) + let curation: Curation | L2Curation + if (isL2) { + curation = await deployment.deployL2Curation(deployer, controller.address, proxyAdmin) + } else { + curation = await deployment.deployCuration(deployer, controller.address, proxyAdmin) + } let gns: L1GNS | L2GNS if (isL2) { gns = await deployment.deployL2GNS(deployer, controller.address, proxyAdmin) diff --git a/test/lib/mptProofUtils.ts b/test/lib/mptProofUtils.ts index db1fd3b9f..0de2a374c 100644 --- a/test/lib/mptProofUtils.ts +++ b/test/lib/mptProofUtils.ts @@ -74,10 +74,6 @@ export const encodeMPTStorageProofRLP = (proof: GetProofResponse): string => { throw new Error('Expected exactly one storage slot proof') } const accountProof = proof.accountProof.map((node) => RLP.decode(hexlify(node))) - console.log('Account proof:') - console.log(accountProof) const storageProof = proof.storageProof[0].proof.map((node) => RLP.decode(hexlify(node))) - console.log('Storage proof:') - console.log(storageProof) return RLP.encode([accountProof, storageProof]) } From dd38b013c14ff48546282ab148af00933e405aa9 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 10 Nov 2022 12:23:25 -0300 Subject: [PATCH 044/112] test(e2e): separate L1/L2 curation tests --- .../config/{ => l1}/curation.test.ts | 12 +++-- e2e/deployment/config/l2/l2Curation.test.ts | 49 +++++++++++++++++++ 2 files changed, 58 insertions(+), 3 deletions(-) rename e2e/deployment/config/{ => 
l1}/curation.test.ts (85%) create mode 100644 e2e/deployment/config/l2/l2Curation.test.ts diff --git a/e2e/deployment/config/curation.test.ts b/e2e/deployment/config/l1/curation.test.ts similarity index 85% rename from e2e/deployment/config/curation.test.ts rename to e2e/deployment/config/l1/curation.test.ts index 6d6a4d1ea..9d107fb5e 100644 --- a/e2e/deployment/config/curation.test.ts +++ b/e2e/deployment/config/l1/curation.test.ts @@ -1,12 +1,18 @@ import { expect } from 'chai' import hre from 'hardhat' -import { getItemValue } from '../../../cli/config' +import { getItemValue } from '../../../../cli/config' +import GraphChain from '../../../../gre/helpers/network' -describe('Curation configuration', () => { +describe('[L1] Curation configuration', () => { + const graph = hre.graph() const { graphConfig, contracts: { Controller, Curation, BancorFormula, GraphCurationToken }, - } = hre.graph() + } = graph + + before(async function () { + if (GraphChain.isL2(graph.chainId)) this.skip() + }) it('should be controlled by Controller', async function () { const controller = await Curation.controller() diff --git a/e2e/deployment/config/l2/l2Curation.test.ts b/e2e/deployment/config/l2/l2Curation.test.ts new file mode 100644 index 000000000..ab20454e4 --- /dev/null +++ b/e2e/deployment/config/l2/l2Curation.test.ts @@ -0,0 +1,49 @@ +import { expect } from 'chai' +import hre from 'hardhat' +import { getItemValue } from '../../../../cli/config' +import GraphChain from '../../../../gre/helpers/network' + +describe('[L2] L2Curation configuration', () => { + const graph = hre.graph() + const { + graphConfig, + contracts: { Controller, L2Curation, BancorFormula, GraphCurationToken }, + } = graph + + before(async function () { + if (GraphChain.isL1(graph.chainId)) this.skip() + }) + + it('should be controlled by Controller', async function () { + const controller = await L2Curation.controller() + expect(controller).eq(Controller.address) + }) + + it('bondingCurve should match the 
BancorFormula deployment address', async function () { + const bondingCurve = await L2Curation.bondingCurve() + expect(bondingCurve).eq(BancorFormula.address) + }) + + it('curationTokenMaster should match the GraphCurationToken deployment address', async function () { + const bondingCurve = await L2Curation.curationTokenMaster() + expect(bondingCurve).eq(GraphCurationToken.address) + }) + + it('defaultReserveRatio should be a constant 1000000', async function () { + const value = await L2Curation.defaultReserveRatio() + const expected = 1000000 + expect(value).eq(expected) + }) + + it('curationTaxPercentage should match "curationTaxPercentage" in the config file', async function () { + const value = await L2Curation.curationTaxPercentage() + const expected = getItemValue(graphConfig, 'contracts/L2Curation/init/curationTaxPercentage') + expect(value).eq(expected) + }) + + it('minimumCurationDeposit should match "minimumCurationDeposit" in the config file', async function () { + const value = await L2Curation.minimumCurationDeposit() + const expected = getItemValue(graphConfig, 'contracts/L2Curation/init/minimumCurationDeposit') + expect(value).eq(expected) + }) +}) From 90e41e7c5d96fd4d96bf9f1f3a2aece8ef3ab926 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 10 Nov 2022 12:55:11 -0300 Subject: [PATCH 045/112] fix: remove bonding curve from GNS --- contracts/discovery/GNS.sol | 34 ++++++------------------------ contracts/discovery/GNSStorage.sol | 4 ++-- e2e/deployment/config/gns.test.ts | 7 +----- test/gns.test.ts | 9 ++------ test/lib/deployment.ts | 3 +-- 5 files changed, 13 insertions(+), 44 deletions(-) diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index 207284cfe..422f9db75 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -34,15 +34,14 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { // 100% in parts per million uint32 private constant MAX_PPM = 1000000; - // Equates to 
Connector weight on bancor formula to be CW = 1 - uint32 private constant defaultReserveRatio = 1000000; - // Storage slot where the subgraphs mapping is stored on L1GNS uint256 internal constant SUBGRAPH_MAPPING_SLOT = 18; // Storage slot where the legacy subgraphs mapping is stored on L1GNS uint256 internal constant LEGACY_SUBGRAPH_MAPPING_SLOT = 15; + // Equates to Connector weight on bancor formula to be CW = 1 + uint32 internal immutable FIXED_RESERVE_RATIO = MAX_PPM; // -- Events -- event SubgraphNFTUpdated(address subgraphNFT); @@ -164,16 +163,9 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { /** * @dev Initialize this contract. */ - function initialize( - address _controller, - address _bondingCurve, - address _subgraphNFT - ) external onlyImpl { + function initialize(address _controller, address _subgraphNFT) external onlyImpl { Managed._initialize(_controller); - // Dependencies - bondingCurve = _bondingCurve; - // Settings _setOwnerTaxPercentage(500000); _setSubgraphNFT(_subgraphNFT); @@ -291,12 +283,12 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { uint256 subgraphID = _nextSubgraphID(subgraphOwner); SubgraphData storage subgraphData = _getSubgraphData(subgraphID); subgraphData.subgraphDeploymentID = _subgraphDeploymentID; - subgraphData.reserveRatio = defaultReserveRatio; + subgraphData.reserveRatio = FIXED_RESERVE_RATIO; // Mint the NFT. Use the subgraphID as tokenID. // This function will check the if tokenID already exists. 
_mintNFT(subgraphOwner, subgraphID); - emit SubgraphPublished(subgraphID, _subgraphDeploymentID, defaultReserveRatio); + emit SubgraphPublished(subgraphID, _subgraphDeploymentID, FIXED_RESERVE_RATIO); // Set the token metadata _setSubgraphMetadata(subgraphID, _subgraphMetadata); @@ -652,13 +644,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { return _vSignalIn; } - return - BancorFormula(bondingCurve).calculatePurchaseReturn( - subgraphData.nSignal, - subgraphData.vSignal, - subgraphData.reserveRatio, - _vSignalIn - ); + return subgraphData.nSignal.mul(_vSignalIn).div(subgraphData.vSignal); } /** @@ -674,13 +660,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { returns (uint256) { SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - return - BancorFormula(bondingCurve).calculateSaleReturn( - subgraphData.nSignal, - subgraphData.vSignal, - subgraphData.reserveRatio, - _nSignalIn - ); + return subgraphData.vSignal.mul(_nSignalIn).div(subgraphData.nSignal); } /** diff --git a/contracts/discovery/GNSStorage.sol b/contracts/discovery/GNSStorage.sol index be7a6f0c7..46960e313 100644 --- a/contracts/discovery/GNSStorage.sol +++ b/contracts/discovery/GNSStorage.sol @@ -19,8 +19,8 @@ abstract contract GNSV1Storage is Managed { /// Percentage of curation tax that must be paid by the owner, in parts per hundred. uint32 public ownerTaxPercentage; - /// Bonding curve formula. - address public bondingCurve; + /// [DEPRECATED] Bonding curve formula. + address public __DEPRECATED_bondingCurve; /// @dev Stores what subgraph deployment a particular legacy subgraph targets. /// A subgraph is defined by (graphAccountID, subgraphNumber). 
diff --git a/e2e/deployment/config/gns.test.ts b/e2e/deployment/config/gns.test.ts index 4337dffd0..08408bcdd 100644 --- a/e2e/deployment/config/gns.test.ts +++ b/e2e/deployment/config/gns.test.ts @@ -3,7 +3,7 @@ import hre from 'hardhat' describe('GNS configuration', () => { const { - contracts: { Controller, GNS, BancorFormula, SubgraphNFT }, + contracts: { Controller, GNS, SubgraphNFT }, } = hre.graph() it('should be controlled by Controller', async function () { @@ -11,11 +11,6 @@ describe('GNS configuration', () => { expect(controller).eq(Controller.address) }) - it('bondingCurve should match the BancorFormula deployment address', async function () { - const bondingCurve = await GNS.bondingCurve() - expect(bondingCurve).eq(BancorFormula.address) - }) - it('subgraphNFT should match the SubgraphNFT deployment address', async function () { const subgraphNFT = await GNS.subgraphNFT() expect(subgraphNFT).eq(SubgraphNFT.address) diff --git a/test/gns.test.ts b/test/gns.test.ts index f50c52c19..c07fd5b34 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -21,7 +21,6 @@ import { ArbitrumL1Mocks, NetworkFixture } from './lib/fixtures' import { toBN, formatGRT } from './lib/testHelpers' import { getContractAt } from '../cli/network' import { deployContract } from './lib/deployment' -import { BancorFormula } from '../build/types/BancorFormula' import { network } from '../cli' import { Controller } from '../build/types/Controller' import { GraphProxyAdmin } from '../build/types/GraphProxyAdmin' @@ -309,10 +308,6 @@ describe('L1GNS', () => { } const deployLegacyGNSMock = async (): Promise => { - const bondingCurve = (await deployContract( - 'BancorFormula', - governor.signer, - )) as unknown as BancorFormula const subgraphDescriptor = await deployContract('SubgraphNFTDescriptor', governor.signer) const subgraphNFT = (await deployContract( 'SubgraphNFT', @@ -324,7 +319,7 @@ describe('L1GNS', () => { legacyGNSMock = (await network.deployContractWithProxy( proxyAdmin, 
'LegacyGNSMock', - [controller.address, bondingCurve.address, subgraphNFT.address], + [controller.address, subgraphNFT.address], governor.signer, )) as unknown as LegacyGNSMock @@ -924,7 +919,7 @@ describe('L1GNS', () => { it('should revert if batching a call to initialize', async function () { // Call a forbidden function - const tx1 = await gns.populateTransaction.initialize(me.address, me.address, me.address) + const tx1 = await gns.populateTransaction.initialize(me.address, me.address) // Create a subgraph const tx2 = await gns.populateTransaction.publishNewSubgraph( diff --git a/test/lib/deployment.ts b/test/lib/deployment.ts index 1b469458b..da5d50db8 100644 --- a/test/lib/deployment.ts +++ b/test/lib/deployment.ts @@ -189,7 +189,6 @@ async function deployL1OrL2GNS( isL2: boolean, ): Promise { // Dependency - const bondingCurve = (await deployContract('BancorFormula', deployer)) as unknown as BancorFormula const subgraphDescriptor = await deployContract('SubgraphNFTDescriptor', deployer) const subgraphNFT = (await deployContract( 'SubgraphNFT', @@ -207,7 +206,7 @@ async function deployL1OrL2GNS( const proxy = (await network.deployContractWithProxy( proxyAdmin, name, - [controller, bondingCurve.address, subgraphNFT.address], + [controller, subgraphNFT.address], deployer, )) as unknown as GNS From eeee6e4ef5726416578fbaf86f3ebc15bf6eedf6 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 10 Nov 2022 14:50:34 -0300 Subject: [PATCH 046/112] fix: remove bancor formula from L2 curation --- cli/commands/migrate.ts | 1 - config/graph.arbitrum-goerli.yml | 2 - config/graph.arbitrum-localhost.yml | 2 - config/graph.arbitrum-one.yml | 2 - config/graph.goerli.yml | 1 - config/graph.localhost.yml | 1 - config/graph.mainnet.yml | 1 - contracts/curation/Curation.sol | 30 ++++++--- contracts/curation/CurationStorage.sol | 7 +- contracts/l2/curation/L2Curation.sol | 73 ++++++++++----------- e2e/deployment/config/l2/l2Curation.test.ts | 9 +-- tasks/e2e/e2e.ts | 12 
+--- test/lib/deployment.ts | 2 - 13 files changed, 65 insertions(+), 78 deletions(-) diff --git a/cli/commands/migrate.ts b/cli/commands/migrate.ts index 18c66615c..4c9c03768 100644 --- a/cli/commands/migrate.ts +++ b/cli/commands/migrate.ts @@ -40,7 +40,6 @@ let allContracts = [ const l2Contracts = [ 'GraphProxyAdmin', - 'BancorFormula', 'Controller', 'EpochManager', 'L2GraphToken', diff --git a/config/graph.arbitrum-goerli.yml b/config/graph.arbitrum-goerli.yml index 7d313206d..dc5c7022c 100644 --- a/config/graph.arbitrum-goerli.yml +++ b/config/graph.arbitrum-goerli.yml @@ -64,7 +64,6 @@ contracts: proxy: true init: controller: "${{Controller.address}}" - bondingCurve: "${{BancorFormula.address}}" curationTokenMaster: "${{GraphCurationToken.address}}" curationTaxPercentage: 10000 # in parts per million minimumCurationDeposit: "1000000000000000000" # in wei @@ -85,7 +84,6 @@ contracts: proxy: true init: controller: "${{Controller.address}}" - bondingCurve: "${{BancorFormula.address}}" subgraphNFT: "${{SubgraphNFT.address}}" calls: - fn: "approveAll" diff --git a/config/graph.arbitrum-localhost.yml b/config/graph.arbitrum-localhost.yml index 6161bd13e..a5674225b 100644 --- a/config/graph.arbitrum-localhost.yml +++ b/config/graph.arbitrum-localhost.yml @@ -64,7 +64,6 @@ contracts: proxy: true init: controller: "${{Controller.address}}" - bondingCurve: "${{BancorFormula.address}}" curationTokenMaster: "${{GraphCurationToken.address}}" curationTaxPercentage: 10000 # in parts per million minimumCurationDeposit: "1000000000000000000" # in wei @@ -85,7 +84,6 @@ contracts: proxy: true init: controller: "${{Controller.address}}" - bondingCurve: "${{BancorFormula.address}}" subgraphNFT: "${{SubgraphNFT.address}}" calls: - fn: "approveAll" diff --git a/config/graph.arbitrum-one.yml b/config/graph.arbitrum-one.yml index d3f41863f..9277bb051 100644 --- a/config/graph.arbitrum-one.yml +++ b/config/graph.arbitrum-one.yml @@ -64,7 +64,6 @@ contracts: proxy: true init: 
controller: "${{Controller.address}}" - bondingCurve: "${{BancorFormula.address}}" curationTokenMaster: "${{GraphCurationToken.address}}" curationTaxPercentage: 10000 # in parts per million minimumCurationDeposit: "1000000000000000000" # in wei @@ -85,7 +84,6 @@ contracts: proxy: true init: controller: "${{Controller.address}}" - bondingCurve: "${{BancorFormula.address}}" subgraphNFT: "${{SubgraphNFT.address}}" calls: - fn: "approveAll" diff --git a/config/graph.goerli.yml b/config/graph.goerli.yml index a0d4353f3..223fda79a 100644 --- a/config/graph.goerli.yml +++ b/config/graph.goerli.yml @@ -87,7 +87,6 @@ contracts: proxy: true init: controller: "${{Controller.address}}" - bondingCurve: "${{BancorFormula.address}}" subgraphNFT: "${{SubgraphNFT.address}}" calls: - fn: "approveAll" diff --git a/config/graph.localhost.yml b/config/graph.localhost.yml index 1e82db61c..23643d2df 100644 --- a/config/graph.localhost.yml +++ b/config/graph.localhost.yml @@ -87,7 +87,6 @@ contracts: proxy: true init: controller: "${{Controller.address}}" - bondingCurve: "${{BancorFormula.address}}" subgraphNFT: "${{SubgraphNFT.address}}" calls: - fn: "approveAll" diff --git a/config/graph.mainnet.yml b/config/graph.mainnet.yml index 36b1f49c3..e90f502c0 100644 --- a/config/graph.mainnet.yml +++ b/config/graph.mainnet.yml @@ -87,7 +87,6 @@ contracts: proxy: true init: controller: "${{Controller.address}}" - bondingCurve: "${{BancorFormula.address}}" subgraphNFT: "${{SubgraphNFT.address}}" calls: - fn: "approveAll" diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index a8ffd9890..7922b5356 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later pragma solidity ^0.7.6; +pragma abicoder v2; import { Address } from "@openzeppelin/contracts/utils/Address.sol"; import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; @@ -209,7 +210,7 @@ contract Curation is 
CurationV1Storage, GraphUpgradeable { ); // Collect new funds into reserve - CurationPool storage curationPool = pools[_subgraphDeploymentID]; + CurationPool storage curationPool = _pools[_subgraphDeploymentID]; curationPool.tokens = curationPool.tokens.add(_tokens); emit Collected(_subgraphDeploymentID, _tokens); @@ -237,7 +238,7 @@ contract Curation is CurationV1Storage, GraphUpgradeable { require(signalOut >= _signalOutMin, "Slippage protection"); address curator = msg.sender; - CurationPool storage curationPool = pools[_subgraphDeploymentID]; + CurationPool storage curationPool = _pools[_subgraphDeploymentID]; // If it hasn't been curated before then initialize the curve if (!isCurated(_subgraphDeploymentID)) { @@ -304,7 +305,7 @@ contract Curation is CurationV1Storage, GraphUpgradeable { _updateRewards(_subgraphDeploymentID); // Update curation pool - CurationPool storage curationPool = pools[_subgraphDeploymentID]; + CurationPool storage curationPool = _pools[_subgraphDeploymentID]; curationPool.tokens = curationPool.tokens.sub(tokensOut); curationPool.gcs.burnFrom(curator, _signalIn); @@ -329,7 +330,7 @@ contract Curation is CurationV1Storage, GraphUpgradeable { * @return True if curated */ function isCurated(bytes32 _subgraphDeploymentID) public view override returns (bool) { - return pools[_subgraphDeploymentID].tokens > 0; + return _pools[_subgraphDeploymentID].tokens > 0; } /** @@ -344,7 +345,7 @@ contract Curation is CurationV1Storage, GraphUpgradeable { override returns (uint256) { - IGraphCurationToken gcs = pools[_subgraphDeploymentID].gcs; + IGraphCurationToken gcs = _pools[_subgraphDeploymentID].gcs; return (address(gcs) == address(0)) ? 0 : gcs.balanceOf(_curator); } @@ -359,7 +360,7 @@ contract Curation is CurationV1Storage, GraphUpgradeable { override returns (uint256) { - IGraphCurationToken gcs = pools[_subgraphDeploymentID].gcs; + IGraphCurationToken gcs = _pools[_subgraphDeploymentID].gcs; return (address(gcs) == address(0)) ? 
0 : gcs.totalSupply(); } @@ -374,7 +375,7 @@ contract Curation is CurationV1Storage, GraphUpgradeable { override returns (uint256) { - return pools[_subgraphDeploymentID].tokens; + return _pools[_subgraphDeploymentID].tokens; } /** @@ -395,6 +396,17 @@ contract Curation is CurationV1Storage, GraphUpgradeable { return (signalOut, curationTax); } + /** + * @notice Get a curation pool for a subgraph deployment + * @dev We add this when making the pools variable internal, to keep + * backwards compatibility. + * @param _subgraphDeploymentID Subgraph deployment for which to get the curation pool + * @return Curation pool for the subgraph deployment + */ + function pools(bytes32 _subgraphDeploymentID) external view returns (CurationPool memory) { + return _pools[_subgraphDeploymentID]; + } + /** * @dev Calculate amount of signal that can be bought with tokens in a curation pool. * @param _subgraphDeploymentID Subgraph deployment to mint signal @@ -407,7 +419,7 @@ contract Curation is CurationV1Storage, GraphUpgradeable { returns (uint256) { // Get curation pool tokens and signal - CurationPool memory curationPool = pools[_subgraphDeploymentID]; + CurationPool memory curationPool = _pools[_subgraphDeploymentID]; // Init curation pool if (curationPool.tokens == 0) { @@ -447,7 +459,7 @@ contract Curation is CurationV1Storage, GraphUpgradeable { override returns (uint256) { - CurationPool memory curationPool = pools[_subgraphDeploymentID]; + CurationPool memory curationPool = _pools[_subgraphDeploymentID]; uint256 curationPoolSignal = getCurationPoolSignal(_subgraphDeploymentID); require( curationPool.tokens > 0, diff --git a/contracts/curation/CurationStorage.sol b/contracts/curation/CurationStorage.sol index dab28771a..9821d37ed 100644 --- a/contracts/curation/CurationStorage.sol +++ b/contracts/curation/CurationStorage.sol @@ -39,9 +39,10 @@ abstract contract CurationV1Storage is Managed, ICuration { uint256 public minimumCurationDeposit; /// Bonding curve library + /// 
Unused in L2. address public bondingCurve; - /// Mapping of subgraphDeploymentID => CurationPool - /// @dev There is only one CurationPool per SubgraphDeploymentID - mapping(bytes32 => CurationPool) public pools; + /// @dev Mapping of subgraphDeploymentID => CurationPool + /// There is only one CurationPool per SubgraphDeploymentID + mapping(bytes32 => CurationPool) internal _pools; } diff --git a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol index 86b349650..8928a5746 100644 --- a/contracts/l2/curation/L2Curation.sol +++ b/contracts/l2/curation/L2Curation.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later pragma solidity ^0.7.6; +pragma abicoder v2; import { Address } from "@openzeppelin/contracts/utils/Address.sol"; import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; @@ -25,7 +26,7 @@ import { IL2Curation } from "./IL2Curation.sol"; * subgraph deployment they curate. * A curators deposit goes to a curation pool along with the deposits of other curators, * only one such pool exists for each subgraph deployment. - * The contract mints Graph Curation Shares (GCS) according to a bonding curve for each individual + * The contract mints Graph Curation Shares (GCS) according to a (linear) bonding curve for each individual * curation pool where GRT is deposited. * Holders can burn GCS using this contract to get GRT tokens back according to the * bonding curve. 
@@ -84,16 +85,12 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { */ function initialize( address _controller, - address _bondingCurve, address _curationTokenMaster, uint32 _curationTaxPercentage, uint256 _minimumCurationDeposit ) external onlyImpl { Managed._initialize(_controller); - require(_bondingCurve != address(0), "Bonding curve must be set"); - bondingCurve = _bondingCurve; - // For backwards compatibility: defaultReserveRatio = FIXED_RESERVE_RATIO; emit ParameterUpdated("defaultReserveRatio"); @@ -194,7 +191,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { ); // Collect new funds into reserve - CurationPool storage curationPool = pools[_subgraphDeploymentID]; + CurationPool storage curationPool = _pools[_subgraphDeploymentID]; curationPool.tokens = curationPool.tokens.add(_tokens); emit Collected(_subgraphDeploymentID, _tokens); @@ -222,11 +219,12 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { require(signalOut >= _signalOutMin, "Slippage protection"); address curator = msg.sender; - CurationPool storage curationPool = pools[_subgraphDeploymentID]; + CurationPool storage curationPool = _pools[_subgraphDeploymentID]; // If it hasn't been curated before then initialize the curve if (!isCurated(_subgraphDeploymentID)) { - curationPool.reserveRatio = FIXED_RESERVE_RATIO; + // Note we don't set the reserveRatio to save the gas + // cost, but in the pools() getter we'll inject the value. 
// If no signal token for the pool - create one if (address(curationPool.gcs) == address(0)) { @@ -281,11 +279,12 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { require(signalOut >= _signalOutMin, "Slippage protection"); address curator = msg.sender; - CurationPool storage curationPool = pools[_subgraphDeploymentID]; + CurationPool storage curationPool = _pools[_subgraphDeploymentID]; // If it hasn't been curated before then initialize the curve if (!isCurated(_subgraphDeploymentID)) { - curationPool.reserveRatio = FIXED_RESERVE_RATIO; + // Note we don't set the reserveRatio to save the gas + // cost, but in the pools() getter we'll inject the value. // If no signal token for the pool - create one if (address(curationPool.gcs) == address(0)) { @@ -346,7 +345,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { _updateRewards(_subgraphDeploymentID); // Update curation pool - CurationPool storage curationPool = pools[_subgraphDeploymentID]; + CurationPool storage curationPool = _pools[_subgraphDeploymentID]; curationPool.tokens = curationPool.tokens.sub(tokensOut); curationPool.gcs.burnFrom(curator, _signalIn); @@ -370,7 +369,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { * @return True if curated */ function isCurated(bytes32 _subgraphDeploymentID) public view override returns (bool) { - return pools[_subgraphDeploymentID].tokens > 0; + return _pools[_subgraphDeploymentID].tokens > 0; } /** @@ -385,7 +384,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { override returns (uint256) { - IGraphCurationToken gcs = pools[_subgraphDeploymentID].gcs; + IGraphCurationToken gcs = _pools[_subgraphDeploymentID].gcs; return (address(gcs) == address(0)) ? 
0 : gcs.balanceOf(_curator); } @@ -400,7 +399,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { override returns (uint256) { - IGraphCurationToken gcs = pools[_subgraphDeploymentID].gcs; + IGraphCurationToken gcs = _pools[_subgraphDeploymentID].gcs; return (address(gcs) == address(0)) ? 0 : gcs.totalSupply(); } @@ -415,7 +414,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { override returns (uint256) { - return pools[_subgraphDeploymentID].tokens; + return _pools[_subgraphDeploymentID].tokens; } /** @@ -464,7 +463,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { returns (uint256) { // Get curation pool tokens and signal - CurationPool memory curationPool = pools[_subgraphDeploymentID]; + CurationPool memory curationPool = _pools[_subgraphDeploymentID]; // Init curation pool if (curationPool.tokens == 0) { @@ -473,23 +472,14 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { "Curation deposit is below minimum required" ); return - BancorFormula(bondingCurve) - .calculatePurchaseReturn( - SIGNAL_PER_MINIMUM_DEPOSIT, - minimumCurationDeposit, - FIXED_RESERVE_RATIO, - _tokensIn.sub(minimumCurationDeposit) + SIGNAL_PER_MINIMUM_DEPOSIT.add( + SIGNAL_PER_MINIMUM_DEPOSIT.mul(_tokensIn.sub(minimumCurationDeposit)).div( + minimumCurationDeposit ) - .add(SIGNAL_PER_MINIMUM_DEPOSIT); + ); } - return - BancorFormula(bondingCurve).calculatePurchaseReturn( - getCurationPoolSignal(_subgraphDeploymentID), - curationPool.tokens, - FIXED_RESERVE_RATIO, - _tokensIn - ); + return getCurationPoolSignal(_subgraphDeploymentID).mul(_tokensIn).div(curationPool.tokens); } /** @@ -504,7 +494,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { override returns (uint256) { - CurationPool memory curationPool = pools[_subgraphDeploymentID]; + CurationPool memory curationPool = _pools[_subgraphDeploymentID]; uint256 curationPoolSignal = 
getCurationPoolSignal(_subgraphDeploymentID); require( curationPool.tokens > 0, @@ -515,13 +505,20 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { "Signal must be above or equal to signal issued in the curation pool" ); - return - BancorFormula(bondingCurve).calculateSaleReturn( - curationPoolSignal, - curationPool.tokens, - FIXED_RESERVE_RATIO, - _signalIn - ); + return curationPool.tokens.mul(_signalIn).div(curationPoolSignal); + } + + /** + * @notice Get a curation pool for a subgraph deployment + * @dev We add this when making the pools variable internal, to keep + * backwards compatibility. + * @param _subgraphDeploymentID Subgraph deployment for which to get the curation pool + * @return Curation pool for the subgraph deployment + */ + function pools(bytes32 _subgraphDeploymentID) external view returns (CurationPool memory) { + CurationPool memory pool = _pools[_subgraphDeploymentID]; + pool.reserveRatio = FIXED_RESERVE_RATIO; + return pool; } /** diff --git a/e2e/deployment/config/l2/l2Curation.test.ts b/e2e/deployment/config/l2/l2Curation.test.ts index ab20454e4..309d3aa22 100644 --- a/e2e/deployment/config/l2/l2Curation.test.ts +++ b/e2e/deployment/config/l2/l2Curation.test.ts @@ -19,14 +19,9 @@ describe('[L2] L2Curation configuration', () => { expect(controller).eq(Controller.address) }) - it('bondingCurve should match the BancorFormula deployment address', async function () { - const bondingCurve = await L2Curation.bondingCurve() - expect(bondingCurve).eq(BancorFormula.address) - }) - it('curationTokenMaster should match the GraphCurationToken deployment address', async function () { - const bondingCurve = await L2Curation.curationTokenMaster() - expect(bondingCurve).eq(GraphCurationToken.address) + const gct = await L2Curation.curationTokenMaster() + expect(gct).eq(GraphCurationToken.address) }) it('defaultReserveRatio should be a constant 1000000', async function () { diff --git a/tasks/e2e/e2e.ts b/tasks/e2e/e2e.ts index 
aa712380c..627f527f9 100644 --- a/tasks/e2e/e2e.ts +++ b/tasks/e2e/e2e.ts @@ -34,6 +34,7 @@ const setGraphConfig = async (args: TaskArguments, hre: HardhatRuntimeEnvironmen } task('e2e', 'Run all e2e tests') + .addFlag('disableSecureAccounts', 'Disable secure accounts on GRE') .addOptionalParam('graphConfig', cliOpts.graphConfig.description) .addOptionalParam('l1GraphConfig', cliOpts.graphConfig.description) .addOptionalParam('l2GraphConfig', cliOpts.graphConfig.description) @@ -49,9 +50,6 @@ task('e2e', 'Run all e2e tests') testFiles = testFiles.filter((file) => !['l1', 'l2'].includes(file.split('/')[3])) } - // Disable secure accounts, we don't need them for this task - hre.config.graph.disableSecureAccounts = true - setGraphConfig(args, hre) await hre.run(TASK_TEST, { testFiles: testFiles, @@ -59,6 +57,7 @@ task('e2e', 'Run all e2e tests') }) task('e2e:config', 'Run deployment configuration e2e tests') + .addFlag('disableSecureAccounts', 'Disable secure accounts on GRE') .addOptionalParam('graphConfig', cliOpts.graphConfig.description) .addOptionalParam('l1GraphConfig', cliOpts.graphConfig.description) .addOptionalParam('l2GraphConfig', cliOpts.graphConfig.description) @@ -66,9 +65,6 @@ task('e2e:config', 'Run deployment configuration e2e tests') .setAction(async (args, hre: HardhatRuntimeEnvironment) => { const files = new glob.GlobSync(CONFIG_TESTS).found - // Disable secure accounts, we don't need them for this task - hre.config.graph.disableSecureAccounts = true - setGraphConfig(args, hre) await hre.run(TASK_TEST, { testFiles: files, @@ -76,6 +72,7 @@ task('e2e:config', 'Run deployment configuration e2e tests') }) task('e2e:init', 'Run deployment initialization e2e tests') + .addFlag('disableSecureAccounts', 'Disable secure accounts on GRE') .addOptionalParam('graphConfig', cliOpts.graphConfig.description) .addOptionalParam('l1GraphConfig', cliOpts.graphConfig.description) .addOptionalParam('l2GraphConfig', cliOpts.graphConfig.description) @@ -83,9 +80,6 @@ 
task('e2e:init', 'Run deployment initialization e2e tests') .setAction(async (args, hre: HardhatRuntimeEnvironment) => { const files = new glob.GlobSync(INIT_TESTS).found - // Disable secure accounts, we don't need them for this task - hre.config.graph.disableSecureAccounts = true - setGraphConfig(args, hre) await hre.run(TASK_TEST, { testFiles: files, diff --git a/test/lib/deployment.ts b/test/lib/deployment.ts index da5d50db8..e54ff31da 100644 --- a/test/lib/deployment.ts +++ b/test/lib/deployment.ts @@ -129,7 +129,6 @@ export async function deployL2Curation( proxyAdmin: GraphProxyAdmin, ): Promise { // Dependency - const bondingCurve = (await deployContract('BancorFormula', deployer)) as unknown as BancorFormula const curationTokenMaster = await deployContract('GraphCurationToken', deployer) // Deploy @@ -138,7 +137,6 @@ export async function deployL2Curation( 'L2Curation', [ controller, - bondingCurve.address, curationTokenMaster.address, defaults.curation.curationTaxPercentage, defaults.curation.minimumCurationDeposit, From 676f7e832659501350d4e8c457a9184053599e42 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 10 Nov 2022 14:53:36 -0300 Subject: [PATCH 047/112] fix: no need for onlyGNS in L1 Curation now --- contracts/curation/Curation.sol | 5 ----- 1 file changed, 5 deletions(-) diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index 7922b5356..e7dfed19c 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -71,11 +71,6 @@ contract Curation is CurationV1Storage, GraphUpgradeable { */ event Collected(bytes32 indexed subgraphDeploymentID, uint256 tokens); - modifier onlyGNS() { - require(msg.sender == address(gns()), "Only the GNS can call this"); - _; - } - /** * @dev Initialize this contract. 
*/ From d128873bf3ca93a67acbbdbf443ca9434d512379 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 10 Nov 2022 15:29:27 -0300 Subject: [PATCH 048/112] fix: remove reserve ratio from retryable tx, and other QA improvements --- contracts/discovery/IGNS.sol | 2 +- contracts/discovery/L1GNS.sol | 3 +- contracts/l2/curation/IL2Curation.sol | 20 ++- contracts/l2/curation/L2Curation.sol | 233 +++++++++++++------------- contracts/l2/discovery/IL2GNS.sol | 18 +- contracts/l2/discovery/L2GNS.sol | 101 ++++++----- test/gns.test.ts | 20 +-- test/l2/l2GNS.test.ts | 36 ++-- 8 files changed, 233 insertions(+), 200 deletions(-) diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index 987c1eb50..6b11a6b01 100644 --- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -10,7 +10,7 @@ interface IGNS { uint256 nSignal; // The token of the subgraph bonding curve mapping(address => uint256) curatorNSignal; bytes32 subgraphDeploymentID; - uint32 reserveRatio; + uint32 reserveRatio; // Ratio for the bonding curve, always 1 in PPM, kept only for backwards compatibility bool disabled; uint256 withdrawableGRT; } diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 4bf8d76a3..c597ee4f0 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -120,8 +120,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { _subgraphID, ownerOf(_subgraphID), blockhash(migrationData.lockedAtBlock), - subgraphData.nSignal, - subgraphData.reserveRatio + subgraphData.nSignal ); } diff --git a/contracts/l2/curation/IL2Curation.sol b/contracts/l2/curation/IL2Curation.sol index 05f2a2775..57b6a145f 100644 --- a/contracts/l2/curation/IL2Curation.sol +++ b/contracts/l2/curation/IL2Curation.sol @@ -2,14 +2,32 @@ pragma solidity ^0.7.6; +/** + * @title Interface of the L2 Curation contract. 
+ */ interface IL2Curation { - // Callable only by GNS in specific scenarios + /** + * @notice Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. + * @dev This function charges no tax and can only be called by GNS in specific scenarios (for now + * only during an L1-L2 migration). + * @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal + * @param _tokensIn Amount of Graph Tokens to deposit + * @param _signalOutMin Expected minimum amount of signal to receive + * @return Signal minted + */ function mintTaxFree( bytes32 _subgraphDeploymentID, uint256 _tokensIn, uint256 _signalOutMin ) external returns (uint256); + /** + * @notice Calculate amount of signal that can be bought with tokens in a curation pool, + * without accounting for curation tax. + * @param _subgraphDeploymentID Subgraph deployment to mint signal + * @param _tokensIn Amount of tokens used to mint signal + * @return Amount of signal that can be bought and tokens subtracted for the tax + */ function tokensToSignalNoTax(bytes32 _subgraphDeploymentID, uint256 _tokensIn) external view diff --git a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol index 8928a5746..418eb1424 100644 --- a/contracts/l2/curation/L2Curation.sol +++ b/contracts/l2/curation/L2Curation.sol @@ -20,7 +20,7 @@ import { GraphCurationToken } from "../../curation/GraphCurationToken.sol"; import { IL2Curation } from "./IL2Curation.sol"; /** - * @title Curation contract + * @title L2Curation contract * @dev Allows curators to signal on subgraph deployments that might be relevant to indexers by * staking Graph Tokens (GRT). Additionally, curators earn fees from the Query Market related to the * subgraph deployment they curate. 
@@ -34,13 +34,13 @@ import { IL2Curation } from "./IL2Curation.sol"; contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { using SafeMath for uint256; - // 100% in parts per million + /// @dev 100% in parts per million uint32 private constant MAX_PPM = 1000000; - // Amount of signal you get with your minimum token deposit + /// @dev Amount of signal you get with your minimum token deposit uint256 private constant SIGNAL_PER_MINIMUM_DEPOSIT = 1e18; // 1 signal as 18 decimal number - // Reserve ratio for all subgraphs set to 100% for a flat bonding curve + /// @dev Reserve ratio for all subgraphs set to 100% for a flat bonding curve uint32 private immutable FIXED_RESERVE_RATIO = MAX_PPM; // -- Events -- @@ -75,13 +75,20 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { */ event Collected(bytes32 indexed subgraphDeploymentID, uint256 tokens); + /** + * @dev Modifier for functions that can only be called by the GNS contract + */ modifier onlyGNS() { require(msg.sender == address(gns()), "Only the GNS can call this"); _; } /** - * @dev Initialize this contract. + * @notice Initialize the L2Curation contract + * @param _controller Controller contract that manages this contract + * @param _curationTokenMaster Address of the GraphCurationToken master copy + * @param _curationTaxPercentage Percentage of curation tax to be collected + * @param _minimumCurationDeposit Minimum amount of tokens that can be deposited as curation signal */ function initialize( address _controller, @@ -120,19 +127,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Internal: Set the minimum deposit amount for curators. 
- * @notice Update the minimum deposit amount to `_minimumCurationDeposit` - * @param _minimumCurationDeposit Minimum amount of tokens required deposit - */ - function _setMinimumCurationDeposit(uint256 _minimumCurationDeposit) private { - require(_minimumCurationDeposit > 0, "Minimum curation deposit cannot be 0"); - - minimumCurationDeposit = _minimumCurationDeposit; - emit ParameterUpdated("minimumCurationDeposit"); - } - - /** - * @dev Set the curation tax percentage to charge when a curator deposits GRT tokens. + * @notice Set the curation tax percentage to charge when a curator deposits GRT tokens. * @param _percentage Curation tax percentage charged when depositing GRT tokens */ function setCurationTaxPercentage(uint32 _percentage) external override onlyGovernor { @@ -140,21 +135,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Internal: Set the curation tax percentage to charge when a curator deposits GRT tokens. - * @param _percentage Curation tax percentage charged when depositing GRT tokens - */ - function _setCurationTaxPercentage(uint32 _percentage) private { - require( - _percentage <= MAX_PPM, - "Curation tax percentage must be below or equal to MAX_PPM" - ); - - curationTaxPercentage = _percentage; - emit ParameterUpdated("curationTaxPercentage"); - } - - /** - * @dev Set the master copy to use as clones for the curation token. + * @notice Set the master copy to use as clones for the curation token. * @param _curationTokenMaster Address of implementation contract to use for curation tokens */ function setCurationTokenMaster(address _curationTokenMaster) external override onlyGovernor { @@ -162,20 +143,8 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Internal: Set the master copy to use as clones for the curation token. 
- * @param _curationTokenMaster Address of implementation contract to use for curation tokens - */ - function _setCurationTokenMaster(address _curationTokenMaster) private { - require(_curationTokenMaster != address(0), "Token master must be non-empty"); - require(Address.isContract(_curationTokenMaster), "Token master must be a contract"); - - curationTokenMaster = _curationTokenMaster; - emit ParameterUpdated("curationTokenMaster"); - } - - /** - * @dev Assign Graph Tokens collected as curation fees to the curation pool reserve. - * This function can only be called by the Staking contract and will do the bookeeping of + * @notice Assign Graph Tokens collected as curation fees to the curation pool reserve. + * @dev This function can only be called by the Staking contract and will do the bookeeping of * transferred tokens into this contract. * @param _subgraphDeploymentID SubgraphDeployment where funds should be allocated as reserves * @param _tokens Amount of Graph Tokens to add to reserves @@ -198,7 +167,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. + * @notice Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. * @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal * @param _tokensIn Amount of Graph Tokens to deposit * @param _signalOutMin Expected minimum amount of signal to receive @@ -256,8 +225,8 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. - * This function charges no tax and can only be called by GNS in specific scenarios (for now + * @notice Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. 
+ * @dev This function charges no tax and can only be called by GNS in specific scenarios (for now * only during an L1-L2 migration). * @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal * @param _tokensIn Amount of Graph Tokens to deposit @@ -315,11 +284,11 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { /** * @dev Return an amount of signal to get tokens back. - * @notice Burn _signal from the SubgraphDeployment curation pool - * @param _subgraphDeploymentID SubgraphDeployment the curator is returning signal + * @notice Burn _signalIn from the SubgraphDeployment curation pool + * @param _subgraphDeploymentID SubgraphDeployment for which the curator is returning signal * @param _signalIn Amount of signal to return * @param _tokensOutMin Expected minimum amount of tokens to receive - * @return Tokens returned + * @return Amount of tokens returned to the sender */ function burn( bytes32 _subgraphDeploymentID, @@ -364,7 +333,34 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Check if any GRT tokens are deposited for a SubgraphDeployment. + * @notice Get the amount of token reserves in a curation pool. + * @param _subgraphDeploymentID Subgraph deployment curation poool + * @return Amount of token reserves in the curation pool + */ + function getCurationPoolTokens(bytes32 _subgraphDeploymentID) + external + view + override + returns (uint256) + { + return _pools[_subgraphDeploymentID].tokens; + } + + /** + * @notice Get a curation pool for a subgraph deployment + * @dev We add this when making the pools variable internal, to keep + * backwards compatibility. 
+ * @param _subgraphDeploymentID Subgraph deployment for which to get the curation pool + * @return Curation pool for the subgraph deployment + */ + function pools(bytes32 _subgraphDeploymentID) external view returns (CurationPool memory) { + CurationPool memory pool = _pools[_subgraphDeploymentID]; + pool.reserveRatio = FIXED_RESERVE_RATIO; + return pool; + } + + /** + * @notice Check if any GRT tokens are deposited for a SubgraphDeployment. * @param _subgraphDeploymentID SubgraphDeployment to check if curated * @return True if curated */ @@ -373,7 +369,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Get the amount of signal a curator has in a curation pool. + * @notice Get the amount of signal a curator has in a curation pool. * @param _curator Curator owning the signal tokens * @param _subgraphDeploymentID Subgraph deployment curation pool * @return Amount of signal owned by a curator for the subgraph deployment @@ -389,7 +385,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Get the amount of signal in a curation pool. + * @notice Get the amount of signal in a curation pool. * @param _subgraphDeploymentID Subgraph deployment curation poool * @return Amount of signal minted for the subgraph deployment */ @@ -404,21 +400,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Get the amount of token reserves in a curation pool. - * @param _subgraphDeploymentID Subgraph deployment curation poool - * @return Amount of token reserves in the curation pool - */ - function getCurationPoolTokens(bytes32 _subgraphDeploymentID) - external - view - override - returns (uint256) - { - return _pools[_subgraphDeploymentID].tokens; - } - - /** - * @dev Calculate amount of signal that can be bought with tokens in a curation pool. + * @notice Calculate amount of signal that can be bought with tokens in a curation pool. 
* This function considers and excludes the deposit tax. * @param _subgraphDeploymentID Subgraph deployment to mint signal * @param _tokensIn Amount of tokens used to mint signal @@ -436,7 +418,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Calculate amount of signal that can be bought with tokens in a curation pool, + * @notice Calculate amount of signal that can be bought with tokens in a curation pool, * without accounting for curation tax. * @param _subgraphDeploymentID Subgraph deployment to mint signal * @param _tokensIn Amount of tokens used to mint signal @@ -452,38 +434,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @dev Calculate amount of signal that can be bought with tokens in a curation pool. - * @param _subgraphDeploymentID Subgraph deployment to mint signal - * @param _tokensIn Amount of tokens used to mint signal - * @return Amount of signal that can be bought with tokens - */ - function _tokensToSignal(bytes32 _subgraphDeploymentID, uint256 _tokensIn) - private - view - returns (uint256) - { - // Get curation pool tokens and signal - CurationPool memory curationPool = _pools[_subgraphDeploymentID]; - - // Init curation pool - if (curationPool.tokens == 0) { - require( - _tokensIn >= minimumCurationDeposit, - "Curation deposit is below minimum required" - ); - return - SIGNAL_PER_MINIMUM_DEPOSIT.add( - SIGNAL_PER_MINIMUM_DEPOSIT.mul(_tokensIn.sub(minimumCurationDeposit)).div( - minimumCurationDeposit - ) - ); - } - - return getCurationPoolSignal(_subgraphDeploymentID).mul(_tokensIn).div(curationPool.tokens); - } - - /** - * @dev Calculate number of tokens to get when burning signal from a curation pool. + * @notice Calculate number of tokens to get when burning signal from a curation pool. 
* @param _subgraphDeploymentID Subgraph deployment to burn signal * @param _signalIn Amount of signal to burn * @return Amount of tokens to get for an amount of signal @@ -509,16 +460,41 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { } /** - * @notice Get a curation pool for a subgraph deployment - * @dev We add this when making the pools variable internal, to keep - * backwards compatibility. - * @param _subgraphDeploymentID Subgraph deployment for which to get the curation pool - * @return Curation pool for the subgraph deployment + * @dev Internal: Set the minimum deposit amount for curators. + * Update the minimum deposit amount to `_minimumCurationDeposit` + * @param _minimumCurationDeposit Minimum amount of tokens required deposit */ - function pools(bytes32 _subgraphDeploymentID) external view returns (CurationPool memory) { - CurationPool memory pool = _pools[_subgraphDeploymentID]; - pool.reserveRatio = FIXED_RESERVE_RATIO; - return pool; + function _setMinimumCurationDeposit(uint256 _minimumCurationDeposit) private { + require(_minimumCurationDeposit > 0, "Minimum curation deposit cannot be 0"); + + minimumCurationDeposit = _minimumCurationDeposit; + emit ParameterUpdated("minimumCurationDeposit"); + } + + /** + * @dev Internal: Set the curation tax percentage to charge when a curator deposits GRT tokens. + * @param _percentage Curation tax percentage charged when depositing GRT tokens + */ + function _setCurationTaxPercentage(uint32 _percentage) private { + require( + _percentage <= MAX_PPM, + "Curation tax percentage must be below or equal to MAX_PPM" + ); + + curationTaxPercentage = _percentage; + emit ParameterUpdated("curationTaxPercentage"); + } + + /** + * @dev Internal: Set the master copy to use as clones for the curation token. 
+ * @param _curationTokenMaster Address of implementation contract to use for curation tokens + */ + function _setCurationTokenMaster(address _curationTokenMaster) private { + require(_curationTokenMaster != address(0), "Token master must be non-empty"); + require(Address.isContract(_curationTokenMaster), "Token master must be a contract"); + + curationTokenMaster = _curationTokenMaster; + emit ParameterUpdated("curationTokenMaster"); } /** @@ -531,4 +507,35 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { rewardsManager.onSubgraphSignalUpdate(_subgraphDeploymentID); } } + + /** + * @dev Calculate amount of signal that can be bought with tokens in a curation pool. + * @param _subgraphDeploymentID Subgraph deployment to mint signal + * @param _tokensIn Amount of tokens used to mint signal + * @return Amount of signal that can be bought with tokens + */ + function _tokensToSignal(bytes32 _subgraphDeploymentID, uint256 _tokensIn) + private + view + returns (uint256) + { + // Get curation pool tokens and signal + CurationPool memory curationPool = _pools[_subgraphDeploymentID]; + + // Init curation pool + if (curationPool.tokens == 0) { + require( + _tokensIn >= minimumCurationDeposit, + "Curation deposit is below minimum required" + ); + return + SIGNAL_PER_MINIMUM_DEPOSIT.add( + SIGNAL_PER_MINIMUM_DEPOSIT.mul(_tokensIn.sub(minimumCurationDeposit)).div( + minimumCurationDeposit + ) + ); + } + + return getCurationPoolSignal(_subgraphDeploymentID).mul(_tokensIn).div(curationPool.tokens); + } } diff --git a/contracts/l2/discovery/IL2GNS.sol b/contracts/l2/discovery/IL2GNS.sol index 361637bc9..cbc40b9b4 100644 --- a/contracts/l2/discovery/IL2GNS.sol +++ b/contracts/l2/discovery/IL2GNS.sol @@ -4,7 +4,19 @@ pragma solidity ^0.7.6; import { ICallhookReceiver } from "../../gateway/ICallhookReceiver.sol"; +/** + * @title Interface for the L2GNS contract. 
+ */ interface IL2GNS is ICallhookReceiver { + /** + * @notice Finish a subgraph migration from L1. + * The subgraph must have been previously sent through the bridge + * using the sendSubgraphToL2 function on L1GNS. + * @param _subgraphID Subgraph ID + * @param _subgraphDeploymentID Latest subgraph deployment to assign to the subgraph + * @param _subgraphMetadata IPFS hash of the subgraph metadata + * @param _versionMetadata IPFS hash of the version metadata + */ function finishSubgraphMigrationFromL1( uint256 _subgraphID, bytes32 _subgraphDeploymentID, @@ -13,7 +25,7 @@ interface IL2GNS is ICallhookReceiver { ) external; /** - * @dev Claim curator balance belonging to a curator from L1. + * @notice Claim curator balance belonging to a curator from L1. * This will be credited to the same curator's balance on L2. * This can only be called by the corresponding curator. * @param _subgraphID Subgraph for which to claim a balance @@ -27,7 +39,7 @@ interface IL2GNS is ICallhookReceiver { ) external; /** - * @dev Claim curator balance belonging to a curator from L1 on a legacy subgraph. + * @notice Claim curator balance belonging to a curator from L1 on a legacy subgraph. * This will be credited to the same curator's balance on L2. * This can only be called by the corresponding curator. * Users can query getLegacySubgraphKey on L1 to get the _subgraphCreatorAccount and _seqID. @@ -44,7 +56,7 @@ interface IL2GNS is ICallhookReceiver { ) external; /** - * @dev Claim curator balance belonging to a curator from L1. + * @notice Claim curator balance belonging to a curator from L1. * This will be credited to the a beneficiary on L2, and can only be called * from the GNS on L1 through a retryable ticket. 
* @param _subgraphID Subgraph on which to claim the balance diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 7dd03e604..3ac1aadf3 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -86,8 +86,8 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { } /** - * @dev Receive tokens with a callhook from the bridge. - * The callhook will receive a subgraph from L1 + * @notice Receive tokens with a callhook from the bridge. + * The callhook will receive a subgraph from L1. * @param _from Token sender in L1 (must be the L1GNS) * @param _amount Amount of tokens that were transferred * @param _data ABI-encoded callhook data @@ -102,20 +102,21 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { uint256 subgraphID, address subgraphOwner, bytes32 lockedAtBlockHash, - uint256 nSignal, - uint32 reserveRatio - ) = abi.decode(_data, (uint256, address, bytes32, uint256, uint32)); - - _receiveSubgraphFromL1( - subgraphID, - subgraphOwner, - _amount, - lockedAtBlockHash, - nSignal, - reserveRatio - ); + uint256 nSignal + ) = abi.decode(_data, (uint256, address, bytes32, uint256)); + + _receiveSubgraphFromL1(subgraphID, subgraphOwner, _amount, lockedAtBlockHash, nSignal); } + /** + * @notice Finish a subgraph migration from L1. + * The subgraph must have been previously sent through the bridge + * using the sendSubgraphToL2 function on L1GNS. 
+ * @param _subgraphID Subgraph ID + * @param _subgraphDeploymentID Latest subgraph deployment to assign to the subgraph + * @param _subgraphMetadata IPFS hash of the subgraph metadata + * @param _versionMetadata IPFS hash of the version metadata + */ function finishSubgraphMigrationFromL1( uint256 _subgraphID, bytes32 _subgraphDeploymentID, @@ -141,7 +142,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { // Set the token metadata _setSubgraphMetadata(_subgraphID, _subgraphMetadata); - emit SubgraphPublished(_subgraphID, _subgraphDeploymentID, subgraphData.reserveRatio); + emit SubgraphPublished(_subgraphID, _subgraphDeploymentID, FIXED_RESERVE_RATIO); emit SubgraphUpgraded( _subgraphID, subgraphData.vSignal, @@ -155,7 +156,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { } /** - * @dev Claim curator balance belonging to a curator from L1. + * @notice Claim curator balance belonging to a curator from L1. * This will be credited to the same curator's balance on L2. * This can only be called by the corresponding curator. * @param _subgraphID Subgraph for which to claim a balance @@ -205,7 +206,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { } /** - * @dev Claim curator balance belonging to a curator from L1 on a legacy subgraph. + * @notice Claim curator balance belonging to a curator from L1 on a legacy subgraph. * This will be credited to the same curator's balance on L2. * This can only be called by the corresponding curator. * Users can query getLegacySubgraphKey on L1 to get the _subgraphCreatorAccount and _seqID. @@ -260,7 +261,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { } /** - * @dev Claim curator balance belonging to a curator from L1. + * @notice Claim curator balance belonging to a curator from L1. * This will be credited to the a beneficiary on L2, and can only be called * from the GNS on L1 through a retryable ticket. 
* @param _subgraphID Subgraph on which to claim the balance @@ -287,34 +288,6 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { emit CuratorBalanceClaimed(_subgraphID, _curator, _beneficiary, _balance); } - // TODO add NatSpec - function _receiveSubgraphFromL1( - uint256 _subgraphID, - address _subgraphOwner, - uint256 _tokens, - bytes32 _lockedAtBlockHash, - uint256 _nSignal, - uint32 _reserveRatio - ) internal { - IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - - subgraphData.reserveRatio = _reserveRatio; - // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called - subgraphData.disabled = true; - subgraphData.nSignal = _nSignal; - - migratedData.tokens = _tokens; - migratedData.lockedAtBlockHash = _lockedAtBlockHash; - migratedData.l1Done = true; - - // Mint the NFT. Use the subgraphID as tokenID. - // This function will check the if tokenID already exists. - _mintNFT(_subgraphOwner, _subgraphID); - - emit SubgraphReceivedFromL1(_subgraphID); - } - /** * @notice Publish a new version of an existing subgraph. * @dev This is the same as the one in the base GNS, but skips the check for @@ -386,6 +359,42 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { emit SubgraphVersionUpdated(_subgraphID, _subgraphDeploymentID, _versionMetadata); } + /** + * @dev Receive a subgraph from L1. + * This function will initialize a subgraph received through the bridge, + * and store the migration data so that it's finalized later using finishSubgraphMigrationFromL1. 
+ * @param _subgraphID Subgraph ID + * @param _subgraphOwner Owner of the subgraph + * @param _tokens Tokens to be deposited in the subgraph + * @param _lockedAtBlockHash Blockhash of the block at which the subgraph was locked in L1 + * @param _nSignal Name signal for the subgraph in L1 + */ + function _receiveSubgraphFromL1( + uint256 _subgraphID, + address _subgraphOwner, + uint256 _tokens, + bytes32 _lockedAtBlockHash, + uint256 _nSignal + ) internal { + IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + + subgraphData.reserveRatio = FIXED_RESERVE_RATIO; + // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called + subgraphData.disabled = true; + subgraphData.nSignal = _nSignal; + + migratedData.tokens = _tokens; + migratedData.lockedAtBlockHash = _lockedAtBlockHash; + migratedData.l1Done = true; + + // Mint the NFT. Use the subgraphID as tokenID. + // This function will check the if tokenID already exists. + _mintNFT(_subgraphOwner, _subgraphID); + + emit SubgraphReceivedFromL1(_subgraphID); + } + /** * @dev Get subgraph data. 
* Since there are no legacy subgraphs in L2, we override the base diff --git a/test/gns.test.ts b/test/gns.test.ts index c07fd5b34..2b9a109a4 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1257,14 +1257,8 @@ describe('L1GNS', () => { expect(migrationData.l1Done).eq(true) const expectedCallhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], - [ - subgraph0.id, - me.address, - lockBlockhash, - subgraphBefore.nSignal, - subgraphBefore.reserveRatio, - ], + ['uint256', 'address', 'bytes32', 'uint256'], + [subgraph0.id, me.address, lockBlockhash, subgraphBefore.nSignal], ) const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( @@ -1310,14 +1304,8 @@ describe('L1GNS', () => { expect(migrationData.l1Done).eq(true) const expectedCallhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], - [ - subgraphID, - me.address, - lockBlockhash, - subgraphBefore.nSignal, - subgraphBefore.reserveRatio, - ], + ['uint256', 'address', 'bytes32', 'uint256'], + [subgraphID, me.address, lockBlockhash, subgraphBefore.nSignal], ) const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index aaff73658..d6b8af706 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -243,8 +243,8 @@ describe('L2GNS', () => { nSignal: BigNumber, ) { const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], + ['uint256', 'address', 'bytes32', 'uint256'], + [l1SubgraphId, me.address, lockBlockhash, nSignal], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -318,8 +318,8 @@ describe('L2GNS', () => { const { l1SubgraphId, curatedTokens, lockBlockhash, nSignal } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 
'bytes32', 'uint256', 'uint32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], + ['uint256', 'address', 'bytes32', 'uint256'], + [l1SubgraphId, me.address, lockBlockhash, nSignal], ) const tx = gns .connect(me.signer) @@ -330,8 +330,8 @@ describe('L2GNS', () => { const { l1SubgraphId, curatedTokens, lockBlockhash, nSignal } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], + ['uint256', 'address', 'bytes32', 'uint256'], + [l1SubgraphId, me.address, lockBlockhash, nSignal], ) const tx = gatewayFinalizeTransfer(me.address, gns.address, curatedTokens, callhookData) @@ -343,8 +343,8 @@ describe('L2GNS', () => { const lockBlockhash = randomHexBytes(32) const nSignal = toBN('4567') const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], + ['uint256', 'address', 'bytes32', 'uint256'], + [l1SubgraphId, me.address, lockBlockhash, nSignal], ) const tx = gatewayFinalizeTransfer( mockL1GNS.address, @@ -385,8 +385,8 @@ describe('L2GNS', () => { const lockBlockhash = randomHexBytes(32) const nSignal = toBN('4567') const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], + ['uint256', 'address', 'bytes32', 'uint256'], + [l1SubgraphId, me.address, lockBlockhash, nSignal], ) const tx = gatewayFinalizeTransfer( mockL1GNS.address, @@ -441,8 +441,8 @@ describe('L2GNS', () => { nSignal, } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], + ['uint256', 'address', 'bytes32', 'uint256'], + 
[l1SubgraphId, me.address, lockBlockhash, nSignal], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) // Calculate expected signal before minting @@ -489,8 +489,8 @@ describe('L2GNS', () => { nSignal, } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], + ['uint256', 'address', 'bytes32', 'uint256'], + [l1SubgraphId, me.address, lockBlockhash, nSignal], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -542,8 +542,8 @@ describe('L2GNS', () => { nSignal, } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], + ['uint256', 'address', 'bytes32', 'uint256'], + [l1SubgraphId, me.address, lockBlockhash, nSignal], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -598,8 +598,8 @@ describe('L2GNS', () => { const metadata = randomHexBytes() const nSignal = toBN('4567') const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256', 'uint32'], - [l1SubgraphId, me.address, lockBlockhash, nSignal, DEFAULT_RESERVE_RATIO], + ['uint256', 'address', 'bytes32', 'uint256'], + [l1SubgraphId, me.address, lockBlockhash, nSignal], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) From 556010da0aff625705aaa9379a9bc111881b6e9c Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 10 Nov 2022 15:32:27 -0300 Subject: [PATCH 049/112] test: fix chain path in e2e --- e2e/deployment/config/l1/curation.test.ts | 2 +- e2e/deployment/config/l2/l2Curation.test.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/e2e/deployment/config/l1/curation.test.ts b/e2e/deployment/config/l1/curation.test.ts index 9d107fb5e..612510008 100644 --- a/e2e/deployment/config/l1/curation.test.ts +++ b/e2e/deployment/config/l1/curation.test.ts @@ -1,7 +1,7 @@ import { expect } from 'chai' import hre from 'hardhat' import { getItemValue } from '../../../../cli/config' -import GraphChain from '../../../../gre/helpers/network' +import GraphChain from '../../../../gre/helpers/chain' describe('[L1] Curation configuration', () => { const graph = hre.graph() diff --git a/e2e/deployment/config/l2/l2Curation.test.ts b/e2e/deployment/config/l2/l2Curation.test.ts index 309d3aa22..809eb0ecd 100644 --- a/e2e/deployment/config/l2/l2Curation.test.ts +++ b/e2e/deployment/config/l2/l2Curation.test.ts @@ -1,7 +1,7 @@ import { expect } from 'chai' import hre from 'hardhat' import { getItemValue } from '../../../../cli/config' -import GraphChain from '../../../../gre/helpers/network' +import GraphChain from '../../../../gre/helpers/chain' describe('[L2] L2Curation configuration', () => { const graph = hre.graph() From 57b84cc653307939de42d050d32bf10a9082efeb Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 10 Nov 2022 19:40:53 -0300 Subject: [PATCH 050/112] fix: various QA improvements --- contracts/curation/Curation.sol | 181 ++++---- contracts/curation/CurationStorage.sol | 8 +- contracts/discovery/GNS.sol | 400 +++++++++--------- contracts/discovery/GNSStorage.sol | 5 +- contracts/discovery/IGNS.sol | 141 ++++++ contracts/discovery/ISubgraphNFT.sol | 2 - contracts/discovery/L1GNS.sol | 94 ++-- contracts/discovery/L1GNSStorage.sol | 3 +- contracts/discovery/SubgraphNFT.sol | 11 - contracts/l2/curation/L2Curation.sol | 27 +- contracts/l2/discovery/L2GNS.sol | 11 +- .../libraries/MerklePatriciaProofVerifier.sol | 31 ++ contracts/libraries/RLPReader.sol | 127 ++++-- contracts/libraries/StateProofVerifier.sol | 16 +- contracts/tests/LegacyGNSMock.sol | 16 + 
.../tests/MerklePatriciaProofVerifierMock.sol | 13 +- contracts/tests/RLPReaderMock.sol | 0 contracts/tests/StateProofVerifierMock.sol | 0 18 files changed, 705 insertions(+), 381 deletions(-) delete mode 100644 contracts/tests/RLPReaderMock.sol delete mode 100644 contracts/tests/StateProofVerifierMock.sol diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index e7dfed19c..7f1dc50e3 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -3,9 +3,9 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import { Address } from "@openzeppelin/contracts/utils/Address.sol"; -import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; -import { Clones } from "@openzeppelin/contracts/proxy/Clones.sol"; +import { AddressUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol"; +import { SafeMathUpgradeable } from "@openzeppelin/contracts-upgradeable/math/SafeMathUpgradeable.sol"; +import { ClonesUpgradeable } from "@openzeppelin/contracts-upgradeable/proxy/ClonesUpgradeable.sol"; import { BancorFormula } from "../bancor/BancorFormula.sol"; import { GraphUpgradeable } from "../upgrades/GraphUpgradeable.sol"; @@ -13,7 +13,7 @@ import { TokenUtils } from "../utils/TokenUtils.sol"; import { IRewardsManager } from "../rewards/IRewardsManager.sol"; import { Managed } from "../governance/Managed.sol"; import { IGraphToken } from "../token/IGraphToken.sol"; -import { CurationV1Storage } from "./CurationStorage.sol"; +import { CurationV2Storage } from "./CurationStorage.sol"; import { ICuration } from "./ICuration.sol"; import { IGraphCurationToken } from "./IGraphCurationToken.sol"; import { GraphCurationToken } from "./GraphCurationToken.sol"; @@ -30,8 +30,8 @@ import { GraphCurationToken } from "./GraphCurationToken.sol"; * Holders can burn GCS using this contract to get GRT tokens back according to the * bonding curve. 
*/ -contract Curation is CurationV1Storage, GraphUpgradeable { - using SafeMath for uint256; +contract Curation is CurationV2Storage, GraphUpgradeable { + using SafeMathUpgradeable for uint256; // 100% in parts per million uint32 private constant MAX_PPM = 1000000; @@ -81,7 +81,7 @@ contract Curation is CurationV1Storage, GraphUpgradeable { uint32 _defaultReserveRatio, uint32 _curationTaxPercentage, uint256 _minimumCurationDeposit - ) external onlyImpl { + ) external onlyImpl initializer { Managed._initialize(_controller); require(_bondingCurve != address(0), "Bonding curve must be set"); @@ -103,23 +103,6 @@ contract Curation is CurationV1Storage, GraphUpgradeable { _setDefaultReserveRatio(_defaultReserveRatio); } - /** - * @dev Internal: Set the default reserve ratio percentage for a curation pool. - * @notice Update the default reserver ratio to `_defaultReserveRatio` - * @param _defaultReserveRatio Reserve ratio (in PPM) - */ - function _setDefaultReserveRatio(uint32 _defaultReserveRatio) private { - // Reserve Ratio must be within 0% to 100% (inclusive, in PPM) - require(_defaultReserveRatio > 0, "Default reserve ratio must be > 0"); - require( - _defaultReserveRatio <= MAX_PPM, - "Default reserve ratio cannot be higher than MAX_PPM" - ); - - defaultReserveRatio = _defaultReserveRatio; - emit ParameterUpdated("defaultReserveRatio"); - } - /** * @dev Set the minimum deposit amount for curators. * @notice Update the minimum deposit amount to `_minimumCurationDeposit` @@ -133,18 +116,6 @@ contract Curation is CurationV1Storage, GraphUpgradeable { _setMinimumCurationDeposit(_minimumCurationDeposit); } - /** - * @dev Internal: Set the minimum deposit amount for curators. 
- * @notice Update the minimum deposit amount to `_minimumCurationDeposit` - * @param _minimumCurationDeposit Minimum amount of tokens required deposit - */ - function _setMinimumCurationDeposit(uint256 _minimumCurationDeposit) private { - require(_minimumCurationDeposit > 0, "Minimum curation deposit cannot be 0"); - - minimumCurationDeposit = _minimumCurationDeposit; - emit ParameterUpdated("minimumCurationDeposit"); - } - /** * @dev Set the curation tax percentage to charge when a curator deposits GRT tokens. * @param _percentage Curation tax percentage charged when depositing GRT tokens @@ -153,20 +124,6 @@ contract Curation is CurationV1Storage, GraphUpgradeable { _setCurationTaxPercentage(_percentage); } - /** - * @dev Internal: Set the curation tax percentage to charge when a curator deposits GRT tokens. - * @param _percentage Curation tax percentage charged when depositing GRT tokens - */ - function _setCurationTaxPercentage(uint32 _percentage) private { - require( - _percentage <= MAX_PPM, - "Curation tax percentage must be below or equal to MAX_PPM" - ); - - curationTaxPercentage = _percentage; - emit ParameterUpdated("curationTaxPercentage"); - } - /** * @dev Set the master copy to use as clones for the curation token. * @param _curationTokenMaster Address of implementation contract to use for curation tokens @@ -175,18 +132,6 @@ contract Curation is CurationV1Storage, GraphUpgradeable { _setCurationTokenMaster(_curationTokenMaster); } - /** - * @dev Internal: Set the master copy to use as clones for the curation token. 
- * @param _curationTokenMaster Address of implementation contract to use for curation tokens - */ - function _setCurationTokenMaster(address _curationTokenMaster) private { - require(_curationTokenMaster != address(0), "Token master must be non-empty"); - require(Address.isContract(_curationTokenMaster), "Token master must be a contract"); - - curationTokenMaster = _curationTokenMaster; - emit ParameterUpdated("curationTokenMaster"); - } - /** * @dev Assign Graph Tokens collected as curation fees to the curation pool reserve. * This function can only be called by the Staking contract and will do the bookeeping of @@ -242,7 +187,9 @@ contract Curation is CurationV1Storage, GraphUpgradeable { // If no signal token for the pool - create one if (address(curationPool.gcs) == address(0)) { // Use a minimal proxy to reduce gas cost - IGraphCurationToken gcs = IGraphCurationToken(Clones.clone(curationTokenMaster)); + IGraphCurationToken gcs = IGraphCurationToken( + ClonesUpgradeable.clone(curationTokenMaster) + ); gcs.initialize(address(this)); curationPool.gcs = gcs; } @@ -319,6 +266,31 @@ contract Curation is CurationV1Storage, GraphUpgradeable { return tokensOut; } + /** + * @dev Get the amount of token reserves in a curation pool. + * @param _subgraphDeploymentID Subgraph deployment curation poool + * @return Amount of token reserves in the curation pool + */ + function getCurationPoolTokens(bytes32 _subgraphDeploymentID) + external + view + override + returns (uint256) + { + return _pools[_subgraphDeploymentID].tokens; + } + + /** + * @notice Get a curation pool for a subgraph deployment + * @dev We add this when making the pools variable internal, to keep + * backwards compatibility. 
+ * @param _subgraphDeploymentID Subgraph deployment for which to get the curation pool + * @return Curation pool for the subgraph deployment + */ + function pools(bytes32 _subgraphDeploymentID) external view returns (CurationPool memory) { + return _pools[_subgraphDeploymentID]; + } + /** * @dev Check if any GRT tokens are deposited for a SubgraphDeployment. * @param _subgraphDeploymentID SubgraphDeployment to check if curated @@ -359,20 +331,6 @@ contract Curation is CurationV1Storage, GraphUpgradeable { return (address(gcs) == address(0)) ? 0 : gcs.totalSupply(); } - /** - * @dev Get the amount of token reserves in a curation pool. - * @param _subgraphDeploymentID Subgraph deployment curation poool - * @return Amount of token reserves in the curation pool - */ - function getCurationPoolTokens(bytes32 _subgraphDeploymentID) - external - view - override - returns (uint256) - { - return _pools[_subgraphDeploymentID].tokens; - } - /** * @dev Calculate amount of signal that can be bought with tokens in a curation pool. * This function considers and excludes the deposit tax. @@ -391,17 +349,6 @@ contract Curation is CurationV1Storage, GraphUpgradeable { return (signalOut, curationTax); } - /** - * @notice Get a curation pool for a subgraph deployment - * @dev We add this when making the pools variable internal, to keep - * backwards compatibility. - * @param _subgraphDeploymentID Subgraph deployment for which to get the curation pool - * @return Curation pool for the subgraph deployment - */ - function pools(bytes32 _subgraphDeploymentID) external view returns (CurationPool memory) { - return _pools[_subgraphDeploymentID]; - } - /** * @dev Calculate amount of signal that can be bought with tokens in a curation pool. * @param _subgraphDeploymentID Subgraph deployment to mint signal @@ -474,6 +421,64 @@ contract Curation is CurationV1Storage, GraphUpgradeable { ); } + /** + * @dev Internal: Set the default reserve ratio percentage for a curation pool. 
+ * @notice Update the default reserver ratio to `_defaultReserveRatio` + * @param _defaultReserveRatio Reserve ratio (in PPM) + */ + function _setDefaultReserveRatio(uint32 _defaultReserveRatio) private { + // Reserve Ratio must be within 0% to 100% (inclusive, in PPM) + require(_defaultReserveRatio > 0, "Default reserve ratio must be > 0"); + require( + _defaultReserveRatio <= MAX_PPM, + "Default reserve ratio cannot be higher than MAX_PPM" + ); + + defaultReserveRatio = _defaultReserveRatio; + emit ParameterUpdated("defaultReserveRatio"); + } + + /** + * @dev Internal: Set the minimum deposit amount for curators. + * @notice Update the minimum deposit amount to `_minimumCurationDeposit` + * @param _minimumCurationDeposit Minimum amount of tokens required deposit + */ + function _setMinimumCurationDeposit(uint256 _minimumCurationDeposit) private { + require(_minimumCurationDeposit > 0, "Minimum curation deposit cannot be 0"); + + minimumCurationDeposit = _minimumCurationDeposit; + emit ParameterUpdated("minimumCurationDeposit"); + } + + /** + * @dev Internal: Set the curation tax percentage to charge when a curator deposits GRT tokens. + * @param _percentage Curation tax percentage charged when depositing GRT tokens + */ + function _setCurationTaxPercentage(uint32 _percentage) private { + require( + _percentage <= MAX_PPM, + "Curation tax percentage must be below or equal to MAX_PPM" + ); + + curationTaxPercentage = _percentage; + emit ParameterUpdated("curationTaxPercentage"); + } + + /** + * @dev Internal: Set the master copy to use as clones for the curation token. 
+ * @param _curationTokenMaster Address of implementation contract to use for curation tokens + */ + function _setCurationTokenMaster(address _curationTokenMaster) private { + require(_curationTokenMaster != address(0), "Token master must be non-empty"); + require( + AddressUpgradeable.isContract(_curationTokenMaster), + "Token master must be a contract" + ); + + curationTokenMaster = _curationTokenMaster; + emit ParameterUpdated("curationTokenMaster"); + } + /** * @dev Triggers an update of rewards due to a change in signal. * @param _subgraphDeploymentID Subgraph deployment updated diff --git a/contracts/curation/CurationStorage.sol b/contracts/curation/CurationStorage.sol index 9821d37ed..12a6f697e 100644 --- a/contracts/curation/CurationStorage.sol +++ b/contracts/curation/CurationStorage.sol @@ -2,6 +2,8 @@ pragma solidity ^0.7.6; +import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/Initializable.sol"; + import { ICuration } from "./ICuration.sol"; import { IGraphCurationToken } from "./IGraphCurationToken.sol"; import { Managed } from "../governance/Managed.sol"; @@ -21,7 +23,7 @@ abstract contract CurationV1Storage is Managed, ICuration { // -- State -- - /// Tax charged when curatos deposit funds. + /// Tax charged when curators deposit funds. /// Parts per million. 
(Allows for 4 decimal points, 999,999 = 99.9999%) uint32 public override curationTaxPercentage; @@ -46,3 +48,7 @@ abstract contract CurationV1Storage is Managed, ICuration { /// There is only one CurationPool per SubgraphDeploymentID mapping(bytes32 => CurationPool) internal _pools; } + +abstract contract CurationV2Storage is CurationV1Storage, Initializable { + // Nothing here, just adding Initializable +} diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index 422f9db75..7a8e71bdc 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -3,8 +3,8 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; -import { Address } from "@openzeppelin/contracts/utils/Address.sol"; +import { SafeMathUpgradeable } from "@openzeppelin/contracts-upgradeable/math/SafeMathUpgradeable.sol"; +import { AddressUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol"; import { Multicall } from "../base/Multicall.sol"; import { BancorFormula } from "../bancor/BancorFormula.sol"; @@ -27,23 +27,25 @@ import { GNSV3Storage } from "./GNSStorage.sol"; * transaction. 
*/ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { - using SafeMath for uint256; + using SafeMathUpgradeable for uint256; // -- Constants -- - // 100% in parts per million + /// @dev 100% in parts per million uint32 private constant MAX_PPM = 1000000; - // Storage slot where the subgraphs mapping is stored on L1GNS + /// @dev Storage slot where the subgraphs mapping is stored on L1GNS uint256 internal constant SUBGRAPH_MAPPING_SLOT = 18; - // Storage slot where the legacy subgraphs mapping is stored on L1GNS + /// @dev Storage slot where the legacy subgraphs mapping is stored on L1GNS uint256 internal constant LEGACY_SUBGRAPH_MAPPING_SLOT = 15; - // Equates to Connector weight on bancor formula to be CW = 1 + /// @dev Equates to Connector weight on bancor formula to be CW = 1 uint32 internal immutable FIXED_RESERVE_RATIO = MAX_PPM; + // -- Events -- + /// @dev Emitted when the subgraph NFT contract is updated event SubgraphNFTUpdated(address subgraphNFT); /** @@ -161,9 +163,11 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { // -- Functions -- /** - * @dev Initialize this contract. + * @notice Initialize the GNS contract. + * @param _controller Address of the Controller contract that manages this contract + * @param _subgraphNFT Address of the Subgraph NFT contract */ - function initialize(address _controller, address _subgraphNFT) external onlyImpl { + function initialize(address _controller, address _subgraphNFT) external onlyImpl initializer { Managed._initialize(_controller); // Settings @@ -172,7 +176,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Approve curation contract to pull funds. + * @notice Approve curation contract to pull funds. 
*/ function approveAll() external override { graphToken().approve(address(curation()), type(uint256).max); @@ -181,7 +185,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { // -- Config -- /** - * @dev Set the owner fee percentage. This is used to prevent a subgraph owner to drain all + * @notice Set the owner fee percentage. This is used to prevent a subgraph owner to drain all * the name curators tokens while upgrading or deprecating and is configurable in parts per million. * @param _ownerTaxPercentage Owner tax percentage */ @@ -190,41 +194,18 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Internal: Set the owner tax percentage. This is used to prevent a subgraph owner to drain all - * the name curators tokens while upgrading or deprecating and is configurable in parts per million. - * @param _ownerTaxPercentage Owner tax percentage - */ - function _setOwnerTaxPercentage(uint32 _ownerTaxPercentage) private { - require(_ownerTaxPercentage <= MAX_PPM, "Owner tax must be MAX_PPM or less"); - ownerTaxPercentage = _ownerTaxPercentage; - emit ParameterUpdated("ownerTaxPercentage"); - } - - /** - * @dev Set the NFT registry contract + * @notice Set the NFT registry contract * NOTE: Calling this function will break the ownership model unless * it is replaced with a fully migrated version of the NFT contract state * Use with care. 
* @param _subgraphNFT Address of the ERC721 contract */ - function setSubgraphNFT(address _subgraphNFT) public onlyGovernor { + function setSubgraphNFT(address _subgraphNFT) external onlyGovernor { _setSubgraphNFT(_subgraphNFT); } /** - * @dev Internal: Set the NFT registry contract - * @param _subgraphNFT Address of the ERC721 contract - */ - function _setSubgraphNFT(address _subgraphNFT) private { - require(_subgraphNFT != address(0), "NFT address cant be zero"); - require(Address.isContract(_subgraphNFT), "NFT must be valid"); - - subgraphNFT = ISubgraphNFT(_subgraphNFT); - emit SubgraphNFTUpdated(_subgraphNFT); - } - - /** - * @dev Set the counterpart (L1/L2) GNS address + * @notice Set the counterpart (L1/L2) GNS address * @param _counterpart Owner tax percentage */ function setCounterpartGNSAddress(address _counterpart) external onlyGovernor { @@ -235,7 +216,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { // -- Actions -- /** - * @dev Allows a graph account to set a default name + * @notice Allows a graph account to set a default name * @param _graphAccount Account that is setting its name * @param _nameSystem Name system account already has ownership of a name in * @param _nameIdentifier The unique identifier that is used to identify the name in the system @@ -252,12 +233,12 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Allows a subgraph owner to update the metadata of a subgraph they have published + * @notice Allows a subgraph owner to update the metadata of a subgraph they have published * @param _subgraphID Subgraph ID * @param _subgraphMetadata IPFS hash for the subgraph metadata */ function updateSubgraphMetadata(uint256 _subgraphID, bytes32 _subgraphMetadata) - public + external override onlySubgraphAuth(_subgraphID) { @@ -265,7 +246,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Publish a new subgraph. + * @notice Publish a new subgraph. 
* @param _subgraphDeploymentID Subgraph deployment for the subgraph * @param _versionMetadata IPFS hash for the subgraph version metadata * @param _subgraphMetadata IPFS hash for the subgraph metadata @@ -297,7 +278,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Publish a new version of an existing subgraph. + * @notice Publish a new version of an existing subgraph. * @param _subgraphID Subgraph ID * @param _subgraphDeploymentID Subgraph deployment ID of the new version * @param _versionMetadata IPFS hash for the subgraph version metadata @@ -371,7 +352,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Deprecate a subgraph. The bonding curve is destroyed, the vSignal is burned, and the GNS + * @notice Deprecate a subgraph. The bonding curve is destroyed, the vSignal is burned, and the GNS * contract holds the GRT from burning the vSignal, which all curators can withdraw manually. * Can only be done by the subgraph owner. * @param _subgraphID Subgraph ID @@ -408,7 +389,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Deposit GRT into a subgraph and mint signal. + * @notice Deposit GRT into a subgraph and mint signal. * @param _subgraphID Subgraph ID * @param _tokensIn The amount of tokens the nameCurator wants to deposit * @param _nSignalOutMin Expected minimum amount of name signal to receive @@ -441,7 +422,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Burn signal for a subgraph and return the GRT. + * @notice Burn signal for a subgraph and return the GRT. 
* @param _subgraphID Subgraph ID * @param _nSignal The amount of nSignal the nameCurator wants to burn * @param _tokensOutMin Expected minimum amount of tokens to receive @@ -478,7 +459,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Move subgraph signal from sender to `_recipient` + * @notice Move subgraph signal from sender to `_recipient` * @param _subgraphID Subgraph ID * @param _recipient Address to send the signal to * @param _amount The amount of nSignal to transfer @@ -508,7 +489,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Withdraw tokens from a deprecated subgraph. + * @notice Withdraw tokens from a deprecated subgraph. * When the subgraph is deprecated, any curator can call this function and * withdraw the GRT they are entitled for its original deposit * @param _subgraphID Subgraph ID @@ -539,49 +520,71 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Calculate tax that owner will have to cover for upgrading or deprecating. - * @param _tokens Tokens that were received from deprecating the old subgraph - * @param _owner Subgraph owner - * @param _curationTaxPercentage Tax percentage on curation deposits from Curation contract - * @return Total tokens that will be sent to curation, _tokens + ownerTax + * @notice Create subgraphID for legacy subgraph and mint ownership NFT. 
+ * @param _graphAccount Account that created the subgraph + * @param _subgraphNumber The sequence number of the created subgraph + * @param _subgraphMetadata IPFS hash for the subgraph metadata */ - function _chargeOwnerTax( - uint256 _tokens, - address _owner, - uint32 _curationTaxPercentage - ) internal returns (uint256) { - if (_curationTaxPercentage == 0 || ownerTaxPercentage == 0) { - return 0; - } + function migrateLegacySubgraph( + address _graphAccount, + uint256 _subgraphNumber, + bytes32 _subgraphMetadata + ) external { + // Must be an existing legacy subgraph + bool legacySubgraphExists = legacySubgraphData[_graphAccount][_subgraphNumber] + .subgraphDeploymentID != 0; + require(legacySubgraphExists == true, "GNS: Subgraph does not exist"); - // Tax on the total bonding curve funds - uint256 taxOnOriginal = _tokens.mul(_curationTaxPercentage).div(MAX_PPM); - // Total after the tax - uint256 totalWithoutOwnerTax = _tokens.sub(taxOnOriginal); - // The portion of tax that the owner will pay - uint256 ownerTax = taxOnOriginal.mul(ownerTaxPercentage).div(MAX_PPM); + // Must not be a claimed subgraph + uint256 subgraphID = _buildLegacySubgraphID(_graphAccount, _subgraphNumber); + require( + legacySubgraphKeys[subgraphID].account == address(0), + "GNS: Subgraph was already claimed" + ); - uint256 totalWithOwnerTax = totalWithoutOwnerTax.add(ownerTax); + // Store a reference for a legacy subgraph + legacySubgraphKeys[subgraphID] = IGNS.LegacySubgraphKey({ + account: _graphAccount, + accountSeqID: _subgraphNumber + }); - // The total after tax, plus owner partial repay, divided by - // the tax, to adjust it slightly upwards. 
ex: - // 100 GRT, 5 GRT Tax, owner pays 100% --> 5 GRT - // To get 100 in the protocol after tax, Owner deposits - // ~5.26, as ~105.26 * .95 = 100 - uint256 totalAdjustedUp = totalWithOwnerTax.mul(MAX_PPM).div( - uint256(MAX_PPM).sub(uint256(_curationTaxPercentage)) - ); + // Delete state for legacy subgraph + legacySubgraphs[_graphAccount][_subgraphNumber] = 0; - uint256 ownerTaxAdjustedUp = totalAdjustedUp.sub(_tokens); + // Mint the NFT and send to owner + // The subgraph owner is the graph account that created it + _mintNFT(_graphAccount, subgraphID); + emit LegacySubgraphClaimed(_graphAccount, _subgraphNumber); - // Get the owner of the subgraph to reimburse the curation tax - TokenUtils.pullTokens(graphToken(), _owner, ownerTaxAdjustedUp); + // Set the token metadata + _setSubgraphMetadata(subgraphID, _subgraphMetadata); + } - return totalAdjustedUp; + /** + * @notice Return the total signal on the subgraph. + * @param _subgraphID Subgraph ID + * @return Total signal on the subgraph + */ + function subgraphSignal(uint256 _subgraphID) external view override returns (uint256) { + return _getSubgraphData(_subgraphID).nSignal; } /** - * @dev Calculate subgraph signal to be returned for an amount of tokens. + * @notice Return the total tokens on the subgraph at current value. + * @param _subgraphID Subgraph ID + * @return Total tokens on the subgraph + */ + function subgraphTokens(uint256 _subgraphID) external view override returns (uint256) { + uint256 signal = _getSubgraphData(_subgraphID).nSignal; + if (signal > 0) { + (, uint256 tokens) = nSignalToTokens(_subgraphID, signal); + return tokens; + } + return 0; + } + + /** + * @notice Calculate subgraph signal to be returned for an amount of tokens. 
* @param _subgraphID Subgraph ID * @param _tokensIn Tokens being exchanged for subgraph signal * @return Amount of subgraph signal and curation tax @@ -606,7 +609,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Calculate tokens returned for an amount of subgraph signal. + * @notice Calculate tokens returned for an amount of subgraph signal. * @param _subgraphID Subgraph ID * @param _nSignalIn Subgraph signal being exchanged for tokens * @return Amount of tokens returned for an amount of subgraph signal @@ -626,7 +629,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Calculate subgraph signal to be returned for an amount of subgraph deployment signal. + * @notice Calculate subgraph signal to be returned for an amount of subgraph deployment signal. * @param _subgraphID Subgraph ID * @param _vSignalIn Amount of subgraph deployment signal to exchange for subgraph signal * @return Amount of subgraph signal that can be bought @@ -648,7 +651,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Calculate subgraph deployment signal to be returned for an amount of subgraph signal. + * @notice Calculate subgraph deployment signal to be returned for an amount of subgraph signal. * @param _subgraphID Subgraph ID * @param _nSignalIn Subgraph signal being exchanged for subgraph deployment signal * @return Amount of subgraph deployment signal that can be returned @@ -664,7 +667,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Get the amount of subgraph signal a curator has. + * @notice Get the amount of subgraph signal a curator has. * @param _subgraphID Subgraph ID * @param _curator Curator address * @return Amount of subgraph signal owned by a curator @@ -679,71 +682,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Return the total signal on the subgraph. 
- * @param _subgraphID Subgraph ID - * @return Total signal on the subgraph - */ - function subgraphSignal(uint256 _subgraphID) external view override returns (uint256) { - return _getSubgraphData(_subgraphID).nSignal; - } - - /** - * @dev Return the total tokens on the subgraph at current value. - * @param _subgraphID Subgraph ID - * @return Total tokens on the subgraph - */ - function subgraphTokens(uint256 _subgraphID) external view override returns (uint256) { - uint256 signal = _getSubgraphData(_subgraphID).nSignal; - if (signal > 0) { - (, uint256 tokens) = nSignalToTokens(_subgraphID, signal); - return tokens; - } - return 0; - } - - /** - * @dev Create subgraphID for legacy subgraph and mint ownership NFT. - * @param _graphAccount Account that created the subgraph - * @param _subgraphNumber The sequence number of the created subgraph - * @param _subgraphMetadata IPFS hash for the subgraph metadata - */ - function migrateLegacySubgraph( - address _graphAccount, - uint256 _subgraphNumber, - bytes32 _subgraphMetadata - ) external { - // Must be an existing legacy subgraph - bool legacySubgraphExists = legacySubgraphData[_graphAccount][_subgraphNumber] - .subgraphDeploymentID != 0; - require(legacySubgraphExists == true, "GNS: Subgraph does not exist"); - - // Must not be a claimed subgraph - uint256 subgraphID = _buildLegacySubgraphID(_graphAccount, _subgraphNumber); - require( - legacySubgraphKeys[subgraphID].account == address(0), - "GNS: Subgraph was already claimed" - ); - - // Store a reference for a legacy subgraph - legacySubgraphKeys[subgraphID] = IGNS.LegacySubgraphKey({ - account: _graphAccount, - accountSeqID: _subgraphNumber - }); - - // Delete state for legacy subgraph - legacySubgraphs[_graphAccount][_subgraphNumber] = 0; - - // Mint the NFT and send to owner - // The subgraph owner is the graph account that created it - _mintNFT(_graphAccount, subgraphID); - emit LegacySubgraphClaimed(_graphAccount, _subgraphNumber); - - // Set the token 
metadata - _setSubgraphMetadata(subgraphID, _subgraphMetadata); - } - - /** - * @dev Return whether a subgraph is published. + * @notice Return whether a subgraph is published. * @param _subgraphID Subgraph ID * @return Return true if subgraph is currently published */ @@ -752,7 +691,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Return whether a subgraph is a legacy subgraph (created before subgraph NFTs). + * @notice Return whether a subgraph is a legacy subgraph (created before subgraph NFTs). * @param _subgraphID Subgraph ID * @return Return true if subgraph is a legacy subgraph */ @@ -762,7 +701,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Returns account and sequence ID for a legacy subgraph (created before subgraph NFTs). + * @notice Returns account and sequence ID for a legacy subgraph (created before subgraph NFTs). * @param _subgraphID Subgraph ID * @return account Account that created the subgraph (or 0 if it's not a legacy subgraph) * @return seqID Sequence number for the subgraph @@ -778,6 +717,15 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { seqID = legacySubgraphKey.accountSeqID; } + /** + * @notice Return the owner of a subgraph. + * @param _tokenID Subgraph ID + * @return Owner address + */ + function ownerOf(uint256 _tokenID) public view override returns (address) { + return subgraphNFT.ownerOf(_tokenID); + } + /** * @notice Get the storage slot that corresponds to a curator's signal within a subgraph * @dev This can be useful to produce proofs to claim balances in L2, as implemented @@ -834,32 +782,45 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { } /** - * @dev Build a subgraph ID based on the account creating it and a sequence number for that account. - * Only used for legacy subgraphs being migrated, as new ones will also use the chainid. 
- * Subgraph ID is the keccak hash of account+seqID - * @return Subgraph ID - */ - function _buildLegacySubgraphID(address _account, uint256 _seqID) - internal - pure - returns (uint256) - { - return uint256(keccak256(abi.encodePacked(_account, _seqID))); - } - - /** - * @dev Build a subgraph ID based on the account creating it and a sequence number for that account. - * Subgraph ID is the keccak hash of account+seqID - * @return Subgraph ID + * @dev Calculate tax that owner will have to cover for upgrading or deprecating. + * @param _tokens Tokens that were received from deprecating the old subgraph + * @param _owner Subgraph owner + * @param _curationTaxPercentage Tax percentage on curation deposits from Curation contract + * @return Total tokens that will be sent to curation, _tokens + ownerTax */ - function _buildSubgraphID(address _account, uint256 _seqID) internal pure returns (uint256) { - uint256 chainId; - // Too bad solidity 0.7.6 still doesn't have block.chainid - // solhint-disable-next-line no-inline-assembly - assembly { - chainId := chainid() + function _chargeOwnerTax( + uint256 _tokens, + address _owner, + uint32 _curationTaxPercentage + ) internal returns (uint256) { + if (_curationTaxPercentage == 0 || ownerTaxPercentage == 0) { + return 0; } - return uint256(keccak256(abi.encodePacked(_account, _seqID, chainId))); + + // Tax on the total bonding curve funds + uint256 taxOnOriginal = _tokens.mul(_curationTaxPercentage).div(MAX_PPM); + // Total after the tax + uint256 totalWithoutOwnerTax = _tokens.sub(taxOnOriginal); + // The portion of tax that the owner will pay + uint256 ownerTax = taxOnOriginal.mul(ownerTaxPercentage).div(MAX_PPM); + + uint256 totalWithOwnerTax = totalWithoutOwnerTax.add(ownerTax); + + // The total after tax, plus owner partial repay, divided by + // the tax, to adjust it slightly upwards. 
ex: + // 100 GRT, 5 GRT Tax, owner pays 100% --> 5 GRT + // To get 100 in the protocol after tax, Owner deposits + // ~5.26, as ~105.26 * .95 = 100 + uint256 totalAdjustedUp = totalWithOwnerTax.mul(MAX_PPM).div( + uint256(MAX_PPM).sub(uint256(_curationTaxPercentage)) + ); + + uint256 ownerTaxAdjustedUp = totalAdjustedUp.sub(_tokens); + + // Get the owner of the subgraph to reimburse the curation tax + TokenUtils.pullTokens(graphToken(), _owner, ownerTaxAdjustedUp); + + return totalAdjustedUp; } /** @@ -882,6 +843,36 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { return seqID; } + /** + * @dev Mint the NFT for the subgraph. + * @param _owner Owner address + * @param _tokenID Subgraph ID + */ + function _mintNFT(address _owner, uint256 _tokenID) internal { + subgraphNFT.mint(_owner, _tokenID); + } + + /** + * @dev Burn the NFT for the subgraph. + * @param _tokenID Subgraph ID + */ + function _burnNFT(uint256 _tokenID) internal { + subgraphNFT.burn(_tokenID); + } + + /** + * @dev Set the subgraph metadata. + * @param _tokenID Subgraph ID + * @param _subgraphMetadata IPFS hash of the subgraph metadata + */ + function _setSubgraphMetadata(uint256 _tokenID, bytes32 _subgraphMetadata) internal { + subgraphNFT.setSubgraphMetadata(_tokenID, _subgraphMetadata); + + // Even if the following event is emitted in the NFT we emit it here to facilitate + // subgraph indexing + emit SubgraphMetadataUpdated(_tokenID, _subgraphMetadata); + } + /** * @dev Get subgraph data. * This function will first look for a v1 subgraph and return it if found. @@ -927,44 +918,55 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { return subgraphData; } - // -- NFT -- - /** - * @dev Return the owner of a subgraph. - * @param _tokenID Subgraph ID - * @return Owner address + * @dev Build a subgraph ID based on the account creating it and a sequence number for that account. + * Only used for legacy subgraphs being migrated, as new ones will also use the chainid. 
+ * Subgraph ID is the keccak hash of account+seqID + * @return Subgraph ID */ - function ownerOf(uint256 _tokenID) public view override returns (address) { - return subgraphNFT.ownerOf(_tokenID); + function _buildLegacySubgraphID(address _account, uint256 _seqID) + internal + pure + returns (uint256) + { + return uint256(keccak256(abi.encodePacked(_account, _seqID))); } /** - * @dev Mint the NFT for the subgraph. - * @param _owner Owner address - * @param _tokenID Subgraph ID + * @dev Build a subgraph ID based on the account creating it and a sequence number for that account. + * Subgraph ID is the keccak hash of account+seqID + * @return Subgraph ID */ - function _mintNFT(address _owner, uint256 _tokenID) internal { - subgraphNFT.mint(_owner, _tokenID); + function _buildSubgraphID(address _account, uint256 _seqID) internal pure returns (uint256) { + uint256 chainId; + // Too bad solidity 0.7.6 still doesn't have block.chainid + // solhint-disable-next-line no-inline-assembly + assembly { + chainId := chainid() + } + return uint256(keccak256(abi.encodePacked(_account, _seqID, chainId))); } /** - * @dev Burn the NFT for the subgraph. - * @param _tokenID Subgraph ID + * @dev Internal: Set the owner tax percentage. This is used to prevent a subgraph owner to drain all + * the name curators tokens while upgrading or deprecating and is configurable in parts per million. + * @param _ownerTaxPercentage Owner tax percentage */ - function _burnNFT(uint256 _tokenID) internal { - subgraphNFT.burn(_tokenID); + function _setOwnerTaxPercentage(uint32 _ownerTaxPercentage) private { + require(_ownerTaxPercentage <= MAX_PPM, "Owner tax must be MAX_PPM or less"); + ownerTaxPercentage = _ownerTaxPercentage; + emit ParameterUpdated("ownerTaxPercentage"); } /** - * @dev Set the subgraph metadata. 
- * @param _tokenID Subgraph ID - * @param _subgraphMetadata IPFS hash of the subgraph metadata + * @dev Internal: Set the NFT registry contract + * @param _subgraphNFT Address of the ERC721 contract */ - function _setSubgraphMetadata(uint256 _tokenID, bytes32 _subgraphMetadata) internal { - subgraphNFT.setSubgraphMetadata(_tokenID, _subgraphMetadata); + function _setSubgraphNFT(address _subgraphNFT) private { + require(_subgraphNFT != address(0), "NFT address cant be zero"); + require(AddressUpgradeable.isContract(_subgraphNFT), "NFT must be valid"); - // Even if the following event is emitted in the NFT we emit it here to facilitate - // subgraph indexing - emit SubgraphMetadataUpdated(_tokenID, _subgraphMetadata); + subgraphNFT = ISubgraphNFT(_subgraphNFT); + emit SubgraphNFTUpdated(_subgraphNFT); } } diff --git a/contracts/discovery/GNSStorage.sol b/contracts/discovery/GNSStorage.sol index 46960e313..3143d2651 100644 --- a/contracts/discovery/GNSStorage.sol +++ b/contracts/discovery/GNSStorage.sol @@ -3,6 +3,7 @@ pragma solidity ^0.7.6; pragma abicoder v2; +import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/Initializable.sol"; import { Managed } from "../governance/Managed.sol"; import { IEthereumDIDRegistry } from "./erc1056/IEthereumDIDRegistry.sol"; @@ -64,9 +65,9 @@ abstract contract GNSV2Storage is GNSV1Storage { * @notice This contract holds all the storage variables for the base GNS contract, version 3. * @dev Note that this is the first version that includes a storage gap - if adding * future versions, make sure to move the gap to the new version and - * reduce the size of the gap accordingly. + * reduce the size of the gap accordingly. 
*/ -abstract contract GNSV3Storage is GNSV2Storage { +abstract contract GNSV3Storage is GNSV2Storage, Initializable { /// Data for subgraph migration from L1 to L2, some fields will be empty or set differently on each layer mapping(uint256 => IGNS.SubgraphL2MigrationData) public subgraphL2MigrationData; /// Address of the counterpart GNS contract (L1GNS/L2GNS) diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index 6b11a6b01..0d7ac045c 100644 --- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -2,9 +2,17 @@ pragma solidity ^0.7.6; +/** + * @title Interface for GNS + */ interface IGNS { // -- Pool -- + /** + * @dev The SubgraphData struct holds information about subgraphs + * and their signal; both nSignal (i.e. name signal at the GNS level) + * and vSignal (i.e. version signal at the Curation contract level) + */ struct SubgraphData { uint256 vSignal; // The token of the subgraph-deployment bonding curve uint256 nSignal; // The token of the subgraph bonding curve @@ -15,6 +23,12 @@ interface IGNS { uint256 withdrawableGRT; } + /** + * @dev The SubgraphL2MigrationData struct holds information + * about a subgraph related to its migration from L1 to L2. + * Some fields of this are used by the L1GNS, and some are used by + * the L2GNS. + */ struct SubgraphL2MigrationData { uint256 lockedAtBlock; // Block at which the subgraph was locked for migration. L1 only uint256 tokens; // GRT that will be sent to L2 to mint signal @@ -25,6 +39,10 @@ interface IGNS { bool deprecated; // Subgraph was deprecated instead of sent. L1 only } + /** + * @dev The LegacySubgraphKey struct holds the account and sequence ID + * used to generate subgraph IDs in legacy subgraphs. + */ struct LegacySubgraphKey { address account; uint256 accountSeqID; @@ -32,12 +50,27 @@ interface IGNS { // -- Configuration -- + /** + * @notice Approve curation contract to pull funds. + */ function approveAll() external; + /** + * @notice Set the owner fee percentage. 
This is used to prevent a subgraph owner to drain all + * the name curators tokens while upgrading or deprecating and is configurable in parts per million. + * @param _ownerTaxPercentage Owner tax percentage + */ function setOwnerTaxPercentage(uint32 _ownerTaxPercentage) external; // -- Publishing -- + /** + * @notice Allows a graph account to set a default name + * @param _graphAccount Account that is setting its name + * @param _nameSystem Name system account already has ownership of a name in + * @param _nameIdentifier The unique identifier that is used to identify the name in the system + * @param _name The name being set as default + */ function setDefaultName( address _graphAccount, uint8 _nameSystem, @@ -45,52 +78,120 @@ interface IGNS { string calldata _name ) external; + /** + * @notice Allows a subgraph owner to update the metadata of a subgraph they have published + * @param _subgraphID Subgraph ID + * @param _subgraphMetadata IPFS hash for the subgraph metadata + */ function updateSubgraphMetadata(uint256 _subgraphID, bytes32 _subgraphMetadata) external; + /** + * @notice Publish a new subgraph. + * @param _subgraphDeploymentID Subgraph deployment for the subgraph + * @param _versionMetadata IPFS hash for the subgraph version metadata + * @param _subgraphMetadata IPFS hash for the subgraph metadata + */ function publishNewSubgraph( bytes32 _subgraphDeploymentID, bytes32 _versionMetadata, bytes32 _subgraphMetadata ) external; + /** + * @notice Publish a new version of an existing subgraph. + * @param _subgraphID Subgraph ID + * @param _subgraphDeploymentID Subgraph deployment ID of the new version + * @param _versionMetadata IPFS hash for the subgraph version metadata + */ function publishNewVersion( uint256 _subgraphID, bytes32 _subgraphDeploymentID, bytes32 _versionMetadata ) external; + /** + * @notice Deprecate a subgraph. 
The bonding curve is destroyed, the vSignal is burned, and the GNS + * contract holds the GRT from burning the vSignal, which all curators can withdraw manually. + * Can only be done by the subgraph owner. + * @param _subgraphID Subgraph ID + */ function deprecateSubgraph(uint256 _subgraphID) external; // -- Curation -- + /** + * @notice Deposit GRT into a subgraph and mint signal. + * @param _subgraphID Subgraph ID + * @param _tokensIn The amount of tokens the nameCurator wants to deposit + * @param _nSignalOutMin Expected minimum amount of name signal to receive + */ function mintSignal( uint256 _subgraphID, uint256 _tokensIn, uint256 _nSignalOutMin ) external; + /** + * @notice Burn signal for a subgraph and return the GRT. + * @param _subgraphID Subgraph ID + * @param _nSignal The amount of nSignal the nameCurator wants to burn + * @param _tokensOutMin Expected minimum amount of tokens to receive + */ function burnSignal( uint256 _subgraphID, uint256 _nSignal, uint256 _tokensOutMin ) external; + /** + * @notice Move subgraph signal from sender to `_recipient` + * @param _subgraphID Subgraph ID + * @param _recipient Address to send the signal to + * @param _amount The amount of nSignal to transfer + */ function transferSignal( uint256 _subgraphID, address _recipient, uint256 _amount ) external; + /** + * @notice Withdraw tokens from a deprecated subgraph. + * When the subgraph is deprecated, any curator can call this function and + * withdraw the GRT they are entitled for its original deposit + * @param _subgraphID Subgraph ID + */ function withdraw(uint256 _subgraphID) external; // -- Getters -- + /** + * @notice Return the owner of a subgraph. + * @param _tokenID Subgraph ID + * @return Owner address + */ function ownerOf(uint256 _tokenID) external view returns (address); + /** + * @notice Return the total signal on the subgraph. 
+ * @param _subgraphID Subgraph ID + * @return Total signal on the subgraph + */ function subgraphSignal(uint256 _subgraphID) external view returns (uint256); + /** + * @notice Return the total tokens on the subgraph at current value. + * @param _subgraphID Subgraph ID + * @return Total tokens on the subgraph + */ function subgraphTokens(uint256 _subgraphID) external view returns (uint256); + /** + * @notice Calculate subgraph signal to be returned for an amount of tokens. + * @param _subgraphID Subgraph ID + * @param _tokensIn Tokens being exchanged for subgraph signal + * @return Amount of subgraph signal and curation tax + */ function tokensToNSignal(uint256 _subgraphID, uint256 _tokensIn) external view @@ -100,30 +201,70 @@ interface IGNS { uint256 ); + /** + * @notice Calculate tokens returned for an amount of subgraph signal. + * @param _subgraphID Subgraph ID + * @param _nSignalIn Subgraph signal being exchanged for tokens + * @return Amount of tokens returned for an amount of subgraph signal + */ function nSignalToTokens(uint256 _subgraphID, uint256 _nSignalIn) external view returns (uint256, uint256); + /** + * @notice Calculate subgraph signal to be returned for an amount of subgraph deployment signal. + * @param _subgraphID Subgraph ID + * @param _vSignalIn Amount of subgraph deployment signal to exchange for subgraph signal + * @return Amount of subgraph signal that can be bought + */ function vSignalToNSignal(uint256 _subgraphID, uint256 _vSignalIn) external view returns (uint256); + /** + * @notice Calculate subgraph deployment signal to be returned for an amount of subgraph signal. + * @param _subgraphID Subgraph ID + * @param _nSignalIn Subgraph signal being exchanged for subgraph deployment signal + * @return Amount of subgraph deployment signal that can be returned + */ function nSignalToVSignal(uint256 _subgraphID, uint256 _nSignalIn) external view returns (uint256); + /** + * @notice Get the amount of subgraph signal a curator has. 
+ * @param _subgraphID Subgraph ID + * @param _curator Curator address + * @return Amount of subgraph signal owned by a curator + */ function getCuratorSignal(uint256 _subgraphID, address _curator) external view returns (uint256); + /** + * @notice Return whether a subgraph is published. + * @param _subgraphID Subgraph ID + * @return Return true if subgraph is currently published + */ function isPublished(uint256 _subgraphID) external view returns (bool); + /** + * @notice Return whether a subgraph is a legacy subgraph (created before subgraph NFTs). + * @param _subgraphID Subgraph ID + * @return Return true if subgraph is a legacy subgraph + */ function isLegacySubgraph(uint256 _subgraphID) external view returns (bool); + /** + * @notice Returns account and sequence ID for a legacy subgraph (created before subgraph NFTs). + * @param _subgraphID Subgraph ID + * @return account Account that created the subgraph (or 0 if it's not a legacy subgraph) + * @return seqID Sequence number for the subgraph + */ function getLegacySubgraphKey(uint256 _subgraphID) external view diff --git a/contracts/discovery/ISubgraphNFT.sol b/contracts/discovery/ISubgraphNFT.sol index bf0cb2bfe..4b0495a28 100644 --- a/contracts/discovery/ISubgraphNFT.sol +++ b/contracts/discovery/ISubgraphNFT.sol @@ -22,6 +22,4 @@ interface ISubgraphNFT is IERC721 { function setSubgraphMetadata(uint256 _tokenId, bytes32 _subgraphMetadata) external; function tokenURI(uint256 _tokenId) external view returns (string memory); - - function getSubgraphMetadata(uint256 _tokenId) external view returns (bytes32); } diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index c597ee4f0..1979bf5c9 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -3,8 +3,7 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; -import { Address } from "@openzeppelin/contracts/utils/Address.sol"; +import { 
SafeMathUpgradeable } from "@openzeppelin/contracts-upgradeable/math/SafeMathUpgradeable.sol"; import { GNS } from "./GNS.sol"; @@ -24,10 +23,13 @@ import { L1GNSV1Storage } from "./L1GNSStorage.sol"; * transaction. */ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { - using SafeMath for uint256; + using SafeMathUpgradeable for uint256; + /// @dev Emitted when a subgraph was locked as preparation to migrating it to L2 event SubgraphLockedForMigrationToL2(uint256 _subgraphID); + /// @dev Emitted when a subgraph was sent to L2 through the bridge event SubgraphSentToL2(uint256 _subgraphID); + /// @dev Emitted when the address of the Arbitrum Inbox was updated event ArbitrumInboxAddressUpdated(address _inbox); /** @@ -39,6 +41,17 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { emit ArbitrumInboxAddressUpdated(_inbox); } + /** + * @notice Lock a subgraph for migration to L2. + * This will lock the subgraph's curator balance and prevent any further + * changes to the subgraph. + * WARNING: After calling this function, the subgraph owner has 255 blocks + * to call sendSubgraphToL2 to complete the migration; otherwise, the + * subgraph will have to be deprecated using deprecateLockedSubgraph, + * and the deployment to L2 will have to be manual (and curators will + * have to manually move the signal over too). + * @param _subgraphID Subgraph ID + */ function lockSubgraphForMigrationToL2(uint256 _subgraphID) external payable @@ -67,15 +80,20 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { } /** - * @dev Send a subgraph's data and tokens to L2. + * @notice Send a subgraph's data and tokens to L2. * The subgraph must be locked using lockSubgraphForMigrationToL2 in a previous block - * (less than 256 blocks ago). + * (less than 255 blocks ago). + * Use the Arbitrum SDK to estimate the L2 retryable ticket parameters. 
+ * @param _subgraphID Subgraph ID + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket */ function sendSubgraphToL2( uint256 _subgraphID, - uint256 maxGas, - uint256 gasPriceBid, - uint256 maxSubmissionCost + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost ) external payable notPartialPaused { SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; @@ -90,9 +108,9 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { require(ownerOf(_subgraphID) == msg.sender, "GNS: Must be authorized"); migrationData.l1Done = true; - bytes memory extraData = encodeSubgraphDataForL2(_subgraphID, migrationData, subgraphData); + bytes memory extraData = _encodeSubgraphDataForL2(_subgraphID, migrationData, subgraphData); - bytes memory data = abi.encode(maxSubmissionCost, extraData); + bytes memory data = abi.encode(_maxSubmissionCost, extraData); IGraphToken grt = graphToken(); ITokenGateway gateway = ITokenGateway(_resolveContract(keccak256("GraphTokenGateway"))); grt.approve(address(gateway), migrationData.tokens); @@ -100,8 +118,8 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { address(grt), counterpartGNSAddress, migrationData.tokens, - maxGas, - gasPriceBid, + _maxGas, + _gasPriceBid, data ); @@ -110,24 +128,11 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { emit SubgraphSentToL2(_subgraphID); } - function encodeSubgraphDataForL2( - uint256 _subgraphID, - SubgraphL2MigrationData storage migrationData, - SubgraphData storage subgraphData - ) internal view returns (bytes memory) { - return - abi.encode( - _subgraphID, - ownerOf(_subgraphID), - blockhash(migrationData.lockedAtBlock), - subgraphData.nSignal - ); - } - /** - * @dev Deprecate a subgraph locked more than 256 
blocks ago. + * @notice Deprecate a subgraph locked more than 256 blocks ago. * This allows curators to recover their funds if the subgraph was locked * for a migration to L2 but the subgraph was never actually sent to L2. + * @param _subgraphID Subgraph ID */ function deprecateLockedSubgraph(uint256 _subgraphID) external notPartialPaused { SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); @@ -146,6 +151,18 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { emit SubgraphDeprecated(_subgraphID, subgraphData.withdrawableGRT); } + /** + * @notice Claim the balance for a curator's signal in a subgraph that was + * migrated to L2, by sending a retryable ticket to the L2GNS. + * The balance will be claimed for a beneficiary address, as this method can be + * used by curators that use a contract address in L1 that may not exist in L2. + * @param _subgraphID Subgraph ID + * @param _beneficiary Address that will receive the tokens in L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + * @return The sequence ID for the retryable ticket, as returned by the Arbitrum inbox. 
+ */ function claimCuratorBalanceToBeneficiaryOnL2( uint256 _subgraphID, address _beneficiary, @@ -163,7 +180,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { { // makes sure only sufficient ETH is supplied required for successful redemption on L2 // if a user does not desire immediate redemption they should provide - // a msg.value of AT LEAST maxSubmissionCost + // a msg.value of AT LEAST _maxSubmissionCost uint256 expectedEth = _maxSubmissionCost + (_maxGas * _gasPriceBid); require(msg.value >= expectedEth, "WRONG_ETH_VALUE"); } @@ -189,4 +206,25 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { return abi.encode(seqNum); } + + /** + * @dev Encodes the subgraph data as callhook parameters + * for the L2 migration. + * @param _subgraphID Subgraph ID + * @param _migrationData Subgraph L2 migration data + * @param _subgraphData Subgraph data + */ + function _encodeSubgraphDataForL2( + uint256 _subgraphID, + SubgraphL2MigrationData storage _migrationData, + SubgraphData storage _subgraphData + ) internal view returns (bytes memory) { + return + abi.encode( + _subgraphID, + ownerOf(_subgraphID), + blockhash(_migrationData.lockedAtBlock), + _subgraphData.nSignal + ); + } } diff --git a/contracts/discovery/L1GNSStorage.sol b/contracts/discovery/L1GNSStorage.sol index e591e2c81..c1d14e0b5 100644 --- a/contracts/discovery/L1GNSStorage.sol +++ b/contracts/discovery/L1GNSStorage.sol @@ -6,7 +6,8 @@ pragma abicoder v2; /** * @title L1GNSV1Storage * @notice This contract holds all the L1-specific storage variables for the L1GNS contract, version 1 - * @dev + * @dev When adding new versions, make sure to move the gap to the new version and + * reduce the size of the gap accordingly. 
*/ abstract contract L1GNSV1Storage { /// Address of the Arbitrum DelayedInbox diff --git a/contracts/discovery/SubgraphNFT.sol b/contracts/discovery/SubgraphNFT.sol index 307b089c7..c6dadaa81 100644 --- a/contracts/discovery/SubgraphNFT.sol +++ b/contracts/discovery/SubgraphNFT.sol @@ -164,15 +164,4 @@ contract SubgraphNFT is Governed, ERC721, ISubgraphNFT { // If there is a baseURI but no tokenURI, concatenate the tokenID to the baseURI. return string(abi.encodePacked(base, HexStrings.toString(_tokenId))); } - - /** - * @notice Get the metadata for a subgraph represented by `_tokenId`. - * @dev `_tokenId` must exist. - * @param _tokenId ID of the NFT - * @return IPFS hash for the metadata - */ - function getSubgraphMetadata(uint256 _tokenId) external view override returns (bytes32) { - require(_exists(_tokenId), "ERC721Metadata: metadata query of nonexistent token"); - return _subgraphMetadataHashes[_tokenId]; - } } diff --git a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol index 418eb1424..9d39d82f6 100644 --- a/contracts/l2/curation/L2Curation.sol +++ b/contracts/l2/curation/L2Curation.sol @@ -3,9 +3,9 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import { Address } from "@openzeppelin/contracts/utils/Address.sol"; -import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; -import { Clones } from "@openzeppelin/contracts/proxy/Clones.sol"; +import { AddressUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol"; +import { SafeMathUpgradeable } from "@openzeppelin/contracts-upgradeable/math/SafeMathUpgradeable.sol"; +import { ClonesUpgradeable } from "@openzeppelin/contracts-upgradeable/proxy/ClonesUpgradeable.sol"; import { BancorFormula } from "../../bancor/BancorFormula.sol"; import { GraphUpgradeable } from "../../upgrades/GraphUpgradeable.sol"; @@ -13,7 +13,7 @@ import { TokenUtils } from "../../utils/TokenUtils.sol"; import { IRewardsManager } from 
"../../rewards/IRewardsManager.sol"; import { Managed } from "../../governance/Managed.sol"; import { IGraphToken } from "../../token/IGraphToken.sol"; -import { CurationV1Storage } from "../../curation/CurationStorage.sol"; +import { CurationV2Storage } from "../../curation/CurationStorage.sol"; import { ICuration } from "../../curation/ICuration.sol"; import { IGraphCurationToken } from "../../curation/IGraphCurationToken.sol"; import { GraphCurationToken } from "../../curation/GraphCurationToken.sol"; @@ -31,8 +31,8 @@ import { IL2Curation } from "./IL2Curation.sol"; * Holders can burn GCS using this contract to get GRT tokens back according to the * bonding curve. */ -contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { - using SafeMath for uint256; +contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { + using SafeMathUpgradeable for uint256; /// @dev 100% in parts per million uint32 private constant MAX_PPM = 1000000; @@ -95,7 +95,7 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { address _curationTokenMaster, uint32 _curationTaxPercentage, uint256 _minimumCurationDeposit - ) external onlyImpl { + ) external onlyImpl initializer { Managed._initialize(_controller); // For backwards compatibility: @@ -198,7 +198,9 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { // If no signal token for the pool - create one if (address(curationPool.gcs) == address(0)) { // Use a minimal proxy to reduce gas cost - IGraphCurationToken gcs = IGraphCurationToken(Clones.clone(curationTokenMaster)); + IGraphCurationToken gcs = IGraphCurationToken( + ClonesUpgradeable.clone(curationTokenMaster) + ); gcs.initialize(address(this)); curationPool.gcs = gcs; } @@ -258,7 +260,9 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { // If no signal token for the pool - create one if (address(curationPool.gcs) == address(0)) { // Use a minimal proxy to reduce gas cost - 
IGraphCurationToken gcs = IGraphCurationToken(Clones.clone(curationTokenMaster)); + IGraphCurationToken gcs = IGraphCurationToken( + ClonesUpgradeable.clone(curationTokenMaster) + ); gcs.initialize(address(this)); curationPool.gcs = gcs; } @@ -491,7 +495,10 @@ contract L2Curation is CurationV1Storage, GraphUpgradeable, IL2Curation { */ function _setCurationTokenMaster(address _curationTokenMaster) private { require(_curationTokenMaster != address(0), "Token master must be non-empty"); - require(Address.isContract(_curationTokenMaster), "Token master must be a contract"); + require( + AddressUpgradeable.isContract(_curationTokenMaster), + "Token master must be a contract" + ); curationTokenMaster = _curationTokenMaster; emit ParameterUpdated("curationTokenMaster"); diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 3ac1aadf3..4a7c65540 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -3,8 +3,7 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; -import { Address } from "@openzeppelin/contracts/utils/Address.sol"; +import { SafeMathUpgradeable } from "@openzeppelin/contracts-upgradeable/math/SafeMathUpgradeable.sol"; import { AddressAliasHelper } from "../../arbitrum/AddressAliasHelper.sol"; import { GNS } from "../../discovery/GNS.sol"; @@ -30,18 +29,22 @@ import { IL2Curation } from "../curation/IL2Curation.sol"; contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { using RLPReader for bytes; using RLPReader for RLPReader.RLPItem; - using SafeMath for uint256; + using SafeMathUpgradeable for uint256; - /// Emitted when a subgraph is received from L1 through the bridge + /// @dev Emitted when a subgraph is received from L1 through the bridge event SubgraphReceivedFromL1(uint256 _subgraphID); + /// @dev Emitted when a subgraph migration from L1 is finalized, so the subgraph is published event SubgraphMigrationFinalized(uint256 
_subgraphID); + /// @dev Emitted when the L1 balance for a curator has been claimed event CuratorBalanceClaimed( uint256 _subgraphID, address _l1Curator, address _l2Curator, uint256 _nSignalClaimed ); + /// @dev Emitted when claiming L1 balances using MPT proofs is enabled event MPTClaimingEnabled(); + /// @dev Emitted when claiming L1 balances using MPT proofs is disabled event MPTClaimingDisabled(); /** diff --git a/contracts/libraries/MerklePatriciaProofVerifier.sol b/contracts/libraries/MerklePatriciaProofVerifier.sol index 18c8c5f2d..d9bf28a79 100644 --- a/contracts/libraries/MerklePatriciaProofVerifier.sol +++ b/contracts/libraries/MerklePatriciaProofVerifier.sol @@ -22,6 +22,11 @@ pragma solidity 0.7.6; import { RLPReader } from "./RLPReader.sol"; +/** + * @title MerklePatriciaProofVerifier + * @notice This contract verifies proofs of inclusion or exclusion + * for Merkle Patricia tries. + */ library MerklePatriciaProofVerifier { using RLPReader for RLPReader.RLPItem; using RLPReader for bytes; @@ -200,6 +205,11 @@ library MerklePatriciaProofVerifier { } } + /** + * @dev Checks if an RLP item corresponds to an empty byte sequence, encoded as 0x80. + * @param item The RLP item to be checked. + * @return True if the item is an empty byte string, false otherwise. + */ function _isEmptyByteSequence(RLPReader.RLPItem memory item) private pure returns (bool) { if (item.len != 1) { return false; @@ -213,6 +223,13 @@ library MerklePatriciaProofVerifier { return b == 0x80; /* empty byte string */ } + /** + * @dev Decode a compact-encoded Merkle-Patricia proof node, + * which must be a leaf or extension node + * @param compact The compact-encoded node + * @return isLeaf True if the node is a leaf node, false if it is an extension node. + * @return nibbles The decoded path of the node split into nibbles. 
+ */ function _merklePatriciaCompactDecode(bytes memory compact) private pure @@ -240,6 +257,12 @@ library MerklePatriciaProofVerifier { return (isLeaf, _decodeNibbles(compact, skipNibbles)); } + /** + * @dev Decode the nibbles of a compact-encoded Merkle-Patricia proof node. + * @param compact The compact-encoded node + * @param skipNibbles The number of nibbles to skip at the beginning of the node. + * @return nibbles The decoded path of the node split into nibbles. + */ function _decodeNibbles(bytes memory compact, uint256 skipNibbles) private pure @@ -266,6 +289,14 @@ library MerklePatriciaProofVerifier { assert(nibblesLength == nibbles.length); } + /** + * @dev Compute the length of the shared prefix between two byte sequences. + * This will be the count of how many bytes (representing path nibbles) are the same at the beginning of the sequences. + * @param xsOffset The offset to skip on the first sequence + * @param xs The first sequence + * @param ys The second sequence + * @return The length of the shared prefix. 
+ */ function _sharedPrefixLength( uint256 xsOffset, bytes memory xs, diff --git a/contracts/libraries/RLPReader.sol b/contracts/libraries/RLPReader.sol index 645776969..ab4bf33da 100644 --- a/contracts/libraries/RLPReader.sol +++ b/contracts/libraries/RLPReader.sol @@ -10,33 +10,46 @@ * MODIFIED from hamdiallam's implementation: * - Explicitly marked visibility of constants * - Silenced linter warnings about inline assembly + * - Other minor QA improvements */ -/* +/** * @author Hamdi Allam hamdi.allam97@gmail.com * Please reach out with any questions or concerns */ pragma solidity >=0.5.0 <0.9.0; // solhint-disable no-inline-assembly + +/** + * @title RLPReader library + * @notice This library is used to decode RLP-encoded lists and strings + */ library RLPReader { + /// Minimum value that encodes a short string uint8 public constant STRING_SHORT_START = 0x80; + /// Minimum value that encodes a long string uint8 public constant STRING_LONG_START = 0xb8; + /// Minimum value that encodes a short list uint8 public constant LIST_SHORT_START = 0xc0; + /// Minimum value that encodes a long list uint8 public constant LIST_LONG_START = 0xf8; + /// Size of each EVM word, used to copy strings in less steps uint8 public constant WORD_SIZE = 32; + /// @dev Structure to represent an RLP-encoded item struct RLPItem { uint256 len; uint256 memPtr; } + /// @dev Iterator structure to iterate over an RLP-encoded list struct Iterator { RLPItem item; // Item that's being iterated over. uint256 nextPtr; // Position of the next item in the list. } - /* + /** * @dev Returns the next element in the iteration. Reverts if it has not next element. * @param self The iterator. * @return The next element in the iteration. @@ -51,18 +64,20 @@ library RLPReader { return RLPItem(itemLength, ptr); } - /* + /** * @dev Returns true if the iteration has more elements. * @param self The iterator. - * @return true if the iteration has more elements. 
+ * @return True if the iteration has more elements, false otherwise. */ function hasNext(Iterator memory self) internal pure returns (bool) { RLPItem memory item = self.item; return self.nextPtr < item.memPtr + item.len; } - /* + /** + * @dev Concerts an RLP-encoded bytes array to an RLPItem. * @param item RLP encoded bytes + * @return The RLP item with a pointer to the payload within the bytes array */ function toRlpItem(bytes memory item) internal pure returns (RLPItem memory) { uint256 memPtr; @@ -73,7 +88,7 @@ library RLPReader { return RLPItem(item.length, memPtr); } - /* + /** * @dev Create an iterator. Reverts if item is not a list. * @param self The RLP item. * @return An 'Iterator' over the item. @@ -85,16 +100,20 @@ library RLPReader { return Iterator(self, ptr); } - /* - * @param the RLP item. + /** + * @dev Returns the length of an RLP item + * @param item The RLP item. + * @return The length of the RLP item. */ function rlpLen(RLPItem memory item) internal pure returns (uint256) { return item.len; } - /* - * @param the RLP item. - * @return (memPtr, len) pair: location of the item's payload in memory. + /** + * @dev Returns the location of the payload of an RLP item + * @param item The RLP item. + * @return Pointer to the payload within the RLP encoded bytes in memory + * @return Length of the payload */ function payloadLocation(RLPItem memory item) internal pure returns (uint256, uint256) { uint256 offset = _payloadOffset(item.memPtr); @@ -103,16 +122,20 @@ library RLPReader { return (memPtr, len); } - /* - * @param the RLP item. + /** + * @dev Returns the size of the payload in an RLP encoded item + * @param item The RLP item. + * @return The size of the payload */ function payloadLen(RLPItem memory item) internal pure returns (uint256) { (, uint256 len) = payloadLocation(item); return len; } - /* - * @param the RLP item containing the encoded list. 
+ /** + * @dev Decode an RLP item that represents a list into an array of RLP items + * @param item The RLP item containing the encoded list. + * @return The list of RLP-encoded items contained by the input item */ function toList(RLPItem memory item) internal pure returns (RLPItem[] memory) { require(isList(item), "RLP: not list (toList)"); @@ -131,7 +154,11 @@ library RLPReader { return result; } - // @return indicator whether encoded payload is a list. negate this function call for isData. + /** + * @dev Check if an RLP item is a list + * @param item The RLP item. + * @return True if the item is a list, false otherwise (in which case it's a string / raw data) + */ function isList(RLPItem memory item) internal pure returns (bool) { if (item.len == 0) return false; @@ -145,9 +172,10 @@ library RLPReader { return true; } - /* + /** * @dev A cheaper version of keccak256(toRlpBytes(item)) that avoids copying memory. - * @return keccak256 hash of RLP encoded bytes. + * @param item The RLP item. + * @return The keccak256 hash of RLP encoded bytes. */ function rlpBytesKeccak256(RLPItem memory item) internal pure returns (bytes32) { uint256 ptr = item.memPtr; @@ -159,9 +187,10 @@ library RLPReader { return result; } - /* + /** * @dev A cheaper version of keccak256(toBytes(item)) that avoids copying memory. - * @return keccak256 hash of the item payload. + * @param item The RLP item. + * @return The keccak256 hash of the item's payload. */ function payloadKeccak256(RLPItem memory item) internal pure returns (bytes32) { (uint256 memPtr, uint256 len) = payloadLocation(item); @@ -172,9 +201,13 @@ library RLPReader { return result; } - /** RLPItem conversions into data types **/ + /* RLPItem conversions into data types */ - // @returns raw rlp encoding in bytes + /** + * @dev Get the raw RLP encoded item in bytes + * @param item The RLP item. + * @return Raw RLP encoded item. 
+ */ function toRlpBytes(RLPItem memory item) internal pure returns (bytes memory) { bytes memory result = new bytes(item.len); if (result.length == 0) return result; @@ -188,7 +221,12 @@ library RLPReader { return result; } - // any non-zero byte except "0x80" is considered true + /** + * @dev Interpret an RLP encoded item as a boolean. + * Any non-zero byte except "0x80" is considered true. + * @param item The RLP item. + * @return The boolean value. + */ function toBoolean(RLPItem memory item) internal pure returns (bool) { require(item.len == 1, "RLP: invalid boolean length"); uint256 result; @@ -208,6 +246,11 @@ library RLPReader { } } + /** + * @dev Interpret an RLP encoded item as an address. + * @param item The RLP item. + * @return The address value. + */ function toAddress(RLPItem memory item) internal pure returns (address) { // 1 byte for the length prefix require(item.len == 21, "RLP: invalid addr length"); @@ -215,6 +258,11 @@ library RLPReader { return address(uint160(toUint(item))); } + /** + * @dev Interpret an RLP encoded item as a uint. + * @param item The RLP item. + * @return The uint value. + */ function toUint(RLPItem memory item) internal pure returns (uint256) { require(item.len > 0 && item.len <= 33, "RLP: invalid uint length"); @@ -233,7 +281,12 @@ library RLPReader { return result; } - // enforces 32 byte length + /** + * @dev Interpret an RLP encoded item as an uint, ensuring the payload + * is 32 bytes in size. + * @param item The RLP item. + * @return The uint256 value. + */ function toUintStrict(RLPItem memory item) internal pure returns (uint256) { // one byte prefix require(item.len == 33, "RLP: invalid uint strict length"); @@ -247,6 +300,11 @@ library RLPReader { return result; } + /** + * @dev Interpret an RLP encoded item as a bytes array. + * @param item The RLP item. + * @return The bytes value. 
+ */ function toBytes(RLPItem memory item) internal pure returns (bytes memory) { require(item.len > 0, "RLP: invalid zero length bytes"); @@ -266,7 +324,11 @@ library RLPReader { * Private Helpers */ - // @return number of payload items inside an encoded list. + /** + * @dev Get the number of items in an RLP encoded list. + * @param item The RLP item. + * @return The number of items in the list. + */ function numItems(RLPItem memory item) private pure returns (uint256) { if (item.len == 0) return 0; @@ -281,7 +343,11 @@ library RLPReader { return count; } - // @return entire rlp item byte length + /** + * @dev Get the entire length of an RLP item. + * @param memPtr The memory pointer to the start of the item. + * @return The length of the item. + */ function _itemLength(uint256 memPtr) private pure returns (uint256) { uint256 itemLen; uint256 byte0; @@ -315,7 +381,11 @@ library RLPReader { return itemLen; } - // @return number of bytes until the data + /** + * @dev Get the location of the payload for an RLP item. + * @param memPtr The memory pointer to the start of the item. + * @return The offset of the payload from the start of the item + */ function _payloadOffset(uint256 memPtr) private pure returns (uint256) { uint256 byte0; assembly { @@ -332,7 +402,8 @@ library RLPReader { else return byte0 - (LIST_LONG_START - 1) + 1; } - /* + /** + * @dev Copy two areas of memory * @param src Pointer to source * @param dest Pointer to destination * @param len Amount of memory to copy from the source diff --git a/contracts/libraries/StateProofVerifier.sol b/contracts/libraries/StateProofVerifier.sol index bcc5c0783..edcc7d35c 100644 --- a/contracts/libraries/StateProofVerifier.sol +++ b/contracts/libraries/StateProofVerifier.sol @@ -9,6 +9,7 @@ * - Using local copy of the RLPReader library instead of using the package * - Explicitly marked visibility of constants * - Added revert messages + * - A few other QA improvements, e.g. 
NatSpec */ pragma solidity 0.7.6; @@ -23,8 +24,11 @@ library StateProofVerifier { using RLPReader for RLPReader.RLPItem; using RLPReader for bytes; + /// Index within a block header for the state root hash uint256 public constant HEADER_STATE_ROOT_INDEX = 3; + /// Index within a block header for the block number uint256 public constant HEADER_NUMBER_INDEX = 8; + /// Index within a block header for the timestamp uint256 public constant HEADER_TIMESTAMP_INDEX = 11; struct BlockHeader { @@ -50,6 +54,7 @@ library StateProofVerifier { /** * @notice Parses block header and verifies its presence onchain within the latest 256 blocks. * @param _headerRlpBytes RLP-encoded block header. + * @return The block header as a BlockHeader struct. */ function verifyBlockHeader(bytes memory _headerRlpBytes) internal @@ -65,6 +70,7 @@ library StateProofVerifier { /** * @notice Parses RLP-encoded block header. * @param _headerRlpBytes RLP-encoded block header. + * @return The block header as a BlockHeader struct. */ function parseBlockHeader(bytes memory _headerRlpBytes) internal @@ -85,10 +91,11 @@ library StateProofVerifier { } /** - * @notice Verifies Merkle Patricia proof of an account and extracts the account fields. - * + * @dev Verifies Merkle Patricia proof of an account and extracts the account fields. * @param _addressHash Keccak256 hash of the address corresponding to the account. * @param _stateRootHash MPT root hash of the Ethereum state trie. + * @param _proof RLP-encoded Merkle Patricia proof for the account. + * @return The account as an Account struct, if the proof shows it exists, or an empty struct otherwise. */ function extractAccountFromProof( bytes32 _addressHash, // keccak256(abi.encodePacked(address)) @@ -120,10 +127,11 @@ library StateProofVerifier { } /** - * @notice Verifies Merkle Patricia proof of a slot and extracts the slot's value. - * + * @dev Verifies Merkle Patricia proof of a slot and extracts the slot's value. 
* @param _slotHash Keccak256 hash of the slot position. * @param _storageRootHash MPT root hash of the account's storage trie. + * @param _proof RLP-encoded Merkle Patricia proof for the slot. + * @return The slot's value as a SlotValue struct, if the proof shows it exists, or an empty struct otherwise. */ function extractSlotValueFromProof( bytes32 _slotHash, diff --git a/contracts/tests/LegacyGNSMock.sol b/contracts/tests/LegacyGNSMock.sol index a256d3c39..f8878db8a 100644 --- a/contracts/tests/LegacyGNSMock.sol +++ b/contracts/tests/LegacyGNSMock.sol @@ -8,8 +8,14 @@ import { IGNS } from "../discovery/IGNS.sol"; /** * @title LegacyGNSMock contract + * @dev This is used to test the migration of legacy subgraphs (both to NFT-based subgraphs and to L2) */ contract LegacyGNSMock is L1GNS { + /** + * @notice Create a mock legacy subgraph (owned by the msg.sender) + * @param subgraphNumber Number of the subgraph (sequence ID for the account) + * @param subgraphDeploymentID Subgraph deployment ID + */ function createLegacySubgraph(uint256 subgraphNumber, bytes32 subgraphDeploymentID) external { SubgraphData storage subgraphData = legacySubgraphData[msg.sender][subgraphNumber]; legacySubgraphs[msg.sender][subgraphNumber] = subgraphDeploymentID; @@ -17,6 +23,11 @@ contract LegacyGNSMock is L1GNS { subgraphData.nSignal = 1000; // Mock value } + /** + * @notice Get the subgraph deployment ID for a subgraph + * @param subgraphID Subgraph ID + * @return subgraphDeploymentID Subgraph deployment ID + */ function getSubgraphDeploymentID(uint256 subgraphID) external view @@ -26,6 +37,11 @@ contract LegacyGNSMock is L1GNS { subgraphDeploymentID = subgraph.subgraphDeploymentID; } + /** + * @notice Get the nSignal for a subgraph + * @param subgraphID Subgraph ID + * @return nSignal The subgraph's nSignal + */ function getSubgraphNSignal(uint256 subgraphID) external view returns (uint256 nSignal) { IGNS.SubgraphData storage subgraph = _getSubgraphData(subgraphID); nSignal = 
subgraph.nSignal; diff --git a/contracts/tests/MerklePatriciaProofVerifierMock.sol b/contracts/tests/MerklePatriciaProofVerifierMock.sol index e1a4bbdec..f8b958022 100644 --- a/contracts/tests/MerklePatriciaProofVerifierMock.sol +++ b/contracts/tests/MerklePatriciaProofVerifierMock.sol @@ -14,12 +14,19 @@ contract MerklePatriciaProofVerifierMock { using RLPReader for RLPReader.RLPItem; using RLPReader for bytes; + /** + * @notice Extract the proof value from a Merkle-Patricia proof + * @param _rootHash Root hash of the Merkle-Patricia tree + * @param _path Path for which the proof should prove inclusion or exclusion + * @param _proofRlpBytes Merkle-Patricia proof of inclusion or exclusion, as an RLP-encoded list + * @return The value for the given path, if it exists, or an empty bytes if it's a valid proof of exclusion + */ function extractProofValue( - bytes32 rootHash, - bytes memory path, + bytes32 _rootHash, + bytes memory _path, bytes memory _proofRlpBytes ) external pure returns (bytes memory) { RLPReader.RLPItem[] memory stack = _proofRlpBytes.toRlpItem().toList(); - return MerklePatriciaProofVerifier.extractProofValue(rootHash, path, stack); + return MerklePatriciaProofVerifier.extractProofValue(_rootHash, _path, stack); } } diff --git a/contracts/tests/RLPReaderMock.sol b/contracts/tests/RLPReaderMock.sol deleted file mode 100644 index e69de29bb..000000000 diff --git a/contracts/tests/StateProofVerifierMock.sol b/contracts/tests/StateProofVerifierMock.sol deleted file mode 100644 index e69de29bb..000000000 From 9df2a024ccf380aa9e60f24de21c92570da0ccd8 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 10 Nov 2022 20:06:19 -0300 Subject: [PATCH 051/112] fix: gas optimization: > 0 is != 0 --- contracts/curation/Curation.sol | 12 ++++++------ contracts/discovery/GNS.sol | 10 +++++----- contracts/discovery/L1GNS.sol | 6 +++--- contracts/l2/curation/L2Curation.sol | 12 ++++++------ contracts/l2/discovery/L2GNS.sol | 2 +- 
contracts/libraries/MerklePatriciaProofVerifier.sol | 2 +- contracts/libraries/RLPReader.sol | 6 +++--- 7 files changed, 25 insertions(+), 25 deletions(-) diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index 7f1dc50e3..a51c95902 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -169,7 +169,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { uint256 _signalOutMin ) external override notPartialPaused returns (uint256, uint256) { // Need to deposit some funds - require(_tokensIn > 0, "Cannot deposit zero tokens"); + require(_tokensIn != 0, "Cannot deposit zero tokens"); // Exchange GRT tokens for GCS of the subgraph pool (uint256 signalOut, uint256 curationTax) = tokensToSignal(_subgraphDeploymentID, _tokensIn); @@ -231,7 +231,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { address curator = msg.sender; // Validations - require(_signalIn > 0, "Cannot burn zero signal"); + require(_signalIn != 0, "Cannot burn zero signal"); require( getCuratorSignal(curator, _subgraphDeploymentID) >= _signalIn, "Cannot burn more signal than you own" @@ -297,7 +297,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { * @return True if curated */ function isCurated(bytes32 _subgraphDeploymentID) public view override returns (bool) { - return _pools[_subgraphDeploymentID].tokens > 0; + return _pools[_subgraphDeploymentID].tokens != 0; } /** @@ -404,7 +404,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { CurationPool memory curationPool = _pools[_subgraphDeploymentID]; uint256 curationPoolSignal = getCurationPoolSignal(_subgraphDeploymentID); require( - curationPool.tokens > 0, + curationPool.tokens != 0, "Subgraph deployment must be curated to perform calculations" ); require( @@ -428,7 +428,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { */ function _setDefaultReserveRatio(uint32 _defaultReserveRatio) private { // Reserve Ratio must be within 0% to 
100% (inclusive, in PPM) - require(_defaultReserveRatio > 0, "Default reserve ratio must be > 0"); + require(_defaultReserveRatio != 0, "Default reserve ratio must be > 0"); require( _defaultReserveRatio <= MAX_PPM, "Default reserve ratio cannot be higher than MAX_PPM" @@ -444,7 +444,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { * @param _minimumCurationDeposit Minimum amount of tokens required deposit */ function _setMinimumCurationDeposit(uint256 _minimumCurationDeposit) private { - require(_minimumCurationDeposit > 0, "Minimum curation deposit cannot be 0"); + require(_minimumCurationDeposit != 0, "Minimum curation deposit cannot be 0"); minimumCurationDeposit = _minimumCurationDeposit; emit ParameterUpdated("minimumCurationDeposit"); diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index 7a8e71bdc..07355e35a 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -315,7 +315,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { // Move all signal from previous version to new version // NOTE: We will only do this as long as there is signal on the subgraph - if (subgraphData.nSignal > 0) { + if (subgraphData.nSignal != 0) { // Burn all version signal in the name pool for tokens (w/no slippage protection) // Sell all signal from the old deployment uint256 tokens = curation.burn( @@ -367,7 +367,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { SubgraphData storage subgraphData = _getSubgraphOrRevert(_subgraphID); // Burn signal only if it has any available - if (subgraphData.nSignal > 0) { + if (subgraphData.nSignal != 0) { subgraphData.withdrawableGRT = curation().burn( subgraphData.subgraphDeploymentID, subgraphData.vSignal, @@ -498,12 +498,12 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { // Subgraph validations SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); require(subgraphData.disabled == true, "GNS: Must be disabled 
first"); - require(subgraphData.withdrawableGRT > 0, "GNS: No more GRT to withdraw"); + require(subgraphData.withdrawableGRT != 0, "GNS: No more GRT to withdraw"); // Curator validations address curator = msg.sender; uint256 curatorNSignal = subgraphData.curatorNSignal[curator]; - require(curatorNSignal > 0, "GNS: No signal to withdraw GRT"); + require(curatorNSignal != 0, "GNS: No signal to withdraw GRT"); // Get curator share of tokens to be withdrawn uint256 tokensOut = curatorNSignal.mul(subgraphData.withdrawableGRT).div( @@ -576,7 +576,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { */ function subgraphTokens(uint256 _subgraphID) external view override returns (uint256) { uint256 signal = _getSubgraphData(_subgraphID).nSignal; - if (signal > 0) { + if (signal != 0) { (, uint256 tokens) = nSignalToTokens(_subgraphID, signal); return tokens; } diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 1979bf5c9..e8d662029 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -98,7 +98,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; require( - migrationData.lockedAtBlock > 0 && migrationData.lockedAtBlock < block.number, + migrationData.lockedAtBlock != 0 && migrationData.lockedAtBlock < block.number, "!LOCKED" ); require(migrationData.lockedAtBlock.add(255) >= block.number, "TOO_LATE"); @@ -137,7 +137,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { function deprecateLockedSubgraph(uint256 _subgraphID) external notPartialPaused { SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; - require(migrationData.lockedAtBlock > 0, "!LOCKED"); + require(migrationData.lockedAtBlock != 0, "!LOCKED"); 
require(migrationData.lockedAtBlock.add(256) < block.number, "TOO_EARLY"); require(!migrationData.l1Done, "ALREADY_DONE"); migrationData.l1Done = true; @@ -175,7 +175,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { require(migrationData.l1Done, "!MIGRATED"); require(!migrationData.deprecated, "SUBGRAPH_DEPRECATED"); - require(_maxSubmissionCost > 0, "NO_SUBMISSION_COST"); + require(_maxSubmissionCost != 0, "NO_SUBMISSION_COST"); { // makes sure only sufficient ETH is supplied required for successful redemption on L2 diff --git a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol index 9d39d82f6..f5735f1f0 100644 --- a/contracts/l2/curation/L2Curation.sol +++ b/contracts/l2/curation/L2Curation.sol @@ -179,7 +179,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { uint256 _signalOutMin ) external override notPartialPaused returns (uint256, uint256) { // Need to deposit some funds - require(_tokensIn > 0, "Cannot deposit zero tokens"); + require(_tokensIn != 0, "Cannot deposit zero tokens"); // Exchange GRT tokens for GCS of the subgraph pool (uint256 signalOut, uint256 curationTax) = tokensToSignal(_subgraphDeploymentID, _tokensIn); @@ -241,7 +241,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { uint256 _signalOutMin ) external override notPartialPaused onlyGNS returns (uint256) { // Need to deposit some funds - require(_tokensIn > 0, "Cannot deposit zero tokens"); + require(_tokensIn != 0, "Cannot deposit zero tokens"); // Exchange GRT tokens for GCS of the subgraph pool (no tax) uint256 signalOut = _tokensToSignal(_subgraphDeploymentID, _tokensIn); @@ -302,7 +302,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { address curator = msg.sender; // Validations - require(_signalIn > 0, "Cannot burn zero signal"); + require(_signalIn != 0, "Cannot burn zero signal"); require( getCuratorSignal(curator, _subgraphDeploymentID) >= _signalIn, 
"Cannot burn more signal than you own" @@ -369,7 +369,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { * @return True if curated */ function isCurated(bytes32 _subgraphDeploymentID) public view override returns (bool) { - return _pools[_subgraphDeploymentID].tokens > 0; + return _pools[_subgraphDeploymentID].tokens != 0; } /** @@ -452,7 +452,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { CurationPool memory curationPool = _pools[_subgraphDeploymentID]; uint256 curationPoolSignal = getCurationPoolSignal(_subgraphDeploymentID); require( - curationPool.tokens > 0, + curationPool.tokens != 0, "Subgraph deployment must be curated to perform calculations" ); require( @@ -469,7 +469,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { * @param _minimumCurationDeposit Minimum amount of tokens required deposit */ function _setMinimumCurationDeposit(uint256 _minimumCurationDeposit) private { - require(_minimumCurationDeposit > 0, "Minimum curation deposit cannot be 0"); + require(_minimumCurationDeposit != 0, "Minimum curation deposit cannot be 0"); minimumCurationDeposit = _minimumCurationDeposit; emit ParameterUpdated("minimumCurationDeposit"); diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 4a7c65540..fe89248bf 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -326,7 +326,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { // Move all signal from previous version to new version // NOTE: We will only do this as long as there is signal on the subgraph - if (subgraphData.nSignal > 0) { + if (subgraphData.nSignal != 0) { // Burn all version signal in the name pool for tokens (w/no slippage protection) // Sell all signal from the old deployment uint256 tokens = curation.burn( diff --git a/contracts/libraries/MerklePatriciaProofVerifier.sol b/contracts/libraries/MerklePatriciaProofVerifier.sol index 
d9bf28a79..dfb901661 100644 --- a/contracts/libraries/MerklePatriciaProofVerifier.sol +++ b/contracts/libraries/MerklePatriciaProofVerifier.sol @@ -235,7 +235,7 @@ library MerklePatriciaProofVerifier { pure returns (bool isLeaf, bytes memory nibbles) { - require(compact.length > 0, "MPT: invalid compact length"); + require(compact.length != 0, "MPT: invalid compact length"); uint256 firstNibble = (uint8(compact[0]) >> 4) & 0xF; uint256 skipNibbles; if (firstNibble == 0) { diff --git a/contracts/libraries/RLPReader.sol b/contracts/libraries/RLPReader.sol index ab4bf33da..1f0a673a6 100644 --- a/contracts/libraries/RLPReader.sol +++ b/contracts/libraries/RLPReader.sol @@ -264,7 +264,7 @@ library RLPReader { * @return The uint value. */ function toUint(RLPItem memory item) internal pure returns (uint256) { - require(item.len > 0 && item.len <= 33, "RLP: invalid uint length"); + require(item.len != 0 && item.len <= 33, "RLP: invalid uint length"); (uint256 memPtr, uint256 len) = payloadLocation(item); @@ -306,7 +306,7 @@ library RLPReader { * @return The bytes value. */ function toBytes(RLPItem memory item) internal pure returns (bytes memory) { - require(item.len > 0, "RLP: invalid zero length bytes"); + require(item.len != 0, "RLP: invalid zero length bytes"); (uint256 memPtr, uint256 len) = payloadLocation(item); bytes memory result = new bytes(len); @@ -425,7 +425,7 @@ library RLPReader { dest += WORD_SIZE; } - if (len > 0) { + if (len != 0) { // left over bytes. 
Mask is used to remove unwanted bytes from the word uint256 mask = 256**(WORD_SIZE - len) - 1; assembly { From 92c9e011f3b42edbf7a0374c03c0ca97687b8b71 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 17 Nov 2022 15:27:25 +0100 Subject: [PATCH 052/112] fix: owner --- contracts/discovery/L1GNS.sol | 17 +++++++++++++---- test/gns.test.ts | 34 +++++++++++++++++----------------- 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index e8d662029..107ee414d 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -28,7 +28,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { /// @dev Emitted when a subgraph was locked as preparation to migrating it to L2 event SubgraphLockedForMigrationToL2(uint256 _subgraphID); /// @dev Emitted when a subgraph was sent to L2 through the bridge - event SubgraphSentToL2(uint256 _subgraphID); + event SubgraphSentToL2(uint256 _subgraphID, address _l2Owner); /// @dev Emitted when the address of the Arbitrum Inbox was updated event ArbitrumInboxAddressUpdated(address _inbox); @@ -85,12 +85,14 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { * (less than 255 blocks ago). * Use the Arbitrum SDK to estimate the L2 retryable ticket parameters. 
* @param _subgraphID Subgraph ID + * @param _l2Owner Address that will own the subgraph in L2 (could be the L1 owner, but could be different if the L1 owner is an L1 contract) * @param _maxGas Max gas to use for the L2 retryable ticket * @param _gasPriceBid Gas price bid for the L2 retryable ticket * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket */ function sendSubgraphToL2( uint256 _subgraphID, + address _l2Owner, uint256 _maxGas, uint256 _gasPriceBid, uint256 _maxSubmissionCost @@ -108,7 +110,12 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { require(ownerOf(_subgraphID) == msg.sender, "GNS: Must be authorized"); migrationData.l1Done = true; - bytes memory extraData = _encodeSubgraphDataForL2(_subgraphID, migrationData, subgraphData); + bytes memory extraData = _encodeSubgraphDataForL2( + _subgraphID, + _l2Owner, + migrationData, + subgraphData + ); bytes memory data = abi.encode(_maxSubmissionCost, extraData); IGraphToken grt = graphToken(); @@ -125,7 +132,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { subgraphData.reserveRatio = 0; _burnNFT(_subgraphID); - emit SubgraphSentToL2(_subgraphID); + emit SubgraphSentToL2(_subgraphID, _l2Owner); } /** @@ -211,18 +218,20 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { * @dev Encodes the subgraph data as callhook parameters * for the L2 migration. 
* @param _subgraphID Subgraph ID + * @param _l2Owner Owner of the subgraph on L2 * @param _migrationData Subgraph L2 migration data * @param _subgraphData Subgraph data */ function _encodeSubgraphDataForL2( uint256 _subgraphID, + address _l2Owner, SubgraphL2MigrationData storage _migrationData, SubgraphData storage _subgraphData ) internal view returns (bytes memory) { return abi.encode( _subgraphID, - ownerOf(_subgraphID), + _l2Owner, blockhash(_migrationData.lockedAtBlock), _subgraphData.nSignal ); diff --git a/test/gns.test.ts b/test/gns.test.ts index 2b9a109a4..101e1038f 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1122,10 +1122,10 @@ describe('L1GNS', () => { const gasPriceBid = toBN('20') const tx = gns .connect(me.signer) - .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) return subgraph0 } const publishAndCurateOnLegacySubgraph = async function (seqID: BigNumber): Promise { @@ -1227,7 +1227,7 @@ describe('L1GNS', () => { }) }) describe('sendSubgraphToL2', function () { - it('sends tokens and calldata to L2 through the GRT bridge', async function () { + it('sends tokens and calldata to L2 through the GRT bridge, for a desired L2 owner', async function () { const subgraph0 = await publishAndCurateOnSubgraph() const curatedTokens = await grt.balanceOf(curation.address) @@ -1241,10 +1241,10 @@ describe('L1GNS', () => { const gasPriceBid = toBN('20') const tx = gns .connect(me.signer) - .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + .sendSubgraphToL2(subgraph0.id, other.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await 
expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, other.address) const subgraphAfter = await gns.subgraphs(subgraph0.id) expect(subgraphAfter.vSignal).eq(0) @@ -1258,7 +1258,7 @@ describe('L1GNS', () => { const expectedCallhookData = defaultAbiCoder.encode( ['uint256', 'address', 'bytes32', 'uint256'], - [subgraph0.id, me.address, lockBlockhash, subgraphBefore.nSignal], + [subgraph0.id, other.address, lockBlockhash, subgraphBefore.nSignal], ) const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( @@ -1288,10 +1288,10 @@ describe('L1GNS', () => { const gasPriceBid = toBN('20') const tx = legacyGNSMock .connect(me.signer) - .sendSubgraphToL2(subgraphID, maxGas, gasPriceBid, maxSubmissionCost, { + .sendSubgraphToL2(subgraphID, other.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(legacyGNSMock, 'SubgraphSentToL2').withArgs(subgraphID) + await expect(tx).emit(legacyGNSMock, 'SubgraphSentToL2').withArgs(subgraphID, other.address) const subgraphAfter = await legacyGNSMock.legacySubgraphData(me.address, seqID) expect(subgraphAfter.vSignal).eq(0) @@ -1305,7 +1305,7 @@ describe('L1GNS', () => { const expectedCallhookData = defaultAbiCoder.encode( ['uint256', 'address', 'bytes32', 'uint256'], - [subgraphID, me.address, lockBlockhash, subgraphBefore.nSignal], + [subgraphID, other.address, lockBlockhash, subgraphBefore.nSignal], ) const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( @@ -1327,7 +1327,7 @@ describe('L1GNS', () => { const gasPriceBid = toBN('20') const tx = gns .connect(other.signer) - .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + .sendSubgraphToL2(subgraph0.id, other.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) await expect(tx).revertedWith('GNS: Must be authorized') @@ 
-1340,7 +1340,7 @@ describe('L1GNS', () => { const gasPriceBid = toBN('20') const tx = gns .connect(me.signer) - .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) await expect(tx).revertedWith('!LOCKED') @@ -1353,14 +1353,14 @@ describe('L1GNS', () => { const gasPriceBid = toBN('20') const tx = gns .connect(me.signer) - .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) const tx2 = gns .connect(me.signer) - .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) await expect(tx2).revertedWith('ALREADY_DONE') @@ -1375,7 +1375,7 @@ describe('L1GNS', () => { const gasPriceBid = toBN('20') const tx = gns .connect(me.signer) - .sendSubgraphToL2(subgraph0.id, maxGas, gasPriceBid, maxSubmissionCost, { + .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) await expect(tx).revertedWith('TOO_LATE') @@ -1500,10 +1500,10 @@ describe('L1GNS', () => { const gasPriceBid = toBN('20') const tx = legacyGNSMock .connect(me.signer) - .sendSubgraphToL2(subgraphID, maxGas, gasPriceBid, maxSubmissionCost, { + .sendSubgraphToL2(subgraphID, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(legacyGNSMock, 'SubgraphSentToL2').withArgs(subgraphID) + await 
expect(tx).emit(legacyGNSMock, 'SubgraphSentToL2').withArgs(subgraphID, me.address) const expectedCalldata = l2GNSIface.encodeFunctionData( 'claimL1CuratorBalanceToBeneficiary', From 115a5753f7f7db102fc1c98bf587ba1bc8bc9661 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 18 Nov 2022 09:45:11 +0100 Subject: [PATCH 053/112] fix: remove MPT claiming and the need to lock subgraphs before sending to L2 --- contracts/discovery/GNS.sol | 81 +- contracts/discovery/IGNS.sol | 5 +- contracts/discovery/L1GNS.sol | 110 +-- contracts/l2/discovery/IL2GNS.sol | 31 - contracts/l2/discovery/L2GNS.sol | 154 +-- contracts/l2/discovery/L2GNSStorage.sol | 2 - .../libraries/MerklePatriciaProofVerifier.sol | 313 ------ contracts/libraries/RLPReader.sol | 438 --------- contracts/libraries/StateProofVerifier.sol | 156 --- .../tests/MerklePatriciaProofVerifierMock.sol | 32 - test/gns.test.ts | 254 +---- test/l2/l2GNS.test.ts | 926 +----------------- test/lib/mptProofUtils.ts | 79 -- test/mpt.test.ts | 294 ------ 14 files changed, 106 insertions(+), 2769 deletions(-) delete mode 100644 contracts/libraries/MerklePatriciaProofVerifier.sol delete mode 100644 contracts/libraries/RLPReader.sol delete mode 100644 contracts/libraries/StateProofVerifier.sol delete mode 100644 contracts/tests/MerklePatriciaProofVerifierMock.sol delete mode 100644 test/lib/mptProofUtils.ts delete mode 100644 test/mpt.test.ts diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index 07355e35a..2d39c0d6c 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -34,12 +34,6 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { /// @dev 100% in parts per million uint32 private constant MAX_PPM = 1000000; - /// @dev Storage slot where the subgraphs mapping is stored on L1GNS - uint256 internal constant SUBGRAPH_MAPPING_SLOT = 18; - - /// @dev Storage slot where the legacy subgraphs mapping is stored on L1GNS - uint256 internal constant 
LEGACY_SUBGRAPH_MAPPING_SLOT = 15; - /// @dev Equates to Connector weight on bancor formula to be CW = 1 uint32 internal immutable FIXED_RESERVE_RATIO = MAX_PPM; @@ -583,6 +577,16 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { return 0; } + /** + * @notice Return whether a subgraph is a legacy subgraph (created before subgraph NFTs). + * @param _subgraphID Subgraph ID + * @return Return true if subgraph is a legacy subgraph + */ + function isLegacySubgraph(uint256 _subgraphID) external view override returns (bool) { + (address account, ) = getLegacySubgraphKey(_subgraphID); + return account != address(0); + } + /** * @notice Calculate subgraph signal to be returned for an amount of tokens. * @param _subgraphID Subgraph ID @@ -690,16 +694,6 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { return _isPublished(_getSubgraphData(_subgraphID)); } - /** - * @notice Return whether a subgraph is a legacy subgraph (created before subgraph NFTs). - * @param _subgraphID Subgraph ID - * @return Return true if subgraph is a legacy subgraph - */ - function isLegacySubgraph(uint256 _subgraphID) public view override returns (bool) { - (address account, ) = getLegacySubgraphKey(_subgraphID); - return account != address(0); - } - /** * @notice Returns account and sequence ID for a legacy subgraph (created before subgraph NFTs). * @param _subgraphID Subgraph ID @@ -726,61 +720,6 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { return subgraphNFT.ownerOf(_tokenID); } - /** - * @notice Get the storage slot that corresponds to a curator's signal within a subgraph - * @dev This can be useful to produce proofs to claim balances in L2, as implemented - * in L2GNS. Note this only works with non-legacy subgraphs. 
- * @param _subgraphID Subgraph ID - * @param _curator Curator address - * @return Storage slot for the curator's signal in the specified subgraph - */ - function getCuratorSlot(uint256 _subgraphID, address _curator) public pure returns (uint256) { - // subgraphs mapping is stored at slot SUBGRAPH_MAPPING_SLOT. - // So our subgraph is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) - // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, - // So the mapping is at slot keccak256(abi.encodePacked(uint256(subgraphID), uint256(SUBGRAPH_MAPPING_SLOT))) + 2 - // Therefore the nSignal value for msg.sender should be at slot: - return - uint256( - keccak256( - abi.encodePacked( - uint256(_curator), - uint256(keccak256(abi.encodePacked(_subgraphID, SUBGRAPH_MAPPING_SLOT))) - .add(2) - ) - ) - ); - } - - /** - * @notice Get the storage slot that corresponds to a curator's signal within a legacy subgraph - * @dev This can be useful to produce proofs to claim balances in L2, as implemented - * in L2GNS. Note this only works with legacy subgraphs. - * @param _subgraphCreatorAccount Address of the account that created the account - * @param _seqID Sequence number for the subgraph - * @param _curator Curator address - * @return Storage slot for the curator's signal in the specified legacy subgraph - */ - function getLegacyCuratorSlot( - address _subgraphCreatorAccount, - uint256 _seqID, - address _curator - ) public pure returns (uint256) { - // legacy subgraphs mapping is stored at slot LEGACY_SUBGRAPH_MAPPING_SLOT. 
- // So the subgraphs for the account are at slot keccak256(abi.encodePacked(uint256(_subgraphCreatorAccount), uint256(SUBGRAPH_MAPPING_SLOT))) - uint256 accountSlot = uint256( - keccak256( - abi.encodePacked(uint256(_subgraphCreatorAccount), LEGACY_SUBGRAPH_MAPPING_SLOT) - ) - ); - // Then the subgraph for this _seqID should be at: - uint256 subgraphSlot = uint256(keccak256(abi.encodePacked(_seqID, accountSlot))); - // The curatorNSignal mapping is at slot 2 within the SubgraphData struct, - // So the mapping is at slot subgraphSlot + 2 - // Therefore the nSignal value for msg.sender should be at slot: - return uint256(keccak256(abi.encodePacked(uint256(_curator), subgraphSlot.add(2)))); - } - /** * @dev Calculate tax that owner will have to cover for upgrading or deprecating. * @param _tokens Tokens that were received from deprecating the old subgraph diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index 0d7ac045c..1a5a3fc31 100644 --- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -30,13 +30,10 @@ interface IGNS { * the L2GNS. */ struct SubgraphL2MigrationData { - uint256 lockedAtBlock; // Block at which the subgraph was locked for migration. L1 only uint256 tokens; // GRT that will be sent to L2 to mint signal - bool l1Done; // Migration finished on L1 side (or subgraph deprecated) - bytes32 lockedAtBlockHash; // Blockhash from block at which the subgraph was locked for migration + bool l1Done; // Migration finished on L1 side mapping(address => bool) curatorBalanceClaimed; // True for curators whose balance has been claimed in L2 bool l2Done; // Migration finished on L2 side - bool deprecated; // Subgraph was deprecated instead of sent. 
L1 only } /** diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 107ee414d..5c3fc008b 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -14,19 +14,17 @@ import { IGraphToken } from "../token/IGraphToken.sol"; import { L1GNSV1Storage } from "./L1GNSStorage.sol"; /** - * @title GNS + * @title L1GNS * @dev The Graph Name System contract provides a decentralized naming system for subgraphs * used in the scope of the Graph Network. It translates Subgraphs into Subgraph Versions. * Each version is associated with a Subgraph Deployment. The contract has no knowledge of * human-readable names. All human readable names emitted in events. * The contract implements a multicall behaviour to support batching multiple calls in a single - * transaction. + * transaction. This L1GNS variant includes some functions to allow migrating subgraphs to L2. */ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { using SafeMathUpgradeable for uint256; - /// @dev Emitted when a subgraph was locked as preparation to migrating it to L2 - event SubgraphLockedForMigrationToL2(uint256 _subgraphID); /// @dev Emitted when a subgraph was sent to L2 through the bridge event SubgraphSentToL2(uint256 _subgraphID, address _l2Owner); /// @dev Emitted when the address of the Arbitrum Inbox was updated @@ -41,44 +39,6 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { emit ArbitrumInboxAddressUpdated(_inbox); } - /** - * @notice Lock a subgraph for migration to L2. - * This will lock the subgraph's curator balance and prevent any further - * changes to the subgraph. - * WARNING: After calling this function, the subgraph owner has 255 blocks - * to call sendSubgraphToL2 to complete the migration; otherwise, the - * subgraph will have to be deprecated using deprecateLockedSubgraph, - * and the deployment to L2 will have to be manual (and curators will - * have to manually move the signal over too). 
- * @param _subgraphID Subgraph ID - */ - function lockSubgraphForMigrationToL2(uint256 _subgraphID) - external - payable - notPartialPaused - onlySubgraphAuth(_subgraphID) - { - // Subgraph check - SubgraphData storage subgraphData = _getSubgraphOrRevert(_subgraphID); - SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; - - // Lock the subgraph so no more signal can be minted or burned. - - // Burn all version signal in the name pool for tokens (w/no slippage protection) - // Sell all signal from the old deployment - migrationData.tokens = curation().burn( - subgraphData.subgraphDeploymentID, - subgraphData.vSignal, - 0 - ); - - subgraphData.disabled = true; - subgraphData.vSignal = 0; - - migrationData.lockedAtBlock = block.number; - emit SubgraphLockedForMigrationToL2(_subgraphID); - } - /** * @notice Send a subgraph's data and tokens to L2. * The subgraph must be locked using lockSubgraphForMigrationToL2 in a previous block @@ -97,34 +57,34 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { uint256 _gasPriceBid, uint256 _maxSubmissionCost ) external payable notPartialPaused { - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; - require( - migrationData.lockedAtBlock != 0 && migrationData.lockedAtBlock < block.number, - "!LOCKED" - ); - require(migrationData.lockedAtBlock.add(255) >= block.number, "TOO_LATE"); + require(!migrationData.l1Done, "ALREADY_DONE"); - // This is just like onlySubgraphAuth, but we want it to run after the other checks - // to revert with a nicer message in those cases: + + SubgraphData storage subgraphData = _getSubgraphOrRevert(_subgraphID); + // This is just like onlySubgraphAuth, but we want it to run after the l1Done check + // to revert with a nicer message in that case: require(ownerOf(_subgraphID) == msg.sender, "GNS: Must be authorized"); migrationData.l1Done = true; - bytes 
memory extraData = _encodeSubgraphDataForL2( - _subgraphID, - _l2Owner, - migrationData, - subgraphData + uint256 curationTokens = curation().burn( + subgraphData.subgraphDeploymentID, + subgraphData.vSignal, + 0 ); + subgraphData.disabled = true; + subgraphData.vSignal = 0; + + bytes memory extraData = _encodeSubgraphDataForL2(_subgraphID, _l2Owner, subgraphData); bytes memory data = abi.encode(_maxSubmissionCost, extraData); IGraphToken grt = graphToken(); - ITokenGateway gateway = ITokenGateway(_resolveContract(keccak256("GraphTokenGateway"))); - grt.approve(address(gateway), migrationData.tokens); + ITokenGateway gateway = graphTokenGateway(); + grt.approve(address(gateway), curationTokens); gateway.outboundTransfer{ value: msg.value }( address(grt), counterpartGNSAddress, - migrationData.tokens, + curationTokens, _maxGas, _gasPriceBid, data @@ -135,29 +95,6 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { emit SubgraphSentToL2(_subgraphID, _l2Owner); } - /** - * @notice Deprecate a subgraph locked more than 256 blocks ago. - * This allows curators to recover their funds if the subgraph was locked - * for a migration to L2 but the subgraph was never actually sent to L2. 
- * @param _subgraphID Subgraph ID - */ - function deprecateLockedSubgraph(uint256 _subgraphID) external notPartialPaused { - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; - require(migrationData.lockedAtBlock != 0, "!LOCKED"); - require(migrationData.lockedAtBlock.add(256) < block.number, "TOO_EARLY"); - require(!migrationData.l1Done, "ALREADY_DONE"); - migrationData.l1Done = true; - migrationData.deprecated = true; - subgraphData.withdrawableGRT = migrationData.tokens; - subgraphData.reserveRatio = 0; - - // Burn the NFT - _burnNFT(_subgraphID); - - emit SubgraphDeprecated(_subgraphID, subgraphData.withdrawableGRT); - } - /** * @notice Claim the balance for a curator's signal in a subgraph that was * migrated to L2, by sending a retryable ticket to the L2GNS. @@ -180,7 +117,6 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; require(migrationData.l1Done, "!MIGRATED"); - require(!migrationData.deprecated, "SUBGRAPH_DEPRECATED"); require(_maxSubmissionCost != 0, "NO_SUBMISSION_COST"); @@ -219,21 +155,13 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { * for the L2 migration. 
* @param _subgraphID Subgraph ID * @param _l2Owner Owner of the subgraph on L2 - * @param _migrationData Subgraph L2 migration data * @param _subgraphData Subgraph data */ function _encodeSubgraphDataForL2( uint256 _subgraphID, address _l2Owner, - SubgraphL2MigrationData storage _migrationData, SubgraphData storage _subgraphData ) internal view returns (bytes memory) { - return - abi.encode( - _subgraphID, - _l2Owner, - blockhash(_migrationData.lockedAtBlock), - _subgraphData.nSignal - ); + return abi.encode(_subgraphID, _l2Owner, _subgraphData.nSignal); } } diff --git a/contracts/l2/discovery/IL2GNS.sol b/contracts/l2/discovery/IL2GNS.sol index cbc40b9b4..bdfb1a87f 100644 --- a/contracts/l2/discovery/IL2GNS.sol +++ b/contracts/l2/discovery/IL2GNS.sol @@ -24,37 +24,6 @@ interface IL2GNS is ICallhookReceiver { bytes32 _versionMetadata ) external; - /** - * @notice Claim curator balance belonging to a curator from L1. - * This will be credited to the same curator's balance on L2. - * This can only be called by the corresponding curator. - * @param _subgraphID Subgraph for which to claim a balance - * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 - * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance - */ - function claimL1CuratorBalance( - uint256 _subgraphID, - bytes memory _blockHeaderRlpBytes, - bytes memory _proofRlpBytes - ) external; - - /** - * @notice Claim curator balance belonging to a curator from L1 on a legacy subgraph. - * This will be credited to the same curator's balance on L2. - * This can only be called by the corresponding curator. - * Users can query getLegacySubgraphKey on L1 to get the _subgraphCreatorAccount and _seqID. 
- * @param _subgraphCreatorAccount Account that created the subgraph in L1 - * @param _seqID Sequence number for the subgraph - * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 - * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance - */ - function claimL1CuratorBalanceForLegacySubgraph( - address _subgraphCreatorAccount, - uint256 _seqID, - bytes memory _blockHeaderRlpBytes, - bytes memory _proofRlpBytes - ) external; - /** * @notice Claim curator balance belonging to a curator from L1. * This will be credited to the a beneficiary on L2, and can only be called diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index fe89248bf..c0300545a 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -12,9 +12,6 @@ import { ICuration } from "../../curation/ICuration.sol"; import { IL2GNS } from "./IL2GNS.sol"; import { L2GNSV1Storage } from "./L2GNSStorage.sol"; -import { RLPReader } from "../../libraries/RLPReader.sol"; -import { StateProofVerifier as Verifier } from "../../libraries/StateProofVerifier.sol"; - import { IL2Curation } from "../curation/IL2Curation.sol"; /** @@ -27,8 +24,6 @@ import { IL2Curation } from "../curation/IL2Curation.sol"; * transaction. 
*/ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { - using RLPReader for bytes; - using RLPReader for RLPReader.RLPItem; using SafeMathUpgradeable for uint256; /// @dev Emitted when a subgraph is received from L1 through the bridge @@ -42,24 +37,12 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { address _l2Curator, uint256 _nSignalClaimed ); - /// @dev Emitted when claiming L1 balances using MPT proofs is enabled - event MPTClaimingEnabled(); - /// @dev Emitted when claiming L1 balances using MPT proofs is disabled - event MPTClaimingDisabled(); /** * @dev Checks that the sender is the L2GraphTokenGateway as configured on the Controller. */ modifier onlyL2Gateway() { - require(msg.sender == _resolveContract(keccak256("GraphTokenGateway")), "ONLY_GATEWAY"); - _; - } - - /** - * @dev Checks that claiming balances using Merkle Patricia proofs is enabled. - */ - modifier ifMPTClaimingEnabled() { - require(mptClaimingEnabled, "MPT_CLAIMING_DISABLED"); + require(msg.sender == address(graphTokenGateway()), "ONLY_GATEWAY"); _; } @@ -75,19 +58,6 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { _; } - /** - * @notice Enables or disables claiming L1 balances using Merkle Patricia proofs - * @param _enabled If true, claiming MPT proofs will be enabled; if false, they will be disabled - */ - function setMPTClaimingEnabled(bool _enabled) external onlyGovernor { - mptClaimingEnabled = _enabled; - if (_enabled) { - emit MPTClaimingEnabled(); - } else { - emit MPTClaimingDisabled(); - } - } - /** * @notice Receive tokens with a callhook from the bridge. * The callhook will receive a subgraph from L1. 
@@ -101,14 +71,12 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { bytes calldata _data ) external override notPartialPaused onlyL2Gateway { require(_from == counterpartGNSAddress, "ONLY_L1_GNS_THROUGH_BRIDGE"); - ( - uint256 subgraphID, - address subgraphOwner, - bytes32 lockedAtBlockHash, - uint256 nSignal - ) = abi.decode(_data, (uint256, address, bytes32, uint256)); - - _receiveSubgraphFromL1(subgraphID, subgraphOwner, _amount, lockedAtBlockHash, nSignal); + (uint256 subgraphID, address subgraphOwner, uint256 nSignal) = abi.decode( + _data, + (uint256, address, uint256) + ); + + _receiveSubgraphFromL1(subgraphID, subgraphOwner, _amount, nSignal); } /** @@ -158,111 +126,6 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { emit SubgraphMigrationFinalized(_subgraphID); } - /** - * @notice Claim curator balance belonging to a curator from L1. - * This will be credited to the same curator's balance on L2. - * This can only be called by the corresponding curator. - * @param _subgraphID Subgraph for which to claim a balance - * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 - * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance - */ - function claimL1CuratorBalance( - uint256 _subgraphID, - bytes memory _blockHeaderRlpBytes, - bytes memory _proofRlpBytes - ) external override notPartialPaused ifMPTClaimingEnabled { - IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; - require(migratedData.l2Done, "!MIGRATED"); - require(!migratedData.curatorBalanceClaimed[msg.sender], "ALREADY_CLAIMED"); - - Verifier.BlockHeader memory blockHeader = Verifier.parseBlockHeader(_blockHeaderRlpBytes); - require(blockHeader.hash == migratedData.lockedAtBlockHash, "!BLOCKHASH"); - - RLPReader.RLPItem[] memory proofs = _proofRlpBytes.toRlpItem().toList(); - require(proofs.length == 2, "!N_PROOFS"); - - 
Verifier.Account memory l1GNSAccount = Verifier.extractAccountFromProof( - keccak256(abi.encodePacked(counterpartGNSAddress)), - blockHeader.stateRootHash, - proofs[0].toList() - ); - - require(l1GNSAccount.exists, "!ACCOUNT"); - - uint256 curatorSlot = getCuratorSlot(_subgraphID, msg.sender); - - Verifier.SlotValue memory curatorNSignalSlot = Verifier.extractSlotValueFromProof( - keccak256(abi.encodePacked(curatorSlot)), - l1GNSAccount.storageRoot, - proofs[1].toList() - ); - - require(curatorNSignalSlot.exists, "!CURATOR_SLOT"); - - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - subgraphData.curatorNSignal[msg.sender] = subgraphData.curatorNSignal[msg.sender].add( - curatorNSignalSlot.value - ); - migratedData.curatorBalanceClaimed[msg.sender] = true; - - emit CuratorBalanceClaimed(_subgraphID, msg.sender, msg.sender, curatorNSignalSlot.value); - } - - /** - * @notice Claim curator balance belonging to a curator from L1 on a legacy subgraph. - * This will be credited to the same curator's balance on L2. - * This can only be called by the corresponding curator. - * Users can query getLegacySubgraphKey on L1 to get the _subgraphCreatorAccount and _seqID. 
- * @param _subgraphCreatorAccount Account that created the subgraph in L1 - * @param _seqID Sequence number for the subgraph - * @param _blockHeaderRlpBytes RLP-encoded block header from the block when the subgraph was locked on L1 - * @param _proofRlpBytes RLP-encoded list of proofs: first proof of the L1 GNS account, then proof of the slot for the curator's balance - */ - function claimL1CuratorBalanceForLegacySubgraph( - address _subgraphCreatorAccount, - uint256 _seqID, - bytes memory _blockHeaderRlpBytes, - bytes memory _proofRlpBytes - ) external override notPartialPaused ifMPTClaimingEnabled { - uint256 _subgraphID = _buildLegacySubgraphID(_subgraphCreatorAccount, _seqID); - - Verifier.BlockHeader memory blockHeader = Verifier.parseBlockHeader(_blockHeaderRlpBytes); - IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; - - require(migratedData.l2Done, "!MIGRATED"); - require(blockHeader.hash == migratedData.lockedAtBlockHash, "!BLOCKHASH"); - require(!migratedData.curatorBalanceClaimed[msg.sender], "ALREADY_CLAIMED"); - - RLPReader.RLPItem[] memory proofs = _proofRlpBytes.toRlpItem().toList(); - require(proofs.length == 2, "!N_PROOFS"); - - Verifier.Account memory l1GNSAccount = Verifier.extractAccountFromProof( - keccak256(abi.encodePacked(counterpartGNSAddress)), - blockHeader.stateRootHash, - proofs[0].toList() - ); - - require(l1GNSAccount.exists, "!ACCOUNT"); - - uint256 curatorSlot = getLegacyCuratorSlot(_subgraphCreatorAccount, _seqID, msg.sender); - - Verifier.SlotValue memory curatorNSignalSlot = Verifier.extractSlotValueFromProof( - keccak256(abi.encodePacked(curatorSlot)), - l1GNSAccount.storageRoot, - proofs[1].toList() - ); - - require(curatorNSignalSlot.exists, "!CURATOR_SLOT"); - - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - subgraphData.curatorNSignal[msg.sender] = subgraphData.curatorNSignal[msg.sender].add( - curatorNSignalSlot.value - ); - 
migratedData.curatorBalanceClaimed[msg.sender] = true; - - emit CuratorBalanceClaimed(_subgraphID, msg.sender, msg.sender, curatorNSignalSlot.value); - } - /** * @notice Claim curator balance belonging to a curator from L1. * This will be credited to the a beneficiary on L2, and can only be called @@ -369,14 +232,12 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { * @param _subgraphID Subgraph ID * @param _subgraphOwner Owner of the subgraph * @param _tokens Tokens to be deposited in the subgraph - * @param _lockedAtBlockHash Blockhash of the block at which the subgraph was locked in L1 * @param _nSignal Name signal for the subgraph in L1 */ function _receiveSubgraphFromL1( uint256 _subgraphID, address _subgraphOwner, uint256 _tokens, - bytes32 _lockedAtBlockHash, uint256 _nSignal ) internal { IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; @@ -388,7 +249,6 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { subgraphData.nSignal = _nSignal; migratedData.tokens = _tokens; - migratedData.lockedAtBlockHash = _lockedAtBlockHash; migratedData.l1Done = true; // Mint the NFT. Use the subgraphID as tokenID. 
diff --git a/contracts/l2/discovery/L2GNSStorage.sol b/contracts/l2/discovery/L2GNSStorage.sol index 016f12adb..067dff1e5 100644 --- a/contracts/l2/discovery/L2GNSStorage.sol +++ b/contracts/l2/discovery/L2GNSStorage.sol @@ -9,8 +9,6 @@ pragma abicoder v2; * @dev */ abstract contract L2GNSV1Storage { - /// Specifies whether claiming L1 balances using Merkle Patricia proofs is enabled - bool public mptClaimingEnabled; /// @dev Storage gap to keep storage slots fixed in future versions uint256[50] private __gap; } diff --git a/contracts/libraries/MerklePatriciaProofVerifier.sol b/contracts/libraries/MerklePatriciaProofVerifier.sol deleted file mode 100644 index dfb901661..000000000 --- a/contracts/libraries/MerklePatriciaProofVerifier.sol +++ /dev/null @@ -1,313 +0,0 @@ -// SPDX-License-Identifier: MIT - -/* - * Copied from: - * https://github.com/lidofinance/curve-merkle-oracle/blob/1033b3e84142317ffd8f366b52e489d5eb49c73f/contracts/MerklePatriciaProofVerifier.sol - * - * MODIFIED from lidofinance's implementation: - * - Changed solidity version to 0.7.6 (pablo@edgeandnode.com) - * - Using local copy of the RLPReader library instead of using the package - * - Silenced linter warnings about inline assembly - * - Renamed a variable for mixedCase consistency - * - Added clearer revert messages - * - Use assert when checking for a condition that should be impossible (nibble >= 16) - * - Other minor QA changes - */ - -/** - * Copied from https://github.com/lorenzb/proveth/blob/c74b20e/onchain/ProvethVerifier.sol - * with minor performance and code style-related modifications. - */ -pragma solidity 0.7.6; - -import { RLPReader } from "./RLPReader.sol"; - -/** - * @title MerklePatriciaProofVerifier - * @notice This contract verifies proofs of inclusion or exclusion - * for Merkle Patricia tries. 
- */ -library MerklePatriciaProofVerifier { - using RLPReader for RLPReader.RLPItem; - using RLPReader for bytes; - - /// @dev Validates a Merkle-Patricia-Trie proof. - /// If the proof proves the inclusion of some key-value pair in the - /// trie, the value is returned. Otherwise, i.e. if the proof proves - /// the exclusion of a key from the trie, an empty byte array is - /// returned. - /// @param rootHash is the Keccak-256 hash of the root node of the MPT. - /// @param path is the key of the node whose inclusion/exclusion we are - /// proving. - /// @param stack is the stack of MPT nodes (starting with the root) that - /// need to be traversed during verification. - /// @return value whose inclusion is proved or an empty byte array for - /// a proof of exclusion - function extractProofValue( - bytes32 rootHash, - bytes memory path, - RLPReader.RLPItem[] memory stack - ) internal pure returns (bytes memory value) { - bytes memory mptKey = _decodeNibbles(path, 0); - uint256 mptKeyOffset; - - bytes32 nodeHashHash; - RLPReader.RLPItem[] memory node; - - RLPReader.RLPItem memory rlpValue; - - if (stack.length == 0) { - // Root hash of empty Merkle-Patricia-Trie - require( - rootHash == 0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421, - "MPT: invalid empty tree root" - ); - return new bytes(0); - } - - // Traverse stack of nodes starting at root. - for (uint256 i; i < stack.length; ++i) { - // We use the fact that an rlp encoded list consists of some - // encoding of its length plus the concatenation of its - // *rlp-encoded* items. - - // The root node is hashed with Keccak-256 ... - if (i == 0 && rootHash != stack[i].rlpBytesKeccak256()) { - revert("MPT: invalid root hash"); - } - // ... whereas all other nodes are hashed with the MPT - // hash function. - if (i != 0 && nodeHashHash != _mptHashHash(stack[i])) { - revert("MPT: invalid node hash"); - } - // We verified that stack[i] has the correct hash, so we - // may safely decode it. 
- node = stack[i].toList(); - - if (node.length == 2) { - // Extension or Leaf node - - bool isLeaf; - bytes memory nodeKey; - (isLeaf, nodeKey) = _merklePatriciaCompactDecode(node[0].toBytes()); - - uint256 prefixLength = _sharedPrefixLength(mptKeyOffset, mptKey, nodeKey); - mptKeyOffset += prefixLength; - - if (prefixLength < nodeKey.length) { - // Proof claims divergent extension or leaf. (Only - // relevant for proofs of exclusion.) - // An Extension/Leaf node is divergent iff it "skips" over - // the point at which a Branch node should have been had the - // excluded key been included in the trie. - // Example: Imagine a proof of exclusion for path [1, 4], - // where the current node is a Leaf node with - // path [1, 3, 3, 7]. For [1, 4] to be included, there - // should have been a Branch node at [1] with a child - // at 3 and a child at 4. - - // Sanity check - if (i < stack.length - 1) { - // divergent node must come last in proof - revert("MPT: divergent node not last"); - } - - return new bytes(0); - } - - if (isLeaf) { - // Sanity check - if (i < stack.length - 1) { - // leaf node must come last in proof - revert("MPT: leaf node not last"); - } - - if (mptKeyOffset < mptKey.length) { - return new bytes(0); - } - - rlpValue = node[1]; - return rlpValue.toBytes(); - } else { - // extension - // Sanity check - if (i == stack.length - 1) { - // shouldn't be at last level - revert("MPT: non-leaf node last"); - } - - if (!node[1].isList()) { - // rlp(child) was at least 32 bytes. node[1] contains - // Keccak256(rlp(child)). - nodeHashHash = node[1].payloadKeccak256(); - } else { - // rlp(child) was less than 32 bytes. node[1] contains - // rlp(child). 
- nodeHashHash = node[1].rlpBytesKeccak256(); - } - } - } else if (node.length == 17) { - // Branch node - - if (mptKeyOffset != mptKey.length) { - // we haven't consumed the entire path, so we need to look at a child - uint8 nibble = uint8(mptKey[mptKeyOffset]); - mptKeyOffset += 1; - - // mptKey comes from _decodeNibbles which should never - // return a nibble >= 16, which is why we should never - // ever have a nibble >= 16 here. (This is a sanity check - // which is why we use assert and not require.) - assert(nibble < 16); - - if (_isEmptyByteSequence(node[nibble])) { - // Sanity - if (i != stack.length - 1) { - // leaf node should be at last level - revert("MPT: empty leaf not last"); - } - - return new bytes(0); - } else if (!node[nibble].isList()) { - nodeHashHash = node[nibble].payloadKeccak256(); - } else { - nodeHashHash = node[nibble].rlpBytesKeccak256(); - } - } else { - // we have consumed the entire mptKey, so we need to look at what's contained in this node. - - // Sanity - if (i != stack.length - 1) { - // should be at last level - revert("MPT: end not last"); - } - - return node[16].toBytes(); - } - } - } - } - - /// @dev Computes the hash of the Merkle-Patricia-Trie hash of the RLP item. - /// Merkle-Patricia-Tries use a weird "hash function" that outputs - /// *variable-length* hashes: If the item is shorter than 32 bytes, - /// the MPT hash is the item. Otherwise, the MPT hash is the - /// Keccak-256 hash of the item. - /// The easiest way to compare variable-length byte sequences is - /// to compare their Keccak-256 hashes. - /// @param item The RLP item to be hashed. - /// @return Keccak-256(MPT-hash(item)) - function _mptHashHash(RLPReader.RLPItem memory item) private pure returns (bytes32) { - if (item.len < 32) { - return item.rlpBytesKeccak256(); - } else { - return keccak256(abi.encodePacked(item.rlpBytesKeccak256())); - } - } - - /** - * @dev Checks if an RLP item corresponds to an empty byte sequence, encoded as 0x80. 
- * @param item The RLP item to be checked. - * @return True if the item is an empty byte string, false otherwise. - */ - function _isEmptyByteSequence(RLPReader.RLPItem memory item) private pure returns (bool) { - if (item.len != 1) { - return false; - } - uint8 b; - uint256 memPtr = item.memPtr; - // solhint-disable-next-line no-inline-assembly - assembly { - b := byte(0, mload(memPtr)) - } - return b == 0x80; /* empty byte string */ - } - - /** - * @dev Decode a compact-encoded Merkle-Patricia proof node, - * which must be a leaf or extension node - * @param compact The compact-encoded node - * @return isLeaf True if the node is a leaf node, false if it is an extension node. - * @return nibbles The decoded path of the node split into nibbles. - */ - function _merklePatriciaCompactDecode(bytes memory compact) - private - pure - returns (bool isLeaf, bytes memory nibbles) - { - require(compact.length != 0, "MPT: invalid compact length"); - uint256 firstNibble = (uint8(compact[0]) >> 4) & 0xF; - uint256 skipNibbles; - if (firstNibble == 0) { - skipNibbles = 2; - isLeaf = false; - } else if (firstNibble == 1) { - skipNibbles = 1; - isLeaf = false; - } else if (firstNibble == 2) { - skipNibbles = 2; - isLeaf = true; - } else if (firstNibble == 3) { - skipNibbles = 1; - isLeaf = true; - } else { - // Not supposed to happen! - revert("MPT: invalid first nibble"); - } - return (isLeaf, _decodeNibbles(compact, skipNibbles)); - } - - /** - * @dev Decode the nibbles of a compact-encoded Merkle-Patricia proof node. - * @param compact The compact-encoded node - * @param skipNibbles The number of nibbles to skip at the beginning of the node. - * @return nibbles The decoded path of the node split into nibbles. 
- */ - function _decodeNibbles(bytes memory compact, uint256 skipNibbles) - private - pure - returns (bytes memory nibbles) - { - require(compact.length != 0, "MPT: _dN invalid compact length"); - - uint256 length = compact.length * 2; - require(skipNibbles <= length, "MPT: _dN invalid skipNibbles"); - length -= skipNibbles; - - nibbles = new bytes(length); - uint256 nibblesLength; - - for (uint256 i = skipNibbles; i < skipNibbles + length; i += 1) { - if (i % 2 == 0) { - nibbles[nibblesLength] = bytes1((uint8(compact[i / 2]) >> 4) & 0xF); - } else { - nibbles[nibblesLength] = bytes1((uint8(compact[i / 2])) & 0xF); - } - nibblesLength += 1; - } - - assert(nibblesLength == nibbles.length); - } - - /** - * @dev Compute the length of the shared prefix between two byte sequences. - * This will be the count of how many bytes (representing path nibbles) are the same at the beginning of the sequences. - * @param xsOffset The offset to skip on the first sequence - * @param xs The first sequence - * @param ys The second sequence - * @return The length of the shared prefix. 
- */ - function _sharedPrefixLength( - uint256 xsOffset, - bytes memory xs, - bytes memory ys - ) private pure returns (uint256) { - uint256 i; - for (; i + xsOffset < xs.length && i < ys.length; ++i) { - if (xs[i + xsOffset] != ys[i]) { - return i; - } - } - return i; - } -} diff --git a/contracts/libraries/RLPReader.sol b/contracts/libraries/RLPReader.sol deleted file mode 100644 index 1f0a673a6..000000000 --- a/contracts/libraries/RLPReader.sol +++ /dev/null @@ -1,438 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 - -/* - * Copied from: - * https://github.com/edumar111/Solidity-RLP/blob/2e48c3004b7c70e1a1dfdd04b5b9761c28da9cc4/contracts/RLPReader.sol - * - * (using edumar111's fork as it includes Solidity 0.7+ support, - * which we need, and the PR is still open on hamdiallam's original repo) - * - * MODIFIED from hamdiallam's implementation: - * - Explicitly marked visibility of constants - * - Silenced linter warnings about inline assembly - * - Other minor QA improvements - */ - -/** - * @author Hamdi Allam hamdi.allam97@gmail.com - * Please reach out with any questions or concerns - */ -pragma solidity >=0.5.0 <0.9.0; - -// solhint-disable no-inline-assembly - -/** - * @title RLPReader library - * @notice This library is used to decode RLP-encoded lists and strings - */ -library RLPReader { - /// Minimum value that encodes a short string - uint8 public constant STRING_SHORT_START = 0x80; - /// Minimum value that encodes a long string - uint8 public constant STRING_LONG_START = 0xb8; - /// Minimum value that encodes a short list - uint8 public constant LIST_SHORT_START = 0xc0; - /// Minimum value that encodes a long list - uint8 public constant LIST_LONG_START = 0xf8; - /// Size of each EVM word, used to copy strings in less steps - uint8 public constant WORD_SIZE = 32; - - /// @dev Structure to represent an RLP-encoded item - struct RLPItem { - uint256 len; - uint256 memPtr; - } - - /// @dev Iterator structure to iterate over an RLP-encoded 
list - struct Iterator { - RLPItem item; // Item that's being iterated over. - uint256 nextPtr; // Position of the next item in the list. - } - - /** - * @dev Returns the next element in the iteration. Reverts if it has not next element. - * @param self The iterator. - * @return The next element in the iteration. - */ - function next(Iterator memory self) internal pure returns (RLPItem memory) { - require(hasNext(self), "RLP: no next"); - - uint256 ptr = self.nextPtr; - uint256 itemLength = _itemLength(ptr); - self.nextPtr = ptr + itemLength; - - return RLPItem(itemLength, ptr); - } - - /** - * @dev Returns true if the iteration has more elements. - * @param self The iterator. - * @return True if the iteration has more elements, false otherwise. - */ - function hasNext(Iterator memory self) internal pure returns (bool) { - RLPItem memory item = self.item; - return self.nextPtr < item.memPtr + item.len; - } - - /** - * @dev Concerts an RLP-encoded bytes array to an RLPItem. - * @param item RLP encoded bytes - * @return The RLP item with a pointer to the payload within the bytes array - */ - function toRlpItem(bytes memory item) internal pure returns (RLPItem memory) { - uint256 memPtr; - assembly { - memPtr := add(item, 0x20) - } - - return RLPItem(item.length, memPtr); - } - - /** - * @dev Create an iterator. Reverts if item is not a list. - * @param self The RLP item. - * @return An 'Iterator' over the item. - */ - function iterator(RLPItem memory self) internal pure returns (Iterator memory) { - require(isList(self), "RLP: not list (iterator)"); - - uint256 ptr = self.memPtr + _payloadOffset(self.memPtr); - return Iterator(self, ptr); - } - - /** - * @dev Returns the length of an RLP item - * @param item The RLP item. - * @return The length of the RLP item. - */ - function rlpLen(RLPItem memory item) internal pure returns (uint256) { - return item.len; - } - - /** - * @dev Returns the location of the payload of an RLP item - * @param item The RLP item. 
- * @return Pointer to the payload within the RLP encoded bytes in memory - * @return Length of the payload - */ - function payloadLocation(RLPItem memory item) internal pure returns (uint256, uint256) { - uint256 offset = _payloadOffset(item.memPtr); - uint256 memPtr = item.memPtr + offset; - uint256 len = item.len - offset; // data length - return (memPtr, len); - } - - /** - * @dev Returns the size of the payload in an RLP encoded item - * @param item The RLP item. - * @return The size of the payload - */ - function payloadLen(RLPItem memory item) internal pure returns (uint256) { - (, uint256 len) = payloadLocation(item); - return len; - } - - /** - * @dev Decode an RLP item that represents a list into an array of RLP items - * @param item The RLP item containing the encoded list. - * @return The list of RLP-encoded items contained by the input item - */ - function toList(RLPItem memory item) internal pure returns (RLPItem[] memory) { - require(isList(item), "RLP: not list (toList)"); - - uint256 items = numItems(item); - RLPItem[] memory result = new RLPItem[](items); - - uint256 memPtr = item.memPtr + _payloadOffset(item.memPtr); - uint256 dataLen; - for (uint256 i = 0; i < items; i++) { - dataLen = _itemLength(memPtr); - result[i] = RLPItem(dataLen, memPtr); - memPtr = memPtr + dataLen; - } - - return result; - } - - /** - * @dev Check if an RLP item is a list - * @param item The RLP item. - * @return True if the item is a list, false otherwise (in which case it's a string / raw data) - */ - function isList(RLPItem memory item) internal pure returns (bool) { - if (item.len == 0) return false; - - uint8 byte0; - uint256 memPtr = item.memPtr; - assembly { - byte0 := byte(0, mload(memPtr)) - } - - if (byte0 < LIST_SHORT_START) return false; - return true; - } - - /** - * @dev A cheaper version of keccak256(toRlpBytes(item)) that avoids copying memory. - * @param item The RLP item. - * @return The keccak256 hash of RLP encoded bytes. 
- */ - function rlpBytesKeccak256(RLPItem memory item) internal pure returns (bytes32) { - uint256 ptr = item.memPtr; - uint256 len = item.len; - bytes32 result; - assembly { - result := keccak256(ptr, len) - } - return result; - } - - /** - * @dev A cheaper version of keccak256(toBytes(item)) that avoids copying memory. - * @param item The RLP item. - * @return The keccak256 hash of the item's payload. - */ - function payloadKeccak256(RLPItem memory item) internal pure returns (bytes32) { - (uint256 memPtr, uint256 len) = payloadLocation(item); - bytes32 result; - assembly { - result := keccak256(memPtr, len) - } - return result; - } - - /* RLPItem conversions into data types */ - - /** - * @dev Get the raw RLP encoded item in bytes - * @param item The RLP item. - * @return Raw RLP encoded item. - */ - function toRlpBytes(RLPItem memory item) internal pure returns (bytes memory) { - bytes memory result = new bytes(item.len); - if (result.length == 0) return result; - - uint256 ptr; - assembly { - ptr := add(0x20, result) - } - - copy(item.memPtr, ptr, item.len); - return result; - } - - /** - * @dev Interpret an RLP encoded item as a boolean. - * Any non-zero byte except "0x80" is considered true. - * @param item The RLP item. - * @return The boolean value. - */ - function toBoolean(RLPItem memory item) internal pure returns (bool) { - require(item.len == 1, "RLP: invalid boolean length"); - uint256 result; - uint256 memPtr = item.memPtr; - assembly { - result := byte(0, mload(memPtr)) - } - - // SEE Github Issue #5. - // Summary: Most commonly used RLP libraries (i.e Geth) will encode - // "0" as "0x80" instead of as "0". We handle this edge case explicitly - // here. - if (result == 0 || result == STRING_SHORT_START) { - return false; - } else { - return true; - } - } - - /** - * @dev Interpret an RLP encoded item as an address. - * @param item The RLP item. - * @return The address value. 
- */ - function toAddress(RLPItem memory item) internal pure returns (address) { - // 1 byte for the length prefix - require(item.len == 21, "RLP: invalid addr length"); - - return address(uint160(toUint(item))); - } - - /** - * @dev Interpret an RLP encoded item as a uint. - * @param item The RLP item. - * @return The uint value. - */ - function toUint(RLPItem memory item) internal pure returns (uint256) { - require(item.len != 0 && item.len <= 33, "RLP: invalid uint length"); - - (uint256 memPtr, uint256 len) = payloadLocation(item); - - uint256 result; - assembly { - result := mload(memPtr) - - // shfit to the correct location if neccesary - if lt(len, 32) { - result := div(result, exp(256, sub(32, len))) - } - } - - return result; - } - - /** - * @dev Interpret an RLP encoded item as an uint, ensuring the payload - * is 32 bytes in size. - * @param item The RLP item. - * @return The uint256 value. - */ - function toUintStrict(RLPItem memory item) internal pure returns (uint256) { - // one byte prefix - require(item.len == 33, "RLP: invalid uint strict length"); - - uint256 result; - uint256 memPtr = item.memPtr + 1; - assembly { - result := mload(memPtr) - } - - return result; - } - - /** - * @dev Interpret an RLP encoded item as a bytes array. - * @param item The RLP item. - * @return The bytes value. - */ - function toBytes(RLPItem memory item) internal pure returns (bytes memory) { - require(item.len != 0, "RLP: invalid zero length bytes"); - - (uint256 memPtr, uint256 len) = payloadLocation(item); - bytes memory result = new bytes(len); - - uint256 destPtr; - assembly { - destPtr := add(0x20, result) - } - - copy(memPtr, destPtr, len); - return result; - } - - /* - * Private Helpers - */ - - /** - * @dev Get the number of items in an RLP encoded list. - * @param item The RLP item. - * @return The number of items in the list. 
- */ - function numItems(RLPItem memory item) private pure returns (uint256) { - if (item.len == 0) return 0; - - uint256 count = 0; - uint256 currPtr = item.memPtr + _payloadOffset(item.memPtr); - uint256 endPtr = item.memPtr + item.len; - while (currPtr < endPtr) { - currPtr = currPtr + _itemLength(currPtr); // skip over an item - count++; - } - - return count; - } - - /** - * @dev Get the entire length of an RLP item. - * @param memPtr The memory pointer to the start of the item. - * @return The length of the item. - */ - function _itemLength(uint256 memPtr) private pure returns (uint256) { - uint256 itemLen; - uint256 byte0; - assembly { - byte0 := byte(0, mload(memPtr)) - } - - if (byte0 < STRING_SHORT_START) itemLen = 1; - else if (byte0 < STRING_LONG_START) itemLen = byte0 - STRING_SHORT_START + 1; - else if (byte0 < LIST_SHORT_START) { - assembly { - let byteLen := sub(byte0, 0xb7) // # of bytes the actual length is - memPtr := add(memPtr, 1) // skip over the first byte - - /* 32 byte word size */ - let dataLen := div(mload(memPtr), exp(256, sub(32, byteLen))) // right shifting to get the len - itemLen := add(dataLen, add(byteLen, 1)) - } - } else if (byte0 < LIST_LONG_START) { - itemLen = byte0 - LIST_SHORT_START + 1; - } else { - assembly { - let byteLen := sub(byte0, 0xf7) - memPtr := add(memPtr, 1) - - let dataLen := div(mload(memPtr), exp(256, sub(32, byteLen))) // right shifting to the correct length - itemLen := add(dataLen, add(byteLen, 1)) - } - } - - return itemLen; - } - - /** - * @dev Get the location of the payload for an RLP item. - * @param memPtr The memory pointer to the start of the item. 
- * @return The offset of the payload from the start of the item - */ - function _payloadOffset(uint256 memPtr) private pure returns (uint256) { - uint256 byte0; - assembly { - byte0 := byte(0, mload(memPtr)) - } - - if (byte0 < STRING_SHORT_START) return 0; - else if ( - byte0 < STRING_LONG_START || (byte0 >= LIST_SHORT_START && byte0 < LIST_LONG_START) - ) return 1; - else if (byte0 < LIST_SHORT_START) - // being explicit - return byte0 - (STRING_LONG_START - 1) + 1; - else return byte0 - (LIST_LONG_START - 1) + 1; - } - - /** - * @dev Copy two areas of memory - * @param src Pointer to source - * @param dest Pointer to destination - * @param len Amount of memory to copy from the source - */ - function copy( - uint256 src, - uint256 dest, - uint256 len - ) private pure { - if (len == 0) return; - - // copy as many word sizes as possible - for (; len >= WORD_SIZE; len -= WORD_SIZE) { - assembly { - mstore(dest, mload(src)) - } - - src += WORD_SIZE; - dest += WORD_SIZE; - } - - if (len != 0) { - // left over bytes. 
Mask is used to remove unwanted bytes from the word - uint256 mask = 256**(WORD_SIZE - len) - 1; - assembly { - let srcpart := and(mload(src), not(mask)) // zero out src - let destpart := and(mload(dest), mask) // retrieve the bytes - mstore(dest, or(destpart, srcpart)) - } - } - } -} diff --git a/contracts/libraries/StateProofVerifier.sol b/contracts/libraries/StateProofVerifier.sol deleted file mode 100644 index edcc7d35c..000000000 --- a/contracts/libraries/StateProofVerifier.sol +++ /dev/null @@ -1,156 +0,0 @@ -// SPDX-License-Identifier: MIT - -/* - * Copied from: - * https://github.com/lidofinance/curve-merkle-oracle/blob/1033b3e84142317ffd8f366b52e489d5eb49c73f/contracts/StateProofVerifier.sol - * - * MODIFIED from lidofinance's implementation: - * - Changed solidity version to 0.7.6 (pablo@edgeandnode.com) - * - Using local copy of the RLPReader library instead of using the package - * - Explicitly marked visibility of constants - * - Added revert messages - * - A few other QA improvements, e.g. NatSpec - */ - -pragma solidity 0.7.6; - -import { RLPReader } from "./RLPReader.sol"; -import { MerklePatriciaProofVerifier } from "./MerklePatriciaProofVerifier.sol"; - -/** - * @title A helper library for verification of Merkle Patricia account and state proofs. 
- */ -library StateProofVerifier { - using RLPReader for RLPReader.RLPItem; - using RLPReader for bytes; - - /// Index within a block header for the state root hash - uint256 public constant HEADER_STATE_ROOT_INDEX = 3; - /// Index within a block header for the block number - uint256 public constant HEADER_NUMBER_INDEX = 8; - /// Index within a block header for the timestamp - uint256 public constant HEADER_TIMESTAMP_INDEX = 11; - - struct BlockHeader { - bytes32 hash; - bytes32 stateRootHash; - uint256 number; - uint256 timestamp; - } - - struct Account { - bool exists; - uint256 nonce; - uint256 balance; - bytes32 storageRoot; - bytes32 codeHash; - } - - struct SlotValue { - bool exists; - uint256 value; - } - - /** - * @notice Parses block header and verifies its presence onchain within the latest 256 blocks. - * @param _headerRlpBytes RLP-encoded block header. - * @return The block header as a BlockHeader struct. - */ - function verifyBlockHeader(bytes memory _headerRlpBytes) - internal - view - returns (BlockHeader memory) - { - BlockHeader memory header = parseBlockHeader(_headerRlpBytes); - // ensure that the block is actually in the blockchain - require(header.hash == blockhash(header.number), "SPV: blockhash mismatch"); - return header; - } - - /** - * @notice Parses RLP-encoded block header. - * @param _headerRlpBytes RLP-encoded block header. - * @return The block header as a BlockHeader struct. 
- */ - function parseBlockHeader(bytes memory _headerRlpBytes) - internal - pure - returns (BlockHeader memory) - { - BlockHeader memory result; - RLPReader.RLPItem[] memory headerFields = _headerRlpBytes.toRlpItem().toList(); - - require(headerFields.length > HEADER_TIMESTAMP_INDEX, "SPV: invalid header length"); - - result.stateRootHash = bytes32(headerFields[HEADER_STATE_ROOT_INDEX].toUint()); - result.number = headerFields[HEADER_NUMBER_INDEX].toUint(); - result.timestamp = headerFields[HEADER_TIMESTAMP_INDEX].toUint(); - result.hash = keccak256(_headerRlpBytes); - - return result; - } - - /** - * @dev Verifies Merkle Patricia proof of an account and extracts the account fields. - * @param _addressHash Keccak256 hash of the address corresponding to the account. - * @param _stateRootHash MPT root hash of the Ethereum state trie. - * @param _proof RLP-encoded Merkle Patricia proof for the account. - * @return The account as an Account struct, if the proof shows it exists, or an empty struct otherwise. - */ - function extractAccountFromProof( - bytes32 _addressHash, // keccak256(abi.encodePacked(address)) - bytes32 _stateRootHash, - RLPReader.RLPItem[] memory _proof - ) internal pure returns (Account memory) { - bytes memory acctRlpBytes = MerklePatriciaProofVerifier.extractProofValue( - _stateRootHash, - abi.encodePacked(_addressHash), - _proof - ); - - Account memory account; - - if (acctRlpBytes.length == 0) { - return account; - } - - RLPReader.RLPItem[] memory acctFields = acctRlpBytes.toRlpItem().toList(); - require(acctFields.length == 4, "SPV: invalid accFields length"); - - account.exists = true; - account.nonce = acctFields[0].toUint(); - account.balance = acctFields[1].toUint(); - account.storageRoot = bytes32(acctFields[2].toUint()); - account.codeHash = bytes32(acctFields[3].toUint()); - - return account; - } - - /** - * @dev Verifies Merkle Patricia proof of a slot and extracts the slot's value. 
- * @param _slotHash Keccak256 hash of the slot position. - * @param _storageRootHash MPT root hash of the account's storage trie. - * @param _proof RLP-encoded Merkle Patricia proof for the slot. - * @return The slot's value as a SlotValue struct, if the proof shows it exists, or an empty struct otherwise. - */ - function extractSlotValueFromProof( - bytes32 _slotHash, - bytes32 _storageRootHash, - RLPReader.RLPItem[] memory _proof - ) internal pure returns (SlotValue memory) { - bytes memory valueRlpBytes = MerklePatriciaProofVerifier.extractProofValue( - _storageRootHash, - abi.encodePacked(_slotHash), - _proof - ); - - SlotValue memory value; - - if (valueRlpBytes.length != 0) { - value.exists = true; - value.value = valueRlpBytes.toRlpItem().toUint(); - } - - return value; - } -} diff --git a/contracts/tests/MerklePatriciaProofVerifierMock.sol b/contracts/tests/MerklePatriciaProofVerifierMock.sol deleted file mode 100644 index f8b958022..000000000 --- a/contracts/tests/MerklePatriciaProofVerifierMock.sol +++ /dev/null @@ -1,32 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -pragma solidity ^0.7.6; -pragma abicoder v2; - -import { MerklePatriciaProofVerifier } from "../libraries/MerklePatriciaProofVerifier.sol"; -import { RLPReader } from "../libraries/RLPReader.sol"; - -/** - * @title MerklePatriciaProofVerifierMock contract - * @dev This test contract is used to run unit tests on the MerklePatriciaProofVerifier library - */ -contract MerklePatriciaProofVerifierMock { - using RLPReader for RLPReader.RLPItem; - using RLPReader for bytes; - - /** - * @notice Extract the proof value from a Merkle-Patricia proof - * @param _rootHash Root hash of the Merkle-Patricia tree - * @param _path Path for which the proof should prove inclusion or exclusion - * @param _proofRlpBytes Merkle-Patricia proof of inclusion or exclusion, as an RLP-encoded list - * @return The value for the given path, if it exists, or an empty bytes if it's a valid proof of exclusion - */ 
- function extractProofValue( - bytes32 _rootHash, - bytes memory _path, - bytes memory _proofRlpBytes - ) external pure returns (bytes memory) { - RLPReader.RLPItem[] memory stack = _proofRlpBytes.toRlpItem().toList(); - return MerklePatriciaProofVerifier.extractProofValue(_rootHash, _path, stack); - } -} diff --git a/test/gns.test.ts b/test/gns.test.ts index 101e1038f..e4e45be70 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -13,7 +13,6 @@ import { randomHexBytes, Account, toGRT, - latestBlock, advanceBlocks, provider, } from './lib/testHelpers' @@ -36,7 +35,6 @@ import { createDefaultName, PublishSubgraph, Subgraph, - DEFAULT_RESERVE_RATIO, getTokensAndVSignal, publishNewSubgraph, publishNewVersion, @@ -1102,12 +1100,8 @@ describe('L1GNS', () => { return subgraph0 } - const publishCurateAndLockSubgraph = async function (): Promise { - const subgraph0 = await publishAndCurateOnSubgraph() - await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) - return subgraph0 - } - const publishCurateLockAndSendSubgraph = async function ( + + const publishCurateAndSendSubgraph = async function ( beforeMigrationCallback?: (subgraphID: string) => Promise, ): Promise { const subgraph0 = await publishAndCurateOnSubgraph() @@ -1116,7 +1110,6 @@ describe('L1GNS', () => { await beforeMigrationCallback(subgraph0.id) } - await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') @@ -1146,95 +1139,13 @@ describe('L1GNS', () => { return subgraphID } - describe('lockSubgraphForMigrationToL2', function () { - it('locks and disables a subgraph, burning the signal and storing the block number', async function () { - // Publish a named subgraph-0 -> subgraphDeployment0 - const subgraph0 = await publishAndCurateOnSubgraph() - - const curatedTokens = await grt.balanceOf(curation.address) - const subgraphBefore = await gns.subgraphs(subgraph0.id) - 
expect(subgraphBefore.vSignal).not.eq(0) - const tx = gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) - await expect(tx).emit(gns, 'SubgraphLockedForMigrationToL2').withArgs(subgraph0.id) - - const subgraphAfter = await gns.subgraphs(subgraph0.id) - expect(subgraphAfter.vSignal).eq(0) - expect(subgraphAfter.nSignal).eq(subgraphBefore.nSignal) - expect(await grt.balanceOf(gns.address)).eq(curatedTokens) - expect(subgraphAfter.disabled).eq(true) - expect(subgraphAfter.withdrawableGRT).eq(0) - - const migrationData = await gns.subgraphL2MigrationData(subgraph0.id) - expect(migrationData.lockedAtBlock).eq(await latestBlock()) - expect(migrationData.l1Done).eq(false) - - let invalidTx = gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) - await expect(invalidTx).revertedWith('GNS: Must be active') - invalidTx = gns.connect(me.signer).burnSignal(subgraph0.id, toGRT('90000'), 0) - await expect(invalidTx).revertedWith('GNS: Must be active') - }) - it('locks and disables a legacy subgraph, burning the signal and storing the block number', async function () { - const seqID = toBN('2') - const subgraphID = await publishAndCurateOnLegacySubgraph(seqID) - const curatedTokens = await grt.balanceOf(curation.address) - const subgraphBefore = await legacyGNSMock.legacySubgraphData(me.address, seqID) - expect(subgraphBefore.vSignal).not.eq(0) - const tx = legacyGNSMock.connect(me.signer).lockSubgraphForMigrationToL2(subgraphID) - await expect(tx).emit(legacyGNSMock, 'SubgraphLockedForMigrationToL2').withArgs(subgraphID) - - const subgraphAfter = await legacyGNSMock.legacySubgraphData(me.address, seqID) - expect(subgraphAfter.vSignal).eq(0) - expect(subgraphAfter.nSignal).eq(subgraphBefore.nSignal) - expect(await grt.balanceOf(legacyGNSMock.address)).eq(curatedTokens) - expect(subgraphAfter.disabled).eq(true) - expect(subgraphAfter.withdrawableGRT).eq(0) - - const migrationData = await legacyGNSMock.subgraphL2MigrationData(subgraphID) - 
expect(migrationData.lockedAtBlock).eq(await latestBlock()) - expect(migrationData.l1Done).eq(false) - - let invalidTx = legacyGNSMock.connect(me.signer).mintSignal(subgraphID, toGRT('90000'), 0) - await expect(invalidTx).revertedWith('GNS: Must be active') - invalidTx = legacyGNSMock.connect(me.signer).burnSignal(subgraphID, toGRT('90000'), 0) - await expect(invalidTx).revertedWith('GNS: Must be active') - }) - it('rejects calls from someone who is not the subgraph owner', async function () { - const subgraph0 = await publishAndCurateOnSubgraph() - const tx = gns.connect(other.signer).lockSubgraphForMigrationToL2(subgraph0.id) - await expect(tx).revertedWith('GNS: Must be authorized') - }) - it('rejects a call for a non-existent subgraph', async function () { - const subgraphID = buildLegacySubgraphID(me.address, toBN('0')) - - const tx = gns.connect(other.signer).lockSubgraphForMigrationToL2(subgraphID) - await expect(tx).revertedWith('ERC721: owner query for nonexistent token') - }) - it('rejects a call for a subgraph that is already locked', async function () { - const subgraph0 = await publishCurateAndLockSubgraph() - - const tx2 = gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) - await expect(tx2).revertedWith('GNS: Must be active') - }) - it('rejects a call for a subgraph that is deprecated', async function () { - const subgraph0 = await publishAndCurateOnSubgraph() - - await gns.connect(me.signer).deprecateSubgraph(subgraph0.id) - - const tx2 = gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) - // Deprecating the subgraph burns the NFT - await expect(tx2).revertedWith('ERC721: owner query for nonexistent token') - }) - }) describe('sendSubgraphToL2', function () { it('sends tokens and calldata to L2 through the GRT bridge, for a desired L2 owner', async function () { const subgraph0 = await publishAndCurateOnSubgraph() const curatedTokens = await grt.balanceOf(curation.address) const subgraphBefore = await 
gns.subgraphs(subgraph0.id) - const lockTx = await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) - const lockReceipt = await lockTx.wait() - const lockBlockhash = lockReceipt.blockHash const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1253,12 +1164,11 @@ describe('L1GNS', () => { expect(subgraphAfter.withdrawableGRT).eq(0) const migrationData = await gns.subgraphL2MigrationData(subgraph0.id) - expect(migrationData.lockedAtBlock).eq((await latestBlock()).sub(1)) expect(migrationData.l1Done).eq(true) const expectedCallhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [subgraph0.id, other.address, lockBlockhash, subgraphBefore.nSignal], + ['uint256', 'address', 'uint256'], + [subgraph0.id, other.address, subgraphBefore.nSignal], ) const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( @@ -1277,11 +1187,6 @@ describe('L1GNS', () => { const subgraphID = await publishAndCurateOnLegacySubgraph(seqID) const curatedTokens = await grt.balanceOf(curation.address) const subgraphBefore = await legacyGNSMock.legacySubgraphData(me.address, seqID) - const lockTx = await legacyGNSMock - .connect(me.signer) - .lockSubgraphForMigrationToL2(subgraphID) - const lockReceipt = await lockTx.wait() - const lockBlockhash = lockReceipt.blockHash const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1300,12 +1205,11 @@ describe('L1GNS', () => { expect(subgraphAfter.withdrawableGRT).eq(0) const migrationData = await legacyGNSMock.subgraphL2MigrationData(subgraphID) - expect(migrationData.lockedAtBlock).eq((await latestBlock()).sub(1)) expect(migrationData.l1Done).eq(true) const expectedCallhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [subgraphID, other.address, lockBlockhash, subgraphBefore.nSignal], + ['uint256', 'address', 'uint256'], + [subgraphID, other.address, subgraphBefore.nSignal], ) const expectedL2Data = await 
l1GraphTokenGateway.getOutboundCalldata( @@ -1320,7 +1224,7 @@ describe('L1GNS', () => { .withArgs(legacyGNSMock.address, mockL2Gateway.address, toBN(1), expectedL2Data) }) it('rejects calls from someone who is not the subgraph owner', async function () { - const subgraph0 = await publishCurateAndLockSubgraph() + const subgraph0 = await publishAndCurateOnSubgraph() const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1332,21 +1236,8 @@ describe('L1GNS', () => { }) await expect(tx).revertedWith('GNS: Must be authorized') }) - it('rejects calls for a subgraph that is not locked', async function () { - const subgraph0 = await publishAndCurateOnSubgraph() - - const maxSubmissionCost = toBN('100') - const maxGas = toBN('10') - const gasPriceBid = toBN('20') - const tx = gns - .connect(me.signer) - .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { - value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), - }) - await expect(tx).revertedWith('!LOCKED') - }) it('rejects calls for a subgraph that was already sent', async function () { - const subgraph0 = await publishCurateAndLockSubgraph() + const subgraph0 = await publishAndCurateOnSubgraph() const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1365,10 +1256,10 @@ describe('L1GNS', () => { }) await expect(tx2).revertedWith('ALREADY_DONE') }) - it('rejects calls after too many blocks have passed', async function () { - const subgraph0 = await publishCurateAndLockSubgraph() + it('rejects a call for a subgraph that is deprecated', async function () { + const subgraph0 = await publishAndCurateOnSubgraph() - await advanceBlocks(256) + await gns.connect(me.signer).deprecateSubgraph(subgraph0.id) const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1378,75 +1269,22 @@ describe('L1GNS', () => { .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await 
expect(tx).revertedWith('TOO_LATE') - }) - }) - describe('deprecateLockedSubgraph', function () { - it('can be called by anyone, and makes the GRT from the subgraph withdrawable', async function () { - const subgraph0 = await publishAndCurateOnSubgraph() - - const [beforeTokens] = await getTokensAndVSignal( - newSubgraph0.subgraphDeploymentID, - curation, - ) - await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) - - await advanceBlocks(256) - - // Now the subgraph can be deprecated (by someone else!) - const tx = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) - await expect(tx).emit(gns, 'SubgraphDeprecated').withArgs(subgraph0.id, beforeTokens) - // After state, same as with deprecateSubgraph - const afterSubgraph = await gns.subgraphs(subgraph0.id) - // Check marked as deprecated - expect(afterSubgraph.disabled).eq(true) - // Signal for the deployment must be all burned - expect(afterSubgraph.vSignal.eq(toBN('0'))) - // Cleanup reserve ratio - expect(afterSubgraph.reserveRatio).eq(0) - // Should be equal since owner pays curation tax - expect(afterSubgraph.withdrawableGRT).eq(beforeTokens) - const migrationData = await gns.subgraphL2MigrationData(subgraph0.id) - expect(migrationData.deprecated).to.eq(true) - }) - it('rejects calls for a subgraph that was not locked', async function () { - const subgraph0 = await publishAndCurateOnSubgraph() - - await advanceBlocks(256) - - const tx = gns.connect(me.signer).deprecateLockedSubgraph(subgraph0.id) - await expect(tx).revertedWith('!LOCKED') - }) - it('rejects calls if not enough blocks have passed', async function () { - const subgraph0 = await publishCurateAndLockSubgraph() - - await advanceBlocks(255) // Not enough! 
- const tx = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) - await expect(tx).revertedWith('TOO_EARLY') - }) - it('rejects calls for a subgraph that was sent to L2', async function () { - const subgraph0 = await publishCurateLockAndSendSubgraph() - - await advanceBlocks(255) - const tx2 = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) - await expect(tx2).revertedWith('ALREADY_DONE') + await expect(tx).revertedWith('GNS: Must be active') }) - it('rejects calls for a subgraph that was already deprecated', async function () { - const subgraph0 = await publishAndCurateOnSubgraph() - - const [beforeTokens] = await getTokensAndVSignal( - newSubgraph0.subgraphDeploymentID, - curation, - ) - await gns.connect(me.signer).lockSubgraphForMigrationToL2(subgraph0.id) + it('rejects a call for a subgraph that does not exist', async function () { + const subgraphId = await buildSubgraphID(me.address, toBN(100)) - await advanceBlocks(256) + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraphId, me.address, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) - const tx = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) - await expect(tx).emit(gns, 'SubgraphDeprecated').withArgs(subgraph0.id, beforeTokens) - const tx2 = gns.connect(other.signer).deprecateLockedSubgraph(subgraph0.id) - await expect(tx2).revertedWith('ALREADY_DONE') + await expect(tx).revertedWith('GNS: Must be active') }) }) describe('claimCuratorBalanceToBeneficiaryOnL2', function () { @@ -1458,7 +1296,7 @@ describe('L1GNS', () => { }) it('sends a transaction with a curator balance to the L2GNS using the Arbitrum inbox', async function () { let beforeCuratorNSignal: BigNumber - const subgraph0 = await publishCurateLockAndSendSubgraph(async (subgraphID) => { + const subgraph0 = await 
publishCurateAndSendSubgraph(async (subgraphID) => { beforeCuratorNSignal = await gns.getCuratorSignal(subgraphID, me.address) }) @@ -1493,8 +1331,6 @@ describe('L1GNS', () => { const beforeCuratorNSignal = await legacyGNSMock.getCuratorSignal(subgraphID, me.address) - await legacyGNSMock.connect(me.signer).lockSubgraphForMigrationToL2(subgraphID) - const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') @@ -1528,30 +1364,7 @@ describe('L1GNS', () => { .emit(legacyGNSMock, 'TxToL2') .withArgs(me.address, mockL2GNS.address, toBN('2'), expectedCalldata) }) - it('rejects calls for a subgraph that was locked but not sent to L2', async function () { - const subgraph0 = await publishCurateAndLockSubgraph() - - const maxSubmissionCost = toBN('100') - const maxGas = toBN('10') - const gasPriceBid = toBN('20') - - const tx = gns - .connect(me.signer) - .claimCuratorBalanceToBeneficiaryOnL2( - subgraph0.id, - other.address, - maxGas, - gasPriceBid, - maxSubmissionCost, - { - value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), - }, - ) - - // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 - await expect(tx).revertedWith('!MIGRATED') - }) - it('rejects calls for a subgraph that was not locked', async function () { + it('rejects calls for a subgraph that was not sent to L2', async function () { const subgraph0 = await publishAndCurateOnSubgraph() const maxSubmissionCost = toBN('100') @@ -1571,14 +1384,14 @@ describe('L1GNS', () => { }, ) - // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 await expect(tx).revertedWith('!MIGRATED') }) - it('rejects calls for a subgraph that was locked but deprecated', async function () { - const subgraph0 = await publishCurateAndLockSubgraph() + + it('rejects calls for a subgraph that was deprecated', async function () { + const subgraph0 = await publishAndCurateOnSubgraph() await advanceBlocks(256) - 
await gns.connect(me.signer).deprecateLockedSubgraph(subgraph0.id) + await gns.connect(me.signer).deprecateSubgraph(subgraph0.id) const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1597,11 +1410,10 @@ describe('L1GNS', () => { }, ) - // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 - await expect(tx).revertedWith('SUBGRAPH_DEPRECATED') + await expect(tx).revertedWith('!MIGRATED') }) it('rejects calls with an incorrect eth value', async function () { - const subgraph0 = await publishCurateLockAndSendSubgraph() + const subgraph0 = await publishCurateAndSendSubgraph() const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1620,11 +1432,10 @@ describe('L1GNS', () => { }, ) - // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 await expect(tx).revertedWith('WRONG_ETH_VALUE') }) it('rejects calls with zero maxSubmissionCost', async function () { - const subgraph0 = await publishCurateLockAndSendSubgraph() + const subgraph0 = await publishCurateAndSendSubgraph() const maxSubmissionCost = toBN('0') const maxGas = toBN('10') @@ -1643,7 +1454,6 @@ describe('L1GNS', () => { }, ) - // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 await expect(tx).revertedWith('NO_SUBMISSION_COST') }) }) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index d6b8af706..a0588564b 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -8,8 +8,6 @@ import { Account, toGRT, getL2SignerFromL1, - provider, - impersonateAccount, setAccountBalance, } from '../lib/testHelpers' import { L2FixtureContracts, NetworkFixture } from '../lib/fixtures' @@ -26,178 +24,25 @@ import { } from '../lib/gnsUtils' import { L2Curation } from '../../build/types/L2Curation' import { GraphToken } from '../../build/types/GraphToken' -import { encodeMPTStorageProofRLP, getBlockHeaderRLP } from '../lib/mptProofUtils' const { 
HashZero } = ethers.constants interface L1SubgraphParams { l1SubgraphId: string curatedTokens: BigNumber - lockBlockhash: string subgraphMetadata: string versionMetadata: string nSignal: BigNumber } -// Subgraph values taken from a mainnet subgraph, including a proof -// for a specific curator's balance, obtained using eth_getProof: -// await provider.send('eth_getProof', [ g.contracts.GNS.address, [ '0x2757396e3ce68a9104b5d84b5b0988e37067e780df1ad018184da3616033f432' ], '0x82e59e8ef5e6c4352d363fc5b6ea64d6f605d47ff0c454ea1133be6bacaff487']) -// Where the curator slot is 0x2757396e3ce68a9104b5d84b5b0988e37067e780df1ad018184da3616033f432, -// which was obtained by calling this in a localhost hardhat console: -// await g.contracts.GNS.getCuratorSlot('0x715f5c54c9a35783823650c340586f43acede4a907726e4e6499abde90331184', '0xE99bD186DBdA4Dc0A499b158E9E8eA7a628EDD14') -const mainnetSubgraphWithProof = { - subgraphId: '0x715f5c54c9a35783823650c340586f43acede4a907726e4e6499abde90331184', - curator: '0xE99bD186DBdA4Dc0A499b158E9E8eA7a628EDD14', - blockhash: '0x82e59e8ef5e6c4352d363fc5b6ea64d6f605d47ff0c454ea1133be6bacaff487', - blockNumber: 15884906, - nSignal: BigNumber.from('36740350312298917761'), - curatedTokens: BigNumber.from('1349853341070443183932'), - metadata: '0x7c0b534d4a5ee2a14b3209e678671ad7db2aa23d741a27ad4573daa5da4a67bb', // Obtained with a SubgraphMetadataUpdated event filter - getProofResponse: { - accountProof: [ - 
'0xf90211a08a9701cbb65b3ebd5ffd5d0c4e959a01f0f5777b60a7d3069d560aae9ced519fa05c14f1e3eb1aa27b98c5421813cd0a2ccd607f338aa5c6e51b01b5bbae9b7a22a0a8ef688324a1830e5052802e44e76122378468f08085b74584aab3dd7d655dfca0460ef2adac161e0a86112a2a9246e1d36e8006f344c146b211ec6985f371282fa077fee3062bfd699d695542b880f7cdf1f469500b2b6385cf8fe266bcb619f16ca0799795d800b383e54b1b70b89a462510a26f702e55d6e234ae599885cba183a4a0c21957e0a6895f39ee67c0db2bb2eb732b821fe034549d0f7e68db05fb434db4a0a71cd96e8ef9233fbe6ec72dae6208e06875bc3a2d7aeffc5a68e65a3edd353ca0549db853704cb95f28e3081c3ea5ea9953d6716e5ed1e85f1f07ca06cf3562cca0eb12b05a20566fdc91ff6e87344cb27a7739e2869978592081b3ee5da20e2a72a05cf1f39fc25860045fc1d5b12645d47eda0988b2f847d26bb871dd98f25ef608a05f56eb881b3957f3b0d27463f5db8dc0aa467fcc07420b38e7824e779099c78aa0167782c6e8c2a5c63f823f9a80749dc42807677cdf1baa489b6b3fd29913f66ea092c32da10ee6754d7864639ddd7bc849029bb789a0ac60624c06d54c0c4dea2da04753ee0c68d9ea737a61737780889d3c70853b02c42bdce010141e8974865049a06c66113c6c605086e103ec918a6ac51c0807f1475a8947174c4e7ca0b77d1ab980', - 
'0xf90211a092b4f87a7a56eb1b0cc4e37b1a470983de47b6e59bb9f001713eceeaf1e1b778a0570de7dce4feeb8714bfb203a85b6baaa6e828e4de6cef1b03a2214982523c1ea01366fb14fa2dcc99de2a1a32454d26a1f36c4d16d07dd693e33f7a5227dfd260a0aa87fd12b8b39ec060335012e43f95fb6c3eac97557d7ca8e75219be8f3b7da8a02dd06fd857e864e4f451c07c1b8fcbd01826ae296a943bcd1754baf28dfe1fc1a0844c26cacd9dda7a88d11c2fbc60773c7f6260df5c6cfba0204e666ea0dee13ba03bae90508ad2ca51f8e41ae91a7efdef4eb1894e7aa52b2e6d55b36e3621e484a00e85200c5a56f6a221eb10c4497b4a8dcdaf143fc02c84511d99eb51e1714bfca0dcd8e4198135ff184e437dc7f7be85f77c0b22cd5e2a682bea72d34b1732dba5a01d3f9883287cfbf33961c4700b91d31a5c103246302422f7f670ffcdd0d6da9aa02cb5f762b4718da65563d25a86934ef30127b07980013973942ace532d4693fba056bd9dbc1eeedb8dd7f1bc7b6750a58d50ade9ebc4ea1e448f74d0d28c998190a07125ff6fbc2aa718ee13fa1e18e96df6e1e08e6308b41ace8ce2bfd8a76f5ccaa036328b9158819bc7538f16b3915e58c4b188a6c6022715d164a815715b7e3e83a0a60be8f4456b0fad56abe9e9e34b08a5e6aba3363fb7861a69ac2059503f452ba0da1999c819fd92e96c21aec4206d3b4dd7c3ac322c233a237e2be6837ab377b680', - 
'0xf90211a0a4ec77fb4bb0a98e8ad69a419e3b0d1250a9609955a6c9bf537ba99e0f20a691a06be377d2802e354d166a7352df70b7912452edc1abeb4b1d4c42273a43a901cda06cc656bcb5ed234290549f9fc0cf2ec31f8ab58d3366b0a55272d4b963d57e98a07af81904e659c472a5aecfbab5b1368504fd8686d6c407af0e4e6a4027cb4374a0f66d3d2df212e13913b17f9f744248253843a5106ac91a9a4ece07576e12cc76a02765d2d176513a83f8ce5b91289571ac61dc0b6af1fbca8de8f737f7c14cf2a9a05774d994c9f98969ed39fbc775e8afd7432148bb46e9fc9b2eb085a4f8737ac3a0d122da0dc7a5a62c1d1708e558b396d38630c1168729f82020dcc9fd1e44448da0b17ed04570d4f4da14053fb9384c7edc8f20c11e76c6fdf3364947005a1608ada0deca116b59ebfa7cd4fb5d869212a7c92af35a3b8ee077a23eb17e37fe98ca40a01209069e0803e14a97d9ca11e34179b8857469ddbd6c6703ba33ab6ade014ef6a004f174729c89807aabd2850d35ed48f594875de96d1f89d93249aa0728c5840aa04dd240d8db8127a59db6131e6d32053fbc1884a5a0438edac929d7838a7053dba0bedb75f907bb25814a45ef07364882910e9730ab535cfadf8278d66c0ed17afaa07c4367a2c963808f0722fe007587fd2031b369198ee0794a29a7938f62eac828a039523e340a8c2968ba22b611a694694d467bfc8e7f8a467cef610cc2e8774be980', - 
'0xf90211a07238565a4a96d9c37896f8f48b8daca4e74ea1d4b767d5476a1ca945fe8d9736a0751c83fcffa8f0747cbadb4425e2d83e7c181ba5ba19a6df60931a63546e87aca0f7d9281e8e6c375deea49b98f55f5eb08a9511412e381d7bd96a25a7fbc9ca86a0d7373d9df46a011025971a3be7884a179e5af6fe90868d4105404c06a5c2f908a03c8830d58461246211f9b13dd0afd3ac34e1dac1e55329785e79c1ae14845b6ca06f7454b021f29191f006457aecf4e4695dbd652a4443162cf69cde1845b85df6a08c334bff53b2ba1e8df6f6aee68045ab8ee9f02b38f9766b97de48dcc02edcaea061db2c2f8b55ac092b1e3eba4a1e82f677fa52e4f4095d3dba831cb89f0306c3a04293fdf7986e8a464cf5a976b6ddff82ded83f28eef942ff1d8418d2799b06bfa07505f623087c999f63b8b2407853438ea3f747c4103bacc5fc6c62b330314624a0a2b540fa6b0564f959d8ccdba3659a59a00494fdf9cd1d9f4ea9efbe78227f70a0f9cc8d6b4cf4cb3178733e1daf8dd4e86e8c65d5e153cdae77542fcabdfd75fca0beebf7560922a87838e1c2119dd5f11a23b2f9f492d3d34d6faa8f2052a64722a069a3753b6b036c372444940038e387a6d3f77383cb48a302d0d8742a607652b7a02a1ddc02796d842608f4a372f8cb3beb90996acf8288bbb22d50331b56979c5fa0a0a548553326e53e260ce87c4b0c8271724aacd0115b3d0d28ce43ca208883e380', - 
'0xf90211a0e7efc1ad587fb9ecc0c343d94c894146f9ac499ad3b250368c11d6f531354b8fa07237f64ded7d0941d59656e5b590d3e6fc61093cc1740ad209dd300ee9f0ca12a042ac0a64ac87b16ec296edb580ce0910083690d9d1ace367369351a6fbfe0882a05533447ef90d3623bceccef86860a029ea394aa8783ee6cf3e982bd47ff12c03a0df248d8095d09d69e25381eb1ee6a90407fba3fe1baae6fbd56c2660986573bfa0622e8063b57c51b19747bc851ae0d828d1cde0bbf46f8a5180102dd94459c802a0e800b6c40184f7b7fa683ae191bb4aac1ce585bb6791b99eb4244e351d02f1cba04df04e181c844dd951cb08153bbf92c456bdbc68891bee2b5699f7dfb55b90a7a0833a530c25ed992d20626c55af19c9abe4d1c7a07d5a058dde29907fe65fbcd1a0e133c4cd151948b47d986b93c3572b04098c5da3435c27a9c847c7d5f990bc9ea0f3d3855ffbcc3c26adbeb526fae48536f4dbc39b9bf24f7a17b76335f6b000eea0c7a4d3135faba63cd89f64b0fabf4d726f0543fa347e8cf44db30bfe6ea9e11da0c2e15f8f776d1e3d9cfd29ef9b1e1c5ee5d6334152f587d72ecb9eed5fc3193ea0481f3b80d234d30cd1294075e557549e908d8152903e7f65382a68fd4aa1c683a0a9ba4206ef4055b28d1126bd21afd4ab26898267d7334191a6cc7f8b07a54122a0715b72d6ed83a6da4e9d376f86690caf329adbc5dcda4cfd0839e3f02066e20a80', - 
'0xf90211a00cad8552ddac3a1aa1c598c4d43a80d5a6cac7e58b543c86d5920a78d5b0f0dea0aa5f5aa9836447977b447ef698df483b8e458106b3e64a87005300bf2008562ea0c5925754c6c72a7b07512ee07acdae077ee70e9d3ab04065360fdc4bebdb155fa045f1e4df1025988aa9d0ce23c03f4b366a99286de59d82f1eafdf9a3890905a3a07c86218196a9dea70252b56ee769c10514bbdf33aebcd41fc4392af63febd239a08e202445f7c2fa69da1f1492a1b0e46d8b66b0b7024c7cff23ed5c07191da66fa0b3c179e3f3b9b216e4b35174e4e4d119526af446fdf757ad95e02e49cac28565a0fd74d0a8922342560f6dd820cfa373ec7353c6c66d74bd43351ebb7d103d5ceaa04a8689c3cb5396ee5a99469957f1f0670b0024b2ea3b75e0455797a5175c72a3a085270faec5854bff806bb9951261092745f657e062ae1499d3c5fde81fe14713a07dd8daf759fa359c36c7afc9f7963a557088f5483a8c5d7a0866237fb5f055c5a0d3ec4525a4f0d209a566b07f46a91c609b9c7acbc427db1390485cf4b5105557a005983a192b1f780b095661d92ef4d4102ffd03aad9adb6f3084ba26a11cd0daaa0afd710661f91421da1ece5ea87acc4f76e8af3dad5fa14f0a4ba1ac1a7276449a0ba0374b7981b92f55525b830723b32dce4ebd3c6a13fd06f61b465728ca077c7a0349075b6ff5265073d6ec6676f9b82991159e0bd8170596bcd80573f95576b7380', - '0xf90131a000e3833f5535c6eae67533a61520c8a99ec1d617d8230498ea57aaac1080ebf880a0432d16911e0f89bb5b6faff16255b203ee2e80db68098f75aee4673d327346b680a04911cdce5361377651739ba44d7f0dcb98e7d22c18f51c955480fcfb5e59abd580a09dec563e0a5682d43213c9a511e954705231ebaee0c72f0aa4f95792823ca0e280a01560fe4a9d9af402122701cccc9d3a13f77747b965d5efe09d0dfce95f807dcca08b5cd207548549e40fd1658e38b5b4227f7f03d8dd112541461c50f3c3ff38a180a0fbf6596703d7037eb2cc332d54fdfcda8e95c23e7478cfe31f6c1da43e7222f78080a0a67c5dda3bd39b79b00911abebf9c976950393b186cb5377ea09536dc48a1ff7a016a9123689ca894c201645726ead95406839cf2f8004461c0bd529321165857180', - '0xf851808080808080808080a0600efc8e5996c533afd640c3448c198e1101fa32e5bd246f71dd99c7201575308080808080a0a489e21458e112f8f8336e3e90ce8668b0a07bfe7921696a3f0feb657d05a50a80', - 
'0xf8669d2004b4599193722f03c0e529c8aab049a7fe5ed19ea9c3fed8c9365470b846f8440180a0384c27b2da88cde93261056c98ced4e09bba7ba17ecbd2c37e9c2cf26f836a22a0db307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', - ], - address: '0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825', - balance: '0x0', - codeHash: '0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', - nonce: '0x1', - storageHash: '0x384c27b2da88cde93261056c98ced4e09bba7ba17ecbd2c37e9c2cf26f836a22', - storageProof: [ - { - key: '0x2757396e3ce68a9104b5d84b5b0988e37067e780df1ad018184da3616033f432', - proof: [ - '0xf90211a0a718fd4452e43b9e3d1e25974976f536a603dde7c12e51d8189b4e3ea6c8dd6aa0a71f668d3dba2a9f242174738ff3596c68a84eb9088fffb307f48e061fbdc667a0a89dbcb1109a64587fdcde7b4268af231c5f0d27e1b25062c6c0bf7b48124d67a0bedf16b76516325a66ac35545179a8dd15ee1c6cd11b2e4357d533b19acb4b26a08b9b03cc165363ebc8f9f0590e76f98fc8502810e4ea87700f41f75a7f6692d8a037444b4dc0ef44f017449fe3b9ce45d9193edbf5c88b6e7bc22884424bf10373a0ff5c4bbed0973d8a097d7d8aa9d4534945aeb03a5785ada86b3a0ae079318894a0711fe60589286b4c83daf48cfba53e3242360c18b59ff7d93c72ffc766ed0428a08ae789ec3e7cce80fafd53e3f0c36744e15d1b0f293f93f691e451faa76b9327a0ca40f7477aca5208d28a6f9a00e6f6ad4fc49ebf83f9344443f004ba2d26a8aaa0958fd01948214784c18bdca21ef8419f04e108ea09f06eaea285f64812b98bada0458b092fc9ba5453463ca558487c118d5f0493aa98c1eb8306722c6fdabc2c7fa02c7c57f079bd040ff813a0a74ac9e46beadd2960eb33a6cd311c6aef4514592da0c785693d9760e93b431bf4b1d5933373a2ef1fe20599a38f3ce7c9643c2e9f23a0bdbe251449087722a740e7bdc0801bf55f3849e23e63d9dda2a8409d5163cd01a03dcac75caeb76acf717184167b6b490a6b96b2f0024daaf13dd8390b5a7c1baf80', - 
'0xf90211a0ff5fdab83f7d1d54dfb1fecdd0eb714225aa2533e5e999836c77588671815475a0ee2f0d24e448f85fc8520cf2d98035b2263a8af1db5b837f3fca3124b7b91f48a0787350c2fece0e0b614a68bfb83c20526d19142641b0588005eafb5678599f9ca09fa4124da658c059955c51944334a7891d0c8805f114d0a857079e920cbe6f6ca0b19f68062d189e03ae068799e351f9e1a5927c567067563ccff2f597b8dfd45da05457b729e133026647b6d98180bbbc56076f454fb291879a0c16b22da2a335c5a072031df309f78657aee2acb7b43a486effb4ecd68707d8a438c113bfaf6f1913a0dc0fba7acc1c0a48fc5c978af68fb20c9acaafc7c7515040b1448f324da6050aa0295ff43c4950ab5dee47b2f9e8a04d6a80180643e96488b152ddbd71e25c3b45a0b435feea8e8a46b90fc0156339bce6a210a314694925976b5c82892e1befaaada087dbef5907ae3f99cbe9de597444d7cd15388ccbe88c1b36406f1dad4b0e10eca0f2f0da32846e51736baa70f8bb7922b9fe74002df69ae9d6af48115264b959e9a0462ec92782e4c8f04061daa351713be998149398a2934b4c816b2b9c54e7968da069d20640c46c43d8c5feb541fb0327481d985b623e4f91bea6109d66f486798ea0104e278ae371a220a0d56a72e70ee9657e333baae96329cc862d96eab978804fa06ad2bac3206493db0c51b790f25ecb10ac634112c188c12f5e65496fc14061d180', - 
'0xf901f1a01bce8a1cac817f9bd318953b01214d46d0c2ffcffe3f22c81901d9fb8aa55009a0b4880ebbfa94b50526e3de8b46ac96ea60dda4f4edcb1e0316b0299a5d30b04ca0e0d4603a3cd66de5abbe1bb435ed7c317b9edfdad08a0afe84eba49b9fcf088da0c78be3a18158fcef5f88ecd1044da21d03b37d91b906f1abf1ae4cc753088122a008bb32eda0081f564b3426a9ffdd06d9e2856b498b47315622058f176626ed1280a05f6af6349189ad63f9a3af757da563c33e42ffffe1f69a9d4855957920c583fca09c3789f507808280b4a7c4e6234d6582337a2aae5d394111efb07e55e3c1c448a0b7234c0127f2d87aa64f17f09d7d1d72f5701d5410459309da5d15979b6c8c9aa066aabcac035cc9a5fd651bd52328a36a37d4762a6491eb2808af5267acb3f775a0b2d7d676b32bcfd5e8df9cd7f95a9bb91eac071a5b881d9fbc4d9cee0fafedf6a0102c6f1a447995d714d64ab2729b4261df1226374c2f4844f29b2edc69a8b46ca0d03a7b0103fbcba49b8573b566d50d117b00b2c69c048148ef8795fa0a63c7efa0cf6ad8ab9618d75f6d00f49e7b496c77f4591869bc2d0a3ff65d503b2383cfa9a06488cd46027de9ede4d7a7e10327e673234273533310addef6dc3a969aad0bdea0225875ae810220c85166fe921555be9efacceae0aa4654e9fdc2df25cbd1642380', - '0xf891a01cc2e5507a5150448fe06d254adc104702198a9f8eb5afb15567e80282229e2f80808080808080a04ad7cdbaba63f4b3b9c397858d06888424b7a9aa49d59f9c24fe54211b11d1e68080a09af52c684dd75b985f4aed07ea00ca7ac18201d717064f657fb86f9427aded33808080a03e61dcabfaf134b2b84b92607a7d7abf5b7950f05129a63e77c1d97d7c5e411580', - '0xeb9f20cb3e0c7eaed59eb82ba9e6f55fbf77c28472e242e7bfa15f1e2c3305ef528a8901523b25a875df6c79', - ], - value: '0x1523b25a875df6c79', - }, - ], - }, -} - -// Same but using the following slot for the getProof call: -// getLegacyCuratorSlot(account, accountSeqId, curator) => '0xbda2ea2df35ed9dad1726e4b7b20512302d0f12693c5cf63a4d778d0945b456b' -const mainnetLegacySubgraphWithProof = { - subgraphId: '0xb3424eb47c56b1cd4e82ab42f8a614d7fdc97c88a6887e0b51998968da8bca12', - account: '0x9EfbEA665B79F366fCBB390a55C617257E0C678c', - accountSeqId: 0, - curator: '0x9EfbEA665B79F366fCBB390a55C617257E0C678c', - blockhash: '0x82e59e8ef5e6c4352d363fc5b6ea64d6f605d47ff0c454ea1133be6bacaff487', - 
blockNumber: 15884906, - nSignal: BigNumber.from('409307499657003028320'), - curatedTokens: BigNumber.from('163746633794032920672522'), - metadata: '0xe559b3bce99b7e621504b23bb804a95daa48e2a8aacb7b836d64626d63b2b5c2', // Obtained with a SubgraphMetadataUpdated event filter - getProofResponse: { - accountProof: [ - '0xf90211a08a9701cbb65b3ebd5ffd5d0c4e959a01f0f5777b60a7d3069d560aae9ced519fa05c14f1e3eb1aa27b98c5421813cd0a2ccd607f338aa5c6e51b01b5bbae9b7a22a0a8ef688324a1830e5052802e44e76122378468f08085b74584aab3dd7d655dfca0460ef2adac161e0a86112a2a9246e1d36e8006f344c146b211ec6985f371282fa077fee3062bfd699d695542b880f7cdf1f469500b2b6385cf8fe266bcb619f16ca0799795d800b383e54b1b70b89a462510a26f702e55d6e234ae599885cba183a4a0c21957e0a6895f39ee67c0db2bb2eb732b821fe034549d0f7e68db05fb434db4a0a71cd96e8ef9233fbe6ec72dae6208e06875bc3a2d7aeffc5a68e65a3edd353ca0549db853704cb95f28e3081c3ea5ea9953d6716e5ed1e85f1f07ca06cf3562cca0eb12b05a20566fdc91ff6e87344cb27a7739e2869978592081b3ee5da20e2a72a05cf1f39fc25860045fc1d5b12645d47eda0988b2f847d26bb871dd98f25ef608a05f56eb881b3957f3b0d27463f5db8dc0aa467fcc07420b38e7824e779099c78aa0167782c6e8c2a5c63f823f9a80749dc42807677cdf1baa489b6b3fd29913f66ea092c32da10ee6754d7864639ddd7bc849029bb789a0ac60624c06d54c0c4dea2da04753ee0c68d9ea737a61737780889d3c70853b02c42bdce010141e8974865049a06c66113c6c605086e103ec918a6ac51c0807f1475a8947174c4e7ca0b77d1ab980', - 
'0xf90211a092b4f87a7a56eb1b0cc4e37b1a470983de47b6e59bb9f001713eceeaf1e1b778a0570de7dce4feeb8714bfb203a85b6baaa6e828e4de6cef1b03a2214982523c1ea01366fb14fa2dcc99de2a1a32454d26a1f36c4d16d07dd693e33f7a5227dfd260a0aa87fd12b8b39ec060335012e43f95fb6c3eac97557d7ca8e75219be8f3b7da8a02dd06fd857e864e4f451c07c1b8fcbd01826ae296a943bcd1754baf28dfe1fc1a0844c26cacd9dda7a88d11c2fbc60773c7f6260df5c6cfba0204e666ea0dee13ba03bae90508ad2ca51f8e41ae91a7efdef4eb1894e7aa52b2e6d55b36e3621e484a00e85200c5a56f6a221eb10c4497b4a8dcdaf143fc02c84511d99eb51e1714bfca0dcd8e4198135ff184e437dc7f7be85f77c0b22cd5e2a682bea72d34b1732dba5a01d3f9883287cfbf33961c4700b91d31a5c103246302422f7f670ffcdd0d6da9aa02cb5f762b4718da65563d25a86934ef30127b07980013973942ace532d4693fba056bd9dbc1eeedb8dd7f1bc7b6750a58d50ade9ebc4ea1e448f74d0d28c998190a07125ff6fbc2aa718ee13fa1e18e96df6e1e08e6308b41ace8ce2bfd8a76f5ccaa036328b9158819bc7538f16b3915e58c4b188a6c6022715d164a815715b7e3e83a0a60be8f4456b0fad56abe9e9e34b08a5e6aba3363fb7861a69ac2059503f452ba0da1999c819fd92e96c21aec4206d3b4dd7c3ac322c233a237e2be6837ab377b680', - 
'0xf90211a0a4ec77fb4bb0a98e8ad69a419e3b0d1250a9609955a6c9bf537ba99e0f20a691a06be377d2802e354d166a7352df70b7912452edc1abeb4b1d4c42273a43a901cda06cc656bcb5ed234290549f9fc0cf2ec31f8ab58d3366b0a55272d4b963d57e98a07af81904e659c472a5aecfbab5b1368504fd8686d6c407af0e4e6a4027cb4374a0f66d3d2df212e13913b17f9f744248253843a5106ac91a9a4ece07576e12cc76a02765d2d176513a83f8ce5b91289571ac61dc0b6af1fbca8de8f737f7c14cf2a9a05774d994c9f98969ed39fbc775e8afd7432148bb46e9fc9b2eb085a4f8737ac3a0d122da0dc7a5a62c1d1708e558b396d38630c1168729f82020dcc9fd1e44448da0b17ed04570d4f4da14053fb9384c7edc8f20c11e76c6fdf3364947005a1608ada0deca116b59ebfa7cd4fb5d869212a7c92af35a3b8ee077a23eb17e37fe98ca40a01209069e0803e14a97d9ca11e34179b8857469ddbd6c6703ba33ab6ade014ef6a004f174729c89807aabd2850d35ed48f594875de96d1f89d93249aa0728c5840aa04dd240d8db8127a59db6131e6d32053fbc1884a5a0438edac929d7838a7053dba0bedb75f907bb25814a45ef07364882910e9730ab535cfadf8278d66c0ed17afaa07c4367a2c963808f0722fe007587fd2031b369198ee0794a29a7938f62eac828a039523e340a8c2968ba22b611a694694d467bfc8e7f8a467cef610cc2e8774be980', - 
'0xf90211a07238565a4a96d9c37896f8f48b8daca4e74ea1d4b767d5476a1ca945fe8d9736a0751c83fcffa8f0747cbadb4425e2d83e7c181ba5ba19a6df60931a63546e87aca0f7d9281e8e6c375deea49b98f55f5eb08a9511412e381d7bd96a25a7fbc9ca86a0d7373d9df46a011025971a3be7884a179e5af6fe90868d4105404c06a5c2f908a03c8830d58461246211f9b13dd0afd3ac34e1dac1e55329785e79c1ae14845b6ca06f7454b021f29191f006457aecf4e4695dbd652a4443162cf69cde1845b85df6a08c334bff53b2ba1e8df6f6aee68045ab8ee9f02b38f9766b97de48dcc02edcaea061db2c2f8b55ac092b1e3eba4a1e82f677fa52e4f4095d3dba831cb89f0306c3a04293fdf7986e8a464cf5a976b6ddff82ded83f28eef942ff1d8418d2799b06bfa07505f623087c999f63b8b2407853438ea3f747c4103bacc5fc6c62b330314624a0a2b540fa6b0564f959d8ccdba3659a59a00494fdf9cd1d9f4ea9efbe78227f70a0f9cc8d6b4cf4cb3178733e1daf8dd4e86e8c65d5e153cdae77542fcabdfd75fca0beebf7560922a87838e1c2119dd5f11a23b2f9f492d3d34d6faa8f2052a64722a069a3753b6b036c372444940038e387a6d3f77383cb48a302d0d8742a607652b7a02a1ddc02796d842608f4a372f8cb3beb90996acf8288bbb22d50331b56979c5fa0a0a548553326e53e260ce87c4b0c8271724aacd0115b3d0d28ce43ca208883e380', - 
'0xf90211a0e7efc1ad587fb9ecc0c343d94c894146f9ac499ad3b250368c11d6f531354b8fa07237f64ded7d0941d59656e5b590d3e6fc61093cc1740ad209dd300ee9f0ca12a042ac0a64ac87b16ec296edb580ce0910083690d9d1ace367369351a6fbfe0882a05533447ef90d3623bceccef86860a029ea394aa8783ee6cf3e982bd47ff12c03a0df248d8095d09d69e25381eb1ee6a90407fba3fe1baae6fbd56c2660986573bfa0622e8063b57c51b19747bc851ae0d828d1cde0bbf46f8a5180102dd94459c802a0e800b6c40184f7b7fa683ae191bb4aac1ce585bb6791b99eb4244e351d02f1cba04df04e181c844dd951cb08153bbf92c456bdbc68891bee2b5699f7dfb55b90a7a0833a530c25ed992d20626c55af19c9abe4d1c7a07d5a058dde29907fe65fbcd1a0e133c4cd151948b47d986b93c3572b04098c5da3435c27a9c847c7d5f990bc9ea0f3d3855ffbcc3c26adbeb526fae48536f4dbc39b9bf24f7a17b76335f6b000eea0c7a4d3135faba63cd89f64b0fabf4d726f0543fa347e8cf44db30bfe6ea9e11da0c2e15f8f776d1e3d9cfd29ef9b1e1c5ee5d6334152f587d72ecb9eed5fc3193ea0481f3b80d234d30cd1294075e557549e908d8152903e7f65382a68fd4aa1c683a0a9ba4206ef4055b28d1126bd21afd4ab26898267d7334191a6cc7f8b07a54122a0715b72d6ed83a6da4e9d376f86690caf329adbc5dcda4cfd0839e3f02066e20a80', - 
'0xf90211a00cad8552ddac3a1aa1c598c4d43a80d5a6cac7e58b543c86d5920a78d5b0f0dea0aa5f5aa9836447977b447ef698df483b8e458106b3e64a87005300bf2008562ea0c5925754c6c72a7b07512ee07acdae077ee70e9d3ab04065360fdc4bebdb155fa045f1e4df1025988aa9d0ce23c03f4b366a99286de59d82f1eafdf9a3890905a3a07c86218196a9dea70252b56ee769c10514bbdf33aebcd41fc4392af63febd239a08e202445f7c2fa69da1f1492a1b0e46d8b66b0b7024c7cff23ed5c07191da66fa0b3c179e3f3b9b216e4b35174e4e4d119526af446fdf757ad95e02e49cac28565a0fd74d0a8922342560f6dd820cfa373ec7353c6c66d74bd43351ebb7d103d5ceaa04a8689c3cb5396ee5a99469957f1f0670b0024b2ea3b75e0455797a5175c72a3a085270faec5854bff806bb9951261092745f657e062ae1499d3c5fde81fe14713a07dd8daf759fa359c36c7afc9f7963a557088f5483a8c5d7a0866237fb5f055c5a0d3ec4525a4f0d209a566b07f46a91c609b9c7acbc427db1390485cf4b5105557a005983a192b1f780b095661d92ef4d4102ffd03aad9adb6f3084ba26a11cd0daaa0afd710661f91421da1ece5ea87acc4f76e8af3dad5fa14f0a4ba1ac1a7276449a0ba0374b7981b92f55525b830723b32dce4ebd3c6a13fd06f61b465728ca077c7a0349075b6ff5265073d6ec6676f9b82991159e0bd8170596bcd80573f95576b7380', - '0xf90131a000e3833f5535c6eae67533a61520c8a99ec1d617d8230498ea57aaac1080ebf880a0432d16911e0f89bb5b6faff16255b203ee2e80db68098f75aee4673d327346b680a04911cdce5361377651739ba44d7f0dcb98e7d22c18f51c955480fcfb5e59abd580a09dec563e0a5682d43213c9a511e954705231ebaee0c72f0aa4f95792823ca0e280a01560fe4a9d9af402122701cccc9d3a13f77747b965d5efe09d0dfce95f807dcca08b5cd207548549e40fd1658e38b5b4227f7f03d8dd112541461c50f3c3ff38a180a0fbf6596703d7037eb2cc332d54fdfcda8e95c23e7478cfe31f6c1da43e7222f78080a0a67c5dda3bd39b79b00911abebf9c976950393b186cb5377ea09536dc48a1ff7a016a9123689ca894c201645726ead95406839cf2f8004461c0bd529321165857180', - '0xf851808080808080808080a0600efc8e5996c533afd640c3448c198e1101fa32e5bd246f71dd99c7201575308080808080a0a489e21458e112f8f8336e3e90ce8668b0a07bfe7921696a3f0feb657d05a50a80', - 
'0xf8669d2004b4599193722f03c0e529c8aab049a7fe5ed19ea9c3fed8c9365470b846f8440180a0384c27b2da88cde93261056c98ced4e09bba7ba17ecbd2c37e9c2cf26f836a22a0db307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', - ], - address: '0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825', - balance: '0x0', - codeHash: '0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', - nonce: '0x1', - storageHash: '0x384c27b2da88cde93261056c98ced4e09bba7ba17ecbd2c37e9c2cf26f836a22', - storageProof: [ - { - key: '0xbda2ea2df35ed9dad1726e4b7b20512302d0f12693c5cf63a4d778d0945b456b', - proof: [ - '0xf90211a0a718fd4452e43b9e3d1e25974976f536a603dde7c12e51d8189b4e3ea6c8dd6aa0a71f668d3dba2a9f242174738ff3596c68a84eb9088fffb307f48e061fbdc667a0a89dbcb1109a64587fdcde7b4268af231c5f0d27e1b25062c6c0bf7b48124d67a0bedf16b76516325a66ac35545179a8dd15ee1c6cd11b2e4357d533b19acb4b26a08b9b03cc165363ebc8f9f0590e76f98fc8502810e4ea87700f41f75a7f6692d8a037444b4dc0ef44f017449fe3b9ce45d9193edbf5c88b6e7bc22884424bf10373a0ff5c4bbed0973d8a097d7d8aa9d4534945aeb03a5785ada86b3a0ae079318894a0711fe60589286b4c83daf48cfba53e3242360c18b59ff7d93c72ffc766ed0428a08ae789ec3e7cce80fafd53e3f0c36744e15d1b0f293f93f691e451faa76b9327a0ca40f7477aca5208d28a6f9a00e6f6ad4fc49ebf83f9344443f004ba2d26a8aaa0958fd01948214784c18bdca21ef8419f04e108ea09f06eaea285f64812b98bada0458b092fc9ba5453463ca558487c118d5f0493aa98c1eb8306722c6fdabc2c7fa02c7c57f079bd040ff813a0a74ac9e46beadd2960eb33a6cd311c6aef4514592da0c785693d9760e93b431bf4b1d5933373a2ef1fe20599a38f3ce7c9643c2e9f23a0bdbe251449087722a740e7bdc0801bf55f3849e23e63d9dda2a8409d5163cd01a03dcac75caeb76acf717184167b6b490a6b96b2f0024daaf13dd8390b5a7c1baf80', - 
'0xf90211a004e45dbe38ce18e4066d8ef3f3601a60e99bfe8108bdd33fc2a4412a871a92f2a0009a82307cb4be4409ea695c2a2615dcea6ac78723a41b3c66aaa582ec96c436a0cd3d3c05eaf081845f8d7129f3b95f2426d55049a339bc2cf7dd0c5e0440fcefa0dc43a9e95d04857aec0f278c210c323e2fa55b481e60e1c347d22a7f2e9cc044a0303cab7a9dae9b7e0306c3ac2adc6cca1af8319d4c70994ace25ae651121dcd6a092b1582b96d9f7201a6d77b520381d08dab1884b144ea6fa06a3c2c652e88fd7a0833bcc144833b489d85b9118dec149116ed0142d2b0ad6019ea93c58a523d4cea0d16f7a3d5e5a261eee467c374c4d59bedac32173d3cbc679c6cf6036cb4c67f9a01dc6c5e592451fccb000cbd36a4f111f0a6cd8af2ba300754d5ef34d9be9b9f3a05f5066b1000280d7d72fb9dc1dd388c87bfef059f4777580f33b13cb9a96e65ca0d2eb2645cc5c78883c221f844180b95fb888b4b56345b9fb9b9e2e3e9870d21fa0da3872e955330bcbbc3d89ba192b3e9a114039bd03dd2ce287a37b6e8119b83ea06b130d034f5ec06edefe0f5e75a03f3be8eb655e13d9f79918c19dc0661e29b6a026b2e7a3028a503b128b5eabe667b0b90add26dc0380525f25517195049971aca0be5369d1ed4a91ea36f43e9d9c26aa4e2f4f0f5755c481a9d57026147889c105a0ccd290357453ed6b5108b42ba17821172fcdd1fd985b0a20a72547222fec74ec80', - '0xf901b1a0a09eaceab2d25ef5a6488f0811ba68e754636aea2efe87d46bad185ee5cc353e80a00a13901525f5047839e8ccb6ef391d4d1ec24b5e5bcd4c428b5406116c67b21da05232c3a156b161974b4d4914fecd4962fd09d9c7059ca022ea0332f496f40bcda06e95132ede276dc9d77eea4ec90e7cedcd3343a96baa92d424da4559cb9542f6a068f6197670729e75f09f720167c9f5d15e09bff58c923e6eeef5e54d308cd299a090a495dd586ad6ce8922a7590183f6944d55082cfacfafbfd66935994878c609a01afc7d6bd7ed70e71a927fede6491cd04092c6bfc161bf1c954f73b921fd9de88080a0047707cacaff095fa4b8bfb01f79979023d27fa0510b23003cc4656bb5c771d1a0534d763003352d793214b28f3a07d0bf6be7a7f02d7411086d69a2d89680140ba0d19703221c00ad02c77ac9d97f56994a86831bb12a3921a189c287fef3e2275ca036f8494a2dacb798104d69e255203ff27aa5d767664a8cefc5eccb77886077f2a0d4687c24a348d07ea079c536c8c2f584eb518be959cfecbe6f480d0b7e735326a02093357f40f0bad6b00bf8f0282bfd34a0dd010b6dc52bfc9e1355992c3b1cd280', - 
'0xf851a06786edcc9c67f1504119c4d0e36ae6f9127266389d0ef5ee4afd4b5e50bf3f4a80808080808080808080a0d91ee0a4174d652a7c3c2a2db3d7e968cc3a84f5919e6b7af4fdd4d5a1acbebc8080808080', - '0xeb9f20ac8d80e0fe0db54a620deaa06305b68851845a4da688f862fb5fea1692a58a890876ab3ecd9b0cc6ca', - ], - value: '0x876ab3ecd9b0cc6ca', - }, - ], - }, -} - -const mainnetProofForDifferentBlock = { - accountProof: [ - '0xf90211a008ba162be4a831acbdfe628aa1867ea899e724b78570d2e2e6a3389c4f51e7aaa0901aa8bef1925917994c6abcb439808bbfae39aae8623b255c3529a898c14e5ca05b3ff03602e8561e2f4fdaccf0daff0afd6c59dd6314a7d5754a8d3658f48864a06ea25db38ef4149ea9716d73996cc67806a9db5a244fbaedb353388b39cd31bfa0bfef765e7fe1f80cc810235ac303c4490fed198b7b7fff3923d1d0004d98a840a0e00f852dd111d919df6f03fa88815797b13909ead7175f731e8f58f8756c0105a0aafce80dc97c6059a771e475e4076e6abd5c447f7e04104fc9d0d6a6dfd0932da0e6b2f28ff41158e14d6b410e99511f6f7554e74f7762629dfb4ad179714b5ac7a0e83694d3f79b52db460b9cf1aba33cc008cd1e12de9bedb08308c260250555f4a0c9436bde76cf5e9712b2d9e03d394e9f779ab45b0f771c79f087d6b289887adca0bf80398498ecfbd534a5771cfc1f79ae5d494aab3faa83b4b7d5858ff0e58580a095118ba475cfd1c776b02ac34716a9bc1d52a00c56721d4ba725d3f876f5f315a0f0ea8039d2ccf1651fb7eb75134367d1ab2f1849b9ac702a924642a230c5bb51a038aaf7f55c78bb4933bd6cfb030e274a459e1fda0431d142505a4e6f6e3a5123a009c2d3201fd7d93a686677076fa92557a47c35bad888d856d9d7849a8ea01b61a0c10c88e88b8d77bcaa5e8f183fb0558ca98a38cebb60184c48645ddd4b38092c80', - 
'0xf90211a0a42a0ef19c23b780e03a3b5f9de863786af2169fa15b85d393fbae2052c07d57a0dfd8f4a92f62a08a297e2525f297a2a730a8edc8aa81cfa92a01dbecdfd16a79a00cfd319d602d6a17eaa69ac1ac48efb56867fc71fb55c24a17dc758492ef510aa01d8c4d2a39257a0f22c164e26504685a6d223a8482fa21f01168a8663573ce62a07f4c2fdf5f1b961b5762ce9d2bd729c33e0dfdc47a89127f61ec6589bf45d675a0c898c361c0affc958650814701aab3746a46e70379035783d95c159db1c09266a00f734e2c6cfca74f7946a5973f773d2ef50019619e5106608e304d5e6746a61ca03ca7b92c054c934f5a321784778475f3cf4356ebfe298a1b0633864c6e8f4c4ea0303e606e88bc5a64911e3fd2366c394cd95a0f7821b635c9dc1675aadd90b338a0fdc3d4895ccae7d5e643e2a556d4d0761756559ba6823e5b579e0eb0f7fab581a0724c78e570600ed9b63ef27f37c833dafff499b020e1ac8dcbe638bf400c0968a0baf64f7207dc9f24b0d6baf69cd2712ed11f5ca94c1b7f3d6a00e2b6e40c1d02a0074b2ce83ab279776f145d98420e421a7db0058a36cf901b7a2ec6b21bb740e4a07a6f49435408d90fca807ede88d4f980a55e9879b902139be8a0b7b4478a6a29a06fd16bc6196aec8f3a236551709c5d375c49b7185e1f98dedbb0ab49794659c6a0c442b425ea1bb4f4b1841468be4a1fd080fb67138439d68b91d235a7d0d8542c80', - 
'0xf90211a0efd2613d0a804f4fa546e7a064da4267b1b5ee413cc0eac950fa068d44d66d58a0ddbe7e522df08d935405a051f6a5ac4ece17b713078279a47c4cda03aa00a1e4a085f48e639de7e35a5929fc18a5283bd886f1db1daa111a2037f191642f813ff3a01b600c46499d6720e691006359324d39ec9dabdf285dd703cc1ac4c5d54eb33fa08f1e1bd5560548120655491e5184d090095a92f778db5884f984d822d0df587ea01f49ad2a577f00dd0e7eba492836f22a38e91acf463a0151d72f3018e1063fd9a09a1f1d77d752cf64d4a9808b881e7308dbd1cd9db6d5f43b5bf861ab23107ee1a0da17e1f1ee4f2d0ba1e86fae61f56fa6973512d3e67d2be803b87b0a708340ffa0d953d5d71f1da9bdf3b639eccdddcebb0b3f279e7a5c8b5a4c623ea9f64afb96a077c6c029a1bd6ae13e57204ad02435c9d16ac08991936b793bfa3a25c9bc6a22a0cb9737b08c26b3b367d27e25c89625a131833ffb6fb32752cede3774e65f0d15a0a41fbe982ce84f9a8c815c1b2624daf2dfee2722dc0e165499ac4715ab0ad6a0a038b116b0c61672e61e5245671ab797a9c5755100081782631a09a0ab7677e5a5a0ff94af9e2b34b8ae9f2bb0851800a8d79409f71ef92dd5ac76bb387fdc4bca17a03bf35ce5cd3f63e84b36e75ff858aaaa824ddb29c4d49e9caeaea9c5aff38d0ba03802c963326159a902c71e5627c44a4435831d126ea13c4457c980f8b456022f80', - 
'0xf90211a0750f9a5ef0d6aef805bc3542ea9e45dd1c1688e676bcfb962604e2f05a935afba0c974aae944f91467b5678fc1f39889b5a52d9013517aa79d1296a0f98d3608eea076670c0ae12a32aba44db37dd7f46015419ac8d4dcb5e7f11dfd0883c6a9a27ba04539dac694cf59b90c7146850d0e21ded661e02673d0066042281b935c83d166a0ddb0213975d2fe1d4266edbf9e5567fe9af3eb32a943dc6de60ab14fc62896e1a0a36ef0befab6acb3465e84e1424ebd0255fa7885765bfc82ebacb13b4c3f4bf2a0909850012d77c57ad74720c0944edcec60fd77cc91e1bf79cfbb8c278e73ca6ca0b843bb94c7543757b3818e585139cdff16e3dc3815943c08eda53c8d9e8153faa052da49f83ce02065944aad3b0df9b026cec65f1622a35e5162cc4f44e50f3da6a0c6d0966eb43a9d33ea326a8d6a1762efc886072e9314bfb93e6d9a81594ea852a0189167569b2e7eb59cae48e74f0b358c129d504c007eec2fda6f4b716149e1aea0d835433ad49cd8106ef8d03eb79a2e6bd9459da70411fe37983ef026c8236471a023e6a589a587d624703575127dbb3865f157fca76190fdc33f2a3f73c39105f0a0c998aa53170787e29bdc444989965032d4258da718175163368a306c04229431a0abb958a4cf70d39472163e1b2309888d510cc3e0445748bb127eb69e5d7c35aea09592f1f09c59b2289749038535defffa1b98bcf7344ad05b9d3cccd75110844a80', - 
'0xf90211a0e7efc1ad587fb9ecc0c343d94c894146f9ac499ad3b250368c11d6f531354b8fa07237f64ded7d0941d59656e5b590d3e6fc61093cc1740ad209dd300ee9f0ca12a042ac0a64ac87b16ec296edb580ce0910083690d9d1ace367369351a6fbfe0882a05533447ef90d3623bceccef86860a029ea394aa8783ee6cf3e982bd47ff12c03a0df248d8095d09d69e25381eb1ee6a90407fba3fe1baae6fbd56c2660986573bfa0622e8063b57c51b19747bc851ae0d828d1cde0bbf46f8a5180102dd94459c802a0e800b6c40184f7b7fa683ae191bb4aac1ce585bb6791b99eb4244e351d02f1cba03104783681ab55e0f05486fcdc8e2fcf784d5a52c78c32832d7ce4794524b824a0833a530c25ed992d20626c55af19c9abe4d1c7a07d5a058dde29907fe65fbcd1a0e133c4cd151948b47d986b93c3572b04098c5da3435c27a9c847c7d5f990bc9ea0f3d3855ffbcc3c26adbeb526fae48536f4dbc39b9bf24f7a17b76335f6b000eea0c7a4d3135faba63cd89f64b0fabf4d726f0543fa347e8cf44db30bfe6ea9e11da0c2e15f8f776d1e3d9cfd29ef9b1e1c5ee5d6334152f587d72ecb9eed5fc3193ea05606f5dc9f0d6d58473595cca2a3bfe3a58cfd9f6f530f52a40dfcf477428f22a0a9ba4206ef4055b28d1126bd21afd4ab26898267d7334191a6cc7f8b07a54122a0715b72d6ed83a6da4e9d376f86690caf329adbc5dcda4cfd0839e3f02066e20a80', - 
'0xf90211a00cad8552ddac3a1aa1c598c4d43a80d5a6cac7e58b543c86d5920a78d5b0f0dea0dd59269713fe63d6391c36afe5676c00a2077bd60482e391360af5c3771248eca0c5925754c6c72a7b07512ee07acdae077ee70e9d3ab04065360fdc4bebdb155fa045f1e4df1025988aa9d0ce23c03f4b366a99286de59d82f1eafdf9a3890905a3a082f4d71cb736ffdf729a683152c26b2f99c8dda4b28693dccd9853c58982a2c4a08e202445f7c2fa69da1f1492a1b0e46d8b66b0b7024c7cff23ed5c07191da66fa0b3c179e3f3b9b216e4b35174e4e4d119526af446fdf757ad95e02e49cac28565a0fd74d0a8922342560f6dd820cfa373ec7353c6c66d74bd43351ebb7d103d5ceaa04a8689c3cb5396ee5a99469957f1f0670b0024b2ea3b75e0455797a5175c72a3a085270faec5854bff806bb9951261092745f657e062ae1499d3c5fde81fe14713a07dd8daf759fa359c36c7afc9f7963a557088f5483a8c5d7a0866237fb5f055c5a0d3ec4525a4f0d209a566b07f46a91c609b9c7acbc427db1390485cf4b5105557a005983a192b1f780b095661d92ef4d4102ffd03aad9adb6f3084ba26a11cd0daaa0afd710661f91421da1ece5ea87acc4f76e8af3dad5fa14f0a4ba1ac1a7276449a0ba0374b7981b92f55525b830723b32dce4ebd3c6a13fd06f61b465728ca077c7a0349075b6ff5265073d6ec6676f9b82991159e0bd8170596bcd80573f95576b7380', - '0xf90131a000e3833f5535c6eae67533a61520c8a99ec1d617d8230498ea57aaac1080ebf880a0432d16911e0f89bb5b6faff16255b203ee2e80db68098f75aee4673d327346b680a0241e5caf848b74ce5efbaa4f83b7df94d3bf5ae87d8fa7f97aff4094b05459bb80a09dec563e0a5682d43213c9a511e954705231ebaee0c72f0aa4f95792823ca0e280a01560fe4a9d9af402122701cccc9d3a13f77747b965d5efe09d0dfce95f807dcca08b5cd207548549e40fd1658e38b5b4227f7f03d8dd112541461c50f3c3ff38a180a0fbf6596703d7037eb2cc332d54fdfcda8e95c23e7478cfe31f6c1da43e7222f78080a0a67c5dda3bd39b79b00911abebf9c976950393b186cb5377ea09536dc48a1ff7a016a9123689ca894c201645726ead95406839cf2f8004461c0bd529321165857180', - '0xf851808080808080808080a0600efc8e5996c533afd640c3448c198e1101fa32e5bd246f71dd99c7201575308080808080a02a55c146621228f2dcddd1135d942971c0ee296df5055f5dee8e92b9ab462c6380', - 
'0xf8669d2004b4599193722f03c0e529c8aab049a7fe5ed19ea9c3fed8c9365470b846f8440180a0a32e5d12226001f1f5f4a3d574ebf9487af319b24eb0f98f02e26dec3944c3f1a0db307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', - ], - address: '0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825', - balance: '0x0', - codeHash: '0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75', - nonce: '0x1', - storageHash: '0xa32e5d12226001f1f5f4a3d574ebf9487af319b24eb0f98f02e26dec3944c3f1', - storageProof: [ - { - key: '0x2757396e3ce68a9104b5d84b5b0988e37067e780df1ad018184da3616033f432', - proof: [ - '0xf90211a0a8e75f540571eb3c42baaac34fc6cbf805bab88fc9b56a89d2f34cdb24501870a0a71f668d3dba2a9f242174738ff3596c68a84eb9088fffb307f48e061fbdc667a0885ca4c629f3924e02c8e45cf078e484257af19e1a4b58aee012147ae3a92b95a0bedf16b76516325a66ac35545179a8dd15ee1c6cd11b2e4357d533b19acb4b26a0582f96c7d74fe3db5e03f6bec8bd270851854184c0fe603818618cde931dd9f0a02cd0952b4eeac88968ee221063915ef781eaeabb03de5aa1004b793a4f718cf6a0fbef9a34532cfe338a73ccedd177eaf1499f4a2e64095f055ac7908290baf4f9a0eeba7e56f3973a00a3ff5539d81ffb84df02f3798aee2561c315a00ee4b47489a0daf1b46b0f454e044a2a79454f900e02846f7a83f68f9a24680cbea8b9f78890a0ca9205467afc9ca2b2e12de01bbd97271e34bd39df54319c1efa35fee3e5344ba0958fd01948214784c18bdca21ef8419f04e108ea09f06eaea285f64812b98bada045d19971e0a4e566bd5d8fcdfb0c0fd243e9efa3733fb4f80d77438bd1698577a00fac3ae214e57a589a3dc3d5e5249cb2ab1966f73d35fac13b448270827d1effa0c785693d9760e93b431bf4b1d5933373a2ef1fe20599a38f3ce7c9643c2e9f23a0bdbe251449087722a740e7bdc0801bf55f3849e23e63d9dda2a8409d5163cd01a00f6e4f80e267fafdd75194ca49ac0eb7144bb6dcbbe0d50e810c9386b876524580', - 
'0xf90211a0b719adad765af02b76641e4ac0a5eb918f5c52e9cf0f38f0f507e4e8d4bb1456a0488e936d22182c75c0ec64be2e1e5f0b2066890719376ea408560a182988425da06ee266499e1f3d0c3d3c82e2085fa76c42324298736566ed40059de26880e7a9a09fa4124da658c059955c51944334a7891d0c8805f114d0a857079e920cbe6f6ca074271a2e9c903cb19f1b1cd3ef7c2f8260968be6aaac50cc6d7f8370c225f390a05457b729e133026647b6d98180bbbc56076f454fb291879a0c16b22da2a335c5a072031df309f78657aee2acb7b43a486effb4ecd68707d8a438c113bfaf6f1913a0dc0fba7acc1c0a48fc5c978af68fb20c9acaafc7c7515040b1448f324da6050aa0295ff43c4950ab5dee47b2f9e8a04d6a80180643e96488b152ddbd71e25c3b45a0b435feea8e8a46b90fc0156339bce6a210a314694925976b5c82892e1befaaada087dbef5907ae3f99cbe9de597444d7cd15388ccbe88c1b36406f1dad4b0e10eca0f2f0da32846e51736baa70f8bb7922b9fe74002df69ae9d6af48115264b959e9a0462ec92782e4c8f04061daa351713be998149398a2934b4c816b2b9c54e7968da069d20640c46c43d8c5feb541fb0327481d985b623e4f91bea6109d66f486798ea0104e278ae371a220a0d56a72e70ee9657e333baae96329cc862d96eab978804fa06ad2bac3206493db0c51b790f25ecb10ac634112c188c12f5e65496fc14061d180', - 
'0xf901f1a01bce8a1cac817f9bd318953b01214d46d0c2ffcffe3f22c81901d9fb8aa55009a0b4880ebbfa94b50526e3de8b46ac96ea60dda4f4edcb1e0316b0299a5d30b04ca0e0d4603a3cd66de5abbe1bb435ed7c317b9edfdad08a0afe84eba49b9fcf088da0c78be3a18158fcef5f88ecd1044da21d03b37d91b906f1abf1ae4cc753088122a008bb32eda0081f564b3426a9ffdd06d9e2856b498b47315622058f176626ed1280a05f6af6349189ad63f9a3af757da563c33e42ffffe1f69a9d4855957920c583fca09c3789f507808280b4a7c4e6234d6582337a2aae5d394111efb07e55e3c1c448a0b7234c0127f2d87aa64f17f09d7d1d72f5701d5410459309da5d15979b6c8c9aa066aabcac035cc9a5fd651bd52328a36a37d4762a6491eb2808af5267acb3f775a0b2d7d676b32bcfd5e8df9cd7f95a9bb91eac071a5b881d9fbc4d9cee0fafedf6a0102c6f1a447995d714d64ab2729b4261df1226374c2f4844f29b2edc69a8b46ca0d03a7b0103fbcba49b8573b566d50d117b00b2c69c048148ef8795fa0a63c7efa0cf6ad8ab9618d75f6d00f49e7b496c77f4591869bc2d0a3ff65d503b2383cfa9a06488cd46027de9ede4d7a7e10327e673234273533310addef6dc3a969aad0bdea0225875ae810220c85166fe921555be9efacceae0aa4654e9fdc2df25cbd1642380', - '0xf891a01cc2e5507a5150448fe06d254adc104702198a9f8eb5afb15567e80282229e2f80808080808080a04ad7cdbaba63f4b3b9c397858d06888424b7a9aa49d59f9c24fe54211b11d1e68080a09af52c684dd75b985f4aed07ea00ca7ac18201d717064f657fb86f9427aded33808080a03e61dcabfaf134b2b84b92607a7d7abf5b7950f05129a63e77c1d97d7c5e411580', - '0xeb9f20cb3e0c7eaed59eb82ba9e6f55fbf77c28472e242e7bfa15f1e2c3305ef528a8901523b25a875df6c79', - ], - value: '0x1523b25a875df6c79', - }, - ], -} - -// Data for the block we used to get the mainnet subgraph proof. -// This was obtained using eth_getBlockByNumber, and we only kept -// the fields we needed to reconstruct the block header. 
-const mainnetSubgraphBlockData = { - parentHash: '0x402376f31f89f631e5372b7f6522bc8465fa0e5eebf2eae46b8a7725c685cbd9', - sha3Uncles: '0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347', - miner: '0x95222290dd7278aa3ddd389cc1e1d165cc4bafe5', - stateRoot: '0x9d63f5e0289258a0566eaf260c79f152c1ddd624735f2698d9eac5106cfe7852', - transactionsRoot: '0x5d3fca3e5a32dfc190dce3412479e4f3ece7492d103e9eb80b74f3decfda2aa8', - receiptsRoot: '0x0bad122ad39e4b2affe59b70ac5e2062533d3ce61c7f2c077cdebb18d8dafbba', - logsBloom: - '0x3c247501c104808992481280850305232000084104000910020156c4d46009405409158e041824160e04180070010504020881580acc3c200300408001f01011400681100609042e28020188c030447204c46005204a4a2860c0c528b20030009e4a0880128ac0e1150564802c00aad000006308001906204200001000282008404585438303310385cc8780011840c61024008101009f4c832300406818c00c9a18414a000070430a0160b10940612c00c0020180132003c02f0242a0198000230aba568001a250920c19000c6310010e2702501086401840285917098160395239221c0c0288620001f140010588800310512110ec04c14004e840c88271d2', - difficulty: '0x0', - number: '0xf2626a', - gasLimit: '0x1c9c380', - gasUsed: '0x6ae2b2', - timestamp: '0x6362dbc3', - extraData: '0x6265617665726275696c642e6f7267', - mixHash: '0x1751b7bb3547c7f27cc383bd35dcbf06a24f9a7629a3c963f75029828fe0c67e', - nonce: '0x0000000000000000', - baseFeePerGas: '0x431ed95bc', -} - describe('L2GNS', () => { let me: Account let other: Account let governor: Account - let tokenSender: Account - let l1Receiver: Account - let l2Receiver: Account let mockRouter: Account let mockL1GRT: Account let mockL1Gateway: Account let mockL1GNS: Account - let pauseGuardian: Account let fixture: NetworkFixture let fixtureContracts: L2FixtureContracts @@ -228,7 +73,6 @@ describe('L2GNS', () => { return { l1SubgraphId: await buildSubgraphID(me.address, toBN('1'), 1), curatedTokens: toGRT('1337'), - lockBlockhash: randomHexBytes(32), subgraphMetadata: randomHexBytes(), versionMetadata: randomHexBytes(), nSignal: 
toBN('4567'), @@ -237,14 +81,13 @@ describe('L2GNS', () => { const migrateMockSubgraphFromL1 = async function ( l1SubgraphId: string, curatedTokens: BigNumber, - lockBlockhash: string, subgraphMetadata: string, versionMetadata: string, nSignal: BigNumber, ) { const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [l1SubgraphId, me.address, lockBlockhash, nSignal], + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -260,19 +103,7 @@ describe('L2GNS', () => { before(async function () { newSubgraph0 = buildSubgraph() - ;[ - me, - other, - governor, - tokenSender, - l1Receiver, - mockRouter, - mockL1GRT, - mockL1Gateway, - l2Receiver, - pauseGuardian, - mockL1GNS, - ] = await getAccounts() + ;[me, other, governor, mockRouter, mockL1GRT, mockL1Gateway, mockL1GNS] = await getAccounts() fixture = new NetworkFixture() fixtureContracts = await fixture.loadL2(governor.signer) @@ -297,29 +128,12 @@ describe('L2GNS', () => { await fixture.tearDown() }) - describe('enabling and disabling claiming through proofs', function () { - it('enables and disables mptClaimingEnabled and emits an event', async function () { - expect(await gns.mptClaimingEnabled()).eq(false) - const tx = gns.connect(governor.signer).setMPTClaimingEnabled(true) - await expect(tx).emit(gns, 'MPTClaimingEnabled') - expect(await gns.mptClaimingEnabled()).eq(true) - - const tx2 = gns.connect(governor.signer).setMPTClaimingEnabled(false) - await expect(tx2).emit(gns, 'MPTClaimingDisabled') - expect(await gns.mptClaimingEnabled()).eq(false) - }) - it('can only be called by the governor', async function () { - const tx = gns.connect(me.signer).setMPTClaimingEnabled(true) - await expect(tx).revertedWith('Only Controller governor') - }) - }) describe('receiving a subgraph from L1 (onTokenTransfer)', function () { it('cannot be called by someone other than the 
L2GraphTokenGateway', async function () { - const { l1SubgraphId, curatedTokens, lockBlockhash, nSignal } = - await defaultL1SubgraphParams() + const { l1SubgraphId, curatedTokens, nSignal } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [l1SubgraphId, me.address, lockBlockhash, nSignal], + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], ) const tx = gns .connect(me.signer) @@ -327,11 +141,10 @@ describe('L2GNS', () => { await expect(tx).revertedWith('ONLY_GATEWAY') }) it('rejects calls if the L1 sender is not the L1GNS', async function () { - const { l1SubgraphId, curatedTokens, lockBlockhash, nSignal } = - await defaultL1SubgraphParams() + const { l1SubgraphId, curatedTokens, nSignal } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [l1SubgraphId, me.address, lockBlockhash, nSignal], + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], ) const tx = gatewayFinalizeTransfer(me.address, gns.address, curatedTokens, callhookData) @@ -340,11 +153,10 @@ describe('L2GNS', () => { it('creates a subgraph in a disabled state', async function () { const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) const curatedTokens = toGRT('1337') - const lockBlockhash = randomHexBytes(32) const nSignal = toBN('4567') const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [l1SubgraphId, me.address, lockBlockhash, nSignal], + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], ) const tx = gatewayFinalizeTransfer( mockL1GNS.address, @@ -361,12 +173,9 @@ describe('L2GNS', () => { const migrationData = await gns.subgraphL2MigrationData(l1SubgraphId) const subgraphData = await gns.subgraphs(l1SubgraphId) - expect(migrationData.lockedAtBlock).eq(0) // We don't use this in L2 
expect(migrationData.tokens).eq(curatedTokens) - expect(migrationData.lockedAtBlockHash).eq(lockBlockhash) - expect(migrationData.l1Done).eq(true) // We don't use this in L2 + expect(migrationData.l1Done).eq(true) expect(migrationData.l2Done).eq(false) - expect(migrationData.deprecated).eq(false) // We don't use this in L2 expect(subgraphData.vSignal).eq(0) expect(subgraphData.nSignal).eq(nSignal) @@ -382,11 +191,10 @@ describe('L2GNS', () => { const l1SubgraphId = await buildSubgraphID(me.address, toBN('0'), 1) const curatedTokens = toGRT('1337') - const lockBlockhash = randomHexBytes(32) const nSignal = toBN('4567') const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [l1SubgraphId, me.address, lockBlockhash, nSignal], + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], ) const tx = gatewayFinalizeTransfer( mockL1GNS.address, @@ -403,12 +211,9 @@ describe('L2GNS', () => { const migrationData = await gns.subgraphL2MigrationData(l1SubgraphId) const subgraphData = await gns.subgraphs(l1SubgraphId) - expect(migrationData.lockedAtBlock).eq(0) // We don't use this in L2 expect(migrationData.tokens).eq(curatedTokens) - expect(migrationData.lockedAtBlockHash).eq(lockBlockhash) - expect(migrationData.l1Done).eq(true) // We don't use this in L2 + expect(migrationData.l1Done).eq(true) expect(migrationData.l2Done).eq(false) - expect(migrationData.deprecated).eq(false) // We don't use this in L2 expect(subgraphData.vSignal).eq(0) expect(subgraphData.nSignal).eq(nSignal) @@ -432,17 +237,11 @@ describe('L2GNS', () => { describe('finishing a subgraph migration from L1', function () { it('publishes the migrated subgraph and mints signal with no tax', async function () { - const { - l1SubgraphId, - curatedTokens, - lockBlockhash, - subgraphMetadata, - versionMetadata, - nSignal, - } = await defaultL1SubgraphParams() + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + await 
defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [l1SubgraphId, me.address, lockBlockhash, nSignal], + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) // Calculate expected signal before minting @@ -475,22 +274,15 @@ describe('L2GNS', () => { const migrationDataAfter = await gns.subgraphL2MigrationData(l1SubgraphId) expect(subgraphAfter.vSignal).eq(expectedSignal) expect(migrationDataAfter.l2Done).eq(true) - expect(migrationDataAfter.deprecated).eq(false) expect(subgraphAfter.disabled).eq(false) expect(subgraphAfter.subgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) }) it('cannot be called by someone other than the subgraph owner', async function () { - const { - l1SubgraphId, - curatedTokens, - lockBlockhash, - subgraphMetadata, - versionMetadata, - nSignal, - } = await defaultL1SubgraphParams() + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [l1SubgraphId, me.address, lockBlockhash, nSignal], + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -533,17 +325,11 @@ describe('L2GNS', () => { await expect(tx).revertedWith('INVALID_SUBGRAPH') }) it('accepts calls to a pre-curated subgraph deployment', async function () { - const { - l1SubgraphId, - curatedTokens, - lockBlockhash, - subgraphMetadata, - versionMetadata, - nSignal, - } = await defaultL1SubgraphParams() + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [l1SubgraphId, 
me.address, lockBlockhash, nSignal], + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -584,7 +370,6 @@ describe('L2GNS', () => { const migrationDataAfter = await gns.subgraphL2MigrationData(l1SubgraphId) expect(subgraphAfter.vSignal).eq(expectedSignal) expect(migrationDataAfter.l2Done).eq(true) - expect(migrationDataAfter.deprecated).eq(false) expect(subgraphAfter.disabled).eq(false) expect(subgraphAfter.subgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) expect(await curation.getCurationPoolTokens(newSubgraph0.subgraphDeploymentID)).eq( @@ -594,12 +379,11 @@ describe('L2GNS', () => { it('rejects calls if the subgraph deployment ID is zero', async function () { const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) const curatedTokens = toGRT('1337') - const lockBlockhash = randomHexBytes(32) const metadata = randomHexBytes() const nSignal = toBN('4567') const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'bytes32', 'uint256'], - [l1SubgraphId, me.address, lockBlockhash, nSignal], + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -609,632 +393,17 @@ describe('L2GNS', () => { await expect(tx).revertedWith('GNS: deploymentID != 0') }) }) - - describe('claiming a curator balance using a proof', function () { - it('verifies a proof and assigns a curator balance', async function () { - const l1Subgraph = mainnetSubgraphWithProof - const versionMetadata = randomHexBytes() // Dummy value - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS 
is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) - await expect(tx) - .emit(gns, 'CuratorBalanceClaimed') - .withArgs( - l1Subgraph.subgraphId, - l1Subgraph.curator, - l1Subgraph.curator, - l1Subgraph.getProofResponse.storageProof[0].value, - ) - const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) - expect(curatorBalance).eq(l1Subgraph.getProofResponse.storageProof[0].value) - }) - it('adds the balance to any existing balance for the curator', async function () { - const l1Subgraph = mainnetSubgraphWithProof - const versionMetadata = randomHexBytes() - - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - - // We add some 
pre-existing balance on L2 to the curator: - await grt.connect(governor.signer).mint(l1Subgraph.curator, toGRT('100')) - await grt.connect(curatorSigner).approve(gns.address, toGRT('100')) - await gns.connect(curatorSigner).mintSignal(l1Subgraph.subgraphId, toGRT('100'), toBN('0')) - const prevSignal = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) - expect(prevSignal).not.eq(toBN(0)) - - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) - const expectedClaimedSignal = l1Subgraph.getProofResponse.storageProof[0].value - await expect(tx) - .emit(gns, 'CuratorBalanceClaimed') - .withArgs( - l1Subgraph.subgraphId, - l1Subgraph.curator, - l1Subgraph.curator, - expectedClaimedSignal, - ) - const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) - expect(curatorBalance).eq(prevSignal.add(expectedClaimedSignal)) - }) - it('rejects calls with an invalid proof (e.g. from a different L1GNS address)', async function () { - const l1Subgraph = mainnetSubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - // We haven't updated the L1 counterpart address, so GNS will not accept the account proof as valid - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) - // The key for the L1 
counterpart is not present in the proof, - // so the verifier will not be able to find a node for the expected path - await expect(tx).revertedWith('MPT: invalid node hash') - }) - it('rejects calls with an invalid proof (e.g. from a different curator)', async function () { - const l1Subgraph = mainnetSubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const tx = gns - .connect(me.signer) - .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) - // The curator slot we're looking for isn't present in the proof, - // so the verifier will fail when looking for it - await expect(tx).revertedWith('MPT: invalid node hash') - }) - it('rejects calls for a subgraph that was not migrated', async function () { - const l1Subgraph = mainnetSubgraphWithProof - const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const tx = gns - .connect(me.signer) - 
.claimL1CuratorBalance(l2Subgraph.id!, blockHeaderRLP, proofRLP) - await expect(tx).revertedWith('!MIGRATED') - }) - it('rejects calls if the balance was already claimed', async function () { - const l1Subgraph = mainnetSubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) - await expect(tx) - .emit(gns, 'CuratorBalanceClaimed') - .withArgs( - l1Subgraph.subgraphId, - l1Subgraph.curator, - l1Subgraph.curator, - l1Subgraph.getProofResponse.storageProof[0].value, - ) - const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) - expect(curatorBalance).eq(l1Subgraph.getProofResponse.storageProof[0].value) - - // Now we try to double-claim - const tx2 = gns - .connect(curatorSigner) - .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) - await expect(tx2).revertedWith('ALREADY_CLAIMED') - }) - it('rejects calls with a proof from a different block', async function () { - const l1Subgraph = mainnetSubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and 
migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(mainnetProofForDifferentBlock) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) - // The root hash from the block header won't match the root hash from the proof - await expect(tx).revertedWith('MPT: invalid root hash') - }) - it('rejects calls with a proof from a legacy subgraph', async function () { - const l1Subgraph = mainnetLegacySubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - 
const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) - - await expect(tx).revertedWith('MPT: invalid node hash') - }) - it('rejects calls if MPT claiming is not enabled', async function () { - const l1Subgraph = mainnetSubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalance(l1Subgraph.subgraphId, blockHeaderRLP, proofRLP) - await expect(tx).revertedWith('MPT_CLAIMING_DISABLED') - }) - }) - describe('claiming a curator balance for a legacy subgraph using a proof', function () { - it('verifies a proof and assigns a curator balance', async function () { - const l1Subgraph = mainnetLegacySubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await 
gns.connect(governor.signer).setMPTClaimingEnabled(true) - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalanceForLegacySubgraph( - l1Subgraph.account, - l1Subgraph.accountSeqId, - blockHeaderRLP, - proofRLP, - ) - await expect(tx) - .emit(gns, 'CuratorBalanceClaimed') - .withArgs( - l1Subgraph.subgraphId, - l1Subgraph.curator, - l1Subgraph.curator, - l1Subgraph.getProofResponse.storageProof[0].value, - ) - const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) - expect(curatorBalance).eq(l1Subgraph.getProofResponse.storageProof[0].value) - }) - it('adds the balance to any existing balance for the curator', async function () { - const l1Subgraph = mainnetLegacySubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - - // We add some pre-existing balance on L2 to the curator: - await grt.connect(governor.signer).mint(l1Subgraph.curator, 
toGRT('100')) - await grt.connect(curatorSigner).approve(gns.address, toGRT('100')) - await gns.connect(curatorSigner).mintSignal(l1Subgraph.subgraphId, toGRT('100'), toBN('0')) - const prevSignal = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) - expect(prevSignal).not.eq(toBN(0)) - - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalanceForLegacySubgraph( - l1Subgraph.account, - l1Subgraph.accountSeqId, - blockHeaderRLP, - proofRLP, - ) - await expect(tx) - .emit(gns, 'CuratorBalanceClaimed') - .withArgs( - l1Subgraph.subgraphId, - l1Subgraph.curator, - l1Subgraph.curator, - l1Subgraph.getProofResponse.storageProof[0].value, - ) - const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) - expect(curatorBalance).eq(prevSignal.add(l1Subgraph.getProofResponse.storageProof[0].value)) - }) - it('rejects calls with an invalid proof (e.g. from a different L1GNS address)', async function () { - const l1Subgraph = mainnetLegacySubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - // We haven't updated the L1 counterpart address, so GNS will not accept the account proof as valid - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalanceForLegacySubgraph( - l1Subgraph.account, - l1Subgraph.accountSeqId, - blockHeaderRLP, - proofRLP, - ) - // The key for the L1 counterpart is not present 
in the proof, - // so the verifier will not be able to find a node for the expected path - await expect(tx).revertedWith('MPT: invalid node hash') - }) - it('rejects calls with an invalid proof (e.g. from a different curator)', async function () { - const l1Subgraph = mainnetLegacySubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const tx = gns - .connect(me.signer) - .claimL1CuratorBalanceForLegacySubgraph( - l1Subgraph.account, - l1Subgraph.accountSeqId, - blockHeaderRLP, - proofRLP, - ) - // The curator slot we're looking for isn't present in the proof, - // so the verifier will fail when looking for it - await expect(tx).revertedWith('MPT: invalid node hash') - }) - it('rejects calls for a subgraph that was not migrated', async function () { - const l1Subgraph = mainnetLegacySubgraphWithProof - const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const tx = gns - 
.connect(me.signer) - .claimL1CuratorBalanceForLegacySubgraph(me.address, toBN(0), blockHeaderRLP, proofRLP) - await expect(tx).revertedWith('!MIGRATED') - }) - it('rejects calls if the balance was already claimed', async function () { - const l1Subgraph = mainnetLegacySubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalanceForLegacySubgraph( - l1Subgraph.account, - l1Subgraph.accountSeqId, - blockHeaderRLP, - proofRLP, - ) - await expect(tx) - .emit(gns, 'CuratorBalanceClaimed') - .withArgs( - l1Subgraph.subgraphId, - l1Subgraph.curator, - l1Subgraph.curator, - l1Subgraph.getProofResponse.storageProof[0].value, - ) - const curatorBalance = await gns.getCuratorSignal(l1Subgraph.subgraphId, l1Subgraph.curator) - expect(curatorBalance).eq(l1Subgraph.getProofResponse.storageProof[0].value) - - // Now we try to double-claim - const tx2 = gns - .connect(curatorSigner) - .claimL1CuratorBalanceForLegacySubgraph( - l1Subgraph.account, - l1Subgraph.accountSeqId, - blockHeaderRLP, - proofRLP, - ) - await expect(tx2).revertedWith('ALREADY_CLAIMED') - }) - it('rejects calls with a proof from a non-legacy subgraph', 
async function () { - const l1Subgraph = mainnetSubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - await gns.connect(governor.signer).setMPTClaimingEnabled(true) - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - // For the mainnet subgraph we picked, the curator is also the owner, - // and it was their first subgraph, so the accountSeqId is 0 - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalanceForLegacySubgraph( - l1Subgraph.curator, - toBN('0'), - blockHeaderRLP, - proofRLP, - ) - - await expect(tx).revertedWith('MPT: invalid node hash') - }) - it('rejects calls if MPT claiming is not enabled', async function () { - const l1Subgraph = mainnetLegacySubgraphWithProof - const versionMetadata = randomHexBytes() - // Now we pretend the L1 subgraph was locked and migrated at the specified block - await migrateMockSubgraphFromL1( - l1Subgraph.subgraphId, - l1Subgraph.curatedTokens, - l1Subgraph.blockhash, - l1Subgraph.metadata, - versionMetadata, - l1Subgraph.nSignal, - ) - - // We need L2GNS to think the mainnet GNS is its counterpart for the proof to be valid - await gns - .connect(governor.signer) - .setCounterpartGNSAddress(l1Subgraph.getProofResponse.address) - - const blockHeaderRLP = getBlockHeaderRLP(mainnetSubgraphBlockData) - const proofRLP = 
encodeMPTStorageProofRLP(l1Subgraph.getProofResponse) - - const curatorSigner = await impersonateAccount(l1Subgraph.curator) - await setAccountBalance(l1Subgraph.curator, parseEther('1000')) - const tx = gns - .connect(curatorSigner) - .claimL1CuratorBalanceForLegacySubgraph( - l1Subgraph.account, - l1Subgraph.accountSeqId, - blockHeaderRLP, - proofRLP, - ) - await expect(tx).revertedWith('MPT_CLAIMING_DISABLED') - }) - }) describe('claiming a curator balance with a message from L1', function () { it('assigns a curator balance to a beneficiary', async function () { const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) // Eth for gas: await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) - const { - l1SubgraphId, - curatedTokens, - lockBlockhash, - subgraphMetadata, - versionMetadata, - nSignal, - } = await defaultL1SubgraphParams() + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + await defaultL1SubgraphParams() await migrateMockSubgraphFromL1( l1SubgraphId, curatedTokens, - lockBlockhash, subgraphMetadata, versionMetadata, nSignal, @@ -1256,18 +425,11 @@ describe('L2GNS', () => { // Eth for gas: await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) - const { - l1SubgraphId, - curatedTokens, - lockBlockhash, - subgraphMetadata, - versionMetadata, - nSignal, - } = await defaultL1SubgraphParams() + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + await defaultL1SubgraphParams() await migrateMockSubgraphFromL1( l1SubgraphId, curatedTokens, - lockBlockhash, subgraphMetadata, versionMetadata, nSignal, @@ -1290,18 +452,11 @@ describe('L2GNS', () => { expect(l2CuratorBalance).eq(prevSignal.add(toGRT('10'))) }) it('can only be called from the counterpart GNS L2 alias', async function () { - const { - l1SubgraphId, - curatedTokens, - lockBlockhash, - subgraphMetadata, - versionMetadata, - nSignal, - } = await defaultL1SubgraphParams() + 
const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + await defaultL1SubgraphParams() await migrateMockSubgraphFromL1( l1SubgraphId, curatedTokens, - lockBlockhash, subgraphMetadata, versionMetadata, nSignal, @@ -1351,18 +506,11 @@ describe('L2GNS', () => { // Eth for gas: await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) - const { - l1SubgraphId, - curatedTokens, - lockBlockhash, - subgraphMetadata, - versionMetadata, - nSignal, - } = await defaultL1SubgraphParams() + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + await defaultL1SubgraphParams() await migrateMockSubgraphFromL1( l1SubgraphId, curatedTokens, - lockBlockhash, subgraphMetadata, versionMetadata, nSignal, diff --git a/test/lib/mptProofUtils.ts b/test/lib/mptProofUtils.ts deleted file mode 100644 index 0de2a374c..000000000 --- a/test/lib/mptProofUtils.ts +++ /dev/null @@ -1,79 +0,0 @@ -import { hexlify, hexZeroPad, RLP } from 'ethers/lib/utils' - -const BLOCK_HEADER_FIELDS = [ - 'parentHash', - 'sha3Uncles', - 'miner', - 'stateRoot', - 'transactionsRoot', - 'receiptsRoot', - 'logsBloom', - 'difficulty', - 'number', - 'gasLimit', - 'gasUsed', - 'timestamp', - 'extraData', - 'mixHash', - 'nonce', - 'baseFeePerGas', -] - -// Expected to come from an eth_getBlockByNumber call -interface GetBlockResponse { - parentHash: string - sha3Uncles: string - miner: string - stateRoot: string - transactionsRoot: string - receiptsRoot: string - logsBloom: string - difficulty: string - number: string - gasLimit: string - gasUsed: string - timestamp: string - extraData: string - mixHash: string - nonce: string - baseFeePerGas: string -} - -interface SlotProof { - key: string - proof: Array - value: string -} -interface GetProofResponse { - accountProof: Array - address: string - balance: string - codeHash: string - nonce: string - storageHash: string - storageProof: Array -} - -const toNonzeroEvenLengthHex = (hex: string): 
string => { - if (hex == '0x0') { - return '0x' - } else if (hex.length % 2 == 0) { - return hex - } else { - return hexZeroPad(hex, Math.floor(hex.length / 2)) - } -} - -export const getBlockHeaderRLP = (block: GetBlockResponse): string => { - const header = BLOCK_HEADER_FIELDS.map((field) => hexlify(toNonzeroEvenLengthHex(block[field]))) - return RLP.encode(header) -} - -export const encodeMPTStorageProofRLP = (proof: GetProofResponse): string => { - if (proof.storageProof.length !== 1) { - throw new Error('Expected exactly one storage slot proof') - } - const accountProof = proof.accountProof.map((node) => RLP.decode(hexlify(node))) - const storageProof = proof.storageProof[0].proof.map((node) => RLP.decode(hexlify(node))) - return RLP.encode([accountProof, storageProof]) -} diff --git a/test/mpt.test.ts b/test/mpt.test.ts deleted file mode 100644 index 4773d846c..000000000 --- a/test/mpt.test.ts +++ /dev/null @@ -1,294 +0,0 @@ -import { expect } from 'chai' -import { ethers, ContractTransaction, BigNumber, Event } from 'ethers' -import { keccak256, RLP } from 'ethers/lib/utils' -import { Trie } from '@ethereumjs/trie' - -import { MerklePatriciaProofVerifierMock } from '../build/types/MerklePatriciaProofVerifierMock' -import { deployContract } from './lib/deployment' -import { Account, getAccounts, randomHexBytes } from './lib/testHelpers' - -const bufferToHex = (buf: Buffer): string => { - return '0x' + buf.toString('hex') -} - -const encodeProofRLP = (proof: Array): string => { - const decodedArr = proof.map((v) => RLP.decode(bufferToHex(v))) - return RLP.encode(decodedArr) -} - -describe('MerklePatriciaProofVerifier', () => { - let me: Account - let mpt: MerklePatriciaProofVerifierMock - - before(async function () { - ;[me] = await getAccounts() - mpt = (await deployContract( - 'MerklePatriciaProofVerifierMock', - me.signer, - )) as unknown as MerklePatriciaProofVerifierMock - }) - - it('verifies a valid proof of exclusion for the empty tree', async function 
() { - const trie = new Trie() - const key = Buffer.from('whatever') - const proof = await trie.createProof(key) - - const encodedProof = encodeProofRLP(proof) - - const val = await mpt.extractProofValue( - bufferToHex(trie.root()), - bufferToHex(key), - encodedProof, - ) - expect(val).to.equal('0x') - }) - - it('rejects an invalid root for the empty tree', async function () { - const trie = new Trie() - const key = Buffer.from('whatever') - const proof = await trie.createProof(key) - - const encodedProof = encodeProofRLP(proof) - - const call = mpt.extractProofValue(randomHexBytes(), bufferToHex(key), encodedProof) - await expect(call).revertedWith('MPT: invalid empty tree root') - }) - it('verifies a valid proof of inclusion', async function () { - const trie = new Trie() - const key = Buffer.from('foo') - const value = Buffer.from('bar') - await trie.put(key, value) - - // We add a few more random values - await trie.put(Buffer.from('food'), Buffer.from('baz')) - await trie.put(Buffer.from('fob'), Buffer.from('bat')) - await trie.put(Buffer.from('zort'), Buffer.from('narf')) - - const proof = await trie.createProof(key) - - const encodedProof = encodeProofRLP(proof) - - const val = await mpt.extractProofValue( - bufferToHex(trie.root()), - bufferToHex(key), - encodedProof, - ) - expect(val).to.equal(bufferToHex(value)) - }) - it('verifies a valid proof of exclusion based on a divergent node', async function () { - const trie = new Trie() - const key = Buffer.from('foo') - - // We add a few more random values - await trie.put(Buffer.from('food'), Buffer.from('baz')) - await trie.put(Buffer.from('fob'), Buffer.from('bat')) - await trie.put(Buffer.from('zort'), Buffer.from('narf')) - - const proof = await trie.createProof(key) - - // The path for "food" should form a divergent path for "foo" - const encodedProof = encodeProofRLP(proof) - - const val = await mpt.extractProofValue( - bufferToHex(trie.root()), - bufferToHex(key), - encodedProof, - ) - 
expect(val).to.equal('0x') - }) - it('verifies a valid proof of exclusion based on a leaf node', async function () { - const trie = new Trie() - const key = Buffer.from('food') - - // We add a few more random values - await trie.put(Buffer.from('foo'), Buffer.from('baz')) - - const proof = await trie.createProof(key) - - // The path for "foo" should be a leaf node, which proofs "food" is excluded - const encodedProof = encodeProofRLP(proof) - - const val = await mpt.extractProofValue( - bufferToHex(trie.root()), - bufferToHex(key), - encodedProof, - ) - expect(val).to.equal('0x') - }) - it('verifies a valid proof of exclusion based on an empty leaf on a branch node', async function () { - const trie = new Trie() - const key = Buffer.from('zork') - - await trie.put(Buffer.from('zor'), Buffer.from('baz')) - - // The fact that we have two keys that only differ in the - // last nibble gives us a proof that ends with a branch node - // with an empty value for the last nibble. - await trie.put(Buffer.from('zorl'), Buffer.from('bart')) - await trie.put(Buffer.from('zorm'), Buffer.from('bort')) - - const proof = await trie.createProof(key) - const encodedProof = encodeProofRLP(proof) - - const val = await mpt.extractProofValue( - bufferToHex(trie.root()), - bufferToHex(key), - encodedProof, - ) - expect(val).eq('0x') - }) - it('rejects a proof with an invalid value', async function () { - const trie = new Trie() - const key = Buffer.from('foo') - const value = Buffer.from('bar') - await trie.put(key, value) - - // We add a few more random values - await trie.put(Buffer.from('food'), Buffer.from('baz')) - await trie.put(Buffer.from('fob'), Buffer.from('bat')) - await trie.put(Buffer.from('zort'), Buffer.from('narf')) - - const proof = await trie.createProof(key) - - const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) - decodedProof[3][16] = bufferToHex(Buffer.from('wrong')) - const reEncodedProof = decodedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 
'hex')) - - const encodedProof = encodeProofRLP(reEncodedProof) - - const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) - await expect(call).revertedWith('MPT: invalid node hash') - }) - it('rejects a proof of exclusion where the divergent node is not last', async function () { - const trie = new Trie() - const key = Buffer.from('foo') - - // We add a few more random values - await trie.put(Buffer.from('food'), Buffer.from('baz')) - await trie.put(Buffer.from('fob'), Buffer.from('bat')) - await trie.put(Buffer.from('zort'), Buffer.from('narf')) - - const proof = await trie.createProof(key) - - const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) - // We add a random node to the end of the proof - decodedProof.push(bufferToHex(Buffer.from('wrong'))) - const reEncodedProof = decodedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 'hex')) - const encodedProof = encodeProofRLP(reEncodedProof) - - const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) - await expect(call).revertedWith('MPT: divergent node not last') - }) - it('rejects a proof of inclusion with garbage at the end', async function () { - const trie = new Trie() - const key = Buffer.from('foo') - const value = Buffer.from('bar') - await trie.put(key, value) - - // We add a few more random values - await trie.put(Buffer.from('food'), Buffer.from('baz')) - await trie.put(Buffer.from('fob'), Buffer.from('bat')) - await trie.put(Buffer.from('zort'), Buffer.from('narf')) - - const proof = await trie.createProof(key) - const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) - // We add a random node to the end of the proof - decodedProof.push(bufferToHex(Buffer.from('wrong'))) - const reEncodedProof = decodedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 'hex')) - const encodedProof = encodeProofRLP(reEncodedProof) - - const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), 
encodedProof) - await expect(call).revertedWith('MPT: end not last') - }) - it('rejects a proof of inclusion with garbage after a leaf node', async function () { - const trie = new Trie() - const key = Buffer.from('foo') - const value = Buffer.from('bar') - await trie.put(key, value) - - const proof = await trie.createProof(key) - const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) - // We add a random node to the end of the proof - decodedProof.push(bufferToHex(Buffer.from('wrong'))) - const reEncodedProof = decodedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 'hex')) - const encodedProof = encodeProofRLP(reEncodedProof) - - const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) - await expect(call).revertedWith('MPT: leaf node not last') - }) - it('rejects a truncated proof of inclusion', async function () { - const trie = new Trie() - const key = Buffer.from('foo') - const value = Buffer.from('bar') - await trie.put(key, value) - - // We add a few more random values - await trie.put(Buffer.from('food'), Buffer.from('baz')) - await trie.put(Buffer.from('fob'), Buffer.from('bat')) - await trie.put(Buffer.from('zort'), Buffer.from('narf')) - - const proof = await trie.createProof(key) - const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) - // We remove some nodes from the end, leaving a non-leaf node last - const truncatedProof = [decodedProof[0], decodedProof[1]] - const reEncodedProof = truncatedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 'hex')) - const encodedProof = encodeProofRLP(reEncodedProof) - - const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) - await expect(call).revertedWith('MPT: non-leaf node last') - }) - it('rejects a proof of exclusion with a non-last empty byte sequence', async function () { - const trie = new Trie() - const key = Buffer.from('zork') - - await trie.put(Buffer.from('zor'), Buffer.from('baz')) - - // The fact 
that we have two keys that only differ in the - // last nibble gives us a proof that ends with a branch node - // with an empty value for the last nibble. - await trie.put(Buffer.from('zorl'), Buffer.from('bart')) - await trie.put(Buffer.from('zorm'), Buffer.from('bort')) - - const proof = await trie.createProof(key) - const decodedProof = proof.map((v) => RLP.decode(bufferToHex(v))) - // We add a random node to the end of the proof - decodedProof.push(bufferToHex(Buffer.from('wrong'))) - const reEncodedProof = decodedProof.map((v) => Buffer.from(RLP.encode(v).slice(2), 'hex')) - const encodedProof = encodeProofRLP(reEncodedProof) - - const call = mpt.extractProofValue(bufferToHex(trie.root()), bufferToHex(key), encodedProof) - await expect(call).revertedWith('MPT: empty leaf not last') - }) - it('verifies an inclusion proof for a trie that uses hashed keys', async function () { - const trie = new Trie({ useKeyHashing: true }) - const key = Buffer.from('something') - const value = Buffer.from('a value') - await trie.put(key, value) - - // We add a few more random values - await trie.put(Buffer.from('something else'), Buffer.from('baz')) - await trie.put(Buffer.from('more stuff'), Buffer.from('bat')) - await trie.put(Buffer.from('zort'), Buffer.from('narf')) - - const proof = await trie.createProof(key) - - const encodedProof = encodeProofRLP(proof) - const val = await mpt.extractProofValue(bufferToHex(trie.root()), keccak256(key), encodedProof) - await expect(val).eq(bufferToHex(value)) - }) - it('verifies an exclusion proof for a trie that uses hashed keys', async function () { - const trie = new Trie({ useKeyHashing: true }) - const key = Buffer.from('something') - - // We add a few more random values - await trie.put(Buffer.from('something else'), Buffer.from('baz')) - await trie.put(Buffer.from('more stuff'), Buffer.from('bat')) - await trie.put(Buffer.from('zort'), Buffer.from('narf')) - - const proof = await trie.createProof(key) - - const encodedProof = 
encodeProofRLP(proof) - const val = await mpt.extractProofValue(bufferToHex(trie.root()), keccak256(key), encodedProof) - await expect(val).eq('0x') - }) -}) From 8755a224a03b6fe7b212fecace7a51c88d717943 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 18 Nov 2022 09:49:21 +0100 Subject: [PATCH 054/112] fix: no need for the trie npm package anymore --- package.json | 1 - yarn.lock | 85 ---------------------------------------------------- 2 files changed, 86 deletions(-) diff --git a/package.json b/package.json index b3074359a..ef123981e 100644 --- a/package.json +++ b/package.json @@ -19,7 +19,6 @@ "@commitlint/cli": "^13.2.1", "@commitlint/config-conventional": "^13.2.0", "@defi-wonderland/smock": "^2.0.7", - "@ethereumjs/trie": "^5.0.1", "@ethersproject/experimental": "^5.6.0", "@graphprotocol/common-ts": "^1.8.3", "@nomiclabs/hardhat-ethers": "^2.0.2", diff --git a/yarn.lock b/yarn.lock index dbf97f84b..42084d9cd 100644 --- a/yarn.lock +++ b/yarn.lock @@ -390,22 +390,6 @@ ethereumjs-util "^7.1.1" miller-rabin "^4.0.0" -"@ethereumjs/rlp@^4.0.0", "@ethereumjs/rlp@^4.0.0-beta.2": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@ethereumjs/rlp/-/rlp-4.0.0.tgz#66719891bd727251a7f233f9ca80212d1994f8c8" - integrity sha512-LM4jS5n33bJN60fM5EC8VeyhUgga6/DjCPBV2vWjnfVtobqtOiNC4SQ1MRFqyBSmJGGdB533JZWewyvlcdJtkQ== - -"@ethereumjs/trie@^5.0.1": - version "5.0.1" - resolved "https://registry.yarnpkg.com/@ethereumjs/trie/-/trie-5.0.1.tgz#3cb5730450839c8a540ec48e16a7825a1d0897e5" - integrity sha512-MA8uXR2pa+r8+wBvlyeZoUewwbUZe4Iy4zKi38THP6/flYvIIY+bTVjf/EA3jMhH68j6nJH5SFu5wi5SBdS/2A== - dependencies: - "@ethereumjs/rlp" "^4.0.0" - "@ethereumjs/util" "^8.0.0" - "@types/readable-stream" "^2.3.13" - ethereum-cryptography "^1.1.2" - readable-stream "^3.6.0" - "@ethereumjs/tx@^3.2.1", "@ethereumjs/tx@^3.5.2": version "3.5.2" resolved "https://registry.yarnpkg.com/@ethereumjs/tx/-/tx-3.5.2.tgz#197b9b6299582ad84f9527ca961466fce2296c1c" @@ -422,15 +406,6 @@ 
"@ethereumjs/common" "^2.6.3" ethereumjs-util "^7.1.4" -"@ethereumjs/util@^8.0.0": - version "8.0.2" - resolved "https://registry.yarnpkg.com/@ethereumjs/util/-/util-8.0.2.tgz#b7348fc7253649b0f00685a94546c6eee1fad819" - integrity sha512-b1Fcxmq+ckCdoLPhVIBkTcH8szigMapPuEmD8EDakvtI5Na5rzmX1sBW73YQqaPc7iUxGCAzZP1LrFQ7aEMugA== - dependencies: - "@ethereumjs/rlp" "^4.0.0-beta.2" - async "^3.2.4" - ethereum-cryptography "^1.1.2" - "@ethereumjs/vm@^5.6.0": version "5.8.0" resolved "https://registry.yarnpkg.com/@ethereumjs/vm/-/vm-5.8.0.tgz#c9055f96afc13dd7b72893b57fa20027effea6fe" @@ -948,26 +923,11 @@ resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.0.0.tgz#d5e38bfbdaba174805a4e649f13be9a9ed3351ae" integrity sha512-DZVbtY62kc3kkBtMHqwCOfXrT/hnoORy5BJ4+HU1IR59X0KWAOqsfzQPcUl/lQLlG7qXbe/fZ3r/emxtAl+sqg== -"@noble/hashes@1.1.2": - version "1.1.2" - resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.1.2.tgz#e9e035b9b166ca0af657a7848eb2718f0f22f183" - integrity sha512-KYRCASVTv6aeUi1tsF8/vpyR7zpfs3FUzy2Jqm+MU+LmUKhQ0y2FpfwqkCcxSg2ua4GALJd8k2R76WxwZGbQpA== - -"@noble/hashes@~1.1.1": - version "1.1.3" - resolved "https://registry.yarnpkg.com/@noble/hashes/-/hashes-1.1.3.tgz#360afc77610e0a61f3417e497dcf36862e4f8111" - integrity sha512-CE0FCR57H2acVI5UOzIGSSIYxZ6v/HOhDR0Ro9VLyhnzLwx0o8W1mmgaqlEUx4049qJDlIBRztv5k+MM8vbO3A== - "@noble/secp256k1@1.5.5", "@noble/secp256k1@~1.5.2": version "1.5.5" resolved "https://registry.yarnpkg.com/@noble/secp256k1/-/secp256k1-1.5.5.tgz#315ab5745509d1a8c8e90d0bdf59823ccf9bcfc3" integrity sha512-sZ1W6gQzYnu45wPrWx8D3kwI2/U29VYTx9OjbDAd7jwRItJ0cSTMPRL/C8AWZFn9kWFLQGqEXVEE86w4Z8LpIQ== -"@noble/secp256k1@1.6.3", "@noble/secp256k1@~1.6.0": - version "1.6.3" - resolved "https://registry.yarnpkg.com/@noble/secp256k1/-/secp256k1-1.6.3.tgz#7eed12d9f4404b416999d0c87686836c4c5c9b94" - integrity sha512-T04e4iTurVy7I8Sw4+c5OSN9/RkPlo1uKxAomtxQNLq8j1uPAqnsqG1bqvY3Jv7c13gyr6dui0zmh/I3+f/JaQ== - "@nodelib/fs.scandir@2.1.5": version 
"2.1.5" resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" @@ -1111,11 +1071,6 @@ resolved "https://registry.yarnpkg.com/@scure/base/-/base-1.0.0.tgz#109fb595021de285f05a7db6806f2f48296fcee7" integrity sha512-gIVaYhUsy+9s58m/ETjSJVKHhKTBMmcRb9cEV5/5dwvfDlfORjKrFsDeDHWRrm6RjcPvCLZFwGJjAjLj1gg4HA== -"@scure/base@~1.1.0": - version "1.1.1" - resolved "https://registry.yarnpkg.com/@scure/base/-/base-1.1.1.tgz#ebb651ee52ff84f420097055f4bf46cfba403938" - integrity sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA== - "@scure/bip32@1.0.1": version "1.0.1" resolved "https://registry.yarnpkg.com/@scure/bip32/-/bip32-1.0.1.tgz#1409bdf9f07f0aec99006bb0d5827693418d3aa5" @@ -1125,15 +1080,6 @@ "@noble/secp256k1" "~1.5.2" "@scure/base" "~1.0.0" -"@scure/bip32@1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@scure/bip32/-/bip32-1.1.0.tgz#dea45875e7fbc720c2b4560325f1cf5d2246d95b" - integrity sha512-ftTW3kKX54YXLCxH6BB7oEEoJfoE2pIgw7MINKAs5PsS6nqKPuKk1haTF/EuHmYqG330t5GSrdmtRuHaY1a62Q== - dependencies: - "@noble/hashes" "~1.1.1" - "@noble/secp256k1" "~1.6.0" - "@scure/base" "~1.1.0" - "@scure/bip39@1.0.0": version "1.0.0" resolved "https://registry.yarnpkg.com/@scure/bip39/-/bip39-1.0.0.tgz#47504e58de9a56a4bbed95159d2d6829fa491bb0" @@ -1142,14 +1088,6 @@ "@noble/hashes" "~1.0.0" "@scure/base" "~1.0.0" -"@scure/bip39@1.1.0": - version "1.1.0" - resolved "https://registry.yarnpkg.com/@scure/bip39/-/bip39-1.1.0.tgz#92f11d095bae025f166bef3defcc5bf4945d419a" - integrity sha512-pwrPOS16VeTKg98dYXQyIjJEcWfz7/1YJIwxUEPFfQPtc86Ym/1sVgQ2RLoD43AazMk2l/unK4ITySSpW2+82w== - dependencies: - "@noble/hashes" "~1.1.1" - "@scure/base" "~1.1.0" - "@sentry/core@5.30.0": version "5.30.0" resolved "https://registry.yarnpkg.com/@sentry/core/-/core-5.30.0.tgz#6b203664f69e75106ee8b5a2fe1d717379b331f3" @@ -1511,14 +1449,6 @@ resolved 
"https://registry.yarnpkg.com/@types/qs/-/qs-6.9.7.tgz#63bb7d067db107cc1e457c303bc25d511febf6cb" integrity sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw== -"@types/readable-stream@^2.3.13": - version "2.3.15" - resolved "https://registry.yarnpkg.com/@types/readable-stream/-/readable-stream-2.3.15.tgz#3d79c9ceb1b6a57d5f6e6976f489b9b5384321ae" - integrity sha512-oM5JSKQCcICF1wvGgmecmHldZ48OZamtMxcGGVICOJA8o8cahXC1zEVAif8iwoc5j8etxFaRFnf095+CDsuoFQ== - dependencies: - "@types/node" "*" - safe-buffer "~5.1.1" - "@types/resolve@^0.0.8": version "0.0.8" resolved "https://registry.yarnpkg.com/@types/resolve/-/resolve-0.0.8.tgz#f26074d238e02659e323ce1a13d041eee280e194" @@ -2136,11 +2066,6 @@ async@^3.2.3: resolved "https://registry.yarnpkg.com/async/-/async-3.2.3.tgz#ac53dafd3f4720ee9e8a160628f18ea91df196c9" integrity sha512-spZRyzKL5l5BZQrr/6m/SqFdBN0q3OCI0f9rjfBzCMBIP4p75P620rR3gTmaksNOhmzgdxcaxdNfMy6anrbM0g== -async@^3.2.4: - version "3.2.4" - resolved "https://registry.yarnpkg.com/async/-/async-3.2.4.tgz#2d22e00f8cddeb5fde5dd33522b56d1cf569a81c" - integrity sha512-iAB+JbDEGXhyIUavoDl9WP/Jj106Kz9DEn1DPgYw5ruDn0e3Wgi3sKFm55sASdGBNOQB8F59d9qQ7deqrHA8wQ== - asynckit@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" @@ -4860,16 +4785,6 @@ ethereum-cryptography@^1.0.3: "@scure/bip32" "1.0.1" "@scure/bip39" "1.0.0" -ethereum-cryptography@^1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-1.1.2.tgz#74f2ac0f0f5fe79f012c889b3b8446a9a6264e6d" - integrity sha512-XDSJlg4BD+hq9N2FjvotwUET9Tfxpxc3kWGE2AqUG5vcbeunnbImVk3cj6e/xT3phdW21mE8R5IugU4fspQDcQ== - dependencies: - "@noble/hashes" "1.1.2" - "@noble/secp256k1" "1.6.3" - "@scure/bip32" "1.1.0" - "@scure/bip39" "1.1.0" - ethereum-waffle@^3.3.0: version "3.4.4" resolved 
"https://registry.yarnpkg.com/ethereum-waffle/-/ethereum-waffle-3.4.4.tgz#1378b72040697857b7f5e8f473ca8f97a37b5840" From e8fb9d5faae9a2708fdae0708b9e5cf908033184 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 21 Nov 2022 19:52:29 -0300 Subject: [PATCH 055/112] fix: gas check in L1GNS, and some QA/NatSpec details --- contracts/discovery/GNS.sol | 6 +++--- contracts/discovery/GNSStorage.sol | 4 ++-- contracts/discovery/L1GNS.sol | 13 +++---------- contracts/l2/curation/IL2Curation.sol | 4 ++-- contracts/l2/curation/L2Curation.sol | 16 +++++++++------- contracts/l2/discovery/L2GNS.sol | 2 ++ 6 files changed, 21 insertions(+), 24 deletions(-) diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index 2d39c0d6c..74de358a7 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -35,7 +35,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { uint32 private constant MAX_PPM = 1000000; /// @dev Equates to Connector weight on bancor formula to be CW = 1 - uint32 internal immutable FIXED_RESERVE_RATIO = MAX_PPM; + uint32 internal immutable fixedReserveRatio = MAX_PPM; // -- Events -- @@ -258,12 +258,12 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { uint256 subgraphID = _nextSubgraphID(subgraphOwner); SubgraphData storage subgraphData = _getSubgraphData(subgraphID); subgraphData.subgraphDeploymentID = _subgraphDeploymentID; - subgraphData.reserveRatio = FIXED_RESERVE_RATIO; + subgraphData.reserveRatio = fixedReserveRatio; // Mint the NFT. Use the subgraphID as tokenID. // This function will check the if tokenID already exists. 
_mintNFT(subgraphOwner, subgraphID); - emit SubgraphPublished(subgraphID, _subgraphDeploymentID, FIXED_RESERVE_RATIO); + emit SubgraphPublished(subgraphID, _subgraphDeploymentID, fixedReserveRatio); // Set the token metadata _setSubgraphMetadata(subgraphID, _subgraphMetadata); diff --git a/contracts/discovery/GNSStorage.sol b/contracts/discovery/GNSStorage.sol index 3143d2651..877130189 100644 --- a/contracts/discovery/GNSStorage.sol +++ b/contracts/discovery/GNSStorage.sol @@ -21,7 +21,7 @@ abstract contract GNSV1Storage is Managed { uint32 public ownerTaxPercentage; /// [DEPRECATED] Bonding curve formula. - address public __DEPRECATED_bondingCurve; + address public __DEPRECATED_bondingCurve; // solhint-disable-line var-name-mixedcase /// @dev Stores what subgraph deployment a particular legacy subgraph targets. /// A subgraph is defined by (graphAccountID, subgraphNumber). @@ -39,7 +39,7 @@ abstract contract GNSV1Storage is Managed { /// @dev [DEPRECATED] ERC-1056 contract reference. /// This contract was used for managing identities. - IEthereumDIDRegistry private __DEPRECATED_erc1056Registry; + IEthereumDIDRegistry private __DEPRECATED_erc1056Registry; // solhint-disable-line var-name-mixedcase } /** diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 5c3fc008b..9176ab4af 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -20,7 +20,8 @@ import { L1GNSV1Storage } from "./L1GNSStorage.sol"; * Each version is associated with a Subgraph Deployment. The contract has no knowledge of * human-readable names. All human readable names emitted in events. * The contract implements a multicall behaviour to support batching multiple calls in a single - * transaction. This L1GNS variant includes some functions to allow migrating subgraphs to L2. + * transaction. + * This L1GNS variant includes some functions to allow migrating subgraphs to L2. 
*/ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { using SafeMathUpgradeable for uint256; @@ -41,8 +42,6 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { /** * @notice Send a subgraph's data and tokens to L2. - * The subgraph must be locked using lockSubgraphForMigrationToL2 in a previous block - * (less than 255 blocks ago). * Use the Arbitrum SDK to estimate the L2 retryable ticket parameters. * @param _subgraphID Subgraph ID * @param _l2Owner Address that will own the subgraph in L2 (could be the L1 owner, but could be different if the L1 owner is an L1 contract) @@ -118,15 +117,9 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { require(migrationData.l1Done, "!MIGRATED"); + // The Arbitrum bridge will check this too, we just check here for an early exit require(_maxSubmissionCost != 0, "NO_SUBMISSION_COST"); - { - // makes sure only sufficient ETH is supplied required for successful redemption on L2 - // if a user does not desire immediate redemption they should provide - // a msg.value of AT LEAST _maxSubmissionCost - uint256 expectedEth = _maxSubmissionCost + (_maxGas * _gasPriceBid); - require(msg.value >= expectedEth, "WRONG_ETH_VALUE"); - } L2GasParams memory gasParams = L2GasParams(_maxSubmissionCost, _maxGas, _gasPriceBid); bytes memory outboundCalldata = abi.encodeWithSelector( diff --git a/contracts/l2/curation/IL2Curation.sol b/contracts/l2/curation/IL2Curation.sol index 57b6a145f..4d54b4804 100644 --- a/contracts/l2/curation/IL2Curation.sol +++ b/contracts/l2/curation/IL2Curation.sol @@ -24,9 +24,9 @@ interface IL2Curation { /** * @notice Calculate amount of signal that can be bought with tokens in a curation pool, * without accounting for curation tax. 
- * @param _subgraphDeploymentID Subgraph deployment to mint signal + * @param _subgraphDeploymentID Subgraph deployment for which to mint signal * @param _tokensIn Amount of tokens used to mint signal - * @return Amount of signal that can be bought and tokens subtracted for the tax + * @return Amount of signal that can be bought */ function tokensToSignalNoTax(bytes32 _subgraphDeploymentID, uint256 _tokensIn) external diff --git a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol index f5735f1f0..352a93248 100644 --- a/contracts/l2/curation/L2Curation.sol +++ b/contracts/l2/curation/L2Curation.sol @@ -41,7 +41,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { uint256 private constant SIGNAL_PER_MINIMUM_DEPOSIT = 1e18; // 1 signal as 18 decimal number /// @dev Reserve ratio for all subgraphs set to 100% for a flat bonding curve - uint32 private immutable FIXED_RESERVE_RATIO = MAX_PPM; + uint32 private immutable fixedReserveRatio = MAX_PPM; // -- Events -- @@ -99,7 +99,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { Managed._initialize(_controller); // For backwards compatibility: - defaultReserveRatio = FIXED_RESERVE_RATIO; + defaultReserveRatio = fixedReserveRatio; emit ParameterUpdated("defaultReserveRatio"); _setCurationTaxPercentage(_curationTaxPercentage); _setMinimumCurationDeposit(_minimumCurationDeposit); @@ -108,8 +108,9 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { /** * @notice Set the default reserve ratio - not implemented in L2 + * @dev We only keep this for compatibility with ICuration */ - function setDefaultReserveRatio(uint32) external override onlyGovernor { + function setDefaultReserveRatio(uint32) external view override onlyGovernor { revert("Not implemented in L2"); } @@ -359,7 +360,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { */ function pools(bytes32 _subgraphDeploymentID) external view 
returns (CurationPool memory) { CurationPool memory pool = _pools[_subgraphDeploymentID]; - pool.reserveRatio = FIXED_RESERVE_RATIO; + pool.reserveRatio = fixedReserveRatio; return pool; } @@ -408,7 +409,8 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { * This function considers and excludes the deposit tax. * @param _subgraphDeploymentID Subgraph deployment to mint signal * @param _tokensIn Amount of tokens used to mint signal - * @return Amount of signal that can be bought and tokens subtracted for the tax + * @return Amount of signal that can be bought + * @return Amount of GRT that would be subtracted as curation tax */ function tokensToSignal(bytes32 _subgraphDeploymentID, uint256 _tokensIn) public @@ -426,7 +428,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { * without accounting for curation tax. * @param _subgraphDeploymentID Subgraph deployment to mint signal * @param _tokensIn Amount of tokens used to mint signal - * @return Amount of signal that can be bought and tokens subtracted for the tax + * @return Amount of signal that can be bought */ function tokensToSignalNoTax(bytes32 _subgraphDeploymentID, uint256 _tokensIn) public @@ -439,7 +441,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { /** * @notice Calculate number of tokens to get when burning signal from a curation pool. - * @param _subgraphDeploymentID Subgraph deployment to burn signal + * @param _subgraphDeploymentID Subgraph deployment for which to burn signal * @param _signalIn Amount of signal to burn * @return Amount of tokens to get for an amount of signal */ diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index c0300545a..7ef19b64b 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -22,6 +22,8 @@ import { IL2Curation } from "../curation/IL2Curation.sol"; * human-readable names. All human readable names emitted in events. 
* The contract implements a multicall behaviour to support batching multiple calls in a single * transaction. + * This particular contract is meant to be deployed in L2, and includes helper functions to + * receive subgraphs that are migrated from L1. */ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { using SafeMathUpgradeable for uint256; From 703fca730b4dd9dee0d7e3996692bbd9cfe05334 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 21 Nov 2022 19:55:38 -0300 Subject: [PATCH 056/112] fix: missed a rename --- contracts/l2/discovery/L2GNS.sol | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 7ef19b64b..69feb8bbb 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -115,7 +115,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { // Set the token metadata _setSubgraphMetadata(_subgraphID, _subgraphMetadata); - emit SubgraphPublished(_subgraphID, _subgraphDeploymentID, FIXED_RESERVE_RATIO); + emit SubgraphPublished(_subgraphID, _subgraphDeploymentID, fixedReserveRatio); emit SubgraphUpgraded( _subgraphID, subgraphData.vSignal, @@ -245,7 +245,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - subgraphData.reserveRatio = FIXED_RESERVE_RATIO; + subgraphData.reserveRatio = fixedReserveRatio; // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called subgraphData.disabled = true; subgraphData.nSignal = _nSignal; From bb9cdebfacd58a61c7a54023543409d76afb5cb8 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 21 Nov 2022 20:15:24 -0300 Subject: [PATCH 057/112] fix: comment details --- contracts/l2/curation/L2Curation.sol | 2 +- test/l2/l2Curation.test.ts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol index 352a93248..778147f5f 100644 --- a/contracts/l2/curation/L2Curation.sol +++ b/contracts/l2/curation/L2Curation.sol @@ -26,7 +26,7 @@ import { IL2Curation } from "./IL2Curation.sol"; * subgraph deployment they curate. * A curators deposit goes to a curation pool along with the deposits of other curators, * only one such pool exists for each subgraph deployment. - * The contract mints Graph Curation Shares (GCS) according to a (linear) bonding curve for each individual + * The contract mints Graph Curation Shares (GCS) according to a (flat) bonding curve for each individual * curation pool where GRT is deposited. * Holders can burn GCS using this contract to get GRT tokens back according to the * bonding curve. diff --git a/test/l2/l2Curation.test.ts b/test/l2/l2Curation.test.ts index ec54eb633..930861156 100644 --- a/test/l2/l2Curation.test.ts +++ b/test/l2/l2Curation.test.ts @@ -727,7 +727,7 @@ describe('L2Curation', () => { }) describe('multiple minting', async function () { - it('should mint the same signal every time due to the linear bonding curve', async function () { + it('should mint the same signal every time due to the flat bonding curve', async function () { const tokensToDepositMany = [ toGRT('1000'), // should mint if we start with number above minimum deposit toGRT('1000'), // every time it should mint the same GCS due to bonding curve! 
From 25297df0890ac807da3145d3304e8c12ff6b060a Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 22 Nov 2022 17:18:37 -0300 Subject: [PATCH 058/112] test: remove outdated l2 gas test --- test/gns.test.ts | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/test/gns.test.ts b/test/gns.test.ts index e4e45be70..e06832a06 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1412,28 +1412,6 @@ describe('L1GNS', () => { await expect(tx).revertedWith('!MIGRATED') }) - it('rejects calls with an incorrect eth value', async function () { - const subgraph0 = await publishCurateAndSendSubgraph() - - const maxSubmissionCost = toBN('100') - const maxGas = toBN('10') - const gasPriceBid = toBN('20') - - const tx = gns - .connect(me.signer) - .claimCuratorBalanceToBeneficiaryOnL2( - subgraph0.id, - other.address, - maxGas, - gasPriceBid, - maxSubmissionCost, - { - value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)).sub(1), - }, - ) - - await expect(tx).revertedWith('WRONG_ETH_VALUE') - }) it('rejects calls with zero maxSubmissionCost', async function () { const subgraph0 = await publishCurateAndSendSubgraph() From 6321e606ad5b34d978d163eb867c442b38574cb9 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Wed, 23 Nov 2022 22:06:40 -0300 Subject: [PATCH 059/112] fix: restore e2e task from dev --- tasks/e2e/e2e.ts | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/tasks/e2e/e2e.ts b/tasks/e2e/e2e.ts index 627f527f9..aa712380c 100644 --- a/tasks/e2e/e2e.ts +++ b/tasks/e2e/e2e.ts @@ -34,7 +34,6 @@ const setGraphConfig = async (args: TaskArguments, hre: HardhatRuntimeEnvironmen } task('e2e', 'Run all e2e tests') - .addFlag('disableSecureAccounts', 'Disable secure accounts on GRE') .addOptionalParam('graphConfig', cliOpts.graphConfig.description) .addOptionalParam('l1GraphConfig', cliOpts.graphConfig.description) .addOptionalParam('l2GraphConfig', cliOpts.graphConfig.description) @@ -50,6 +49,9 @@ task('e2e', 'Run 
all e2e tests') testFiles = testFiles.filter((file) => !['l1', 'l2'].includes(file.split('/')[3])) } + // Disable secure accounts, we don't need them for this task + hre.config.graph.disableSecureAccounts = true + setGraphConfig(args, hre) await hre.run(TASK_TEST, { testFiles: testFiles, @@ -57,7 +59,6 @@ task('e2e', 'Run all e2e tests') }) task('e2e:config', 'Run deployment configuration e2e tests') - .addFlag('disableSecureAccounts', 'Disable secure accounts on GRE') .addOptionalParam('graphConfig', cliOpts.graphConfig.description) .addOptionalParam('l1GraphConfig', cliOpts.graphConfig.description) .addOptionalParam('l2GraphConfig', cliOpts.graphConfig.description) @@ -65,6 +66,9 @@ task('e2e:config', 'Run deployment configuration e2e tests') .setAction(async (args, hre: HardhatRuntimeEnvironment) => { const files = new glob.GlobSync(CONFIG_TESTS).found + // Disable secure accounts, we don't need them for this task + hre.config.graph.disableSecureAccounts = true + setGraphConfig(args, hre) await hre.run(TASK_TEST, { testFiles: files, @@ -72,7 +76,6 @@ task('e2e:config', 'Run deployment configuration e2e tests') }) task('e2e:init', 'Run deployment initialization e2e tests') - .addFlag('disableSecureAccounts', 'Disable secure accounts on GRE') .addOptionalParam('graphConfig', cliOpts.graphConfig.description) .addOptionalParam('l1GraphConfig', cliOpts.graphConfig.description) .addOptionalParam('l2GraphConfig', cliOpts.graphConfig.description) @@ -80,6 +83,9 @@ task('e2e:init', 'Run deployment initialization e2e tests') .setAction(async (args, hre: HardhatRuntimeEnvironment) => { const files = new glob.GlobSync(INIT_TESTS).found + // Disable secure accounts, we don't need them for this task + hre.config.graph.disableSecureAccounts = true + setGraphConfig(args, hre) await hre.run(TASK_TEST, { testFiles: files, From 6639f8dea81615a3ec3c7586549be390ebcb004c Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 25 Nov 2022 16:05:33 -0300 Subject: [PATCH 060/112] 
fix(L1GNS): use a public function for outbound calldata --- contracts/discovery/L1GNS.sol | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 9176ab4af..ec0c48e06 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -122,11 +122,9 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { L2GasParams memory gasParams = L2GasParams(_maxSubmissionCost, _maxGas, _gasPriceBid); - bytes memory outboundCalldata = abi.encodeWithSelector( - IL2GNS.claimL1CuratorBalanceToBeneficiary.selector, + bytes memory outboundCalldata = getClaimCuratorBalanceOutboundCalldata( _subgraphID, msg.sender, - getCuratorSignal(_subgraphID, msg.sender), _beneficiary ); @@ -143,6 +141,29 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { return abi.encode(seqNum); } + /** + * @notice Get the outbound calldata that will be sent to L2 + * when calling claimCuratorBalanceToBeneficiaryOnL2. + * This can be useful to estimate the L2 retryable ticket parameters. + * @param _subgraphID Subgraph ID + * @param _curator Curator address + * @param _beneficiary Address that will own the signal in L2 + */ + function getClaimCuratorBalanceOutboundCalldata( + uint256 _subgraphID, + address _curator, + address _beneficiary + ) public view returns (bytes memory) { + return + abi.encodeWithSelector( + IL2GNS.claimL1CuratorBalanceToBeneficiary.selector, + _subgraphID, + _curator, + getCuratorSignal(_subgraphID, _curator), + _beneficiary + ); + } + /** * @dev Encodes the subgraph data as callhook parameters * for the L2 migration. 
From 75522bf5c6f5e5adb751cb2c731b8063847abbfc Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Wed, 7 Dec 2022 17:32:17 -0300 Subject: [PATCH 061/112] fix: allow anyone to deprecate a subgraph if migration is not finished after a week (OZ H-01) --- contracts/discovery/IGNS.sol | 1 + contracts/l2/discovery/IL2GNS.sol | 9 +++ contracts/l2/discovery/L2GNS.sol | 37 ++++++++++- test/l2/l2GNS.test.ts | 103 +++++++++++++++++++++++++++++- 4 files changed, 145 insertions(+), 5 deletions(-) diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index 1a5a3fc31..c7557a98d 100644 --- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -34,6 +34,7 @@ interface IGNS { bool l1Done; // Migration finished on L1 side mapping(address => bool) curatorBalanceClaimed; // True for curators whose balance has been claimed in L2 bool l2Done; // Migration finished on L2 side + uint256 subgraphReceivedOnL2BlockNumber; // Block number when the subgraph was received on L2 } /** diff --git a/contracts/l2/discovery/IL2GNS.sol b/contracts/l2/discovery/IL2GNS.sol index bdfb1a87f..40fe4e1ef 100644 --- a/contracts/l2/discovery/IL2GNS.sol +++ b/contracts/l2/discovery/IL2GNS.sol @@ -24,6 +24,15 @@ interface IL2GNS is ICallhookReceiver { bytes32 _versionMetadata ) external; + /** + * @notice Deprecate a subgraph that was migrated from L1, but for which + * the migration was never finished. Anyone can call this function after 50400 blocks + * (one day) have passed since the subgraph was migrated, if the subgraph owner didn't + * call finishSubgraphMigrationFromL1. + * @param _subgraphID Subgraph ID + */ + function deprecateSubgraphMigratedFromL1(uint256 _subgraphID) external; + /** * @notice Claim curator balance belonging to a curator from L1. 
* This will be credited to the a beneficiary on L2, and can only be called diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 69feb8bbb..fbf00a5a6 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -28,6 +28,10 @@ import { IL2Curation } from "../curation/IL2Curation.sol"; contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { using SafeMathUpgradeable for uint256; + /// The amount of time (in blocks) that a subgraph owner has to finish the migration + /// from L1 before the subgraph can be deprecated: 1 week + uint256 public constant FINISH_MIGRATION_TIMEOUT = 50400; + /// @dev Emitted when a subgraph is received from L1 through the bridge event SubgraphReceivedFromL1(uint256 _subgraphID); /// @dev Emitted when a subgraph migration from L1 is finalized, so the subgraph is published @@ -99,7 +103,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); // A subgraph - require(migratedData.l1Done, "INVALID_SUBGRAPH"); + require(migratedData.subgraphReceivedOnL2BlockNumber != 0, "INVALID_SUBGRAPH"); require(!migratedData.l2Done, "ALREADY_DONE"); migratedData.l2Done = true; @@ -128,6 +132,35 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { emit SubgraphMigrationFinalized(_subgraphID); } + /** + * @notice Deprecate a subgraph that was migrated from L1, but for which + * the migration was never finished. Anyone can call this function after 50400 blocks + * (one day) have passed since the subgraph was migrated, if the subgraph owner didn't + * call finishSubgraphMigrationFromL1. 
+ * @param _subgraphID Subgraph ID + */ + function deprecateSubgraphMigratedFromL1(uint256 _subgraphID) + external + override + notPartialPaused + { + IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + require(migratedData.subgraphReceivedOnL2BlockNumber != 0, "INVALID_SUBGRAPH"); + require(!migratedData.l2Done, "ALREADY_FINISHED"); + require( + block.number > migratedData.subgraphReceivedOnL2BlockNumber + FINISH_MIGRATION_TIMEOUT, + "TOO_EARLY" + ); + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + + migratedData.l2Done = true; + uint256 withdrawableGRT = migratedData.tokens; + subgraphData.withdrawableGRT = withdrawableGRT; + subgraphData.reserveRatio = 0; + _burnNFT(_subgraphID); + emit SubgraphDeprecated(_subgraphID, withdrawableGRT); + } + /** * @notice Claim curator balance belonging to a curator from L1. * This will be credited to the a beneficiary on L2, and can only be called @@ -251,7 +284,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { subgraphData.nSignal = _nSignal; migratedData.tokens = _tokens; - migratedData.l1Done = true; + migratedData.subgraphReceivedOnL2BlockNumber = block.number; // Mint the NFT. Use the subgraphID as tokenID. // This function will check the if tokenID already exists. 
diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index a0588564b..8af64d054 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -9,6 +9,8 @@ import { toGRT, getL2SignerFromL1, setAccountBalance, + latestBlock, + advanceBlocks, } from '../lib/testHelpers' import { L2FixtureContracts, NetworkFixture } from '../lib/fixtures' import { toBN } from '../lib/testHelpers' @@ -75,7 +77,7 @@ describe('L2GNS', () => { curatedTokens: toGRT('1337'), subgraphMetadata: randomHexBytes(), versionMetadata: randomHexBytes(), - nSignal: toBN('4567'), + nSignal: toGRT('45670'), } } const migrateMockSubgraphFromL1 = async function ( @@ -174,8 +176,9 @@ describe('L2GNS', () => { const subgraphData = await gns.subgraphs(l1SubgraphId) expect(migrationData.tokens).eq(curatedTokens) - expect(migrationData.l1Done).eq(true) + expect(migrationData.l1Done).eq(false) expect(migrationData.l2Done).eq(false) + expect(migrationData.subgraphReceivedOnL2BlockNumber).eq(await latestBlock()) expect(subgraphData.vSignal).eq(0) expect(subgraphData.nSignal).eq(nSignal) @@ -212,8 +215,9 @@ describe('L2GNS', () => { const subgraphData = await gns.subgraphs(l1SubgraphId) expect(migrationData.tokens).eq(curatedTokens) - expect(migrationData.l1Done).eq(true) + expect(migrationData.l1Done).eq(false) expect(migrationData.l2Done).eq(false) + expect(migrationData.subgraphReceivedOnL2BlockNumber).eq(await latestBlock()) expect(subgraphData.vSignal).eq(0) expect(subgraphData.nSignal).eq(nSignal) @@ -393,6 +397,99 @@ describe('L2GNS', () => { await expect(tx).revertedWith('GNS: deploymentID != 0') }) }) + describe('deprecating a subgraph with an unfinished migration from L1', function () { + it('deprecates the subgraph and sets the withdrawableGRT', async function () { + const { l1SubgraphId, curatedTokens, nSignal } = await defaultL1SubgraphParams() + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], + ) + await 
gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + + await advanceBlocks(50400) + + const tx = gns + .connect(other.signer) // Can be called by anyone + .deprecateSubgraphMigratedFromL1(l1SubgraphId) + await expect(tx).emit(gns, 'SubgraphDeprecated').withArgs(l1SubgraphId, curatedTokens) + + const subgraphAfter = await gns.subgraphs(l1SubgraphId) + const migrationDataAfter = await gns.subgraphL2MigrationData(l1SubgraphId) + expect(subgraphAfter.vSignal).eq(0) + expect(migrationDataAfter.l2Done).eq(true) + expect(subgraphAfter.disabled).eq(true) + expect(subgraphAfter.subgraphDeploymentID).eq(HashZero) + expect(subgraphAfter.withdrawableGRT).eq(curatedTokens) + + // Check that the curator can withdraw the GRT + const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) + await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) + // Note the signal is assigned to other.address as beneficiary + await gns + .connect(mockL1GNSL2Alias) + .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + const curatorBalanceBefore = await grt.balanceOf(other.address) + const expectedTokensOut = curatedTokens.mul(toGRT('10')).div(nSignal) + const withdrawTx = await gns.connect(other.signer).withdraw(l1SubgraphId) + await expect(withdrawTx) + .emit(gns, 'GRTWithdrawn') + .withArgs(l1SubgraphId, other.address, toGRT('10'), expectedTokensOut) + const curatorBalanceAfter = await grt.balanceOf(other.address) + expect(curatorBalanceAfter.sub(curatorBalanceBefore)).eq(expectedTokensOut) + }) + it('rejects calls if not enough time has passed', async function () { + const { l1SubgraphId, curatedTokens, nSignal } = await defaultL1SubgraphParams() + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], + ) + await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + + await advanceBlocks(50399) 
+ + const tx = gns + .connect(other.signer) // Can be called by anyone + .deprecateSubgraphMigratedFromL1(l1SubgraphId) + await expect(tx).revertedWith('TOO_EARLY') + }) + it('rejects calls if the subgraph migration was finished', async function () { + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + await defaultL1SubgraphParams() + const callhookData = defaultAbiCoder.encode( + ['uint256', 'address', 'uint256'], + [l1SubgraphId, me.address, nSignal], + ) + await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + + await advanceBlocks(50400) + + await gns + .connect(me.signer) + .finishSubgraphMigrationFromL1( + l1SubgraphId, + newSubgraph0.subgraphDeploymentID, + subgraphMetadata, + versionMetadata, + ) + + const tx = gns + .connect(other.signer) // Can be called by anyone + .deprecateSubgraphMigratedFromL1(l1SubgraphId) + await expect(tx).revertedWith('ALREADY_FINISHED') + }) + it('rejects calls for a subgraph that does not exist', async function () { + const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) + + const tx = gns.connect(me.signer).deprecateSubgraphMigratedFromL1(l1SubgraphId) + await expect(tx).revertedWith('INVALID_SUBGRAPH') + }) + it('rejects calls for a subgraph that was not migrated', async function () { + const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) + + const tx = gns.connect(me.signer).deprecateSubgraphMigratedFromL1(l2Subgraph.id) + await expect(tx).revertedWith('INVALID_SUBGRAPH') + }) + }) describe('claiming a curator balance with a message from L1', function () { it('assigns a curator balance to a beneficiary', async function () { const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) From 0a5333282f6a3da4d021925799b4015e3222326b Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 16 Dec 2022 21:19:51 -0300 Subject: [PATCH 062/112] fix: update comments in deprecateSubgraphMigratedFromL1 --- 
contracts/l2/discovery/IL2GNS.sol | 6 +++--- contracts/l2/discovery/L2GNS.sol | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/contracts/l2/discovery/IL2GNS.sol b/contracts/l2/discovery/IL2GNS.sol index 40fe4e1ef..9d1f20fc0 100644 --- a/contracts/l2/discovery/IL2GNS.sol +++ b/contracts/l2/discovery/IL2GNS.sol @@ -26,9 +26,9 @@ interface IL2GNS is ICallhookReceiver { /** * @notice Deprecate a subgraph that was migrated from L1, but for which - * the migration was never finished. Anyone can call this function after 50400 blocks - * (one day) have passed since the subgraph was migrated, if the subgraph owner didn't - * call finishSubgraphMigrationFromL1. + * the migration was never finished. Anyone can call this function after a certain amount of + * blocks have passed since the subgraph was migrated, if the subgraph owner didn't + * call finishSubgraphMigrationFromL1. In L2GNS this timeout is the FINISH_MIGRATION_TIMEOUT constant. * @param _subgraphID Subgraph ID */ function deprecateSubgraphMigratedFromL1(uint256 _subgraphID) external; diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index fbf00a5a6..6ce3a5ac3 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -134,9 +134,9 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { /** * @notice Deprecate a subgraph that was migrated from L1, but for which - * the migration was never finished. Anyone can call this function after 50400 blocks - * (one day) have passed since the subgraph was migrated, if the subgraph owner didn't - * call finishSubgraphMigrationFromL1. + * the migration was never finished. Anyone can call this function after a certain amount of + * blocks have passed since the subgraph was migrated, if the subgraph owner didn't + * call finishSubgraphMigrationFromL1. In L2GNS this timeout is the FINISH_MIGRATION_TIMEOUT constant. 
* @param _subgraphID Subgraph ID */ function deprecateSubgraphMigratedFromL1(uint256 _subgraphID) From 5d924c4fd5e5dca4d1c65e1a3da238af7d2d6a22 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 15 Dec 2022 18:26:00 -0300 Subject: [PATCH 063/112] test: check that curators can't double spend on L1 after migration (OZ M-03) --- test/gns.test.ts | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/test/gns.test.ts b/test/gns.test.ts index e06832a06..feae696d3 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1286,6 +1286,54 @@ describe('L1GNS', () => { await expect(tx).revertedWith('GNS: Must be active') }) + it('does not allow curators to burn signal after sending', async function () { + const subgraph0 = await publishAndCurateOnSubgraph() + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) + + const tx2 = gns.connect(me.signer).burnSignal(subgraph0.id, toBN(1), toGRT('0')) + await expect(tx2).revertedWith('GNS: Must be active') + }) + it('does not allow curators to transfer signal after sending', async function () { + const subgraph0 = await publishAndCurateOnSubgraph() + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) + + const tx2 = gns.connect(me.signer).transferSignal(subgraph0.id, other.address, toBN(1)) + await expect(tx2).revertedWith('GNS: Must be active') 
+ }) + it('does not allow curators to withdraw GRT after sending', async function () { + const subgraph0 = await publishAndCurateOnSubgraph() + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) + + const tx2 = gns.connect(me.signer).withdraw(subgraph0.id) + await expect(tx2).revertedWith('GNS: No more GRT to withdraw') + }) }) describe('claimCuratorBalanceToBeneficiaryOnL2', function () { beforeEach(async function () { From f8c2f065d972b304b40709bef1daf6db13a866bf Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 15 Dec 2022 19:06:33 -0300 Subject: [PATCH 064/112] test: add tests for L2GNS.publishNewVersion (OZ M-03) --- test/gns.test.ts | 152 ++++++------------------------------------ test/l2/l2GNS.test.ts | 111 ++++++++++++++++++++++++++++++ test/lib/gnsUtils.ts | 148 ++++++++++++++++++++++++++++++++++++++-- 3 files changed, 275 insertions(+), 136 deletions(-) diff --git a/test/gns.test.ts b/test/gns.test.ts index feae696d3..f0093a7a1 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -38,6 +38,9 @@ import { getTokensAndVSignal, publishNewSubgraph, publishNewVersion, + mintSignal, + deprecateSubgraph, + burnSignal, } from './lib/gnsUtils' const { AddressZero, HashZero } = ethers.constants @@ -136,117 +139,6 @@ describe('L1GNS', () => { ) } - const deprecateSubgraph = async (account: Account, subgraphID: string) => { - // Before state - const beforeSubgraph = await gns.subgraphs(subgraphID) - const [beforeTokens] = await getTokensAndVSignal(beforeSubgraph.subgraphDeploymentID, curation) - - // We can use the whole amount, since in this test suite all vSignal is used to be staked on nSignal - const ownerBalanceBefore = await 
grt.balanceOf(account.address) - - // Send tx - const tx = gns.connect(account.signer).deprecateSubgraph(subgraphID) - await expect(tx).emit(gns, 'SubgraphDeprecated').withArgs(subgraphID, beforeTokens) - - // After state - const afterSubgraph = await gns.subgraphs(subgraphID) - // Check marked as deprecated - expect(afterSubgraph.disabled).eq(true) - // Signal for the deployment must be all burned - expect(afterSubgraph.vSignal.eq(toBN('0'))) - // Cleanup reserve ratio - expect(afterSubgraph.reserveRatio).eq(0) - // Should be equal since owner pays curation tax - expect(afterSubgraph.withdrawableGRT).eq(beforeTokens) - - // Check balance of GNS increased by curation tax from owner being added - const afterGNSBalance = await grt.balanceOf(gns.address) - expect(afterGNSBalance).eq(afterSubgraph.withdrawableGRT) - // Check that the owner balance decreased by the curation tax - const ownerBalanceAfter = await grt.balanceOf(account.address) - expect(ownerBalanceBefore.eq(ownerBalanceAfter)) - - // Check NFT was burned - await expect(gns.ownerOf(subgraphID)).revertedWith('ERC721: owner query for nonexistent token') - - return tx - } - - const mintSignal = async ( - account: Account, - subgraphID: string, - tokensIn: BigNumber, - ): Promise => { - // Before state - const beforeSubgraph = await gns.subgraphs(subgraphID) - const [beforeTokens, beforeVSignal] = await getTokensAndVSignal( - beforeSubgraph.subgraphDeploymentID, - curation, - ) - - // Deposit - const { - 0: vSignalExpected, - 1: nSignalExpected, - 2: curationTax, - } = await gns.tokensToNSignal(subgraphID, tokensIn) - const tx = gns.connect(account.signer).mintSignal(subgraphID, tokensIn, 0) - await expect(tx) - .emit(gns, 'SignalMinted') - .withArgs(subgraphID, account.address, nSignalExpected, vSignalExpected, tokensIn) - - // After state - const afterSubgraph = await gns.subgraphs(subgraphID) - const [afterTokens, afterVSignal] = await getTokensAndVSignal( - afterSubgraph.subgraphDeploymentID, - curation, - 
) - - // Check state - expect(afterTokens).eq(beforeTokens.add(tokensIn.sub(curationTax))) - expect(afterVSignal).eq(beforeVSignal.add(vSignalExpected)) - expect(afterSubgraph.nSignal).eq(beforeSubgraph.nSignal.add(nSignalExpected)) - expect(afterSubgraph.vSignal).eq(beforeVSignal.add(vSignalExpected)) - - return tx - } - - const burnSignal = async (account: Account, subgraphID: string): Promise => { - // Before state - const beforeSubgraph = await gns.subgraphs(subgraphID) - const [beforeTokens, beforeVSignal] = await getTokensAndVSignal( - beforeSubgraph.subgraphDeploymentID, - curation, - ) - const beforeUsersNSignal = await gns.getCuratorSignal(subgraphID, account.address) - - // Withdraw - const { 0: vSignalExpected, 1: tokensExpected } = await gns.nSignalToTokens( - subgraphID, - beforeUsersNSignal, - ) - - // Send tx - const tx = gns.connect(account.signer).burnSignal(subgraphID, beforeUsersNSignal, 0) - await expect(tx) - .emit(gns, 'SignalBurned') - .withArgs(subgraphID, account.address, beforeUsersNSignal, vSignalExpected, tokensExpected) - - // After state - const afterSubgraph = await gns.subgraphs(subgraphID) - const [afterTokens, afterVSignalCuration] = await getTokensAndVSignal( - afterSubgraph.subgraphDeploymentID, - curation, - ) - - // Check state - expect(afterTokens).eq(beforeTokens.sub(tokensExpected)) - expect(afterVSignalCuration).eq(beforeVSignal.sub(vSignalExpected)) - expect(afterSubgraph.nSignal).eq(beforeSubgraph.nSignal.sub(beforeUsersNSignal)) - - return tx - } - const transferSignal = async ( subgraphID: string, owner: Account, @@ -524,7 +416,7 @@ describe('L1GNS', () => { beforeEach(async () => { subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - await mintSignal(me, subgraph.id, tokens10000) + await mintSignal(me, subgraph.id, tokens10000, gns, curation) }) it('should publish a new version on an existing subgraph', async function () { @@ -589,7 +481,7 @@ describe('L1GNS', () => { }) it('should upgrade version when there is 
no signal with no signal migration', async function () { - await burnSignal(me, subgraph.id) + await burnSignal(me, subgraph.id, gns, curation) const tx = gns .connect(me.signer) .publishNewVersion( @@ -603,7 +495,7 @@ describe('L1GNS', () => { }) it('should fail when subgraph is deprecated', async function () { - await deprecateSubgraph(me, subgraph.id) + await deprecateSubgraph(me, subgraph.id, gns, curation, grt) const tx = gns .connect(me.signer) .publishNewVersion( @@ -621,15 +513,15 @@ describe('L1GNS', () => { beforeEach(async () => { subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - await mintSignal(me, subgraph.id, tokens10000) + await mintSignal(me, subgraph.id, tokens10000, gns, curation) }) it('should deprecate a subgraph', async function () { - await deprecateSubgraph(me, subgraph.id) + await deprecateSubgraph(me, subgraph.id, gns, curation, grt) }) it('should prevent a deprecated subgraph from being republished', async function () { - await deprecateSubgraph(me, subgraph.id) + await deprecateSubgraph(me, subgraph.id, gns, curation, grt) const tx = gns .connect(me.signer) .publishNewVersion( @@ -658,12 +550,12 @@ describe('L1GNS', () => { describe('mintSignal()', async function () { it('should deposit into the name signal curve', async function () { const subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - await mintSignal(other, subgraph.id, tokens10000) + await mintSignal(other, subgraph.id, tokens10000, gns, curation) }) it('should fail when name signal is disabled', async function () { const subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - await deprecateSubgraph(me, subgraph.id) + await deprecateSubgraph(me, subgraph.id, gns, curation, grt) const tx = gns.connect(me.signer).mintSignal(subgraph.id, tokens1000, 0) await expect(tx).revertedWith('GNS: Must be active') }) @@ -692,15 +584,15 @@ describe('L1GNS', () => { beforeEach(async () => { subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - await 
mintSignal(other, subgraph.id, tokens10000) + await mintSignal(other, subgraph.id, tokens10000, gns, curation) }) it('should withdraw from the name signal curve', async function () { - await burnSignal(other, subgraph.id) + await burnSignal(other, subgraph.id, gns, curation) }) it('should fail when name signal is disabled', async function () { - await deprecateSubgraph(me, subgraph.id) + await deprecateSubgraph(me, subgraph.id, gns, curation, grt) // just test 1 since it will fail const tx = gns.connect(me.signer).burnSignal(subgraph.id, 1, 0) await expect(tx).revertedWith('GNS: Must be active') @@ -737,7 +629,7 @@ describe('L1GNS', () => { beforeEach(async () => { subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - await mintSignal(other, subgraph.id, tokens10000) + await mintSignal(other, subgraph.id, tokens10000, gns, curation) otherNSignal = await gns.getCuratorSignal(subgraph.id, other.address) }) @@ -751,7 +643,7 @@ describe('L1GNS', () => { await expect(tx).revertedWith('GNS: Curator cannot transfer to the zero address') }) it('should fail when name signal is disabled', async function () { - await deprecateSubgraph(me, subgraph.id) + await deprecateSubgraph(me, subgraph.id, gns, curation, grt) const tx = gns .connect(other.signer) .transferSignal(subgraph.id, another.address, otherNSignal) @@ -776,11 +668,11 @@ describe('L1GNS', () => { beforeEach(async () => { subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - await mintSignal(other, subgraph.id, tokens10000) + await mintSignal(other, subgraph.id, tokens10000, gns, curation) }) it('should withdraw GRT from a disabled name signal', async function () { - await deprecateSubgraph(me, subgraph.id) + await deprecateSubgraph(me, subgraph.id, gns, curation, grt) await withdraw(other, subgraph.id) }) @@ -790,14 +682,14 @@ describe('L1GNS', () => { }) it('should fail when there is no more GRT to withdraw', async function () { - await deprecateSubgraph(me, subgraph.id) + await deprecateSubgraph(me, 
subgraph.id, gns, curation, grt) await withdraw(other, subgraph.id) const tx = gns.connect(other.signer).withdraw(subgraph.id) await expect(tx).revertedWith('GNS: No more GRT to withdraw') }) it('should fail if the curator has no nSignal', async function () { - await deprecateSubgraph(me, subgraph.id) + await deprecateSubgraph(me, subgraph.id, gns, curation, grt) const tx = gns.connect(me.signer).withdraw(subgraph.id) await expect(tx).revertedWith('GNS: No signal to withdraw GRT') }) @@ -832,7 +724,7 @@ describe('L1GNS', () => { tokensToDeposit.sub(curationTax), beforeSubgraph.subgraphDeploymentID, ) - const tx = await mintSignal(me, subgraph.id, tokensToDeposit) + const tx = await mintSignal(me, subgraph.id, tokensToDeposit, gns, curation) const receipt = await tx.wait() const event: Event = receipt.events.pop() const nSignalCreated = event.args['nSignalCreated'] @@ -861,7 +753,7 @@ describe('L1GNS', () => { // State updated for (const tokensToDeposit of tokensToDepositMany) { - await mintSignal(me, subgraph.id, tokensToDeposit) + await mintSignal(me, subgraph.id, tokensToDeposit, gns, curation) } }) }) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 8af64d054..81ad95d06 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -20,9 +20,15 @@ import { L2GraphTokenGateway } from '../../build/types/L2GraphTokenGateway' import { buildSubgraph, buildSubgraphID, + burnSignal, DEFAULT_RESERVE_RATIO, + deprecateSubgraph, + getTokensAndVSignal, + mintSignal, publishNewSubgraph, + publishNewVersion, PublishSubgraph, + Subgraph, } from '../lib/gnsUtils' import { L2Curation } from '../../build/types/L2Curation' import { GraphToken } from '../../build/types/GraphToken' @@ -54,6 +60,12 @@ describe('L2GNS', () => { let grt: GraphToken let newSubgraph0: PublishSubgraph + let newSubgraph1: PublishSubgraph + + const tokens1000 = toGRT('1000') + const tokens10000 = toGRT('10000') + const tokens100000 = toGRT('100000') + const curationTaxPercentage = 50000 
const gatewayFinalizeTransfer = async function ( from: string, @@ -130,6 +142,105 @@ describe('L2GNS', () => { await fixture.tearDown() }) + // Adapted from the L1 GNS tests but allowing curating to a pre-curated subgraph deployment + describe('publishNewVersion', async function () { + let subgraph: Subgraph + + beforeEach(async () => { + newSubgraph0 = buildSubgraph() + newSubgraph1 = buildSubgraph() + // Give some funds to the signers and approve gns contract to use funds on signers behalf + await grt.connect(governor.signer).mint(me.address, tokens100000) + await grt.connect(governor.signer).mint(other.address, tokens100000) + await grt.connect(me.signer).approve(gns.address, tokens100000) + await grt.connect(me.signer).approve(curation.address, tokens100000) + await grt.connect(other.signer).approve(gns.address, tokens100000) + await grt.connect(other.signer).approve(curation.address, tokens100000) + // Update curation tax to test the functionality of it in disableNameSignal() + await curation.connect(governor.signer).setCurationTaxPercentage(curationTaxPercentage) + subgraph = await publishNewSubgraph(me, newSubgraph0, gns) + await mintSignal(me, subgraph.id, tokens10000, gns, curation) + }) + + it('should publish a new version on an existing subgraph', async function () { + await publishNewVersion(me, subgraph.id, newSubgraph1, gns, curation) + }) + + it('should publish a new version on an existing subgraph with no current signal', async function () { + const emptySignalSubgraph = await publishNewSubgraph(me, buildSubgraph(), gns) + await publishNewVersion(me, emptySignalSubgraph.id, newSubgraph1, gns, curation) + }) + + it('should reject a new version with the same subgraph deployment ID', async function () { + const tx = gns + .connect(me.signer) + .publishNewVersion( + subgraph.id, + newSubgraph0.subgraphDeploymentID, + newSubgraph0.versionMetadata, + ) + await expect(tx).revertedWith( + 'GNS: Cannot publish a new version with the same subgraph deployment 
ID', + ) + }) + + it('should reject publishing a version to a subgraph that does not exist', async function () { + const tx = gns + .connect(me.signer) + .publishNewVersion( + randomHexBytes(32), + newSubgraph1.subgraphDeploymentID, + newSubgraph1.versionMetadata, + ) + await expect(tx).revertedWith('ERC721: owner query for nonexistent token') + }) + + it('reject if not the owner', async function () { + const tx = gns + .connect(other.signer) + .publishNewVersion( + subgraph.id, + newSubgraph1.subgraphDeploymentID, + newSubgraph1.versionMetadata, + ) + await expect(tx).revertedWith('GNS: Must be authorized') + }) + + it('should NOT fail when upgrade tries to point to a pre-curated', async function () { + // Curate directly to the deployment + await curation.connect(me.signer).mint(newSubgraph1.subgraphDeploymentID, tokens1000, 0) + + await publishNewVersion(me, subgraph.id, newSubgraph1, gns, curation) + }) + + it('should upgrade version when there is no signal with no signal migration', async function () { + await burnSignal(me, subgraph.id, gns, curation) + const tx = gns + .connect(me.signer) + .publishNewVersion( + subgraph.id, + newSubgraph1.subgraphDeploymentID, + newSubgraph1.versionMetadata, + ) + await expect(tx) + .emit(gns, 'SubgraphVersionUpdated') + .withArgs(subgraph.id, newSubgraph1.subgraphDeploymentID, newSubgraph1.versionMetadata) + }) + + it('should fail when subgraph is deprecated', async function () { + await deprecateSubgraph(me, subgraph.id, gns, curation, grt) + const tx = gns + .connect(me.signer) + .publishNewVersion( + subgraph.id, + newSubgraph1.subgraphDeploymentID, + newSubgraph1.versionMetadata, + ) + // NOTE: deprecate burns the Subgraph NFT, when someone wants to publish a new version it won't find it + await expect(tx).revertedWith('ERC721: owner query for nonexistent token') + }) + }) + describe('receiving a subgraph from L1 (onTokenTransfer)', function () { it('cannot be called by someone other than the L2GraphTokenGateway', 
async function () { const { l1SubgraphId, curatedTokens, nSignal } = await defaultL1SubgraphParams() diff --git a/test/lib/gnsUtils.ts b/test/lib/gnsUtils.ts index 32395957a..6d7bd2105 100644 --- a/test/lib/gnsUtils.ts +++ b/test/lib/gnsUtils.ts @@ -1,10 +1,13 @@ -import { BigNumber } from 'ethers' +import { BigNumber, ContractTransaction } from 'ethers' import { namehash, solidityKeccak256 } from 'ethers/lib/utils' import { Curation } from '../../build/types/Curation' import { L1GNS } from '../../build/types/L1GNS' import { L2GNS } from '../../build/types/L2GNS' import { Account, getChainID, randomHexBytes, toBN } from './testHelpers' import { expect } from 'chai' +import { L2Curation } from '../../build/types/L2Curation' +import { GraphToken } from '../../build/types/GraphToken' +import { L2GraphToken } from '../../build/types/L2GraphToken' // Entities export interface PublishSubgraph { @@ -59,7 +62,7 @@ export const createDefaultName = (name: string): AccountDefaultName => { export const getTokensAndVSignal = async ( subgraphDeploymentID: string, - curation: Curation, + curation: Curation | L2Curation, ): Promise> => { const curationPool = await curation.pools(subgraphDeploymentID) const vSignal = await curation.getCurationPoolSignal(subgraphDeploymentID) @@ -115,7 +118,7 @@ export const publishNewVersion = async ( subgraphID: string, newSubgraph: PublishSubgraph, gns: L1GNS | L2GNS, - curation: Curation, + curation: Curation | L2Curation, ) => { // Before state const ownerTaxPercentage = await gns.ownerTaxPercentage() @@ -149,6 +152,11 @@ export const publishNewVersion = async ( ? 
await curation.tokensToSignal(newSubgraph.subgraphDeploymentID, totalAdjustedUp) : [toBN(0), toBN(0)] + // Check the vSignal of the new curation curve, and tokens, before upgrading + const [beforeTokensNewCurve, beforeVSignalNewCurve] = await getTokensAndVSignal( + newSubgraph.subgraphDeploymentID, + curation, + ) // Send tx const tx = gns .connect(account.signer) @@ -178,12 +186,16 @@ export const publishNewVersion = async ( newSubgraph.subgraphDeploymentID, curation, ) - expect(afterTokensNewCurve).eq(totalAdjustedUp.sub(newCurationTaxEstimate)) - expect(afterVSignalNewCurve).eq(newVSignalEstimate) + expect(afterTokensNewCurve).eq( + beforeTokensNewCurve.add(totalAdjustedUp).sub(newCurationTaxEstimate), + ) + expect(afterVSignalNewCurve).eq(beforeVSignalNewCurve.add(newVSignalEstimate)) // Check the nSignal pool const afterSubgraph = await gns.subgraphs(subgraphID) - expect(afterSubgraph.vSignal).eq(afterVSignalNewCurve).eq(newVSignalEstimate) + expect(afterSubgraph.vSignal) + .eq(afterVSignalNewCurve.sub(beforeVSignalNewCurve)) + .eq(newVSignalEstimate) expect(afterSubgraph.nSignal).eq(beforeSubgraph.nSignal) // should not change expect(afterSubgraph.subgraphDeploymentID).eq(newSubgraph.subgraphDeploymentID) @@ -193,3 +205,127 @@ export const publishNewVersion = async ( return tx } + +export const mintSignal = async ( + account: Account, + subgraphID: string, + tokensIn: BigNumber, + gns: L1GNS | L2GNS, + curation: Curation | L2Curation, +): Promise => { + // Before state + const beforeSubgraph = await gns.subgraphs(subgraphID) + const [beforeTokens, beforeVSignal] = await getTokensAndVSignal( + beforeSubgraph.subgraphDeploymentID, + curation, + ) + + // Deposit + const { + 0: vSignalExpected, + 1: nSignalExpected, + 2: curationTax, + } = await gns.tokensToNSignal(subgraphID, tokensIn) + const tx = gns.connect(account.signer).mintSignal(subgraphID, tokensIn, 0) + await expect(tx) + .emit(gns, 'SignalMinted') + .withArgs(subgraphID, account.address, 
nSignalExpected, vSignalExpected, tokensIn) + + // After state + const afterSubgraph = await gns.subgraphs(subgraphID) + const [afterTokens, afterVSignal] = await getTokensAndVSignal( + afterSubgraph.subgraphDeploymentID, + curation, + ) + + // Check state + expect(afterTokens).eq(beforeTokens.add(tokensIn.sub(curationTax))) + expect(afterVSignal).eq(beforeVSignal.add(vSignalExpected)) + expect(afterSubgraph.nSignal).eq(beforeSubgraph.nSignal.add(nSignalExpected)) + expect(afterSubgraph.vSignal).eq(beforeVSignal.add(vSignalExpected)) + + return tx +} + +export const burnSignal = async ( + account: Account, + subgraphID: string, + gns: L1GNS | L2GNS, + curation: Curation | L2Curation, +): Promise => { + // Before state + const beforeSubgraph = await gns.subgraphs(subgraphID) + const [beforeTokens, beforeVSignal] = await getTokensAndVSignal( + beforeSubgraph.subgraphDeploymentID, + curation, + ) + const beforeUsersNSignal = await gns.getCuratorSignal(subgraphID, account.address) + + // Withdraw + const { 0: vSignalExpected, 1: tokensExpected } = await gns.nSignalToTokens( + subgraphID, + beforeUsersNSignal, + ) + + // Send tx + const tx = gns.connect(account.signer).burnSignal(subgraphID, beforeUsersNSignal, 0) + await expect(tx) + .emit(gns, 'SignalBurned') + .withArgs(subgraphID, account.address, beforeUsersNSignal, vSignalExpected, tokensExpected) + + // After state + const afterSubgraph = await gns.subgraphs(subgraphID) + const [afterTokens, afterVSignalCuration] = await getTokensAndVSignal( + afterSubgraph.subgraphDeploymentID, + curation, + ) + + // Check state + expect(afterTokens).eq(beforeTokens.sub(tokensExpected)) + expect(afterVSignalCuration).eq(beforeVSignal.sub(vSignalExpected)) + expect(afterSubgraph.nSignal).eq(beforeSubgraph.nSignal.sub(beforeUsersNSignal)) + + return tx +} + +export const deprecateSubgraph = async ( + account: Account, + subgraphID: string, + gns: L1GNS | L2GNS, + curation: Curation | L2Curation, + grt: GraphToken | L2GraphToken, 
+) => { + // Before state + const beforeSubgraph = await gns.subgraphs(subgraphID) + const [beforeTokens] = await getTokensAndVSignal(beforeSubgraph.subgraphDeploymentID, curation) + + // We can use the whole amount, since in this test suite all vSignal is used to be staked on nSignal + const ownerBalanceBefore = await grt.balanceOf(account.address) + + // Send tx + const tx = gns.connect(account.signer).deprecateSubgraph(subgraphID) + await expect(tx).emit(gns, 'SubgraphDeprecated').withArgs(subgraphID, beforeTokens) + + // After state + const afterSubgraph = await gns.subgraphs(subgraphID) + // Check marked as deprecated + expect(afterSubgraph.disabled).eq(true) + // Signal for the deployment must be all burned + expect(afterSubgraph.vSignal.eq(toBN('0'))) + // Cleanup reserve ratio + expect(afterSubgraph.reserveRatio).eq(0) + // Should be equal since owner pays curation tax + expect(afterSubgraph.withdrawableGRT).eq(beforeTokens) + + // Check balance of GNS increased by curation tax from owner being added + const afterGNSBalance = await grt.balanceOf(gns.address) + expect(afterGNSBalance).eq(afterSubgraph.withdrawableGRT) + // Check that the owner balance decreased by the curation tax + const ownerBalanceAfter = await grt.balanceOf(account.address) + expect(ownerBalanceBefore.eq(ownerBalanceAfter)) + + // Check NFT was burned + await expect(gns.ownerOf(subgraphID)).revertedWith('ERC721: owner query for nonexistent token') + + return tx +} From 2393e20116fd6b7383f8341172ff31009c9078eb Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 16 Dec 2022 16:37:11 -0300 Subject: [PATCH 065/112] test: add basic tests for subgraphSignal and subgraphTokens (OZ M-03) --- test/gns.test.ts | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/test/gns.test.ts b/test/gns.test.ts index f0093a7a1..dad4877d1 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -507,7 +507,40 @@ describe('L1GNS', () => { await 
expect(tx).revertedWith('ERC721: owner query for nonexistent token') }) }) - + describe('subgraphTokens', function () { + it('should return the correct number of tokens for a subgraph', async function () { + const subgraph = await publishNewSubgraph(me, newSubgraph0, gns) + const taxForMe = ( + await curation.tokensToSignal(subgraph.subgraphDeploymentID, tokens10000) + )[1] + await mintSignal(me, subgraph.id, tokens10000, gns, curation) + const taxForOther = ( + await curation.tokensToSignal(subgraph.subgraphDeploymentID, tokens1000) + )[1] + await mintSignal(other, subgraph.id, tokens1000, gns, curation) + expect(await gns.subgraphTokens(subgraph.id)).eq( + tokens10000.add(tokens1000).sub(taxForMe).sub(taxForOther), + ) + }) + }) + describe('subgraphSignal', function () { + it('should return the correct amount of signal for a subgraph', async function () { + const subgraph = await publishNewSubgraph(me, newSubgraph0, gns) + const vSignalForMe = ( + await curation.tokensToSignal(subgraph.subgraphDeploymentID, tokens10000) + )[0] + await mintSignal(me, subgraph.id, tokens10000, gns, curation) + const vSignalForOther = ( + await curation.tokensToSignal(subgraph.subgraphDeploymentID, tokens1000) + )[0] + await mintSignal(other, subgraph.id, tokens1000, gns, curation) + const expectedSignal = await gns.vSignalToNSignal( + subgraph.id, + vSignalForMe.add(vSignalForOther), + ) + expect(await gns.subgraphSignal(subgraph.id)).eq(expectedSignal) + }) + }) describe('deprecateSubgraph', async function () { let subgraph: Subgraph From 119fd79d27681c94f60548303a38ce4860d07961 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 16 Dec 2022 16:53:07 -0300 Subject: [PATCH 066/112] test: access control tests for subgraphNFT mint/burn (OZ M-03) --- test/gns.test.ts | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/gns.test.ts b/test/gns.test.ts index dad4877d1..d13d7c5ba 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -879,6 +879,18 @@ 
describe('L1GNS', () => { }) describe('NFT descriptor', function () { + it('cannot be minted by an account that is not the minter (i.e. GNS)', async function () { + const subgraphNFTAddress = await gns.subgraphNFT() + const subgraphNFT = getContractAt('SubgraphNFT', subgraphNFTAddress) as SubgraphNFT + const tx = subgraphNFT.connect(me.signer).mint(me.address, 1) + await expect(tx).revertedWith('Must be a minter') + }) + it('cannot be burned by an account that is not the minter (i.e. GNS)', async function () { + const subgraphNFTAddress = await gns.subgraphNFT() + const subgraphNFT = getContractAt('SubgraphNFT', subgraphNFTAddress) as SubgraphNFT + const tx = subgraphNFT.connect(me.signer).burn(1) + await expect(tx).revertedWith('Must be a minter') + }) it('with token descriptor', async function () { const subgraph0 = await publishNewSubgraph(me, newSubgraph0, gns) From 64dfcd0a471e9d958cccffb1bc580316e0f846a1 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 19 Dec 2022 10:10:50 -0300 Subject: [PATCH 067/112] fix: set curator signal to zero after claiming from L1 to L2 (OZ L-01) --- contracts/discovery/L1GNS.sol | 16 +++++++++++- test/gns.test.ts | 47 +++++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index ec0c48e06..09513844e 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -99,6 +99,9 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { * migrated to L2, by sending a retryable ticket to the L2GNS. * The balance will be claimed for a beneficiary address, as this method can be * used by curators that use a contract address in L1 that may not exist in L2. + * This will set the curator's signal on L1 to zero, so the caller must ensure + * that the retryable ticket is redeemed before expiration, or the signal will be lost. + * @dev Use the Arbitrum SDK to estimate the L2 retryable ticket parameters. 
* @param _subgraphID Subgraph ID * @param _beneficiary Address that will receive the tokens in L2 * @param _maxGas Max gas to use for the L2 retryable ticket @@ -122,12 +125,21 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { L2GasParams memory gasParams = L2GasParams(_maxSubmissionCost, _maxGas, _gasPriceBid); + uint256 curatorNSignal = getCuratorSignal(_subgraphID, msg.sender); + require(curatorNSignal != 0, "NO_SIGNAL"); bytes memory outboundCalldata = getClaimCuratorBalanceOutboundCalldata( _subgraphID, + curatorNSignal, msg.sender, _beneficiary ); + // Similarly to withdrawing from a deprecated subgraph, + // we remove the curator's signal from the subgraph. + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + subgraphData.curatorNSignal[msg.sender] = 0; + subgraphData.nSignal = subgraphData.nSignal.sub(curatorNSignal); + uint256 seqNum = sendTxToL2( arbitrumInboxAddress, counterpartGNSAddress, @@ -146,11 +158,13 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { * when calling claimCuratorBalanceToBeneficiaryOnL2. * This can be useful to estimate the L2 retryable ticket parameters. 
* @param _subgraphID Subgraph ID + * @param _curatorNSignal Curator's signal in the subgraph * @param _curator Curator address * @param _beneficiary Address that will own the signal in L2 */ function getClaimCuratorBalanceOutboundCalldata( uint256 _subgraphID, + uint256 _curatorNSignal, address _curator, address _beneficiary ) public view returns (bytes memory) { @@ -159,7 +173,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { IL2GNS.claimL1CuratorBalanceToBeneficiary.selector, _subgraphID, _curator, - getCuratorSignal(_subgraphID, _curator), + _curatorNSignal, _beneficiary ); } diff --git a/test/gns.test.ts b/test/gns.test.ts index d13d7c5ba..ded7a32a4 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1311,6 +1311,53 @@ describe('L1GNS', () => { .emit(gns, 'TxToL2') .withArgs(me.address, mockL2GNS.address, toBN('2'), expectedCalldata) }) + it('sets the curator signal to zero so it cannot be called twice', async function () { + let beforeCuratorNSignal: BigNumber + const subgraph0 = await publishCurateAndSendSubgraph(async (subgraphID) => { + beforeCuratorNSignal = await gns.getCuratorSignal(subgraphID, me.address) + }) + + const expectedCalldata = l2GNSIface.encodeFunctionData( + 'claimL1CuratorBalanceToBeneficiary', + [subgraph0.id, me.address, beforeCuratorNSignal, other.address], + ) + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + + const tx = gns + .connect(me.signer) + .claimCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + + // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 + await expect(tx) + .emit(gns, 'TxToL2') + .withArgs(me.address, mockL2GNS.address, toBN('2'), expectedCalldata) + expect(await gns.getCuratorSignal(subgraph0.id, me.address)).to.equal(toBN(0)) + + const tx2 = gns + 
.connect(me.signer) + .claimCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + await expect(tx2).revertedWith('NO_SIGNAL') + }) it('sends a transaction with a curator balance from a legacy subgraph to the L2GNS', async function () { const subgraphID = await publishAndCurateOnLegacySubgraph(toBN('2')) From 7e816981bdef7a225e2a6ea7b5e38a90dd4aa18b Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Dec 2022 12:50:39 -0300 Subject: [PATCH 068/112] fix: getClaimCuratorBalanceOutboundCalldata is pure now --- contracts/discovery/L1GNS.sol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 09513844e..5744c7a55 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -167,7 +167,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { uint256 _curatorNSignal, address _curator, address _beneficiary - ) public view returns (bytes memory) { + ) public pure returns (bytes memory) { return abi.encodeWithSelector( IL2GNS.claimL1CuratorBalanceToBeneficiary.selector, From de4c3ab74a525cc1cc22943b5d7a8e0b56d6f5ef Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 19 Dec 2022 11:42:31 -0300 Subject: [PATCH 069/112] fix: make CurationPool.reserveRatio properly deprecated in L2 (OZ L-02) --- contracts/curation/Curation.sol | 29 +++++++--------------- contracts/curation/CurationStorage.sol | 4 ++-- contracts/l2/curation/L2Curation.sol | 33 ++++++++------------------ test/l2/l2Curation.test.ts | 10 ++++++-- 4 files changed, 29 insertions(+), 47 deletions(-) diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index a51c95902..5d457c7ea 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -150,7 +150,7 @@ contract Curation is CurationV2Storage, 
GraphUpgradeable { ); // Collect new funds into reserve - CurationPool storage curationPool = _pools[_subgraphDeploymentID]; + CurationPool storage curationPool = pools[_subgraphDeploymentID]; curationPool.tokens = curationPool.tokens.add(_tokens); emit Collected(_subgraphDeploymentID, _tokens); @@ -178,7 +178,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { require(signalOut >= _signalOutMin, "Slippage protection"); address curator = msg.sender; - CurationPool storage curationPool = _pools[_subgraphDeploymentID]; + CurationPool storage curationPool = pools[_subgraphDeploymentID]; // If it hasn't been curated before then initialize the curve if (!isCurated(_subgraphDeploymentID)) { @@ -247,7 +247,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { _updateRewards(_subgraphDeploymentID); // Update curation pool - CurationPool storage curationPool = _pools[_subgraphDeploymentID]; + CurationPool storage curationPool = pools[_subgraphDeploymentID]; curationPool.tokens = curationPool.tokens.sub(tokensOut); curationPool.gcs.burnFrom(curator, _signalIn); @@ -277,18 +277,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { override returns (uint256) { - return _pools[_subgraphDeploymentID].tokens; - } - - /** - * @notice Get a curation pool for a subgraph deployment - * @dev We add this when making the pools variable internal, to keep - * backwards compatibility. 
- * @param _subgraphDeploymentID Subgraph deployment for which to get the curation pool - * @return Curation pool for the subgraph deployment - */ - function pools(bytes32 _subgraphDeploymentID) external view returns (CurationPool memory) { - return _pools[_subgraphDeploymentID]; + return pools[_subgraphDeploymentID].tokens; } /** @@ -297,7 +286,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { * @return True if curated */ function isCurated(bytes32 _subgraphDeploymentID) public view override returns (bool) { - return _pools[_subgraphDeploymentID].tokens != 0; + return pools[_subgraphDeploymentID].tokens != 0; } /** @@ -312,7 +301,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { override returns (uint256) { - IGraphCurationToken gcs = _pools[_subgraphDeploymentID].gcs; + IGraphCurationToken gcs = pools[_subgraphDeploymentID].gcs; return (address(gcs) == address(0)) ? 0 : gcs.balanceOf(_curator); } @@ -327,7 +316,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { override returns (uint256) { - IGraphCurationToken gcs = _pools[_subgraphDeploymentID].gcs; + IGraphCurationToken gcs = pools[_subgraphDeploymentID].gcs; return (address(gcs) == address(0)) ? 
0 : gcs.totalSupply(); } @@ -361,7 +350,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { returns (uint256) { // Get curation pool tokens and signal - CurationPool memory curationPool = _pools[_subgraphDeploymentID]; + CurationPool memory curationPool = pools[_subgraphDeploymentID]; // Init curation pool if (curationPool.tokens == 0) { @@ -401,7 +390,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { override returns (uint256) { - CurationPool memory curationPool = _pools[_subgraphDeploymentID]; + CurationPool memory curationPool = pools[_subgraphDeploymentID]; uint256 curationPoolSignal = getCurationPoolSignal(_subgraphDeploymentID); require( curationPool.tokens != 0, diff --git a/contracts/curation/CurationStorage.sol b/contracts/curation/CurationStorage.sol index 12a6f697e..d26ec0d96 100644 --- a/contracts/curation/CurationStorage.sol +++ b/contracts/curation/CurationStorage.sol @@ -17,7 +17,7 @@ abstract contract CurationV1Storage is Managed, ICuration { */ struct CurationPool { uint256 tokens; // GRT Tokens stored as reserves for the subgraph deployment - uint32 reserveRatio; // Ratio for the bonding curve, unused in L2 + uint32 reserveRatio; // Ratio for the bonding curve, unused and deprecated in L2 where it will always be 100% but appear as 0 IGraphCurationToken gcs; // Curation token contract for this curation pool } @@ -46,7 +46,7 @@ abstract contract CurationV1Storage is Managed, ICuration { /// @dev Mapping of subgraphDeploymentID => CurationPool /// There is only one CurationPool per SubgraphDeploymentID - mapping(bytes32 => CurationPool) internal _pools; + mapping(bytes32 => CurationPool) public pools; } abstract contract CurationV2Storage is CurationV1Storage, Initializable { diff --git a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol index 778147f5f..350380e6d 100644 --- a/contracts/l2/curation/L2Curation.sol +++ b/contracts/l2/curation/L2Curation.sol @@ -161,7 +161,7 @@ contract L2Curation is 
CurationV2Storage, GraphUpgradeable, IL2Curation { ); // Collect new funds into reserve - CurationPool storage curationPool = _pools[_subgraphDeploymentID]; + CurationPool storage curationPool = pools[_subgraphDeploymentID]; curationPool.tokens = curationPool.tokens.add(_tokens); emit Collected(_subgraphDeploymentID, _tokens); @@ -189,7 +189,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { require(signalOut >= _signalOutMin, "Slippage protection"); address curator = msg.sender; - CurationPool storage curationPool = _pools[_subgraphDeploymentID]; + CurationPool storage curationPool = pools[_subgraphDeploymentID]; // If it hasn't been curated before then initialize the curve if (!isCurated(_subgraphDeploymentID)) { @@ -251,7 +251,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { require(signalOut >= _signalOutMin, "Slippage protection"); address curator = msg.sender; - CurationPool storage curationPool = _pools[_subgraphDeploymentID]; + CurationPool storage curationPool = pools[_subgraphDeploymentID]; // If it hasn't been curated before then initialize the curve if (!isCurated(_subgraphDeploymentID)) { @@ -319,7 +319,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { _updateRewards(_subgraphDeploymentID); // Update curation pool - CurationPool storage curationPool = _pools[_subgraphDeploymentID]; + CurationPool storage curationPool = pools[_subgraphDeploymentID]; curationPool.tokens = curationPool.tokens.sub(tokensOut); curationPool.gcs.burnFrom(curator, _signalIn); @@ -348,20 +348,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { override returns (uint256) { - return _pools[_subgraphDeploymentID].tokens; - } - - /** - * @notice Get a curation pool for a subgraph deployment - * @dev We add this when making the pools variable internal, to keep - * backwards compatibility. 
- * @param _subgraphDeploymentID Subgraph deployment for which to get the curation pool - * @return Curation pool for the subgraph deployment - */ - function pools(bytes32 _subgraphDeploymentID) external view returns (CurationPool memory) { - CurationPool memory pool = _pools[_subgraphDeploymentID]; - pool.reserveRatio = fixedReserveRatio; - return pool; + return pools[_subgraphDeploymentID].tokens; } /** @@ -370,7 +357,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { * @return True if curated */ function isCurated(bytes32 _subgraphDeploymentID) public view override returns (bool) { - return _pools[_subgraphDeploymentID].tokens != 0; + return pools[_subgraphDeploymentID].tokens != 0; } /** @@ -385,7 +372,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { override returns (uint256) { - IGraphCurationToken gcs = _pools[_subgraphDeploymentID].gcs; + IGraphCurationToken gcs = pools[_subgraphDeploymentID].gcs; return (address(gcs) == address(0)) ? 0 : gcs.balanceOf(_curator); } @@ -400,7 +387,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { override returns (uint256) { - IGraphCurationToken gcs = _pools[_subgraphDeploymentID].gcs; + IGraphCurationToken gcs = pools[_subgraphDeploymentID].gcs; return (address(gcs) == address(0)) ? 
0 : gcs.totalSupply(); } @@ -451,7 +438,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { override returns (uint256) { - CurationPool memory curationPool = _pools[_subgraphDeploymentID]; + CurationPool memory curationPool = pools[_subgraphDeploymentID]; uint256 curationPoolSignal = getCurationPoolSignal(_subgraphDeploymentID); require( curationPool.tokens != 0, @@ -529,7 +516,7 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { returns (uint256) { // Get curation pool tokens and signal - CurationPool memory curationPool = _pools[_subgraphDeploymentID]; + CurationPool memory curationPool = pools[_subgraphDeploymentID]; // Init curation pool if (curationPool.tokens == 0) { diff --git a/test/l2/l2Curation.test.ts b/test/l2/l2Curation.test.ts index 930861156..f082efa97 100644 --- a/test/l2/l2Curation.test.ts +++ b/test/l2/l2Curation.test.ts @@ -72,6 +72,10 @@ describe('L2Curation:Config', () => { // Set right in the constructor expect(await curation.defaultReserveRatio()).eq(MAX_PPM) }) + it('cannot be changed because the setter is not implemented', async function () { + const tx = curation.connect(governor.signer).setDefaultReserveRatio(10) + await expect(tx).revertedWith('Not implemented in L2') + }) }) describe('minimumCurationDeposit', function () { @@ -234,7 +238,8 @@ describe('L2Curation', () => { // Allocated and balance updated expect(afterPool.tokens).eq(beforePool.tokens.add(tokensToDeposit.sub(curationTax))) expect(afterPoolSignal).eq(beforePoolSignal.add(expectedSignal)) - expect(afterPool.reserveRatio).eq(await curation.defaultReserveRatio()) + // Pool reserveRatio is deprecated and therefore always zero in L2 + expect(afterPool.reserveRatio).eq(0) // Contract balance updated expect(afterTotalTokens).eq(beforeTotalTokens.add(tokensToDeposit.sub(curationTax))) // Total supply is reduced to curation tax burning @@ -272,7 +277,8 @@ describe('L2Curation', () => { // Allocated and balance updated 
expect(afterPool.tokens).eq(beforePool.tokens.add(tokensToDeposit)) expect(afterPoolSignal).eq(beforePoolSignal.add(expectedSignal)) - expect(afterPool.reserveRatio).eq(await curation.defaultReserveRatio()) + // Pool reserveRatio is deprecated and therefore always zero in L2 + expect(afterPool.reserveRatio).eq(0) // Contract balance updated expect(afterTotalTokens).eq(beforeTotalTokens.add(tokensToDeposit)) // Total supply is reduced to curation tax burning From f61eae83a6ac90db91457b5339caddd61950eed3 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 19 Dec 2022 16:16:10 -0300 Subject: [PATCH 070/112] fix: improve NatSpec coverage and complete IManaged (OZ L-03) --- contracts/curation/Curation.sol | 50 +++++++++------- contracts/curation/CurationStorage.sol | 16 ++++++ contracts/curation/ICuration.sol | 80 ++++++++++++++++++++++++++ contracts/governance/IManaged.sol | 25 ++++++++ contracts/governance/Managed.sol | 6 +- 5 files changed, 153 insertions(+), 24 deletions(-) diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index 5d457c7ea..0dafc7631 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -33,10 +33,10 @@ import { GraphCurationToken } from "./GraphCurationToken.sol"; contract Curation is CurationV2Storage, GraphUpgradeable { using SafeMathUpgradeable for uint256; - // 100% in parts per million + /// @dev 100% in parts per million uint32 private constant MAX_PPM = 1000000; - // Amount of signal you get with your minimum token deposit + /// @dev Amount of signal you get with your minimum token deposit uint256 private constant SIGNAL_PER_MINIMUM_DEPOSIT = 1e18; // 1 signal as 18 decimal number // -- Events -- @@ -72,7 +72,13 @@ contract Curation is CurationV2Storage, GraphUpgradeable { event Collected(bytes32 indexed subgraphDeploymentID, uint256 tokens); /** - * @dev Initialize this contract. + * @notice Initialize this contract. 
+ * @param _controller Address of the controller contract that manages this contract + * @param _bondingCurve Address of the bonding curve contract (e.g. a BancorFormula) + * @param _curationTokenMaster Address of the master copy to use for curation tokens + * @param _defaultReserveRatio Default reserve ratio for a curation pool in PPM + * @param _curationTaxPercentage Percentage of curation tax to charge when depositing GRT tokens + * @param _minimumCurationDeposit Minimum amount of tokens that can be deposited on a new subgraph deployment */ function initialize( address _controller, @@ -96,7 +102,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { /** * @dev Set the default reserve ratio percentage for a curation pool. - * @notice Update the default reserver ratio to `_defaultReserveRatio` + * @notice Update the default reserve ratio to `_defaultReserveRatio` * @param _defaultReserveRatio Reserve ratio (in PPM) */ function setDefaultReserveRatio(uint32 _defaultReserveRatio) external override onlyGovernor { @@ -117,7 +123,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { } /** - * @dev Set the curation tax percentage to charge when a curator deposits GRT tokens. + * @notice Set the curation tax percentage to charge when a curator deposits GRT tokens. * @param _percentage Curation tax percentage charged when depositing GRT tokens */ function setCurationTaxPercentage(uint32 _percentage) external override onlyGovernor { @@ -125,7 +131,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { } /** - * @dev Set the master copy to use as clones for the curation token. + * @notice Set the master copy to use as clones for the curation token. 
* @param _curationTokenMaster Address of implementation contract to use for curation tokens */ function setCurationTokenMaster(address _curationTokenMaster) external override onlyGovernor { @@ -133,8 +139,8 @@ contract Curation is CurationV2Storage, GraphUpgradeable { } /** - * @dev Assign Graph Tokens collected as curation fees to the curation pool reserve. - * This function can only be called by the Staking contract and will do the bookeeping of + * @notice Assign Graph Tokens collected as curation fees to the curation pool reserve. + * @dev This function can only be called by the Staking contract and will do the bookkeeping of * transferred tokens into this contract. * @param _subgraphDeploymentID SubgraphDeployment where funds should be allocated as reserves * @param _tokens Amount of Graph Tokens to add to reserves @@ -157,11 +163,12 @@ contract Curation is CurationV2Storage, GraphUpgradeable { } /** - * @dev Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. + * @notice Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. * @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal * @param _tokensIn Amount of Graph Tokens to deposit * @param _signalOutMin Expected minimum amount of signal to receive - * @return Signal minted and deposit tax + * @return Amount of signal minted + * @return Amount of curation tax burned */ function mint( bytes32 _subgraphDeploymentID, @@ -267,7 +274,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { } /** - * @dev Get the amount of token reserves in a curation pool. + * @notice Get the amount of token reserves in a curation pool. * @param _subgraphDeploymentID Subgraph deployment curation poool * @return Amount of token reserves in the curation pool */ @@ -281,16 +288,16 @@ contract Curation is CurationV2Storage, GraphUpgradeable { } /** - * @dev Check if any GRT tokens are deposited for a SubgraphDeployment. 
+ * @notice Check if any GRT tokens are deposited for a SubgraphDeployment. * @param _subgraphDeploymentID SubgraphDeployment to check if curated - * @return True if curated + * @return True if curated, false otherwise */ function isCurated(bytes32 _subgraphDeploymentID) public view override returns (bool) { return pools[_subgraphDeploymentID].tokens != 0; } /** - * @dev Get the amount of signal a curator has in a curation pool. + * @notice Get the amount of signal a curator has in a curation pool. * @param _curator Curator owning the signal tokens * @param _subgraphDeploymentID Subgraph deployment curation pool * @return Amount of signal owned by a curator for the subgraph deployment @@ -306,7 +313,7 @@ contract Curation is CurationV2Storage, GraphUpgradeable { } /** - * @dev Get the amount of signal in a curation pool. + * @notice Get the amount of signal in a curation pool. * @param _subgraphDeploymentID Subgraph deployment curation poool * @return Amount of signal minted for the subgraph deployment */ @@ -321,11 +328,12 @@ contract Curation is CurationV2Storage, GraphUpgradeable { } /** - * @dev Calculate amount of signal that can be bought with tokens in a curation pool. + * @notice Calculate amount of signal that can be bought with tokens in a curation pool. * This function considers and excludes the deposit tax. * @param _subgraphDeploymentID Subgraph deployment to mint signal * @param _tokensIn Amount of tokens used to mint signal - * @return Amount of signal that can be bought and tokens subtracted for the tax + * @return Amount of signal that can be bought + * @return Amount of tokens that will be burned as curation tax */ function tokensToSignal(bytes32 _subgraphDeploymentID, uint256 _tokensIn) public @@ -379,10 +387,10 @@ contract Curation is CurationV2Storage, GraphUpgradeable { } /** - * @dev Calculate number of tokens to get when burning signal from a curation pool. 
+ * @notice Calculate number of tokens to get when burning signal from a curation pool. * @param _subgraphDeploymentID Subgraph deployment to burn signal * @param _signalIn Amount of signal to burn - * @return Amount of tokens to get for an amount of signal + * @return Amount of tokens to get for the specified amount of signal */ function signalToTokens(bytes32 _subgraphDeploymentID, uint256 _signalIn) public @@ -440,8 +448,8 @@ contract Curation is CurationV2Storage, GraphUpgradeable { } /** - * @dev Internal: Set the curation tax percentage to charge when a curator deposits GRT tokens. - * @param _percentage Curation tax percentage charged when depositing GRT tokens + * @dev Internal: Set the curation tax percentage (in PPM) to charge when a curator deposits GRT tokens. + * @param _percentage Curation tax charged when depositing GRT tokens in PPM */ function _setCurationTaxPercentage(uint32 _percentage) private { require( diff --git a/contracts/curation/CurationStorage.sol b/contracts/curation/CurationStorage.sol index d26ec0d96..bcbf0df6b 100644 --- a/contracts/curation/CurationStorage.sol +++ b/contracts/curation/CurationStorage.sol @@ -8,6 +8,13 @@ import { ICuration } from "./ICuration.sol"; import { IGraphCurationToken } from "./IGraphCurationToken.sol"; import { Managed } from "../governance/Managed.sol"; +/** + * @title Curation Storage version 1 + * @dev This contract holds the first version of the storage variables + * for the Curation and L2Curation contracts. + * When adding new variables, create a new version that inherits this and update + * the contracts to use the new version instead. + */ abstract contract CurationV1Storage is Managed, ICuration { // -- Pool -- @@ -49,6 +56,15 @@ abstract contract CurationV1Storage is Managed, ICuration { mapping(bytes32 => CurationPool) public pools; } +/** + * @title Curation Storage version 2 + * @dev This contract holds the second version of the storage variables + * for the Curation and L2Curation contracts. 
+ * It doesn't add new variables at this contract's level, but adds the Initializable + * contract to the inheritance chain, which includes storage variables. + * When adding new variables, create a new version that inherits this and update + * the contracts to use the new version instead. + */ abstract contract CurationV2Storage is CurationV1Storage, Initializable { // Nothing here, just adding Initializable } diff --git a/contracts/curation/ICuration.sol b/contracts/curation/ICuration.sol index 9e1701aaf..712a76efb 100644 --- a/contracts/curation/ICuration.sol +++ b/contracts/curation/ICuration.sol @@ -4,55 +4,135 @@ pragma solidity ^0.7.6; import "./IGraphCurationToken.sol"; +/** + * @title Curation Interface + * @dev Interface for the Curation contract (and L2Curation too) + */ interface ICuration { // -- Configuration -- + /** + * @notice Update the default reserve ratio to `_defaultReserveRatio` + * @param _defaultReserveRatio Reserve ratio (in PPM) + */ function setDefaultReserveRatio(uint32 _defaultReserveRatio) external; + /** + * @notice Update the minimum deposit amount needed to initialize a new subgraph + * @param _minimumCurationDeposit Minimum amount of tokens required to deposit + */ function setMinimumCurationDeposit(uint256 _minimumCurationDeposit) external; + /** + * @notice Set the curation tax percentage to charge when a curator deposits GRT tokens. + * @param _percentage Curation tax percentage charged when depositing GRT tokens + */ function setCurationTaxPercentage(uint32 _percentage) external; + /** + * @notice Set the master copy to use as clones for the curation token. + * @param _curationTokenMaster Address of implementation contract to use for curation tokens + */ function setCurationTokenMaster(address _curationTokenMaster) external; // -- Curation -- + /** + * @notice Deposit Graph Tokens in exchange for signal of a SubgraphDeployment curation pool. 
+ * @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal + * @param _tokensIn Amount of Graph Tokens to deposit + * @param _signalOutMin Expected minimum amount of signal to receive + * @return Amount of signal minted + * @return Amount of curation tax burned + */ function mint( bytes32 _subgraphDeploymentID, uint256 _tokensIn, uint256 _signalOutMin ) external returns (uint256, uint256); + /** + * @notice Burn _signal from the SubgraphDeployment curation pool + * @param _subgraphDeploymentID SubgraphDeployment the curator is returning signal + * @param _signalIn Amount of signal to return + * @param _tokensOutMin Expected minimum amount of tokens to receive + * @return Tokens returned + */ function burn( bytes32 _subgraphDeploymentID, uint256 _signalIn, uint256 _tokensOutMin ) external returns (uint256); + /** + * @notice Assign Graph Tokens collected as curation fees to the curation pool reserve. + * @param _subgraphDeploymentID SubgraphDeployment where funds should be allocated as reserves + * @param _tokens Amount of Graph Tokens to add to reserves + */ function collect(bytes32 _subgraphDeploymentID, uint256 _tokens) external; // -- Getters -- + /** + * @notice Check if any GRT tokens are deposited for a SubgraphDeployment. + * @param _subgraphDeploymentID SubgraphDeployment to check if curated + * @return True if curated, false otherwise + */ function isCurated(bytes32 _subgraphDeploymentID) external view returns (bool); + /** + * @notice Get the amount of signal a curator has in a curation pool. + * @param _curator Curator owning the signal tokens + * @param _subgraphDeploymentID Subgraph deployment curation pool + * @return Amount of signal owned by a curator for the subgraph deployment + */ function getCuratorSignal(address _curator, bytes32 _subgraphDeploymentID) external view returns (uint256); + /** + * @notice Get the amount of signal in a curation pool. 
+ * @param _subgraphDeploymentID Subgraph deployment curation pool + * @return Amount of signal minted for the subgraph deployment + */ function getCurationPoolSignal(bytes32 _subgraphDeploymentID) external view returns (uint256); + /** + * @notice Get the amount of token reserves in a curation pool. + * @param _subgraphDeploymentID Subgraph deployment curation pool + * @return Amount of token reserves in the curation pool + */ function getCurationPoolTokens(bytes32 _subgraphDeploymentID) external view returns (uint256); + /** + * @notice Calculate amount of signal that can be bought with tokens in a curation pool. + * This function considers and excludes the deposit tax. + * @param _subgraphDeploymentID Subgraph deployment to mint signal + * @param _tokensIn Amount of tokens used to mint signal + * @return Amount of signal that can be bought + * @return Amount of tokens that will be burned as curation tax + */ function tokensToSignal(bytes32 _subgraphDeploymentID, uint256 _tokensIn) external view returns (uint256, uint256); + /** + * @notice Calculate number of tokens to get when burning signal from a curation pool. + * @param _subgraphDeploymentID Subgraph deployment to burn signal + * @param _signalIn Amount of signal to burn + * @return Amount of tokens to get for the specified amount of signal + */ function signalToTokens(bytes32 _subgraphDeploymentID, uint256 _signalIn) external view returns (uint256); + /** + * @notice Tax charged when curators deposit funds. + * Parts per million. 
(Allows for 4 decimal points, 999,999 = 99.9999%) + * @return Curation tax percentage expressed in PPM + */ function curationTaxPercentage() external view returns (uint32); } diff --git a/contracts/governance/IManaged.sol b/contracts/governance/IManaged.sol index 1a458a460..76f05e0fb 100644 --- a/contracts/governance/IManaged.sol +++ b/contracts/governance/IManaged.sol @@ -2,6 +2,31 @@ pragma solidity ^0.7.6; +import { IController } from "./IController.sol"; + +/** + * @title Managed Interface + * @dev Interface for contracts that can be managed by a controller. + */ interface IManaged { + /** + * @notice Set the controller that manages this contract + * @dev Only the current controller can set a new controller + * @param _controller Address of the new controller + */ function setController(address _controller) external; + + /** + * @notice Sync protocol contract addresses from the Controller registry + * @dev This function will cache all the contracts using the latest addresses. + * Anyone can call the function whenever a Proxy contract change in the + * controller to ensure the protocol is using the latest version. 
+ */ + function syncAllContracts() external; + + /** + * @notice Get the Controller that manages this contract + * @return The Controller as an IController interface + */ + function controller() external view returns (IController); } diff --git a/contracts/governance/Managed.sol b/contracts/governance/Managed.sol index fde7d7954..70f7c6171 100644 --- a/contracts/governance/Managed.sol +++ b/contracts/governance/Managed.sol @@ -26,8 +26,8 @@ import { IManaged } from "./IManaged.sol"; abstract contract Managed is IManaged { // -- State -- - /// Controller that contract is registered with - IController public controller; + /// Controller that manages this contract + IController public override controller; /// @dev Cache for the addresses of the contracts retrieved from the controller mapping(bytes32 => address) private _addressCache; /// @dev Gap for future storage variables @@ -230,7 +230,7 @@ abstract contract Managed is IManaged { * Anyone can call the function whenever a Proxy contract change in the * controller to ensure the protocol is using the latest version */ - function syncAllContracts() external { + function syncAllContracts() external override { _syncContract("Curation"); _syncContract("EpochManager"); _syncContract("RewardsManager"); From 2a7cd68130305546cd8583a95800825c96803fbb Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 19 Dec 2022 16:40:59 -0300 Subject: [PATCH 071/112] fix: rename SubgraphData.reserveRatio to reserveRatioDeprecated (OZ N-02) --- contracts/discovery/GNS.sol | 4 ++-- contracts/discovery/IGNS.sol | 2 +- contracts/discovery/L1GNS.sol | 2 +- contracts/l2/discovery/L2GNS.sol | 4 ++-- test/gns.test.ts | 2 -- test/l2/l2GNS.test.ts | 6 +++--- test/lib/gnsUtils.ts | 6 +++--- 7 files changed, 12 insertions(+), 14 deletions(-) diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index 74de358a7..6d83d6c18 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -258,7 +258,7 @@ contract 
GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { uint256 subgraphID = _nextSubgraphID(subgraphOwner); SubgraphData storage subgraphData = _getSubgraphData(subgraphID); subgraphData.subgraphDeploymentID = _subgraphDeploymentID; - subgraphData.reserveRatio = fixedReserveRatio; + subgraphData.reserveRatioDeprecated = fixedReserveRatio; // Mint the NFT. Use the subgraphID as tokenID. // This function will check the if tokenID already exists. @@ -372,7 +372,7 @@ contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { // Deprecate the subgraph and do cleanup subgraphData.disabled = true; subgraphData.vSignal = 0; - subgraphData.reserveRatio = 0; + subgraphData.reserveRatioDeprecated = 0; // NOTE: We don't reset the following variable as we use it to test if the Subgraph was ever created // subgraphData.subgraphDeploymentID = 0; diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index c7557a98d..13ec754c8 100644 --- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -18,7 +18,7 @@ interface IGNS { uint256 nSignal; // The token of the subgraph bonding curve mapping(address => uint256) curatorNSignal; bytes32 subgraphDeploymentID; - uint32 reserveRatio; // Ratio for the bonding curve, always 1 in PPM, kept only for backwards compatibility + uint32 reserveRatioDeprecated; // Ratio for the bonding curve, always 1 in PPM, deprecated. 
bool disabled; uint256 withdrawableGRT; } diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 5744c7a55..38070ffe9 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -89,7 +89,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { data ); - subgraphData.reserveRatio = 0; + subgraphData.reserveRatioDeprecated = 0; _burnNFT(_subgraphID); emit SubgraphSentToL2(_subgraphID, _l2Owner); } diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 6ce3a5ac3..516844d7a 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -156,7 +156,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { migratedData.l2Done = true; uint256 withdrawableGRT = migratedData.tokens; subgraphData.withdrawableGRT = withdrawableGRT; - subgraphData.reserveRatio = 0; + subgraphData.reserveRatioDeprecated = 0; _burnNFT(_subgraphID); emit SubgraphDeprecated(_subgraphID, withdrawableGRT); } @@ -278,7 +278,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - subgraphData.reserveRatio = fixedReserveRatio; + subgraphData.reserveRatioDeprecated = fixedReserveRatio; // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called subgraphData.disabled = true; subgraphData.nSignal = _nSignal; diff --git a/test/gns.test.ts b/test/gns.test.ts index ded7a32a4..90da2648d 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -87,7 +87,6 @@ describe('L1GNS', () => { async function calcGNSBondingCurve( gnsSupply: BigNumber, // nSignal gnsReserveBalance: BigNumber, // vSignal - gnsReserveRatio: number, // default reserve ratio of GNS depositAmount: BigNumber, // GRT deposited subgraphID: string, ): Promise { @@ -753,7 +752,6 @@ describe('L1GNS', () => { const expectedNSignal = await calcGNSBondingCurve( 
beforeSubgraph.nSignal, beforeSubgraph.vSignal, - beforeSubgraph.reserveRatio, tokensToDeposit.sub(curationTax), beforeSubgraph.subgraphDeploymentID, ) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 81ad95d06..670d14a5e 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -294,7 +294,7 @@ describe('L2GNS', () => { expect(subgraphData.vSignal).eq(0) expect(subgraphData.nSignal).eq(nSignal) expect(subgraphData.subgraphDeploymentID).eq(HashZero) - expect(subgraphData.reserveRatio).eq(DEFAULT_RESERVE_RATIO) + expect(subgraphData.reserveRatioDeprecated).eq(DEFAULT_RESERVE_RATIO) expect(subgraphData.disabled).eq(true) expect(subgraphData.withdrawableGRT).eq(0) // Important so that it's not the same as a deprecated subgraph! @@ -333,7 +333,7 @@ describe('L2GNS', () => { expect(subgraphData.vSignal).eq(0) expect(subgraphData.nSignal).eq(nSignal) expect(subgraphData.subgraphDeploymentID).eq(HashZero) - expect(subgraphData.reserveRatio).eq(DEFAULT_RESERVE_RATIO) + expect(subgraphData.reserveRatioDeprecated).eq(DEFAULT_RESERVE_RATIO) expect(subgraphData.disabled).eq(true) expect(subgraphData.withdrawableGRT).eq(0) // Important so that it's not the same as a deprecated subgraph! 
@@ -344,7 +344,7 @@ describe('L2GNS', () => { expect(l2SubgraphData.vSignal).eq(0) expect(l2SubgraphData.nSignal).eq(0) expect(l2SubgraphData.subgraphDeploymentID).eq(l2Subgraph.subgraphDeploymentID) - expect(l2SubgraphData.reserveRatio).eq(DEFAULT_RESERVE_RATIO) + expect(l2SubgraphData.reserveRatioDeprecated).eq(DEFAULT_RESERVE_RATIO) expect(l2SubgraphData.disabled).eq(false) expect(l2SubgraphData.withdrawableGRT).eq(0) }) diff --git a/test/lib/gnsUtils.ts b/test/lib/gnsUtils.ts index 6d7bd2105..1314f9c52 100644 --- a/test/lib/gnsUtils.ts +++ b/test/lib/gnsUtils.ts @@ -20,7 +20,7 @@ export interface Subgraph { vSignal: BigNumber nSignal: BigNumber subgraphDeploymentID: string - reserveRatio: number + reserveRatioDeprecated: number disabled: boolean withdrawableGRT: BigNumber id?: string @@ -102,7 +102,7 @@ export const publishNewSubgraph = async ( expect(subgraph.vSignal).eq(0) expect(subgraph.nSignal).eq(0) expect(subgraph.subgraphDeploymentID).eq(newSubgraph.subgraphDeploymentID) - expect(subgraph.reserveRatio).eq(DEFAULT_RESERVE_RATIO) + expect(subgraph.reserveRatioDeprecated).eq(DEFAULT_RESERVE_RATIO) expect(subgraph.disabled).eq(false) expect(subgraph.withdrawableGRT).eq(0) @@ -313,7 +313,7 @@ export const deprecateSubgraph = async ( // Signal for the deployment must be all burned expect(afterSubgraph.vSignal.eq(toBN('0'))) // Cleanup reserve ratio - expect(afterSubgraph.reserveRatio).eq(0) + expect(afterSubgraph.reserveRatioDeprecated).eq(0) // Should be equal since owner pays curation tax expect(afterSubgraph.withdrawableGRT).eq(beforeTokens) From 8e7e9a0d806855d1db8f3a2c3414ba4b786c267e Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Dec 2022 12:07:42 -0300 Subject: [PATCH 072/112] fix: split the migration data struct between L1 and L2GNS (OZ N-03) --- contracts/discovery/GNSStorage.sol | 2 -- contracts/discovery/IGNS.sol | 14 -------------- contracts/discovery/L1GNS.sol | 12 ++++-------- contracts/discovery/L1GNSStorage.sol | 2 ++ 
contracts/l2/discovery/IL2GNS.sol | 11 +++++++++++ contracts/l2/discovery/L2GNS.sol | 8 ++++---- contracts/l2/discovery/L2GNSStorage.sol | 4 ++++ test/gns.test.ts | 8 ++++---- test/l2/l2GNS.test.ts | 2 -- 9 files changed, 29 insertions(+), 34 deletions(-) diff --git a/contracts/discovery/GNSStorage.sol b/contracts/discovery/GNSStorage.sol index 877130189..0bc216a87 100644 --- a/contracts/discovery/GNSStorage.sol +++ b/contracts/discovery/GNSStorage.sol @@ -68,8 +68,6 @@ abstract contract GNSV2Storage is GNSV1Storage { * reduce the size of the gap accordingly. */ abstract contract GNSV3Storage is GNSV2Storage, Initializable { - /// Data for subgraph migration from L1 to L2, some fields will be empty or set differently on each layer - mapping(uint256 => IGNS.SubgraphL2MigrationData) public subgraphL2MigrationData; /// Address of the counterpart GNS contract (L1GNS/L2GNS) address public counterpartGNSAddress; /// @dev Gap to allow adding variables in future upgrades (since L1GNS and L2GNS have their own storage as well) diff --git a/contracts/discovery/IGNS.sol b/contracts/discovery/IGNS.sol index 13ec754c8..80cc99820 100644 --- a/contracts/discovery/IGNS.sol +++ b/contracts/discovery/IGNS.sol @@ -23,20 +23,6 @@ interface IGNS { uint256 withdrawableGRT; } - /** - * @dev The SubgraphL2MigrationData struct holds information - * about a subgraph related to its migration from L1 to L2. - * Some fields of this are used by the L1GNS, and some are used by - * the L2GNS. 
- */ - struct SubgraphL2MigrationData { - uint256 tokens; // GRT that will be sent to L2 to mint signal - bool l1Done; // Migration finished on L1 side - mapping(address => bool) curatorBalanceClaimed; // True for curators whose balance has been claimed in L2 - bool l2Done; // Migration finished on L2 side - uint256 subgraphReceivedOnL2BlockNumber; // Block number when the subgraph was received on L2 - } - /** * @dev The LegacySubgraphKey struct holds the account and sequence ID * used to generate subgraph IDs in legacy subgraphs. diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 38070ffe9..c6108cc37 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -56,15 +56,13 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { uint256 _gasPriceBid, uint256 _maxSubmissionCost ) external payable notPartialPaused { - SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; - - require(!migrationData.l1Done, "ALREADY_DONE"); + require(!subgraphMigratedToL2[_subgraphID], "ALREADY_DONE"); SubgraphData storage subgraphData = _getSubgraphOrRevert(_subgraphID); - // This is just like onlySubgraphAuth, but we want it to run after the l1Done check + // This is just like onlySubgraphAuth, but we want it to run after the subgraphMigratedToL2 check // to revert with a nicer message in that case: require(ownerOf(_subgraphID) == msg.sender, "GNS: Must be authorized"); - migrationData.l1Done = true; + subgraphMigratedToL2[_subgraphID] = true; uint256 curationTokens = curation().burn( subgraphData.subgraphDeploymentID, @@ -116,9 +114,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { uint256 _gasPriceBid, uint256 _maxSubmissionCost ) external payable notPartialPaused returns (bytes memory) { - SubgraphL2MigrationData storage migrationData = subgraphL2MigrationData[_subgraphID]; - - require(migrationData.l1Done, "!MIGRATED"); + require(subgraphMigratedToL2[_subgraphID], 
"!MIGRATED"); // The Arbitrum bridge will check this too, we just check here for an early exit require(_maxSubmissionCost != 0, "NO_SUBMISSION_COST"); diff --git a/contracts/discovery/L1GNSStorage.sol b/contracts/discovery/L1GNSStorage.sol index c1d14e0b5..ecd0de319 100644 --- a/contracts/discovery/L1GNSStorage.sol +++ b/contracts/discovery/L1GNSStorage.sol @@ -12,6 +12,8 @@ pragma abicoder v2; abstract contract L1GNSV1Storage { /// Address of the Arbitrum DelayedInbox address public arbitrumInboxAddress; + /// True for subgraph IDs that have been migrated to L2 + mapping(uint256 => bool) public subgraphMigratedToL2; /// @dev Storage gap to keep storage slots fixed in future versions uint256[50] private __gap; } diff --git a/contracts/l2/discovery/IL2GNS.sol b/contracts/l2/discovery/IL2GNS.sol index 9d1f20fc0..1f9f77c6e 100644 --- a/contracts/l2/discovery/IL2GNS.sol +++ b/contracts/l2/discovery/IL2GNS.sol @@ -8,6 +8,17 @@ import { ICallhookReceiver } from "../../gateway/ICallhookReceiver.sol"; * @title Interface for the L2GNS contract. */ interface IL2GNS is ICallhookReceiver { + /** + * @dev The SubgraphL2MigrationData struct holds information + * about a subgraph related to its migration from L1 to L2. + */ + struct SubgraphL2MigrationData { + uint256 tokens; // GRT that will be sent to L2 to mint signal + mapping(address => bool) curatorBalanceClaimed; // True for curators whose balance has been claimed in L2 + bool l2Done; // Migration finished on L2 side + uint256 subgraphReceivedOnL2BlockNumber; // Block number when the subgraph was received on L2 + } + /** * @notice Finish a subgraph migration from L1. 
* The subgraph must have been previously sent through the bridge diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 516844d7a..e25cb5297 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -100,7 +100,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { bytes32 _subgraphMetadata, bytes32 _versionMetadata ) external override notPartialPaused onlySubgraphAuth(_subgraphID) { - IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); // A subgraph require(migratedData.subgraphReceivedOnL2BlockNumber != 0, "INVALID_SUBGRAPH"); @@ -144,7 +144,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { override notPartialPaused { - IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; require(migratedData.subgraphReceivedOnL2BlockNumber != 0, "INVALID_SUBGRAPH"); require(!migratedData.l2Done, "ALREADY_FINISHED"); require( @@ -176,7 +176,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { uint256 _balance, address _beneficiary ) external override notPartialPaused onlyL1Counterpart { - GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; require(migratedData.l2Done, "!MIGRATED"); require(!migratedData.curatorBalanceClaimed[_curator], "ALREADY_CLAIMED"); @@ -275,7 +275,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { uint256 _tokens, uint256 _nSignal ) internal { - IGNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; SubgraphData 
storage subgraphData = _getSubgraphData(_subgraphID); subgraphData.reserveRatioDeprecated = fixedReserveRatio; diff --git a/contracts/l2/discovery/L2GNSStorage.sol b/contracts/l2/discovery/L2GNSStorage.sol index 067dff1e5..9a2951d53 100644 --- a/contracts/l2/discovery/L2GNSStorage.sol +++ b/contracts/l2/discovery/L2GNSStorage.sol @@ -3,12 +3,16 @@ pragma solidity ^0.7.6; pragma abicoder v2; +import { IL2GNS } from "./IL2GNS.sol"; + /** * @title L2GNSV1Storage * @notice This contract holds all the L2-specific storage variables for the L2GNS contract, version 1 * @dev */ abstract contract L2GNSV1Storage { + /// Data for subgraph migration from L1 to L2 + mapping(uint256 => IL2GNS.SubgraphL2MigrationData) public subgraphL2MigrationData; /// @dev Storage gap to keep storage slots fixed in future versions uint256[50] private __gap; } diff --git a/test/gns.test.ts b/test/gns.test.ts index 90da2648d..b24c359d3 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1098,8 +1098,8 @@ describe('L1GNS', () => { expect(subgraphAfter.disabled).eq(true) expect(subgraphAfter.withdrawableGRT).eq(0) - const migrationData = await gns.subgraphL2MigrationData(subgraph0.id) - expect(migrationData.l1Done).eq(true) + const migrated = await gns.subgraphMigratedToL2(subgraph0.id) + expect(migrated).eq(true) const expectedCallhookData = defaultAbiCoder.encode( ['uint256', 'address', 'uint256'], @@ -1139,8 +1139,8 @@ describe('L1GNS', () => { expect(subgraphAfter.disabled).eq(true) expect(subgraphAfter.withdrawableGRT).eq(0) - const migrationData = await legacyGNSMock.subgraphL2MigrationData(subgraphID) - expect(migrationData.l1Done).eq(true) + const migrated = await legacyGNSMock.subgraphMigratedToL2(subgraphID) + expect(migrated).eq(true) const expectedCallhookData = defaultAbiCoder.encode( ['uint256', 'address', 'uint256'], diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 670d14a5e..a0928ceaf 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -287,7 +287,6 
@@ describe('L2GNS', () => { const subgraphData = await gns.subgraphs(l1SubgraphId) expect(migrationData.tokens).eq(curatedTokens) - expect(migrationData.l1Done).eq(false) expect(migrationData.l2Done).eq(false) expect(migrationData.subgraphReceivedOnL2BlockNumber).eq(await latestBlock()) @@ -326,7 +325,6 @@ describe('L2GNS', () => { const subgraphData = await gns.subgraphs(l1SubgraphId) expect(migrationData.tokens).eq(curatedTokens) - expect(migrationData.l1Done).eq(false) expect(migrationData.l2Done).eq(false) expect(migrationData.subgraphReceivedOnL2BlockNumber).eq(await latestBlock()) From d433a1b8ee22016a065b1482aabf8aaa4baf6b20 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Dec 2022 12:17:22 -0300 Subject: [PATCH 073/112] fix(Managed): use hash directly in syncAllContracts and gns (OZ N-04) --- contracts/governance/Managed.sol | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/contracts/governance/Managed.sol b/contracts/governance/Managed.sol index 70f7c6171..9b0ea29c8 100644 --- a/contracts/governance/Managed.sol +++ b/contracts/governance/Managed.sol @@ -40,6 +40,7 @@ abstract contract Managed is IManaged { bytes32 private immutable STAKING = keccak256("Staking"); bytes32 private immutable GRAPH_TOKEN = keccak256("GraphToken"); bytes32 private immutable GRAPH_TOKEN_GATEWAY = keccak256("GraphTokenGateway"); + bytes32 private immutable GNS = keccak256("GNS"); // -- Events -- @@ -195,7 +196,7 @@ abstract contract Managed is IManaged { * @return Address of the GNS contract registered with Controller, as an IGNS interface. */ function gns() internal view returns (IGNS) { - return IGNS(_resolveContract(keccak256("GNS"))); + return IGNS(_resolveContract(GNS)); } /** @@ -213,14 +214,13 @@ abstract contract Managed is IManaged { /** * @dev Cache a contract address from the Controller registry. 
- * @param _name Name of the contract to sync into the cache - */ - function _syncContract(string memory _name) internal { - bytes32 nameHash = keccak256(abi.encodePacked(_name)); - address contractAddress = controller.getContractProxy(nameHash); - if (_addressCache[nameHash] != contractAddress) { - _addressCache[nameHash] = contractAddress; - emit ContractSynced(nameHash, contractAddress); + * @param _nameHash keccak256 hash of the name of the contract to sync into the cache + */ + function _syncContract(bytes32 _nameHash) internal { + address contractAddress = controller.getContractProxy(_nameHash); + if (_addressCache[_nameHash] != contractAddress) { + _addressCache[_nameHash] = contractAddress; + emit ContractSynced(_nameHash, contractAddress); } } @@ -231,12 +231,12 @@ abstract contract Managed is IManaged { * controller to ensure the protocol is using the latest version */ function syncAllContracts() external override { - _syncContract("Curation"); - _syncContract("EpochManager"); - _syncContract("RewardsManager"); - _syncContract("Staking"); - _syncContract("GraphToken"); - _syncContract("GraphTokenGateway"); - _syncContract("GNS"); + _syncContract(CURATION); + _syncContract(EPOCH_MANAGER); + _syncContract(REWARDS_MANAGER); + _syncContract(STAKING); + _syncContract(GRAPH_TOKEN); + _syncContract(GRAPH_TOKEN_GATEWAY); + _syncContract(GNS); } } From f9fa05c8502a02b9ef34ae105f7941bb39b6bb01 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Dec 2022 12:44:04 -0300 Subject: [PATCH 074/112] fix: remove unused imports (OZ N-06) --- contracts/curation/Curation.sol | 1 - contracts/curation/ICuration.sol | 2 -- contracts/discovery/GNS.sol | 1 - contracts/l2/curation/L2Curation.sol | 2 -- 4 files changed, 6 deletions(-) diff --git a/contracts/curation/Curation.sol b/contracts/curation/Curation.sol index 0dafc7631..04c3a6217 100644 --- a/contracts/curation/Curation.sol +++ b/contracts/curation/Curation.sol @@ -16,7 +16,6 @@ import { IGraphToken } from 
"../token/IGraphToken.sol"; import { CurationV2Storage } from "./CurationStorage.sol"; import { ICuration } from "./ICuration.sol"; import { IGraphCurationToken } from "./IGraphCurationToken.sol"; -import { GraphCurationToken } from "./GraphCurationToken.sol"; /** * @title Curation contract diff --git a/contracts/curation/ICuration.sol b/contracts/curation/ICuration.sol index 712a76efb..dffff46cd 100644 --- a/contracts/curation/ICuration.sol +++ b/contracts/curation/ICuration.sol @@ -2,8 +2,6 @@ pragma solidity ^0.7.6; -import "./IGraphCurationToken.sol"; - /** * @title Curation Interface * @dev Interface for the Curation contract (and L2Curation too) diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index 6d83d6c18..ea7c399c0 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -7,7 +7,6 @@ import { SafeMathUpgradeable } from "@openzeppelin/contracts-upgradeable/math/Sa import { AddressUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/AddressUpgradeable.sol"; import { Multicall } from "../base/Multicall.sol"; -import { BancorFormula } from "../bancor/BancorFormula.sol"; import { GraphUpgradeable } from "../upgrades/GraphUpgradeable.sol"; import { TokenUtils } from "../utils/TokenUtils.sol"; import { ICuration } from "../curation/ICuration.sol"; diff --git a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol index 350380e6d..638870c0e 100644 --- a/contracts/l2/curation/L2Curation.sol +++ b/contracts/l2/curation/L2Curation.sol @@ -7,7 +7,6 @@ import { AddressUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/Ad import { SafeMathUpgradeable } from "@openzeppelin/contracts-upgradeable/math/SafeMathUpgradeable.sol"; import { ClonesUpgradeable } from "@openzeppelin/contracts-upgradeable/proxy/ClonesUpgradeable.sol"; -import { BancorFormula } from "../../bancor/BancorFormula.sol"; import { GraphUpgradeable } from "../../upgrades/GraphUpgradeable.sol"; import { TokenUtils } from 
"../../utils/TokenUtils.sol"; import { IRewardsManager } from "../../rewards/IRewardsManager.sol"; @@ -16,7 +15,6 @@ import { IGraphToken } from "../../token/IGraphToken.sol"; import { CurationV2Storage } from "../../curation/CurationStorage.sol"; import { ICuration } from "../../curation/ICuration.sol"; import { IGraphCurationToken } from "../../curation/IGraphCurationToken.sol"; -import { GraphCurationToken } from "../../curation/GraphCurationToken.sol"; import { IL2Curation } from "./IL2Curation.sol"; /** From e606b6894083e4526a6dcb312c8f4bfddc124524 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Dec 2022 12:47:30 -0300 Subject: [PATCH 075/112] fix: mark GNS as abstract (OZ N-07) --- contracts/discovery/GNS.sol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts/discovery/GNS.sol b/contracts/discovery/GNS.sol index ea7c399c0..9af60ba2f 100644 --- a/contracts/discovery/GNS.sol +++ b/contracts/discovery/GNS.sol @@ -25,7 +25,7 @@ import { GNSV3Storage } from "./GNSStorage.sol"; * The contract implements a multicall behaviour to support batching multiple calls in a single * transaction. */ -contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { +abstract contract GNS is GNSV3Storage, GraphUpgradeable, IGNS, Multicall { using SafeMathUpgradeable for uint256; // -- Constants -- From b0832f21cce8622a51d69875118c16a3d0f2e526 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Dec 2022 12:58:47 -0300 Subject: [PATCH 076/112] fix: document the encoding in L2GNS.onTokenTransfer (OZ N-08) --- contracts/l2/discovery/L2GNS.sol | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index e25cb5297..be6467a56 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -66,7 +66,10 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { /** * @notice Receive tokens with a callhook from the bridge. 
- * The callhook will receive a subgraph from L1. + * The callhook will receive a subgraph from L1. The _data parameter + * must contain the ABI encoding of: + * (uint256 subgraphID, address subgraphOwner, uint256 nSignal) + * This is encoded by _encodeSubgraphDataForL2 in L1GNS. * @param _from Token sender in L1 (must be the L1GNS) * @param _amount Amount of tokens that were transferred * @param _data ABI-encoded callhook data From ce13a0127b3e3bea3b5b9033074f669b9e3b4872 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Dec 2022 13:02:07 -0300 Subject: [PATCH 077/112] fix: correct unit for curationTaxPercentage in comment (OZ N-09) --- contracts/discovery/GNSStorage.sol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts/discovery/GNSStorage.sol b/contracts/discovery/GNSStorage.sol index 0bc216a87..b8480c8e6 100644 --- a/contracts/discovery/GNSStorage.sol +++ b/contracts/discovery/GNSStorage.sol @@ -17,7 +17,7 @@ import { ISubgraphNFT } from "./ISubgraphNFT.sol"; abstract contract GNSV1Storage is Managed { // -- State -- - /// Percentage of curation tax that must be paid by the owner, in parts per hundred. + /// Percentage of curation tax that must be paid by the owner, in parts per million. uint32 public ownerTaxPercentage; /// [DEPRECATED] Bonding curve formula. 
From 87cd0fe46608db8f11365ad5ca881cc9b179c673 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Dec 2022 13:07:26 -0300 Subject: [PATCH 078/112] fix: add indexed params to some events (OZ N-10) --- contracts/discovery/L1GNS.sol | 2 +- contracts/l2/discovery/L2GNS.sol | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index c6108cc37..155762e7c 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -27,7 +27,7 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { using SafeMathUpgradeable for uint256; /// @dev Emitted when a subgraph was sent to L2 through the bridge - event SubgraphSentToL2(uint256 _subgraphID, address _l2Owner); + event SubgraphSentToL2(uint256 indexed _subgraphID, address indexed _l2Owner); /// @dev Emitted when the address of the Arbitrum Inbox was updated event ArbitrumInboxAddressUpdated(address _inbox); diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index be6467a56..76b12eb1d 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -33,14 +33,14 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { uint256 public constant FINISH_MIGRATION_TIMEOUT = 50400; /// @dev Emitted when a subgraph is received from L1 through the bridge - event SubgraphReceivedFromL1(uint256 _subgraphID); + event SubgraphReceivedFromL1(uint256 indexed _subgraphID); /// @dev Emitted when a subgraph migration from L1 is finalized, so the subgraph is published - event SubgraphMigrationFinalized(uint256 _subgraphID); + event SubgraphMigrationFinalized(uint256 indexed _subgraphID); /// @dev Emitted when the L1 balance for a curator has been claimed event CuratorBalanceClaimed( - uint256 _subgraphID, - address _l1Curator, - address _l2Curator, + uint256 indexed _subgraphID, + address indexed _l1Curator, + address indexed _l2Curator, uint256 _nSignalClaimed ); From 
b59c79837a224ec5f2f3d3663210c00635697daa Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 20 Dec 2022 13:15:08 -0300 Subject: [PATCH 079/112] fix: use named arguments in long function calls (OZ N-11) --- contracts/discovery/L1GNS.sol | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 155762e7c..82b34bc2c 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -78,14 +78,14 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { IGraphToken grt = graphToken(); ITokenGateway gateway = graphTokenGateway(); grt.approve(address(gateway), curationTokens); - gateway.outboundTransfer{ value: msg.value }( - address(grt), - counterpartGNSAddress, - curationTokens, - _maxGas, - _gasPriceBid, - data - ); + gateway.outboundTransfer{ value: msg.value }({ + _token: address(grt), + _to: counterpartGNSAddress, + _amount: curationTokens, + _maxGas: _maxGas, + _gasPriceBid: _gasPriceBid, + _data: data + }); subgraphData.reserveRatioDeprecated = 0; _burnNFT(_subgraphID); @@ -136,15 +136,15 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { subgraphData.curatorNSignal[msg.sender] = 0; subgraphData.nSignal = subgraphData.nSignal.sub(curatorNSignal); - uint256 seqNum = sendTxToL2( - arbitrumInboxAddress, - counterpartGNSAddress, - msg.sender, - msg.value, - 0, - gasParams, - outboundCalldata - ); + uint256 seqNum = sendTxToL2({ + _inbox: arbitrumInboxAddress, + _to: counterpartGNSAddress, + _user: msg.sender, + _l1CallValue: msg.value, + _l2CallValue: 0, + _l2GasParams: gasParams, + _data: outboundCalldata + }); return abi.encode(seqNum); } From 18eb0a2e1284970af0eb0610d8c5be7d34d3089a Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 8 Dec 2022 13:57:43 -0300 Subject: [PATCH 080/112] feat: subgraph migration to L2 sending only the owner's tokens (OZ M-01) --- contracts/discovery/L1GNS.sol | 172 
++++++------ contracts/discovery/L1GNSStorage.sol | 2 - contracts/l2/curation/IL2Curation.sol | 9 +- contracts/l2/curation/L2Curation.sol | 16 +- contracts/l2/discovery/IL2GNS.sol | 30 +-- contracts/l2/discovery/L2GNS.sol | 166 +++++------- test/gns.test.ts | 246 ++++++++++------- test/l2/l2Curation.test.ts | 19 +- test/l2/l2GNS.test.ts | 362 +++++++++++--------------- 9 files changed, 484 insertions(+), 538 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 82b34bc2c..3ca2b0866 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -23,22 +23,18 @@ import { L1GNSV1Storage } from "./L1GNSStorage.sol"; * transaction. * This L1GNS variant includes some functions to allow migrating subgraphs to L2. */ -contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { +contract L1GNS is GNS, L1GNSV1Storage { using SafeMathUpgradeable for uint256; /// @dev Emitted when a subgraph was sent to L2 through the bridge event SubgraphSentToL2(uint256 indexed _subgraphID, address indexed _l2Owner); - /// @dev Emitted when the address of the Arbitrum Inbox was updated - event ArbitrumInboxAddressUpdated(address _inbox); - /** - * @dev sets the addresses for L1 inbox provided by Arbitrum - * @param _inbox Address of the Inbox that is part of the Arbitrum Bridge - */ - function setArbitrumInboxAddress(address _inbox) external onlyGovernor { - arbitrumInboxAddress = _inbox; - emit ArbitrumInboxAddressUpdated(_inbox); - } + /// @dev Emitted when a curator's balance for a subgraph was sent to L2 + event CuratorBalanceSentToL2( + uint256 indexed _subgraphID, + address indexed _curator, + uint256 _tokens + ); /** * @notice Send a subgraph's data and tokens to L2. 
@@ -72,20 +68,33 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { subgraphData.disabled = true; subgraphData.vSignal = 0; - bytes memory extraData = _encodeSubgraphDataForL2(_subgraphID, _l2Owner, subgraphData); + // We send only the subgraph owner's tokens and nsignal to L2, + // and for everyone else we set the withdrawableGRT so that they can choose + // to withdraw or migrate their signal. + uint256 ownerNSignal = subgraphData.curatorNSignal[msg.sender]; + uint256 totalSignal = subgraphData.nSignal; - bytes memory data = abi.encode(_maxSubmissionCost, extraData); - IGraphToken grt = graphToken(); - ITokenGateway gateway = graphTokenGateway(); - grt.approve(address(gateway), curationTokens); - gateway.outboundTransfer{ value: msg.value }({ - _token: address(grt), - _to: counterpartGNSAddress, - _amount: curationTokens, - _maxGas: _maxGas, - _gasPriceBid: _gasPriceBid, - _data: data - }); + // Get owner share of tokens to be sent to L2 + uint256 tokensForL2 = ownerNSignal.mul(curationTokens).div(totalSignal); + // This leaves the subgraph as if it was deprecated, + // so other curators can withdraw: + subgraphData.curatorNSignal[msg.sender] = 0; + subgraphData.nSignal = totalSignal.sub(ownerNSignal); + subgraphData.withdrawableGRT = curationTokens.sub(tokensForL2); + + bytes memory extraData = abi.encode( + uint8(IL2GNS.L1MessageCodes.RECEIVE_SUBGRAPH_CODE), + _subgraphID, + _l2Owner + ); + + _sendTokensAndMessageToL2GNS( + tokensForL2, + _maxGas, + _gasPriceBid, + _maxSubmissionCost, + extraData + ); subgraphData.reserveRatioDeprecated = 0; _burnNFT(_subgraphID); @@ -93,99 +102,88 @@ contract L1GNS is GNS, L1GNSV1Storage, L1ArbitrumMessenger { } /** - * @notice Claim the balance for a curator's signal in a subgraph that was - * migrated to L2, by sending a retryable ticket to the L2GNS. + * @notice Send the balance for a curator's signal in a subgraph that was + * migrated to L2, using the L1GraphTokenGateway. 
* The balance will be claimed for a beneficiary address, as this method can be * used by curators that use a contract address in L1 that may not exist in L2. * This will set the curator's signal on L1 to zero, so the caller must ensure * that the retryable ticket is redeemed before expiration, or the signal will be lost. + * It is up to the caller to verify that the subgraph migration was finished in L2, + * but if it wasn't, the tokens will be sent to the beneficiary in L2. * @dev Use the Arbitrum SDK to estimate the L2 retryable ticket parameters. * @param _subgraphID Subgraph ID * @param _beneficiary Address that will receive the tokens in L2 * @param _maxGas Max gas to use for the L2 retryable ticket * @param _gasPriceBid Gas price bid for the L2 retryable ticket * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket - * @return The sequence ID for the retryable ticket, as returned by the Arbitrum inbox. */ - function claimCuratorBalanceToBeneficiaryOnL2( + function sendCuratorBalanceToBeneficiaryOnL2( uint256 _subgraphID, address _beneficiary, uint256 _maxGas, uint256 _gasPriceBid, uint256 _maxSubmissionCost - ) external payable notPartialPaused returns (bytes memory) { + ) external payable notPartialPaused { require(subgraphMigratedToL2[_subgraphID], "!MIGRATED"); // The Arbitrum bridge will check this too, we just check here for an early exit require(_maxSubmissionCost != 0, "NO_SUBMISSION_COST"); - L2GasParams memory gasParams = L2GasParams(_maxSubmissionCost, _maxGas, _gasPriceBid); - - uint256 curatorNSignal = getCuratorSignal(_subgraphID, msg.sender); + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + uint256 curatorNSignal = subgraphData.curatorNSignal[msg.sender]; require(curatorNSignal != 0, "NO_SIGNAL"); - bytes memory outboundCalldata = getClaimCuratorBalanceOutboundCalldata( + uint256 subgraphNSignal = subgraphData.nSignal; + require(subgraphNSignal != 0, "NO_SUBGRAPH_SIGNAL"); + + uint256 tokensForL2 = 
curatorNSignal.mul(subgraphData.withdrawableGRT).div(subgraphNSignal); + bytes memory extraData = abi.encode( + uint8(IL2GNS.L1MessageCodes.RECEIVE_CURATOR_BALANCE_CODE), _subgraphID, - curatorNSignal, - msg.sender, _beneficiary ); - // Similarly to withdrawing from a deprecated subgraph, - // we remove the curator's signal from the subgraph. - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + // Set the subgraph as if the curator had withdrawn their tokens subgraphData.curatorNSignal[msg.sender] = 0; - subgraphData.nSignal = subgraphData.nSignal.sub(curatorNSignal); - - uint256 seqNum = sendTxToL2({ - _inbox: arbitrumInboxAddress, - _to: counterpartGNSAddress, - _user: msg.sender, - _l1CallValue: msg.value, - _l2CallValue: 0, - _l2GasParams: gasParams, - _data: outboundCalldata - }); - - return abi.encode(seqNum); - } - - /** - * @notice Get the outbound calldata that will be sent to L2 - * when calling claimCuratorBalanceToBeneficiaryOnL2. - * This can be useful to estimate the L2 retryable ticket parameters. - * @param _subgraphID Subgraph ID - * @param _curatorNSignal Curator's signal in the subgraph - * @param _curator Curator address - * @param _beneficiary Address that will own the signal in L2 - */ - function getClaimCuratorBalanceOutboundCalldata( - uint256 _subgraphID, - uint256 _curatorNSignal, - address _curator, - address _beneficiary - ) public pure returns (bytes memory) { - return - abi.encodeWithSelector( - IL2GNS.claimL1CuratorBalanceToBeneficiary.selector, - _subgraphID, - _curator, - _curatorNSignal, - _beneficiary - ); + subgraphData.nSignal = subgraphNSignal.sub(curatorNSignal); + subgraphData.withdrawableGRT = subgraphData.withdrawableGRT.sub(tokensForL2); + + // Send the tokens and data to L2 using the L1GraphTokenGateway + _sendTokensAndMessageToL2GNS( + tokensForL2, + _maxGas, + _gasPriceBid, + _maxSubmissionCost, + extraData + ); } /** - * @dev Encodes the subgraph data as callhook parameters - * for the L2 migration. 
- * @param _subgraphID Subgraph ID - * @param _l2Owner Owner of the subgraph on L2 - * @param _subgraphData Subgraph data + * @notice Sends a message to the L2GNS with some extra data, + * also sending some tokens, using the L1GraphTokenGateway. + * @param _tokens Amount of tokens to send to L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + * @param _extraData Extra data for the callhook on L2GNS */ - function _encodeSubgraphDataForL2( - uint256 _subgraphID, - address _l2Owner, - SubgraphData storage _subgraphData - ) internal view returns (bytes memory) { - return abi.encode(_subgraphID, _l2Owner, _subgraphData.nSignal); + function _sendTokensAndMessageToL2GNS( + uint256 _tokens, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost, + bytes memory _extraData + ) internal { + bytes memory data = abi.encode(_maxSubmissionCost, _extraData); + IGraphToken grt = graphToken(); + ITokenGateway gateway = graphTokenGateway(); + grt.approve(address(gateway), _tokens); + gateway.outboundTransfer{ value: msg.value }( + address(grt), + counterpartGNSAddress, + _tokens, + _maxGas, + _gasPriceBid, + data + ); } } diff --git a/contracts/discovery/L1GNSStorage.sol b/contracts/discovery/L1GNSStorage.sol index ecd0de319..64163636e 100644 --- a/contracts/discovery/L1GNSStorage.sol +++ b/contracts/discovery/L1GNSStorage.sol @@ -10,8 +10,6 @@ pragma abicoder v2; * reduce the size of the gap accordingly. 
*/ abstract contract L1GNSV1Storage { - /// Address of the Arbitrum DelayedInbox - address public arbitrumInboxAddress; /// True for subgraph IDs that have been migrated to L2 mapping(uint256 => bool) public subgraphMigratedToL2; /// @dev Storage gap to keep storage slots fixed in future versions diff --git a/contracts/l2/curation/IL2Curation.sol b/contracts/l2/curation/IL2Curation.sol index 4d54b4804..bd8806538 100644 --- a/contracts/l2/curation/IL2Curation.sol +++ b/contracts/l2/curation/IL2Curation.sol @@ -12,14 +12,11 @@ interface IL2Curation { * only during an L1-L2 migration). * @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal * @param _tokensIn Amount of Graph Tokens to deposit - * @param _signalOutMin Expected minimum amount of signal to receive * @return Signal minted */ - function mintTaxFree( - bytes32 _subgraphDeploymentID, - uint256 _tokensIn, - uint256 _signalOutMin - ) external returns (uint256); + function mintTaxFree(bytes32 _subgraphDeploymentID, uint256 _tokensIn) + external + returns (uint256); /** * @notice Calculate amount of signal that can be bought with tokens in a curation pool, diff --git a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol index 638870c0e..908e40384 100644 --- a/contracts/l2/curation/L2Curation.sol +++ b/contracts/l2/curation/L2Curation.sol @@ -231,23 +231,21 @@ contract L2Curation is CurationV2Storage, GraphUpgradeable, IL2Curation { * only during an L1-L2 migration). 
* @param _subgraphDeploymentID Subgraph deployment pool from where to mint signal * @param _tokensIn Amount of Graph Tokens to deposit - * @param _signalOutMin Expected minimum amount of signal to receive * @return Signal minted */ - function mintTaxFree( - bytes32 _subgraphDeploymentID, - uint256 _tokensIn, - uint256 _signalOutMin - ) external override notPartialPaused onlyGNS returns (uint256) { + function mintTaxFree(bytes32 _subgraphDeploymentID, uint256 _tokensIn) + external + override + notPartialPaused + onlyGNS + returns (uint256) + { // Need to deposit some funds require(_tokensIn != 0, "Cannot deposit zero tokens"); // Exchange GRT tokens for GCS of the subgraph pool (no tax) uint256 signalOut = _tokensToSignal(_subgraphDeploymentID, _tokensIn); - // Slippage protection - require(signalOut >= _signalOutMin, "Slippage protection"); - address curator = msg.sender; CurationPool storage curationPool = pools[_subgraphDeploymentID]; diff --git a/contracts/l2/discovery/IL2GNS.sol b/contracts/l2/discovery/IL2GNS.sol index 1f9f77c6e..227625928 100644 --- a/contracts/l2/discovery/IL2GNS.sol +++ b/contracts/l2/discovery/IL2GNS.sol @@ -8,6 +8,11 @@ import { ICallhookReceiver } from "../../gateway/ICallhookReceiver.sol"; * @title Interface for the L2GNS contract. */ interface IL2GNS is ICallhookReceiver { + enum L1MessageCodes { + RECEIVE_SUBGRAPH_CODE, + RECEIVE_CURATOR_BALANCE_CODE + } + /** * @dev The SubgraphL2MigrationData struct holds information * about a subgraph related to its migration from L1 to L2. @@ -34,29 +39,4 @@ interface IL2GNS is ICallhookReceiver { bytes32 _subgraphMetadata, bytes32 _versionMetadata ) external; - - /** - * @notice Deprecate a subgraph that was migrated from L1, but for which - * the migration was never finished. Anyone can call this function after a certain amount of - * blocks have passed since the subgraph was migrated, if the subgraph owner didn't - * call finishSubgraphMigrationFromL1. 
In L2GNS this timeout is the FINISH_MIGRATION_TIMEOUT constant. - * @param _subgraphID Subgraph ID - */ - function deprecateSubgraphMigratedFromL1(uint256 _subgraphID) external; - - /** - * @notice Claim curator balance belonging to a curator from L1. - * This will be credited to the a beneficiary on L2, and can only be called - * from the GNS on L1 through a retryable ticket. - * @param _subgraphID Subgraph on which to claim the balance - * @param _curator Curator who owns the balance on L1 - * @param _balance Balance of the curator from L1 - * @param _beneficiary Address of an L2 beneficiary for the balance - */ - function claimL1CuratorBalanceToBeneficiary( - uint256 _subgraphID, - address _curator, - uint256 _balance, - address _beneficiary - ) external; } diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 76b12eb1d..15a017283 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -5,9 +5,7 @@ pragma abicoder v2; import { SafeMathUpgradeable } from "@openzeppelin/contracts-upgradeable/math/SafeMathUpgradeable.sol"; -import { AddressAliasHelper } from "../../arbitrum/AddressAliasHelper.sol"; import { GNS } from "../../discovery/GNS.sol"; -import { IGNS } from "../../discovery/IGNS.sol"; import { ICuration } from "../../curation/ICuration.sol"; import { IL2GNS } from "./IL2GNS.sol"; import { L2GNSV1Storage } from "./L2GNSStorage.sol"; @@ -28,20 +26,18 @@ import { IL2Curation } from "../curation/IL2Curation.sol"; contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { using SafeMathUpgradeable for uint256; - /// The amount of time (in blocks) that a subgraph owner has to finish the migration - /// from L1 before the subgraph can be deprecated: 1 week - uint256 public constant FINISH_MIGRATION_TIMEOUT = 50400; - /// @dev Emitted when a subgraph is received from L1 through the bridge event SubgraphReceivedFromL1(uint256 indexed _subgraphID); /// @dev Emitted when a subgraph migration from L1 is finalized, 
so the subgraph is published event SubgraphMigrationFinalized(uint256 indexed _subgraphID); /// @dev Emitted when the L1 balance for a curator has been claimed - event CuratorBalanceClaimed( - uint256 indexed _subgraphID, - address indexed _l1Curator, - address indexed _l2Curator, - uint256 _nSignalClaimed + event CuratorBalanceReceived(uint256 _subgraphID, address _l2Curator, uint256 _tokens); + /// @dev Emitted when the L1 balance for a curator has been returned to the beneficiary. + /// This can happen if the subgraph migration was not finished when the curator's tokens arrived. + event CuratorBalanceReturnedToBeneficiary( + uint256 _subgraphID, + address _l2Curator, + uint256 _tokens ); /** @@ -52,24 +48,18 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { _; } - /** - * @dev Checks that the sender is the L2 alias of the counterpart - * GNS on L1. - */ - modifier onlyL1Counterpart() { - require( - msg.sender == AddressAliasHelper.applyL1ToL2Alias(counterpartGNSAddress), - "ONLY_COUNTERPART_GNS" - ); - _; - } - /** * @notice Receive tokens with a callhook from the bridge. - * The callhook will receive a subgraph from L1. The _data parameter + * The callhook will receive a subgraph or a curator's balance from L1. The _data parameter * must contain the ABI encoding of: - * (uint256 subgraphID, address subgraphOwner, uint256 nSignal) - * This is encoded by _encodeSubgraphDataForL2 in L1GNS. + * (uint8 code, uint256 subgraphId, address beneficiary) + * Where `code` is one of the codes defined in IL2GNS.L1MessageCodes. + * If the code is RECEIVE_SUBGRAPH_CODE, the beneficiary is the address of the + * owner of the subgraph on L2. + * If the code is RECEIVE_CURATOR_BALANCE_CODE, then the beneficiary is the + * address of the curator in L2. In this case, If the subgraph migration was never finished + * (or the subgraph doesn't exist), the tokens will be sent to the curator. + * @dev This function is called by the L2GraphTokenGateway contract. 
* @param _from Token sender in L1 (must be the L1GNS) * @param _amount Amount of tokens that were transferred * @param _data ABI-encoded callhook data @@ -80,12 +70,18 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { bytes calldata _data ) external override notPartialPaused onlyL2Gateway { require(_from == counterpartGNSAddress, "ONLY_L1_GNS_THROUGH_BRIDGE"); - (uint256 subgraphID, address subgraphOwner, uint256 nSignal) = abi.decode( + (uint8 code, uint256 subgraphID, address beneficiary) = abi.decode( _data, - (uint256, address, uint256) + (uint8, uint256, address) ); - _receiveSubgraphFromL1(subgraphID, subgraphOwner, _amount, nSignal); + if (code == uint8(L1MessageCodes.RECEIVE_SUBGRAPH_CODE)) { + _receiveSubgraphFromL1(subgraphID, beneficiary, _amount); + } else if (code == uint8(L1MessageCodes.RECEIVE_CURATOR_BALANCE_CODE)) { + _mintSignalFromL1(subgraphID, beneficiary, _amount); + } else { + revert("INVALID_CODE"); + } } /** @@ -116,9 +112,14 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { IL2Curation curation = IL2Curation(address(curation())); // Update pool: constant nSignal, vSignal can change (w/no slippage protection) // Buy all signal from the new deployment - subgraphData.vSignal = curation.mintTaxFree(_subgraphDeploymentID, migratedData.tokens, 0); - subgraphData.disabled = false; + uint256 vSignal = curation.mintTaxFree(_subgraphDeploymentID, migratedData.tokens); + uint256 nSignal = vSignalToNSignal(_subgraphID, vSignal); + subgraphData.disabled = false; + subgraphData.vSignal = vSignal; + subgraphData.nSignal = nSignal; + subgraphData.curatorNSignal[msg.sender] = nSignal; + subgraphData.subgraphDeploymentID = _subgraphDeploymentID; // Set the token metadata _setSubgraphMetadata(_subgraphID, _subgraphMetadata); @@ -129,69 +130,10 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { migratedData.tokens, _subgraphDeploymentID ); - // Update target deployment - subgraphData.subgraphDeploymentID = _subgraphDeploymentID; emit 
SubgraphVersionUpdated(_subgraphID, _subgraphDeploymentID, _versionMetadata); emit SubgraphMigrationFinalized(_subgraphID); } - /** - * @notice Deprecate a subgraph that was migrated from L1, but for which - * the migration was never finished. Anyone can call this function after a certain amount of - * blocks have passed since the subgraph was migrated, if the subgraph owner didn't - * call finishSubgraphMigrationFromL1. In L2GNS this timeout is the FINISH_MIGRATION_TIMEOUT constant. - * @param _subgraphID Subgraph ID - */ - function deprecateSubgraphMigratedFromL1(uint256 _subgraphID) - external - override - notPartialPaused - { - IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; - require(migratedData.subgraphReceivedOnL2BlockNumber != 0, "INVALID_SUBGRAPH"); - require(!migratedData.l2Done, "ALREADY_FINISHED"); - require( - block.number > migratedData.subgraphReceivedOnL2BlockNumber + FINISH_MIGRATION_TIMEOUT, - "TOO_EARLY" - ); - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - - migratedData.l2Done = true; - uint256 withdrawableGRT = migratedData.tokens; - subgraphData.withdrawableGRT = withdrawableGRT; - subgraphData.reserveRatioDeprecated = 0; - _burnNFT(_subgraphID); - emit SubgraphDeprecated(_subgraphID, withdrawableGRT); - } - - /** - * @notice Claim curator balance belonging to a curator from L1. - * This will be credited to the a beneficiary on L2, and can only be called - * from the GNS on L1 through a retryable ticket. 
- * @param _subgraphID Subgraph on which to claim the balance - * @param _curator Curator who owns the balance on L1 - * @param _balance Balance of the curator from L1 - * @param _beneficiary Address of an L2 beneficiary for the balance - */ - function claimL1CuratorBalanceToBeneficiary( - uint256 _subgraphID, - address _curator, - uint256 _balance, - address _beneficiary - ) external override notPartialPaused onlyL1Counterpart { - IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; - - require(migratedData.l2Done, "!MIGRATED"); - require(!migratedData.curatorBalanceClaimed[_curator], "ALREADY_CLAIMED"); - - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - subgraphData.curatorNSignal[_beneficiary] = subgraphData.curatorNSignal[_beneficiary].add( - _balance - ); - migratedData.curatorBalanceClaimed[_curator] = true; - emit CuratorBalanceClaimed(_subgraphID, _curator, _beneficiary, _balance); - } - /** * @notice Publish a new version of an existing subgraph. 
* @dev This is the same as the one in the base GNS, but skips the check for @@ -270,13 +212,11 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { * @param _subgraphID Subgraph ID * @param _subgraphOwner Owner of the subgraph * @param _tokens Tokens to be deposited in the subgraph - * @param _nSignal Name signal for the subgraph in L1 */ function _receiveSubgraphFromL1( uint256 _subgraphID, address _subgraphOwner, - uint256 _tokens, - uint256 _nSignal + uint256 _tokens ) internal { IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); @@ -284,7 +224,6 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { subgraphData.reserveRatioDeprecated = fixedReserveRatio; // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called subgraphData.disabled = true; - subgraphData.nSignal = _nSignal; migratedData.tokens = _tokens; migratedData.subgraphReceivedOnL2BlockNumber = block.number; @@ -296,6 +235,45 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { emit SubgraphReceivedFromL1(_subgraphID); } + /** + * @notice Deposit GRT into a subgraph and mint signal, using tokens received from L1. + * If the subgraph migration was never finished (or the subgraph doesn't exist), the tokens will be sent to the curator. + * @dev This looks a lot like GNS.mintSignal, but doesn't pull the tokens from the + * curator and has no slippage protection. 
+ * @param _subgraphID Subgraph ID + * @param _curator Curator address + * @param _tokensIn The amount of tokens the nameCurator wants to deposit + */ + function _mintSignalFromL1( + uint256 _subgraphID, + address _curator, + uint256 _tokensIn + ) internal { + IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + + // If subgraph migration wasn't finished, we should send the tokens to the curator + if (!migratedData.l2Done) { + graphToken().transfer(_curator, _tokensIn); + emit CuratorBalanceReturnedToBeneficiary(_subgraphID, _curator, _tokensIn); + } else { + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + // Get name signal to mint for tokens deposited + IL2Curation curation = IL2Curation(address(curation())); + uint256 vSignal = curation.mintTaxFree(subgraphData.subgraphDeploymentID, _tokensIn); + uint256 nSignal = vSignalToNSignal(_subgraphID, vSignal); + + // Update pools + subgraphData.vSignal = subgraphData.vSignal.add(vSignal); + subgraphData.nSignal = subgraphData.nSignal.add(nSignal); + subgraphData.curatorNSignal[_curator] = subgraphData.curatorNSignal[_curator].add( + nSignal + ); + + emit SignalMinted(_subgraphID, _curator, nSignal, vSignal, _tokensIn); + emit CuratorBalanceReceived(_subgraphID, _curator, _tokensIn); + } + } + /** * @dev Get subgraph data. 
* Since there are no legacy subgraphs in L2, we override the base diff --git a/test/gns.test.ts b/test/gns.test.ts index b24c359d3..692d2b804 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -235,10 +235,13 @@ describe('L1GNS', () => { // Give some funds to the signers and approve gns contract to use funds on signers behalf await grt.connect(governor.signer).mint(me.address, tokens100000) await grt.connect(governor.signer).mint(other.address, tokens100000) + await grt.connect(governor.signer).mint(another.address, tokens100000) await grt.connect(me.signer).approve(gns.address, tokens100000) await grt.connect(me.signer).approve(curation.address, tokens100000) await grt.connect(other.signer).approve(gns.address, tokens100000) await grt.connect(other.signer).approve(curation.address, tokens100000) + await grt.connect(another.signer).approve(gns.address, tokens100000) + await grt.connect(another.signer).approve(curation.address, tokens100000) // Update curation tax to test the functionality of it in disableNameSignal() await curation.connect(governor.signer).setCurationTaxPercentage(curationTaxPercentage) @@ -303,22 +306,6 @@ describe('L1GNS', () => { }) }) - describe('setArbitrumInboxAddress', function () { - it('should set `arbitrumInboxAddress`', async function () { - // Can set if allowed - const newValue = other.address - const tx = gns.connect(governor.signer).setArbitrumInboxAddress(newValue) - await expect(tx).emit(gns, 'ArbitrumInboxAddressUpdated').withArgs(newValue) - expect(await gns.arbitrumInboxAddress()).eq(newValue) - }) - - it('reject set `arbitrumInboxAddress` if not allowed', async function () { - const newValue = other.address - const tx = gns.connect(me.signer).setArbitrumInboxAddress(newValue) - await expect(tx).revertedWith('Only Controller governor') - }) - }) - describe('setSubgraphNFT', function () { it('should set `setSubgraphNFT`', async function () { const newValue = gns.address // I just use any contract address @@ -1032,7 +1019,8 
@@ describe('L1GNS', () => { const subgraph0 = await publishNewSubgraph(me, newSubgraph0, gns) // Curate on the subgraph await gns.connect(me.signer).mintSignal(subgraph0.id, toGRT('90000'), 0) - + // Add an additional curator that is not the owner + await gns.connect(other.signer).mintSignal(subgraph0.id, toGRT('10000'), 0) return subgraph0 } @@ -1082,6 +1070,8 @@ describe('L1GNS', () => { const curatedTokens = await grt.balanceOf(curation.address) const subgraphBefore = await gns.subgraphs(subgraph0.id) + const beforeOwnerSignal = await gns.getCuratorSignal(subgraph0.id, me.address) + const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') @@ -1092,25 +1082,26 @@ describe('L1GNS', () => { }) await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, other.address) + const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) + const expectedRemainingTokens = curatedTokens.sub(expectedSentToL2) const subgraphAfter = await gns.subgraphs(subgraph0.id) expect(subgraphAfter.vSignal).eq(0) - expect(await grt.balanceOf(gns.address)).eq(0) + expect(await grt.balanceOf(gns.address)).eq(expectedRemainingTokens) expect(subgraphAfter.disabled).eq(true) - expect(subgraphAfter.withdrawableGRT).eq(0) + expect(subgraphAfter.withdrawableGRT).eq(expectedRemainingTokens) const migrated = await gns.subgraphMigratedToL2(subgraph0.id) expect(migrated).eq(true) const expectedCallhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [subgraph0.id, other.address, subgraphBefore.nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), subgraph0.id, other.address], // code = 0 means RECEIVE_SUBGRAPH_CODE ) - const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( grt.address, gns.address, mockL2GNS.address, - curatedTokens, + expectedSentToL2, expectedCallhookData, ) await expect(tx) @@ -1123,6 +1114,8 @@ describe('L1GNS', () => { const curatedTokens = await 
grt.balanceOf(curation.address) const subgraphBefore = await legacyGNSMock.legacySubgraphData(me.address, seqID) + const beforeOwnerSignal = await legacyGNSMock.getCuratorSignal(subgraphID, me.address) + const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') @@ -1133,25 +1126,27 @@ describe('L1GNS', () => { }) await expect(tx).emit(legacyGNSMock, 'SubgraphSentToL2').withArgs(subgraphID, other.address) + const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) + const expectedRemainingTokens = curatedTokens.sub(expectedSentToL2) const subgraphAfter = await legacyGNSMock.legacySubgraphData(me.address, seqID) expect(subgraphAfter.vSignal).eq(0) - expect(await grt.balanceOf(legacyGNSMock.address)).eq(0) + expect(await grt.balanceOf(legacyGNSMock.address)).eq(expectedRemainingTokens) expect(subgraphAfter.disabled).eq(true) - expect(subgraphAfter.withdrawableGRT).eq(0) + expect(subgraphAfter.withdrawableGRT).eq(expectedRemainingTokens) const migrated = await legacyGNSMock.subgraphMigratedToL2(subgraphID) expect(migrated).eq(true) const expectedCallhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [subgraphID, other.address, subgraphBefore.nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), subgraphID, other.address], // code = 0 means RECEIVE_SUBGRAPH_CODE ) const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( grt.address, legacyGNSMock.address, mockL2GNS.address, - curatedTokens, + expectedSentToL2, expectedCallhookData, ) await expect(tx) @@ -1236,6 +1231,8 @@ describe('L1GNS', () => { const tx2 = gns.connect(me.signer).burnSignal(subgraph0.id, toBN(1), toGRT('0')) await expect(tx2).revertedWith('GNS: Must be active') + const tx3 = gns.connect(other.signer).burnSignal(subgraph0.id, toBN(1), toGRT('0')) + await expect(tx3).revertedWith('GNS: Must be active') }) it('does not allow curators to transfer signal after sending', async function () { const 
subgraph0 = await publishAndCurateOnSubgraph() @@ -1252,8 +1249,10 @@ describe('L1GNS', () => { const tx2 = gns.connect(me.signer).transferSignal(subgraph0.id, other.address, toBN(1)) await expect(tx2).revertedWith('GNS: Must be active') + const tx3 = gns.connect(other.signer).transferSignal(subgraph0.id, me.address, toBN(1)) + await expect(tx3).revertedWith('GNS: Must be active') }) - it('does not allow curators to withdraw GRT after sending', async function () { + it('does not allow the owner to withdraw GRT after sending', async function () { const subgraph0 = await publishAndCurateOnSubgraph() const maxSubmissionCost = toBN('100') @@ -1267,33 +1266,55 @@ describe('L1GNS', () => { await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) const tx2 = gns.connect(me.signer).withdraw(subgraph0.id) - await expect(tx2).revertedWith('GNS: No more GRT to withdraw') + await expect(tx2).revertedWith('GNS: No signal to withdraw GRT') }) - }) - describe('claimCuratorBalanceToBeneficiaryOnL2', function () { - beforeEach(async function () { - await gns.connect(governor.signer).setArbitrumInboxAddress(arbitrumMocks.inboxMock.address) - await legacyGNSMock - .connect(governor.signer) - .setArbitrumInboxAddress(arbitrumMocks.inboxMock.address) + it('allows a curator that is not the owner to withdraw GRT after sending', async function () { + const subgraph0 = await publishAndCurateOnSubgraph() + + const beforeOtherSignal = await gns.getCuratorSignal(subgraph0.id, other.address) + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }) + await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) + + const remainingTokens = (await gns.subgraphs(subgraph0.id)).withdrawableGRT + const tx2 = 
gns.connect(other.signer).withdraw(subgraph0.id) + await expect(tx2) + .emit(gns, 'GRTWithdrawn') + .withArgs(subgraph0.id, other.address, beforeOtherSignal, remainingTokens) }) - it('sends a transaction with a curator balance to the L2GNS using the Arbitrum inbox', async function () { - let beforeCuratorNSignal: BigNumber - const subgraph0 = await publishCurateAndSendSubgraph(async (subgraphID) => { - beforeCuratorNSignal = await gns.getCuratorSignal(subgraphID, me.address) - }) + }) + describe('sendCuratorBalanceToBeneficiaryOnL2', function () { + it('sends a transaction with a curator balance to the L2GNS using the L1 gateway', async function () { + const subgraph0 = await publishCurateAndSendSubgraph() + const afterSubgraph = await gns.subgraphs(subgraph0.id) + const curatorTokens = afterSubgraph.withdrawableGRT - const expectedCalldata = l2GNSIface.encodeFunctionData( - 'claimL1CuratorBalanceToBeneficiary', - [subgraph0.id, me.address, beforeCuratorNSignal, other.address], + const expectedCallhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), subgraph0.id, other.address], // code = 1 means RECEIVE_CURATOR_BALANCE_CODE + ) + const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( + grt.address, + gns.address, + mockL2GNS.address, + curatorTokens, + expectedCallhookData, ) + const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') const tx = gns - .connect(me.signer) - .claimCuratorBalanceToBeneficiaryOnL2( + .connect(other.signer) + .sendCuratorBalanceToBeneficiaryOnL2( subgraph0.id, other.address, maxGas, @@ -1306,26 +1327,33 @@ describe('L1GNS', () => { // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 await expect(tx) - .emit(gns, 'TxToL2') - .withArgs(me.address, mockL2GNS.address, toBN('2'), expectedCalldata) + .emit(l1GraphTokenGateway, 'TxToL2') + .withArgs(gns.address, mockL2Gateway.address, toBN('2'), expectedL2Data) }) 
it('sets the curator signal to zero so it cannot be called twice', async function () { - let beforeCuratorNSignal: BigNumber - const subgraph0 = await publishCurateAndSendSubgraph(async (subgraphID) => { - beforeCuratorNSignal = await gns.getCuratorSignal(subgraphID, me.address) - }) + const subgraph0 = await publishCurateAndSendSubgraph() + const afterSubgraph = await gns.subgraphs(subgraph0.id) + const curatorTokens = afterSubgraph.withdrawableGRT - const expectedCalldata = l2GNSIface.encodeFunctionData( - 'claimL1CuratorBalanceToBeneficiary', - [subgraph0.id, me.address, beforeCuratorNSignal, other.address], + const expectedCallhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), subgraph0.id, other.address], // code = 1 means RECEIVE_CURATOR_BALANCE_CODE ) + const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( + grt.address, + gns.address, + mockL2GNS.address, + curatorTokens, + expectedCallhookData, + ) + const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') const tx = gns - .connect(me.signer) - .claimCuratorBalanceToBeneficiaryOnL2( + .connect(other.signer) + .sendCuratorBalanceToBeneficiaryOnL2( subgraph0.id, other.address, maxGas, @@ -1338,13 +1366,12 @@ describe('L1GNS', () => { // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 await expect(tx) - .emit(gns, 'TxToL2') - .withArgs(me.address, mockL2GNS.address, toBN('2'), expectedCalldata) - expect(await gns.getCuratorSignal(subgraph0.id, me.address)).to.equal(toBN(0)) + .emit(l1GraphTokenGateway, 'TxToL2') + .withArgs(gns.address, mockL2Gateway.address, toBN('2'), expectedL2Data) const tx2 = gns - .connect(me.signer) - .claimCuratorBalanceToBeneficiaryOnL2( + .connect(other.signer) + .sendCuratorBalanceToBeneficiaryOnL2( subgraph0.id, other.address, maxGas, @@ -1356,30 +1383,48 @@ describe('L1GNS', () => { ) await expect(tx2).revertedWith('NO_SIGNAL') }) - it('sends a 
transaction with a curator balance from a legacy subgraph to the L2GNS', async function () { - const subgraphID = await publishAndCurateOnLegacySubgraph(toBN('2')) - - const beforeCuratorNSignal = await legacyGNSMock.getCuratorSignal(subgraphID, me.address) + it('gives each curator an amount of tokens proportional to their nSignal', async function () { + let beforeOtherNSignal: BigNumber + let beforeAnotherNSignal: BigNumber + const subgraph0 = await publishCurateAndSendSubgraph(async (subgraphID) => { + beforeOtherNSignal = await gns.getCuratorSignal(subgraphID, other.address) + await gns.connect(another.signer).mintSignal(subgraphID, toGRT('10000'), 0) + beforeAnotherNSignal = await gns.getCuratorSignal(subgraphID, another.address) + }) + const afterSubgraph = await gns.subgraphs(subgraph0.id) + + // Compute how much is owed to each curator + const curator1Tokens = beforeOtherNSignal + .mul(afterSubgraph.withdrawableGRT) + .div(afterSubgraph.nSignal) + const curator2Tokens = beforeAnotherNSignal + .mul(afterSubgraph.withdrawableGRT) + .div(afterSubgraph.nSignal) + + const expectedCallhookData1 = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), subgraph0.id, other.address], // code = 1 means RECEIVE_CURATOR_BALANCE_CODE + ) + const expectedCallhookData2 = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), subgraph0.id, another.address], // code = 1 means RECEIVE_CURATOR_BALANCE_CODE + ) + const expectedL2Data1 = await l1GraphTokenGateway.getOutboundCalldata( + grt.address, + gns.address, + mockL2GNS.address, + curator1Tokens, + expectedCallhookData1, + ) const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') - const tx = legacyGNSMock - .connect(me.signer) - .sendSubgraphToL2(subgraphID, me.address, maxGas, gasPriceBid, maxSubmissionCost, { - value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), - }) - await expect(tx).emit(legacyGNSMock, 'SubgraphSentToL2').withArgs(subgraphID, 
me.address) - - const expectedCalldata = l2GNSIface.encodeFunctionData( - 'claimL1CuratorBalanceToBeneficiary', - [subgraphID, me.address, beforeCuratorNSignal, other.address], - ) - const tx2 = legacyGNSMock - .connect(me.signer) - .claimCuratorBalanceToBeneficiaryOnL2( - subgraphID, + const tx = gns + .connect(other.signer) + .sendCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, other.address, maxGas, gasPriceBid, @@ -1390,9 +1435,36 @@ describe('L1GNS', () => { ) // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 + await expect(tx) + .emit(l1GraphTokenGateway, 'TxToL2') + .withArgs(gns.address, mockL2Gateway.address, toBN('2'), expectedL2Data1) + + // Accept slight numerical errors given how we compute the amount of tokens to send + const curator2TokensUpdated = (await gns.subgraphs(subgraph0.id)).withdrawableGRT + expect(toRound(toFloat(curator2TokensUpdated))).to.equal(toRound(toFloat(curator2Tokens))) + const expectedL2Data2 = await l1GraphTokenGateway.getOutboundCalldata( + grt.address, + gns.address, + mockL2GNS.address, + curator2TokensUpdated, + expectedCallhookData2, + ) + const tx2 = gns + .connect(another.signer) + .sendCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + another.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + // seqNum (third argument in the event) is 3 now await expect(tx2) - .emit(legacyGNSMock, 'TxToL2') - .withArgs(me.address, mockL2GNS.address, toBN('2'), expectedCalldata) + .emit(l1GraphTokenGateway, 'TxToL2') + .withArgs(gns.address, mockL2Gateway.address, toBN('3'), expectedL2Data2) }) it('rejects calls for a subgraph that was not sent to L2', async function () { const subgraph0 = await publishAndCurateOnSubgraph() @@ -1403,7 +1475,7 @@ describe('L1GNS', () => { const tx = gns .connect(me.signer) - .claimCuratorBalanceToBeneficiaryOnL2( + .sendCuratorBalanceToBeneficiaryOnL2( subgraph0.id, 
other.address, maxGas, @@ -1429,7 +1501,7 @@ describe('L1GNS', () => { const tx = gns .connect(me.signer) - .claimCuratorBalanceToBeneficiaryOnL2( + .sendCuratorBalanceToBeneficiaryOnL2( subgraph0.id, other.address, maxGas, @@ -1451,7 +1523,7 @@ describe('L1GNS', () => { const tx = gns .connect(me.signer) - .claimCuratorBalanceToBeneficiaryOnL2( + .sendCuratorBalanceToBeneficiaryOnL2( subgraph0.id, other.address, maxGas, diff --git a/test/l2/l2Curation.test.ts b/test/l2/l2Curation.test.ts index f082efa97..b98afe462 100644 --- a/test/l2/l2Curation.test.ts +++ b/test/l2/l2Curation.test.ts @@ -256,9 +256,7 @@ describe('L2Curation', () => { const beforeTotalTokens = await grt.balanceOf(curation.address) // Curate - const tx = curation - .connect(gnsImpersonator) - .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) + const tx = curation.connect(gnsImpersonator).mintTaxFree(subgraphDeploymentID, tokensToDeposit) await expect(tx) .emit(curation, 'Signalled') .withArgs(gns.address, subgraphDeploymentID, tokensToDeposit, expectedSignal, 0) @@ -472,9 +470,7 @@ describe('L2Curation', () => { describe('curate tax free (from GNS)', async function () { it('can not be called by anyone other than GNS', async function () { const tokensToDeposit = await curation.minimumCurationDeposit() - const tx = curation - .connect(curator.signer) - .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) + const tx = curation.connect(curator.signer).mintTaxFree(subgraphDeploymentID, tokensToDeposit) await expect(tx).revertedWith('Only the GNS can call this') }) @@ -482,7 +478,7 @@ describe('L2Curation', () => { const tokensToDeposit = (await curation.minimumCurationDeposit()).sub(toBN(1)) const tx = curation .connect(gnsImpersonator) - .mintTaxFree(subgraphDeploymentID, tokensToDeposit, 0) + .mintTaxFree(subgraphDeploymentID, tokensToDeposit) await expect(tx).revertedWith('Curation deposit is below minimum required') }) @@ -510,15 +506,6 @@ describe('L2Curation', () => { ) await 
shouldMintTaxFree(tokensToDeposit, expectedSignal) }) - - it('should revert curate if over slippage', async function () { - const tokensToDeposit = toGRT('1000') - const expectedSignal = signalAmountFor1000Tokens - const tx = curation - .connect(gnsImpersonator) - .mintTaxFree(subgraphDeploymentID, tokensToDeposit, expectedSignal.add(1)) - await expect(tx).revertedWith('Slippage protection') - }) }) describe('collect', async function () { diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index a0928ceaf..74e7b3a96 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -10,7 +10,6 @@ import { getL2SignerFromL1, setAccountBalance, latestBlock, - advanceBlocks, } from '../lib/testHelpers' import { L2FixtureContracts, NetworkFixture } from '../lib/fixtures' import { toBN } from '../lib/testHelpers' @@ -23,7 +22,6 @@ import { burnSignal, DEFAULT_RESERVE_RATIO, deprecateSubgraph, - getTokensAndVSignal, mintSignal, publishNewSubgraph, publishNewVersion, @@ -40,7 +38,6 @@ interface L1SubgraphParams { curatedTokens: BigNumber subgraphMetadata: string versionMetadata: string - nSignal: BigNumber } describe('L2GNS', () => { @@ -89,7 +86,6 @@ describe('L2GNS', () => { curatedTokens: toGRT('1337'), subgraphMetadata: randomHexBytes(), versionMetadata: randomHexBytes(), - nSignal: toGRT('45670'), } } const migrateMockSubgraphFromL1 = async function ( @@ -97,11 +93,10 @@ describe('L2GNS', () => { curatedTokens: BigNumber, subgraphMetadata: string, versionMetadata: string, - nSignal: BigNumber, ) { const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -243,10 +238,10 @@ describe('L2GNS', () => { describe('receiving a subgraph from L1 (onTokenTransfer)', function () { it('cannot be called by someone other than the 
L2GraphTokenGateway', async function () { - const { l1SubgraphId, curatedTokens, nSignal } = await defaultL1SubgraphParams() + const { l1SubgraphId, curatedTokens } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], ) const tx = gns .connect(me.signer) @@ -254,22 +249,20 @@ describe('L2GNS', () => { await expect(tx).revertedWith('ONLY_GATEWAY') }) it('rejects calls if the L1 sender is not the L1GNS', async function () { - const { l1SubgraphId, curatedTokens, nSignal } = await defaultL1SubgraphParams() + const { l1SubgraphId, curatedTokens } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], ) const tx = gatewayFinalizeTransfer(me.address, gns.address, curatedTokens, callhookData) await expect(tx).revertedWith('ONLY_L1_GNS_THROUGH_BRIDGE') }) it('creates a subgraph in a disabled state', async function () { - const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) - const curatedTokens = toGRT('1337') - const nSignal = toBN('4567') + const { l1SubgraphId, curatedTokens } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], ) const tx = gatewayFinalizeTransfer( mockL1GNS.address, @@ -291,7 +284,7 @@ describe('L2GNS', () => { expect(migrationData.subgraphReceivedOnL2BlockNumber).eq(await latestBlock()) expect(subgraphData.vSignal).eq(0) - expect(subgraphData.nSignal).eq(nSignal) + expect(subgraphData.nSignal).eq(0) expect(subgraphData.subgraphDeploymentID).eq(HashZero) 
expect(subgraphData.reserveRatioDeprecated).eq(DEFAULT_RESERVE_RATIO) expect(subgraphData.disabled).eq(true) @@ -302,12 +295,10 @@ describe('L2GNS', () => { it('does not conflict with a locally created subgraph', async function () { const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - const l1SubgraphId = await buildSubgraphID(me.address, toBN('0'), 1) - const curatedTokens = toGRT('1337') - const nSignal = toBN('4567') + const { l1SubgraphId, curatedTokens } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], ) const tx = gatewayFinalizeTransfer( mockL1GNS.address, @@ -329,7 +320,7 @@ describe('L2GNS', () => { expect(migrationData.subgraphReceivedOnL2BlockNumber).eq(await latestBlock()) expect(subgraphData.vSignal).eq(0) - expect(subgraphData.nSignal).eq(nSignal) + expect(subgraphData.nSignal).eq(0) expect(subgraphData.subgraphDeploymentID).eq(HashZero) expect(subgraphData.reserveRatioDeprecated).eq(DEFAULT_RESERVE_RATIO) expect(subgraphData.disabled).eq(true) @@ -350,11 +341,11 @@ describe('L2GNS', () => { describe('finishing a subgraph migration from L1', function () { it('publishes the migrated subgraph and mints signal with no tax', async function () { - const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) // Calculate expected signal before minting @@ -389,13 +380,15 @@ describe('L2GNS', () => { expect(migrationDataAfter.l2Done).eq(true) 
expect(subgraphAfter.disabled).eq(false) expect(subgraphAfter.subgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) + const expectedNSignal = await gns.vSignalToNSignal(l1SubgraphId, expectedSignal) + expect(await gns.getCuratorSignal(l1SubgraphId, me.address)).eq(expectedNSignal) }) it('cannot be called by someone other than the subgraph owner', async function () { - const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -438,11 +431,11 @@ describe('L2GNS', () => { await expect(tx).revertedWith('INVALID_SUBGRAPH') }) it('accepts calls to a pre-curated subgraph deployment', async function () { - const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -490,13 +483,11 @@ describe('L2GNS', () => { ) }) it('rejects calls if the subgraph deployment ID is zero', async function () { - const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) - const curatedTokens = toGRT('1337') const metadata = randomHexBytes() - const nSignal = toBN('4567') + const { l1SubgraphId, curatedTokens } = await defaultL1SubgraphParams() const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, 
me.address, nSignal], + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) @@ -506,139 +497,60 @@ describe('L2GNS', () => { await expect(tx).revertedWith('GNS: deploymentID != 0') }) }) - describe('deprecating a subgraph with an unfinished migration from L1', function () { - it('deprecates the subgraph and sets the withdrawableGRT', async function () { - const { l1SubgraphId, curatedTokens, nSignal } = await defaultL1SubgraphParams() - const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], - ) - await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) - - await advanceBlocks(50400) - - const tx = gns - .connect(other.signer) // Can be called by anyone - .deprecateSubgraphMigratedFromL1(l1SubgraphId) - await expect(tx).emit(gns, 'SubgraphDeprecated').withArgs(l1SubgraphId, curatedTokens) - - const subgraphAfter = await gns.subgraphs(l1SubgraphId) - const migrationDataAfter = await gns.subgraphL2MigrationData(l1SubgraphId) - expect(subgraphAfter.vSignal).eq(0) - expect(migrationDataAfter.l2Done).eq(true) - expect(subgraphAfter.disabled).eq(true) - expect(subgraphAfter.subgraphDeploymentID).eq(HashZero) - expect(subgraphAfter.withdrawableGRT).eq(curatedTokens) - - // Check that the curator can withdraw the GRT - const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) - await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) - // Note the signal is assigned to other.address as beneficiary - await gns - .connect(mockL1GNSL2Alias) - .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) - const curatorBalanceBefore = await grt.balanceOf(other.address) - const expectedTokensOut = curatedTokens.mul(toGRT('10')).div(nSignal) - const withdrawTx = await 
gns.connect(other.signer).withdraw(l1SubgraphId) - await expect(withdrawTx) - .emit(gns, 'GRTWithdrawn') - .withArgs(l1SubgraphId, other.address, toGRT('10'), expectedTokensOut) - const curatorBalanceAfter = await grt.balanceOf(other.address) - expect(curatorBalanceAfter.sub(curatorBalanceBefore)).eq(expectedTokensOut) - }) - it('rejects calls if not enough time has passed', async function () { - const { l1SubgraphId, curatedTokens, nSignal } = await defaultL1SubgraphParams() - const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], - ) - await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) - - await advanceBlocks(50399) - - const tx = gns - .connect(other.signer) // Can be called by anyone - .deprecateSubgraphMigratedFromL1(l1SubgraphId) - await expect(tx).revertedWith('TOO_EARLY') - }) - it('rejects calls if the subgraph migration was finished', async function () { - const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = - await defaultL1SubgraphParams() - const callhookData = defaultAbiCoder.encode( - ['uint256', 'address', 'uint256'], - [l1SubgraphId, me.address, nSignal], - ) - await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) - - await advanceBlocks(50400) - - await gns - .connect(me.signer) - .finishSubgraphMigrationFromL1( - l1SubgraphId, - newSubgraph0.subgraphDeploymentID, - subgraphMetadata, - versionMetadata, - ) - - const tx = gns - .connect(other.signer) // Can be called by anyone - .deprecateSubgraphMigratedFromL1(l1SubgraphId) - await expect(tx).revertedWith('ALREADY_FINISHED') - }) - it('rejects calls for a subgraph that does not exist', async function () { - const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) - - const tx = gns.connect(me.signer).deprecateSubgraphMigratedFromL1(l1SubgraphId) - await expect(tx).revertedWith('INVALID_SUBGRAPH') - }) - it('rejects 
calls for a subgraph that was not migrated', async function () { - const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - - const tx = gns.connect(me.signer).deprecateSubgraphMigratedFromL1(l2Subgraph.id) - await expect(tx).revertedWith('INVALID_SUBGRAPH') - }) - }) - describe('claiming a curator balance with a message from L1', function () { + describe('claiming a curator balance with a message from L1 (onTokenTransfer)', function () { it('assigns a curator balance to a beneficiary', async function () { const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) // Eth for gas: await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) - const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = await defaultL1SubgraphParams() await migrateMockSubgraphFromL1( l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, - nSignal, ) - const tx = gns - .connect(mockL1GNSL2Alias) - .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + const l2OwnerSignalBefore = await gns.getCuratorSignal(l1SubgraphId, me.address) + + const newCuratorTokens = toGRT('10') + const callhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), l1SubgraphId, other.address], + ) + const tx = await gatewayFinalizeTransfer( + mockL1GNS.address, + gns.address, + newCuratorTokens, + callhookData, + ) + await expect(tx) - .emit(gns, 'CuratorBalanceClaimed') - .withArgs(l1SubgraphId, me.address, other.address, toGRT('10')) - const l1CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, me.address) - const l2CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, other.address) - expect(l1CuratorBalance).eq(0) - expect(l2CuratorBalance).eq(toGRT('10')) + .emit(gns, 'CuratorBalanceReceived') + .withArgs(l1SubgraphId, other.address, newCuratorTokens) + + const l2NewCuratorSignal = await 
gns.getCuratorSignal(l1SubgraphId, other.address) + const expectedNewCuratorSignal = await gns.vSignalToNSignal( + l1SubgraphId, + await curation.tokensToSignalNoTax(newSubgraph0.subgraphDeploymentID, newCuratorTokens), + ) + const l2OwnerSignalAfter = await gns.getCuratorSignal(l1SubgraphId, me.address) + expect(l2OwnerSignalAfter).eq(l2OwnerSignalBefore) + expect(l2NewCuratorSignal).eq(expectedNewCuratorSignal) }) - it('adds the balance to any existing balance for the beneficiary', async function () { + it('adds the signal to any existing signal for the beneficiary', async function () { const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) // Eth for gas: await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) - const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = await defaultL1SubgraphParams() await migrateMockSubgraphFromL1( l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, - nSignal, ) await grt.connect(governor.signer).mint(other.address, toGRT('10')) @@ -646,98 +558,124 @@ describe('L2GNS', () => { await gns.connect(other.signer).mintSignal(l1SubgraphId, toGRT('10'), toBN(0)) const prevSignal = await gns.getCuratorSignal(l1SubgraphId, other.address) - const tx = gns - .connect(mockL1GNSL2Alias) - .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + const newCuratorTokens = toGRT('10') + const callhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), l1SubgraphId, other.address], + ) + const tx = await gatewayFinalizeTransfer( + mockL1GNS.address, + gns.address, + newCuratorTokens, + callhookData, + ) + await expect(tx) - .emit(gns, 'CuratorBalanceClaimed') - .withArgs(l1SubgraphId, me.address, other.address, toGRT('10')) - const l1CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, me.address) + .emit(gns, 
'CuratorBalanceReceived') + .withArgs(l1SubgraphId, other.address, newCuratorTokens) + + const expectedNewCuratorSignal = await gns.vSignalToNSignal( + l1SubgraphId, + await curation.tokensToSignalNoTax(newSubgraph0.subgraphDeploymentID, newCuratorTokens), + ) const l2CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, other.address) - expect(l1CuratorBalance).eq(0) - expect(l2CuratorBalance).eq(prevSignal.add(toGRT('10'))) + expect(l2CuratorBalance).eq(prevSignal.add(expectedNewCuratorSignal)) }) - it('can only be called from the counterpart GNS L2 alias', async function () { - const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = + it('cannot be called by someone other than the L2GraphTokenGateway', async function () { + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = await defaultL1SubgraphParams() await migrateMockSubgraphFromL1( l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, - nSignal, ) + const callhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), l1SubgraphId, me.address], + ) + const tx = gns.connect(me.signer).onTokenTransfer(mockL1GNS.address, toGRT('1'), callhookData) + await expect(tx).revertedWith('ONLY_GATEWAY') + }) + it('rejects calls if the L1 sender is not the L1GNS', async function () { + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = + await defaultL1SubgraphParams() + await migrateMockSubgraphFromL1( + l1SubgraphId, + curatedTokens, + subgraphMetadata, + versionMetadata, + ) + const callhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), l1SubgraphId, me.address], + ) + const tx = gatewayFinalizeTransfer(me.address, gns.address, toGRT('1'), callhookData) - const tx = gns - .connect(governor.signer) - .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) - await expect(tx).revertedWith('ONLY_COUNTERPART_GNS') - - const tx2 = gns - 
.connect(me.signer) - .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) - await expect(tx2).revertedWith('ONLY_COUNTERPART_GNS') - - const tx3 = gns - .connect(mockL1GNS.signer) - .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) - await expect(tx3).revertedWith('ONLY_COUNTERPART_GNS') + await expect(tx).revertedWith('ONLY_L1_GNS_THROUGH_BRIDGE') }) - it('rejects calls for a subgraph that does not exist', async function () { + it('if a subgraph does not exist, it returns the tokens to the beneficiary', async function () { const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) // Eth for gas: await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) const { l1SubgraphId } = await defaultL1SubgraphParams() - const tx = gns - .connect(mockL1GNSL2Alias) - .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) - await expect(tx).revertedWith('!MIGRATED') + const callhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), l1SubgraphId, me.address], + ) + const curatorTokensBefore = await grt.balanceOf(me.address) + const gnsBalanceBefore = await grt.balanceOf(gns.address) + const tx = gatewayFinalizeTransfer(mockL1GNS.address, gns.address, toGRT('1'), callhookData) + await expect(tx) + .emit(gns, 'CuratorBalanceReturnedToBeneficiary') + .withArgs(l1SubgraphId, me.address, toGRT('1')) + const curatorTokensAfter = await grt.balanceOf(me.address) + expect(curatorTokensAfter).eq(curatorTokensBefore.add(toGRT('1'))) + const gnsBalanceAfter = await grt.balanceOf(gns.address) + // gatewayFinalizeTransfer will mint the tokens that are sent to the curator, + // so the GNS balance should be the same + expect(gnsBalanceAfter).eq(gnsBalanceBefore) }) - it('rejects calls for an L2-native subgraph', async function () { + it('for an L2-native subgraph, it sends the tokens to the beneficiary', async function () { + 
// This should never really happen unless there's a clash in subgraph IDs (which should + // also never happen), but we test it anyway to ensure it's a well-defined behavior const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) // Eth for gas: await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) - const tx = gns - .connect(mockL1GNSL2Alias) - .claimL1CuratorBalanceToBeneficiary(l2Subgraph.id!, me.address, toGRT('10'), other.address) - await expect(tx).revertedWith('!MIGRATED') - }) - it('rejects calls if the balance was already claimed', async function () { - const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) - // Eth for gas: - await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) - - const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata, nSignal } = - await defaultL1SubgraphParams() - await migrateMockSubgraphFromL1( - l1SubgraphId, - curatedTokens, - subgraphMetadata, - versionMetadata, - nSignal, + const callhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), l2Subgraph.id!, me.address], ) - - const tx = gns - .connect(mockL1GNSL2Alias) - .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) + const curatorTokensBefore = await grt.balanceOf(me.address) + const gnsBalanceBefore = await grt.balanceOf(gns.address) + const tx = gatewayFinalizeTransfer(mockL1GNS.address, gns.address, toGRT('1'), callhookData) await expect(tx) - .emit(gns, 'CuratorBalanceClaimed') - .withArgs(l1SubgraphId, me.address, other.address, toGRT('10')) - const l1CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, me.address) - const l2CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, other.address) - expect(l1CuratorBalance).eq(0) - expect(l2CuratorBalance).eq(toGRT('10')) - - // Now trying again should revert - const tx2 = gns - 
.connect(mockL1GNSL2Alias) - .claimL1CuratorBalanceToBeneficiary(l1SubgraphId, me.address, toGRT('10'), other.address) - await expect(tx2).revertedWith('ALREADY_CLAIMED') + .emit(gns, 'CuratorBalanceReturnedToBeneficiary') + .withArgs(l2Subgraph.id!, me.address, toGRT('1')) + const curatorTokensAfter = await grt.balanceOf(me.address) + expect(curatorTokensAfter).eq(curatorTokensBefore.add(toGRT('1'))) + const gnsBalanceAfter = await grt.balanceOf(gns.address) + // gatewayFinalizeTransfer will mint the tokens that are sent to the curator, + // so the GNS balance should be the same + expect(gnsBalanceAfter).eq(gnsBalanceBefore) + }) + }) + describe('onTokenTransfer with invalid codes', function () { + it('reverts', async function () { + // This should never really happen unless the Arbitrum bridge is compromised, + // so we test it anyway to ensure it's a well-defined behavior. + // code 2 does not exist: + const callhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(2), toBN(1337), me.address], + ) + const tx = gatewayFinalizeTransfer(mockL1GNS.address, gns.address, toGRT('1'), callhookData) + await expect(tx).revertedWith('INVALID_CODE') }) }) }) From e9fb416c3898c985ffd5794597436548cc8c7ec0 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 31 Jan 2023 12:26:35 -0300 Subject: [PATCH 081/112] fix: also return tokens if subgraph is disabled --- contracts/l2/discovery/L2GNS.sol | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 15a017283..3fc23f691 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -250,13 +250,13 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { uint256 _tokensIn ) internal { IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; + SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); // If subgraph migration wasn't finished, we 
should send the tokens to the curator - if (!migratedData.l2Done) { + if (!migratedData.l2Done || subgraphData.disabled) { graphToken().transfer(_curator, _tokensIn); emit CuratorBalanceReturnedToBeneficiary(_subgraphID, _curator, _tokensIn); } else { - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); // Get name signal to mint for tokens deposited IL2Curation curation = IL2Curation(address(curation())); uint256 vSignal = curation.mintTaxFree(subgraphData.subgraphDeploymentID, _tokensIn); From 783649735eb5ec8858e92f7714013a29135cdbe8 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 31 Jan 2023 13:22:27 -0300 Subject: [PATCH 082/112] test: check edge cases in L2GNS --- test/l2/l2GNS.test.ts | 67 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 74e7b3a96..ffc00bcd1 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -664,6 +664,73 @@ describe('L2GNS', () => { // so the GNS balance should be the same expect(gnsBalanceAfter).eq(gnsBalanceBefore) }) + it('if a subgraph migration was not finished, it returns the tokens to the beneficiary', async function () { + const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) + // Eth for gas: + await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) + + const { l1SubgraphId, curatedTokens } = await defaultL1SubgraphParams() + const callhookDataSG = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], + ) + await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookDataSG) + + // At this point the SG exists, but migration is not finished + + const callhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), l1SubgraphId, me.address], + ) + const curatorTokensBefore = await grt.balanceOf(me.address) + const gnsBalanceBefore = await grt.balanceOf(gns.address) + const 
tx = gatewayFinalizeTransfer(mockL1GNS.address, gns.address, toGRT('1'), callhookData) + await expect(tx) + .emit(gns, 'CuratorBalanceReturnedToBeneficiary') + .withArgs(l1SubgraphId, me.address, toGRT('1')) + const curatorTokensAfter = await grt.balanceOf(me.address) + expect(curatorTokensAfter).eq(curatorTokensBefore.add(toGRT('1'))) + const gnsBalanceAfter = await grt.balanceOf(gns.address) + // gatewayFinalizeTransfer will mint the tokens that are sent to the curator, + // so the GNS balance should be the same + expect(gnsBalanceAfter).eq(gnsBalanceBefore) + }) + + it('if a subgraph was deprecated after migration, it returns the tokens to the beneficiary', async function () { + const mockL1GNSL2Alias = await getL2SignerFromL1(mockL1GNS.address) + // Eth for gas: + await setAccountBalance(await mockL1GNSL2Alias.getAddress(), parseEther('1')) + + const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = + await defaultL1SubgraphParams() + await migrateMockSubgraphFromL1( + l1SubgraphId, + curatedTokens, + subgraphMetadata, + versionMetadata, + ) + + await gns.connect(me.signer).deprecateSubgraph(l1SubgraphId) + + // SG was migrated, but is deprecated now! 
+ + const callhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(1), l1SubgraphId, me.address], + ) + const curatorTokensBefore = await grt.balanceOf(me.address) + const gnsBalanceBefore = await grt.balanceOf(gns.address) + const tx = gatewayFinalizeTransfer(mockL1GNS.address, gns.address, toGRT('1'), callhookData) + await expect(tx) + .emit(gns, 'CuratorBalanceReturnedToBeneficiary') + .withArgs(l1SubgraphId, me.address, toGRT('1')) + const curatorTokensAfter = await grt.balanceOf(me.address) + expect(curatorTokensAfter).eq(curatorTokensBefore.add(toGRT('1'))) + const gnsBalanceAfter = await grt.balanceOf(gns.address) + // gatewayFinalizeTransfer will mint the tokens that are sent to the curator, + // so the GNS balance should be the same + expect(gnsBalanceAfter).eq(gnsBalanceBefore) + }) }) describe('onTokenTransfer with invalid codes', function () { it('reverts', async function () { From 3b78aac8a36282bf7713c6661d0f86fb6b404cbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Carranza=20V=C3=A9lez?= Date: Wed, 15 Feb 2023 18:32:32 -0300 Subject: [PATCH 083/112] fix: apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Tomás Migone --- contracts/discovery/L1GNS.sol | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 3ca2b0866..c5b4015b0 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -27,12 +27,13 @@ contract L1GNS is GNS, L1GNSV1Storage { using SafeMathUpgradeable for uint256; /// @dev Emitted when a subgraph was sent to L2 through the bridge - event SubgraphSentToL2(uint256 indexed _subgraphID, address indexed _l2Owner); + event SubgraphSentToL2(uint256 indexed _subgraphID, address indexed _l1Owner, address indexed _l2Owner, uint256 _tokens); /// @dev Emitted when a curator's balance for a subgraph was sent to L2 event 
CuratorBalanceSentToL2( uint256 indexed _subgraphID, - address indexed _curator, + address indexed _l1Curator, + address indexed _l2Beneficiary, uint256 _tokens ); From ce9a8f982ee87baa5adbbe53699e754a68b759b9 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Wed, 15 Feb 2023 18:56:59 -0300 Subject: [PATCH 084/112] fix: events emitted in L2 GNS migration --- contracts/discovery/L1GNS.sol | 10 ++++- test/gns.test.ts | 78 ++++++++++++++++++++++++++++------- 2 files changed, 72 insertions(+), 16 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index c5b4015b0..64c168ede 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -27,7 +27,12 @@ contract L1GNS is GNS, L1GNSV1Storage { using SafeMathUpgradeable for uint256; /// @dev Emitted when a subgraph was sent to L2 through the bridge - event SubgraphSentToL2(uint256 indexed _subgraphID, address indexed _l1Owner, address indexed _l2Owner, uint256 _tokens); + event SubgraphSentToL2( + uint256 indexed _subgraphID, + address indexed _l1Owner, + address indexed _l2Owner, + uint256 _tokens + ); /// @dev Emitted when a curator's balance for a subgraph was sent to L2 event CuratorBalanceSentToL2( @@ -99,7 +104,7 @@ contract L1GNS is GNS, L1GNSV1Storage { subgraphData.reserveRatioDeprecated = 0; _burnNFT(_subgraphID); - emit SubgraphSentToL2(_subgraphID, _l2Owner); + emit SubgraphSentToL2(_subgraphID, msg.sender, _l2Owner, tokensForL2); } /** @@ -156,6 +161,7 @@ contract L1GNS is GNS, L1GNSV1Storage { _maxSubmissionCost, extraData ); + emit CuratorBalanceSentToL2(_subgraphID, msg.sender, _beneficiary, tokensForL2); } /** diff --git a/test/gns.test.ts b/test/gns.test.ts index 692d2b804..a17578081 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1036,12 +1036,21 @@ describe('L1GNS', () => { const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') + + const subgraphBefore = await gns.subgraphs(subgraph0.id) + 
const curatedTokens = await gns.subgraphTokens(subgraph0.id) + const beforeOwnerSignal = await gns.getCuratorSignal(subgraph0.id, me.address) + const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) + const tx = gns .connect(me.signer) .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) + + await expect(tx) + .emit(gns, 'SubgraphSentToL2') + .withArgs(subgraph0.id, me.address, me.address, expectedSentToL2) return subgraph0 } const publishAndCurateOnLegacySubgraph = async function (seqID: BigNumber): Promise { @@ -1080,9 +1089,11 @@ describe('L1GNS', () => { .sendSubgraphToL2(subgraph0.id, other.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, other.address) - const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) + await expect(tx) + .emit(gns, 'SubgraphSentToL2') + .withArgs(subgraph0.id, me.address, other.address, expectedSentToL2) + const expectedRemainingTokens = curatedTokens.sub(expectedSentToL2) const subgraphAfter = await gns.subgraphs(subgraph0.id) expect(subgraphAfter.vSignal).eq(0) @@ -1111,10 +1122,11 @@ describe('L1GNS', () => { it('sends tokens and calldata for a legacy subgraph to L2 through the GRT bridge', async function () { const seqID = toBN('2') const subgraphID = await publishAndCurateOnLegacySubgraph(seqID) - const curatedTokens = await grt.balanceOf(curation.address) - const subgraphBefore = await legacyGNSMock.legacySubgraphData(me.address, seqID) + const subgraphBefore = await legacyGNSMock.legacySubgraphData(me.address, seqID) + const curatedTokens = await legacyGNSMock.subgraphTokens(subgraphID) const beforeOwnerSignal = await 
legacyGNSMock.getCuratorSignal(subgraphID, me.address) + const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) const maxSubmissionCost = toBN('100') const maxGas = toBN('10') @@ -1124,9 +1136,10 @@ describe('L1GNS', () => { .sendSubgraphToL2(subgraphID, other.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(legacyGNSMock, 'SubgraphSentToL2').withArgs(subgraphID, other.address) + await expect(tx) + .emit(legacyGNSMock, 'SubgraphSentToL2') + .withArgs(subgraphID, me.address, other.address, expectedSentToL2) - const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) const expectedRemainingTokens = curatedTokens.sub(expectedSentToL2) const subgraphAfter = await legacyGNSMock.legacySubgraphData(me.address, seqID) expect(subgraphAfter.vSignal).eq(0) @@ -1169,6 +1182,11 @@ describe('L1GNS', () => { it('rejects calls for a subgraph that was already sent', async function () { const subgraph0 = await publishAndCurateOnSubgraph() + const subgraphBefore = await gns.subgraphs(subgraph0.id) + const curatedTokens = await gns.subgraphTokens(subgraph0.id) + const beforeOwnerSignal = await gns.getCuratorSignal(subgraph0.id, me.address) + const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) + const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') @@ -1177,7 +1195,9 @@ describe('L1GNS', () => { .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) + await expect(tx) + .emit(gns, 'SubgraphSentToL2') + .withArgs(subgraph0.id, me.address, me.address, expectedSentToL2) const tx2 = gns .connect(me.signer) @@ -1219,6 +1239,11 @@ describe('L1GNS', () => { it('does not allow curators to burn 
signal after sending', async function () { const subgraph0 = await publishAndCurateOnSubgraph() + const subgraphBefore = await gns.subgraphs(subgraph0.id) + const curatedTokens = await gns.subgraphTokens(subgraph0.id) + const beforeOwnerSignal = await gns.getCuratorSignal(subgraph0.id, me.address) + const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) + const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') @@ -1227,7 +1252,9 @@ describe('L1GNS', () => { .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) + await expect(tx) + .emit(gns, 'SubgraphSentToL2') + .withArgs(subgraph0.id, me.address, me.address, expectedSentToL2) const tx2 = gns.connect(me.signer).burnSignal(subgraph0.id, toBN(1), toGRT('0')) await expect(tx2).revertedWith('GNS: Must be active') @@ -1237,6 +1264,11 @@ describe('L1GNS', () => { it('does not allow curators to transfer signal after sending', async function () { const subgraph0 = await publishAndCurateOnSubgraph() + const subgraphBefore = await gns.subgraphs(subgraph0.id) + const curatedTokens = await gns.subgraphTokens(subgraph0.id) + const beforeOwnerSignal = await gns.getCuratorSignal(subgraph0.id, me.address) + const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) + const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') @@ -1245,7 +1277,9 @@ describe('L1GNS', () => { .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) + await expect(tx) + .emit(gns, 'SubgraphSentToL2') + .withArgs(subgraph0.id, me.address, me.address, expectedSentToL2) const 
tx2 = gns.connect(me.signer).transferSignal(subgraph0.id, other.address, toBN(1)) await expect(tx2).revertedWith('GNS: Must be active') @@ -1255,6 +1289,11 @@ describe('L1GNS', () => { it('does not allow the owner to withdraw GRT after sending', async function () { const subgraph0 = await publishAndCurateOnSubgraph() + const subgraphBefore = await gns.subgraphs(subgraph0.id) + const curatedTokens = await gns.subgraphTokens(subgraph0.id) + const beforeOwnerSignal = await gns.getCuratorSignal(subgraph0.id, me.address) + const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) + const maxSubmissionCost = toBN('100') const maxGas = toBN('10') const gasPriceBid = toBN('20') @@ -1263,7 +1302,9 @@ describe('L1GNS', () => { .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) + await expect(tx) + .emit(gns, 'SubgraphSentToL2') + .withArgs(subgraph0.id, me.address, me.address, expectedSentToL2) const tx2 = gns.connect(me.signer).withdraw(subgraph0.id) await expect(tx2).revertedWith('GNS: No signal to withdraw GRT') @@ -1271,6 +1312,10 @@ describe('L1GNS', () => { it('allows a curator that is not the owner to withdraw GRT after sending', async function () { const subgraph0 = await publishAndCurateOnSubgraph() + const subgraphBefore = await gns.subgraphs(subgraph0.id) + const curatedTokens = await gns.subgraphTokens(subgraph0.id) + const beforeOwnerSignal = await gns.getCuratorSignal(subgraph0.id, me.address) + const expectedSentToL2 = beforeOwnerSignal.mul(curatedTokens).div(subgraphBefore.nSignal) const beforeOtherSignal = await gns.getCuratorSignal(subgraph0.id, other.address) const maxSubmissionCost = toBN('100') @@ -1281,7 +1326,9 @@ describe('L1GNS', () => { .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { value: 
maxSubmissionCost.add(maxGas.mul(gasPriceBid)), }) - await expect(tx).emit(gns, 'SubgraphSentToL2').withArgs(subgraph0.id, me.address) + await expect(tx) + .emit(gns, 'SubgraphSentToL2') + .withArgs(subgraph0.id, me.address, me.address, expectedSentToL2) const remainingTokens = (await gns.subgraphs(subgraph0.id)).withdrawableGRT const tx2 = gns.connect(other.signer).withdraw(subgraph0.id) @@ -1298,7 +1345,7 @@ describe('L1GNS', () => { const expectedCallhookData = defaultAbiCoder.encode( ['uint8', 'uint256', 'address'], - [toBN(1), subgraph0.id, other.address], // code = 1 means RECEIVE_CURATOR_BALANCE_CODE + [toBN(1), subgraph0.id, another.address], // code = 1 means RECEIVE_CURATOR_BALANCE_CODE ) const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( grt.address, @@ -1316,7 +1363,7 @@ describe('L1GNS', () => { .connect(other.signer) .sendCuratorBalanceToBeneficiaryOnL2( subgraph0.id, - other.address, + another.address, maxGas, gasPriceBid, maxSubmissionCost, @@ -1329,6 +1376,9 @@ describe('L1GNS', () => { await expect(tx) .emit(l1GraphTokenGateway, 'TxToL2') .withArgs(gns.address, mockL2Gateway.address, toBN('2'), expectedL2Data) + await expect(tx) + .emit(gns, 'CuratorBalanceSentToL2') + .withArgs(subgraph0.id, other.address, another.address, curatorTokens) }) it('sets the curator signal to zero so it cannot be called twice', async function () { const subgraph0 = await publishCurateAndSendSubgraph() From a994b63febfdc7e5c56832b62fa2d6d5501a130d Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Wed, 15 Feb 2023 19:07:32 -0300 Subject: [PATCH 085/112] fix: review comments from #585 in L2GNS --- contracts/l2/discovery/L2GNS.sol | 13 +++++++++---- test/l2/l2GNS.test.ts | 8 ++++++-- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index 3fc23f691..dd31c4726 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -27,7 +27,11 @@ 
contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { using SafeMathUpgradeable for uint256; /// @dev Emitted when a subgraph is received from L1 through the bridge - event SubgraphReceivedFromL1(uint256 indexed _subgraphID); + event SubgraphReceivedFromL1( + uint256 indexed _subgraphID, + address indexed _owner, + uint256 _tokens + ); /// @dev Emitted when a subgraph migration from L1 is finalized, so the subgraph is published event SubgraphMigrationFinalized(uint256 indexed _subgraphID); /// @dev Emitted when the L1 balance for a curator has been claimed @@ -101,7 +105,6 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { ) external override notPartialPaused onlySubgraphAuth(_subgraphID) { IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); - // A subgraph require(migratedData.subgraphReceivedOnL2BlockNumber != 0, "INVALID_SUBGRAPH"); require(!migratedData.l2Done, "ALREADY_DONE"); migratedData.l2Done = true; @@ -137,7 +140,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { /** * @notice Publish a new version of an existing subgraph. * @dev This is the same as the one in the base GNS, but skips the check for - * a subgraph to not be pre-curated, as the reserve ration in L2 is set to 1, + * a subgraph to not be pre-curated, as the reserve ratio in L2 is set to 1, * which prevents the risk of rug-pulling. * @param _subgraphID Subgraph ID * @param _subgraphDeploymentID Subgraph deployment ID of the new version @@ -230,9 +233,11 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { // Mint the NFT. Use the subgraphID as tokenID. // This function will check the if tokenID already exists. + // Note we do this here so that we can later do the onlySubgraphAuth + // check in finishSubgraphMigrationFromL1. 
_mintNFT(_subgraphOwner, _subgraphID); - emit SubgraphReceivedFromL1(_subgraphID); + emit SubgraphReceivedFromL1(_subgraphID, _subgraphOwner, _tokens); } /** diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index ffc00bcd1..9072d7594 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -274,7 +274,9 @@ describe('L2GNS', () => { await expect(tx) .emit(l2GraphTokenGateway, 'DepositFinalized') .withArgs(mockL1GRT.address, mockL1GNS.address, gns.address, curatedTokens) - await expect(tx).emit(gns, 'SubgraphReceivedFromL1').withArgs(l1SubgraphId) + await expect(tx) + .emit(gns, 'SubgraphReceivedFromL1') + .withArgs(l1SubgraphId, me.address, curatedTokens) const migrationData = await gns.subgraphL2MigrationData(l1SubgraphId) const subgraphData = await gns.subgraphs(l1SubgraphId) @@ -310,7 +312,9 @@ describe('L2GNS', () => { await expect(tx) .emit(l2GraphTokenGateway, 'DepositFinalized') .withArgs(mockL1GRT.address, mockL1GNS.address, gns.address, curatedTokens) - await expect(tx).emit(gns, 'SubgraphReceivedFromL1').withArgs(l1SubgraphId) + await expect(tx) + .emit(gns, 'SubgraphReceivedFromL1') + .withArgs(l1SubgraphId, me.address, curatedTokens) const migrationData = await gns.subgraphL2MigrationData(l1SubgraphId) const subgraphData = await gns.subgraphs(l1SubgraphId) From 287167ede7933a0707c5928d7d2d2e259bb0f536 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 28 Feb 2023 11:09:00 -0300 Subject: [PATCH 086/112] fix: remove unused imports (OZ N-01 for #764) --- contracts/discovery/L1GNS.sol | 1 - contracts/l2/curation/L2Curation.sol | 1 - 2 files changed, 2 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 64c168ede..1388a7a44 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -8,7 +8,6 @@ import { SafeMathUpgradeable } from "@openzeppelin/contracts-upgradeable/math/Sa import { GNS } from "./GNS.sol"; import { ITokenGateway } from 
"../arbitrum/ITokenGateway.sol"; -import { L1ArbitrumMessenger } from "../arbitrum/L1ArbitrumMessenger.sol"; import { IL2GNS } from "../l2/discovery/IL2GNS.sol"; import { IGraphToken } from "../token/IGraphToken.sol"; import { L1GNSV1Storage } from "./L1GNSStorage.sol"; diff --git a/contracts/l2/curation/L2Curation.sol b/contracts/l2/curation/L2Curation.sol index 908e40384..7c493a612 100644 --- a/contracts/l2/curation/L2Curation.sol +++ b/contracts/l2/curation/L2Curation.sol @@ -13,7 +13,6 @@ import { IRewardsManager } from "../../rewards/IRewardsManager.sol"; import { Managed } from "../../governance/Managed.sol"; import { IGraphToken } from "../../token/IGraphToken.sol"; import { CurationV2Storage } from "../../curation/CurationStorage.sol"; -import { ICuration } from "../../curation/ICuration.sol"; import { IGraphCurationToken } from "../../curation/IGraphCurationToken.sol"; import { IL2Curation } from "./IL2Curation.sol"; From e30eb01cafecef52b0bb56ac983ac641032b0289 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 28 Feb 2023 11:29:17 -0300 Subject: [PATCH 087/112] test: add some missing cases for GNS migration (OZ N-02 for #764) --- test/gns.test.ts | 54 +++++++++++++++++++++++++++++++++++++++++++ test/l2/l2GNS.test.ts | 28 ++++++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/test/gns.test.ts b/test/gns.test.ts index a17578081..93ea11d57 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1433,6 +1433,34 @@ describe('L1GNS', () => { ) await expect(tx2).revertedWith('NO_SIGNAL') }) + it('sets the curator signal to zero so they cannot withdraw', async function () { + const subgraph0 = await publishCurateAndSendSubgraph(async (_subgraphId) => { + // We add another curator before migrating, so the the subgraph doesn't + // run out of withdrawable GRT and we can test that it denies the specific curator + // because they have sent their signal to L2, not because the subgraph is out of GRT. 
+ await gns.connect(another.signer).mintSignal(_subgraphId, toGRT('1000'), toBN(0)) + }) + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + + await gns + .connect(other.signer) + .sendCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + + const tx = gns.connect(other.signer).withdraw(subgraph0.id) + await expect(tx).revertedWith('GNS: No signal to withdraw GRT') + }) it('gives each curator an amount of tokens proportional to their nSignal', async function () { let beforeOtherNSignal: BigNumber let beforeAnotherNSignal: BigNumber @@ -1586,6 +1614,32 @@ describe('L1GNS', () => { await expect(tx).revertedWith('NO_SUBMISSION_COST') }) + it('rejects calls if the curator has withdrawn the GRT', async function () { + const subgraph0 = await publishCurateAndSendSubgraph() + const afterSubgraph = await gns.subgraphs(subgraph0.id) + + await gns.connect(other.signer).withdraw(subgraph0.id) + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + + const tx = gns + .connect(other.signer) + .sendCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + another.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)), + }, + ) + + // seqNum (third argument in the event) is 2, because number 1 was when the subgraph was sent to L2 + await expect(tx).revertedWith('NO_SIGNAL') + }) }) }) }) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 9072d7594..f90b96f1c 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -500,6 +500,34 @@ describe('L2GNS', () => { .finishSubgraphMigrationFromL1(l1SubgraphId, HashZero, metadata, metadata) await expect(tx).revertedWith('GNS: deploymentID != 0') }) + it('rejects calls if the subgraph migration was already finished', async function () { + 
const metadata = randomHexBytes() + const { l1SubgraphId, curatedTokens } = await defaultL1SubgraphParams() + const callhookData = defaultAbiCoder.encode( + ['uint8', 'uint256', 'address'], + [toBN(0), l1SubgraphId, me.address], + ) + await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + + await gns + .connect(me.signer) + .finishSubgraphMigrationFromL1( + l1SubgraphId, + newSubgraph0.subgraphDeploymentID, + metadata, + metadata, + ) + + const tx = gns + .connect(me.signer) + .finishSubgraphMigrationFromL1( + l1SubgraphId, + newSubgraph0.subgraphDeploymentID, + metadata, + metadata, + ) + await expect(tx).revertedWith('ALREADY_DONE') + }) }) describe('claiming a curator balance with a message from L1 (onTokenTransfer)', function () { it('assigns a curator balance to a beneficiary', async function () { From 192aecf034e008139f8f0f492809b696a9afcaa1 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 28 Feb 2023 12:08:42 -0300 Subject: [PATCH 088/112] fix: use variable for withdrawableGRT to save gas (OZ N-03 for #764) --- contracts/discovery/L1GNS.sol | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 1388a7a44..3e8d94360 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -140,7 +140,8 @@ contract L1GNS is GNS, L1GNSV1Storage { uint256 subgraphNSignal = subgraphData.nSignal; require(subgraphNSignal != 0, "NO_SUBGRAPH_SIGNAL"); - uint256 tokensForL2 = curatorNSignal.mul(subgraphData.withdrawableGRT).div(subgraphNSignal); + uint256 withdrawableGRT = subgraphData.withdrawableGRT; + uint256 tokensForL2 = curatorNSignal.mul(withdrawableGRT).div(subgraphNSignal); bytes memory extraData = abi.encode( uint8(IL2GNS.L1MessageCodes.RECEIVE_CURATOR_BALANCE_CODE), _subgraphID, @@ -150,7 +151,7 @@ contract L1GNS is GNS, L1GNSV1Storage { // Set the subgraph as if the curator had withdrawn their tokens 
subgraphData.curatorNSignal[msg.sender] = 0; subgraphData.nSignal = subgraphNSignal.sub(curatorNSignal); - subgraphData.withdrawableGRT = subgraphData.withdrawableGRT.sub(tokensForL2); + subgraphData.withdrawableGRT = withdrawableGRT.sub(tokensForL2); // Send the tokens and data to L2 using the L1GraphTokenGateway _sendTokensAndMessageToL2GNS( From 83fffa32a908ceba4aceaea29bba1f83ea25a462 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 20 Jan 2023 13:22:37 -0300 Subject: [PATCH 089/112] feat: stake and delegation migration helpers for L2 See GIP-0046 for details: https://forum.thegraph.com/t/gip-0046-l2-migration-helpers/4023 --- .solcover.js | 1 + addresses.json | 8 +- cli/commands/migrate.ts | 6 +- cli/contracts.ts | 26 +- cli/network.ts | 14 +- config/graph.arbitrum-goerli.yml | 7 +- config/graph.arbitrum-localhost.yml | 7 +- config/graph.arbitrum-one.yml | 7 +- config/graph.goerli.yml | 7 +- config/graph.localhost.yml | 7 +- config/graph.mainnet.yml | 7 +- contracts/l2/staking/IL2Staking.sol | 35 + contracts/l2/staking/IL2StakingBase.sol | 14 + contracts/l2/staking/L2Staking.sol | 129 ++ .../staking/IL1GraphTokenLockMigrator.sol | 30 + contracts/staking/IL1Staking.sol | 20 + contracts/staking/IL1StakingBase.sol | 149 ++ contracts/staking/IStaking.sol | 171 +- contracts/staking/IStakingBase.sol | 437 +++++ contracts/staking/IStakingData.sol | 6 +- contracts/staking/IStakingExtension.sol | 351 ++++ contracts/staking/L1Staking.sol | 405 +++++ contracts/staking/L1StakingStorage.sol | 21 + contracts/staking/Staking.sol | 1514 ++++++----------- contracts/staking/StakingExtension.sol | 689 ++++++++ contracts/staking/StakingStorage.sol | 135 +- .../tests/L1GraphTokenLockMigratorMock.sol | 21 + e2e/deployment/config/controller.test.ts | 4 +- e2e/deployment/config/staking.test.ts | 40 +- test/disputes/poi.test.ts | 4 +- test/disputes/query.test.ts | 4 +- test/gateway/l1GraphTokenGateway.test.ts | 5 + test/gns.test.ts | 15 +- test/governance/pausing.test.ts 
| 4 +- test/l2/l2GNS.test.ts | 5 +- test/l2/l2GraphTokenGateway.test.ts | 5 + test/l2/l2Staking.test.ts | 291 ++++ test/lib/deployment.ts | 56 +- test/lib/fixtures.ts | 22 +- test/payments/allocationExchange.test.ts | 4 +- test/payments/withdrawHelper.test.ts | 4 +- test/rewards/rewards.test.ts | 4 +- test/serviceRegisty.test.ts | 4 +- test/staking/allocation.test.ts | 6 +- test/staking/configuration.test.ts | 4 +- test/staking/delegation.test.ts | 4 +- test/staking/migration.test.ts | 786 +++++++++ test/staking/staking.test.ts | 4 +- test/upgrade/admin.test.ts | 4 +- 49 files changed, 4223 insertions(+), 1280 deletions(-) create mode 100644 contracts/l2/staking/IL2Staking.sol create mode 100644 contracts/l2/staking/IL2StakingBase.sol create mode 100644 contracts/l2/staking/L2Staking.sol create mode 100644 contracts/staking/IL1GraphTokenLockMigrator.sol create mode 100644 contracts/staking/IL1Staking.sol create mode 100644 contracts/staking/IL1StakingBase.sol create mode 100644 contracts/staking/IStakingBase.sol create mode 100644 contracts/staking/IStakingExtension.sol create mode 100644 contracts/staking/L1Staking.sol create mode 100644 contracts/staking/L1StakingStorage.sol create mode 100644 contracts/staking/StakingExtension.sol create mode 100644 contracts/tests/L1GraphTokenLockMigratorMock.sol create mode 100644 test/l2/l2Staking.test.ts create mode 100644 test/staking/migration.test.ts diff --git a/.solcover.js b/.solcover.js index b10738c1f..8c5efb96b 100644 --- a/.solcover.js +++ b/.solcover.js @@ -7,4 +7,5 @@ module.exports = { }, skipFiles, istanbulFolder: './reports/coverage', + configureYulOptimizer: true, } diff --git a/addresses.json b/addresses.json index 098db3003..54ca2a114 100644 --- a/addresses.json +++ b/addresses.json @@ -146,7 +146,7 @@ "txHash": "0x218dbb4fd680db263524fc6be36462c18f3e267b87951cd86296eabd4a381183" } }, - "Staking": { + "L1Staking": { "address": "0xF55041E37E12cD407ad00CE2910B8269B01263b9", "initArgs": [ { @@ -415,7 +415,7 @@ 
"txHash": "0xbc6e9171943020d30c22197282311f003e79374e6eeeaab9c360942bdf4193f4" } }, - "Staking": { + "L1Staking": { "address": "0x35e3Cb6B317690d662160d5d02A5b364578F62c9", "initArgs": [ "0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B", @@ -619,7 +619,7 @@ "txHash": "0xb1e63211ea7b036bf35423034bc60490b3b35b199bddc85200ea926b76e16a4e" } }, - "Staking": { + "L1Staking": { "address": "0x5f8e26fAcC23FA4cbd87b8d9Dbbd33D5047abDE1", "initArgs": [ "0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B", @@ -852,7 +852,7 @@ "txHash": "0xf1d41fc99ed716a0c890ea62e13ee108ddcb4ecfc74efb715a4ef05605ce449b" } }, - "Staking": { + "L2Staking": { "address": "0xcd549d0C43d915aEB21d3a331dEaB9B7aF186D26", "initArgs": [ "0x7f734E995010Aa8d28b912703093d532C37b6EAb", diff --git a/cli/commands/migrate.ts b/cli/commands/migrate.ts index 4c9c03768..079e1d731 100644 --- a/cli/commands/migrate.ts +++ b/cli/commands/migrate.ts @@ -30,7 +30,8 @@ let allContracts = [ 'SubgraphNFTDescriptor', 'SubgraphNFT', 'L1GNS', - 'Staking', + 'StakingExtension', + 'L1Staking', 'RewardsManager', 'DisputeManager', 'AllocationExchange', @@ -49,7 +50,8 @@ const l2Contracts = [ 'SubgraphNFTDescriptor', 'SubgraphNFT', 'L2GNS', - 'Staking', + 'StakingExtension', + 'L2Staking', 'RewardsManager', 'DisputeManager', 'AllocationExchange', diff --git a/cli/contracts.ts b/cli/contracts.ts index 3a20bc727..0c8da1b91 100644 --- a/cli/contracts.ts +++ b/cli/contracts.ts @@ -18,7 +18,8 @@ import { getContractAt } from './network' import { EpochManager } from '../build/types/EpochManager' import { DisputeManager } from '../build/types/DisputeManager' -import { Staking } from '../build/types/Staking' +import { L1Staking } from '../build/types/L1Staking' +import { L2Staking } from '../build/types/L2Staking' import { ServiceRegistry } from '../build/types/ServiceRegistry' import { Curation } from '../build/types/Curation' import { RewardsManager } from '../build/types/RewardsManager' @@ -40,11 +41,15 @@ import { L2GraphToken } from 
'../build/types/L2GraphToken' import { L2GraphTokenGateway } from '../build/types/L2GraphTokenGateway' import { BridgeEscrow } from '../build/types/BridgeEscrow' import { L2Curation } from '../build/types/L2Curation' +import { IL1Staking } from '../build/types/IL1Staking' +import { IL2Staking } from '../build/types/IL2Staking' +import { Interface } from 'ethers/lib/utils' +import { loadArtifact } from './artifacts' export interface NetworkContracts { EpochManager: EpochManager DisputeManager: DisputeManager - Staking: Staking + Staking: IL1Staking | IL2Staking ServiceRegistry: ServiceRegistry Curation: Curation | L2Curation L2Curation: L2Curation @@ -66,6 +71,8 @@ export interface NetworkContracts { L2GraphTokenGateway: L2GraphTokenGateway L1GNS: L1GNS L2GNS: L2GNS + L1Staking: IL1Staking + L2Staking: IL2Staking } export const loadAddressBookContract = ( @@ -97,6 +104,15 @@ export const loadContracts = ( contract.connect = getWrappedConnect(contract, contractName) contract = wrapCalls(contract, contractName) } + if (contractName == 'L1Staking') { + // Hack the contract into behaving like an IL1Staking + const iface = new Interface(loadArtifact('IL1Staking').abi) + contract = new Contract(contract.address, iface) as unknown as IL1Staking + } else if (contractName == 'L2Staking') { + // Hack the contract into behaving like an IL2Staking + const iface = new Interface(loadArtifact('IL2Staking').abi) + contract = new Contract(contract.address, iface) as unknown as IL2Staking + } contracts[contractName] = contract if (signerOrProvider) { @@ -110,12 +126,18 @@ export const loadContracts = ( if (signerOrProvider && chainIdIsL2(chainId) && contractName == 'L2GNS') { contracts['GNS'] = contracts[contractName] } + if (signerOrProvider && chainIdIsL2(chainId) && contractName == 'L2Staking') { + contracts['Staking'] = contracts[contractName] + } if (signerOrProvider && chainIdIsL2(chainId) && contractName == 'L2Curation') { contracts['Curation'] = contracts[contractName] } if 
(signerOrProvider && !chainIdIsL2(chainId) && contractName == 'L1GNS') { contracts['GNS'] = contracts[contractName] } + if (signerOrProvider && !chainIdIsL2(chainId) && contractName == 'L1Staking') { + contracts['Staking'] = contracts[contractName] + } } catch (err) { logger.warn(`Could not load contract ${contractName} - ${err.message}`) } diff --git a/cli/network.ts b/cli/network.ts index 9a0618ff3..5422eac73 100644 --- a/cli/network.ts +++ b/cli/network.ts @@ -18,6 +18,9 @@ import { AddressBook } from './address-book' import { loadArtifact } from './artifacts' import { defaultOverrides } from './defaults' import { GraphToken } from '../build/types/GraphToken' +import { Interface } from 'ethers/lib/utils' +import { IL1Staking } from '../build/types/IL1Staking' +import { IL2Staking } from '../build/types/IL2Staking' const { keccak256, randomBytes, parseUnits, hexlify } = utils @@ -197,7 +200,7 @@ export const deployContract = async ( // Deploy const factory = getContractFactory(name, libraries) - const contract = await factory.connect(sender).deploy(...args) + let contract = await factory.connect(sender).deploy(...args) const txHash = contract.deployTransaction.hash logger.info(`> Deploy ${name}, txHash: ${txHash}`) await sender.provider.waitForTransaction(txHash) @@ -209,6 +212,15 @@ export const deployContract = async ( logger.info(`= RuntimeCodeHash: ${runtimeCodeHash}`) logger.info(`${name} has been deployed to address: ${contract.address}`) + if (name == 'L1Staking') { + // Hack the contract into behaving like an IL1Staking + const iface = new Interface(loadArtifact('IL1Staking').abi) + contract = new Contract(contract.address, iface, sender) as unknown as IL1Staking + } else if (name == 'L2Staking') { + // Hack the contract into behaving like an IL2Staking + const iface = new Interface(loadArtifact('IL2Staking').abi) + contract = new Contract(contract.address, iface, sender) as unknown as IL2Staking + } return { contract, creationCodeHash, runtimeCodeHash, 
txHash, libraries } } diff --git a/config/graph.arbitrum-goerli.yml b/config/graph.arbitrum-goerli.yml index dc5c7022c..94418c203 100644 --- a/config/graph.arbitrum-goerli.yml +++ b/config/graph.arbitrum-goerli.yml @@ -26,7 +26,7 @@ contracts: contractAddress: "${{RewardsManager.address}}" - fn: "setContractProxy" id: "0x1df41cd916959d1163dc8f0671a666ea8a3e434c13e40faef527133b5d167034" # keccak256('Staking') - contractAddress: "${{Staking.address}}" + contractAddress: "${{L2Staking.address}}" - fn: "setContractProxy" id: "0x45fc200c7e4544e457d3c5709bfe0d520442c30bbcbdaede89e8d4a4bbc19247" # keccak256('GraphToken') contractAddress: "${{L2GraphToken.address}}" @@ -98,7 +98,7 @@ contracts: minter: "${{L2GNS.address}}" - fn: "transferOwnership" owner: *governor - Staking: + L2Staking: proxy: true init: controller: "${{Controller.address}}" @@ -112,6 +112,7 @@ contracts: delegationRatio: 16 # delegated stake to indexer stake multiplier rebateAlphaNumerator: 77 # rebateAlphaNumerator / rebateAlphaDenominator rebateAlphaDenominator: 100 # rebateAlphaNumerator / rebateAlphaDenominator + extensionImpl: "${{StakingExtension.address}}" calls: - fn: "setDelegationTaxPercentage" delegationTaxPercentage: 5000 # parts per million @@ -133,7 +134,7 @@ contracts: AllocationExchange: init: graphToken: "${{L2GraphToken.address}}" - staking: "${{Staking.address}}" + staking: "${{L2Staking.address}}" governor: *allocationExchangeOwner authority: *authority calls: diff --git a/config/graph.arbitrum-localhost.yml b/config/graph.arbitrum-localhost.yml index a5674225b..6f46dd11c 100644 --- a/config/graph.arbitrum-localhost.yml +++ b/config/graph.arbitrum-localhost.yml @@ -26,7 +26,7 @@ contracts: contractAddress: "${{RewardsManager.address}}" - fn: "setContractProxy" id: "0x1df41cd916959d1163dc8f0671a666ea8a3e434c13e40faef527133b5d167034" # keccak256('Staking') - contractAddress: "${{Staking.address}}" + contractAddress: "${{L2Staking.address}}" - fn: "setContractProxy" id: 
"0x45fc200c7e4544e457d3c5709bfe0d520442c30bbcbdaede89e8d4a4bbc19247" # keccak256('GraphToken') contractAddress: "${{L2GraphToken.address}}" @@ -98,7 +98,7 @@ contracts: minter: "${{L2GNS.address}}" - fn: "transferOwnership" owner: *governor - Staking: + L2Staking: proxy: true init: controller: "${{Controller.address}}" @@ -112,6 +112,7 @@ contracts: delegationRatio: 16 # delegated stake to indexer stake multiplier rebateAlphaNumerator: 77 # rebateAlphaNumerator / rebateAlphaDenominator rebateAlphaDenominator: 100 # rebateAlphaNumerator / rebateAlphaDenominator + extensionImpl: "${{StakingExtension.address}}" calls: - fn: "setDelegationTaxPercentage" delegationTaxPercentage: 5000 # parts per million @@ -133,7 +134,7 @@ contracts: AllocationExchange: init: graphToken: "${{L2GraphToken.address}}" - staking: "${{Staking.address}}" + staking: "${{L2Staking.address}}" governor: *allocationExchangeOwner authority: *authority calls: diff --git a/config/graph.arbitrum-one.yml b/config/graph.arbitrum-one.yml index 9277bb051..8924aa9e9 100644 --- a/config/graph.arbitrum-one.yml +++ b/config/graph.arbitrum-one.yml @@ -26,7 +26,7 @@ contracts: contractAddress: "${{RewardsManager.address}}" - fn: "setContractProxy" id: "0x1df41cd916959d1163dc8f0671a666ea8a3e434c13e40faef527133b5d167034" # keccak256('Staking') - contractAddress: "${{Staking.address}}" + contractAddress: "${{L2Staking.address}}" - fn: "setContractProxy" id: "0x45fc200c7e4544e457d3c5709bfe0d520442c30bbcbdaede89e8d4a4bbc19247" # keccak256('GraphToken') contractAddress: "${{L2GraphToken.address}}" @@ -98,7 +98,7 @@ contracts: minter: "${{L2GNS.address}}" - fn: "transferOwnership" owner: *governor - Staking: + L2Staking: proxy: true init: controller: "${{Controller.address}}" @@ -112,6 +112,7 @@ contracts: delegationRatio: 16 # delegated stake to indexer stake multiplier rebateAlphaNumerator: 77 # rebateAlphaNumerator / rebateAlphaDenominator rebateAlphaDenominator: 100 # rebateAlphaNumerator / rebateAlphaDenominator 
+ extensionImpl: "${{StakingExtension.address}}" calls: - fn: "setDelegationTaxPercentage" delegationTaxPercentage: 5000 # parts per million @@ -133,7 +134,7 @@ contracts: AllocationExchange: init: graphToken: "${{L2GraphToken.address}}" - staking: "${{Staking.address}}" + staking: "${{L2Staking.address}}" governor: *allocationExchangeOwner authority: *authority calls: diff --git a/config/graph.goerli.yml b/config/graph.goerli.yml index 223fda79a..263d353b7 100644 --- a/config/graph.goerli.yml +++ b/config/graph.goerli.yml @@ -26,7 +26,7 @@ contracts: contractAddress: "${{RewardsManager.address}}" - fn: "setContractProxy" id: "0x1df41cd916959d1163dc8f0671a666ea8a3e434c13e40faef527133b5d167034" # keccak256('Staking') - contractAddress: "${{Staking.address}}" + contractAddress: "${{L1Staking.address}}" - fn: "setContractProxy" id: "0x45fc200c7e4544e457d3c5709bfe0d520442c30bbcbdaede89e8d4a4bbc19247" # keccak256('GraphToken') contractAddress: "${{GraphToken.address}}" @@ -101,7 +101,7 @@ contracts: minter: "${{L1GNS.address}}" - fn: "transferOwnership" owner: *governor - Staking: + L1Staking: proxy: true init: controller: "${{Controller.address}}" @@ -115,6 +115,7 @@ contracts: delegationRatio: 16 # delegated stake to indexer stake multiplier rebateAlphaNumerator: 77 # rebateAlphaNumerator / rebateAlphaDenominator rebateAlphaDenominator: 100 # rebateAlphaNumerator / rebateAlphaDenominator + extensionImpl: "${{StakingExtension.address}}" calls: - fn: "setDelegationTaxPercentage" delegationTaxPercentage: 5000 # parts per million @@ -138,7 +139,7 @@ contracts: AllocationExchange: init: graphToken: "${{GraphToken.address}}" - staking: "${{Staking.address}}" + staking: "${{L1Staking.address}}" governor: *allocationExchangeOwner authority: *authority calls: diff --git a/config/graph.localhost.yml b/config/graph.localhost.yml index 23643d2df..734c70f3a 100644 --- a/config/graph.localhost.yml +++ b/config/graph.localhost.yml @@ -26,7 +26,7 @@ contracts: contractAddress: 
"${{RewardsManager.address}}" - fn: "setContractProxy" id: "0x1df41cd916959d1163dc8f0671a666ea8a3e434c13e40faef527133b5d167034" # keccak256('Staking') - contractAddress: "${{Staking.address}}" + contractAddress: "${{L1Staking.address}}" - fn: "setContractProxy" id: "0x45fc200c7e4544e457d3c5709bfe0d520442c30bbcbdaede89e8d4a4bbc19247" # keccak256('GraphToken') contractAddress: "${{GraphToken.address}}" @@ -101,7 +101,7 @@ contracts: minter: "${{L1GNS.address}}" - fn: "transferOwnership" owner: *governor - Staking: + L1Staking: proxy: true init: controller: "${{Controller.address}}" @@ -115,6 +115,7 @@ contracts: delegationRatio: 16 # delegated stake to indexer stake multiplier rebateAlphaNumerator: 77 # rebateAlphaNumerator / rebateAlphaDenominator rebateAlphaDenominator: 100 # rebateAlphaNumerator / rebateAlphaDenominator + extensionImpl: "${{StakingExtension.address}}" calls: - fn: "setDelegationTaxPercentage" delegationTaxPercentage: 5000 # parts per million @@ -138,7 +139,7 @@ contracts: AllocationExchange: init: graphToken: "${{GraphToken.address}}" - staking: "${{Staking.address}}" + staking: "${{L1Staking.address}}" governor: *allocationExchangeOwner authority: *authority calls: diff --git a/config/graph.mainnet.yml b/config/graph.mainnet.yml index e90f502c0..9753dfb5d 100644 --- a/config/graph.mainnet.yml +++ b/config/graph.mainnet.yml @@ -26,7 +26,7 @@ contracts: contractAddress: "${{RewardsManager.address}}" - fn: "setContractProxy" id: "0x1df41cd916959d1163dc8f0671a666ea8a3e434c13e40faef527133b5d167034" # keccak256('Staking') - contractAddress: "${{Staking.address}}" + contractAddress: "${{L1Staking.address}}" - fn: "setContractProxy" id: "0x45fc200c7e4544e457d3c5709bfe0d520442c30bbcbdaede89e8d4a4bbc19247" # keccak256('GraphToken') contractAddress: "${{GraphToken.address}}" @@ -101,7 +101,7 @@ contracts: minter: "${{L1GNS.address}}" - fn: "transferOwnership" owner: *governor - Staking: + L1Staking: proxy: true init: controller: "${{Controller.address}}" @@ 
-115,6 +115,7 @@ contracts: delegationRatio: 16 # delegated stake to indexer stake multiplier rebateAlphaNumerator: 77 # rebateAlphaNumerator / rebateAlphaDenominator rebateAlphaDenominator: 100 # rebateAlphaNumerator / rebateAlphaDenominator + extensionImpl: "${{StakingExtension.address}}" calls: - fn: "setDelegationTaxPercentage" delegationTaxPercentage: 5000 # parts per million @@ -138,7 +139,7 @@ contracts: AllocationExchange: init: graphToken: "${{GraphToken.address}}" - staking: "${{Staking.address}}" + staking: "${{L1Staking.address}}" governor: *allocationExchangeOwner authority: *authority calls: diff --git a/contracts/l2/staking/IL2Staking.sol b/contracts/l2/staking/IL2Staking.sol new file mode 100644 index 000000000..2b4d3c083 --- /dev/null +++ b/contracts/l2/staking/IL2Staking.sol @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity >=0.6.12 <0.8.0; +pragma abicoder v2; + +import { IStaking } from "../../staking/IStaking.sol"; +import { IL2StakingBase } from "./IL2StakingBase.sol"; + +/** + * @title Interface for the L2 Staking contract + * @notice This is the interface that should be used when interacting with the L2 Staking contract. + * It extends the IStaking interface with the functions that are specific to L2, adding the callhook receiver + * to receive migrated stake and delegation from L1. + * @dev Note that L2Staking doesn't actually inherit this interface. This is because of + * the custom setup of the Staking contract where part of the functionality is implemented + * in a separate contract (StakingExtension) to which calls are delegated through the fallback function. 
+ */ +interface IL2Staking is IStaking, IL2StakingBase { + /// @dev Message codes for the L1 -> L2 bridge callhook + enum L1MessageCodes { + RECEIVE_INDEXER_STAKE_CODE, + RECEIVE_DELEGATION_CODE + } + + /// @dev Encoded message struct when receiving indexer stake through the bridge + struct ReceiveIndexerStakeData { + address indexer; + } + + /// @dev Encoded message struct when receiving delegation through the bridge + struct ReceiveDelegationData { + address indexer; + address delegator; + } +} diff --git a/contracts/l2/staking/IL2StakingBase.sol b/contracts/l2/staking/IL2StakingBase.sol new file mode 100644 index 000000000..edf19874d --- /dev/null +++ b/contracts/l2/staking/IL2StakingBase.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; + +import { ICallhookReceiver } from "../../gateway/ICallhookReceiver.sol"; + +/** + * @title Base interface for the L2Staking contract. + * @notice This interface is used to define the callhook receiver interface that is implemented by L2Staking. + * @dev Note it includes only the L2-specific functionality, not the full IStaking interface. + */ +interface IL2StakingBase is ICallhookReceiver { + // Nothing to see here +} diff --git a/contracts/l2/staking/L2Staking.sol b/contracts/l2/staking/L2Staking.sol new file mode 100644 index 000000000..ad1181b92 --- /dev/null +++ b/contracts/l2/staking/L2Staking.sol @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; +import { Staking } from "../../staking/Staking.sol"; +import { IL2StakingBase } from "./IL2StakingBase.sol"; +import { IL2Staking } from "./IL2Staking.sol"; +import { Stakes } from "../../staking/libs/Stakes.sol"; + +/** + * @title L2Staking contract + * @dev This contract is the L2 variant of the Staking contract. It adds a function + * to receive an indexer's stake or delegation from L1. 
Note that this contract inherits Staking, + * which uses a StakingExtension contract to implement the full IStaking interface through delegatecalls. + */ +contract L2Staking is Staking, IL2StakingBase { + using SafeMath for uint256; + using Stakes for Stakes.Indexer; + + /** + * @dev Emitted when `delegator` delegated `tokens` to the `indexer`, the delegator + * gets `shares` for the delegation pool proportionally to the tokens staked. + * This is copied from IStakingExtension, but we can't inherit from it because we + * don't implement the full interface here. + */ + event StakeDelegated( + address indexed indexer, + address indexed delegator, + uint256 tokens, + uint256 shares + ); + + /** + * @dev Checks that the sender is the L2GraphTokenGateway as configured on the Controller. + */ + modifier onlyL2Gateway() { + require(msg.sender == address(graphTokenGateway()), "ONLY_GATEWAY"); + _; + } + + /** + * @notice Receive ETH into the L2Staking contract: this will always revert + * @dev This function is only here to prevent ETH from being sent to the contract + */ + receive() external payable { + revert("RECEIVE_ETH_NOT_ALLOWED"); + } + + /** + * @notice Receive tokens with a callhook from the bridge. + * @dev The encoded _data can contain information about an indexer's stake + * or a delegator's delegation. + * See L1MessageCodes in IL2Staking for the supported messages. + * @param _from Token sender in L1 + * @param _amount Amount of tokens that were transferred + * @param _data ABI-encoded callhook data which must include a uint8 code and either a ReceiveIndexerStakeData or ReceiveDelegationData struct. 
+ */ + function onTokenTransfer( + address _from, + uint256 _amount, + bytes calldata _data + ) external override notPartialPaused onlyL2Gateway { + require(_from == counterpartStakingAddress, "ONLY_L1_STAKING_THROUGH_BRIDGE"); + (uint8 code, bytes memory functionData) = abi.decode(_data, (uint8, bytes)); + + if (code == uint8(IL2Staking.L1MessageCodes.RECEIVE_INDEXER_STAKE_CODE)) { + IL2Staking.ReceiveIndexerStakeData memory indexerData = abi.decode( + functionData, + (IL2Staking.ReceiveIndexerStakeData) + ); + _receiveIndexerStake(_amount, indexerData); + } else if (code == uint8(IL2Staking.L1MessageCodes.RECEIVE_DELEGATION_CODE)) { + IL2Staking.ReceiveDelegationData memory delegationData = abi.decode( + functionData, + (IL2Staking.ReceiveDelegationData) + ); + _receiveDelegation(_amount, delegationData); + } else { + revert("INVALID_CODE"); + } + } + + /** + * @dev Receive an Indexer's stake from L1. + * The specified amount is added to the indexer's stake; the indexer's + * address is specified in the _indexerData struct. + * @param _amount Amount of tokens that were transferred + * @param _indexerData struct containing the indexer's address + */ + function _receiveIndexerStake( + uint256 _amount, + IL2Staking.ReceiveIndexerStakeData memory _indexerData + ) internal { + address indexer = _indexerData.indexer; + __stakes[indexer].deposit(_amount); + emit StakeDeposited(indexer, _amount); + } + + /** + * @dev Receive a Delegator's delegation from L1. + * The specified amount is added to the delegator's delegation; the delegator's + * address and the indexer's address are specified in the _delegationData struct. + * Note that no delegation tax is applied here. 
+ * @param _amount Amount of tokens that were transferred + * @param _delegationData struct containing the delegator's address and the indexer's address + */ + function _receiveDelegation( + uint256 _amount, + IL2Staking.ReceiveDelegationData memory _delegationData + ) internal { + // Get the delegation pool of the indexer + DelegationPool storage pool = __delegationPools[_delegationData.indexer]; + Delegation storage delegation = pool.delegators[_delegationData.delegator]; + + // Calculate shares to issue (without applying any delegation tax) + uint256 shares = (pool.tokens == 0) ? _amount : _amount.mul(pool.shares).div(pool.tokens); + + // Update the delegation pool + pool.tokens = pool.tokens.add(_amount); + pool.shares = pool.shares.add(shares); + + // Update the individual delegation + delegation.shares = delegation.shares.add(shares); + + emit StakeDelegated(_delegationData.indexer, _delegationData.delegator, _amount, shares); + } +} diff --git a/contracts/staking/IL1GraphTokenLockMigrator.sol b/contracts/staking/IL1GraphTokenLockMigrator.sol new file mode 100644 index 000000000..4184417a4 --- /dev/null +++ b/contracts/staking/IL1GraphTokenLockMigrator.sol @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity >=0.6.12 <0.8.0; +pragma abicoder v2; + +/** + * @title Interface for the L1GraphTokenLockMigrator contract + * @dev This interface defines the function to get the migrated wallet address for a given L1 token lock wallet. + * The migrator contract is implemented in the token-distribution repo: https://github.com/graphprotocol/token-distribution/pull/64 + * and is only included here to provide support in L1Staking for the migration of stake and delegation + * owned by token lock contracts. See GIP-0046 for details: https://forum.thegraph.com/t/gip-0046-l2-migration-helpers/4023 + */ +interface IL1GraphTokenLockMigrator { + /** + * @notice Pulls ETH from an L1 wallet's account to use for L2 ticket gas. 
+ * @dev This function is only callable by the staking contract. + * @param _l1Wallet Address of the L1 token lock wallet + * @param _amount Amount of ETH to pull from the migrator contract + */ + function pullETH(address _l1Wallet, uint256 _amount) external; + + /** + * @notice Get the L2 token lock wallet address for a given L1 token lock wallet + * @dev In the actual L1GraphTokenLockMigrator contract, this is simply the default getter for a public mapping variable. + * @param _l1Wallet Address of the L1 token lock wallet + * @return Address of the L2 token lock wallet if the wallet has an L2 counterpart, or address zero if + * the wallet doesn't have an L2 counterpart (or is not known to be a token lock wallet). + */ + function migratedWalletAddress(address _l1Wallet) external view returns (address); +} diff --git a/contracts/staking/IL1Staking.sol b/contracts/staking/IL1Staking.sol new file mode 100644 index 000000000..929218ea5 --- /dev/null +++ b/contracts/staking/IL1Staking.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity >=0.6.12 <0.8.0; +pragma abicoder v2; + +import { IStaking } from "./IStaking.sol"; +import { IL1StakingBase } from "./IL1StakingBase.sol"; + +/** + * @title Interface for the L1 Staking contract + * @notice This is the interface that should be used when interacting with the L1 Staking contract. + * It extends the IStaking interface with the functions that are specific to L1, adding the migration helpers + * to send stake and delegation to L2. + * @dev Note that L1Staking doesn't actually inherit this interface. This is because of + * the custom setup of the Staking contract where part of the functionality is implemented + * in a separate contract (StakingExtension) to which calls are delegated through the fallback function. 
+ */ +interface IL1Staking is IStaking, IL1StakingBase { + // Nothing to see here +} diff --git a/contracts/staking/IL1StakingBase.sol b/contracts/staking/IL1StakingBase.sol new file mode 100644 index 000000000..3662acfe1 --- /dev/null +++ b/contracts/staking/IL1StakingBase.sol @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity >=0.6.12 <0.8.0; +pragma abicoder v2; + +import { IL1GraphTokenLockMigrator } from "./IL1GraphTokenLockMigrator.sol"; + +/** + * @title Base interface for the L1Staking contract. + * @notice This interface is used to define the migration helpers that are implemented in L1Staking. + * @dev Note it includes only the L1-specific functionality, not the full IStaking interface. + */ +interface IL1StakingBase { + /// @dev Emitted when an indexer migrates their stake to L2. + /// This can happen several times as indexers can migrate partial stake. + event IndexerMigratedToL2( + address indexed indexer, + address indexed l2Indexer, + uint256 migratedStakeTokens + ); + + /// @dev Emitted when a delegator migrates their delegation to L2 + event DelegationMigratedToL2( + address indexed delegator, + address indexed l2Delegator, + address indexed indexer, + address l2Indexer, + uint256 migratedDelegationTokens + ); + + /// @dev Emitted when the L1GraphTokenLockMigrator is set + event L1GraphTokenLockMigratorSet(address l1GraphTokenLockMigrator); + + /// @dev Emitted when a delegator unlocks their tokens ahead of time because the indexer has migrated + event StakeDelegatedUnlockedDueToMigration(address indexed indexer, address indexed delegator); + + /** + * @notice Set the L1GraphTokenLockMigrator contract address + * @dev This function can only be called by the governor. + * @param _l1GraphTokenLockMigrator Address of the L1GraphTokenLockMigrator contract + */ + function setL1GraphTokenLockMigrator(IL1GraphTokenLockMigrator _l1GraphTokenLockMigrator) + external; + + /** + * @notice Send an indexer's stake to L2. 
+ * @dev This function can only be called by the indexer (not an operator). + * It will validate that the remaining stake is sufficient to cover all the allocated + * stake, so the indexer might have to close some allocations before migrating. + * It will also check that the indexer's stake is not locked for withdrawal. + * Since the indexer address might be an L1-only contract, the function takes a beneficiary + * address that will be the indexer's address in L2. + * The caller must provide an amount of ETH to use for the L2 retryable ticket, that + * must be at least `_maxSubmissionCost + _gasPriceBid * _maxGas`. + * @param _l2Beneficiary Address of the indexer in L2. If the indexer has previously migrated stake, this must match the previously-used value. + * @param _amount Amount of stake GRT to migrate to L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + */ + function migrateStakeToL2( + address _l2Beneficiary, + uint256 _amount, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost + ) external payable; + + /** + * @notice Send an indexer's stake to L2, from a GraphTokenLockWallet vesting contract. + * @dev This function can only be called by the indexer (not an operator). + * It will validate that the remaining stake is sufficient to cover all the allocated + * stake, so the indexer might have to close some allocations before migrating. + * It will also check that the indexer's stake is not locked for withdrawal. + * The L2 beneficiary for the stake will be determined by calling the L1GraphTokenLockMigrator contract, + * so the caller must have previously migrated tokens through that first + * (see GIP-0046 for details: https://forum.thegraph.com/t/gip-0046-l2-migration-helpers/4023). 
+ * The ETH for the L2 gas will be pulled from the L1GraphTokenLockMigrator, so the owner of + * the GraphTokenLockWallet must have previously deposited at least `_maxSubmissionCost + _gasPriceBid * _maxGas` + * ETH into the L1GraphTokenLockMigrator contract (using its depositETH function). + * @param _amount Amount of stake GRT to migrate to L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + */ + function migrateLockedStakeToL2( + uint256 _amount, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost + ) external; + + /** + * @notice Send a delegator's delegated tokens to L2 + * @dev This function can only be called by the delegator. + * This function will validate that the indexer has migrated their stake using migrateStakeToL2, + * and that the delegation is not locked for undelegation. + * Since the delegator's address might be an L1-only contract, the function takes a beneficiary + * address that will be the delegator's address in L2. + * The caller must provide an amount of ETH to use for the L2 retryable ticket, that + * must be at least `_maxSubmissionCost + _gasPriceBid * _maxGas`. + * @param _indexer Address of the indexer (in L1, before migrating) + * @param _l2Beneficiary Address of the delegator in L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + */ + function migrateDelegationToL2( + address _indexer, + address _l2Beneficiary, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost + ) external payable; + + /** + * @notice Send a delegator's delegated tokens to L2, for a GraphTokenLockWallet vesting contract + * @dev This function can only be called by the delegator. 
+ * This function will validate that the indexer has migrated their stake using migrateStakeToL2, + * and that the delegation is not locked for undelegation. + * The L2 beneficiary for the delegation will be determined by calling the L1GraphTokenLockMigrator contract, + * so the caller must have previously migrated tokens through that first + * (see GIP-0046 for details: https://forum.thegraph.com/t/gip-0046-l2-migration-helpers/4023). + * The ETH for the L2 gas will be pulled from the L1GraphTokenLockMigrator, so the owner of + * the GraphTokenLockWallet must have previously deposited at least `_maxSubmissionCost + _gasPriceBid * _maxGas` + * ETH into the L1GraphTokenLockMigrator contract (using its depositETH function). + * @param _indexer Address of the indexer (in L1, before migrating) + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + */ + function migrateLockedDelegationToL2( + address _indexer, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost + ) external; + + /** + * @notice Unlock a delegator's delegated tokens, if the indexer has migrated + * @dev This function can only be called by the delegator. + * This function will validate that the indexer has migrated their stake using migrateStakeToL2, + * and that the indexer has no remaining stake in L1. + * The tokens must previously be locked for undelegation by calling `undelegate()`, + * and can be withdrawn with `withdrawDelegated()` immediately after calling this. 
+ * @param _indexer Address of the indexer (in L1, before migrating) + */ + function unlockDelegationToMigratedIndexer(address _indexer) external; +} diff --git a/contracts/staking/IStaking.sol b/contracts/staking/IStaking.sol index 86942e4d1..53ec76646 100644 --- a/contracts/staking/IStaking.sol +++ b/contracts/staking/IStaking.sol @@ -3,158 +3,21 @@ pragma solidity >=0.6.12 <0.8.0; pragma abicoder v2; -import "./IStakingData.sol"; - -interface IStaking is IStakingData { - // -- Allocation Data -- - - /** - * @dev Possible states an allocation can be - * States: - * - Null = indexer == address(0) - * - Active = not Null && tokens > 0 - * - Closed = Active && closedAtEpoch != 0 - * - Finalized = Closed && closedAtEpoch + channelDisputeEpochs > now() - * - Claimed = not Null && tokens == 0 - */ - enum AllocationState { - Null, - Active, - Closed, - Finalized, - Claimed - } - - // -- Configuration -- - - function setMinimumIndexerStake(uint256 _minimumIndexerStake) external; - - function setThawingPeriod(uint32 _thawingPeriod) external; - - function setCurationPercentage(uint32 _percentage) external; - - function setProtocolPercentage(uint32 _percentage) external; - - function setChannelDisputeEpochs(uint32 _channelDisputeEpochs) external; - - function setMaxAllocationEpochs(uint32 _maxAllocationEpochs) external; - - function setRebateRatio(uint32 _alphaNumerator, uint32 _alphaDenominator) external; - - function setDelegationRatio(uint32 _delegationRatio) external; - - function setDelegationParameters( - uint32 _indexingRewardCut, - uint32 _queryFeeCut, - uint32 _cooldownBlocks - ) external; - - function setDelegationParametersCooldown(uint32 _blocks) external; - - function setDelegationUnbondingPeriod(uint32 _delegationUnbondingPeriod) external; - - function setDelegationTaxPercentage(uint32 _percentage) external; - - function setSlasher(address _slasher, bool _allowed) external; - - function setAssetHolder(address _assetHolder, bool _allowed) external; - - // -- 
Operation -- - - function setOperator(address _operator, bool _allowed) external; - - function isOperator(address _operator, address _indexer) external view returns (bool); - - // -- Staking -- - - function stake(uint256 _tokens) external; - - function stakeTo(address _indexer, uint256 _tokens) external; - - function unstake(uint256 _tokens) external; - - function slash( - address _indexer, - uint256 _tokens, - uint256 _reward, - address _beneficiary - ) external; - - function withdraw() external; - - function setRewardsDestination(address _destination) external; - - // -- Delegation -- - - function delegate(address _indexer, uint256 _tokens) external returns (uint256); - - function undelegate(address _indexer, uint256 _shares) external returns (uint256); - - function withdrawDelegated(address _indexer, address _newIndexer) external returns (uint256); - - // -- Channel management and allocations -- - - function allocate( - bytes32 _subgraphDeploymentID, - uint256 _tokens, - address _allocationID, - bytes32 _metadata, - bytes calldata _proof - ) external; - - function allocateFrom( - address _indexer, - bytes32 _subgraphDeploymentID, - uint256 _tokens, - address _allocationID, - bytes32 _metadata, - bytes calldata _proof - ) external; - - function closeAllocation(address _allocationID, bytes32 _poi) external; - - function closeAllocationMany(CloseAllocationRequest[] calldata _requests) external; - - function closeAndAllocate( - address _oldAllocationID, - bytes32 _poi, - address _indexer, - bytes32 _subgraphDeploymentID, - uint256 _tokens, - address _allocationID, - bytes32 _metadata, - bytes calldata _proof - ) external; - - function collect(uint256 _tokens, address _allocationID) external; - - function claim(address _allocationID, bool _restake) external; - - function claimMany(address[] calldata _allocationID, bool _restake) external; - - // -- Getters and calculations -- - - function hasStake(address _indexer) external view returns (bool); - - function 
getIndexerStakedTokens(address _indexer) external view returns (uint256); - - function getIndexerCapacity(address _indexer) external view returns (uint256); - - function getAllocation(address _allocationID) external view returns (Allocation memory); - - function getAllocationState(address _allocationID) external view returns (AllocationState); - - function isAllocation(address _allocationID) external view returns (bool); - - function getSubgraphAllocatedTokens(bytes32 _subgraphDeploymentID) - external - view - returns (uint256); - - function getDelegation(address _indexer, address _delegator) - external - view - returns (Delegation memory); - - function isDelegator(address _indexer, address _delegator) external view returns (bool); +import { IStakingBase } from "./IStakingBase.sol"; +import { IStakingExtension } from "./IStakingExtension.sol"; +import { Stakes } from "./libs/Stakes.sol"; +import { IStakingData } from "./IStakingData.sol"; +import { Rebates } from "./libs/Rebates.sol"; +import { IMulticall } from "../base/IMulticall.sol"; +import { IManaged } from "../governance/IManaged.sol"; + +/** + * @title Interface for the Staking contract + * @notice This is the interface that should be used when interacting with the Staking contract. + * @dev Note that Staking doesn't actually inherit this interface. This is because of + * the custom setup of the Staking contract where part of the functionality is implemented + * in a separate contract (StakingExtension) to which calls are delegated through the fallback function. 
+ */ +interface IStaking is IStakingBase, IStakingExtension, IMulticall, IManaged { + // Nothing to see here } diff --git a/contracts/staking/IStakingBase.sol b/contracts/staking/IStakingBase.sol new file mode 100644 index 000000000..406d29839 --- /dev/null +++ b/contracts/staking/IStakingBase.sol @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity >=0.6.12 <0.8.0; +pragma abicoder v2; + +import { IStakingData } from "./IStakingData.sol"; + +/** + * @title Base interface for the Staking contract. + * @dev This interface includes only what's implemented in the base Staking contract. + * It does not include the L1 and L2 specific functionality. It also does not include + * several functions that are implemented in the StakingExtension contract, and are called + * via delegatecall through the fallback function. See IStaking.sol for an interface + * that includes the full functionality. + */ +interface IStakingBase is IStakingData { + /** + * @dev Emitted when `indexer` stakes `tokens` amount. + */ + event StakeDeposited(address indexed indexer, uint256 tokens); + + /** + * @dev Emitted when `indexer` unstaked and locked `tokens` amount until `until` block. + */ + event StakeLocked(address indexed indexer, uint256 tokens, uint256 until); + + /** + * @dev Emitted when `indexer` withdrew `tokens` staked. + */ + event StakeWithdrawn(address indexed indexer, uint256 tokens); + + /** + * @dev Emitted when `indexer` allocated `tokens` amount to `subgraphDeploymentID` + * during `epoch`. + * `allocationID` indexer derived address used to identify the allocation. + * `metadata` additional information related to the allocation. + */ + event AllocationCreated( + address indexed indexer, + bytes32 indexed subgraphDeploymentID, + uint256 epoch, + uint256 tokens, + address indexed allocationID, + bytes32 metadata + ); + + /** + * @dev Emitted when `indexer` collected `tokens` amount in `epoch` for `allocationID`. 
+ * These funds are related to `subgraphDeploymentID`.
+ * The `from` value is the sender of the collected funds.
+ */
+ event AllocationCollected(
+ address indexed indexer,
+ bytes32 indexed subgraphDeploymentID,
+ uint256 epoch,
+ uint256 tokens,
+ address indexed allocationID,
+ address from,
+ uint256 curationFees,
+ uint256 rebateFees
+ );
+
+ /**
+ * @dev Emitted when `indexer` closes an allocation in `epoch` for `allocationID`.
+ * An amount of `tokens` gets unallocated from `subgraphDeploymentID`.
+ * The `effectiveAllocation` are the tokens allocated from creation to closing.
+ * This event also emits the POI (proof of indexing) submitted by the indexer.
+ * `isPublic` is true if the sender was someone other than the indexer.
+ */
+ event AllocationClosed(
+ address indexed indexer,
+ bytes32 indexed subgraphDeploymentID,
+ uint256 epoch,
+ uint256 tokens,
+ address indexed allocationID,
+ uint256 effectiveAllocation,
+ address sender,
+ bytes32 poi,
+ bool isPublic
+ );
+
+ /**
+ * @dev Emitted when `indexer` claimed a rebate on `subgraphDeploymentID` during `epoch`
+ * related to the `forEpoch` rebate pool.
+ * The rebate is for `tokens` amount and `unclaimedAllocationsCount` are left for claim
+ * in the rebate pool. `delegationFees` collected and sent to delegation pool.
+ */
+ event RebateClaimed(
+ address indexed indexer,
+ bytes32 indexed subgraphDeploymentID,
+ address indexed allocationID,
+ uint256 epoch,
+ uint256 forEpoch,
+ uint256 tokens,
+ uint256 unclaimedAllocationsCount,
+ uint256 delegationFees
+ );
+
+ /**
+ * @dev Emitted when `indexer` updates the delegation parameters for its delegation pool.
+ */
+ event DelegationParametersUpdated(
+ address indexed indexer,
+ uint32 indexingRewardCut,
+ uint32 queryFeeCut,
+ uint32 cooldownBlocks
+ );
+
+ /**
+ * @dev Emitted when `caller` sets `assetHolder` address as `allowed` to send funds
+ * to staking contract. 
+ */ + event AssetHolderUpdate(address indexed caller, address indexed assetHolder, bool allowed); + + /** + * @dev Emitted when `indexer` set `operator` access. + */ + event SetOperator(address indexed indexer, address indexed operator, bool allowed); + + /** + * @dev Emitted when `indexer` set an address to receive rewards. + */ + event SetRewardsDestination(address indexed indexer, address indexed destination); + + /** + * @dev Emitted when `extensionImpl` was set as the address of the StakingExtension contract + * to which extended functionality is delegated. + */ + event ExtensionImplementationSet(address extensionImpl); + + /** + * @dev Possible states an allocation can be. + * States: + * - Null = indexer == address(0) + * - Active = not Null && tokens > 0 + * - Closed = Active && closedAtEpoch != 0 + * - Finalized = Closed && closedAtEpoch + channelDisputeEpochs > now() + * - Claimed = not Null && tokens == 0 + */ + enum AllocationState { + Null, + Active, + Closed, + Finalized, + Claimed + } + + /** + * @notice Initialize this contract. 
+ * @param _controller Address of the controller that manages this contract
+ * @param _minimumIndexerStake Minimum amount of tokens that an indexer must stake
+ * @param _thawingPeriod Number of blocks that tokens get locked after unstaking
+ * @param _protocolPercentage Percentage of query fees that are burned as protocol fee (in PPM)
+ * @param _curationPercentage Percentage of query fees that are given to curators (in PPM)
+ * @param _channelDisputeEpochs The period in epochs that needs to pass before fees in rebate pool can be claimed
+ * @param _maxAllocationEpochs The maximum number of epochs that an allocation can be active
+ * @param _delegationUnbondingPeriod The period in epochs that tokens get locked after undelegating
+ * @param _delegationRatio The ratio between an indexer's own stake and the delegation they can use
+ * @param _rebateAlphaNumerator The numerator of the alpha factor used to calculate the rebate
+ * @param _rebateAlphaDenominator The denominator of the alpha factor used to calculate the rebate
+ * @param _extensionImpl Address of the StakingExtension implementation
+ */
+ function initialize(
+ address _controller,
+ uint256 _minimumIndexerStake,
+ uint32 _thawingPeriod,
+ uint32 _protocolPercentage,
+ uint32 _curationPercentage,
+ uint32 _channelDisputeEpochs,
+ uint32 _maxAllocationEpochs,
+ uint32 _delegationUnbondingPeriod,
+ uint32 _delegationRatio,
+ uint32 _rebateAlphaNumerator,
+ uint32 _rebateAlphaDenominator,
+ address _extensionImpl
+ ) external;
+
+ /**
+ * @notice Set the address of the StakingExtension implementation.
+ * @dev This function can only be called by the governor.
+ * @param _extensionImpl Address of the StakingExtension implementation
+ */
+ function setExtensionImpl(address _extensionImpl) external;
+
+ /**
+ * @notice Set the address of the counterpart (L1 or L2) staking contract.
+ * @dev This function can only be called by the governor. 
+ * @param _counterpart Address of the counterpart staking contract in the other chain, without any aliasing. + */ + function setCounterpartStakingAddress(address _counterpart) external; + + /** + * @notice Set the minimum stake needed to be an Indexer + * @dev This function can only be called by the governor. + * @param _minimumIndexerStake Minimum amount of tokens that an indexer must stake + */ + function setMinimumIndexerStake(uint256 _minimumIndexerStake) external; + + /** + * @notice Set the number of blocks that tokens get locked after unstaking + * @dev This function can only be called by the governor. + * @param _thawingPeriod Number of blocks that tokens get locked after unstaking + */ + function setThawingPeriod(uint32 _thawingPeriod) external; + + /** + * @notice Set the curation percentage of query fees sent to curators. + * @dev This function can only be called by the governor. + * @param _percentage Percentage of query fees sent to curators + */ + function setCurationPercentage(uint32 _percentage) external; + + /** + * @notice Set a protocol percentage to burn when collecting query fees. + * @dev This function can only be called by the governor. + * @param _percentage Percentage of query fees to burn as protocol fee + */ + function setProtocolPercentage(uint32 _percentage) external; + + /** + * @notice Set the period in epochs that need to pass before fees in rebate pool can be claimed. + * @dev This function can only be called by the governor. + * @param _channelDisputeEpochs Period in epochs + */ + function setChannelDisputeEpochs(uint32 _channelDisputeEpochs) external; + + /** + * @notice Set the max time allowed for indexers to allocate on a subgraph + * before others are allowed to close the allocation. + * @dev This function can only be called by the governor. 
+ * @param _maxAllocationEpochs Allocation duration limit in epochs + */ + function setMaxAllocationEpochs(uint32 _maxAllocationEpochs) external; + + /** + * @notice Set the rebate ratio (fees to allocated stake). + * @dev This function can only be called by the governor. + * @param _alphaNumerator Numerator of `alpha` in the cobb-douglas function + * @param _alphaDenominator Denominator of `alpha` in the cobb-douglas function + */ + function setRebateRatio(uint32 _alphaNumerator, uint32 _alphaDenominator) external; + + /** + * @notice Set an address as allowed asset holder. + * @dev This function can only be called by the governor. + * @param _assetHolder Address of allowed source for state channel funds + * @param _allowed True if asset holder is allowed + */ + function setAssetHolder(address _assetHolder, bool _allowed) external; + + /** + * @notice Authorize or unauthorize an address to be an operator for the caller. + * @param _operator Address to authorize or unauthorize + * @param _allowed Whether the operator is authorized or not + */ + function setOperator(address _operator, bool _allowed) external; + + /** + * @notice Deposit tokens on the indexer's stake. + * The amount staked must be over the minimumIndexerStake. + * @param _tokens Amount of tokens to stake + */ + function stake(uint256 _tokens) external; + + /** + * @notice Deposit tokens on the Indexer stake, on behalf of the Indexer. + * The amount staked must be over the minimumIndexerStake. + * @param _indexer Address of the indexer + * @param _tokens Amount of tokens to stake + */ + function stakeTo(address _indexer, uint256 _tokens) external; + + /** + * @notice Unstake tokens from the indexer stake, lock them until the thawing period expires. + * @dev NOTE: The function accepts an amount greater than the currently staked tokens. + * If that happens, it will try to unstake the max amount of tokens it can. 
+ * The reason for this behaviour is to avoid time conditions while the transaction
+ * is in flight.
+ * @param _tokens Amount of tokens to unstake
+ */
+ function unstake(uint256 _tokens) external;
+
+ /**
+ * @notice Withdraw indexer tokens once the thawing period has passed.
+ */
+ function withdraw() external;
+
+ /**
+ * @notice Set the destination where to send rewards for an indexer.
+ * @param _destination Rewards destination address. If set to zero, rewards will be restaked
+ */
+ function setRewardsDestination(address _destination) external;
+
+ /**
+ * @notice Set the delegation parameters for the caller.
+ * @param _indexingRewardCut Percentage of indexing rewards left for the indexer
+ * @param _queryFeeCut Percentage of query fees left for the indexer
+ * @param _cooldownBlocks Period that needs to pass to update delegation parameters
+ */
+ function setDelegationParameters(
+ uint32 _indexingRewardCut,
+ uint32 _queryFeeCut,
+ uint32 _cooldownBlocks
+ ) external;
+
+ /**
+ * @notice Allocate available tokens to a subgraph deployment.
+ * @param _subgraphDeploymentID ID of the SubgraphDeployment where tokens will be allocated
+ * @param _tokens Amount of tokens to allocate
+ * @param _allocationID The allocation identifier
+ * @param _metadata IPFS hash for additional information about the allocation
+ * @param _proof A 65-bytes Ethereum signed message of `keccak256(indexerAddress,allocationID)`
+ */
+ function allocate(
+ bytes32 _subgraphDeploymentID,
+ uint256 _tokens,
+ address _allocationID,
+ bytes32 _metadata,
+ bytes calldata _proof
+ ) external;
+
+ /**
+ * @notice Allocate available tokens to a subgraph deployment from an indexer's stake.
+ * The caller must be the indexer or the indexer's operator.
+ * @param _indexer Indexer address to allocate funds from. 
+ * @param _subgraphDeploymentID ID of the SubgraphDeployment where tokens will be allocated + * @param _tokens Amount of tokens to allocate + * @param _allocationID The allocation identifier + * @param _metadata IPFS hash for additional information about the allocation + * @param _proof A 65-bytes Ethereum signed message of `keccak256(indexerAddress,allocationID)` + */ + function allocateFrom( + address _indexer, + bytes32 _subgraphDeploymentID, + uint256 _tokens, + address _allocationID, + bytes32 _metadata, + bytes calldata _proof + ) external; + + /** + * @notice Close an allocation and free the staked tokens. + * To be eligible for rewards a proof of indexing must be presented. + * Presenting a bad proof is subject to slashable condition. + * To opt out of rewards set _poi to 0x0 + * @param _allocationID The allocation identifier + * @param _poi Proof of indexing submitted for the allocated period + */ + function closeAllocation(address _allocationID, bytes32 _poi) external; + + /** + * @notice Collect query fees from state channels and assign them to an allocation. + * Funds received are only accepted from a valid sender. + * @dev To avoid reverting on the withdrawal from channel flow this function will: + * 1) Accept calls with zero tokens. + * 2) Accept calls after an allocation passed the dispute period, in that case, all + * the received tokens are burned. + * @param _tokens Amount of tokens to collect + * @param _allocationID Allocation where the tokens will be assigned + */ + function collect(uint256 _tokens, address _allocationID) external; + + /** + * @notice Claim tokens from the rebate pool. + * @param _allocationID Allocation from where we are claiming tokens + * @param _restake True if restake fees instead of transfer to indexer + */ + function claim(address _allocationID, bool _restake) external; + + /** + * @dev Claim tokens from the rebate pool for many allocations. 
+ * @param _allocationID Array of allocations from where we are claiming tokens + * @param _restake True if restake fees instead of transfer to indexer + */ + function claimMany(address[] calldata _allocationID, bool _restake) external; + + /** + * @notice Return true if operator is allowed for indexer. + * @param _operator Address of the operator + * @param _indexer Address of the indexer + * @return True if operator is allowed for indexer, false otherwise + */ + function isOperator(address _operator, address _indexer) external view returns (bool); + + /** + * @notice Getter that returns if an indexer has any stake. + * @param _indexer Address of the indexer + * @return True if indexer has staked tokens + */ + function hasStake(address _indexer) external view returns (bool); + + /** + * @notice Get the total amount of tokens staked by the indexer. + * @param _indexer Address of the indexer + * @return Amount of tokens staked by the indexer + */ + function getIndexerStakedTokens(address _indexer) external view returns (uint256); + + /** + * @notice Get the total amount of tokens available to use in allocations. + * This considers the indexer stake and delegated tokens according to delegation ratio + * @param _indexer Address of the indexer + * @return Amount of tokens available to allocate including delegation + */ + function getIndexerCapacity(address _indexer) external view returns (uint256); + + /** + * @notice Return the allocation by ID. + * @param _allocationID Address used as allocation identifier + * @return Allocation data + */ + function getAllocation(address _allocationID) external view returns (Allocation memory); + + /** + * @notice Return the current state of an allocation + * @param _allocationID Allocation identifier + * @return AllocationState enum with the state of the allocation + */ + function getAllocationState(address _allocationID) external view returns (AllocationState); + + /** + * @notice Return if allocationID is used. 
+ * @param _allocationID Address used as signer by the indexer for an allocation + * @return True if allocationID already used + */ + function isAllocation(address _allocationID) external view returns (bool); + + /** + * @notice Return the total amount of tokens allocated to subgraph. + * @param _subgraphDeploymentID Deployment ID for the subgraph + * @return Total tokens allocated to subgraph + */ + function getSubgraphAllocatedTokens(bytes32 _subgraphDeploymentID) + external + view + returns (uint256); +} diff --git a/contracts/staking/IStakingData.sol b/contracts/staking/IStakingData.sol index 348a5a7f9..6787bf76a 100644 --- a/contracts/staking/IStakingData.sol +++ b/contracts/staking/IStakingData.sol @@ -2,6 +2,10 @@ pragma solidity >=0.6.12 <0.8.0; +/** + * @title Staking Data interface + * @dev This interface defines some structures used by the Staking contract. + */ interface IStakingData { /** * @dev Allocate GRT tokens for the purpose of serving queries of a subgraph deployment @@ -19,7 +23,7 @@ interface IStakingData { } /** - * @dev Represents a request to close an allocation with a specific proof of indexing. + * @dev CloseAllocationRequest represents a request to close an allocation with a specific proof of indexing. * This is passed when calling closeAllocationMany to define the closing parameters for * each allocation. 
*/ diff --git a/contracts/staking/IStakingExtension.sol b/contracts/staking/IStakingExtension.sol new file mode 100644 index 000000000..e63f5c035 --- /dev/null +++ b/contracts/staking/IStakingExtension.sol @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity >=0.6.12 <0.8.0; +pragma abicoder v2; + +import { IStakingData } from "./IStakingData.sol"; +import { Rebates } from "./libs/Rebates.sol"; +import { Stakes } from "./libs/Stakes.sol"; + +/** + * @title Interface for the StakingExtension contract + * @dev This interface defines the events and functions implemented + * in the StakingExtension contract, which is used to extend the functionality + * of the Staking contract while keeping it within the 24kB mainnet size limit. + * In particular, this interface includes delegation functions and various storage + * getters. + */ +interface IStakingExtension is IStakingData { + /** + * @dev DelegationPool struct as returned by delegationPools(), since + * the original DelegationPool in IStakingData.sol contains a nested mapping. + */ + struct DelegationPoolReturn { + uint32 cooldownBlocks; // Blocks to wait before updating parameters + uint32 indexingRewardCut; // in PPM + uint32 queryFeeCut; // in PPM + uint256 updatedAtBlock; // Block when the pool was last updated + uint256 tokens; // Total tokens as pool reserves + uint256 shares; // Total shares minted in the pool + } + + /** + * @dev Emitted when `delegator` delegated `tokens` to the `indexer`, the delegator + * gets `shares` for the delegation pool proportionally to the tokens staked. + */ + event StakeDelegated( + address indexed indexer, + address indexed delegator, + uint256 tokens, + uint256 shares + ); + + /** + * @dev Emitted when `delegator` undelegated `tokens` from `indexer`. + * Tokens get locked for withdrawal after a period of time. 
+ */
+ event StakeDelegatedLocked(
+ address indexed indexer,
+ address indexed delegator,
+ uint256 tokens,
+ uint256 shares,
+ uint256 until
+ );
+
+ /**
+ * @dev Emitted when `delegator` withdrew delegated `tokens` from `indexer`.
+ */
+ event StakeDelegatedWithdrawn(
+ address indexed indexer,
+ address indexed delegator,
+ uint256 tokens
+ );
+
+ /**
+ * @dev Emitted when `indexer` was slashed for a total of `tokens` amount.
+ * Tracks `reward` amount of tokens given to `beneficiary`.
+ */
+ event StakeSlashed(
+ address indexed indexer,
+ uint256 tokens,
+ uint256 reward,
+ address beneficiary
+ );
+
+ /**
+ * @dev Emitted when `caller` set `slasher` address as `allowed` to slash stakes.
+ */
+ event SlasherUpdate(address indexed caller, address indexed slasher, bool allowed);
+
+ /**
+ * @notice Set the delegation ratio.
+ * If set to 10 it means the indexer can use up to 10x the indexer staked amount
+ * from their delegated tokens
+ * @dev This function is only callable by the governor
+ * @param _delegationRatio Delegation capacity multiplier
+ */
+ function setDelegationRatio(uint32 _delegationRatio) external;
+
+ /**
+ * @notice Set the minimum time in blocks an indexer needs to wait to change delegation parameters.
+ * Indexers can set a custom amount of time for their own cooldown, but it must be greater than this.
+ * @dev This function is only callable by the governor
+ * @param _blocks Number of blocks to set the delegation parameters cooldown period
+ */
+ function setDelegationParametersCooldown(uint32 _blocks) external;
+
+ /**
+ * @notice Set the time, in epochs, a Delegator needs to wait to withdraw tokens after undelegating. 
+ * @dev This function is only callable by the governor + * @param _delegationUnbondingPeriod Period in epochs to wait for token withdrawals after undelegating + */ + function setDelegationUnbondingPeriod(uint32 _delegationUnbondingPeriod) external; + + /** + * @notice Set a delegation tax percentage to burn when delegated funds are deposited. + * @dev This function is only callable by the governor + * @param _percentage Percentage of delegated tokens to burn as delegation tax, expressed in parts per million + */ + function setDelegationTaxPercentage(uint32 _percentage) external; + + /** + * @notice Set or unset an address as allowed slasher. + * @dev This function can only be called by the governor. + * @param _slasher Address of the party allowed to slash indexers + * @param _allowed True if slasher is allowed + */ + function setSlasher(address _slasher, bool _allowed) external; + + /** + * @notice Delegate tokens to an indexer. + * @param _indexer Address of the indexer to which tokens are delegated + * @param _tokens Amount of tokens to delegate + * @return Amount of shares issued from the delegation pool + */ + function delegate(address _indexer, uint256 _tokens) external returns (uint256); + + /** + * @notice Undelegate tokens from an indexer. Tokens will be locked for the unbonding period. + * @param _indexer Address of the indexer to which tokens had been delegated + * @param _shares Amount of shares to return and undelegate tokens + * @return Amount of tokens returned for the shares of the delegation pool + */ + function undelegate(address _indexer, uint256 _shares) external returns (uint256); + + /** + * @notice Withdraw undelegated tokens once the unbonding period has passed, and optionally + * re-delegate to a new indexer. 
+ * @param _indexer Withdraw available tokens delegated to indexer + * @param _newIndexer Re-delegate to indexer address if non-zero, withdraw if zero address + */ + function withdrawDelegated(address _indexer, address _newIndexer) external returns (uint256); + + /** + * @notice Slash the indexer stake. Delegated tokens are not subject to slashing. + * @dev Can only be called by the slasher role. + * @param _indexer Address of indexer to slash + * @param _tokens Amount of tokens to slash from the indexer stake + * @param _reward Amount of reward tokens to send to a beneficiary + * @param _beneficiary Address of a beneficiary to receive a reward for the slashing + */ + function slash( + address _indexer, + uint256 _tokens, + uint256 _reward, + address _beneficiary + ) external; + + /** + * @notice Return the delegation from a delegator to an indexer. + * @param _indexer Address of the indexer where funds have been delegated + * @param _delegator Address of the delegator + * @return Delegation data + */ + function getDelegation(address _indexer, address _delegator) + external + view + returns (Delegation memory); + + /** + * @notice Return whether the delegator has delegated to the indexer. + * @param _indexer Address of the indexer where funds have been delegated + * @param _delegator Address of the delegator + * @return True if delegator has tokens delegated to the indexer + */ + function isDelegator(address _indexer, address _delegator) external view returns (bool); + + /** + * @notice Returns amount of delegated tokens ready to be withdrawn after unbonding period. + * @param _delegation Delegation of tokens from delegator to indexer + * @return Amount of tokens to withdraw + */ + function getWithdraweableDelegatedTokens(Delegation memory _delegation) + external + view + returns (uint256); + + /** + * @notice Getter for the delegationRatio, i.e. 
the delegation capacity multiplier: + * If delegation ratio is 100, and an Indexer has staked 5 GRT, + * then they can use up to 500 GRT from the delegated stake + * @return Delegation ratio + */ + function delegationRatio() external view returns (uint32); + + /** + * @notice Getter for delegationParametersCooldown: + * Minimum time in blocks an indexer needs to wait to change delegation parameters + * @return Delegation parameters cooldown in blocks + */ + function delegationParametersCooldown() external view returns (uint32); + + /** + * @notice Getter for delegationUnbondingPeriod: + * Time in epochs a delegator needs to wait to withdraw delegated stake + * @return Delegation unbonding period in epochs + */ + function delegationUnbondingPeriod() external view returns (uint32); + + /** + * @notice Getter for delegationTaxPercentage: + * Percentage of tokens to tax a delegation deposit, expressed in parts per million + * @return Delegation tax percentage in parts per million + */ + function delegationTaxPercentage() external view returns (uint32); + + /** + * @notice Getter for delegationPools[_indexer]: + * gets the delegation pool structure for a particular indexer. + * @param _indexer Address of the indexer for which to query the delegation pool + * @return Delegation pool as a DelegationPoolReturn struct + */ + function delegationPools(address _indexer) external view returns (DelegationPoolReturn memory); + + /** + * @notice Getter for operatorAuth[_indexer][_maybeOperator]: + * returns true if the operator is authorized to operate on behalf of the indexer. 
+ * @param _indexer The indexer address for which to query authorization
+ * @param _maybeOperator The address that may or may not be an operator
+ * @return True if the operator is authorized to operate on behalf of the indexer
+ */
+ function operatorAuth(address _indexer, address _maybeOperator) external view returns (bool);
+
+ /**
+ * @notice Getter for rewardsDestination[_indexer]:
+ * returns the address where the indexer's rewards are sent.
+ * @param _indexer The indexer address for which to query the rewards destination
+ * @return The address where the indexer's rewards are sent, zero if none is set, in which case rewards are re-staked
+ */
+ function rewardsDestination(address _indexer) external view returns (address);
+
+ /**
+ * @notice Getter for assetHolders[_maybeAssetHolder]:
+ * returns true if the address is an asset holder, i.e. an entity that can collect
+ * query fees into the Staking contract.
+ * @param _maybeAssetHolder The address that may or may not be an asset holder
+ * @return True if the address is an asset holder
+ */
+ function assetHolders(address _maybeAssetHolder) external view returns (bool);
+
+ /**
+ * @notice Getter for subgraphAllocations[_subgraphDeploymentId]:
+ * returns the amount of tokens allocated to a subgraph deployment.
+ * @param _subgraphDeploymentId The subgraph deployment for which to query the allocations
+ * @return The amount of tokens allocated to the subgraph deployment
+ */
+ function subgraphAllocations(bytes32 _subgraphDeploymentId) external view returns (uint256);
+
+ /**
+ * @notice Getter for rebates[_epoch]:
+ * gets the rebate pool for a particular epoch.
+ * @param _epoch Epoch for which to query the rebate pool
+ * @return Rebate pool for the specified epoch, as a Rebates.Pool struct
+ */
+ function rebates(uint256 _epoch) external view returns (Rebates.Pool memory);
+
+ /**
+ * @notice Getter for slashers[_maybeSlasher]:
+ * returns true if the address is a slasher, i.e. 
an entity that can slash indexers + * @param _maybeSlasher Address for which to check the slasher role + * @return True if the address is a slasher + */ + function slashers(address _maybeSlasher) external view returns (bool); + + /** + * @notice Getter for minimumIndexerStake: the minimum + * amount of GRT that an indexer needs to stake. + * @return Minimum indexer stake in GRT + */ + function minimumIndexerStake() external view returns (uint256); + + /** + * @notice Getter for thawingPeriod: the time in blocks an + * indexer needs to wait to unstake tokens. + * @return Thawing period in blocks + */ + function thawingPeriod() external view returns (uint32); + + /** + * @notice Getter for curationPercentage: the percentage of + * query fees that are distributed to curators. + * @return Curation percentage in parts per million + */ + function curationPercentage() external view returns (uint32); + + /** + * @notice Getter for protocolPercentage: the percentage of + * query fees that are burned as protocol fees. + * @return Protocol percentage in parts per million + */ + function protocolPercentage() external view returns (uint32); + + /** + * @notice Getter for channelDisputeEpochs: the time in epochs + * between closing an allocation and the moment it becomes finalized so + * query fees can be claimed. + * @return Channel dispute period in epochs + */ + function channelDisputeEpochs() external view returns (uint32); + + /** + * @notice Getter for maxAllocationEpochs: the maximum time in epochs + * that an allocation can be open before anyone is allowed to close it. This + * also caps the effective allocation when sending the allocation's query fees + * to the rebate pool. + * @return Maximum allocation period in epochs + */ + function maxAllocationEpochs() external view returns (uint32); + + /** + * @notice Getter for alphaNumerator: the numerator of the Cobb-Douglas + * rebate ratio. 
+ * @return Rebate ratio numerator + */ + function alphaNumerator() external view returns (uint32); + + /** + * @notice Getter for alphaDenominator: the denominator of the Cobb-Douglas + * rebate ratio. + * @return Rebate ratio denominator + */ + function alphaDenominator() external view returns (uint32); + + /** + * @notice Getter for stakes[_indexer]: + * gets the stake information for an indexer as a Stakes.Indexer struct. + * @param _indexer Indexer address for which to query the stake information + * @return Stake information for the specified indexer, as a Stakes.Indexer struct + */ + function stakes(address _indexer) external view returns (Stakes.Indexer memory); + + /** + * @notice Getter for allocations[_allocationID]: + * gets an allocation's information as an IStakingData.Allocation struct. + * @param _allocationID Allocation ID for which to query the allocation information + * @return The specified allocation, as an IStakingData.Allocation struct + */ + function allocations(address _allocationID) + external + view + returns (IStakingData.Allocation memory); +} diff --git a/contracts/staking/L1Staking.sol b/contracts/staking/L1Staking.sol new file mode 100644 index 000000000..e1ec82e95 --- /dev/null +++ b/contracts/staking/L1Staking.sol @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; + +import { ITokenGateway } from "../arbitrum/ITokenGateway.sol"; +import { Staking } from "./Staking.sol"; +import { Stakes } from "./libs/Stakes.sol"; +import { IStakingData } from "./IStakingData.sol"; +import { IL2Staking } from "../l2/staking/IL2Staking.sol"; +import { L1StakingV1Storage } from "./L1StakingStorage.sol"; +import { IGraphToken } from "../token/IGraphToken.sol"; +import { IL1StakingBase } from "./IL1StakingBase.sol"; +import { MathUtils } from "./libs/MathUtils.sol"; +import { IL1GraphTokenLockMigrator } from 
"./IL1GraphTokenLockMigrator.sol"; + +/** + * @title L1Staking contract + * @dev This contract is the L1 variant of the Staking contract. It adds functions + * to send an indexer's stake to L2, and to send delegation to L2 as well. + */ +contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { + using Stakes for Stakes.Indexer; + using SafeMath for uint256; + + /** + * @notice Receive ETH into the Staking contract + * @dev Only the L1GraphTokenLockMigrator can send ETH, as part of the + * migration of stake/delegation for vesting lock wallets. + */ + receive() external payable { + require(msg.sender == address(l1GraphTokenLockMigrator), "Only migrator can send ETH"); + } + + /** + * @notice Set the L1GraphTokenLockMigrator contract address + * @dev This function can only be called by the governor. + * @param _l1GraphTokenLockMigrator Address of the L1GraphTokenLockMigrator contract + */ + function setL1GraphTokenLockMigrator(IL1GraphTokenLockMigrator _l1GraphTokenLockMigrator) + external + override + onlyGovernor + { + l1GraphTokenLockMigrator = _l1GraphTokenLockMigrator; + emit L1GraphTokenLockMigratorSet(address(_l1GraphTokenLockMigrator)); + } + + /** + * @notice Send an indexer's stake to L2. + * @dev This function can only be called by the indexer (not an operator). + * It will validate that the remaining stake is sufficient to cover all the allocated + * stake, so the indexer might have to close some allocations before migrating. + * It will also check that the indexer's stake is not locked for withdrawal. + * Since the indexer address might be an L1-only contract, the function takes a beneficiary + * address that will be the indexer's address in L2. + * The caller must provide an amount of ETH to use for the L2 retryable ticket, that + * must be at least `_maxSubmissionCost + _gasPriceBid * _maxGas`. + * @param _l2Beneficiary Address of the indexer in L2. If the indexer has previously migrated stake, this must match the previously-used value. 
+ * @param _amount Amount of stake GRT to migrate to L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + */ + function migrateStakeToL2( + address _l2Beneficiary, + uint256 _amount, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost + ) external payable override { + _migrateStakeToL2( + msg.sender, + _l2Beneficiary, + _amount, + _maxGas, + _gasPriceBid, + _maxSubmissionCost, + msg.value + ); + } + + /** + * @notice Send an indexer's stake to L2, from a GraphTokenLockWallet vesting contract. + * @dev This function can only be called by the indexer (not an operator). + * It will validate that the remaining stake is sufficient to cover all the allocated + * stake, so the indexer might have to close some allocations before migrating. + * It will also check that the indexer's stake is not locked for withdrawal. + * The L2 beneficiary for the stake will be determined by calling the L1GraphTokenLockMigrator contract, + * so the caller must have previously migrated tokens through that first + * (see GIP-0046 for details: https://forum.thegraph.com/t/gip-0046-l2-migration-helpers/4023). + * The ETH for the L2 gas will be pulled from the L1GraphTokenLockMigrator, so the owner of + * the GraphTokenLockWallet must have previously deposited at least `_maxSubmissionCost + _gasPriceBid * _maxGas` + * ETH into the L1GraphTokenLockMigrator contract (using its depositETH function). 
+ * @param _amount Amount of stake GRT to migrate to L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + */ + function migrateLockedStakeToL2( + uint256 _amount, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost + ) external override { + address l2Beneficiary = l1GraphTokenLockMigrator.migratedWalletAddress(msg.sender); + require(l2Beneficiary != address(0), "LOCK NOT MIGRATED"); + uint256 balance = address(this).balance; + uint256 ethAmount = _maxSubmissionCost.add(_maxGas.mul(_gasPriceBid)); + l1GraphTokenLockMigrator.pullETH(msg.sender, ethAmount); + require(address(this).balance == balance.add(ethAmount), "ETH TRANSFER FAILED"); + _migrateStakeToL2( + msg.sender, + l2Beneficiary, + _amount, + _maxGas, + _gasPriceBid, + _maxSubmissionCost, + ethAmount + ); + } + + /** + * @notice Send a delegator's delegated tokens to L2 + * @dev This function can only be called by the delegator. + * This function will validate that the indexer has migrated their stake using migrateStakeToL2, + * and that the delegation is not locked for undelegation. + * Since the delegator's address might be an L1-only contract, the function takes a beneficiary + * address that will be the delegator's address in L2. + * The caller must provide an amount of ETH to use for the L2 retryable ticket, that + * must be at least `_maxSubmissionCost + _gasPriceBid * _maxGas`. 
+ * @param _indexer Address of the indexer (in L1, before migrating) + * @param _l2Beneficiary Address of the delegator in L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + */ + function migrateDelegationToL2( + address _indexer, + address _l2Beneficiary, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost + ) external payable override { + _migrateDelegationToL2( + msg.sender, + _indexer, + _l2Beneficiary, + _maxGas, + _gasPriceBid, + _maxSubmissionCost, + msg.value + ); + } + + /** + * @notice Send a delegator's delegated tokens to L2, for a GraphTokenLockWallet vesting contract + * @dev This function can only be called by the delegator. + * This function will validate that the indexer has migrated their stake using migrateStakeToL2, + * and that the delegation is not locked for undelegation. + * The L2 beneficiary for the delegation will be determined by calling the L1GraphTokenLockMigrator contract, + * so the caller must have previously migrated tokens through that first + * (see GIP-0046 for details: https://forum.thegraph.com/t/gip-0046-l2-migration-helpers/4023). + * The ETH for the L2 gas will be pulled from the L1GraphTokenLockMigrator, so the owner of + * the GraphTokenLockWallet must have previously deposited at least `_maxSubmissionCost + _gasPriceBid * _maxGas` + * ETH into the L1GraphTokenLockMigrator contract (using its depositETH function). 
+ * @param _indexer Address of the indexer (in L1, before migrating) + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + */ + function migrateLockedDelegationToL2( + address _indexer, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost + ) external override { + address l2Beneficiary = l1GraphTokenLockMigrator.migratedWalletAddress(msg.sender); + require(l2Beneficiary != address(0), "LOCK NOT MIGRATED"); + uint256 balance = address(this).balance; + uint256 ethAmount = _maxSubmissionCost.add(_maxGas.mul(_gasPriceBid)); + l1GraphTokenLockMigrator.pullETH(msg.sender, ethAmount); + require(address(this).balance == balance.add(ethAmount), "ETH TRANSFER FAILED"); + _migrateDelegationToL2( + msg.sender, + _indexer, + l2Beneficiary, + _maxGas, + _gasPriceBid, + _maxSubmissionCost, + ethAmount + ); + } + + /** + * @notice Unlock a delegator's delegated tokens, if the indexer has migrated + * @dev This function can only be called by the delegator. + * This function will validate that the indexer has migrated their stake using migrateStakeToL2, + * and that the indexer has no remaining stake in L1. + * The tokens must previously be locked for undelegation by calling `undelegate()`, + * and can be withdrawn with `withdrawDelegated()` immediately after calling this. + * @param _indexer Address of the indexer (in L1, before migrating) + */ + function unlockDelegationToMigratedIndexer(address _indexer) external override { + require( + indexerMigratedToL2[_indexer] != address(0) && __stakes[_indexer].tokensStaked == 0, + "indexer not migrated" + ); + + Delegation storage delegation = __delegationPools[_indexer].delegators[msg.sender]; + require(delegation.tokensLockedUntil != 0, "! 
locked"); + + // Unlock the delegation + delegation.tokensLockedUntil = epochManager().currentEpoch(); + + // After this, the delegator should be able to withdraw in the current block + emit StakeDelegatedUnlockedDueToMigration(_indexer, msg.sender); + } + + /** + * @dev Implements sending an indexer's stake to L2. + * This function can only be called by the indexer (not an operator). + * It will validate that the remaining stake is sufficient to cover all the allocated + * stake, so the indexer might have to close some allocations before migrating. + * It will also check that the indexer's stake is not locked for withdrawal. + * Since the indexer address might be an L1-only contract, the function takes a beneficiary + * address that will be the indexer's address in L2. + * @param _l2Beneficiary Address of the indexer in L2. If the indexer has previously migrated stake, this must match the previously-used value. + * @param _amount Amount of stake GRT to migrate to L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + * @param _ethAmount Amount of ETH to send with the retryable ticket + */ + function _migrateStakeToL2( + address _indexer, + address _l2Beneficiary, + uint256 _amount, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost, + uint256 _ethAmount + ) internal { + Stakes.Indexer storage indexerStake = __stakes[_indexer]; + require(indexerStake.tokensStaked != 0, "tokensStaked == 0"); + // Indexers shouldn't be trying to withdraw tokens before migrating to L2. + // Allowing this would complicate our accounting so we require that they have no + // tokens locked for withdrawal. 
+ require(indexerStake.tokensLocked == 0, "tokensLocked != 0"); + + require(_l2Beneficiary != address(0), "l2Beneficiary == 0"); + if (indexerMigratedToL2[_indexer] != address(0)) { + require(indexerMigratedToL2[_indexer] == _l2Beneficiary, "l2Beneficiary != previous"); + } else { + indexerMigratedToL2[_indexer] = _l2Beneficiary; + require(_amount >= __minimumIndexerStake, "!minimumIndexerStake sent"); + } + // Ensure minimum stake + indexerStake.tokensStaked = indexerStake.tokensStaked.sub(_amount); + require( + indexerStake.tokensStaked == 0 || indexerStake.tokensStaked >= __minimumIndexerStake, + "!minimumIndexerStake remaining" + ); + + IStakingData.DelegationPool storage delegationPool = __delegationPools[_indexer]; + + if (indexerStake.tokensStaked == 0) { + // require that no allocations are open + require(indexerStake.tokensAllocated == 0, "allocated"); + } else { + // require that the indexer has enough stake to cover all allocations + uint256 tokensDelegatedCap = indexerStake.tokensStaked.mul(uint256(__delegationRatio)); + uint256 tokensDelegatedCapacity = MathUtils.min( + delegationPool.tokens, + tokensDelegatedCap + ); + require( + indexerStake.tokensUsed() <= indexerStake.tokensStaked.add(tokensDelegatedCapacity), + "! allocation capacity" + ); + } + + IL2Staking.ReceiveIndexerStakeData memory functionData; + functionData.indexer = _l2Beneficiary; + + bytes memory extraData = abi.encode( + uint8(IL2Staking.L1MessageCodes.RECEIVE_INDEXER_STAKE_CODE), + abi.encode(functionData) + ); + + _sendTokensAndMessageToL2Staking( + _amount, + _maxGas, + _gasPriceBid, + _maxSubmissionCost, + _ethAmount, + extraData + ); + + emit IndexerMigratedToL2(_indexer, _l2Beneficiary, _amount); + } + + /** + * @dev Implements sending a delegator's delegated tokens to L2. + * This function can only be called by the delegator. 
+ * This function will validate that the indexer has migrated their stake using migrateStakeToL2, + * and that the delegation is not locked for undelegation. + * Since the delegator's address might be an L1-only contract, the function takes a beneficiary + * address that will be the delegator's address in L2. + * @param _indexer Address of the indexer (in L1, before migrating) + * @param _l2Beneficiary Address of the delegator in L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + * @param _ethAmount Amount of ETH to send with the retryable ticket + */ + function _migrateDelegationToL2( + address _delegator, + address _indexer, + address _l2Beneficiary, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost, + uint256 _ethAmount + ) internal { + require(_l2Beneficiary != address(0), "l2Beneficiary == 0"); + require(indexerMigratedToL2[_indexer] != address(0), "indexer not migrated"); + + // Get the delegation pool of the indexer + DelegationPool storage pool = __delegationPools[_indexer]; + Delegation storage delegation = pool.delegators[_delegator]; + + // Check that the delegation is not locked for undelegation + require(delegation.tokensLockedUntil == 0, "tokensLocked != 0"); + require(delegation.shares != 0, "delegation == 0"); + // Calculate tokens to get in exchange for the shares + uint256 tokensToSend = delegation.shares.mul(pool.tokens).div(pool.shares); + + // Update the delegation pool + pool.tokens = pool.tokens.sub(tokensToSend); + pool.shares = pool.shares.sub(delegation.shares); + + // Update the delegation + delegation.shares = 0; + bytes memory extraData; + { + IL2Staking.ReceiveDelegationData memory functionData; + functionData.indexer = indexerMigratedToL2[_indexer]; + functionData.delegator = _l2Beneficiary; + extraData = abi.encode( + 
uint8(IL2Staking.L1MessageCodes.RECEIVE_DELEGATION_CODE), + abi.encode(functionData) + ); + } + + _sendTokensAndMessageToL2Staking( + tokensToSend, + _maxGas, + _gasPriceBid, + _maxSubmissionCost, + _ethAmount, + extraData + ); + emit DelegationMigratedToL2( + _delegator, + _l2Beneficiary, + _indexer, + indexerMigratedToL2[_indexer], + tokensToSend + ); + } + + /** + * @dev Sends a message to the L2Staking with some extra data, + * also sending some tokens, using the L1GraphTokenGateway. + * @param _tokens Amount of tokens to send to L2 + * @param _maxGas Max gas to use for the L2 retryable ticket + * @param _gasPriceBid Gas price bid for the L2 retryable ticket + * @param _maxSubmissionCost Max submission cost for the L2 retryable ticket + * @param _value Amount of ETH to send with the message + * @param _extraData Extra data for the callhook on L2Staking + */ + function _sendTokensAndMessageToL2Staking( + uint256 _tokens, + uint256 _maxGas, + uint256 _gasPriceBid, + uint256 _maxSubmissionCost, + uint256 _value, + bytes memory _extraData + ) internal { + IGraphToken grt = graphToken(); + ITokenGateway gateway = graphTokenGateway(); + grt.approve(address(gateway), _tokens); + gateway.outboundTransfer{ value: _value }( + address(grt), + counterpartStakingAddress, + _tokens, + _maxGas, + _gasPriceBid, + abi.encode(_maxSubmissionCost, _extraData) + ); + } +} diff --git a/contracts/staking/L1StakingStorage.sol b/contracts/staking/L1StakingStorage.sol new file mode 100644 index 000000000..2fd180b13 --- /dev/null +++ b/contracts/staking/L1StakingStorage.sol @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +import { IL1GraphTokenLockMigrator } from "./IL1GraphTokenLockMigrator.sol"; + +/** + * @title L1StakingV1Storage + * @notice This contract holds all the L1-specific storage variables for the L1Staking contract, version 1 + * @dev When adding new versions, make sure to move the gap to the new version and + 
* reduce the size of the gap accordingly. + */ +abstract contract L1StakingV1Storage { + /// If an indexer has migrated to L2, this mapping will hold the indexer's address in L2 + mapping(address => address) public indexerMigratedToL2; + /// @dev For locked indexers/delegations, this contract holds the mapping of L1 to L2 addresses + IL1GraphTokenLockMigrator internal l1GraphTokenLockMigrator; + /// @dev Storage gap to keep storage slots fixed in future versions + uint256[50] private __gap; +} diff --git a/contracts/staking/Staking.sol b/contracts/staking/Staking.sol index 2bcc8d74d..9676f4f6a 100644 --- a/contracts/staking/Staking.sol +++ b/contracts/staking/Staking.sol @@ -3,207 +3,93 @@ pragma solidity ^0.7.6; pragma abicoder v2; -import "@openzeppelin/contracts/cryptography/ECDSA.sol"; - -import "../base/Multicall.sol"; -import "../upgrades/GraphUpgradeable.sol"; -import "../utils/TokenUtils.sol"; - -import "./IStaking.sol"; -import "./StakingStorage.sol"; -import "./libs/MathUtils.sol"; -import "./libs/Rebates.sol"; -import "./libs/Stakes.sol"; +import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; +import { ECDSA } from "@openzeppelin/contracts/cryptography/ECDSA.sol"; + +import { Multicall } from "../base/Multicall.sol"; +import { GraphUpgradeable } from "../upgrades/GraphUpgradeable.sol"; +import { TokenUtils } from "../utils/TokenUtils.sol"; +import { IGraphToken } from "../token/IGraphToken.sol"; +import { IStakingBase } from "./IStakingBase.sol"; +import { StakingV3Storage } from "./StakingStorage.sol"; +import { MathUtils } from "./libs/MathUtils.sol"; +import { Rebates } from "./libs/Rebates.sol"; +import { Stakes } from "./libs/Stakes.sol"; +import { Managed } from "../governance/Managed.sol"; +import { ICuration } from "../curation/ICuration.sol"; +import { IRewardsManager } from "../rewards/IRewardsManager.sol"; /** - * @title Staking contract + * @title Base Staking contract * @dev The Staking contract allows Indexers to Stake on 
Subgraphs. Indexers Stake by creating * Allocations on a Subgraph. It also allows Delegators to Delegate towards an Indexer. The * contract also has the slashing functionality. + * The contract is abstract as the implementation that is deployed depends on each layer: L1Staking on mainnet + * and L2Staking on Arbitrum. + * Note that this contract delegates part of its functionality to a StakingExtension contract. + * This is due to the 24kB contract size limit on Ethereum. */ -contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { +abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, Multicall { using SafeMath for uint256; using Stakes for Stakes.Indexer; using Rebates for Rebates.Pool; - // 100% in parts per million + /// @dev 100% in parts per million uint32 private constant MAX_PPM = 1000000; - // -- Events -- - - /** - * @dev Emitted when `indexer` update the delegation parameters for its delegation pool. - */ - event DelegationParametersUpdated( - address indexed indexer, - uint32 indexingRewardCut, - uint32 queryFeeCut, - uint32 cooldownBlocks - ); - - /** - * @dev Emitted when `indexer` stake `tokens` amount. - */ - event StakeDeposited(address indexed indexer, uint256 tokens); - - /** - * @dev Emitted when `indexer` unstaked and locked `tokens` amount `until` block. - */ - event StakeLocked(address indexed indexer, uint256 tokens, uint256 until); - - /** - * @dev Emitted when `indexer` withdrew `tokens` staked. - */ - event StakeWithdrawn(address indexed indexer, uint256 tokens); - - /** - * @dev Emitted when `indexer` was slashed for a total of `tokens` amount. - * Tracks `reward` amount of tokens given to `beneficiary`. - */ - event StakeSlashed( - address indexed indexer, - uint256 tokens, - uint256 reward, - address beneficiary - ); - - /** - * @dev Emitted when `delegator` delegated `tokens` to the `indexer`, the delegator - * gets `shares` for the delegation pool proportionally to the tokens staked. 
- */ - event StakeDelegated( - address indexed indexer, - address indexed delegator, - uint256 tokens, - uint256 shares - ); - - /** - * @dev Emitted when `delegator` undelegated `tokens` from `indexer`. - * Tokens get locked for withdrawal after a period of time. - */ - event StakeDelegatedLocked( - address indexed indexer, - address indexed delegator, - uint256 tokens, - uint256 shares, - uint256 until - ); - - /** - * @dev Emitted when `delegator` withdrew delegated `tokens` from `indexer`. - */ - event StakeDelegatedWithdrawn( - address indexed indexer, - address indexed delegator, - uint256 tokens - ); - - /** - * @dev Emitted when `indexer` allocated `tokens` amount to `subgraphDeploymentID` - * during `epoch`. - * `allocationID` indexer derived address used to identify the allocation. - * `metadata` additional information related to the allocation. - */ - event AllocationCreated( - address indexed indexer, - bytes32 indexed subgraphDeploymentID, - uint256 epoch, - uint256 tokens, - address indexed allocationID, - bytes32 metadata - ); - - /** - * @dev Emitted when `indexer` collected `tokens` amount in `epoch` for `allocationID`. - * These funds are related to `subgraphDeploymentID`. - * The `from` value is the sender of the collected funds. - */ - event AllocationCollected( - address indexed indexer, - bytes32 indexed subgraphDeploymentID, - uint256 epoch, - uint256 tokens, - address indexed allocationID, - address from, - uint256 curationFees, - uint256 rebateFees - ); - - /** - * @dev Emitted when `indexer` close an allocation in `epoch` for `allocationID`. - * An amount of `tokens` get unallocated from `subgraphDeploymentID`. - * The `effectiveAllocation` are the tokens allocated from creation to closing. - * This event also emits the POI (proof of indexing) submitted by the indexer. - * `isPublic` is true if the sender was someone other than the indexer. 
- */ - event AllocationClosed( - address indexed indexer, - bytes32 indexed subgraphDeploymentID, - uint256 epoch, - uint256 tokens, - address indexed allocationID, - uint256 effectiveAllocation, - address sender, - bytes32 poi, - bool isPublic - ); - - /** - * @dev Emitted when `indexer` claimed a rebate on `subgraphDeploymentID` during `epoch` - * related to the `forEpoch` rebate pool. - * The rebate is for `tokens` amount and `unclaimedAllocationsCount` are left for claim - * in the rebate pool. `delegationFees` collected and sent to delegation pool. - */ - event RebateClaimed( - address indexed indexer, - bytes32 indexed subgraphDeploymentID, - address indexed allocationID, - uint256 epoch, - uint256 forEpoch, - uint256 tokens, - uint256 unclaimedAllocationsCount, - uint256 delegationFees - ); + // -- Events are declared in IStakingBase -- // /** - * @dev Emitted when `caller` set `slasher` address as `allowed` to slash stakes. + * @notice Delegates the current call to the StakingExtension implementation. + * @dev This function does not return to its internal call site, it will return directly to the + * external caller. */ - event SlasherUpdate(address indexed caller, address indexed slasher, bool allowed); + // solhint-disable-next-line payable-fallback, no-complex-fallback + fallback() external { + require(address(this) != _implementation(), "only through proxy"); + // solhint-disable-next-line no-inline-assembly + assembly { + // (a) get free memory pointer + let ptr := mload(0x40) - /** - * @dev Emitted when `caller` set `assetHolder` address as `allowed` to send funds - * to staking contract. - */ - event AssetHolderUpdate(address indexed caller, address indexed assetHolder, bool allowed); + // (b) get address of the implementation + let impl := and(sload(extensionImpl.slot), 0xffffffffffffffffffffffffffffffffffffffff) - /** - * @dev Emitted when `indexer` set `operator` access. 
- */ - event SetOperator(address indexed indexer, address indexed operator, bool allowed); + // (1) copy incoming call data + calldatacopy(ptr, 0, calldatasize()) - /** - * @dev Emitted when `indexer` set an address to receive rewards. - */ - event SetRewardsDestination(address indexed indexer, address indexed destination); + // (2) forward call to logic contract + let result := delegatecall(gas(), impl, ptr, calldatasize(), 0, 0) + let size := returndatasize() - /** - * @dev Check if the caller is the slasher. - */ - modifier onlySlasher() { - require(slashers[msg.sender] == true, "!slasher"); - _; - } + // (3) retrieve return data + returndatacopy(ptr, 0, size) - /** - * @dev Check if the caller is authorized (indexer or operator) - */ - function _isAuth(address _indexer) private view returns (bool) { - return msg.sender == _indexer || isOperator(msg.sender, _indexer) == true; + // (4) forward return data back to caller + switch result + case 0 { + revert(ptr, size) + } + default { + return(ptr, size) + } + } } /** - * @dev Initialize this contract. + * @notice Initialize this contract. 
+ * @param _controller Address of the controller that manages this contract + * @param _minimumIndexerStake Minimum amount of tokens that an indexer must stake + * @param _thawingPeriod Number of epochs that tokens get locked after unstaking + * @param _protocolPercentage Percentage of query fees that are burned as protocol fee (in PPM) + * @param _curationPercentage Percentage of query fees that are given to curators (in PPM) + * @param _channelDisputeEpochs The period in epochs that needs to pass before fees in rebate pool can be claimed + * @param _maxAllocationEpochs The maximum number of epochs that an allocation can be active + * @param _delegationUnbondingPeriod The period in epochs that tokens get locked after undelegating + * @param _delegationRatio The ratio between an indexer's own stake and the delegation they can use + * @param _rebateAlphaNumerator The numerator of the alpha factor used to calculate the rebate + * @param _rebateAlphaDenominator The denominator of the alpha factor used to calculate the rebate + * @param _extensionImpl Address of the StakingExtension implementation */ function initialize( address _controller, @@ -216,8 +102,9 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { uint32 _delegationUnbondingPeriod, uint32 _delegationRatio, uint32 _rebateAlphaNumerator, - uint32 _rebateAlphaDenominator - ) external onlyImpl { + uint32 _rebateAlphaDenominator, + address _extensionImpl + ) external override onlyImpl { Managed._initialize(_controller); // Settings @@ -230,52 +117,62 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { _setChannelDisputeEpochs(_channelDisputeEpochs); _setMaxAllocationEpochs(_maxAllocationEpochs); - _setDelegationUnbondingPeriod(_delegationUnbondingPeriod); - _setDelegationRatio(_delegationRatio); - _setDelegationParametersCooldown(0); - _setDelegationTaxPercentage(0); - _setRebateRatio(_rebateAlphaNumerator, _rebateAlphaDenominator); + + extensionImpl = 
_extensionImpl; + + // solhint-disable-next-line avoid-low-level-calls + (bool success, ) = extensionImpl.delegatecall( + abi.encodeWithSignature( + "initialize(uint32,uint32,uint32,uint32)", + _delegationUnbondingPeriod, + 0, + _delegationRatio, + 0 + ) + ); + require(success, "Extension init failed"); + emit ExtensionImplementationSet(_extensionImpl); } /** - * @dev Set the minimum indexer stake required to. - * @param _minimumIndexerStake Minimum indexer stake + * @notice Set the address of the StakingExtension implementation. + * @dev This function can only be called by the governor. + * @param _extensionImpl Address of the StakingExtension implementation */ - function setMinimumIndexerStake(uint256 _minimumIndexerStake) external override onlyGovernor { - _setMinimumIndexerStake(_minimumIndexerStake); + function setExtensionImpl(address _extensionImpl) external override onlyGovernor { + extensionImpl = _extensionImpl; + emit ExtensionImplementationSet(_extensionImpl); } /** - * @dev Internal: Set the minimum indexer stake required. - * @param _minimumIndexerStake Minimum indexer stake + * @notice Set the address of the counterpart (L1 or L2) staking contract. + * @dev This function can only be called by the governor. + * @param _counterpart Address of the counterpart staking contract in the other chain, without any aliasing. */ - function _setMinimumIndexerStake(uint256 _minimumIndexerStake) private { - require(_minimumIndexerStake > 0, "!minimumIndexerStake"); - minimumIndexerStake = _minimumIndexerStake; - emit ParameterUpdated("minimumIndexerStake"); + function setCounterpartStakingAddress(address _counterpart) external override onlyGovernor { + counterpartStakingAddress = _counterpart; + emit ParameterUpdated("counterpartStakingAddress"); } /** - * @dev Set the thawing period for unstaking. - * @param _thawingPeriod Period in blocks to wait for token withdrawals after unstaking + * @notice Set the minimum stake required to be an indexer. 
+ * @param _minimumIndexerStake Minimum indexer stake */ - function setThawingPeriod(uint32 _thawingPeriod) external override onlyGovernor { - _setThawingPeriod(_thawingPeriod); + function setMinimumIndexerStake(uint256 _minimumIndexerStake) external override onlyGovernor { + _setMinimumIndexerStake(_minimumIndexerStake); } /** - * @dev Internal: Set the thawing period for unstaking. + * @notice Set the thawing period for unstaking. * @param _thawingPeriod Period in blocks to wait for token withdrawals after unstaking */ - function _setThawingPeriod(uint32 _thawingPeriod) private { - require(_thawingPeriod > 0, "!thawingPeriod"); - thawingPeriod = _thawingPeriod; - emit ParameterUpdated("thawingPeriod"); + function setThawingPeriod(uint32 _thawingPeriod) external override onlyGovernor { + _setThawingPeriod(_thawingPeriod); } /** - * @dev Set the curation percentage of query fees sent to curators. + * @notice Set the curation percentage of query fees sent to curators. * @param _percentage Percentage of query fees sent to curators */ function setCurationPercentage(uint32 _percentage) external override onlyGovernor { @@ -283,18 +180,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { } /** - * @dev Internal: Set the curation percentage of query fees sent to curators. - * @param _percentage Percentage of query fees sent to curators - */ - function _setCurationPercentage(uint32 _percentage) private { - // Must be within 0% to 100% (inclusive) - require(_percentage <= MAX_PPM, ">percentage"); - curationPercentage = _percentage; - emit ParameterUpdated("curationPercentage"); - } - - /** - * @dev Set a protocol percentage to burn when collecting query fees. + * @notice Set a protocol percentage to burn when collecting query fees. 
* @param _percentage Percentage of query fees to burn as protocol fee */ function setProtocolPercentage(uint32 _percentage) external override onlyGovernor { @@ -302,18 +188,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { } /** - * @dev Internal: Set a protocol percentage to burn when collecting query fees. - * @param _percentage Percentage of query fees to burn as protocol fee - */ - function _setProtocolPercentage(uint32 _percentage) private { - // Must be within 0% to 100% (inclusive) - require(_percentage <= MAX_PPM, ">percentage"); - protocolPercentage = _percentage; - emit ParameterUpdated("protocolPercentage"); - } - - /** - * @dev Set the period in epochs that need to pass before fees in rebate pool can be claimed. + * @notice Set the period in epochs that need to pass before fees in rebate pool can be claimed. * @param _channelDisputeEpochs Period in epochs */ function setChannelDisputeEpochs(uint32 _channelDisputeEpochs) external override onlyGovernor { @@ -321,17 +196,8 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { } /** - * @dev Internal: Set the period in epochs that need to pass before fees in rebate pool can be claimed. - * @param _channelDisputeEpochs Period in epochs - */ - function _setChannelDisputeEpochs(uint32 _channelDisputeEpochs) private { - require(_channelDisputeEpochs > 0, "!channelDisputeEpochs"); - channelDisputeEpochs = _channelDisputeEpochs; - emit ParameterUpdated("channelDisputeEpochs"); - } - - /** - * @dev Set the max time allowed for indexers stake on allocations. + * @notice Set the max time allowed for indexers to allocate on a subgraph + * before others are allowed to close the allocation. 
* @param _maxAllocationEpochs Allocation duration limit in epochs */ function setMaxAllocationEpochs(uint32 _maxAllocationEpochs) external override onlyGovernor { @@ -339,16 +205,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { } /** - * @dev Internal: Set the max time allowed for indexers stake on allocations. - * @param _maxAllocationEpochs Allocation duration limit in epochs - */ - function _setMaxAllocationEpochs(uint32 _maxAllocationEpochs) private { - maxAllocationEpochs = _maxAllocationEpochs; - emit ParameterUpdated("maxAllocationEpochs"); - } - - /** - * @dev Set the rebate ratio (fees to allocated stake). + * @notice Set the rebate ratio (fees to allocated stake). * @param _alphaNumerator Numerator of `alpha` in the cobb-douglas function * @param _alphaDenominator Denominator of `alpha` in the cobb-douglas function */ @@ -361,176 +218,239 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { } /** - * @dev Set the rebate ratio (fees to allocated stake). - * @param _alphaNumerator Numerator of `alpha` in the cobb-douglas function - * @param _alphaDenominator Denominator of `alpha` in the cobb-douglas function + * @notice Set an address as allowed asset holder. + * @param _assetHolder Address of allowed source for state channel funds + * @param _allowed True if asset holder is allowed */ - function _setRebateRatio(uint32 _alphaNumerator, uint32 _alphaDenominator) private { - require(_alphaNumerator > 0 && _alphaDenominator > 0, "!alpha"); - alphaNumerator = _alphaNumerator; - alphaDenominator = _alphaDenominator; - emit ParameterUpdated("rebateRatio"); + function setAssetHolder(address _assetHolder, bool _allowed) external override onlyGovernor { + require(_assetHolder != address(0), "!assetHolder"); + __assetHolders[_assetHolder] = _allowed; + emit AssetHolderUpdate(msg.sender, _assetHolder, _allowed); } /** - * @dev Set the delegation ratio. 
- * If set to 10 it means the indexer can use up to 10x the indexer staked amount - * from their delegated tokens - * @param _delegationRatio Delegation capacity multiplier + * @notice Authorize or unauthorize an address to be an operator for the caller. + * @param _operator Address to authorize or unauthorize + * @param _allowed Whether the operator is authorized or not */ - function setDelegationRatio(uint32 _delegationRatio) external override onlyGovernor { - _setDelegationRatio(_delegationRatio); + function setOperator(address _operator, bool _allowed) external override { + require(_operator != msg.sender, "operator == sender"); + __operatorAuth[msg.sender][_operator] = _allowed; + emit SetOperator(msg.sender, _operator, _allowed); } /** - * @dev Internal: Set the delegation ratio. - * If set to 10 it means the indexer can use up to 10x the indexer staked amount - * from their delegated tokens - * @param _delegationRatio Delegation capacity multiplier + * @notice Deposit tokens on the indexer's stake. + * The amount staked must be over the minimumIndexerStake. + * @param _tokens Amount of tokens to stake */ - function _setDelegationRatio(uint32 _delegationRatio) private { - delegationRatio = _delegationRatio; - emit ParameterUpdated("delegationRatio"); + function stake(uint256 _tokens) external override { + stakeTo(msg.sender, _tokens); } /** - * @dev Set the delegation parameters for the caller. - * @param _indexingRewardCut Percentage of indexing rewards left for delegators - * @param _queryFeeCut Percentage of query fees left for delegators - * @param _cooldownBlocks Period that need to pass to update delegation parameters + * @notice Unstake tokens from the indexer stake, lock them until the thawing period expires. + * @dev NOTE: The function accepts an amount greater than the currently staked tokens. + * If that happens, it will try to unstake the max amount of tokens it can. 
+ * The reason for this behaviour is to avoid time conditions while the transaction + * is in flight. + * @param _tokens Amount of tokens to unstake */ - function setDelegationParameters( - uint32 _indexingRewardCut, - uint32 _queryFeeCut, - uint32 _cooldownBlocks - ) public override { - _setDelegationParameters(msg.sender, _indexingRewardCut, _queryFeeCut, _cooldownBlocks); - } + function unstake(uint256 _tokens) external override notPartialPaused { + address indexer = msg.sender; + Stakes.Indexer storage indexerStake = __stakes[indexer]; - /** - * @dev Set the delegation parameters for a particular indexer. - * @param _indexer Indexer to set delegation parameters - * @param _indexingRewardCut Percentage of indexing rewards left for delegators - * @param _queryFeeCut Percentage of query fees left for delegators - * @param _cooldownBlocks Period that need to pass to update delegation parameters - */ - function _setDelegationParameters( - address _indexer, - uint32 _indexingRewardCut, - uint32 _queryFeeCut, - uint32 _cooldownBlocks - ) private { - // Incentives must be within bounds - require(_queryFeeCut <= MAX_PPM, ">queryFeeCut"); - require(_indexingRewardCut <= MAX_PPM, ">indexingRewardCut"); + require(indexerStake.tokensStaked > 0, "!stake"); - // Cooldown period set by indexer cannot be below protocol global setting - require(_cooldownBlocks >= delegationParametersCooldown, " 0, "!stake-avail"); - // Verify the cooldown period passed - DelegationPool storage pool = delegationPools[_indexer]; - require( - pool.updatedAtBlock == 0 || - pool.updatedAtBlock.add(uint256(pool.cooldownBlocks)) <= block.number, - "!cooldown" - ); + // Ensure minimum stake + uint256 newStake = indexerStake.tokensSecureStake().sub(tokensToLock); + require(newStake == 0 || newStake >= __minimumIndexerStake, "!minimumIndexerStake"); - // Update delegation params - pool.indexingRewardCut = _indexingRewardCut; - pool.queryFeeCut = _queryFeeCut; - pool.cooldownBlocks = _cooldownBlocks; - 
pool.updatedAtBlock = block.number; + // Before locking more tokens, withdraw any unlocked ones if possible + uint256 tokensToWithdraw = indexerStake.tokensWithdrawable(); + if (tokensToWithdraw > 0) { + _withdraw(indexer); + } - emit DelegationParametersUpdated( - _indexer, - _indexingRewardCut, - _queryFeeCut, - _cooldownBlocks - ); - } + // Update the indexer stake locking tokens + indexerStake.lockTokens(tokensToLock, __thawingPeriod); - /** - * @dev Set the time in blocks an indexer needs to wait to change delegation parameters. - * @param _blocks Number of blocks to set the delegation parameters cooldown period - */ - function setDelegationParametersCooldown(uint32 _blocks) external override onlyGovernor { - _setDelegationParametersCooldown(_blocks); + emit StakeLocked(indexer, indexerStake.tokensLocked, indexerStake.tokensLockedUntil); } /** - * @dev Internal: Set the time in blocks an indexer needs to wait to change delegation parameters. - * @param _blocks Number of blocks to set the delegation parameters cooldown period + * @notice Withdraw indexer tokens once the thawing period has passed. */ - function _setDelegationParametersCooldown(uint32 _blocks) private { - delegationParametersCooldown = _blocks; - emit ParameterUpdated("delegationParametersCooldown"); + function withdraw() external override notPaused { + _withdraw(msg.sender); } /** - * @dev Set the period for undelegation of stake from indexer. - * @param _delegationUnbondingPeriod Period in epochs to wait for token withdrawals after undelegating + * @notice Set the destination where to send rewards for an indexer. + * @param _destination Rewards destination address. 
If set to zero, rewards will be restaked */ - function setDelegationUnbondingPeriod(uint32 _delegationUnbondingPeriod) - external - override - onlyGovernor - { - _setDelegationUnbondingPeriod(_delegationUnbondingPeriod); + function setRewardsDestination(address _destination) external override { + __rewardsDestination[msg.sender] = _destination; + emit SetRewardsDestination(msg.sender, _destination); } /** - * @dev Internal: Set the period for undelegation of stake from indexer. - * @param _delegationUnbondingPeriod Period in epochs to wait for token withdrawals after undelegating + * @notice Allocate available tokens to a subgraph deployment. + * @param _subgraphDeploymentID ID of the SubgraphDeployment where tokens will be allocated + * @param _tokens Amount of tokens to allocate + * @param _allocationID The allocation identifier + * @param _metadata IPFS hash for additional information about the allocation + * @param _proof A 65-bytes Ethereum signed message of `keccak256(indexerAddress,allocationID)` */ - function _setDelegationUnbondingPeriod(uint32 _delegationUnbondingPeriod) private { - require(_delegationUnbondingPeriod > 0, "!delegationUnbondingPeriod"); - delegationUnbondingPeriod = _delegationUnbondingPeriod; - emit ParameterUpdated("delegationUnbondingPeriod"); + function allocate( + bytes32 _subgraphDeploymentID, + uint256 _tokens, + address _allocationID, + bytes32 _metadata, + bytes calldata _proof + ) external override notPaused { + _allocate(msg.sender, _subgraphDeploymentID, _tokens, _allocationID, _metadata, _proof); } /** - * @dev Set a delegation tax percentage to burn when delegated funds are deposited. - * @param _percentage Percentage of delegated tokens to burn as delegation tax + * @notice Allocate available tokens to a subgraph deployment from and indexer's stake. + * The caller must be the indexer or the indexer's operator. + * @param _indexer Indexer address to allocate funds from. 
+ * @param _subgraphDeploymentID ID of the SubgraphDeployment where tokens will be allocated + * @param _tokens Amount of tokens to allocate + * @param _allocationID The allocation identifier + * @param _metadata IPFS hash for additional information about the allocation + * @param _proof A 65-bytes Ethereum signed message of `keccak256(indexerAddress,allocationID)` */ - function setDelegationTaxPercentage(uint32 _percentage) external override onlyGovernor { - _setDelegationTaxPercentage(_percentage); + function allocateFrom( + address _indexer, + bytes32 _subgraphDeploymentID, + uint256 _tokens, + address _allocationID, + bytes32 _metadata, + bytes calldata _proof + ) external override notPaused { + _allocate(_indexer, _subgraphDeploymentID, _tokens, _allocationID, _metadata, _proof); } /** - * @dev Internal: Set a delegation tax percentage to burn when delegated funds are deposited. - * @param _percentage Percentage of delegated tokens to burn as delegation tax + * @notice Close an allocation and free the staked tokens. + * To be eligible for rewards a proof of indexing must be presented. + * Presenting a bad proof is subject to slashable condition. + * To opt out of rewards set _poi to 0x0 + * @param _allocationID The allocation identifier + * @param _poi Proof of indexing submitted for the allocated period */ - function _setDelegationTaxPercentage(uint32 _percentage) private { - // Must be within 0% to 100% (inclusive) - require(_percentage <= MAX_PPM, ">percentage"); - delegationTaxPercentage = _percentage; - emit ParameterUpdated("delegationTaxPercentage"); + function closeAllocation(address _allocationID, bytes32 _poi) external override notPaused { + _closeAllocation(_allocationID, _poi); } /** - * @dev Set or unset an address as allowed slasher. - * @param _slasher Address of the party allowed to slash indexers - * @param _allowed True if slasher is allowed + * @notice Collect query fees from state channels and assign them to an allocation. 
+ * Funds received are only accepted from a valid sender. + * @dev To avoid reverting on the withdrawal from channel flow this function will: + * 1) Accept calls with zero tokens. + * 2) Accept calls after an allocation passed the dispute period, in that case, all + * the received tokens are burned. + * @param _tokens Amount of tokens to collect + * @param _allocationID Allocation where the tokens will be assigned */ - function setSlasher(address _slasher, bool _allowed) external override onlyGovernor { - require(_slasher != address(0), "!slasher"); - slashers[_slasher] = _allowed; - emit SlasherUpdate(msg.sender, _slasher, _allowed); - } + function collect(uint256 _tokens, address _allocationID) external override { + // Allocation identifier validation + require(_allocationID != address(0), "!alloc"); - /** - * @dev Set an address as allowed asset holder. - * @param _assetHolder Address of allowed source for state channel funds - * @param _allowed True if asset holder is allowed + // The contract caller must be an authorized asset holder + require(__assetHolders[msg.sender] == true, "!assetHolder"); + + // Allocation must exist + AllocationState allocState = _getAllocationState(_allocationID); + require(allocState != AllocationState.Null, "!collect"); + + // Get allocation + Allocation storage alloc = __allocations[_allocationID]; + uint256 queryFees = _tokens; + uint256 curationFees = 0; + bytes32 subgraphDeploymentID = alloc.subgraphDeploymentID; + + // Process query fees only if non-zero amount + if (queryFees > 0) { + // Pull tokens to collect from the authorized sender + IGraphToken graphToken = graphToken(); + TokenUtils.pullTokens(graphToken, msg.sender, _tokens); + + // -- Collect protocol tax -- + // If the Allocation is not active or closed we are going to charge a 100% protocol tax + uint256 usedProtocolPercentage = (allocState == AllocationState.Active || + allocState == AllocationState.Closed) + ? 
__protocolPercentage + : MAX_PPM; + uint256 protocolTax = _collectTax(graphToken, queryFees, usedProtocolPercentage); + queryFees = queryFees.sub(protocolTax); + + // -- Collect curation fees -- + // Only if the subgraph deployment is curated + curationFees = _collectCurationFees( + graphToken, + subgraphDeploymentID, + queryFees, + __curationPercentage + ); + queryFees = queryFees.sub(curationFees); + + // Add funds to the allocation + alloc.collectedFees = alloc.collectedFees.add(queryFees); + + // When allocation is closed redirect funds to the rebate pool + // This way we can keep collecting tokens even after the allocation is closed and + // before it gets to the finalized state. + if (allocState == AllocationState.Closed) { + Rebates.Pool storage rebatePool = __rebates[alloc.closedAtEpoch]; + rebatePool.fees = rebatePool.fees.add(queryFees); + } + } + + emit AllocationCollected( + alloc.indexer, + subgraphDeploymentID, + epochManager().currentEpoch(), + _tokens, + _allocationID, + msg.sender, + curationFees, + queryFees + ); + } + + /** + * @notice Claim tokens from the rebate pool. + * @param _allocationID Allocation from where we are claiming tokens + * @param _restake True if restake fees instead of transfer to indexer */ - function setAssetHolder(address _assetHolder, bool _allowed) external override onlyGovernor { - require(_assetHolder != address(0), "!assetHolder"); - assetHolders[_assetHolder] = _allowed; - emit AssetHolderUpdate(msg.sender, _assetHolder, _allowed); + function claim(address _allocationID, bool _restake) external override notPaused { + _claim(_allocationID, _restake); } /** - * @dev Return if allocationID is used. + * @dev Claim tokens from the rebate pool for many allocations. 
+ * @param _allocationID Array of allocations from where we are claiming tokens + * @param _restake True if restake fees instead of transfer to indexer + */ + function claimMany(address[] calldata _allocationID, bool _restake) + external + override + notPaused + { + for (uint256 i = 0; i < _allocationID.length; i++) { + _claim(_allocationID[i], _restake); + } + } + + /** + * @notice Return if allocationID is used. * @param _allocationID Address used as signer by the indexer for an allocation * @return True if allocationID already used */ @@ -539,16 +459,16 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { } /** - * @dev Getter that returns if an indexer has any stake. + * @notice Getter that returns if an indexer has any stake. * @param _indexer Address of the indexer * @return True if indexer has staked tokens */ function hasStake(address _indexer) external view override returns (bool) { - return stakes[_indexer].tokensStaked > 0; + return __stakes[_indexer].tokensStaked > 0; } /** - * @dev Return the allocation by ID. + * @notice Return the allocation by ID. * @param _allocationID Address used as allocation identifier * @return Allocation data */ @@ -558,13 +478,13 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { override returns (Allocation memory) { - return allocations[_allocationID]; + return __allocations[_allocationID]; } /** - * @dev Return the current state of an allocation. - * @param _allocationID Address used as the allocation identifier - * @return AllocationState + * @notice Return the current state of an allocation + * @param _allocationID Allocation identifier + * @return AllocationState enum with the state of the allocation */ function getAllocationState(address _allocationID) external @@ -576,8 +496,8 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { } /** - * @dev Return the total amount of tokens allocated to subgraph. 
- * @param _subgraphDeploymentID Address used as the allocation identifier + * @notice Return the total amount of tokens allocated to subgraph. + * @param _subgraphDeploymentID Deployment ID for the subgraph * @return Total tokens allocated to subgraph */ function getSubgraphAllocatedTokens(bytes32 _subgraphDeploymentID) @@ -586,107 +506,21 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { override returns (uint256) { - return subgraphAllocations[_subgraphDeploymentID]; - } - - /** - * @dev Return the delegation from a delegator to an indexer. - * @param _indexer Address of the indexer where funds have been delegated - * @param _delegator Address of the delegator - * @return Delegation data - */ - function getDelegation(address _indexer, address _delegator) - external - view - override - returns (Delegation memory) - { - return delegationPools[_indexer].delegators[_delegator]; - } - - /** - * @dev Return whether the delegator has delegated to the indexer. - * @param _indexer Address of the indexer where funds have been delegated - * @param _delegator Address of the delegator - * @return True if delegator of indexer - */ - function isDelegator(address _indexer, address _delegator) public view override returns (bool) { - return delegationPools[_indexer].delegators[_delegator].shares > 0; + return __subgraphAllocations[_subgraphDeploymentID]; } /** - * @dev Get the total amount of tokens staked by the indexer. + * @notice Get the total amount of tokens staked by the indexer. * @param _indexer Address of the indexer * @return Amount of tokens staked by the indexer */ function getIndexerStakedTokens(address _indexer) external view override returns (uint256) { - return stakes[_indexer].tokensStaked; - } - - /** - * @dev Get the total amount of tokens available to use in allocations. 
- * This considers the indexer stake and delegated tokens according to delegation ratio - * @param _indexer Address of the indexer - * @return Amount of tokens staked by the indexer - */ - function getIndexerCapacity(address _indexer) public view override returns (uint256) { - Stakes.Indexer memory indexerStake = stakes[_indexer]; - uint256 tokensDelegated = delegationPools[_indexer].tokens; - - uint256 tokensDelegatedCap = indexerStake.tokensSecureStake().mul(uint256(delegationRatio)); - uint256 tokensDelegatedCapacity = MathUtils.min(tokensDelegated, tokensDelegatedCap); - - return indexerStake.tokensAvailableWithDelegation(tokensDelegatedCapacity); - } - - /** - * @dev Returns amount of delegated tokens ready to be withdrawn after unbonding period. - * @param _delegation Delegation of tokens from delegator to indexer - * @return Amount of tokens to withdraw - */ - function getWithdraweableDelegatedTokens(Delegation memory _delegation) - public - view - returns (uint256) - { - // There must be locked tokens and period passed - uint256 currentEpoch = epochManager().currentEpoch(); - if (_delegation.tokensLockedUntil > 0 && currentEpoch >= _delegation.tokensLockedUntil) { - return _delegation.tokensLocked; - } - return 0; - } - - /** - * @dev Authorize or unauthorize an address to be an operator. - * @param _operator Address to authorize - * @param _allowed Whether authorized or not - */ - function setOperator(address _operator, bool _allowed) external override { - require(_operator != msg.sender, "operator == sender"); - operatorAuth[msg.sender][_operator] = _allowed; - emit SetOperator(msg.sender, _operator, _allowed); + return __stakes[_indexer].tokensStaked; } /** - * @dev Return true if operator is allowed for indexer. 
- * @param _operator Address of the operator - * @param _indexer Address of the indexer - */ - function isOperator(address _operator, address _indexer) public view override returns (bool) { - return operatorAuth[_indexer][_operator]; - } - - /** - * @dev Deposit tokens on the indexer stake. - * @param _tokens Amount of tokens to stake - */ - function stake(uint256 _tokens) external override { - stakeTo(msg.sender, _tokens); - } - - /** - * @dev Deposit tokens on the indexer stake. + * @notice Deposit tokens on the Indexer stake, on behalf of the Indexer. + * The amount staked must be over the minimumIndexerStake. * @param _indexer Address of the indexer * @param _tokens Amount of tokens to stake */ @@ -695,7 +529,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { // Ensure minimum stake require( - stakes[_indexer].tokensSecureStake().add(_tokens) >= minimumIndexerStake, + __stakes[_indexer].tokensSecureStake().add(_tokens) >= __minimumIndexerStake, "!minimumIndexerStake" ); @@ -707,349 +541,160 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { } /** - * @dev Unstake tokens from the indexer stake, lock them until thawing period expires. - * NOTE: The function accepts an amount greater than the currently staked tokens. - * If that happens, it will try to unstake the max amount of tokens it can. - * The reason for this behaviour is to avoid time conditions while the transaction - * is in flight. 
- * @param _tokens Amount of tokens to unstake - */ - function unstake(uint256 _tokens) external override notPartialPaused { - address indexer = msg.sender; - Stakes.Indexer storage indexerStake = stakes[indexer]; - - require(indexerStake.tokensStaked > 0, "!stake"); - - // Tokens to lock is capped to the available tokens - uint256 tokensToLock = MathUtils.min(indexerStake.tokensAvailable(), _tokens); - require(tokensToLock > 0, "!stake-avail"); - - // Ensure minimum stake - uint256 newStake = indexerStake.tokensSecureStake().sub(tokensToLock); - require(newStake == 0 || newStake >= minimumIndexerStake, "!minimumIndexerStake"); - - // Before locking more tokens, withdraw any unlocked ones if possible - uint256 tokensToWithdraw = indexerStake.tokensWithdrawable(); - if (tokensToWithdraw > 0) { - _withdraw(indexer); - } - - // Update the indexer stake locking tokens - indexerStake.lockTokens(tokensToLock, thawingPeriod); - - emit StakeLocked(indexer, indexerStake.tokensLocked, indexerStake.tokensLockedUntil); - } - - /** - * @dev Withdraw indexer tokens once the thawing period has passed. - */ - function withdraw() external override notPaused { - _withdraw(msg.sender); - } - - /** - * @dev Set the destination where to send rewards. - * @param _destination Rewards destination address. If set to zero, rewards will be restaked - */ - function setRewardsDestination(address _destination) external override { - rewardsDestination[msg.sender] = _destination; - emit SetRewardsDestination(msg.sender, _destination); - } - - /** - * @dev Slash the indexer stake. Delegated tokens are not subject to slashing. - * Can only be called by the slasher role. 
- * @param _indexer Address of indexer to slash - * @param _tokens Amount of tokens to slash from the indexer stake - * @param _reward Amount of reward tokens to send to a beneficiary - * @param _beneficiary Address of a beneficiary to receive a reward for the slashing - */ - function slash( - address _indexer, - uint256 _tokens, - uint256 _reward, - address _beneficiary - ) external override onlySlasher notPartialPaused { - Stakes.Indexer storage indexerStake = stakes[_indexer]; - - // Only able to slash a non-zero number of tokens - require(_tokens > 0, "!tokens"); - - // Rewards comes from tokens slashed balance - require(_tokens >= _reward, "rewards>slash"); - - // Cannot slash stake of an indexer without any or enough stake - require(indexerStake.tokensStaked > 0, "!stake"); - require(_tokens <= indexerStake.tokensStaked, "slash>stake"); - - // Validate beneficiary of slashed tokens - require(_beneficiary != address(0), "!beneficiary"); - - // Slashing more tokens than freely available (over allocation condition) - // Unlock locked tokens to avoid the indexer to withdraw them - if (_tokens > indexerStake.tokensAvailable() && indexerStake.tokensLocked > 0) { - uint256 tokensOverAllocated = _tokens.sub(indexerStake.tokensAvailable()); - uint256 tokensToUnlock = MathUtils.min(tokensOverAllocated, indexerStake.tokensLocked); - indexerStake.unlockTokens(tokensToUnlock); - } - - // Remove tokens to slash from the stake - indexerStake.release(_tokens); - - // -- Interactions -- - - IGraphToken graphToken = graphToken(); - - // Set apart the reward for the beneficiary and burn remaining slashed stake - TokenUtils.burnTokens(graphToken, _tokens.sub(_reward)); - - // Give the beneficiary a reward for slashing - TokenUtils.pushTokens(graphToken, _beneficiary, _reward); - - emit StakeSlashed(_indexer, _tokens, _reward, _beneficiary); - } - - /** - * @dev Delegate tokens to an indexer. 
- * @param _indexer Address of the indexer to delegate tokens to - * @param _tokens Amount of tokens to delegate - * @return Amount of shares issued of the delegation pool - */ - function delegate(address _indexer, uint256 _tokens) - external - override - notPartialPaused - returns (uint256) - { - address delegator = msg.sender; - - // Transfer tokens to delegate to this contract - TokenUtils.pullTokens(graphToken(), delegator, _tokens); - - // Update state - return _delegate(delegator, _indexer, _tokens); - } - - /** - * @dev Undelegate tokens from an indexer. - * @param _indexer Address of the indexer where tokens had been delegated - * @param _shares Amount of shares to return and undelegate tokens - * @return Amount of tokens returned for the shares of the delegation pool - */ - function undelegate(address _indexer, uint256 _shares) - external - override - notPartialPaused - returns (uint256) - { - return _undelegate(msg.sender, _indexer, _shares); - } - - /** - * @dev Withdraw delegated tokens once the unbonding period has passed. - * @param _indexer Withdraw available tokens delegated to indexer - * @param _delegateToIndexer Re-delegate to indexer address if non-zero, withdraw if zero address + * @notice Set the delegation parameters for the caller. 
+ * @param _indexingRewardCut Percentage of indexing rewards left for the indexer + * @param _queryFeeCut Percentage of query fees left for the indexer + * @param _cooldownBlocks Period that need to pass to update delegation parameters */ - function withdrawDelegated(address _indexer, address _delegateToIndexer) - external - override - notPaused - returns (uint256) - { - return _withdrawDelegated(msg.sender, _indexer, _delegateToIndexer); + function setDelegationParameters( + uint32 _indexingRewardCut, + uint32 _queryFeeCut, + uint32 _cooldownBlocks + ) public override { + _setDelegationParameters(msg.sender, _indexingRewardCut, _queryFeeCut, _cooldownBlocks); } /** - * @dev Allocate available tokens to a subgraph deployment. - * @param _subgraphDeploymentID ID of the SubgraphDeployment where tokens will be allocated - * @param _tokens Amount of tokens to allocate - * @param _allocationID The allocation identifier - * @param _metadata IPFS hash for additional information about the allocation - * @param _proof A 65-bytes Ethereum signed message of `keccak256(indexerAddress,allocationID)` + * @notice Get the total amount of tokens available to use in allocations. + * This considers the indexer stake and delegated tokens according to delegation ratio + * @param _indexer Address of the indexer + * @return Amount of tokens available to allocate including delegation */ - function allocate( - bytes32 _subgraphDeploymentID, - uint256 _tokens, - address _allocationID, - bytes32 _metadata, - bytes calldata _proof - ) external override notPaused { - _allocate(msg.sender, _subgraphDeploymentID, _tokens, _allocationID, _metadata, _proof); - } + function getIndexerCapacity(address _indexer) public view override returns (uint256) { + Stakes.Indexer memory indexerStake = __stakes[_indexer]; + uint256 tokensDelegated = __delegationPools[_indexer].tokens; - /** - * @dev Allocate available tokens to a subgraph deployment. - * @param _indexer Indexer address to allocate funds from. 
- * @param _subgraphDeploymentID ID of the SubgraphDeployment where tokens will be allocated - * @param _tokens Amount of tokens to allocate - * @param _allocationID The allocation identifier - * @param _metadata IPFS hash for additional information about the allocation - * @param _proof A 65-bytes Ethereum signed message of `keccak256(indexerAddress,allocationID)` - */ - function allocateFrom( - address _indexer, - bytes32 _subgraphDeploymentID, - uint256 _tokens, - address _allocationID, - bytes32 _metadata, - bytes calldata _proof - ) external override notPaused { - _allocate(_indexer, _subgraphDeploymentID, _tokens, _allocationID, _metadata, _proof); - } + uint256 tokensDelegatedCap = indexerStake.tokensSecureStake().mul( + uint256(__delegationRatio) + ); + uint256 tokensDelegatedCapacity = MathUtils.min(tokensDelegated, tokensDelegatedCap); - /** - * @dev Close an allocation and free the staked tokens. - * To be eligible for rewards a proof of indexing must be presented. - * Presenting a bad proof is subject to slashable condition. - * To opt out for rewards set _poi to 0x0 - * @param _allocationID The allocation identifier - * @param _poi Proof of indexing submitted for the allocated period - */ - function closeAllocation(address _allocationID, bytes32 _poi) external override notPaused { - _closeAllocation(_allocationID, _poi); + return indexerStake.tokensAvailableWithDelegation(tokensDelegatedCapacity); } /** - * @dev Close multiple allocations and free the staked tokens. - * To be eligible for rewards a proof of indexing must be presented. - * Presenting a bad proof is subject to slashable condition. - * To opt out for rewards set _poi to 0x0 - * @param _requests An array of CloseAllocationRequest + * @notice Return true if operator is allowed for indexer. 
+ * @param _operator Address of the operator + * @param _indexer Address of the indexer + * @return True if operator is allowed for indexer, false otherwise */ - function closeAllocationMany(CloseAllocationRequest[] calldata _requests) - external - override - notPaused - { - for (uint256 i = 0; i < _requests.length; i++) { - _closeAllocation(_requests[i].allocationID, _requests[i].poi); - } + function isOperator(address _operator, address _indexer) public view override returns (bool) { + return __operatorAuth[_indexer][_operator]; } /** - * @dev Close and allocate. This will perform a close and then create a new Allocation - * atomically on the same transaction. - * @param _closingAllocationID The identifier of the allocation to be closed - * @param _poi Proof of indexing submitted for the allocated period - * @param _indexer Indexer address to allocate funds from. - * @param _subgraphDeploymentID ID of the SubgraphDeployment where tokens will be allocated - * @param _tokens Amount of tokens to allocate - * @param _allocationID The allocation identifier - * @param _metadata IPFS hash for additional information about the allocation - * @param _proof A 65-bytes Ethereum signed message of `keccak256(indexerAddress,allocationID)` + * @dev Internal: Set the minimum indexer stake required. 
+ * @param _minimumIndexerStake Minimum indexer stake */ - function closeAndAllocate( - address _closingAllocationID, - bytes32 _poi, - address _indexer, - bytes32 _subgraphDeploymentID, - uint256 _tokens, - address _allocationID, - bytes32 _metadata, - bytes calldata _proof - ) external override notPaused { - _closeAllocation(_closingAllocationID, _poi); - _allocate(_indexer, _subgraphDeploymentID, _tokens, _allocationID, _metadata, _proof); + function _setMinimumIndexerStake(uint256 _minimumIndexerStake) private { + require(_minimumIndexerStake > 0, "!minimumIndexerStake"); + __minimumIndexerStake = _minimumIndexerStake; + emit ParameterUpdated("minimumIndexerStake"); } /** - * @dev Collect query fees from state channels and assign them to an allocation. - * Funds received are only accepted from a valid sender. - * To avoid reverting on the withdrawal from channel flow this function will: - * 1) Accept calls with zero tokens. - * 2) Accept calls after an allocation passed the dispute period, in that case, all - * the received tokens are burned. - * @param _tokens Amount of tokens to collect - * @param _allocationID Allocation where the tokens will be assigned + * @dev Internal: Set the thawing period for unstaking. 
+ * @param _thawingPeriod Period in blocks to wait for token withdrawals after unstaking */ - function collect(uint256 _tokens, address _allocationID) external override { - // Allocation identifier validation - require(_allocationID != address(0), "!alloc"); - - // The contract caller must be an authorized asset holder - require(assetHolders[msg.sender] == true, "!assetHolder"); - - // Allocation must exist - AllocationState allocState = _getAllocationState(_allocationID); - require(allocState != AllocationState.Null, "!collect"); - - // Get allocation - Allocation storage alloc = allocations[_allocationID]; - uint256 queryFees = _tokens; - uint256 curationFees = 0; - bytes32 subgraphDeploymentID = alloc.subgraphDeploymentID; - - // Process query fees only if non-zero amount - if (queryFees > 0) { - // Pull tokens to collect from the authorized sender - IGraphToken graphToken = graphToken(); - TokenUtils.pullTokens(graphToken, msg.sender, _tokens); - - // -- Collect protocol tax -- - // If the Allocation is not active or closed we are going to charge a 100% protocol tax - uint256 usedProtocolPercentage = (allocState == AllocationState.Active || - allocState == AllocationState.Closed) - ? protocolPercentage - : MAX_PPM; - uint256 protocolTax = _collectTax(graphToken, queryFees, usedProtocolPercentage); - queryFees = queryFees.sub(protocolTax); + function _setThawingPeriod(uint32 _thawingPeriod) private { + require(_thawingPeriod > 0, "!thawingPeriod"); + __thawingPeriod = _thawingPeriod; + emit ParameterUpdated("thawingPeriod"); + } - // -- Collect curation fees -- - // Only if the subgraph deployment is curated - curationFees = _collectCurationFees( - graphToken, - subgraphDeploymentID, - queryFees, - curationPercentage - ); - queryFees = queryFees.sub(curationFees); + /** + * @dev Internal: Set the curation percentage of query fees sent to curators. 
+ * @param _percentage Percentage of query fees sent to curators + */ + function _setCurationPercentage(uint32 _percentage) private { + // Must be within 0% to 100% (inclusive) + require(_percentage <= MAX_PPM, ">percentage"); + __curationPercentage = _percentage; + emit ParameterUpdated("curationPercentage"); + } - // Add funds to the allocation - alloc.collectedFees = alloc.collectedFees.add(queryFees); + /** + * @dev Internal: Set a protocol percentage to burn when collecting query fees. + * @param _percentage Percentage of query fees to burn as protocol fee + */ + function _setProtocolPercentage(uint32 _percentage) private { + // Must be within 0% to 100% (inclusive) + require(_percentage <= MAX_PPM, ">percentage"); + __protocolPercentage = _percentage; + emit ParameterUpdated("protocolPercentage"); + } - // When allocation is closed redirect funds to the rebate pool - // This way we can keep collecting tokens even after the allocation is closed and - // before it gets to the finalized state. - if (allocState == AllocationState.Closed) { - Rebates.Pool storage rebatePool = rebates[alloc.closedAtEpoch]; - rebatePool.fees = rebatePool.fees.add(queryFees); - } - } + /** + * @dev Internal: Set the period in epochs that need to pass before fees in rebate pool can be claimed. + * @param _channelDisputeEpochs Period in epochs + */ + function _setChannelDisputeEpochs(uint32 _channelDisputeEpochs) private { + require(_channelDisputeEpochs > 0, "!channelDisputeEpochs"); + __channelDisputeEpochs = _channelDisputeEpochs; + emit ParameterUpdated("channelDisputeEpochs"); + } - emit AllocationCollected( - alloc.indexer, - subgraphDeploymentID, - epochManager().currentEpoch(), - _tokens, - _allocationID, - msg.sender, - curationFees, - queryFees - ); + /** + * @dev Internal: Set the max time allowed for indexers stake on allocations. 
+ * @param _maxAllocationEpochs Allocation duration limit in epochs + */ + function _setMaxAllocationEpochs(uint32 _maxAllocationEpochs) private { + __maxAllocationEpochs = _maxAllocationEpochs; + emit ParameterUpdated("maxAllocationEpochs"); } /** - * @dev Claim tokens from the rebate pool. - * @param _allocationID Allocation from where we are claiming tokens - * @param _restake True if restake fees instead of transfer to indexer + * @dev Set the rebate ratio (fees to allocated stake). + * @param _alphaNumerator Numerator of `alpha` in the cobb-douglas function + * @param _alphaDenominator Denominator of `alpha` in the cobb-douglas function */ - function claim(address _allocationID, bool _restake) external override notPaused { - _claim(_allocationID, _restake); + function _setRebateRatio(uint32 _alphaNumerator, uint32 _alphaDenominator) private { + require(_alphaNumerator > 0 && _alphaDenominator > 0, "!alpha"); + __alphaNumerator = _alphaNumerator; + __alphaDenominator = _alphaDenominator; + emit ParameterUpdated("rebateRatio"); } /** - * @dev Claim tokens from the rebate pool for many allocations. - * @param _allocationID Array of allocations from where we are claiming tokens - * @param _restake True if restake fees instead of transfer to indexer + * @dev Set the delegation parameters for a particular indexer. 
+ * @param _indexer Indexer to set delegation parameters + * @param _indexingRewardCut Percentage of indexing rewards left for delegators + * @param _queryFeeCut Percentage of query fees left for delegators + * @param _cooldownBlocks Period that need to pass to update delegation parameters */ - function claimMany(address[] calldata _allocationID, bool _restake) - external - override - notPaused - { - for (uint256 i = 0; i < _allocationID.length; i++) { - _claim(_allocationID[i], _restake); - } + function _setDelegationParameters( + address _indexer, + uint32 _indexingRewardCut, + uint32 _queryFeeCut, + uint32 _cooldownBlocks + ) private { + // Incentives must be within bounds + require(_queryFeeCut <= MAX_PPM, ">queryFeeCut"); + require(_indexingRewardCut <= MAX_PPM, ">indexingRewardCut"); + + // Cooldown period set by indexer cannot be below protocol global setting + require(_cooldownBlocks >= __delegationParametersCooldown, " 0, "!tokens"); // Return tokens to the indexer @@ -1116,15 +761,13 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { bytes32 digest = ECDSA.toEthSignedMessageHash(messageHash); require(ECDSA.recover(digest, _proof) == _allocationID, "!proof"); + require( + __stakes[_indexer].tokensSecureStake() >= __minimumIndexerStake, + "!minimumIndexerStake" + ); if (_tokens > 0) { // Needs to have free capacity not used for other purposes to allocate require(getIndexerCapacity(_indexer) >= _tokens, "!capacity"); - } else { - // Allocating zero-tokens still needs to comply with stake requirements - require( - stakes[_indexer].tokensSecureStake() >= minimumIndexerStake, - "!minimumIndexerStake" - ); } // Creates an allocation @@ -1140,18 +783,18 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { 0, // Initialize effective allocation (_tokens > 0) ? 
_updateRewards(_subgraphDeploymentID) : 0 // Initialize accumulated rewards per stake allocated ); - allocations[_allocationID] = alloc; + __allocations[_allocationID] = alloc; // -- Rewards Distribution -- // Process non-zero-allocation rewards tracking if (_tokens > 0) { // Mark allocated tokens as used - stakes[_indexer].allocate(alloc.tokens); + __stakes[_indexer].allocate(alloc.tokens); // Track total allocations per subgraph // Used for rewards calculations - subgraphAllocations[alloc.subgraphDeploymentID] = subgraphAllocations[ + __subgraphAllocations[alloc.subgraphDeploymentID] = __subgraphAllocations[ alloc.subgraphDeploymentID ].add(alloc.tokens); } @@ -1177,7 +820,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { require(allocState == AllocationState.Active, "!active"); // Get allocation - Allocation memory alloc = allocations[_allocationID]; + Allocation memory alloc = __allocations[_allocationID]; // Validate that an allocation cannot be closed before one epoch alloc.closedAtEpoch = epochManager().currentEpoch(); @@ -1189,28 +832,28 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { // - After maxAllocationEpochs passed // - When the allocation is for non-zero amount of tokens bool isIndexer = _isAuth(alloc.indexer); - if (epochs <= maxAllocationEpochs || alloc.tokens == 0) { + if (epochs <= __maxAllocationEpochs || alloc.tokens == 0) { require(isIndexer, "!auth"); } // Close the allocation and start counting a period to settle remaining payments from // state channels. 
- allocations[_allocationID].closedAtEpoch = alloc.closedAtEpoch; + __allocations[_allocationID].closedAtEpoch = alloc.closedAtEpoch; // -- Rebate Pool -- // Calculate effective allocation for the amount of epochs it remained allocated alloc.effectiveAllocation = _getEffectiveAllocation( - maxAllocationEpochs, + __maxAllocationEpochs, alloc.tokens, epochs ); - allocations[_allocationID].effectiveAllocation = alloc.effectiveAllocation; + __allocations[_allocationID].effectiveAllocation = alloc.effectiveAllocation; // Account collected fees and effective allocation in rebate pool for the epoch - Rebates.Pool storage rebatePool = rebates[alloc.closedAtEpoch]; + Rebates.Pool storage rebatePool = __rebates[alloc.closedAtEpoch]; if (!rebatePool.exists()) { - rebatePool.init(alphaNumerator, alphaDenominator); + rebatePool.init(__alphaNumerator, __alphaDenominator); } rebatePool.addToPool(alloc.collectedFees, alloc.effectiveAllocation); @@ -1226,11 +869,11 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { } // Free allocated tokens from use - stakes[alloc.indexer].unallocate(alloc.tokens); + __stakes[alloc.indexer].unallocate(alloc.tokens); // Track total allocations per subgraph // Used for rewards calculations - subgraphAllocations[alloc.subgraphDeploymentID] = subgraphAllocations[ + __subgraphAllocations[alloc.subgraphDeploymentID] = __subgraphAllocations[ alloc.subgraphDeploymentID ].sub(alloc.tokens); } @@ -1259,13 +902,13 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { require(allocState == AllocationState.Finalized, "!finalized"); // Get allocation - Allocation memory alloc = allocations[_allocationID]; + Allocation memory alloc = __allocations[_allocationID]; // Only the indexer or operator can decide if to restake bool restake = _isAuth(alloc.indexer) ? 
_restake : false; // Process rebate reward - Rebates.Pool storage rebatePool = rebates[alloc.closedAtEpoch]; + Rebates.Pool storage rebatePool = __rebates[alloc.closedAtEpoch]; uint256 tokensToClaim = rebatePool.redeem(alloc.collectedFees, alloc.effectiveAllocation); // Add delegation rewards to the delegation pool @@ -1275,12 +918,12 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { // Purge allocation data except for: // - indexer: used in disputes and to avoid reusing an allocationID // - subgraphDeploymentID: used in disputes - allocations[_allocationID].tokens = 0; - allocations[_allocationID].createdAtEpoch = 0; // This avoid collect(), close() and claim() to be called - allocations[_allocationID].closedAtEpoch = 0; - allocations[_allocationID].collectedFees = 0; - allocations[_allocationID].effectiveAllocation = 0; - allocations[_allocationID].accRewardsPerAllocatedToken = 0; + __allocations[_allocationID].tokens = 0; + __allocations[_allocationID].createdAtEpoch = 0; // This avoid collect(), close() and claim() to be called + __allocations[_allocationID].closedAtEpoch = 0; + __allocations[_allocationID].collectedFees = 0; + __allocations[_allocationID].effectiveAllocation = 0; + __allocations[_allocationID].accRewardsPerAllocatedToken = 0; // -- Interactions -- @@ -1289,7 +932,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { // When all allocations processed then burn unclaimed fees and prune rebate pool if (rebatePool.unclaimedAllocationsCount == 0) { TokenUtils.burnTokens(graphToken, rebatePool.unclaimedFees()); - delete rebates[alloc.closedAtEpoch]; + delete __rebates[alloc.closedAtEpoch]; } // When there are tokens to claim from the rebate pool, transfer or restake @@ -1307,139 +950,6 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { ); } - /** - * @dev Delegate tokens to an indexer. 
- * @param _delegator Address of the delegator - * @param _indexer Address of the indexer to delegate tokens to - * @param _tokens Amount of tokens to delegate - * @return Amount of shares issued of the delegation pool - */ - function _delegate( - address _delegator, - address _indexer, - uint256 _tokens - ) private returns (uint256) { - // Only delegate a non-zero amount of tokens - require(_tokens > 0, "!tokens"); - // Only delegate to non-empty address - require(_indexer != address(0), "!indexer"); - // Only delegate to staked indexer - require(stakes[_indexer].tokensStaked > 0, "!stake"); - - // Get the delegation pool of the indexer - DelegationPool storage pool = delegationPools[_indexer]; - Delegation storage delegation = pool.delegators[_delegator]; - - // Collect delegation tax - uint256 delegationTax = _collectTax(graphToken(), _tokens, delegationTaxPercentage); - uint256 delegatedTokens = _tokens.sub(delegationTax); - - // Calculate shares to issue - uint256 shares = (pool.tokens == 0) - ? delegatedTokens - : delegatedTokens.mul(pool.shares).div(pool.tokens); - require(shares > 0, "!shares"); - - // Update the delegation pool - pool.tokens = pool.tokens.add(delegatedTokens); - pool.shares = pool.shares.add(shares); - - // Update the individual delegation - delegation.shares = delegation.shares.add(shares); - - emit StakeDelegated(_indexer, _delegator, delegatedTokens, shares); - - return shares; - } - - /** - * @dev Undelegate tokens from an indexer. 
- * @param _delegator Address of the delegator - * @param _indexer Address of the indexer where tokens had been delegated - * @param _shares Amount of shares to return and undelegate tokens - * @return Amount of tokens returned for the shares of the delegation pool - */ - function _undelegate( - address _delegator, - address _indexer, - uint256 _shares - ) private returns (uint256) { - // Can only undelegate a non-zero amount of shares - require(_shares > 0, "!shares"); - - // Get the delegation pool of the indexer - DelegationPool storage pool = delegationPools[_indexer]; - Delegation storage delegation = pool.delegators[_delegator]; - - // Delegator need to have enough shares in the pool to undelegate - require(delegation.shares >= _shares, "!shares-avail"); - - // Withdraw tokens if available - if (getWithdraweableDelegatedTokens(delegation) > 0) { - _withdrawDelegated(_delegator, _indexer, address(0)); - } - - // Calculate tokens to get in exchange for the shares - uint256 tokens = _shares.mul(pool.tokens).div(pool.shares); - - // Update the delegation pool - pool.tokens = pool.tokens.sub(tokens); - pool.shares = pool.shares.sub(_shares); - - // Update the delegation - delegation.shares = delegation.shares.sub(_shares); - delegation.tokensLocked = delegation.tokensLocked.add(tokens); - delegation.tokensLockedUntil = epochManager().currentEpoch().add(delegationUnbondingPeriod); - - emit StakeDelegatedLocked( - _indexer, - _delegator, - tokens, - _shares, - delegation.tokensLockedUntil - ); - - return tokens; - } - - /** - * @dev Withdraw delegated tokens once the unbonding period has passed. 
- * @param _delegator Delegator that is withdrawing tokens - * @param _indexer Withdraw available tokens delegated to indexer - * @param _delegateToIndexer Re-delegate to indexer address if non-zero, withdraw if zero address - */ - function _withdrawDelegated( - address _delegator, - address _indexer, - address _delegateToIndexer - ) private returns (uint256) { - // Get the delegation pool of the indexer - DelegationPool storage pool = delegationPools[_indexer]; - Delegation storage delegation = pool.delegators[_delegator]; - - // Validation - uint256 tokensToWithdraw = getWithdraweableDelegatedTokens(delegation); - require(tokensToWithdraw > 0, "!tokens"); - - // Reset lock - delegation.tokensLocked = 0; - delegation.tokensLockedUntil = 0; - - emit StakeDelegatedWithdrawn(_indexer, _delegator, tokensToWithdraw); - - // -- Interactions -- - - if (_delegateToIndexer != address(0)) { - // Re-delegate tokens to a new indexer - _delegate(_delegator, _delegateToIndexer, tokensToWithdraw); - } else { - // Return tokens to the delegator - TokenUtils.pushTokens(graphToken(), _delegator, tokensToWithdraw); - } - - return tokensToWithdraw; - } - /** * @dev Collect the delegation rewards for query fees. * This function will assign the collected fees to the delegation pool. 
@@ -1452,7 +962,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { returns (uint256) { uint256 delegationRewards = 0; - DelegationPool storage pool = delegationPools[_indexer]; + DelegationPool storage pool = __delegationPools[_indexer]; if (pool.tokens > 0 && pool.queryFeeCut < MAX_PPM) { uint256 indexerCut = uint256(pool.queryFeeCut).mul(_tokens).div(MAX_PPM); delegationRewards = _tokens.sub(indexerCut); @@ -1473,7 +983,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { returns (uint256) { uint256 delegationRewards = 0; - DelegationPool storage pool = delegationPools[_indexer]; + DelegationPool storage pool = __delegationPools[_indexer]; if (pool.tokens > 0 && pool.indexingRewardCut < MAX_PPM) { uint256 indexerCut = uint256(pool.indexingRewardCut).mul(_tokens).div(MAX_PPM); delegationRewards = _tokens.sub(indexerCut); @@ -1535,52 +1045,10 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { return tax; } - /** - * @dev Return the current state of an allocation - * @param _allocationID Allocation identifier - * @return AllocationState - */ - function _getAllocationState(address _allocationID) private view returns (AllocationState) { - Allocation storage alloc = allocations[_allocationID]; - - if (alloc.indexer == address(0)) { - return AllocationState.Null; - } - if (alloc.createdAtEpoch == 0) { - return AllocationState.Claimed; - } - - uint256 closedAtEpoch = alloc.closedAtEpoch; - if (closedAtEpoch == 0) { - return AllocationState.Active; - } - - uint256 epochs = epochManager().epochsSince(closedAtEpoch); - if (epochs >= channelDisputeEpochs) { - return AllocationState.Finalized; - } - return AllocationState.Closed; - } - - /** - * @dev Get the effective stake allocation considering epochs from allocation to closing. 
- * @param _maxAllocationEpochs Max amount of epochs to cap the allocated stake - * @param _tokens Amount of tokens allocated - * @param _numEpochs Number of epochs that passed from allocation to closing - * @return Effective allocated tokens across epochs - */ - function _getEffectiveAllocation( - uint256 _maxAllocationEpochs, - uint256 _tokens, - uint256 _numEpochs - ) private pure returns (uint256) { - bool shouldCap = _maxAllocationEpochs > 0 && _numEpochs > _maxAllocationEpochs; - return _tokens.mul((shouldCap) ? _maxAllocationEpochs : _numEpochs); - } - /** * @dev Triggers an update of rewards due to a change in allocations. * @param _subgraphDeploymentID Subgraph deployment updated + * @return Accumulated rewards per allocated token for the subgraph deployment */ function _updateRewards(bytes32 _subgraphDeploymentID) private returns (uint256) { IRewardsManager rewardsManager = rewardsManager(); @@ -1593,6 +1061,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { /** * @dev Assign rewards for the closed allocation to indexer and delegators. * @param _allocationID Allocation + * @param _indexer Address of the indexer that did the allocation */ function _distributeRewards(address _allocationID, address _indexer) private { IRewardsManager rewardsManager = rewardsManager(); @@ -1617,7 +1086,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { graphToken(), indexerRewards, _indexer, - rewardsDestination[_indexer] == address(0) + __rewardsDestination[_indexer] == address(0) ); } @@ -1641,7 +1110,7 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { _stake(_beneficiary, _amount); } else { // Transfer funds to the beneficiary's designated rewards destination if set - address destination = rewardsDestination[_beneficiary]; + address destination = __rewardsDestination[_beneficiary]; TokenUtils.pushTokens( _graphToken, destination == address(0) ? 
_beneficiary : destination, @@ -1649,4 +1118,57 @@ contract Staking is StakingV2Storage, GraphUpgradeable, IStaking, Multicall { ); } } + + /** + * @dev Check if the caller is authorized to operate on behalf of + * an indexer (i.e. the caller is the indexer or an operator) + * @param _indexer Indexer address + * @return True if the caller is authorized to operate on behalf of the indexer + */ + function _isAuth(address _indexer) private view returns (bool) { + return msg.sender == _indexer || isOperator(msg.sender, _indexer) == true; + } + + /** + * @dev Return the current state of an allocation + * @param _allocationID Allocation identifier + * @return AllocationState enum with the state of the allocation + */ + function _getAllocationState(address _allocationID) private view returns (AllocationState) { + Allocation storage alloc = __allocations[_allocationID]; + + if (alloc.indexer == address(0)) { + return AllocationState.Null; + } + if (alloc.createdAtEpoch == 0) { + return AllocationState.Claimed; + } + + uint256 closedAtEpoch = alloc.closedAtEpoch; + if (closedAtEpoch == 0) { + return AllocationState.Active; + } + + uint256 epochs = epochManager().epochsSince(closedAtEpoch); + if (epochs >= __channelDisputeEpochs) { + return AllocationState.Finalized; + } + return AllocationState.Closed; + } + + /** + * @dev Get the effective stake allocation considering epochs from allocation to closing. + * @param _maxAllocationEpochs Max amount of epochs to cap the allocated stake + * @param _tokens Amount of tokens allocated + * @param _numEpochs Number of epochs that passed from allocation to closing + * @return Effective allocated tokens across epochs + */ + function _getEffectiveAllocation( + uint256 _maxAllocationEpochs, + uint256 _tokens, + uint256 _numEpochs + ) private pure returns (uint256) { + bool shouldCap = _maxAllocationEpochs > 0 && _numEpochs > _maxAllocationEpochs; + return _tokens.mul((shouldCap) ? 
_maxAllocationEpochs : _numEpochs); + } } diff --git a/contracts/staking/StakingExtension.sol b/contracts/staking/StakingExtension.sol new file mode 100644 index 000000000..8b0657f19 --- /dev/null +++ b/contracts/staking/StakingExtension.sol @@ -0,0 +1,689 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6; +pragma abicoder v2; + +import { SafeMath } from "@openzeppelin/contracts/math/SafeMath.sol"; +import { StakingV3Storage } from "./StakingStorage.sol"; +import { IStakingExtension } from "./IStakingExtension.sol"; +import { TokenUtils } from "../utils/TokenUtils.sol"; +import { IGraphToken } from "../token/IGraphToken.sol"; +import { GraphUpgradeable } from "../upgrades/GraphUpgradeable.sol"; +import { Rebates } from "./libs/Rebates.sol"; +import { Stakes } from "./libs/Stakes.sol"; +import { IStakingData } from "./IStakingData.sol"; +import { MathUtils } from "./libs/MathUtils.sol"; + +/** + * @title StakingExtension contract + * @dev This contract provides the logic to manage delegations and other Staking + * extension features (e.g. storage getters). It is meant to be called through delegatecall from the + * Staking contract, and is only kept separate to keep the Staking contract size + * within limits. + */ +contract StakingExtension is StakingV3Storage, GraphUpgradeable, IStakingExtension { + using SafeMath for uint256; + using Stakes for Stakes.Indexer; + + /// @dev 100% in parts per million + uint32 private constant MAX_PPM = 1000000; + + /** + * @dev Check if the caller is the slasher. + */ + modifier onlySlasher() { + require(__slashers[msg.sender] == true, "!slasher"); + _; + } + + /** + * @notice Initialize the StakingExtension contract + * @dev This function is meant to be delegatecalled from the Staking contract's + * initialize() function, so it uses the same access control check to ensure it is + * being called by the Staking implementation as part of the proxy upgrade process. 
+ * @param _delegationUnbondingPeriod Delegation unbonding period in blocks + * @param _cooldownBlocks Minimum time between changes to delegation parameters, in blocks + * @param _delegationRatio Delegation capacity multiplier (e.g. 10 means 10x the indexer stake) + * @param _delegationTaxPercentage Percentage of delegated tokens to burn as delegation tax, expressed in parts per million + */ + function initialize( + uint32 _delegationUnbondingPeriod, + uint32 _cooldownBlocks, + uint32 _delegationRatio, + uint32 _delegationTaxPercentage + ) external onlyImpl { + _setDelegationUnbondingPeriod(_delegationUnbondingPeriod); + _setDelegationParametersCooldown(_cooldownBlocks); + _setDelegationRatio(_delegationRatio); + _setDelegationTaxPercentage(_delegationTaxPercentage); + } + + /** + * @notice Set a delegation tax percentage to burn when delegated funds are deposited. + * @dev This function is only callable by the governor + * @param _percentage Percentage of delegated tokens to burn as delegation tax, expressed in parts per million + */ + function setDelegationTaxPercentage(uint32 _percentage) external override onlyGovernor { + _setDelegationTaxPercentage(_percentage); + } + + /** + * @notice Set the delegation ratio. + * If set to 10 it means the indexer can use up to 10x the indexer staked amount + * from their delegated tokens + * @dev This function is only callable by the governor + * @param _delegationRatio Delegation capacity multiplier + */ + function setDelegationRatio(uint32 _delegationRatio) external override onlyGovernor { + _setDelegationRatio(_delegationRatio); + } + + /** + * @notice Set the minimum time in blocks an indexer needs to wait to change delegation parameters. + * Indexers can set a custom amount time for their own cooldown, but it must be greater than this. 
+ * @dev This function is only callable by the governor + * @param _blocks Number of blocks to set the delegation parameters cooldown period + */ + function setDelegationParametersCooldown(uint32 _blocks) external override onlyGovernor { + _setDelegationParametersCooldown(_blocks); + } + + /** + * @notice Set the time, in epochs, a Delegator needs to wait to withdraw tokens after undelegating. + * @dev This function is only callable by the governor + * @param _delegationUnbondingPeriod Period in epochs to wait for token withdrawals after undelegating + */ + function setDelegationUnbondingPeriod(uint32 _delegationUnbondingPeriod) + external + override + onlyGovernor + { + _setDelegationUnbondingPeriod(_delegationUnbondingPeriod); + } + + /** + * @notice Set or unset an address as allowed slasher. + * @param _slasher Address of the party allowed to slash indexers + * @param _allowed True if slasher is allowed + */ + function setSlasher(address _slasher, bool _allowed) external override onlyGovernor { + require(_slasher != address(0), "!slasher"); + __slashers[_slasher] = _allowed; + emit SlasherUpdate(msg.sender, _slasher, _allowed); + } + + /** + * @notice Delegate tokens to an indexer. + * @param _indexer Address of the indexer to which tokens are delegated + * @param _tokens Amount of tokens to delegate + * @return Amount of shares issued from the delegation pool + */ + function delegate(address _indexer, uint256 _tokens) + external + override + notPartialPaused + returns (uint256) + { + address delegator = msg.sender; + + // Transfer tokens to delegate to this contract + TokenUtils.pullTokens(graphToken(), delegator, _tokens); + + // Update state + return _delegate(delegator, _indexer, _tokens); + } + + /** + * @notice Undelegate tokens from an indexer. Tokens will be locked for the unbonding period. 
+ * @param _indexer Address of the indexer to which tokens had been delegated + * @param _shares Amount of shares to return and undelegate tokens + * @return Amount of tokens returned for the shares of the delegation pool + */ + function undelegate(address _indexer, uint256 _shares) + external + override + notPartialPaused + returns (uint256) + { + return _undelegate(msg.sender, _indexer, _shares); + } + + /** + * @notice Withdraw undelegated tokens once the unbonding period has passed, and optionally + * re-delegate to a new indexer. + * @param _indexer Withdraw available tokens delegated to indexer + * @param _newIndexer Re-delegate to indexer address if non-zero, withdraw if zero address + */ + function withdrawDelegated(address _indexer, address _newIndexer) + external + override + notPaused + returns (uint256) + { + return _withdrawDelegated(msg.sender, _indexer, _newIndexer); + } + + /** + * @notice Slash the indexer stake. Delegated tokens are not subject to slashing. + * @dev Can only be called by the slasher role. 
+ * @param _indexer Address of indexer to slash + * @param _tokens Amount of tokens to slash from the indexer stake + * @param _reward Amount of reward tokens to send to a beneficiary + * @param _beneficiary Address of a beneficiary to receive a reward for the slashing + */ + function slash( + address _indexer, + uint256 _tokens, + uint256 _reward, + address _beneficiary + ) external override onlySlasher notPartialPaused { + Stakes.Indexer storage indexerStake = __stakes[_indexer]; + + // Only able to slash a non-zero number of tokens + require(_tokens > 0, "!tokens"); + + // Rewards comes from tokens slashed balance + require(_tokens >= _reward, "rewards>slash"); + + // Cannot slash stake of an indexer without any or enough stake + require(indexerStake.tokensStaked > 0, "!stake"); + require(_tokens <= indexerStake.tokensStaked, "slash>stake"); + + // Validate beneficiary of slashed tokens + require(_beneficiary != address(0), "!beneficiary"); + + // Slashing more tokens than freely available (over allocation condition) + // Unlock locked tokens to avoid the indexer to withdraw them + if (_tokens > indexerStake.tokensAvailable() && indexerStake.tokensLocked > 0) { + uint256 tokensOverAllocated = _tokens.sub(indexerStake.tokensAvailable()); + uint256 tokensToUnlock = MathUtils.min(tokensOverAllocated, indexerStake.tokensLocked); + indexerStake.unlockTokens(tokensToUnlock); + } + + // Remove tokens to slash from the stake + indexerStake.release(_tokens); + + // -- Interactions -- + + IGraphToken graphToken = graphToken(); + + // Set apart the reward for the beneficiary and burn remaining slashed stake + TokenUtils.burnTokens(graphToken, _tokens.sub(_reward)); + + // Give the beneficiary a reward for slashing + TokenUtils.pushTokens(graphToken, _beneficiary, _reward); + + emit StakeSlashed(_indexer, _tokens, _reward, _beneficiary); + } + + /** + * @notice Return the delegation from a delegator to an indexer. 
+ * @param _indexer Address of the indexer where funds have been delegated + * @param _delegator Address of the delegator + * @return Delegation data + */ + function getDelegation(address _indexer, address _delegator) + external + view + override + returns (Delegation memory) + { + return __delegationPools[_indexer].delegators[_delegator]; + } + + /** + * @notice Getter for the delegationRatio, i.e. the delegation capacity multiplier: + * If delegation ratio is 100, and an Indexer has staked 5 GRT, + * then they can use up to 500 GRT from the delegated stake + * @return Delegation ratio + */ + function delegationRatio() external view override returns (uint32) { + return __delegationRatio; + } + + /** + * @notice Getter for delegationParametersCooldown: + * Minimum time in blocks an indexer needs to wait to change delegation parameters + * @return Delegation parameters cooldown in blocks + */ + function delegationParametersCooldown() external view override returns (uint32) { + return __delegationParametersCooldown; + } + + /** + * @notice Getter for delegationUnbondingPeriod: + * Time in epochs a delegator needs to wait to withdraw delegated stake + * @return Delegation unbonding period in epochs + */ + function delegationUnbondingPeriod() external view override returns (uint32) { + return __delegationUnbondingPeriod; + } + + /** + * @notice Getter for delegationTaxPercentage: + * Percentage of tokens to tax a delegation deposit, expressed in parts per million + * @return Delegation tax percentage in parts per million + */ + function delegationTaxPercentage() external view override returns (uint32) { + return __delegationTaxPercentage; + } + + /** + * @notice Getter for delegationPools[_indexer]: + * gets the delegation pool structure for a particular indexer. 
+ * @param _indexer Address of the indexer for which to query the delegation pool + * @return Delegation pool as a DelegationPoolReturn struct + */ + function delegationPools(address _indexer) + external + view + override + returns (DelegationPoolReturn memory) + { + DelegationPool storage pool = __delegationPools[_indexer]; + return + DelegationPoolReturn( + pool.cooldownBlocks, // Blocks to wait before updating parameters + pool.indexingRewardCut, // in PPM + pool.queryFeeCut, // in PPM + pool.updatedAtBlock, // Block when the pool was last updated + pool.tokens, // Total tokens as pool reserves + pool.shares // Total shares minted in the pool + ); + } + + /** + * @notice Getter for rewardsDestination[_indexer]: + * returns the address where the indexer's rewards are sent. + * @param _indexer The indexer address for which to query the rewards destination + * @return The address where the indexer's rewards are sent, zero if none is set in which case rewards are re-staked + */ + function rewardsDestination(address _indexer) external view override returns (address) { + return __rewardsDestination[_indexer]; + } + + /** + * @notice Getter for assetHolders[_maybeAssetHolder]: + * returns true if the address is an asset holder, i.e. an entity that can collect + * query fees into the Staking contract. + * @param _maybeAssetHolder The address that may or may not be an asset holder + * @return True if the address is an asset holder + */ + function assetHolders(address _maybeAssetHolder) external view override returns (bool) { + return __assetHolders[_maybeAssetHolder]; + } + + /** + * @notice Getter for operatorAuth[_indexer][_maybeOperator]: + * returns true if the operator is authorized to operate on behalf of the indexer. 
+ * @param _indexer The indexer address for which to query authorization + * @param _maybeOperator The address that may or may not be an operator + * @return True if the operator is authorized to operate on behalf of the indexer + */ + function operatorAuth(address _indexer, address _maybeOperator) + external + view + override + returns (bool) + { + return __operatorAuth[_indexer][_maybeOperator]; + } + + /** + * @notice Getter for subgraphAllocations[_subgraphDeploymentId]: + * returns the amount of tokens allocated to a subgraph deployment. + * @param _subgraphDeploymentId The subgraph deployment for which to query the allocations + * @return The amount of tokens allocated to the subgraph deployment + */ + function subgraphAllocations(bytes32 _subgraphDeploymentId) + external + view + override + returns (uint256) + { + return __subgraphAllocations[_subgraphDeploymentId]; + } + + /** + * @notice Getter for rebates[_epoch]: + * gets the rebate pool for a particular epoch. + * @param _epoch Epoch for which to query the rebate pool + * @return Rebate pool for the specified epoch, as a Rebates.Pool struct + */ + function rebates(uint256 _epoch) external view override returns (Rebates.Pool memory) { + return __rebates[_epoch]; + } + + /** + * @notice Getter for slashers[_maybeSlasher]: + * returns true if the address is a slasher, i.e. an entity that can slash indexers + * @param _maybeSlasher Address for which to check the slasher role + * @return True if the address is a slasher + */ + function slashers(address _maybeSlasher) external view override returns (bool) { + return __slashers[_maybeSlasher]; + } + + /** + * @notice Getter for minimumIndexerStake: the minimum + * amount of GRT that an indexer needs to stake. 
+ * @return Minimum indexer stake in GRT + */ + function minimumIndexerStake() external view override returns (uint256) { + return __minimumIndexerStake; + } + + /** + * @notice Getter for thawingPeriod: the time in blocks an + * indexer needs to wait to unstake tokens. + * @return Thawing period in blocks + */ + function thawingPeriod() external view override returns (uint32) { + return __thawingPeriod; + } + + /** + * @notice Getter for curationPercentage: the percentage of + * query fees that are distributed to curators. + * @return Curation percentage in parts per million + */ + function curationPercentage() external view override returns (uint32) { + return __curationPercentage; + } + + /** + * @notice Getter for protocolPercentage: the percentage of + * query fees that are burned as protocol fees. + * @return Protocol percentage in parts per million + */ + function protocolPercentage() external view override returns (uint32) { + return __protocolPercentage; + } + + /** + * @notice Getter for channelDisputeEpochs: the time in epochs + * between closing an allocation and the moment it becomes finalized so + * query fees can be claimed. + * @return Channel dispute period in epochs + */ + function channelDisputeEpochs() external view override returns (uint32) { + return __channelDisputeEpochs; + } + + /** + * @notice Getter for maxAllocationEpochs: the maximum time in epochs + * that an allocation can be open before anyone is allowed to close it. This + * also caps the effective allocation when sending the allocation's query fees + * to the rebate pool. + * @return Maximum allocation period in epochs + */ + function maxAllocationEpochs() external view override returns (uint32) { + return __maxAllocationEpochs; + } + + /** + * @notice Getter for alphaNumerator: the numerator of the Cobb-Douglas + * rebate ratio. 
+ * @return Rebate ratio numerator + */ + function alphaNumerator() external view override returns (uint32) { + return __alphaNumerator; + } + + /** + * @notice Getter for alphaDenominator: the denominator of the Cobb-Douglas + * rebate ratio. + * @return Rebate ratio denominator + */ + function alphaDenominator() external view override returns (uint32) { + return __alphaDenominator; + } + + /** + * @notice Getter for stakes[_indexer]: + * gets the stake information for an indexer as a Stakes.Indexer struct. + * @param _indexer Indexer address for which to query the stake information + * @return Stake information for the specified indexer, as a Stakes.Indexer struct + */ + function stakes(address _indexer) external view override returns (Stakes.Indexer memory) { + return __stakes[_indexer]; + } + + /** + * @notice Getter for allocations[_allocationID]: + * gets an allocation's information as an IStakingData.Allocation struct. + * @param _allocationID Allocation ID for which to query the allocation information + * @return The specified allocation, as an IStakingData.Allocation struct + */ + function allocations(address _allocationID) + external + view + override + returns (IStakingData.Allocation memory) + { + return __allocations[_allocationID]; + } + + /** + * @notice Return whether the delegator has delegated to the indexer. + * @param _indexer Address of the indexer where funds have been delegated + * @param _delegator Address of the delegator + * @return True if delegator has tokens delegated to the indexer + */ + function isDelegator(address _indexer, address _delegator) public view override returns (bool) { + return __delegationPools[_indexer].delegators[_delegator].shares > 0; + } + + /** + * @notice Returns amount of delegated tokens ready to be withdrawn after unbonding period. 
+ * @param _delegation Delegation of tokens from delegator to indexer + * @return Amount of tokens to withdraw + */ + function getWithdraweableDelegatedTokens(Delegation memory _delegation) + public + view + override + returns (uint256) + { + // There must be locked tokens and period passed + uint256 currentEpoch = epochManager().currentEpoch(); + if (_delegation.tokensLockedUntil > 0 && currentEpoch >= _delegation.tokensLockedUntil) { + return _delegation.tokensLocked; + } + return 0; + } + + /** + * @dev Internal: Set a delegation tax percentage to burn when delegated funds are deposited. + * @param _percentage Percentage of delegated tokens to burn as delegation tax + */ + function _setDelegationTaxPercentage(uint32 _percentage) private { + // Must be within 0% to 100% (inclusive) + require(_percentage <= MAX_PPM, ">percentage"); + __delegationTaxPercentage = _percentage; + emit ParameterUpdated("delegationTaxPercentage"); + } + + /** + * @dev Internal: Set the delegation ratio. + * If set to 10 it means the indexer can use up to 10x the indexer staked amount + * from their delegated tokens + * @param _delegationRatio Delegation capacity multiplier + */ + function _setDelegationRatio(uint32 _delegationRatio) private { + __delegationRatio = _delegationRatio; + emit ParameterUpdated("delegationRatio"); + } + + /** + * @dev Internal: Set the time in blocks an indexer needs to wait to change delegation parameters. + * @param _blocks Number of blocks to set the delegation parameters cooldown period + */ + function _setDelegationParametersCooldown(uint32 _blocks) private { + __delegationParametersCooldown = _blocks; + emit ParameterUpdated("delegationParametersCooldown"); + } + + /** + * @dev Internal: Set the period for undelegation of stake from indexer. 
+ * @param _delegationUnbondingPeriod Period in epochs to wait for token withdrawals after undelegating + */ + function _setDelegationUnbondingPeriod(uint32 _delegationUnbondingPeriod) private { + require(_delegationUnbondingPeriod > 0, "!delegationUnbondingPeriod"); + __delegationUnbondingPeriod = _delegationUnbondingPeriod; + emit ParameterUpdated("delegationUnbondingPeriod"); + } + + /** + * @dev Delegate tokens to an indexer. + * @param _delegator Address of the delegator + * @param _indexer Address of the indexer to delegate tokens to + * @param _tokens Amount of tokens to delegate + * @return Amount of shares issued of the delegation pool + */ + function _delegate( + address _delegator, + address _indexer, + uint256 _tokens + ) private returns (uint256) { + // Only delegate a non-zero amount of tokens + require(_tokens > 0, "!tokens"); + // Only delegate to non-empty address + require(_indexer != address(0), "!indexer"); + // Only delegate to staked indexer + require(__stakes[_indexer].tokensStaked > 0, "!stake"); + + // Get the delegation pool of the indexer + DelegationPool storage pool = __delegationPools[_indexer]; + Delegation storage delegation = pool.delegators[_delegator]; + + // Collect delegation tax + uint256 delegationTax = _collectTax(graphToken(), _tokens, __delegationTaxPercentage); + uint256 delegatedTokens = _tokens.sub(delegationTax); + + // Calculate shares to issue + uint256 shares = (pool.tokens == 0) + ? delegatedTokens + : delegatedTokens.mul(pool.shares).div(pool.tokens); + require(shares > 0, "!shares"); + + // Update the delegation pool + pool.tokens = pool.tokens.add(delegatedTokens); + pool.shares = pool.shares.add(shares); + + // Update the individual delegation + delegation.shares = delegation.shares.add(shares); + + emit StakeDelegated(_indexer, _delegator, delegatedTokens, shares); + + return shares; + } + + /** + * @dev Undelegate tokens from an indexer. 
+ * @param _delegator Address of the delegator + * @param _indexer Address of the indexer where tokens had been delegated + * @param _shares Amount of shares to return and undelegate tokens + * @return Amount of tokens returned for the shares of the delegation pool + */ + function _undelegate( + address _delegator, + address _indexer, + uint256 _shares + ) private returns (uint256) { + // Can only undelegate a non-zero amount of shares + require(_shares > 0, "!shares"); + + // Get the delegation pool of the indexer + DelegationPool storage pool = __delegationPools[_indexer]; + Delegation storage delegation = pool.delegators[_delegator]; + + // Delegator need to have enough shares in the pool to undelegate + require(delegation.shares >= _shares, "!shares-avail"); + + // Withdraw tokens if available + if (getWithdraweableDelegatedTokens(delegation) > 0) { + _withdrawDelegated(_delegator, _indexer, address(0)); + } + + // Calculate tokens to get in exchange for the shares + uint256 tokens = _shares.mul(pool.tokens).div(pool.shares); + + // Update the delegation pool + pool.tokens = pool.tokens.sub(tokens); + pool.shares = pool.shares.sub(_shares); + + // Update the delegation + delegation.shares = delegation.shares.sub(_shares); + delegation.tokensLocked = delegation.tokensLocked.add(tokens); + delegation.tokensLockedUntil = epochManager().currentEpoch().add( + __delegationUnbondingPeriod + ); + + emit StakeDelegatedLocked( + _indexer, + _delegator, + tokens, + _shares, + delegation.tokensLockedUntil + ); + + return tokens; + } + + /** + * @dev Withdraw delegated tokens once the unbonding period has passed. 
+ * @param _delegator Delegator that is withdrawing tokens + * @param _indexer Withdraw available tokens delegated to indexer + * @param _delegateToIndexer Re-delegate to indexer address if non-zero, withdraw if zero address + * @return Amount of tokens withdrawn or re-delegated + */ + function _withdrawDelegated( + address _delegator, + address _indexer, + address _delegateToIndexer + ) private returns (uint256) { + // Get the delegation pool of the indexer + DelegationPool storage pool = __delegationPools[_indexer]; + Delegation storage delegation = pool.delegators[_delegator]; + + // Validation + uint256 tokensToWithdraw = getWithdraweableDelegatedTokens(delegation); + require(tokensToWithdraw > 0, "!tokens"); + + // Reset lock + delegation.tokensLocked = 0; + delegation.tokensLockedUntil = 0; + + emit StakeDelegatedWithdrawn(_indexer, _delegator, tokensToWithdraw); + + // -- Interactions -- + + if (_delegateToIndexer != address(0)) { + // Re-delegate tokens to a new indexer + _delegate(_delegator, _delegateToIndexer, tokensToWithdraw); + } else { + // Return tokens to the delegator + TokenUtils.pushTokens(graphToken(), _delegator, tokensToWithdraw); + } + + return tokensToWithdraw; + } + + /** + * @dev Collect tax to burn for an amount of tokens. 
+ * @param _graphToken Token to burn + * @param _tokens Total tokens received used to calculate the amount of tax to collect + * @param _percentage Percentage of tokens to burn as tax + * @return Amount of tax charged + */ + function _collectTax( + IGraphToken _graphToken, + uint256 _tokens, + uint256 _percentage + ) private returns (uint256) { + uint256 tax = uint256(_percentage).mul(_tokens).div(MAX_PPM); + TokenUtils.burnTokens(_graphToken, tax); // Burn tax if any + return tax; + } +} diff --git a/contracts/staking/StakingStorage.sol b/contracts/staking/StakingStorage.sol index d629cf8a8..e95356992 100644 --- a/contracts/staking/StakingStorage.sol +++ b/contracts/staking/StakingStorage.sol @@ -2,88 +2,119 @@ pragma solidity ^0.7.6; -import "../governance/Managed.sol"; - -import "./IStakingData.sol"; -import "./libs/Rebates.sol"; -import "./libs/Stakes.sol"; - +import { Managed } from "../governance/Managed.sol"; + +import { IStakingData } from "./IStakingData.sol"; +import { Rebates } from "./libs/Rebates.sol"; +import { Stakes } from "./libs/Stakes.sol"; + +/** + * @title StakingV1Storage + * @notice This contract holds all the storage variables for the Staking contract, version 1 + * @dev Note that we use a double underscore prefix for variable names; this prefix identifies + * variables that used to be public but are now internal, getters can be found on StakingExtension.sol. + */ +// solhint-disable-next-line max-states-count contract StakingV1Storage is Managed { // -- Staking -- - // Minimum amount of tokens an indexer needs to stake - uint256 public minimumIndexerStake; + /// @dev Minimum amount of tokens an indexer needs to stake + uint256 internal __minimumIndexerStake; + + /// @dev Time in blocks to unstake + uint32 internal __thawingPeriod; // in blocks - // Time in blocks to unstake - uint32 public thawingPeriod; // in blocks + /// @dev Percentage of fees going to curators + /// Parts per million. 
(Allows for 4 decimal points, 999,999 = 99.9999%) + uint32 internal __curationPercentage; - // Percentage of fees going to curators - // Parts per million. (Allows for 4 decimal points, 999,999 = 99.9999%) - uint32 public curationPercentage; + /// @dev Percentage of fees burned as protocol fee + /// Parts per million. (Allows for 4 decimal points, 999,999 = 99.9999%) + uint32 internal __protocolPercentage; - // Percentage of fees burned as protocol fee - // Parts per million. (Allows for 4 decimal points, 999,999 = 99.9999%) - uint32 public protocolPercentage; + /// @dev Period for allocation to be finalized + uint32 internal __channelDisputeEpochs; - // Period for allocation to be finalized - uint32 public channelDisputeEpochs; + /// @dev Maximum allocation time + uint32 internal __maxAllocationEpochs; - // Maximum allocation time - uint32 public maxAllocationEpochs; + /// @dev Rebate ratio numerator + uint32 internal __alphaNumerator; - // Rebate ratio - uint32 public alphaNumerator; - uint32 public alphaDenominator; + /// @dev Rebate ratio denominator + uint32 internal __alphaDenominator; - // Indexer stakes : indexer => Stake - mapping(address => Stakes.Indexer) public stakes; + /// @dev Indexer stakes : indexer => Stake + mapping(address => Stakes.Indexer) internal __stakes; - // Allocations : allocationID => Allocation - mapping(address => IStakingData.Allocation) public allocations; + /// @dev Allocations : allocationID => Allocation + mapping(address => IStakingData.Allocation) internal __allocations; - // Subgraph Allocations: subgraphDeploymentID => tokens - mapping(bytes32 => uint256) public subgraphAllocations; + /// @dev Subgraph Allocations: subgraphDeploymentID => tokens + mapping(bytes32 => uint256) internal __subgraphAllocations; - // Rebate pools : epoch => Pool - mapping(uint256 => Rebates.Pool) public rebates; + /// @dev Rebate pools : epoch => Pool + mapping(uint256 => Rebates.Pool) internal __rebates; // -- Slashing -- - // List of addresses 
allowed to slash stakes - mapping(address => bool) public slashers; + /// @dev List of addresses allowed to slash stakes + mapping(address => bool) internal __slashers; // -- Delegation -- - // Set the delegation capacity multiplier defined by the delegation ratio - // If delegation ratio is 100, and an Indexer has staked 5 GRT, - // then they can use up to 500 GRT from the delegated stake - uint32 public delegationRatio; + /// @dev Set the delegation capacity multiplier defined by the delegation ratio + /// If delegation ratio is 100, and an Indexer has staked 5 GRT, + /// then they can use up to 500 GRT from the delegated stake + uint32 internal __delegationRatio; - // Time in blocks an indexer needs to wait to change delegation parameters - uint32 public delegationParametersCooldown; + /// @dev Time in blocks an indexer needs to wait to change delegation parameters + uint32 internal __delegationParametersCooldown; - // Time in epochs a delegator needs to wait to withdraw delegated stake - uint32 public delegationUnbondingPeriod; // in epochs + /// @dev Time in epochs a delegator needs to wait to withdraw delegated stake + uint32 internal __delegationUnbondingPeriod; // in epochs - // Percentage of tokens to tax a delegation deposit - // Parts per million. (Allows for 4 decimal points, 999,999 = 99.9999%) - uint32 public delegationTaxPercentage; + /// @dev Percentage of tokens to tax a delegation deposit + /// Parts per million. 
(Allows for 4 decimal points, 999,999 = 99.9999%) + uint32 internal __delegationTaxPercentage; - // Delegation pools : indexer => DelegationPool - mapping(address => IStakingData.DelegationPool) public delegationPools; + /// @dev Delegation pools : indexer => DelegationPool + mapping(address => IStakingData.DelegationPool) internal __delegationPools; // -- Operators -- - // Operator auth : indexer => operator - mapping(address => mapping(address => bool)) public operatorAuth; + /// @dev Operator auth : indexer => operator => is authorized + mapping(address => mapping(address => bool)) internal __operatorAuth; // -- Asset Holders -- - // Allowed AssetHolders: assetHolder => is allowed - mapping(address => bool) public assetHolders; + /// @dev Allowed AssetHolders that can collect query fees: assetHolder => is allowed + mapping(address => bool) internal __assetHolders; } +/** + * @title StakingV2Storage + * @notice This contract holds all the storage variables for the Staking contract, version 2 + * @dev Note that we use a double underscore prefix for variable names; this prefix identifies + * variables that used to be public but are now internal, getters can be found on StakingExtension.sol. + */ contract StakingV2Storage is StakingV1Storage { - // Destination of accrued rewards : beneficiary => rewards destination - mapping(address => address) public rewardsDestination; + /// @dev Destination of accrued rewards : beneficiary => rewards destination + mapping(address => address) internal __rewardsDestination; +} + +/** + * @title StakingV3Storage + * @notice This contract holds all the storage variables for the base Staking contract, version 3. + * @dev Note that this is the first version that includes a storage gap - if adding + * future versions, make sure to move the gap to the new version and + * reduce the size of the gap accordingly. 
+ */ +contract StakingV3Storage is StakingV2Storage { + /// @dev Address of the counterpart Staking contract on L1/L2 + address internal counterpartStakingAddress; + /// @dev Address of the StakingExtension implementation + address internal extensionImpl; + /// @dev Gap to allow adding variables in future upgrades (since L1Staking and L2Staking can have their own storage as well) + uint256[50] private __gap; } diff --git a/contracts/tests/L1GraphTokenLockMigratorMock.sol b/contracts/tests/L1GraphTokenLockMigratorMock.sol new file mode 100644 index 000000000..ba8607f35 --- /dev/null +++ b/contracts/tests/L1GraphTokenLockMigratorMock.sol @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.7.6; +pragma experimental ABIEncoderV2; + +contract L1GraphTokenLockMigratorMock { + mapping(address => address) public migratedWalletAddress; + + function setMigratedAddress(address _l1Address, address _l2Address) external { + migratedWalletAddress[_l1Address] = _l2Address; + } + + function pullETH(address _l1Wallet, uint256 _amount) external { + require( + migratedWalletAddress[_l1Wallet] != address(0), + "L1GraphTokenLockMigratorMock: unknown L1 wallet" + ); + (bool success, ) = payable(msg.sender).call{ value: _amount }(""); + require(success, "L1GraphTokenLockMigratorMock: ETH pull failed"); + } +} diff --git a/e2e/deployment/config/controller.test.ts b/e2e/deployment/config/controller.test.ts index 647cb19f5..5bc4e6c04 100644 --- a/e2e/deployment/config/controller.test.ts +++ b/e2e/deployment/config/controller.test.ts @@ -13,7 +13,7 @@ describe('Controller configuration', () => { 'DisputeManager', 'EpochManager', 'RewardsManager', - 'Staking', + 'L1Staking', 'GraphToken', 'L1GraphTokenGateway', ] @@ -24,7 +24,7 @@ describe('Controller configuration', () => { 'DisputeManager', 'EpochManager', 'RewardsManager', - 'Staking', + 'L2Staking', 'L2GraphToken', 'L2GraphTokenGateway', ] diff --git a/e2e/deployment/config/staking.test.ts 
b/e2e/deployment/config/staking.test.ts index e2b1fe5e9..b5eb2c400 100644 --- a/e2e/deployment/config/staking.test.ts +++ b/e2e/deployment/config/staking.test.ts @@ -1,12 +1,20 @@ import { expect } from 'chai' import hre from 'hardhat' import { getItemValue } from '../../../cli/config' +import GraphChain from '../../../gre/helpers/chain' describe('Staking configuration', () => { const { graphConfig, contracts: { Staking, Controller, DisputeManager, AllocationExchange }, + chainId, } = hre.graph() + let contractName: string + if (GraphChain.isL2(chainId)) { + contractName = 'L2Staking' + } else { + contractName = 'L1Staking' + } it('should be controlled by Controller', async function () { const controller = await Staking.controller() @@ -25,61 +33,73 @@ describe('Staking configuration', () => { it('minimumIndexerStake should match "minimumIndexerStake" in the config file', async function () { const value = await Staking.minimumIndexerStake() - const expected = getItemValue(graphConfig, 'contracts/Staking/init/minimumIndexerStake') + const expected = getItemValue(graphConfig, `contracts/${contractName}/init/minimumIndexerStake`) expect(value).eq(expected) }) it('thawingPeriod should match "thawingPeriod" in the config file', async function () { const value = await Staking.thawingPeriod() - const expected = getItemValue(graphConfig, 'contracts/Staking/init/thawingPeriod') + const expected = getItemValue(graphConfig, `contracts/${contractName}/init/thawingPeriod`) expect(value).eq(expected) }) it('protocolPercentage should match "protocolPercentage" in the config file', async function () { const value = await Staking.protocolPercentage() - const expected = getItemValue(graphConfig, 'contracts/Staking/init/protocolPercentage') + const expected = getItemValue(graphConfig, `contracts/${contractName}/init/protocolPercentage`) expect(value).eq(expected) }) it('curationPercentage should match "curationPercentage" in the config file', async function () { const value = await 
Staking.curationPercentage() - const expected = getItemValue(graphConfig, 'contracts/Staking/init/curationPercentage') + const expected = getItemValue(graphConfig, `contracts/${contractName}/init/curationPercentage`) expect(value).eq(expected) }) it('channelDisputeEpochs should match "channelDisputeEpochs" in the config file', async function () { const value = await Staking.channelDisputeEpochs() - const expected = getItemValue(graphConfig, 'contracts/Staking/init/channelDisputeEpochs') + const expected = getItemValue( + graphConfig, + `contracts/${contractName}/init/channelDisputeEpochs`, + ) expect(value).eq(expected) }) it('maxAllocationEpochs should match "maxAllocationEpochs" in the config file', async function () { const value = await Staking.maxAllocationEpochs() - const expected = getItemValue(graphConfig, 'contracts/Staking/init/maxAllocationEpochs') + const expected = getItemValue(graphConfig, `contracts/${contractName}/init/maxAllocationEpochs`) expect(value).eq(expected) }) it('delegationUnbondingPeriod should match "delegationUnbondingPeriod" in the config file', async function () { const value = await Staking.delegationUnbondingPeriod() - const expected = getItemValue(graphConfig, 'contracts/Staking/init/delegationUnbondingPeriod') + const expected = getItemValue( + graphConfig, + `contracts/${contractName}/init/delegationUnbondingPeriod`, + ) expect(value).eq(expected) }) it('delegationRatio should match "delegationRatio" in the config file', async function () { const value = await Staking.delegationRatio() - const expected = getItemValue(graphConfig, 'contracts/Staking/init/delegationRatio') + const expected = getItemValue(graphConfig, `contracts/${contractName}/init/delegationRatio`) expect(value).eq(expected) }) it('alphaNumerator should match "rebateAlphaNumerator" in the config file', async function () { const value = await Staking.alphaNumerator() - const expected = getItemValue(graphConfig, 'contracts/Staking/init/rebateAlphaNumerator') + 
const expected = getItemValue( + graphConfig, + `contracts/${contractName}/init/rebateAlphaNumerator`, + ) expect(value).eq(expected) }) it('alphaDenominator should match "rebateAlphaDenominator" in the config file', async function () { const value = await Staking.alphaDenominator() - const expected = getItemValue(graphConfig, 'contracts/Staking/init/rebateAlphaDenominator') + const expected = getItemValue( + graphConfig, + `contracts/${contractName}/init/rebateAlphaDenominator`, + ) expect(value).eq(expected) }) diff --git a/test/disputes/poi.test.ts b/test/disputes/poi.test.ts index cded4d91e..4bc26e4f9 100644 --- a/test/disputes/poi.test.ts +++ b/test/disputes/poi.test.ts @@ -4,7 +4,7 @@ import { utils } from 'ethers' import { DisputeManager } from '../../build/types/DisputeManager' import { EpochManager } from '../../build/types/EpochManager' import { GraphToken } from '../../build/types/GraphToken' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import { NetworkFixture } from '../lib/fixtures' import { @@ -35,7 +35,7 @@ describe('DisputeManager:POI', async () => { let disputeManager: DisputeManager let epochManager: EpochManager let grt: GraphToken - let staking: Staking + let staking: IStaking // Derive some channel keys for each indexer used to sign attestations const indexerChannelKey = deriveChannelKey() diff --git a/test/disputes/query.test.ts b/test/disputes/query.test.ts index b7548e842..595b502cb 100644 --- a/test/disputes/query.test.ts +++ b/test/disputes/query.test.ts @@ -5,7 +5,7 @@ import { createAttestation, Receipt } from '@graphprotocol/common-ts' import { DisputeManager } from '../../build/types/DisputeManager' import { EpochManager } from '../../build/types/EpochManager' import { GraphToken } from '../../build/types/GraphToken' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import { NetworkFixture } from '../lib/fixtures' 
import { @@ -42,7 +42,7 @@ describe('DisputeManager:Query', async () => { let disputeManager: DisputeManager let epochManager: EpochManager let grt: GraphToken - let staking: Staking + let staking: IStaking // Derive some channel keys for each indexer used to sign attestations const indexer1ChannelKey = deriveChannelKey() diff --git a/test/gateway/l1GraphTokenGateway.test.ts b/test/gateway/l1GraphTokenGateway.test.ts index ac9aec4c9..77be1a761 100644 --- a/test/gateway/l1GraphTokenGateway.test.ts +++ b/test/gateway/l1GraphTokenGateway.test.ts @@ -31,6 +31,7 @@ describe('L1GraphTokenGateway', () => { let mockL2Gateway: Account let pauseGuardian: Account let mockL2GNS: Account + let mockL2Staking: Account let fixture: NetworkFixture let grt: GraphToken @@ -73,6 +74,7 @@ describe('L1GraphTokenGateway', () => { mockL2Gateway, pauseGuardian, mockL2GNS, + mockL2Staking, ] = await getAccounts() // Dummy code on the mock router so that it appears as a contract @@ -301,6 +303,7 @@ describe('L1GraphTokenGateway', () => { mockL2GRT.address, mockL2Gateway.address, mockL2GNS.address, + mockL2Staking.address, ) let tx = l1GraphTokenGateway.connect(governor.signer).setPaused(true) await expect(tx).emit(l1GraphTokenGateway, 'PauseChanged').withArgs(true) @@ -333,6 +336,7 @@ describe('L1GraphTokenGateway', () => { mockL2GRT.address, mockL2Gateway.address, mockL2GNS.address, + mockL2Staking.address, ) await l1GraphTokenGateway.connect(governor.signer).setPauseGuardian(pauseGuardian.address) let tx = l1GraphTokenGateway.connect(pauseGuardian.signer).setPaused(true) @@ -439,6 +443,7 @@ describe('L1GraphTokenGateway', () => { mockL2GRT.address, mockL2Gateway.address, mockL2GNS.address, + mockL2Staking.address, ) }) diff --git a/test/gns.test.ts b/test/gns.test.ts index 93ea11d57..c40c57013 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -63,6 +63,7 @@ describe('L1GNS', () => { let mockL2GRT: Account let mockL2Gateway: Account let mockL2GNS: Account + let mockL2Staking: Account 
let fixture: NetworkFixture @@ -222,8 +223,17 @@ describe('L1GNS', () => { } before(async function () { - ;[me, other, governor, another, mockRouter, mockL2GRT, mockL2Gateway, mockL2GNS] = - await getAccounts() + ;[ + me, + other, + governor, + another, + mockRouter, + mockL2GRT, + mockL2Gateway, + mockL2GNS, + mockL2Staking, + ] = await getAccounts() // Dummy code on the mock router so that it appears as a contract await provider().send('hardhat_setCode', [mockRouter.address, '0x1234']) fixture = new NetworkFixture() @@ -258,6 +268,7 @@ describe('L1GNS', () => { mockL2GRT.address, mockL2Gateway.address, mockL2GNS.address, + mockL2Staking.address, ) }) diff --git a/test/governance/pausing.test.ts b/test/governance/pausing.test.ts index 71aa3dc3d..372509c67 100644 --- a/test/governance/pausing.test.ts +++ b/test/governance/pausing.test.ts @@ -2,7 +2,7 @@ import { expect } from 'chai' import { constants } from 'ethers' import { Controller } from '../../build/types/Controller' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import { getAccounts, Account, toGRT } from '../lib/testHelpers' import { NetworkFixture } from '../lib/fixtures' @@ -14,7 +14,7 @@ describe('Pausing', () => { let fixture: NetworkFixture - let staking: Staking + let staking: IStaking let controller: Controller const setPartialPause = async (account: Account, setValue: boolean) => { diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index f90b96f1c..5f8064e6f 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -48,6 +48,7 @@ describe('L2GNS', () => { let mockL1GRT: Account let mockL1Gateway: Account let mockL1GNS: Account + let mockL1Staking: Account let fixture: NetworkFixture let fixtureContracts: L2FixtureContracts @@ -112,7 +113,8 @@ describe('L2GNS', () => { before(async function () { newSubgraph0 = buildSubgraph() - ;[me, other, governor, mockRouter, mockL1GRT, mockL1Gateway, mockL1GNS] = await getAccounts() + 
;[me, other, governor, mockRouter, mockL1GRT, mockL1Gateway, mockL1GNS, mockL1Staking] = + await getAccounts() fixture = new NetworkFixture() fixtureContracts = await fixture.loadL2(governor.signer) @@ -126,6 +128,7 @@ describe('L2GNS', () => { mockL1GRT.address, mockL1Gateway.address, mockL1GNS.address, + mockL1Staking.address, ) }) diff --git a/test/l2/l2GraphTokenGateway.test.ts b/test/l2/l2GraphTokenGateway.test.ts index 2a2595419..1a3078938 100644 --- a/test/l2/l2GraphTokenGateway.test.ts +++ b/test/l2/l2GraphTokenGateway.test.ts @@ -29,6 +29,7 @@ describe('L2GraphTokenGateway', () => { let mockL1Gateway: Account let pauseGuardian: Account let mockL1GNS: Account + let mockL1Staking: Account let fixture: NetworkFixture let arbSysMock: FakeContract @@ -57,6 +58,7 @@ describe('L2GraphTokenGateway', () => { l2Receiver, pauseGuardian, mockL1GNS, + mockL1Staking, ] = await getAccounts() fixture = new NetworkFixture() @@ -191,6 +193,7 @@ describe('L2GraphTokenGateway', () => { mockL1GRT.address, mockL1Gateway.address, mockL1GNS.address, + mockL1Staking.address, ) let tx = l2GraphTokenGateway.connect(governor.signer).setPaused(true) await expect(tx).emit(l2GraphTokenGateway, 'PauseChanged').withArgs(true) @@ -222,6 +225,7 @@ describe('L2GraphTokenGateway', () => { mockL1GRT.address, mockL1Gateway.address, mockL1GNS.address, + mockL1Staking.address, ) await l2GraphTokenGateway.connect(governor.signer).setPauseGuardian(pauseGuardian.address) let tx = l2GraphTokenGateway.connect(pauseGuardian.signer).setPaused(true) @@ -285,6 +289,7 @@ describe('L2GraphTokenGateway', () => { mockL1GRT.address, mockL1Gateway.address, mockL1GNS.address, + mockL1Staking.address, ) }) diff --git a/test/l2/l2Staking.test.ts b/test/l2/l2Staking.test.ts new file mode 100644 index 000000000..2d967bfcb --- /dev/null +++ b/test/l2/l2Staking.test.ts @@ -0,0 +1,291 @@ +import { expect } from 'chai' +import { ethers, ContractTransaction, BigNumber } from 'ethers' +import { defaultAbiCoder, parseEther 
} from 'ethers/lib/utils' + +import { + getAccounts, + Account, + toGRT, + getL2SignerFromL1, + setAccountBalance, + latestBlock, + advanceBlocks, +} from '../lib/testHelpers' +import { L2FixtureContracts, NetworkFixture } from '../lib/fixtures' +import { toBN } from '../lib/testHelpers' + +import { IL2Staking } from '../../build/types/IL2Staking' +import { L2GraphTokenGateway } from '../../build/types/L2GraphTokenGateway' +import { GraphToken } from '../../build/types/GraphToken' + +const { AddressZero } = ethers.constants + +describe('L2Staking', () => { + let me: Account + let other: Account + let another: Account + let governor: Account + let mockRouter: Account + let mockL1GRT: Account + let mockL1Gateway: Account + let mockL1GNS: Account + let mockL1Staking: Account + let fixture: NetworkFixture + + let fixtureContracts: L2FixtureContracts + let l2GraphTokenGateway: L2GraphTokenGateway + let staking: IL2Staking + let grt: GraphToken + + const tokens10k = toGRT('10000') + const tokens100k = toGRT('100000') + const tokens1m = toGRT('1000000') + + const gatewayFinalizeTransfer = async function ( + from: string, + to: string, + amount: BigNumber, + callhookData: string, + ): Promise { + const mockL1GatewayL2Alias = await getL2SignerFromL1(mockL1Gateway.address) + // Eth for gas: + await setAccountBalance(await mockL1GatewayL2Alias.getAddress(), parseEther('1')) + + const tx = l2GraphTokenGateway + .connect(mockL1GatewayL2Alias) + .finalizeInboundTransfer(mockL1GRT.address, from, to, amount, callhookData) + return tx + } + + before(async function () { + ;[ + me, + other, + another, + governor, + mockRouter, + mockL1GRT, + mockL1Gateway, + mockL1GNS, + mockL1Staking, + ] = await getAccounts() + + fixture = new NetworkFixture() + fixtureContracts = await fixture.loadL2(governor.signer) + ;({ l2GraphTokenGateway, staking, grt } = fixtureContracts) + + await grt.connect(governor.signer).mint(me.address, tokens1m) + await grt.connect(me.signer).approve(staking.address, 
tokens1m) + await grt.connect(governor.signer).mint(other.address, tokens1m) + await grt.connect(other.signer).approve(staking.address, tokens1m) + await fixture.configureL2Bridge( + governor.signer, + fixtureContracts, + mockRouter.address, + mockL1GRT.address, + mockL1Gateway.address, + mockL1GNS.address, + mockL1Staking.address, + ) + }) + + beforeEach(async function () { + await fixture.setUp() + }) + + afterEach(async function () { + await fixture.tearDown() + }) + + describe('receive()', function () { + it('should not allow receiving ETH', async function () { + const tx = me.signer.sendTransaction({ + to: staking.address, + value: parseEther('1'), + }) + await expect(tx).revertedWith('RECEIVE_ETH_NOT_ALLOWED') + }) + }) + describe('receiving indexer stake from L1 (onTokenTransfer)', function () { + it('cannot be called by someone other than the L2GraphTokenGateway', async function () { + const functionData = defaultAbiCoder.encode(['tuple(address)'], [[me.address]]) + + const callhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(0), functionData], // code = 1 means RECEIVE_INDEXER_CODE + ) + const tx = staking + .connect(me.signer) + .onTokenTransfer(mockL1GNS.address, tokens100k, callhookData) + await expect(tx).revertedWith('ONLY_GATEWAY') + }) + it('rejects calls if the L1 sender is not the L1Staking', async function () { + const functionData = defaultAbiCoder.encode(['tuple(address)'], [[me.address]]) + + const callhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(0), functionData], // code = 1 means RECEIVE_INDEXER_CODE + ) + const tx = gatewayFinalizeTransfer(me.address, staking.address, tokens100k, callhookData) + + await expect(tx).revertedWith('ONLY_L1_STAKING_THROUGH_BRIDGE') + }) + it('adds stake to a new indexer', async function () { + const functionData = defaultAbiCoder.encode(['tuple(address)'], [[me.address]]) + + const callhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(0), functionData], // code 
= 1 means RECEIVE_INDEXER_CODE + ) + const tx = gatewayFinalizeTransfer( + mockL1Staking.address, + staking.address, + tokens100k, + callhookData, + ) + + await expect(tx) + .emit(l2GraphTokenGateway, 'DepositFinalized') + .withArgs(mockL1GRT.address, mockL1Staking.address, staking.address, tokens100k) + await expect(tx).emit(staking, 'StakeDeposited').withArgs(me.address, tokens100k) + expect(await staking.getIndexerStakedTokens(me.address)).to.equal(tokens100k) + }) + it('adds stake to an existing indexer that was already migrated', async function () { + const functionData = defaultAbiCoder.encode(['tuple(address)'], [[me.address]]) + + const callhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(0), functionData], // code = 1 means RECEIVE_INDEXER_CODE + ) + await gatewayFinalizeTransfer( + mockL1Staking.address, + staking.address, + tokens100k, + callhookData, + ) + const tx = gatewayFinalizeTransfer( + mockL1Staking.address, + staking.address, + tokens100k, + callhookData, + ) + + await expect(tx) + .emit(l2GraphTokenGateway, 'DepositFinalized') + .withArgs(mockL1GRT.address, mockL1Staking.address, staking.address, tokens100k) + await expect(tx).emit(staking, 'StakeDeposited').withArgs(me.address, tokens100k) + expect(await staking.getIndexerStakedTokens(me.address)).to.equal(tokens100k.add(tokens100k)) + }) + it('adds stake to an existing indexer that was staked in L2', async function () { + const functionData = defaultAbiCoder.encode(['tuple(address)'], [[me.address]]) + + const callhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(0), functionData], // code = 1 means RECEIVE_INDEXER_CODE + ) + await staking.connect(me.signer).stake(tokens100k) + const tx = gatewayFinalizeTransfer( + mockL1Staking.address, + staking.address, + tokens100k, + callhookData, + ) + + await expect(tx) + .emit(l2GraphTokenGateway, 'DepositFinalized') + .withArgs(mockL1GRT.address, mockL1Staking.address, staking.address, tokens100k) + await 
expect(tx).emit(staking, 'StakeDeposited').withArgs(me.address, tokens100k) + expect(await staking.getIndexerStakedTokens(me.address)).to.equal(tokens100k.add(tokens100k)) + }) + }) + + describe('receiving delegation from L1 (onTokenTransfer)', function () { + it('adds delegation for a new delegator', async function () { + await staking.connect(me.signer).stake(tokens100k) + + const functionData = defaultAbiCoder.encode( + ['tuple(address,address)'], + [[me.address, other.address]], + ) + + const callhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(1), functionData], // code = 1 means RECEIVE_DELEGATION_CODE + ) + const tx = gatewayFinalizeTransfer( + mockL1Staking.address, + staking.address, + tokens10k, + callhookData, + ) + + await expect(tx) + .emit(l2GraphTokenGateway, 'DepositFinalized') + .withArgs(mockL1GRT.address, mockL1Staking.address, staking.address, tokens10k) + const expectedShares = tokens10k + await expect(tx) + .emit(staking, 'StakeDelegated') + .withArgs(me.address, other.address, tokens10k, expectedShares) + const delegation = await staking.getDelegation(me.address, other.address) + expect(delegation.shares).to.equal(expectedShares) + }) + it('adds delegation for an existing delegator', async function () { + await staking.connect(me.signer).stake(tokens100k) + await staking.connect(other.signer).delegate(me.address, tokens10k) + + const functionData = defaultAbiCoder.encode( + ['tuple(address,address)'], + [[me.address, other.address]], + ) + + const callhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(1), functionData], // code = 1 means RECEIVE_DELEGATION_CODE + ) + const tx = gatewayFinalizeTransfer( + mockL1Staking.address, + staking.address, + tokens10k, + callhookData, + ) + + await expect(tx) + .emit(l2GraphTokenGateway, 'DepositFinalized') + .withArgs(mockL1GRT.address, mockL1Staking.address, staking.address, tokens10k) + const expectedNewShares = tokens10k + const expectedTotalShares = tokens10k.mul(2) + 
await expect(tx) + .emit(staking, 'StakeDelegated') + .withArgs(me.address, other.address, tokens10k, expectedNewShares) + const delegation = await staking.getDelegation(me.address, other.address) + expect(delegation.shares).to.equal(expectedTotalShares) + }) + }) + describe('onTokenTransfer with invalid messages', function () { + it('reverts if the code is invalid', async function () { + // This should never really happen unless the Arbitrum bridge is compromised, + // so we test it anyway to ensure it's a well-defined behavior. + // code 2 does not exist: + const callhookData = defaultAbiCoder.encode(['uint8', 'bytes'], [toBN(2), '0x12345678']) + const tx = gatewayFinalizeTransfer( + mockL1Staking.address, + staking.address, + toGRT('1'), + callhookData, + ) + await expect(tx).revertedWith('INVALID_CODE') + }) + it('reverts if the message encoding is invalid', async function () { + // This should never really happen unless the Arbitrum bridge is compromised, + // so we test it anyway to ensure it's a well-defined behavior. 
+ const callhookData = defaultAbiCoder.encode(['address', 'uint128'], [AddressZero, toBN(2)]) + const tx = gatewayFinalizeTransfer( + mockL1Staking.address, + staking.address, + toGRT('1'), + callhookData, + ) + await expect(tx).reverted // abi.decode will fail with no reason + }) + }) +}) diff --git a/test/lib/deployment.ts b/test/lib/deployment.ts index e54ff31da..a7292f1ad 100644 --- a/test/lib/deployment.ts +++ b/test/lib/deployment.ts @@ -15,7 +15,9 @@ import { EpochManager } from '../../build/types/EpochManager' import { GNS } from '../../build/types/GNS' import { GraphToken } from '../../build/types/GraphToken' import { ServiceRegistry } from '../../build/types/ServiceRegistry' -import { Staking } from '../../build/types/Staking' +import { StakingExtension } from '../../build/types/StakingExtension' +import { IL1Staking } from '../../build/types/IL1Staking' +import { IL2Staking } from '../../build/types/IL2Staking' import { RewardsManager } from '../../build/types/RewardsManager' import { GraphGovernance } from '../../build/types/GraphGovernance' import { SubgraphNFT } from '../../build/types/SubgraphNFT' @@ -25,10 +27,17 @@ import { L2GraphToken } from '../../build/types/L2GraphToken' import { BridgeEscrow } from '../../build/types/BridgeEscrow' import { L2GNS } from '../../build/types/L2GNS' import { L1GNS } from '../../build/types/L1GNS' +import path from 'path' +import { Artifacts } from 'hardhat/internal/artifacts' // Disable logging for tests logger.pause() +const ARTIFACTS_PATH = path.resolve('build/contracts') +const artifacts = new Artifacts(ARTIFACTS_PATH) +const iL1StakingAbi = artifacts.readArtifactSync('IL1Staking').abi +const iL2StakingAbi = artifacts.readArtifactSync('IL2Staking').abi + // Default configuration used in tests export const defaults = { @@ -249,14 +258,48 @@ export async function deployServiceRegistry( ) as unknown as Promise } -export async function deployStaking( +export async function deployL1Staking( deployer: Signer, 
controller: string, proxyAdmin: GraphProxyAdmin, -): Promise { - return network.deployContractWithProxy( +): Promise { + const extensionImpl = (await deployContract( + 'StakingExtension', + deployer, + )) as unknown as StakingExtension + return (await network.deployContractWithProxy( + proxyAdmin, + 'L1Staking', + [ + controller, + defaults.staking.minimumIndexerStake, + defaults.staking.thawingPeriod, + 0, + 0, + defaults.staking.channelDisputeEpochs, + defaults.staking.maxAllocationEpochs, + defaults.staking.delegationUnbondingPeriod, + 0, + defaults.staking.alphaNumerator, + defaults.staking.alphaDenominator, + extensionImpl.address, + ], + deployer, + )) as unknown as IL1Staking +} + +export async function deployL2Staking( + deployer: Signer, + controller: string, + proxyAdmin: GraphProxyAdmin, +): Promise { + const extensionImpl = (await deployContract( + 'StakingExtension', + deployer, + )) as unknown as StakingExtension + return (await network.deployContractWithProxy( proxyAdmin, - 'Staking', + 'L2Staking', [ controller, defaults.staking.minimumIndexerStake, @@ -269,9 +312,10 @@ export async function deployStaking( 0, defaults.staking.alphaNumerator, defaults.staking.alphaDenominator, + extensionImpl.address, ], deployer, - ) as unknown as Staking + )) as unknown as IL2Staking } export async function deployRewardsManager( diff --git a/test/lib/fixtures.ts b/test/lib/fixtures.ts index 505cf973d..16cf6447b 100644 --- a/test/lib/fixtures.ts +++ b/test/lib/fixtures.ts @@ -15,7 +15,8 @@ import { Curation } from '../../build/types/Curation' import { L2Curation } from '../../build/types/L2Curation' import { L1GNS } from '../../build/types/L1GNS' import { L2GNS } from '../../build/types/L2GNS' -import { Staking } from '../../build/types/Staking' +import { IL1Staking } from '../../build/types/IL1Staking' +import { IL2Staking } from '../../build/types/IL2Staking' import { RewardsManager } from '../../build/types/RewardsManager' import { ServiceRegistry } from 
'../../build/types/ServiceRegistry' import { GraphProxyAdmin } from '../../build/types/GraphProxyAdmin' @@ -31,7 +32,7 @@ export interface L1FixtureContracts { grt: GraphToken curation: Curation gns: L1GNS - staking: Staking + staking: IL1Staking rewardsManager: RewardsManager serviceRegistry: ServiceRegistry proxyAdmin: GraphProxyAdmin @@ -46,7 +47,7 @@ export interface L2FixtureContracts { grt: L2GraphToken curation: L2Curation gns: L2GNS - staking: Staking + staking: IL2Staking rewardsManager: RewardsManager serviceRegistry: ServiceRegistry proxyAdmin: GraphProxyAdmin @@ -100,12 +101,14 @@ export class NetworkFixture { curation = await deployment.deployCuration(deployer, controller.address, proxyAdmin) } let gns: L1GNS | L2GNS + let staking: IL1Staking | IL2Staking if (isL2) { gns = await deployment.deployL2GNS(deployer, controller.address, proxyAdmin) + staking = await deployment.deployL2Staking(deployer, controller.address, proxyAdmin) } else { gns = await deployment.deployL1GNS(deployer, controller.address, proxyAdmin) + staking = await deployment.deployL1Staking(deployer, controller.address, proxyAdmin) } - const staking = await deployment.deployStaking(deployer, controller.address, proxyAdmin) const disputeManager = await deployment.deployDisputeManager( deployer, controller.address, @@ -246,6 +249,7 @@ export class NetworkFixture { mockL2GRTAddress: string, mockL2GatewayAddress: string, mockL2GNSAddress: string, + mockL2StakingAddress: string, ): Promise { // First configure the Arbitrum bridge mocks await arbitrumMocks.bridgeMock.connect(deployer).setInbox(arbitrumMocks.inboxMock.address, true) @@ -275,6 +279,12 @@ export class NetworkFixture { await l1FixtureContracts.l1GraphTokenGateway .connect(deployer) .addToCallhookAllowlist(l1FixtureContracts.gns.address) + await l1FixtureContracts.staking + .connect(deployer) + .setCounterpartStakingAddress(mockL2StakingAddress) + await l1FixtureContracts.l1GraphTokenGateway + .connect(deployer) + 
.addToCallhookAllowlist(l1FixtureContracts.staking.address) await l1FixtureContracts.l1GraphTokenGateway.connect(deployer).setPaused(false) } @@ -285,6 +295,7 @@ export class NetworkFixture { mockL1GRTAddress: string, mockL1GatewayAddress: string, mockL1GNSAddress: string, + mockL1StakingAddress: string, ): Promise { // Configure the L2 GRT // Configure the gateway @@ -301,6 +312,9 @@ export class NetworkFixture { .connect(deployer) .setL1CounterpartAddress(mockL1GatewayAddress) await l2FixtureContracts.gns.connect(deployer).setCounterpartGNSAddress(mockL1GNSAddress) + await l2FixtureContracts.staking + .connect(deployer) + .setCounterpartStakingAddress(mockL1StakingAddress) await l2FixtureContracts.l2GraphTokenGateway.connect(deployer).setPaused(false) } diff --git a/test/payments/allocationExchange.test.ts b/test/payments/allocationExchange.test.ts index 03a1a3229..fba672e03 100644 --- a/test/payments/allocationExchange.test.ts +++ b/test/payments/allocationExchange.test.ts @@ -3,7 +3,7 @@ import { BigNumber, constants, Wallet } from 'ethers' import { AllocationExchange } from '../../build/types/AllocationExchange' import { GraphToken } from '../../build/types/GraphToken' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import { NetworkFixture } from '../lib/fixtures' import * as deployment from '../lib/deployment' @@ -33,7 +33,7 @@ describe('AllocationExchange', () => { let fixture: NetworkFixture let grt: GraphToken - let staking: Staking + let staking: IStaking let allocationExchange: AllocationExchange async function createVoucher( diff --git a/test/payments/withdrawHelper.test.ts b/test/payments/withdrawHelper.test.ts index d7b2c8655..1cda7427e 100644 --- a/test/payments/withdrawHelper.test.ts +++ b/test/payments/withdrawHelper.test.ts @@ -3,7 +3,7 @@ import { constants } from 'ethers' import { GRTWithdrawHelper } from '../../build/types/GRTWithdrawHelper' import { GraphToken } from 
'../../build/types/GraphToken' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import { NetworkFixture } from '../lib/fixtures' import * as deployment from '../lib/deployment' @@ -26,7 +26,7 @@ describe('WithdrawHelper', () => { let fixture: NetworkFixture let grt: GraphToken - let staking: Staking + let staking: IStaking let withdrawHelper: GRTWithdrawHelper function createWithdrawData(callData: string) { diff --git a/test/rewards/rewards.test.ts b/test/rewards/rewards.test.ts index beed73a73..3916f4787 100644 --- a/test/rewards/rewards.test.ts +++ b/test/rewards/rewards.test.ts @@ -10,7 +10,7 @@ import { EpochManager } from '../../build/types/EpochManager' import { GraphToken } from '../../build/types/GraphToken' import { RewardsManager } from '../../build/types/RewardsManager' import { RewardsManagerMock } from '../../build/types/RewardsManagerMock' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import { advanceBlocks, @@ -46,7 +46,7 @@ describe('Rewards', () => { let grt: GraphToken let curation: Curation let epochManager: EpochManager - let staking: Staking + let staking: IStaking let rewardsManager: RewardsManager let rewardsManagerMock: RewardsManagerMock diff --git a/test/serviceRegisty.test.ts b/test/serviceRegisty.test.ts index 726afa2f2..14027b5c3 100644 --- a/test/serviceRegisty.test.ts +++ b/test/serviceRegisty.test.ts @@ -1,7 +1,7 @@ import { expect } from 'chai' import { ServiceRegistry } from '../build/types/ServiceRegistry' -import { Staking } from '../build/types/Staking' +import { IStaking } from '../build/types/IStaking' import { getAccounts, Account } from './lib/testHelpers' import { NetworkFixture } from './lib/fixtures' @@ -14,7 +14,7 @@ describe('ServiceRegistry', () => { let fixture: NetworkFixture let serviceRegistry: ServiceRegistry - let staking: Staking + let staking: IStaking const shouldRegister = async (url: 
string, geohash: string) => { // Register the indexer service diff --git a/test/staking/allocation.test.ts b/test/staking/allocation.test.ts index 790c528d1..385fb9c16 100644 --- a/test/staking/allocation.test.ts +++ b/test/staking/allocation.test.ts @@ -4,7 +4,7 @@ import { constants, BigNumber, PopulatedTransaction } from 'ethers' import { Curation } from '../../build/types/Curation' import { EpochManager } from '../../build/types/EpochManager' import { GraphToken } from '../../build/types/GraphToken' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import { NetworkFixture } from '../lib/fixtures' import { @@ -51,7 +51,7 @@ describe('Staking:Allocation', () => { let curation: Curation let epochManager: EpochManager let grt: GraphToken - let staking: Staking + let staking: IStaking // Test values @@ -354,7 +354,7 @@ describe('Staking:Allocation', () => { it('reject allocate if no tokens staked', async function () { const tx = allocate(toBN('1')) - await expect(tx).revertedWith('!capacity') + await expect(tx).revertedWith('!minimumIndexerStake') }) it('reject allocate zero tokens if no minimum stake', async function () { diff --git a/test/staking/configuration.test.ts b/test/staking/configuration.test.ts index 52aeaa843..9f86edd8d 100644 --- a/test/staking/configuration.test.ts +++ b/test/staking/configuration.test.ts @@ -1,7 +1,7 @@ import { expect } from 'chai' import { constants } from 'ethers' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import { defaults } from '../lib/deployment' import { NetworkFixture } from '../lib/fixtures' @@ -19,7 +19,7 @@ describe('Staking:Config', () => { let fixture: NetworkFixture - let staking: Staking + let staking: IStaking before(async function () { ;[me, other, governor, slasher] = await getAccounts() diff --git a/test/staking/delegation.test.ts b/test/staking/delegation.test.ts index e097dfd9f..3ad68cadb 
100644 --- a/test/staking/delegation.test.ts +++ b/test/staking/delegation.test.ts @@ -3,7 +3,7 @@ import { constants, BigNumber } from 'ethers' import { EpochManager } from '../../build/types/EpochManager' import { GraphToken } from '../../build/types/GraphToken' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import { NetworkFixture } from '../lib/fixtures' import { @@ -34,7 +34,7 @@ describe('Staking::Delegation', () => { let epochManager: EpochManager let grt: GraphToken - let staking: Staking + let staking: IStaking // Test values const poi = randomHexBytes() diff --git a/test/staking/migration.test.ts b/test/staking/migration.test.ts new file mode 100644 index 000000000..d1e0a4a08 --- /dev/null +++ b/test/staking/migration.test.ts @@ -0,0 +1,786 @@ +import { expect } from 'chai' +import { constants, BigNumber, Event } from 'ethers' +import { defaultAbiCoder, ParamType, parseEther } from 'ethers/lib/utils' + +import { GraphToken } from '../../build/types/GraphToken' +import { IL1Staking } from '../../build/types/IL1Staking' +import { IStaking } from '../../build/types/IStaking' +import { L1GraphTokenGateway } from '../../build/types/L1GraphTokenGateway' +import { L1GraphTokenLockMigratorMock } from '../../build/types/L1GraphTokenLockMigratorMock' + +import { ArbitrumL1Mocks, L1FixtureContracts, NetworkFixture } from '../lib/fixtures' + +import { + advanceBlockTo, + deriveChannelKey, + getAccounts, + randomHexBytes, + latestBlock, + toBN, + toGRT, + provider, + Account, + setAccountBalance, + impersonateAccount, +} from '../lib/testHelpers' +import { deployContract } from '../lib/deployment' + +const { AddressZero, MaxUint256 } = constants + +describe('L1Staking:Migration', () => { + let me: Account + let governor: Account + let indexer: Account + let slasher: Account + let l2Indexer: Account + let delegator: Account + let l2Delegator: Account + let mockRouter: Account + let mockL2GRT: Account + let 
mockL2Gateway: Account + let mockL2GNS: Account + let mockL2Staking: Account + + let fixture: NetworkFixture + let fixtureContracts: L1FixtureContracts + + let grt: GraphToken + let staking: IL1Staking + let l1GraphTokenGateway: L1GraphTokenGateway + let arbitrumMocks: ArbitrumL1Mocks + let l1GraphTokenLockMigrator: L1GraphTokenLockMigratorMock + + // Test values + const indexerTokens = toGRT('10000000') + const delegatorTokens = toGRT('1000000') + const tokensToStake = toGRT('200000') + const subgraphDeploymentID = randomHexBytes() + const channelKey = deriveChannelKey() + const allocationID = channelKey.address + const metadata = randomHexBytes(32) + const minimumIndexerStake = toGRT('100000') + const delegationTaxPPM = 10000 // 1% + // Dummy L2 gas values + const maxGas = toBN('1000000') + const gasPriceBid = toBN('1000000000') + const maxSubmissionCost = toBN('1000000000') + + // Allocate with test values + const allocate = async (tokens: BigNumber) => { + return staking + .connect(indexer.signer) + .allocateFrom( + indexer.address, + subgraphDeploymentID, + tokens, + allocationID, + metadata, + await channelKey.generateProof(indexer.address), + ) + } + + before(async function () { + ;[ + me, + governor, + indexer, + slasher, + delegator, + l2Indexer, + mockRouter, + mockL2GRT, + mockL2Gateway, + mockL2GNS, + mockL2Staking, + l2Delegator, + ] = await getAccounts() + + fixture = new NetworkFixture() + fixtureContracts = await fixture.load(governor.signer, slasher.signer) + ;({ grt, staking, l1GraphTokenGateway } = fixtureContracts) + // Dummy code on the mock router so that it appears as a contract + await provider().send('hardhat_setCode', [mockRouter.address, '0x1234']) + arbitrumMocks = await fixture.loadArbitrumL1Mocks(governor.signer) + await fixture.configureL1Bridge( + governor.signer, + arbitrumMocks, + fixtureContracts, + mockRouter.address, + mockL2GRT.address, + mockL2Gateway.address, + mockL2GNS.address, + mockL2Staking.address, + ) + + 
l1GraphTokenLockMigrator = (await deployContract( + 'L1GraphTokenLockMigratorMock', + governor.signer, + )) as unknown as L1GraphTokenLockMigratorMock + + await setAccountBalance(l1GraphTokenLockMigrator.address, parseEther('1')) + + await staking + .connect(governor.signer) + .setL1GraphTokenLockMigrator(l1GraphTokenLockMigrator.address) + + // Give some funds to the indexer and approve staking contract to use funds on indexer behalf + await grt.connect(governor.signer).mint(indexer.address, indexerTokens) + await grt.connect(indexer.signer).approve(staking.address, indexerTokens) + + await grt.connect(governor.signer).mint(delegator.address, delegatorTokens) + await grt.connect(delegator.signer).approve(staking.address, delegatorTokens) + + await staking.connect(governor.signer).setMinimumIndexerStake(minimumIndexerStake) + await staking.connect(governor.signer).setDelegationTaxPercentage(delegationTaxPPM) // 1% + }) + + beforeEach(async function () { + await fixture.setUp() + }) + + afterEach(async function () { + await fixture.tearDown() + }) + + context('> when not staked', function () { + describe('migrateStakeToL2', function () { + it('should not allow migrating for someone who has not staked', async function () { + const tx = staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + tokensToStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('tokensStaked == 0') + }) + }) + }) + + context('> when staked', function () { + const shouldMigrateIndexerStake = async ( + amountToSend: BigNumber, + options: { + expectedSeqNum?: number + l2Beneficiary?: string + } = {}, + ) => { + const l2Beneficiary = options.l2Beneficiary ?? l2Indexer.address + const expectedSeqNum = options.expectedSeqNum ?? 
1 + const tx = staking + .connect(indexer.signer) + .migrateStakeToL2(l2Beneficiary, amountToSend, maxGas, gasPriceBid, maxSubmissionCost) + const expectedFunctionData = defaultAbiCoder.encode(['tuple(address)'], [[l2Indexer.address]]) + + const expectedCallhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(0), expectedFunctionData], // code = 1 means RECEIVE_INDEXER_CODE + ) + const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( + grt.address, + staking.address, + mockL2Staking.address, + amountToSend, + expectedCallhookData, + ) + + await expect(tx) + .emit(l1GraphTokenGateway, 'TxToL2') + .withArgs(staking.address, mockL2Gateway.address, toBN(expectedSeqNum), expectedL2Data) + } + + beforeEach(async function () { + await staking.connect(indexer.signer).stake(tokensToStake) + }) + + describe('receive()', function () { + it('should not allow receiving funds from a random address', async function () { + const tx = indexer.signer.sendTransaction({ + to: staking.address, + value: parseEther('1'), + }) + await expect(tx).revertedWith('Only migrator can send ETH') + }) + it('should allow receiving funds from the migrator', async function () { + const impersonatedMigrator = await impersonateAccount(l1GraphTokenLockMigrator.address) + const tx = impersonatedMigrator.sendTransaction({ + to: staking.address, + value: parseEther('1'), + }) + await expect(tx).to.not.be.reverted + }) + }) + describe('migrateStakeToL2', function () { + it('should not allow migrating but leaving less than the minimum indexer stake', async function () { + const tx = staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + tokensToStake.sub(minimumIndexerStake).add(1), + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('!minimumIndexerStake remaining') + }) + it('should not allow migrating less than the minimum indexer stake the first time', async function () { + const tx = staking + .connect(indexer.signer) + 
.migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake.sub(1), + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('!minimumIndexerStake sent') + }) + it('should not allow migrating if there are tokens locked for withdrawal', async function () { + await staking.connect(indexer.signer).unstake(tokensToStake) + const tx = staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + tokensToStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('tokensLocked != 0') + }) + it('should not allow migrating to a beneficiary that is address zero', async function () { + const tx = staking + .connect(indexer.signer) + .migrateStakeToL2(AddressZero, tokensToStake, maxGas, gasPriceBid, maxSubmissionCost) + await expect(tx).revertedWith('l2Beneficiary == 0') + }) + it('should not allow migrating the whole stake if there are open allocations', async function () { + await allocate(toGRT('10')) + const tx = staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + tokensToStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('allocated') + }) + it('should not allow migrating partial stake if the remaining indexer capacity is insufficient for open allocations', async function () { + // We set delegation ratio == 1 so an indexer can only use as much delegation as their own stake + await staking.connect(governor.signer).setDelegationRatio(1) + const tokensToDelegate = toGRT('202100') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + + // Now the indexer has 200k tokens staked and 200k tokens delegated + await allocate(toGRT('400000')) + + // But if we try to migrate even 100k, we will not have enough indexer capacity to cover the open allocation + const tx = staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + toGRT('100000'), + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + 
await expect(tx).revertedWith('! allocation capacity') + }) + it('sends the tokens and a message through the L1GraphTokenGateway', async function () { + const amountToSend = minimumIndexerStake + await shouldMigrateIndexerStake(amountToSend) + // Check that the indexer stake was reduced by the sent amount + expect((await staking.stakes(indexer.address)).tokensStaked).to.equal( + tokensToStake.sub(amountToSend), + ) + }) + it('should allow migrating the whole stake if there are no open allocations', async function () { + await shouldMigrateIndexerStake(tokensToStake) + // Check that the indexer stake was reduced by the sent amount + expect((await staking.stakes(indexer.address)).tokensStaked).to.equal(0) + }) + it('should allow migrating partial stake if the remaining capacity can cover the allocations', async function () { + // We set delegation ratio == 1 so an indexer can only use as much delegation as their own stake + await staking.connect(governor.signer).setDelegationRatio(1) + const tokensToDelegate = toGRT('200000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + + // Now the indexer has 200k tokens staked and 200k tokens delegated, + // but they allocate 200k + await allocate(toGRT('200000')) + + // If we migrate 100k, we will still have enough indexer capacity to cover the open allocation + const amountToSend = toGRT('100000') + await shouldMigrateIndexerStake(amountToSend) + // Check that the indexer stake was reduced by the sent amount + expect((await staking.stakes(indexer.address)).tokensStaked).to.equal( + tokensToStake.sub(amountToSend), + ) + }) + it('allows migrating several times to the same beneficiary', async function () { + // Stake a bit more so we're still over the minimum stake after migrating twice + await staking.connect(indexer.signer).stake(tokensToStake) + await shouldMigrateIndexerStake(minimumIndexerStake) + await shouldMigrateIndexerStake(toGRT('1000'), { expectedSeqNum: 2 }) + expect((await 
staking.stakes(indexer.address)).tokensStaked).to.equal( + tokensToStake.mul(2).sub(minimumIndexerStake).sub(toGRT('1000')), + ) + }) + it('should not allow migrating to a different beneficiary the second time', async function () { + await shouldMigrateIndexerStake(minimumIndexerStake) + const tx = staking.connect(indexer.signer).migrateStakeToL2( + indexer.address, // Note this is different from l2Indexer used before + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('l2Beneficiary != previous') + }) + }) + + describe('migrateLockedStakeToL2', function () { + it('sends a message through L1GraphTokenGateway like migrateStakeToL2, but gets the beneficiary and ETH from a migrator contract', async function () { + const amountToSend = minimumIndexerStake + + await l1GraphTokenLockMigrator.setMigratedAddress(indexer.address, l2Indexer.address) + const oldMigratorEthBalance = await provider().getBalance(l1GraphTokenLockMigrator.address) + const tx = staking + .connect(indexer.signer) + .migrateLockedStakeToL2(minimumIndexerStake, maxGas, gasPriceBid, maxSubmissionCost) + const expectedFunctionData = defaultAbiCoder.encode( + ['tuple(address)'], + [[l2Indexer.address]], + ) + + const expectedCallhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(0), expectedFunctionData], // code = 0 means RECEIVE_INDEXER_CODE + ) + const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( + grt.address, + staking.address, + mockL2Staking.address, + amountToSend, + expectedCallhookData, + ) + + await expect(tx) + .emit(l1GraphTokenGateway, 'TxToL2') + .withArgs(staking.address, mockL2Gateway.address, toBN(1), expectedL2Data) + expect(await provider().getBalance(l1GraphTokenLockMigrator.address)).to.equal( + oldMigratorEthBalance.sub(maxSubmissionCost).sub(gasPriceBid.mul(maxGas)), + ) + }) + it('should not allow migrating if the migrator contract returns a zero address beneficiary', async function () { + const 
amountToSend = minimumIndexerStake + + const tx = staking + .connect(indexer.signer) + .migrateLockedStakeToL2(minimumIndexerStake, maxGas, gasPriceBid, maxSubmissionCost) + await expect(tx).revertedWith('LOCK NOT MIGRATED') + }) + }) + describe('unlockDelegationToMigratedIndexer', function () { + beforeEach(async function () { + await staking.connect(governor.signer).setDelegationUnbondingPeriod(28) // epochs + }) + it('allows a delegator to a migrated indexer to withdraw locked delegation before the unbonding period', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + const actualDelegation = tokensToDelegate.sub( + tokensToDelegate.mul(delegationTaxPPM).div(1000000), + ) + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + tokensToStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await staking.connect(delegator.signer).undelegate(indexer.address, actualDelegation) + const tx = await staking + .connect(delegator.signer) + .unlockDelegationToMigratedIndexer(indexer.address) + await expect(tx) + .emit(staking, 'StakeDelegatedUnlockedDueToMigration') + .withArgs(indexer.address, delegator.address) + const tx2 = await staking + .connect(delegator.signer) + .withdrawDelegated(indexer.address, AddressZero) + await expect(tx2) + .emit(staking, 'StakeDelegatedWithdrawn') + .withArgs(indexer.address, delegator.address, actualDelegation) + }) + it('rejects calls if the indexer has not migrated their stake', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + const tx = staking + .connect(delegator.signer) + .unlockDelegationToMigratedIndexer(indexer.address) + await expect(tx).revertedWith('indexer not migrated') + }) + it('rejects calls if the indexer has only migrated part of their stake but not all', async function () { + const 
tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + const tx = staking + .connect(delegator.signer) + .unlockDelegationToMigratedIndexer(indexer.address) + await expect(tx).revertedWith('indexer not migrated') + }) + it('rejects calls if the delegator has not undelegated first', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + tokensToStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + const tx = staking + .connect(delegator.signer) + .unlockDelegationToMigratedIndexer(indexer.address) + await expect(tx).revertedWith('! locked') + }) + it('rejects calls if the caller is not a delegator', async function () { + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + tokensToStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + const tx = staking + .connect(delegator.signer) + .unlockDelegationToMigratedIndexer(indexer.address) + // The function checks for tokensLockedUntil so this is the error we should get: + await expect(tx).revertedWith('! 
locked') + }) + }) + describe('migrateDelegationToL2', function () { + it('rejects calls if the delegated indexer has not migrated stake to L2', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + + const tx = staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + l2Delegator.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('indexer not migrated') + }) + it('rejects calls if the beneficiary is zero', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + + const tx = staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + AddressZero, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('l2Beneficiary == 0') + }) + it('rejects calls if the delegator has tokens locked for undelegation', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await staking.connect(delegator.signer).undelegate(indexer.address, toGRT('1')) + + const tx = staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + l2Delegator.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('tokensLocked != 0') + }) + it('rejects calls if the delegator has no tokens delegated to the indexer', async function () { + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + 
maxGas, + gasPriceBid, + maxSubmissionCost, + ) + + const tx = staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + l2Delegator.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('delegation == 0') + }) + it('sends all the tokens delegated to the indexer to the beneficiary on L2, using the gateway', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + const actualDelegation = tokensToDelegate.sub( + tokensToDelegate.mul(delegationTaxPPM).div(1000000), + ) + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + + const expectedFunctionData = defaultAbiCoder.encode( + ['tuple(address,address)'], + [[l2Indexer.address, l2Delegator.address]], + ) + + const expectedCallhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(1), expectedFunctionData], // code = 1 means RECEIVE_DELEGATION_CODE + ) + const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( + grt.address, + staking.address, + mockL2Staking.address, + actualDelegation, + expectedCallhookData, + ) + + const tx = staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + l2Delegator.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + // seqNum is 2 because the first bridge call was in migrateStakeToL2 + await expect(tx) + .emit(l1GraphTokenGateway, 'TxToL2') + .withArgs(staking.address, mockL2Gateway.address, toBN(2), expectedL2Data) + await expect(tx) + .emit(staking, 'DelegationMigratedToL2') + .withArgs( + delegator.address, + l2Delegator.address, + indexer.address, + l2Indexer.address, + actualDelegation, + ) + }) + it('sets the delegation shares to zero so cannot be called twice', async function () { + const tokensToDelegate = toGRT('10000') + await 
staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + + await staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + l2Delegator.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + + const tx = staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + l2Delegator.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx).revertedWith('delegation == 0') + }) + it('can be called again if the delegator added more delegation (edge case)', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + const actualDelegation = tokensToDelegate.sub( + tokensToDelegate.mul(delegationTaxPPM).div(1000000), + ) + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + + await staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + l2Delegator.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + + const tx = staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + l2Delegator.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + await expect(tx) + .emit(staking, 'DelegationMigratedToL2') + .withArgs( + delegator.address, + l2Delegator.address, + indexer.address, + l2Indexer.address, + actualDelegation, + ) + }) + }) + describe('migrateLockedDelegationToL2', function () { + it('sends delegated tokens to L2 like migrateDelegationToL2, but gets the beneficiary and ETH from the L1GraphTokenLockMigrator', async function () { + const tokensToDelegate = toGRT('10000') + await 
staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + const actualDelegation = tokensToDelegate.sub( + tokensToDelegate.mul(delegationTaxPPM).div(1000000), + ) + + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + + const expectedFunctionData = defaultAbiCoder.encode( + ['tuple(address,address)'], + [[l2Indexer.address, l2Delegator.address]], + ) + + const expectedCallhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(1), expectedFunctionData], // code = 1 means RECEIVE_DELEGATION_CODE + ) + const expectedL2Data = await l1GraphTokenGateway.getOutboundCalldata( + grt.address, + staking.address, + mockL2Staking.address, + actualDelegation, + expectedCallhookData, + ) + + await l1GraphTokenLockMigrator.setMigratedAddress(delegator.address, l2Delegator.address) + + const oldMigratorEthBalance = await provider().getBalance(l1GraphTokenLockMigrator.address) + const tx = staking + .connect(delegator.signer) + .migrateLockedDelegationToL2(indexer.address, maxGas, gasPriceBid, maxSubmissionCost) + // seqNum is 2 because the first bridge call was in migrateStakeToL2 + await expect(tx) + .emit(l1GraphTokenGateway, 'TxToL2') + .withArgs(staking.address, mockL2Gateway.address, toBN(2), expectedL2Data) + await expect(tx) + .emit(staking, 'DelegationMigratedToL2') + .withArgs( + delegator.address, + l2Delegator.address, + indexer.address, + l2Indexer.address, + actualDelegation, + ) + expect(await provider().getBalance(l1GraphTokenLockMigrator.address)).to.equal( + oldMigratorEthBalance.sub(maxSubmissionCost).sub(gasPriceBid.mul(maxGas)), + ) + }) + it('rejects calls if the migrator contract returns a zero address beneficiary', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + const actualDelegation = tokensToDelegate.sub( + 
tokensToDelegate.mul(delegationTaxPPM).div(1000000), + ) + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + ) + + const tx = staking + .connect(delegator.signer) + .migrateLockedDelegationToL2(indexer.address, maxGas, gasPriceBid, maxSubmissionCost) + await expect(tx).revertedWith('LOCK NOT MIGRATED') + }) + }) + }) +}) diff --git a/test/staking/staking.test.ts b/test/staking/staking.test.ts index b769c8f57..bebe9247a 100644 --- a/test/staking/staking.test.ts +++ b/test/staking/staking.test.ts @@ -2,7 +2,7 @@ import { expect } from 'chai' import { constants, BigNumber, Event } from 'ethers' import { GraphToken } from '../../build/types/GraphToken' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import { NetworkFixture } from '../lib/fixtures' @@ -39,7 +39,7 @@ describe('Staking:Stakes', () => { let fixture: NetworkFixture let grt: GraphToken - let staking: Staking + let staking: IStaking // Test values const indexerTokens = toGRT('1000') diff --git a/test/upgrade/admin.test.ts b/test/upgrade/admin.test.ts index a20640943..71e30b777 100644 --- a/test/upgrade/admin.test.ts +++ b/test/upgrade/admin.test.ts @@ -5,7 +5,7 @@ import '@nomiclabs/hardhat-ethers' import { GraphProxy } from '../../build/types/GraphProxy' import { Curation } from '../../build/types/Curation' import { GraphProxyAdmin } from '../../build/types/GraphProxyAdmin' -import { Staking } from '../../build/types/Staking' +import { IStaking } from '../../build/types/IStaking' import * as deployment from '../lib/deployment' import { NetworkFixture } from '../lib/fixtures' @@ -24,7 +24,7 @@ describe('Upgrades', () => { let proxyAdmin: GraphProxyAdmin let curation: Curation - let staking: Staking + let staking: IStaking let stakingProxy: GraphProxy before(async function () { From 00f5c94b2de9058806fd8012ccd24ce1d94b0e7a Mon Sep 17 00:00:00 2001 
From: Pablo Carranza Velez Date: Wed, 15 Mar 2023 10:04:55 -0300 Subject: [PATCH 090/112] fix: correct proxy check in Staking fallback --- contracts/staking/Staking.sol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts/staking/Staking.sol b/contracts/staking/Staking.sol index 9676f4f6a..313ebd729 100644 --- a/contracts/staking/Staking.sol +++ b/contracts/staking/Staking.sol @@ -46,7 +46,7 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M */ // solhint-disable-next-line payable-fallback, no-complex-fallback fallback() external { - require(address(this) != _implementation(), "only through proxy"); + require(_implementation() != address(0), "only through proxy"); // solhint-disable-next-line no-inline-assembly assembly { // (a) get free memory pointer From 709c8fe00770502e8783d1de4501fdc39e33aa86 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 28 Mar 2023 19:39:13 -0300 Subject: [PATCH 091/112] fix: produce an alias for migrated subgraphIDs --- contracts/l2/discovery/IL2GNS.sol | 11 +++- contracts/l2/discovery/L2GNS.sol | 88 +++++++++++++++++++------------ 2 files changed, 64 insertions(+), 35 deletions(-) diff --git a/contracts/l2/discovery/IL2GNS.sol b/contracts/l2/discovery/IL2GNS.sol index 227625928..a51fa98a5 100644 --- a/contracts/l2/discovery/IL2GNS.sol +++ b/contracts/l2/discovery/IL2GNS.sol @@ -28,15 +28,22 @@ interface IL2GNS is ICallhookReceiver { * @notice Finish a subgraph migration from L1. * The subgraph must have been previously sent through the bridge * using the sendSubgraphToL2 function on L1GNS. 
- * @param _subgraphID Subgraph ID + * @param _l2SubgraphID Subgraph ID in L2 (aliased from the L1 subgraph ID) * @param _subgraphDeploymentID Latest subgraph deployment to assign to the subgraph * @param _subgraphMetadata IPFS hash of the subgraph metadata * @param _versionMetadata IPFS hash of the version metadata */ function finishSubgraphMigrationFromL1( - uint256 _subgraphID, + uint256 _l2SubgraphID, bytes32 _subgraphDeploymentID, bytes32 _subgraphMetadata, bytes32 _versionMetadata ) external; + + /** + * @notice Return the aliased L2 subgraph ID from a migrated L1 subgraph ID + * @param _l1SubgraphID L1 subgraph ID + * @return L2 subgraph ID + */ + function getAliasedL2SubgraphID(uint256 _l1SubgraphID) external pure returns (uint256); } diff --git a/contracts/l2/discovery/L2GNS.sol b/contracts/l2/discovery/L2GNS.sol index dd31c4726..fb3a86355 100644 --- a/contracts/l2/discovery/L2GNS.sol +++ b/contracts/l2/discovery/L2GNS.sol @@ -26,21 +26,30 @@ import { IL2Curation } from "../curation/IL2Curation.sol"; contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { using SafeMathUpgradeable for uint256; + uint256 public constant SUBGRAPH_ID_ALIAS_OFFSET = + uint256(0x1111000000000000000000000000000000000000000000000000000000001111); + /// @dev Emitted when a subgraph is received from L1 through the bridge event SubgraphReceivedFromL1( - uint256 indexed _subgraphID, + uint256 indexed _l1SubgraphID, + uint256 indexed _l2SubgraphID, address indexed _owner, uint256 _tokens ); /// @dev Emitted when a subgraph migration from L1 is finalized, so the subgraph is published - event SubgraphMigrationFinalized(uint256 indexed _subgraphID); + event SubgraphMigrationFinalized(uint256 indexed _l2SubgraphID); /// @dev Emitted when the L1 balance for a curator has been claimed - event CuratorBalanceReceived(uint256 _subgraphID, address _l2Curator, uint256 _tokens); + event CuratorBalanceReceived( + uint256 indexed _l1SubgraphId, + uint256 indexed _l2SubgraphID, + address indexed _l2Curator, 
+ uint256 _tokens + ); /// @dev Emitted when the L1 balance for a curator has been returned to the beneficiary. /// This can happen if the subgraph migration was not finished when the curator's tokens arrived. event CuratorBalanceReturnedToBeneficiary( - uint256 _subgraphID, - address _l2Curator, + uint256 indexed _l1SubgraphID, + address indexed _l2Curator, uint256 _tokens ); @@ -74,15 +83,15 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { bytes calldata _data ) external override notPartialPaused onlyL2Gateway { require(_from == counterpartGNSAddress, "ONLY_L1_GNS_THROUGH_BRIDGE"); - (uint8 code, uint256 subgraphID, address beneficiary) = abi.decode( + (uint8 code, uint256 l1SubgraphID, address beneficiary) = abi.decode( _data, (uint8, uint256, address) ); if (code == uint8(L1MessageCodes.RECEIVE_SUBGRAPH_CODE)) { - _receiveSubgraphFromL1(subgraphID, beneficiary, _amount); + _receiveSubgraphFromL1(l1SubgraphID, beneficiary, _amount); } else if (code == uint8(L1MessageCodes.RECEIVE_CURATOR_BALANCE_CODE)) { - _mintSignalFromL1(subgraphID, beneficiary, _amount); + _mintSignalFromL1(l1SubgraphID, beneficiary, _amount); } else { revert("INVALID_CODE"); } @@ -92,19 +101,21 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { * @notice Finish a subgraph migration from L1. * The subgraph must have been previously sent through the bridge * using the sendSubgraphToL2 function on L1GNS. 
- * @param _subgraphID Subgraph ID + * @param _l2SubgraphID Subgraph ID (aliased from the L1 subgraph ID) * @param _subgraphDeploymentID Latest subgraph deployment to assign to the subgraph * @param _subgraphMetadata IPFS hash of the subgraph metadata * @param _versionMetadata IPFS hash of the version metadata */ function finishSubgraphMigrationFromL1( - uint256 _subgraphID, + uint256 _l2SubgraphID, bytes32 _subgraphDeploymentID, bytes32 _subgraphMetadata, bytes32 _versionMetadata - ) external override notPartialPaused onlySubgraphAuth(_subgraphID) { - IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + ) external override notPartialPaused onlySubgraphAuth(_l2SubgraphID) { + IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[ + _l2SubgraphID + ]; + SubgraphData storage subgraphData = _getSubgraphData(_l2SubgraphID); require(migratedData.subgraphReceivedOnL2BlockNumber != 0, "INVALID_SUBGRAPH"); require(!migratedData.l2Done, "ALREADY_DONE"); migratedData.l2Done = true; @@ -116,7 +127,7 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { // Update pool: constant nSignal, vSignal can change (w/no slippage protection) // Buy all signal from the new deployment uint256 vSignal = curation.mintTaxFree(_subgraphDeploymentID, migratedData.tokens); - uint256 nSignal = vSignalToNSignal(_subgraphID, vSignal); + uint256 nSignal = vSignalToNSignal(_l2SubgraphID, vSignal); subgraphData.disabled = false; subgraphData.vSignal = vSignal; @@ -124,17 +135,17 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { subgraphData.curatorNSignal[msg.sender] = nSignal; subgraphData.subgraphDeploymentID = _subgraphDeploymentID; // Set the token metadata - _setSubgraphMetadata(_subgraphID, _subgraphMetadata); + _setSubgraphMetadata(_l2SubgraphID, _subgraphMetadata); - emit SubgraphPublished(_subgraphID, _subgraphDeploymentID, fixedReserveRatio); + emit 
SubgraphPublished(_l2SubgraphID, _subgraphDeploymentID, fixedReserveRatio); emit SubgraphUpgraded( - _subgraphID, + _l2SubgraphID, subgraphData.vSignal, migratedData.tokens, _subgraphDeploymentID ); - emit SubgraphVersionUpdated(_subgraphID, _subgraphDeploymentID, _versionMetadata); - emit SubgraphMigrationFinalized(_subgraphID); + emit SubgraphVersionUpdated(_l2SubgraphID, _subgraphDeploymentID, _versionMetadata); + emit SubgraphMigrationFinalized(_l2SubgraphID); } /** @@ -208,21 +219,31 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { emit SubgraphVersionUpdated(_subgraphID, _subgraphDeploymentID, _versionMetadata); } + /** + * @notice Return the aliased L2 subgraph ID from a migrated L1 subgraph ID + * @param _l1SubgraphID L1 subgraph ID + * @return L2 subgraph ID + */ + function getAliasedL2SubgraphID(uint256 _l1SubgraphID) public pure override returns (uint256) { + return _l1SubgraphID + SUBGRAPH_ID_ALIAS_OFFSET; + } + /** * @dev Receive a subgraph from L1. * This function will initialize a subgraph received through the bridge, * and store the migration data so that it's finalized later using finishSubgraphMigrationFromL1. 
- * @param _subgraphID Subgraph ID + * @param _l1SubgraphID Subgraph ID in L1 (will be aliased) * @param _subgraphOwner Owner of the subgraph * @param _tokens Tokens to be deposited in the subgraph */ function _receiveSubgraphFromL1( - uint256 _subgraphID, + uint256 _l1SubgraphID, address _subgraphOwner, uint256 _tokens ) internal { - IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + uint256 l2SubgraphID = getAliasedL2SubgraphID(_l1SubgraphID); + SubgraphData storage subgraphData = _getSubgraphData(l2SubgraphID); + IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[l2SubgraphID]; subgraphData.reserveRatioDeprecated = fixedReserveRatio; // The subgraph will be disabled until finishSubgraphMigrationFromL1 is called @@ -235,9 +256,9 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { // This function will check the if tokenID already exists. // Note we do this here so that we can later do the onlySubgraphAuth // check in finishSubgraphMigrationFromL1. - _mintNFT(_subgraphOwner, _subgraphID); + _mintNFT(_subgraphOwner, l2SubgraphID); - emit SubgraphReceivedFromL1(_subgraphID, _subgraphOwner, _tokens); + emit SubgraphReceivedFromL1(_l1SubgraphID, l2SubgraphID, _subgraphOwner, _tokens); } /** @@ -245,27 +266,28 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { * If the subgraph migration was never finished (or the subgraph doesn't exist), the tokens will be sent to the curator. * @dev This looks a lot like GNS.mintSignal, but doesn't pull the tokens from the * curator and has no slippage protection. 
- * @param _subgraphID Subgraph ID + * @param _l1SubgraphID Subgraph ID in L1 (will be aliased) * @param _curator Curator address * @param _tokensIn The amount of tokens the nameCurator wants to deposit */ function _mintSignalFromL1( - uint256 _subgraphID, + uint256 _l1SubgraphID, address _curator, uint256 _tokensIn ) internal { - IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[_subgraphID]; - SubgraphData storage subgraphData = _getSubgraphData(_subgraphID); + uint256 l2SubgraphID = getAliasedL2SubgraphID(_l1SubgraphID); + IL2GNS.SubgraphL2MigrationData storage migratedData = subgraphL2MigrationData[l2SubgraphID]; + SubgraphData storage subgraphData = _getSubgraphData(l2SubgraphID); // If subgraph migration wasn't finished, we should send the tokens to the curator if (!migratedData.l2Done || subgraphData.disabled) { graphToken().transfer(_curator, _tokensIn); - emit CuratorBalanceReturnedToBeneficiary(_subgraphID, _curator, _tokensIn); + emit CuratorBalanceReturnedToBeneficiary(_l1SubgraphID, _curator, _tokensIn); } else { // Get name signal to mint for tokens deposited IL2Curation curation = IL2Curation(address(curation())); uint256 vSignal = curation.mintTaxFree(subgraphData.subgraphDeploymentID, _tokensIn); - uint256 nSignal = vSignalToNSignal(_subgraphID, vSignal); + uint256 nSignal = vSignalToNSignal(l2SubgraphID, vSignal); // Update pools subgraphData.vSignal = subgraphData.vSignal.add(vSignal); @@ -274,8 +296,8 @@ contract L2GNS is GNS, L2GNSV1Storage, IL2GNS { nSignal ); - emit SignalMinted(_subgraphID, _curator, nSignal, vSignal, _tokensIn); - emit CuratorBalanceReceived(_subgraphID, _curator, _tokensIn); + emit SignalMinted(l2SubgraphID, _curator, nSignal, vSignal, _tokensIn); + emit CuratorBalanceReceived(_l1SubgraphID, l2SubgraphID, _curator, _tokensIn); } } From 7dc87a4c1da7f24368e4181fd8c6ed9c1189d885 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Tue, 28 Mar 2023 20:01:51 -0300 Subject: [PATCH 092/112] test: 
fix L2GNS tests with subgraphID aliasing --- test/l2/l2GNS.test.ts | 109 ++++++++++++++++++++++-------------------- 1 file changed, 58 insertions(+), 51 deletions(-) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index f90b96f1c..7f5417954 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -100,10 +100,11 @@ describe('L2GNS', () => { ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) await gns .connect(me.signer) .finishSubgraphMigrationFromL1( - l1SubgraphId, + l2SubgraphId, newSubgraph0.subgraphDeploymentID, subgraphMetadata, versionMetadata, @@ -271,15 +272,17 @@ describe('L2GNS', () => { callhookData, ) + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) + await expect(tx) .emit(l2GraphTokenGateway, 'DepositFinalized') .withArgs(mockL1GRT.address, mockL1GNS.address, gns.address, curatedTokens) await expect(tx) .emit(gns, 'SubgraphReceivedFromL1') - .withArgs(l1SubgraphId, me.address, curatedTokens) + .withArgs(l1SubgraphId, l2SubgraphId, me.address, curatedTokens) - const migrationData = await gns.subgraphL2MigrationData(l1SubgraphId) - const subgraphData = await gns.subgraphs(l1SubgraphId) + const migrationData = await gns.subgraphL2MigrationData(l2SubgraphId) + const subgraphData = await gns.subgraphs(l2SubgraphId) expect(migrationData.tokens).eq(curatedTokens) expect(migrationData.l2Done).eq(false) @@ -292,7 +295,7 @@ describe('L2GNS', () => { expect(subgraphData.disabled).eq(true) expect(subgraphData.withdrawableGRT).eq(0) // Important so that it's not the same as a deprecated subgraph! 
- expect(await gns.ownerOf(l1SubgraphId)).eq(me.address) + expect(await gns.ownerOf(l2SubgraphId)).eq(me.address) }) it('does not conflict with a locally created subgraph', async function () { const l2Subgraph = await publishNewSubgraph(me, newSubgraph0, gns) @@ -309,15 +312,17 @@ describe('L2GNS', () => { callhookData, ) + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) + await expect(tx) .emit(l2GraphTokenGateway, 'DepositFinalized') .withArgs(mockL1GRT.address, mockL1GNS.address, gns.address, curatedTokens) await expect(tx) .emit(gns, 'SubgraphReceivedFromL1') - .withArgs(l1SubgraphId, me.address, curatedTokens) + .withArgs(l1SubgraphId, l2SubgraphId, me.address, curatedTokens) - const migrationData = await gns.subgraphL2MigrationData(l1SubgraphId) - const subgraphData = await gns.subgraphs(l1SubgraphId) + const migrationData = await gns.subgraphL2MigrationData(l2SubgraphId) + const subgraphData = await gns.subgraphs(l2SubgraphId) expect(migrationData.tokens).eq(curatedTokens) expect(migrationData.l2Done).eq(false) @@ -330,9 +335,9 @@ describe('L2GNS', () => { expect(subgraphData.disabled).eq(true) expect(subgraphData.withdrawableGRT).eq(0) // Important so that it's not the same as a deprecated subgraph! 
- expect(await gns.ownerOf(l1SubgraphId)).eq(me.address) + expect(await gns.ownerOf(l2SubgraphId)).eq(me.address) - expect(l2Subgraph.id).not.eq(l1SubgraphId) + expect(l2Subgraph.id).not.eq(l2SubgraphId) const l2SubgraphData = await gns.subgraphs(l2Subgraph.id) expect(l2SubgraphData.vSignal).eq(0) expect(l2SubgraphData.nSignal).eq(0) @@ -357,35 +362,35 @@ describe('L2GNS', () => { newSubgraph0.subgraphDeploymentID, curatedTokens, ) - + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) const tx = gns .connect(me.signer) .finishSubgraphMigrationFromL1( - l1SubgraphId, + l2SubgraphId, newSubgraph0.subgraphDeploymentID, subgraphMetadata, versionMetadata, ) await expect(tx) .emit(gns, 'SubgraphPublished') - .withArgs(l1SubgraphId, newSubgraph0.subgraphDeploymentID, DEFAULT_RESERVE_RATIO) - await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l1SubgraphId, subgraphMetadata) + .withArgs(l2SubgraphId, newSubgraph0.subgraphDeploymentID, DEFAULT_RESERVE_RATIO) + await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l2SubgraphId, subgraphMetadata) await expect(tx) .emit(gns, 'SubgraphUpgraded') - .withArgs(l1SubgraphId, expectedSignal, curatedTokens, newSubgraph0.subgraphDeploymentID) + .withArgs(l2SubgraphId, expectedSignal, curatedTokens, newSubgraph0.subgraphDeploymentID) await expect(tx) .emit(gns, 'SubgraphVersionUpdated') - .withArgs(l1SubgraphId, newSubgraph0.subgraphDeploymentID, versionMetadata) - await expect(tx).emit(gns, 'SubgraphMigrationFinalized').withArgs(l1SubgraphId) + .withArgs(l2SubgraphId, newSubgraph0.subgraphDeploymentID, versionMetadata) + await expect(tx).emit(gns, 'SubgraphMigrationFinalized').withArgs(l2SubgraphId) - const subgraphAfter = await gns.subgraphs(l1SubgraphId) - const migrationDataAfter = await gns.subgraphL2MigrationData(l1SubgraphId) + const subgraphAfter = await gns.subgraphs(l2SubgraphId) + const migrationDataAfter = await gns.subgraphL2MigrationData(l2SubgraphId) 
expect(subgraphAfter.vSignal).eq(expectedSignal) expect(migrationDataAfter.l2Done).eq(true) expect(subgraphAfter.disabled).eq(false) expect(subgraphAfter.subgraphDeploymentID).eq(newSubgraph0.subgraphDeploymentID) - const expectedNSignal = await gns.vSignalToNSignal(l1SubgraphId, expectedSignal) - expect(await gns.getCuratorSignal(l1SubgraphId, me.address)).eq(expectedNSignal) + const expectedNSignal = await gns.vSignalToNSignal(l2SubgraphId, expectedSignal) + expect(await gns.getCuratorSignal(l2SubgraphId, me.address)).eq(expectedNSignal) }) it('cannot be called by someone other than the subgraph owner', async function () { const { l1SubgraphId, curatedTokens, subgraphMetadata, versionMetadata } = @@ -395,11 +400,11 @@ describe('L2GNS', () => { [toBN(0), l1SubgraphId, me.address], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) - + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) const tx = gns .connect(other.signer) .finishSubgraphMigrationFromL1( - l1SubgraphId, + l2SubgraphId, newSubgraph0.subgraphDeploymentID, subgraphMetadata, versionMetadata, @@ -409,11 +414,11 @@ describe('L2GNS', () => { it('rejects calls for a subgraph that does not exist', async function () { const l1SubgraphId = await buildSubgraphID(me.address, toBN('1'), 1) const metadata = randomHexBytes() - + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) const tx = gns .connect(me.signer) .finishSubgraphMigrationFromL1( - l1SubgraphId, + l2SubgraphId, newSubgraph0.subgraphDeploymentID, metadata, metadata, @@ -442,6 +447,7 @@ describe('L2GNS', () => { [toBN(0), l1SubgraphId, me.address], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) // Calculate expected signal before minting const expectedSignal = await curation.tokensToSignalNoTax( @@ -459,25 +465,25 @@ describe('L2GNS', () => { const tx = gns 
.connect(me.signer) .finishSubgraphMigrationFromL1( - l1SubgraphId, + l2SubgraphId, newSubgraph0.subgraphDeploymentID, subgraphMetadata, versionMetadata, ) await expect(tx) .emit(gns, 'SubgraphPublished') - .withArgs(l1SubgraphId, newSubgraph0.subgraphDeploymentID, DEFAULT_RESERVE_RATIO) - await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l1SubgraphId, subgraphMetadata) + .withArgs(l2SubgraphId, newSubgraph0.subgraphDeploymentID, DEFAULT_RESERVE_RATIO) + await expect(tx).emit(gns, 'SubgraphMetadataUpdated').withArgs(l2SubgraphId, subgraphMetadata) await expect(tx) .emit(gns, 'SubgraphUpgraded') - .withArgs(l1SubgraphId, expectedSignal, curatedTokens, newSubgraph0.subgraphDeploymentID) + .withArgs(l2SubgraphId, expectedSignal, curatedTokens, newSubgraph0.subgraphDeploymentID) await expect(tx) .emit(gns, 'SubgraphVersionUpdated') - .withArgs(l1SubgraphId, newSubgraph0.subgraphDeploymentID, versionMetadata) - await expect(tx).emit(gns, 'SubgraphMigrationFinalized').withArgs(l1SubgraphId) + .withArgs(l2SubgraphId, newSubgraph0.subgraphDeploymentID, versionMetadata) + await expect(tx).emit(gns, 'SubgraphMigrationFinalized').withArgs(l2SubgraphId) - const subgraphAfter = await gns.subgraphs(l1SubgraphId) - const migrationDataAfter = await gns.subgraphL2MigrationData(l1SubgraphId) + const subgraphAfter = await gns.subgraphs(l2SubgraphId) + const migrationDataAfter = await gns.subgraphL2MigrationData(l2SubgraphId) expect(subgraphAfter.vSignal).eq(expectedSignal) expect(migrationDataAfter.l2Done).eq(true) expect(subgraphAfter.disabled).eq(false) @@ -494,10 +500,10 @@ describe('L2GNS', () => { [toBN(0), l1SubgraphId, me.address], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) - + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) const tx = gns .connect(me.signer) - .finishSubgraphMigrationFromL1(l1SubgraphId, HashZero, metadata, metadata) + .finishSubgraphMigrationFromL1(l2SubgraphId, HashZero, 
metadata, metadata) await expect(tx).revertedWith('GNS: deploymentID != 0') }) it('rejects calls if the subgraph migration was already finished', async function () { @@ -508,11 +514,11 @@ describe('L2GNS', () => { [toBN(0), l1SubgraphId, me.address], ) await gatewayFinalizeTransfer(mockL1GNS.address, gns.address, curatedTokens, callhookData) - + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) await gns .connect(me.signer) .finishSubgraphMigrationFromL1( - l1SubgraphId, + l2SubgraphId, newSubgraph0.subgraphDeploymentID, metadata, metadata, @@ -521,7 +527,7 @@ describe('L2GNS', () => { const tx = gns .connect(me.signer) .finishSubgraphMigrationFromL1( - l1SubgraphId, + l2SubgraphId, newSubgraph0.subgraphDeploymentID, metadata, metadata, @@ -543,8 +549,8 @@ describe('L2GNS', () => { subgraphMetadata, versionMetadata, ) - - const l2OwnerSignalBefore = await gns.getCuratorSignal(l1SubgraphId, me.address) + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) + const l2OwnerSignalBefore = await gns.getCuratorSignal(l2SubgraphId, me.address) const newCuratorTokens = toGRT('10') const callhookData = defaultAbiCoder.encode( @@ -560,14 +566,14 @@ describe('L2GNS', () => { await expect(tx) .emit(gns, 'CuratorBalanceReceived') - .withArgs(l1SubgraphId, other.address, newCuratorTokens) + .withArgs(l1SubgraphId, l2SubgraphId, other.address, newCuratorTokens) - const l2NewCuratorSignal = await gns.getCuratorSignal(l1SubgraphId, other.address) + const l2NewCuratorSignal = await gns.getCuratorSignal(l2SubgraphId, other.address) const expectedNewCuratorSignal = await gns.vSignalToNSignal( - l1SubgraphId, + l2SubgraphId, await curation.tokensToSignalNoTax(newSubgraph0.subgraphDeploymentID, newCuratorTokens), ) - const l2OwnerSignalAfter = await gns.getCuratorSignal(l1SubgraphId, me.address) + const l2OwnerSignalAfter = await gns.getCuratorSignal(l2SubgraphId, me.address) expect(l2OwnerSignalAfter).eq(l2OwnerSignalBefore) 
expect(l2NewCuratorSignal).eq(expectedNewCuratorSignal) }) @@ -585,10 +591,11 @@ describe('L2GNS', () => { versionMetadata, ) + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) await grt.connect(governor.signer).mint(other.address, toGRT('10')) await grt.connect(other.signer).approve(gns.address, toGRT('10')) - await gns.connect(other.signer).mintSignal(l1SubgraphId, toGRT('10'), toBN(0)) - const prevSignal = await gns.getCuratorSignal(l1SubgraphId, other.address) + await gns.connect(other.signer).mintSignal(l2SubgraphId, toGRT('10'), toBN(0)) + const prevSignal = await gns.getCuratorSignal(l2SubgraphId, other.address) const newCuratorTokens = toGRT('10') const callhookData = defaultAbiCoder.encode( @@ -604,13 +611,13 @@ describe('L2GNS', () => { await expect(tx) .emit(gns, 'CuratorBalanceReceived') - .withArgs(l1SubgraphId, other.address, newCuratorTokens) + .withArgs(l1SubgraphId, l2SubgraphId, other.address, newCuratorTokens) const expectedNewCuratorSignal = await gns.vSignalToNSignal( - l1SubgraphId, + l2SubgraphId, await curation.tokensToSignalNoTax(newSubgraph0.subgraphDeploymentID, newCuratorTokens), ) - const l2CuratorBalance = await gns.getCuratorSignal(l1SubgraphId, other.address) + const l2CuratorBalance = await gns.getCuratorSignal(l2SubgraphId, other.address) expect(l2CuratorBalance).eq(prevSignal.add(expectedNewCuratorSignal)) }) it('cannot be called by someone other than the L2GraphTokenGateway', async function () { @@ -741,8 +748,8 @@ describe('L2GNS', () => { subgraphMetadata, versionMetadata, ) - - await gns.connect(me.signer).deprecateSubgraph(l1SubgraphId) + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) + await gns.connect(me.signer).deprecateSubgraph(l2SubgraphId) // SG was migrated, but is deprecated now! 
From 20dd45084ab8ae0eb322ea8930cedfc47a661836 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Wed, 29 Mar 2023 14:55:44 -0300 Subject: [PATCH 093/112] test: add tests for getAliasedL2SubgraphID --- test/l2/l2GNS.test.ts | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/test/l2/l2GNS.test.ts b/test/l2/l2GNS.test.ts index 7f5417954..e49bcd7b0 100644 --- a/test/l2/l2GNS.test.ts +++ b/test/l2/l2GNS.test.ts @@ -784,4 +784,28 @@ describe('L2GNS', () => { await expect(tx).revertedWith('INVALID_CODE') }) }) + describe('getAliasedL2SubgraphID', function () { + it('returns the L2 subgraph ID that is the L1 subgraph ID with an offset', async function () { + const l1SubgraphId = ethers.BigNumber.from( + '68799548758199140224151701590582019137924969401915573086349306511960790045480', + ) + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) + const offset = ethers.BigNumber.from( + '0x1111000000000000000000000000000000000000000000000000000000001111', + ) + const base = ethers.constants.MaxUint256.add(1) + const expectedL2SubgraphId = l1SubgraphId.add(offset).mod(base) + expect(l2SubgraphId).eq(expectedL2SubgraphId) + }) + it('wraps around MAX_UINT256 in case of overflow', async function () { + const l1SubgraphId = ethers.constants.MaxUint256 + const l2SubgraphId = await gns.getAliasedL2SubgraphID(l1SubgraphId) + const offset = ethers.BigNumber.from( + '0x1111000000000000000000000000000000000000000000000000000000001111', + ) + const base = ethers.constants.MaxUint256.add(1) + const expectedL2SubgraphId = l1SubgraphId.add(offset).mod(base) + expect(l2SubgraphId).eq(expectedL2SubgraphId) + }) + }) }) From 2663f6ec130742dbc97f207aeabbb48ca872c520 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Wed, 12 Apr 2023 19:00:09 -0300 Subject: [PATCH 094/112] fix(L1GNS): only allow exact msg.value to cover the L2 gas and submission fee (OZ M-01 for #786) --- contracts/discovery/L1GNS.sol | 13 ++++++++++++- test/gns.test.ts | 35 
+++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) diff --git a/contracts/discovery/L1GNS.sol b/contracts/discovery/L1GNS.sol index 3e8d94360..f510352fe 100644 --- a/contracts/discovery/L1GNS.sol +++ b/contracts/discovery/L1GNS.sol @@ -44,6 +44,8 @@ contract L1GNS is GNS, L1GNSV1Storage { /** * @notice Send a subgraph's data and tokens to L2. * Use the Arbitrum SDK to estimate the L2 retryable ticket parameters. + * Note that any L2 gas/fee refunds will be lost, so the function only accepts + * the exact amount of ETH to cover _maxSubmissionCost + _maxGas * _gasPriceBid. * @param _subgraphID Subgraph ID * @param _l2Owner Address that will own the subgraph in L2 (could be the L1 owner, but could be different if the L1 owner is an L1 contract) * @param _maxGas Max gas to use for the L2 retryable ticket @@ -58,6 +60,10 @@ contract L1GNS is GNS, L1GNSV1Storage { uint256 _maxSubmissionCost ) external payable notPartialPaused { require(!subgraphMigratedToL2[_subgraphID], "ALREADY_DONE"); + require( + msg.value == _maxSubmissionCost.add(_maxGas.mul(_gasPriceBid)), + "INVALID_ETH_VALUE" + ); SubgraphData storage subgraphData = _getSubgraphOrRevert(_subgraphID); // This is just like onlySubgraphAuth, but we want it to run after the subgraphMigratedToL2 check @@ -115,6 +121,8 @@ contract L1GNS is GNS, L1GNSV1Storage { * that the retryable ticket is redeemed before expiration, or the signal will be lost. * It is up to the caller to verify that the subgraph migration was finished in L2, * but if it wasn't, the tokens will be sent to the beneficiary in L2. + * Note that any L2 gas/fee refunds will be lost, so the function only accepts + * the exact amount of ETH to cover _maxSubmissionCost + _maxGas * _gasPriceBid. * @dev Use the Arbitrum SDK to estimate the L2 retryable ticket parameters. 
* @param _subgraphID Subgraph ID * @param _beneficiary Address that will receive the tokens in L2 @@ -130,7 +138,10 @@ contract L1GNS is GNS, L1GNSV1Storage { uint256 _maxSubmissionCost ) external payable notPartialPaused { require(subgraphMigratedToL2[_subgraphID], "!MIGRATED"); - + require( + msg.value == _maxSubmissionCost.add(_maxGas.mul(_gasPriceBid)), + "INVALID_ETH_VALUE" + ); // The Arbitrum bridge will check this too, we just check here for an early exit require(_maxSubmissionCost != 0, "NO_SUBMISSION_COST"); diff --git a/test/gns.test.ts b/test/gns.test.ts index 93ea11d57..ce74c8e26 100644 --- a/test/gns.test.ts +++ b/test/gns.test.ts @@ -1236,6 +1236,19 @@ describe('L1GNS', () => { await expect(tx).revertedWith('GNS: Must be active') }) + it('rejects calls with more ETH than maxSubmissionCost + maxGas * gasPriceBid', async function () { + const subgraph0 = await publishAndCurateOnSubgraph() + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + const tx = gns + .connect(me.signer) + .sendSubgraphToL2(subgraph0.id, me.address, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(maxGas.mul(gasPriceBid)).add(toBN('1')), + }) + await expect(tx).revertedWith('INVALID_ETH_VALUE') + }) it('does not allow curators to burn signal after sending', async function () { const subgraph0 = await publishAndCurateOnSubgraph() @@ -1614,6 +1627,28 @@ describe('L1GNS', () => { await expect(tx).revertedWith('NO_SUBMISSION_COST') }) + it('rejects calls with more ETH than maxSubmissionCost + maxGas * gasPriceBid', async function () { + const subgraph0 = await publishCurateAndSendSubgraph() + + const maxSubmissionCost = toBN('100') + const maxGas = toBN('10') + const gasPriceBid = toBN('20') + + const tx = gns + .connect(me.signer) + .sendCuratorBalanceToBeneficiaryOnL2( + subgraph0.id, + other.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: 
maxSubmissionCost.add(maxGas.mul(gasPriceBid)).add(toBN('1')), + }, + ) + + await expect(tx).revertedWith('INVALID_ETH_VALUE') + }) it('rejects calls if the curator has withdrawn the GRT', async function () { const subgraph0 = await publishCurateAndSendSubgraph() const afterSubgraph = await gns.subgraphs(subgraph0.id) From ff8ce4d7882e11ad03e66c1a2483436397c6f92c Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 13 Apr 2023 18:19:21 -0300 Subject: [PATCH 095/112] fix(L1Staking): require exact msg.value for migration (OZ M-01) --- contracts/staking/L1Staking.sol | 16 +++- test/staking/migration.test.ts | 137 +++++++++++++++++++++++++++++++- 2 files changed, 149 insertions(+), 4 deletions(-) diff --git a/contracts/staking/L1Staking.sol b/contracts/staking/L1Staking.sol index e1ec82e95..605948889 100644 --- a/contracts/staking/L1Staking.sol +++ b/contracts/staking/L1Staking.sol @@ -57,7 +57,8 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { * Since the indexer address might be an L1-only contract, the function takes a beneficiary * address that will be the indexer's address in L2. * The caller must provide an amount of ETH to use for the L2 retryable ticket, that - * must be at least `_maxSubmissionCost + _gasPriceBid * _maxGas`. + * must be at _exactly_ `_maxSubmissionCost + _gasPriceBid * _maxGas`. + * Any refunds for the submission fee or L2 gas will be lost. * @param _l2Beneficiary Address of the indexer in L2. If the indexer has previously migrated stake, this must match the previously-used value. 
* @param _amount Amount of stake GRT to migrate to L2 * @param _maxGas Max gas to use for the L2 retryable ticket @@ -71,6 +72,10 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { uint256 _gasPriceBid, uint256 _maxSubmissionCost ) external payable override { + require( + msg.value == _maxSubmissionCost.add(_gasPriceBid.mul(_maxGas)), + "INVALID_ETH_AMOUNT" + ); _migrateStakeToL2( msg.sender, _l2Beneficiary, @@ -94,6 +99,7 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { * The ETH for the L2 gas will be pulled from the L1GraphTokenLockMigrator, so the owner of * the GraphTokenLockWallet must have previously deposited at least `_maxSubmissionCost + _gasPriceBid * _maxGas` * ETH into the L1GraphTokenLockMigrator contract (using its depositETH function). + * Any refunds for the submission fee or L2 gas will be lost. * @param _amount Amount of stake GRT to migrate to L2 * @param _maxGas Max gas to use for the L2 retryable ticket * @param _gasPriceBid Gas price bid for the L2 retryable ticket @@ -130,7 +136,8 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { * Since the delegator's address might be an L1-only contract, the function takes a beneficiary * address that will be the delegator's address in L2. * The caller must provide an amount of ETH to use for the L2 retryable ticket, that - * must be at least `_maxSubmissionCost + _gasPriceBid * _maxGas`. + * must be _exactly_ `_maxSubmissionCost + _gasPriceBid * _maxGas`. + * Any refunds for the submission fee or L2 gas will be lost. 
* @param _indexer Address of the indexer (in L1, before migrating) * @param _l2Beneficiary Address of the delegator in L2 * @param _maxGas Max gas to use for the L2 retryable ticket @@ -144,6 +151,10 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { uint256 _gasPriceBid, uint256 _maxSubmissionCost ) external payable override { + require( + msg.value == _maxSubmissionCost.add(_gasPriceBid.mul(_maxGas)), + "INVALID_ETH_AMOUNT" + ); _migrateDelegationToL2( msg.sender, _indexer, @@ -166,6 +177,7 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { * The ETH for the L2 gas will be pulled from the L1GraphTokenLockMigrator, so the owner of * the GraphTokenLockWallet must have previously deposited at least `_maxSubmissionCost + _gasPriceBid * _maxGas` * ETH into the L1GraphTokenLockMigrator contract (using its depositETH function). + * Any refunds for the submission fee or L2 gas will be lost. * @param _indexer Address of the indexer (in L1, before migrating) * @param _maxGas Max gas to use for the L2 retryable ticket * @param _gasPriceBid Gas price bid for the L2 retryable ticket diff --git a/test/staking/migration.test.ts b/test/staking/migration.test.ts index d1e0a4a08..6105296b1 100644 --- a/test/staking/migration.test.ts +++ b/test/staking/migration.test.ts @@ -153,6 +153,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('tokensStaked == 0') }) @@ -171,7 +174,9 @@ describe('L1Staking:Migration', () => { const expectedSeqNum = options.expectedSeqNum ?? 
1 const tx = staking .connect(indexer.signer) - .migrateStakeToL2(l2Beneficiary, amountToSend, maxGas, gasPriceBid, maxSubmissionCost) + .migrateStakeToL2(l2Beneficiary, amountToSend, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }) const expectedFunctionData = defaultAbiCoder.encode(['tuple(address)'], [[l2Indexer.address]]) const expectedCallhookData = defaultAbiCoder.encode( @@ -222,6 +227,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('!minimumIndexerStake remaining') }) @@ -234,6 +242,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('!minimumIndexerStake sent') }) @@ -247,13 +258,18 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('tokensLocked != 0') }) it('should not allow migrating to a beneficiary that is address zero', async function () { const tx = staking .connect(indexer.signer) - .migrateStakeToL2(AddressZero, tokensToStake, maxGas, gasPriceBid, maxSubmissionCost) + .migrateStakeToL2(AddressZero, tokensToStake, maxGas, gasPriceBid, maxSubmissionCost, { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }) await expect(tx).revertedWith('l2Beneficiary == 0') }) it('should not allow migrating the whole stake if there are open allocations', async function () { @@ -266,6 +282,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('allocated') }) @@ -287,9 +306,27 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: 
maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('! allocation capacity') }) + it('should not allow migrating if the ETH sent is more than required', async function () { + const tx = staking + .connect(indexer.signer) + .migrateStakeToL2( + indexer.address, + tokensToStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)).add(1), + }, + ) + await expect(tx).revertedWith('INVALID_ETH_AMOUNT') + }) it('sends the tokens and a message through the L1GraphTokenGateway', async function () { const amountToSend = minimumIndexerStake await shouldMigrateIndexerStake(amountToSend) @@ -338,6 +375,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('l2Beneficiary != previous') }) @@ -403,6 +443,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await staking.connect(delegator.signer).undelegate(indexer.address, actualDelegation) const tx = await staking @@ -437,6 +480,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) const tx = staking .connect(delegator.signer) @@ -454,6 +500,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) const tx = staking .connect(delegator.signer) @@ -469,6 +518,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) const tx = staking .connect(delegator.signer) @@ -490,6 +542,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await 
expect(tx).revertedWith('indexer not migrated') }) @@ -504,6 +559,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) const tx = staking @@ -514,6 +572,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('l2Beneficiary == 0') }) @@ -528,6 +589,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await staking.connect(delegator.signer).undelegate(indexer.address, toGRT('1')) @@ -539,6 +603,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('tokensLocked != 0') }) @@ -551,6 +618,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) const tx = staking @@ -561,6 +631,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('delegation == 0') }) @@ -578,6 +651,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) const expectedFunctionData = defaultAbiCoder.encode( @@ -605,6 +681,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) // seqNum is 2 because the first bridge call was in migrateStakeToL2 await expect(tx) @@ -631,6 +710,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await staking @@ 
-641,6 +723,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) const tx = staking @@ -651,6 +736,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx).revertedWith('delegation == 0') }) @@ -668,6 +756,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await staking @@ -678,6 +769,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) @@ -690,6 +784,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) await expect(tx) .emit(staking, 'DelegationMigratedToL2') @@ -701,6 +798,36 @@ describe('L1Staking:Migration', () => { actualDelegation, ) }) + it('rejects calls if the ETH value is larger than expected', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, + ) + + const tx = staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + l2Delegator.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)).add(1), + }, + ) + await expect(tx).revertedWith('INVALID_ETH_AMOUNT') + }) }) describe('migrateLockedDelegationToL2', function () { it('sends delegated tokens to L2 like 
migrateDelegationToL2, but gets the beneficiary and ETH from the L1GraphTokenLockMigrator', async function () { @@ -718,6 +845,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) const expectedFunctionData = defaultAbiCoder.encode( @@ -774,6 +904,9 @@ describe('L1Staking:Migration', () => { maxGas, gasPriceBid, maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, ) const tx = staking From ab4c92ee5260893ea8cafb78757109b43f83d62b Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 13 Apr 2023 18:38:03 -0300 Subject: [PATCH 096/112] fix: make sure delegation params are initialized when migrated stake is received (OZ M-03) --- contracts/l2/staking/L2Staking.sol | 4 +--- contracts/staking/Staking.sol | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/contracts/l2/staking/L2Staking.sol b/contracts/l2/staking/L2Staking.sol index ad1181b92..7e6e3792c 100644 --- a/contracts/l2/staking/L2Staking.sol +++ b/contracts/l2/staking/L2Staking.sol @@ -93,9 +93,7 @@ contract L2Staking is Staking, IL2StakingBase { uint256 _amount, IL2Staking.ReceiveIndexerStakeData memory _indexerData ) internal { - address indexer = _indexerData.indexer; - __stakes[indexer].deposit(_amount); - emit StakeDeposited(indexer, _amount); + _stake(_indexerData.indexer, _amount); } /** diff --git a/contracts/staking/Staking.sol b/contracts/staking/Staking.sol index 313ebd729..08bd2e652 100644 --- a/contracts/staking/Staking.sol +++ b/contracts/staking/Staking.sol @@ -705,7 +705,7 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M * @param _indexer Address of staking party * @param _tokens Amount of tokens to stake */ - function _stake(address _indexer, uint256 _tokens) private { + function _stake(address _indexer, uint256 _tokens) internal { // Deposit tokens into the indexer stake 
__stakes[_indexer].deposit(_tokens); From 1df3808fd4281beef5723ab9c04314764d8180f0 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Thu, 13 Apr 2023 18:45:53 -0300 Subject: [PATCH 097/112] test: check delegation param values in L2Staking --- test/l2/l2Staking.test.ts | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/test/l2/l2Staking.test.ts b/test/l2/l2Staking.test.ts index 2d967bfcb..8977f8325 100644 --- a/test/l2/l2Staking.test.ts +++ b/test/l2/l2Staking.test.ts @@ -149,6 +149,9 @@ describe('L2Staking', () => { .withArgs(mockL1GRT.address, mockL1Staking.address, staking.address, tokens100k) await expect(tx).emit(staking, 'StakeDeposited').withArgs(me.address, tokens100k) expect(await staking.getIndexerStakedTokens(me.address)).to.equal(tokens100k) + const delegationPool = await staking.delegationPools(me.address) + expect(delegationPool.indexingRewardCut).eq(toBN(1000000)) // 1 in PPM + expect(delegationPool.queryFeeCut).eq(toBN(1000000)) // 1 in PPM }) it('adds stake to an existing indexer that was already migrated', async function () { const functionData = defaultAbiCoder.encode(['tuple(address)'], [[me.address]]) @@ -176,7 +179,7 @@ describe('L2Staking', () => { await expect(tx).emit(staking, 'StakeDeposited').withArgs(me.address, tokens100k) expect(await staking.getIndexerStakedTokens(me.address)).to.equal(tokens100k.add(tokens100k)) }) - it('adds stake to an existing indexer that was staked in L2', async function () { + it('adds stake to an existing indexer that was staked in L2 (without changing delegation params)', async function () { const functionData = defaultAbiCoder.encode(['tuple(address)'], [[me.address]]) const callhookData = defaultAbiCoder.encode( @@ -184,6 +187,7 @@ describe('L2Staking', () => { [toBN(0), functionData], // code = 1 means RECEIVE_INDEXER_CODE ) await staking.connect(me.signer).stake(tokens100k) + await staking.connect(me.signer).setDelegationParameters(1000, 1000, 1000) const tx = 
gatewayFinalizeTransfer( mockL1Staking.address, staking.address, @@ -196,6 +200,9 @@ describe('L2Staking', () => { .withArgs(mockL1GRT.address, mockL1Staking.address, staking.address, tokens100k) await expect(tx).emit(staking, 'StakeDeposited').withArgs(me.address, tokens100k) expect(await staking.getIndexerStakedTokens(me.address)).to.equal(tokens100k.add(tokens100k)) + const delegationPool = await staking.delegationPools(me.address) + expect(delegationPool.indexingRewardCut).eq(toBN(1000)) + expect(delegationPool.queryFeeCut).eq(toBN(1000)) }) }) From c0768521657f3d32b85adc382f6d900f4d68d72b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1s=20Migone?= Date: Fri, 14 Apr 2023 14:27:37 -0300 Subject: [PATCH 098/112] fix: make staking migrators revert if protocol is partially paused (OZ M-07) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Tomás Migone --- contracts/staking/L1Staking.sol | 14 +++--- test/staking/migration.test.ts | 77 ++++++++++++++++++++++++++++----- 2 files changed, 74 insertions(+), 17 deletions(-) diff --git a/contracts/staking/L1Staking.sol b/contracts/staking/L1Staking.sol index 605948889..d2fbdfa51 100644 --- a/contracts/staking/L1Staking.sol +++ b/contracts/staking/L1Staking.sol @@ -71,7 +71,7 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { uint256 _maxGas, uint256 _gasPriceBid, uint256 _maxSubmissionCost - ) external payable override { + ) external payable override notPartialPaused { require( msg.value == _maxSubmissionCost.add(_gasPriceBid.mul(_maxGas)), "INVALID_ETH_AMOUNT" @@ -110,7 +110,7 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { uint256 _maxGas, uint256 _gasPriceBid, uint256 _maxSubmissionCost - ) external override { + ) external override notPartialPaused { address l2Beneficiary = l1GraphTokenLockMigrator.migratedWalletAddress(msg.sender); require(l2Beneficiary != address(0), "LOCK NOT MIGRATED"); uint256 balance = 
address(this).balance; @@ -150,7 +150,7 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { uint256 _maxGas, uint256 _gasPriceBid, uint256 _maxSubmissionCost - ) external payable override { + ) external payable override notPartialPaused { require( msg.value == _maxSubmissionCost.add(_gasPriceBid.mul(_maxGas)), "INVALID_ETH_AMOUNT" @@ -188,7 +188,7 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { uint256 _maxGas, uint256 _gasPriceBid, uint256 _maxSubmissionCost - ) external override { + ) external override notPartialPaused { address l2Beneficiary = l1GraphTokenLockMigrator.migratedWalletAddress(msg.sender); require(l2Beneficiary != address(0), "LOCK NOT MIGRATED"); uint256 balance = address(this).balance; @@ -215,7 +215,11 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { * and can be withdrawn with `withdrawDelegated()` immediately after calling this. * @param _indexer Address of the indexer (in L1, before migrating) */ - function unlockDelegationToMigratedIndexer(address _indexer) external override { + function unlockDelegationToMigratedIndexer(address _indexer) + external + override + notPartialPaused + { require( indexerMigratedToL2[_indexer] != address(0) && __stakes[_indexer].tokensStaked == 0, "indexer not migrated" diff --git a/test/staking/migration.test.ts b/test/staking/migration.test.ts index 6105296b1..68ccb9d9e 100644 --- a/test/staking/migration.test.ts +++ b/test/staking/migration.test.ts @@ -1,21 +1,19 @@ import { expect } from 'chai' -import { constants, BigNumber, Event } from 'ethers' -import { defaultAbiCoder, ParamType, parseEther } from 'ethers/lib/utils' +import { constants, BigNumber } from 'ethers' +import { defaultAbiCoder, parseEther } from 'ethers/lib/utils' import { GraphToken } from '../../build/types/GraphToken' import { IL1Staking } from '../../build/types/IL1Staking' -import { IStaking } from '../../build/types/IStaking' +import { IController } from 
'../../build/types/IController' import { L1GraphTokenGateway } from '../../build/types/L1GraphTokenGateway' import { L1GraphTokenLockMigratorMock } from '../../build/types/L1GraphTokenLockMigratorMock' import { ArbitrumL1Mocks, L1FixtureContracts, NetworkFixture } from '../lib/fixtures' import { - advanceBlockTo, deriveChannelKey, getAccounts, randomHexBytes, - latestBlock, toBN, toGRT, provider, @@ -25,7 +23,7 @@ import { } from '../lib/testHelpers' import { deployContract } from '../lib/deployment' -const { AddressZero, MaxUint256 } = constants +const { AddressZero } = constants describe('L1Staking:Migration', () => { let me: Account @@ -46,6 +44,7 @@ describe('L1Staking:Migration', () => { let grt: GraphToken let staking: IL1Staking + let controller: IController let l1GraphTokenGateway: L1GraphTokenGateway let arbitrumMocks: ArbitrumL1Mocks let l1GraphTokenLockMigrator: L1GraphTokenLockMigratorMock @@ -97,7 +96,7 @@ describe('L1Staking:Migration', () => { fixture = new NetworkFixture() fixtureContracts = await fixture.load(governor.signer, slasher.signer) - ;({ grt, staking, l1GraphTokenGateway } = fixtureContracts) + ;({ grt, staking, l1GraphTokenGateway, controller } = fixtureContracts) // Dummy code on the mock router so that it appears as a contract await provider().send('hardhat_setCode', [mockRouter.address, '0x1234']) arbitrumMocks = await fixture.loadArbitrumL1Mocks(governor.signer) @@ -218,6 +217,23 @@ describe('L1Staking:Migration', () => { }) }) describe('migrateStakeToL2', function () { + it('should not allow migrating if the protocol is partially paused', async function () { + await controller.setPartialPaused(true) + + const tx = staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + tokensToStake.sub(minimumIndexerStake), + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, + ) + await expect(tx).revertedWith('Partial-paused') + }) it('should not allow migrating but 
leaving less than the minimum indexer stake', async function () { const tx = staking .connect(indexer.signer) @@ -384,6 +400,14 @@ describe('L1Staking:Migration', () => { }) describe('migrateLockedStakeToL2', function () { + it('should not allow migrating if the protocol is partially paused', async function () { + await controller.setPartialPaused(true) + + const tx = staking + .connect(indexer.signer) + .migrateLockedStakeToL2(minimumIndexerStake, maxGas, gasPriceBid, maxSubmissionCost) + await expect(tx).revertedWith('Partial-paused') + }) it('sends a message through L1GraphTokenGateway like migrateStakeToL2, but gets the beneficiary and ETH from a migrator contract', async function () { const amountToSend = minimumIndexerStake @@ -417,8 +441,6 @@ describe('L1Staking:Migration', () => { ) }) it('should not allow migrating if the migrator contract returns a zero address beneficiary', async function () { - const amountToSend = minimumIndexerStake - const tx = staking .connect(indexer.signer) .migrateLockedStakeToL2(minimumIndexerStake, maxGas, gasPriceBid, maxSubmissionCost) @@ -461,6 +483,14 @@ describe('L1Staking:Migration', () => { .emit(staking, 'StakeDelegatedWithdrawn') .withArgs(indexer.address, delegator.address, actualDelegation) }) + it('rejects calls if the protocol is partially paused', async function () { + await controller.setPartialPaused(true) + + const tx = staking + .connect(delegator.signer) + .unlockDelegationToMigratedIndexer(indexer.address) + await expect(tx).revertedWith('Partial-paused') + }) it('rejects calls if the indexer has not migrated their stake', async function () { const tokensToDelegate = toGRT('10000') await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) @@ -530,6 +560,23 @@ describe('L1Staking:Migration', () => { }) }) describe('migrateDelegationToL2', function () { + it('rejects calls if the protocol is partially paused', async function () { + await controller.setPartialPaused(true) + + const tx 
= staking + .connect(delegator.signer) + .migrateDelegationToL2( + indexer.address, + l2Delegator.address, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, + ) + await expect(tx).revertedWith('Partial-paused') + }) it('rejects calls if the delegated indexer has not migrated stake to L2', async function () { const tokensToDelegate = toGRT('10000') await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) @@ -830,6 +877,14 @@ describe('L1Staking:Migration', () => { }) }) describe('migrateLockedDelegationToL2', function () { + it('rejects calls if the protocol is partially paused', async function () { + await controller.setPartialPaused(true) + + const tx = staking + .connect(delegator.signer) + .migrateLockedDelegationToL2(indexer.address, maxGas, gasPriceBid, maxSubmissionCost) + await expect(tx).revertedWith('Partial-paused') + }) it('sends delegated tokens to L2 like migrateDelegationToL2, but gets the beneficiary and ETH from the L1GraphTokenLockMigrator', async function () { const tokensToDelegate = toGRT('10000') await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) @@ -893,9 +948,7 @@ describe('L1Staking:Migration', () => { it('rejects calls if the migrator contract returns a zero address beneficiary', async function () { const tokensToDelegate = toGRT('10000') await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) - const actualDelegation = tokensToDelegate.sub( - tokensToDelegate.mul(delegationTaxPPM).div(1000000), - ) + await staking .connect(indexer.signer) .migrateStakeToL2( From ced9d178ad44ceee602018d6401723f39cffdc05 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 14 Apr 2023 16:13:44 -0300 Subject: [PATCH 099/112] fix: enforce minimum stake when restaking (OZ M-08) --- contracts/staking/Staking.sol | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git 
a/contracts/staking/Staking.sol b/contracts/staking/Staking.sol index 08bd2e652..67566f7a0 100644 --- a/contracts/staking/Staking.sol +++ b/contracts/staking/Staking.sol @@ -156,7 +156,9 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M } /** - * @notice Set the minimum stake required to be an indexer. + * @notice Set the minimum stake required to be an indexer. Note the + * minimum stake in L2 MUST be the same as in L1, or migrated stake might + * fail to be redeemed in L2. * @param _minimumIndexerStake Minimum indexer stake */ function setMinimumIndexerStake(uint256 _minimumIndexerStake) external override onlyGovernor { @@ -527,12 +529,6 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M function stakeTo(address _indexer, uint256 _tokens) public override notPartialPaused { require(_tokens > 0, "!tokens"); - // Ensure minimum stake - require( - __stakes[_indexer].tokensSecureStake().add(_tokens) >= __minimumIndexerStake, - "!minimumIndexerStake" - ); - // Transfer tokens to stake from caller to this contract TokenUtils.pullTokens(graphToken(), msg.sender, _tokens); @@ -709,6 +705,12 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M // Deposit tokens into the indexer stake __stakes[_indexer].deposit(_tokens); + // Ensure minimum stake + require( + __stakes[_indexer].tokensSecureStake().add(_tokens) >= __minimumIndexerStake, + "!minimumIndexerStake" + ); + // Initialize the delegation pool the first time if (__delegationPools[_indexer].updatedAtBlock == 0) { _setDelegationParameters(_indexer, MAX_PPM, MAX_PPM, __delegationParametersCooldown); From 520ff1deb6430b9bdc305bc09bf7d1b3faa669a8 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 14 Apr 2023 16:28:40 -0300 Subject: [PATCH 100/112] fix: do not enforce minimum stake when receiving from L1 --- contracts/l2/staking/L2Staking.sol | 11 ++++++++++- contracts/staking/Staking.sol | 8 +++----- 2 files 
changed, 13 insertions(+), 6 deletions(-) diff --git a/contracts/l2/staking/L2Staking.sol b/contracts/l2/staking/L2Staking.sol index 7e6e3792c..2c1cd3979 100644 --- a/contracts/l2/staking/L2Staking.sol +++ b/contracts/l2/staking/L2Staking.sol @@ -93,7 +93,16 @@ contract L2Staking is Staking, IL2StakingBase { uint256 _amount, IL2Staking.ReceiveIndexerStakeData memory _indexerData ) internal { - _stake(_indexerData.indexer, _amount); + address _indexer = _indexerData.indexer; + // Deposit tokens into the indexer stake + __stakes[_indexer].deposit(_amount); + + // Initialize the delegation pool the first time + if (__delegationPools[_indexer].updatedAtBlock == 0) { + _setDelegationParameters(_indexer, MAX_PPM, MAX_PPM, __delegationParametersCooldown); + } + + emit StakeDeposited(_indexer, _amount); } /** diff --git a/contracts/staking/Staking.sol b/contracts/staking/Staking.sol index 67566f7a0..b2f8eeaed 100644 --- a/contracts/staking/Staking.sol +++ b/contracts/staking/Staking.sol @@ -35,7 +35,7 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M using Rebates for Rebates.Pool; /// @dev 100% in parts per million - uint32 private constant MAX_PPM = 1000000; + uint32 internal constant MAX_PPM = 1000000; // -- Events are declared in IStakingBase -- // @@ -156,9 +156,7 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M } /** - * @notice Set the minimum stake required to be an indexer. Note the - * minimum stake in L2 MUST be the same as in L1, or migrated stake might - * fail to be redeemed in L2. + * @notice Set the minimum stake required to be an indexer. 
* @param _minimumIndexerStake Minimum indexer stake */ function setMinimumIndexerStake(uint256 _minimumIndexerStake) external override onlyGovernor { @@ -663,7 +661,7 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M uint32 _indexingRewardCut, uint32 _queryFeeCut, uint32 _cooldownBlocks - ) private { + ) internal { // Incentives must be within bounds require(_queryFeeCut <= MAX_PPM, ">queryFeeCut"); require(_indexingRewardCut <= MAX_PPM, ">indexingRewardCut"); From 79fbd325f820872d0afb1fa1a6ecddff7abe53df Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 14 Apr 2023 17:23:43 -0300 Subject: [PATCH 101/112] fix: return migrated delegation to the delegator if it would mint zero shares (OZ M-09) --- contracts/l2/staking/IL2StakingBase.sol | 6 ++- contracts/l2/staking/L2Staking.sol | 31 ++++++++--- test/l2/l2Staking.test.ts | 69 +++++++++++++++++++++++++ test/lib/fixtures.ts | 4 +- 4 files changed, 98 insertions(+), 12 deletions(-) diff --git a/contracts/l2/staking/IL2StakingBase.sol b/contracts/l2/staking/IL2StakingBase.sol index edf19874d..e8d51069c 100644 --- a/contracts/l2/staking/IL2StakingBase.sol +++ b/contracts/l2/staking/IL2StakingBase.sol @@ -10,5 +10,9 @@ import { ICallhookReceiver } from "../../gateway/ICallhookReceiver.sol"; * @dev Note it includes only the L2-specific functionality, not the full IStaking interface. */ interface IL2StakingBase is ICallhookReceiver { - // Nothing to see here + event MigratedDelegationReturnedToDelegator( + address indexed indexer, + address indexed delegator, + uint256 amount + ); } diff --git a/contracts/l2/staking/L2Staking.sol b/contracts/l2/staking/L2Staking.sol index 2c1cd3979..754a774c6 100644 --- a/contracts/l2/staking/L2Staking.sol +++ b/contracts/l2/staking/L2Staking.sol @@ -124,13 +124,28 @@ contract L2Staking is Staking, IL2StakingBase { // Calculate shares to issue (without applying any delegation tax) uint256 shares = (pool.tokens == 0) ? 
_amount : _amount.mul(pool.shares).div(pool.tokens); - // Update the delegation pool - pool.tokens = pool.tokens.add(_amount); - pool.shares = pool.shares.add(shares); - - // Update the individual delegation - delegation.shares = delegation.shares.add(shares); - - emit StakeDelegated(_delegationData.indexer, _delegationData.delegator, _amount, shares); + if (shares == 0) { + // If no shares would be issued (probably a rounding issue or attack), return the tokens to the delegator + graphToken().transfer(_delegationData.delegator, _amount); + emit MigratedDelegationReturnedToDelegator( + _delegationData.indexer, + _delegationData.delegator, + _amount + ); + } else { + // Update the delegation pool + pool.tokens = pool.tokens.add(_amount); + pool.shares = pool.shares.add(shares); + + // Update the individual delegation + delegation.shares = delegation.shares.add(shares); + + emit StakeDelegated( + _delegationData.indexer, + _delegationData.delegator, + _amount, + shares + ); + } } } diff --git a/test/l2/l2Staking.test.ts b/test/l2/l2Staking.test.ts index 8977f8325..1a1e18839 100644 --- a/test/l2/l2Staking.test.ts +++ b/test/l2/l2Staking.test.ts @@ -10,6 +10,9 @@ import { setAccountBalance, latestBlock, advanceBlocks, + deriveChannelKey, + randomHexBytes, + advanceToNextEpoch, } from '../lib/testHelpers' import { L2FixtureContracts, NetworkFixture } from '../lib/fixtures' import { toBN } from '../lib/testHelpers' @@ -20,6 +23,11 @@ import { GraphToken } from '../../build/types/GraphToken' const { AddressZero } = ethers.constants +const subgraphDeploymentID = randomHexBytes() +const channelKey = deriveChannelKey() +const allocationID = channelKey.address +const metadata = randomHexBytes(32) + describe('L2Staking', () => { let me: Account let other: Account @@ -41,6 +49,20 @@ describe('L2Staking', () => { const tokens100k = toGRT('100000') const tokens1m = toGRT('1000000') + // Allocate with test values + const allocate = async (tokens: BigNumber) => { + return staking + 
.connect(me.signer) + .allocateFrom( + me.address, + subgraphDeploymentID, + tokens, + allocationID, + metadata, + await channelKey.generateProof(me.address), + ) + } + const gatewayFinalizeTransfer = async function ( from: string, to: string, @@ -267,6 +289,53 @@ describe('L2Staking', () => { const delegation = await staking.getDelegation(me.address, other.address) expect(delegation.shares).to.equal(expectedTotalShares) }) + it('returns delegation to the delegator if it would produce no shares', async function () { + await fixtureContracts.rewardsManager + .connect(governor.signer) + .setIssuancePerBlock(toGRT('114')) + + await staking.connect(me.signer).stake(tokens100k) + await staking.connect(me.signer).delegate(me.address, toBN(1)) // 1 weiGRT == 1 share + + await staking.connect(me.signer).setDelegationParameters(1000, 1000, 1000) + await grt.connect(me.signer).approve(fixtureContracts.curation.address, tokens10k) + await fixtureContracts.curation.connect(me.signer).mint(subgraphDeploymentID, tokens10k, 0) + + await allocate(tokens100k) + await advanceToNextEpoch(fixtureContracts.epochManager) + await advanceToNextEpoch(fixtureContracts.epochManager) + await staking.connect(me.signer).closeAllocation(allocationID, randomHexBytes(32)) + // Now there are some rewards sent to delegation pool, so 1 weiGRT is less than 1 share + + const functionData = defaultAbiCoder.encode( + ['tuple(address,address)'], + [[me.address, other.address]], + ) + + const callhookData = defaultAbiCoder.encode( + ['uint8', 'bytes'], + [toBN(1), functionData], // code = 1 means RECEIVE_DELEGATION_CODE + ) + const delegatorGRTBalanceBefore = await grt.balanceOf(other.address) + const tx = gatewayFinalizeTransfer( + mockL1Staking.address, + staking.address, + toBN(1), // Less than 1 share! 
+ callhookData, + ) + + await expect(tx) + .emit(l2GraphTokenGateway, 'DepositFinalized') + .withArgs(mockL1GRT.address, mockL1Staking.address, staking.address, toBN(1)) + const delegation = await staking.getDelegation(me.address, other.address) + await expect(tx) + .emit(staking, 'MigratedDelegationReturnedToDelegator') + .withArgs(me.address, other.address, toBN(1)) + + expect(delegation.shares).to.equal(0) + const delegatorGRTBalanceAfter = await grt.balanceOf(other.address) + expect(delegatorGRTBalanceAfter.sub(delegatorGRTBalanceBefore)).to.equal(toBN(1)) + }) }) describe('onTokenTransfer with invalid messages', function () { it('reverts if the code is invalid', async function () { diff --git a/test/lib/fixtures.ts b/test/lib/fixtures.ts index fc73c2129..69dd945b2 100644 --- a/test/lib/fixtures.ts +++ b/test/lib/fixtures.ts @@ -176,9 +176,7 @@ export class NetworkFixture { await staking.connect(deployer).setSlasher(slasherAddress, true) await gns.connect(deployer).approveAll() - if (!isL2) { - await grt.connect(deployer).addMinter(rewardsManager.address) - } + await grt.connect(deployer).addMinter(rewardsManager.address) // Unpause the protocol await controller.connect(deployer).setPaused(false) From 9fa7d69c7b86345a0bb589703830bbe96c2d2d9b Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 14 Apr 2023 18:06:06 -0300 Subject: [PATCH 102/112] test: fix epoch alignment in allocation tests --- test/staking/allocation.test.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/staking/allocation.test.ts b/test/staking/allocation.test.ts index 6482ca9fb..757c5c296 100644 --- a/test/staking/allocation.test.ts +++ b/test/staking/allocation.test.ts @@ -374,6 +374,7 @@ describe('Staking:Allocation', () => { }) it('should allocate', async function () { + await advanceToNextEpoch(epochManager) await shouldAllocate(tokensToAllocate) }) @@ -414,6 +415,7 @@ describe('Staking:Allocation', () => { }) it('reject allocate reusing an allocation ID', async 
function () { + await advanceToNextEpoch(epochManager) const someTokensToAllocate = toGRT('10') await shouldAllocate(someTokensToAllocate) const tx = allocate(someTokensToAllocate) From c4917d22e2976354a8940014a5f757b17ec6dba1 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 14 Apr 2023 18:07:20 -0300 Subject: [PATCH 103/112] fix(Staking): use encodeWithSelector instead of encodeWithSignature (OZ N-03) --- contracts/staking/Staking.sol | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/contracts/staking/Staking.sol b/contracts/staking/Staking.sol index b2f8eeaed..5e21231ad 100644 --- a/contracts/staking/Staking.sol +++ b/contracts/staking/Staking.sol @@ -18,6 +18,7 @@ import { Stakes } from "./libs/Stakes.sol"; import { Managed } from "../governance/Managed.sol"; import { ICuration } from "../curation/ICuration.sol"; import { IRewardsManager } from "../rewards/IRewardsManager.sol"; +import { StakingExtension } from "./StakingExtension.sol"; /** * @title Base Staking contract @@ -123,8 +124,8 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M // solhint-disable-next-line avoid-low-level-calls (bool success, ) = extensionImpl.delegatecall( - abi.encodeWithSignature( - "initialize(uint32,uint32,uint32,uint32)", + abi.encodeWithSelector( + StakingExtension.initialize.selector, _delegationUnbondingPeriod, 0, _delegationRatio, From e8a24d597db0ae17dbe4eced7a9a668c0e1d9c52 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 14 Apr 2023 19:34:04 -0300 Subject: [PATCH 104/112] fix: improve test coverage (OZ N-05) --- .../tests/L1GraphTokenLockMigratorBadMock.sol | 22 ++++ test/staking/allocation.test.ts | 112 ++++++++++++++++++ test/staking/configuration.test.ts | 32 ++++- test/staking/delegation.test.ts | 11 ++ test/staking/migration.test.ts | 44 +++++++ 5 files changed, 219 insertions(+), 2 deletions(-) create mode 100644 contracts/tests/L1GraphTokenLockMigratorBadMock.sol diff --git 
a/contracts/tests/L1GraphTokenLockMigratorBadMock.sol b/contracts/tests/L1GraphTokenLockMigratorBadMock.sol new file mode 100644 index 000000000..d257c0dfb --- /dev/null +++ b/contracts/tests/L1GraphTokenLockMigratorBadMock.sol @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: MIT + +pragma solidity ^0.7.6; +pragma experimental ABIEncoderV2; + +contract L1GraphTokenLockMigratorBadMock { + mapping(address => address) public migratedWalletAddress; + + function setMigratedAddress(address _l1Address, address _l2Address) external { + migratedWalletAddress[_l1Address] = _l2Address; + } + + // Sends 1 wei less than requested + function pullETH(address _l1Wallet, uint256 _amount) external { + require( + migratedWalletAddress[_l1Wallet] != address(0), + "L1GraphTokenLockMigratorMock: unknown L1 wallet" + ); + (bool success, ) = payable(msg.sender).call{ value: _amount - 1 }(""); + require(success, "L1GraphTokenLockMigratorMock: ETH pull failed"); + } +} diff --git a/test/staking/allocation.test.ts b/test/staking/allocation.test.ts index 757c5c296..490100c25 100644 --- a/test/staking/allocation.test.ts +++ b/test/staking/allocation.test.ts @@ -310,6 +310,11 @@ describe('Staking:Allocation', () => { expect(beforeOperator).eq(true) expect(afterOperator).eq(false) }) + it('should reject setting the operator to the msg.sender', async function () { + await expect( + staking.connect(indexer.signer).setOperator(indexer.address, true), + ).to.be.revertedWith('operator == sender') + }) }) describe('rewardsDestination', function () { @@ -958,4 +963,111 @@ describe('Staking:Allocation', () => { }) } }) + + describe('claimMany', function () { + beforeEach(async function () { + // Stake + await staking.connect(indexer.signer).stake(tokensToStake) + + // Set channel dispute period to one epoch + await staking.connect(governor.signer).setChannelDisputeEpochs(toBN('1')) + + // Fund wallets + await grt.connect(governor.signer).mint(assetHolder.address, tokensToCollect.mul(2)) + await 
grt.connect(assetHolder.signer).approve(staking.address, tokensToCollect.mul(2)) + }) + it('should claim many rebates with restake', async function () { + // Allocate + await allocate(toBN(100)) + + // Create a second allocation with a different allocationID + const channelKey2 = deriveChannelKey() + const allocationID2 = channelKey2.address + const metadata2 = randomHexBytes(32) + const poi2 = randomHexBytes() + const subgraphDeploymentID2 = randomHexBytes(32) + + await staking + .connect(indexer.signer) + .allocateFrom( + indexer.address, + subgraphDeploymentID2, + toBN(200), + allocationID2, + metadata2, + await channelKey2.generateProof(indexer.address), + ) + + // Collect some funds + await staking.connect(assetHolder.signer).collect(tokensToCollect, allocationID) + await staking.connect(assetHolder.signer).collect(tokensToCollect, allocationID2) + + // Advance blocks to get the allocation in epoch where it can be closed + await advanceToNextEpoch(epochManager) + + // Close the allocations + await staking.connect(indexer.signer).closeAllocation(allocationID, poi) + await advanceToNextEpoch(epochManager) // Make sure they fall in different rebate pools + await staking.connect(indexer.signer).closeAllocation(allocationID2, poi2) + + // Advance blocks to get the allocation in epoch where it can be claimed + await advanceToNextEpoch(epochManager) + + // Before state + const beforeIndexerStake = await staking.getIndexerStakedTokens(indexer.address) + const beforeAlloc1 = await staking.allocations(allocationID) + const beforeAlloc2 = await staking.allocations(allocationID2) + + // Claim with restake + expect(await staking.getAllocationState(allocationID)).eq(AllocationState.Finalized) + expect(await staking.getAllocationState(allocationID2)).eq(AllocationState.Finalized) + const tx = await staking + .connect(indexer.signer) + .claimMany([allocationID, allocationID2], true) + + // Verify that the claimed tokens are restaked + const afterIndexerStake = await 
staking.getIndexerStakedTokens(indexer.address) + const tokensToClaim = beforeAlloc1.effectiveAllocation.eq(0) + ? toBN(0) + : beforeAlloc1.collectedFees + const tokensToClaim2 = beforeAlloc2.effectiveAllocation.eq(0) + ? toBN(0) + : beforeAlloc2.collectedFees + expect(afterIndexerStake).eq(beforeIndexerStake.add(tokensToClaim).add(tokensToClaim2)) + }) + }) + + describe('isAllocation', function () { + it('should return true if allocation exists', async function () { + // Allocate + await staking.connect(indexer.signer).stake(tokensToStake) + await allocate(toBN(100)) + + // Check + expect(await staking.isAllocation(allocationID)).eq(true) + }) + it('should still return true after an allocation is closed', async function () { + // Allocate + await staking.connect(indexer.signer).stake(tokensToStake) + await allocate(toBN(100)) + + // Collect some funds + await grt.connect(governor.signer).mint(assetHolder.address, tokensToCollect) + await grt.connect(assetHolder.signer).approve(staking.address, tokensToCollect) + await staking.connect(assetHolder.signer).collect(tokensToCollect, allocationID) + + // Advance blocks to get the allocation in epoch where it can be closed + await advanceToNextEpoch(epochManager) + + // Close the allocation + await staking.connect(indexer.signer).closeAllocation(allocationID, poi) + + // Check + expect(await staking.isAllocation(allocationID)).eq(true) + }) + it('should return false if allocation does not exist', async function () { + // Check + expect(await staking.isAllocation(allocationID)).eq(false) + }) + }) }) diff --git a/test/staking/configuration.test.ts b/test/staking/configuration.test.ts index 9f86edd8d..46a465701 100644 --- a/test/staking/configuration.test.ts +++ b/test/staking/configuration.test.ts @@ -1,11 +1,15 @@ import { expect } from 'chai' +import { ethers } from 'hardhat' import { constants } from 'ethers' import { IStaking } from '../../build/types/IStaking' -import { defaults } from '../lib/deployment' +import { 
defaults, deployContract } from '../lib/deployment' import { NetworkFixture } from '../lib/fixtures' import { getAccounts, toBN, toGRT, Account } from '../lib/testHelpers' +import { GraphProxy } from '../../build/types/GraphProxy' +import { GraphProxyAdmin } from '../../build/types/GraphProxyAdmin' +import { network } from '../../cli' const { AddressZero } = constants @@ -20,12 +24,13 @@ describe('Staking:Config', () => { let fixture: NetworkFixture let staking: IStaking + let proxyAdmin: GraphProxyAdmin before(async function () { ;[me, other, governor, slasher] = await getAccounts() fixture = new NetworkFixture() - ;({ staking } = await fixture.load(governor.signer, slasher.signer)) + ;({ staking, proxyAdmin } = await fixture.load(governor.signer, slasher.signer)) }) beforeEach(async function () { @@ -228,4 +233,27 @@ describe('Staking:Config', () => { await expect(tx).revertedWith('Only Controller governor') }) }) + + describe('Staking and StakingExtension', function () { + it('does not allow calling the fallback from the Staking implementation', async function () { + const impl = await proxyAdmin.getProxyImplementation(staking.address) + + const factory = await ethers.getContractFactory('StakingExtension') + const implAsStaking = factory.attach(impl) as IStaking + const tx = implAsStaking.connect(other.signer).setDelegationRatio(50) + await expect(tx).revertedWith('only through proxy') + }) + it('can set the staking extension implementation with setExtensionImpl', async function () { + const newImpl = await network.deployContract('StakingExtension', [], governor.signer) + const tx = await staking.connect(governor.signer).setExtensionImpl(newImpl.contract.address) + await expect(tx) + .emit(staking, 'ExtensionImplementationSet') + .withArgs(newImpl.contract.address) + }) + it('rejects calls to setExtensionImpl from non-governor', async function () { + const newImpl = await network.deployContract('StakingExtension', [], governor.signer) + const tx = 
staking.connect(other.signer).setExtensionImpl(newImpl.contract.address) + await expect(tx).revertedWith('Only Controller governor') + }) + }) }) diff --git a/test/staking/delegation.test.ts b/test/staking/delegation.test.ts index 3ad68cadb..8c8310443 100644 --- a/test/staking/delegation.test.ts +++ b/test/staking/delegation.test.ts @@ -675,4 +675,15 @@ describe('Staking::Delegation', () => { await expect(tx).revertedWith('!shares') }) }) + describe('isDelegator', function () { + it('should return true if the address is a delegator', async function () { + await staking.connect(indexer.signer).stake(toGRT('1000')) + await shouldDelegate(delegator, toGRT('1')) + expect(await staking.isDelegator(indexer.address, delegator.address)).eq(true) + }) + + it('should return false if the address is not a delegator', async function () { + expect(await staking.isDelegator(indexer.address, delegator.address)).eq(false) + }) + }) }) diff --git a/test/staking/migration.test.ts b/test/staking/migration.test.ts index 68ccb9d9e..4ac5f7927 100644 --- a/test/staking/migration.test.ts +++ b/test/staking/migration.test.ts @@ -7,6 +7,7 @@ import { IL1Staking } from '../../build/types/IL1Staking' import { IController } from '../../build/types/IController' import { L1GraphTokenGateway } from '../../build/types/L1GraphTokenGateway' import { L1GraphTokenLockMigratorMock } from '../../build/types/L1GraphTokenLockMigratorMock' +import { L1GraphTokenLockMigratorBadMock } from '../../build/types/L1GraphTokenLockMigratorBadMock' import { ArbitrumL1Mocks, L1FixtureContracts, NetworkFixture } from '../lib/fixtures' @@ -48,6 +49,7 @@ describe('L1Staking:Migration', () => { let l1GraphTokenGateway: L1GraphTokenGateway let arbitrumMocks: ArbitrumL1Mocks let l1GraphTokenLockMigrator: L1GraphTokenLockMigratorMock + let l1GraphTokenLockMigratorBad: L1GraphTokenLockMigratorBadMock // Test values const indexerTokens = toGRT('10000000') @@ -116,7 +118,13 @@ describe('L1Staking:Migration', () => { 
governor.signer, )) as unknown as L1GraphTokenLockMigratorMock + l1GraphTokenLockMigratorBad = (await deployContract( + 'L1GraphTokenLockMigratorBadMock', + governor.signer, + )) as unknown as L1GraphTokenLockMigratorBadMock + await setAccountBalance(l1GraphTokenLockMigrator.address, parseEther('1')) + await setAccountBalance(l1GraphTokenLockMigratorBad.address, parseEther('1')) await staking .connect(governor.signer) @@ -446,6 +454,16 @@ describe('L1Staking:Migration', () => { .migrateLockedStakeToL2(minimumIndexerStake, maxGas, gasPriceBid, maxSubmissionCost) await expect(tx).revertedWith('LOCK NOT MIGRATED') }) + it('should not allow migrating if the migrator contract does not provide enough ETH', async function () { + await staking + .connect(governor.signer) + .setL1GraphTokenLockMigrator(l1GraphTokenLockMigratorBad.address) + await l1GraphTokenLockMigratorBad.setMigratedAddress(indexer.address, l2Indexer.address) + const tx = staking + .connect(indexer.signer) + .migrateLockedStakeToL2(minimumIndexerStake, maxGas, gasPriceBid, maxSubmissionCost) + await expect(tx).revertedWith('ETH TRANSFER FAILED') + }) }) describe('unlockDelegationToMigratedIndexer', function () { beforeEach(async function () { @@ -967,6 +985,32 @@ describe('L1Staking:Migration', () => { .migrateLockedDelegationToL2(indexer.address, maxGas, gasPriceBid, maxSubmissionCost) await expect(tx).revertedWith('LOCK NOT MIGRATED') }) + it('rejects calls if the migrator contract does not provide enough ETH', async function () { + const tokensToDelegate = toGRT('10000') + await staking.connect(delegator.signer).delegate(indexer.address, tokensToDelegate) + + await staking + .connect(indexer.signer) + .migrateStakeToL2( + l2Indexer.address, + minimumIndexerStake, + maxGas, + gasPriceBid, + maxSubmissionCost, + { + value: maxSubmissionCost.add(gasPriceBid.mul(maxGas)), + }, + ) + await staking + .connect(governor.signer) + .setL1GraphTokenLockMigrator(l1GraphTokenLockMigratorBad.address) + + await 
l1GraphTokenLockMigratorBad.setMigratedAddress(delegator.address, l2Delegator.address) + const tx = staking + .connect(delegator.signer) + .migrateLockedDelegationToL2(indexer.address, maxGas, gasPriceBid, maxSubmissionCost) + await expect(tx).revertedWith('ETH TRANSFER FAILED') + }) }) }) }) From 63f64ed36a2073c592280778bf0a8c9b88896549 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 14 Apr 2023 19:40:53 -0300 Subject: [PATCH 105/112] fix: use tokensLocked to check that delegation is not locked (OZ N-08) --- contracts/staking/L1Staking.sol | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contracts/staking/L1Staking.sol b/contracts/staking/L1Staking.sol index d2fbdfa51..8a9d44d8e 100644 --- a/contracts/staking/L1Staking.sol +++ b/contracts/staking/L1Staking.sol @@ -226,7 +226,7 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { ); Delegation storage delegation = __delegationPools[_indexer].delegators[msg.sender]; - require(delegation.tokensLockedUntil != 0, "! locked"); + require(delegation.tokensLocked != 0, "! 
locked"); // Unlock the delegation delegation.tokensLockedUntil = epochManager().currentEpoch(); @@ -349,7 +349,7 @@ contract L1Staking is Staking, L1StakingV1Storage, IL1StakingBase { Delegation storage delegation = pool.delegators[_delegator]; // Check that the delegation is not locked for undelegation - require(delegation.tokensLockedUntil == 0, "tokensLocked != 0"); + require(delegation.tokensLocked == 0, "tokensLocked != 0"); require(delegation.shares != 0, "delegation == 0"); // Calculate tokens to get in exchange for the shares uint256 tokensToSend = delegation.shares.mul(pool.tokens).div(pool.shares); From 25f5f7e6172ec12ea510d04048ca8c7e82554553 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 17 Apr 2023 10:25:08 -0300 Subject: [PATCH 106/112] fix: remove unused CloseAllocationRequest struct (OZ N-09) --- contracts/staking/IStakingData.sol | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/contracts/staking/IStakingData.sol b/contracts/staking/IStakingData.sol index 6787bf76a..4446077b5 100644 --- a/contracts/staking/IStakingData.sol +++ b/contracts/staking/IStakingData.sol @@ -22,16 +22,6 @@ interface IStakingData { uint256 accRewardsPerAllocatedToken; // Snapshot used for reward calc } - /** - * @dev CloseAllocationRequest represents a request to close an allocation with a specific proof of indexing. - * This is passed when calling closeAllocationMany to define the closing parameters for - * each allocation. 
- */ - struct CloseAllocationRequest { - address allocationID; - bytes32 poi; - } - // -- Delegation Data -- /** From 495ea0cd67a4337c06dafa17b672ca13dbb56682 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 17 Apr 2023 10:29:27 -0300 Subject: [PATCH 107/112] fix: typo (OZ N-12) --- contracts/staking/Staking.sol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts/staking/Staking.sol b/contracts/staking/Staking.sol index 5e21231ad..9addb6b3b 100644 --- a/contracts/staking/Staking.sol +++ b/contracts/staking/Staking.sol @@ -756,7 +756,7 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M require(_allocationID != address(0), "!alloc"); require(_getAllocationState(_allocationID) == AllocationState.Null, "!null"); - // Caller must prove that they own the private key for the allocationID adddress + // Caller must prove that they own the private key for the allocationID address // The proof is an Ethereum signed message of KECCAK256(indexerAddress,allocationID) bytes32 messageHash = keccak256(abi.encodePacked(_indexer, _allocationID)); bytes32 digest = ECDSA.toEthSignedMessageHash(messageHash); From 19c55b74b2391a02e8ac3a410fa257791474faf7 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 17 Apr 2023 10:33:14 -0300 Subject: [PATCH 108/112] fix: add comment on dangerous packing assumption (OZ N-14) --- contracts/staking/Staking.sol | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contracts/staking/Staking.sol b/contracts/staking/Staking.sol index 9addb6b3b..10770acc5 100644 --- a/contracts/staking/Staking.sol +++ b/contracts/staking/Staking.sol @@ -54,6 +54,8 @@ abstract contract Staking is StakingV3Storage, GraphUpgradeable, IStakingBase, M let ptr := mload(0x40) // (b) get address of the implementation + // CAREFUL here: this only works because extensionImpl is the first variable in this slot + // (otherwise we may have to apply an offset) let impl := and(sload(extensionImpl.slot), 
0xffffffffffffffffffffffffffffffffffffffff) // (1) copy incoming call data From 5fcd2dc7163d08c61cd8d458e1ff3ee87245e56f Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Mon, 17 Apr 2023 10:36:26 -0300 Subject: [PATCH 109/112] fix: unit for thawingPeriod in docstring (OZ N-15) --- contracts/staking/IStakingBase.sol | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contracts/staking/IStakingBase.sol b/contracts/staking/IStakingBase.sol index 406d29839..b76cc488d 100644 --- a/contracts/staking/IStakingBase.sol +++ b/contracts/staking/IStakingBase.sol @@ -149,7 +149,7 @@ interface IStakingBase is IStakingData { * @notice Initialize this contract. * @param _controller Address of the controller that manages this contract * @param _minimumIndexerStake Minimum amount of tokens that an indexer must stake - * @param _thawingPeriod Number of epochs that tokens get locked after unstaking + * @param _thawingPeriod Number of blocks that tokens get locked after unstaking * @param _protocolPercentage Percentage of query fees that are burned as protocol fee (in PPM) * @param _curationPercentage Percentage of query fees that are given to curators (in PPM) * @param _channelDisputeEpochs The period in epochs that needs to pass before fees in rebate pool can be claimed From 8f77decc0d9613dc3d55cfadc33b547a049bef1d Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Wed, 26 Apr 2023 17:33:06 -0300 Subject: [PATCH 110/112] chore: empty addresses and initialize config for scratch 4 --- addresses.json | 1197 +------------------- config/graph.arbitrum-goerli-scratch-4.yml | 153 +++ config/graph.goerli-scratch-4.yml | 162 +++ 3 files changed, 316 insertions(+), 1196 deletions(-) create mode 100644 config/graph.arbitrum-goerli-scratch-4.yml create mode 100644 config/graph.goerli-scratch-4.yml diff --git a/addresses.json b/addresses.json index 3fb004fdd..0967ef424 100644 --- a/addresses.json +++ b/addresses.json @@ -1,1196 +1 @@ -{ - "1": { - "IENS": { - 
"address": "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e" - }, - "IEthereumDIDRegistry": { - "address": "0xdCa7EF03e98e0DC2B855bE647C39ABe984fcF21B" - }, - "GraphProxyAdmin": { - "address": "0xF3B000a6749259539aF4E49f24EEc74Ea0e71430", - "creationCodeHash": "0x26a6f47e71ad242e264768571ce7223bf5a86fd0113ab6cb8200f65820232904", - "runtimeCodeHash": "0xd5330527cfb09df657adc879d8ad704ce6b8d5917265cabbd3eb073d1399f122", - "txHash": "0xc5fe1a9f70e3cc4d286e19e3ee8ee9a0639c7415aea22a3f308951abf300382c" - }, - "BancorFormula": { - "address": "0xd0C61e8F15d9deF697E1100663eD7dA74d3727dC", - "creationCodeHash": "0x17f6de9ab8a9bcf03a548c01d620a32caf1f29be8d90a9688ebee54295f857ef", - "runtimeCodeHash": "0x97a57f69b0029383398d02587a3a357168950d61622fe9f9710bf02b59740d63", - "txHash": "0xcd0e28e7d328ff306bb1f2079e594ff9d04d09f21bc5f978b790c8d44b02055a" - }, - "Controller": { - "address": "0x24CCD4D3Ac8529fF08c58F74ff6755036E616117", - "creationCodeHash": "0x7f37a1844c38fffd5390d2114804ffc4e5cf66dfb5c7bd67a32a4f5d10eebd2d", - "runtimeCodeHash": "0x929c62381fbed59483f832611752177cc2642e1e35fedeeb6cd9703e278448a0", - "txHash": "0x12b13ed4ac6fee14335be09df76171b26223d870977524cfdce46c11112a5c04" - }, - "EpochManager": { - "address": "0x64F990Bf16552A693dCB043BB7bf3866c5E05DdB", - "initArgs": [ - { - "name": "controller", - "value": "0x24CCD4D3Ac8529fF08c58F74ff6755036E616117" - }, - { - "name": "lengthInBlocks", - "value": 6646 - } - ], - "creationCodeHash": "0xa02709eb59b9cca8bee1271845b42db037dc1d042dad93410ba532d378a7c79f", - "runtimeCodeHash": "0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75", - "txHash": "0x9116a77a4e87eb3fe28514a26b1a6e3ee5ce982f9df3c18aadfc36d4f7f050e7", - "proxy": true, - "implementation": { - "address": "0x3fab259F2392F733c60C19492B5678E5D2D2Ee31", - "creationCodeHash": "0xf03074bb7f026a2574b6ffb5d0f63f0c4fee81e004e1c46ef262dd5802d3384f", - "runtimeCodeHash": "0x0d078a0bf778c6c713c46979ac668161a0a0466356252e47082f80912e4495b2", - "txHash": 
"0x730141db9a1dc5c9429f7543442e34e9eb994610e2ceabdedb6d322e1bedf2aa" - } - }, - "GraphToken": { - "address": "0xc944E90C64B2c07662A292be6244BDf05Cda44a7", - "constructorArgs": [ - { - "name": "initialSupply", - "value": "10000000000000000000000000000" - } - ], - "creationCodeHash": "0x30da7a30d71fbd41d3327e4d0183401f257af3e905a0c68ebfd18b590b27b530", - "runtimeCodeHash": "0xb964f76194a04272e7582382a4d6bd6271bbb90deb5c1fd3ae3913504ea3a830", - "txHash": "0x079625b9f58a40f1948b396b7007d09ff4aa193d7ec798923910fc179294cab8" - }, - "ServiceRegistry": { - "address": "0xaD0C9DaCf1e515615b0581c8D7E295E296Ec26E6", - "initArgs": [ - { - "name": "controller", - "value": "0x24CCD4D3Ac8529fF08c58F74ff6755036E616117" - } - ], - "creationCodeHash": "0xa02709eb59b9cca8bee1271845b42db037dc1d042dad93410ba532d378a7c79f", - "runtimeCodeHash": "0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75", - "txHash": "0x94cbb1d3863e33bf92acc6fab534c5ce63a9d1347958a323ae496b06f710f042", - "proxy": true, - "implementation": { - "address": "0x866232Ec9a9F918a821eBa561Cc5FC960Ef5B3aa", - "creationCodeHash": "0xf5fa541b43d15fade518feb63a95a73b9c67626108ead259e444af3a7ae1804f", - "runtimeCodeHash": "0x9856d2c2985f410f2f77e456fe6163827ea5251eb5e3f3768d3d4f8868187882", - "txHash": "0xdf811598fbfbc487b16b5bb3444ed47ae3107d3dcde8dbd770e8810315f942b5" - } - }, - "GraphCurationToken": { - "address": "0xb2E26f17Aea8eFA534e15Bde5C79c25D0C3dfa2e", - "creationCodeHash": "0x7e9a56b6fc05d428d1c1116eaa88a658f05487b493d847bfe5c69e35ec34f092", - "runtimeCodeHash": "0x587f9d4e9ecf9e7048d9f42f027957ca34ee6a95ca37d9758d8cd0ee16e89818", - "txHash": "0x68eb11f4d6eaec5036c97b4c6102a509ac31933f1fe011f275b3e5fee30b6590" - }, - "Curation": { - "address": "0x8FE00a685Bcb3B2cc296ff6FfEaB10acA4CE1538", - "initArgs": [ - { - "name": "controller", - "value": "0x24CCD4D3Ac8529fF08c58F74ff6755036E616117" - }, - { - "name": "bondingCurve", - "value": "0xd0C61e8F15d9deF697E1100663eD7dA74d3727dC" - }, - { - "name": 
"reserveRatio", - "value": 500000 - }, - { - "name": "curationTaxPercentage", - "value": 25000 - }, - { - "name": "minimumCurationDeposit", - "value": "1000000000000000000" - } - ], - "creationCodeHash": "0xa02709eb59b9cca8bee1271845b42db037dc1d042dad93410ba532d378a7c79f", - "runtimeCodeHash": "0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75", - "txHash": "0x64d8d94e21f1923bd1793011ba28f24befd57b511622920716238b05595dac7d", - "proxy": true, - "implementation": { - "address": "0x147A7758EA71d91D545407927b34DD77A5f7C21A", - "creationCodeHash": "0x4aea53d73a1b7b00db3ba36023a70f4e53df68f9b42cb8932afb9cf1837a8cf7", - "runtimeCodeHash": "0x6e5cb73148de597888b628c2e0d97fa0f66ee4867ee0905314034f9031d52872", - "txHash": "0x2e44da799ad8866ac49aae2e40a16c57784ed4b1e9343daa4f764c39a05e0826" - } - }, - "L1GNS": { - "address": "0xaDcA0dd4729c8BA3aCf3E99F3A9f471EF37b6825", - "initArgs": [ - { - "name": "controller", - "value": "0x24CCD4D3Ac8529fF08c58F74ff6755036E616117" - }, - { - "name": "bondingCurve", - "value": "0xd0C61e8F15d9deF697E1100663eD7dA74d3727dC" - }, - { - "name": "didRegistry", - "value": "0xdca7ef03e98e0dc2b855be647c39abe984fcf21b" - } - ], - "creationCodeHash": "0xa02709eb59b9cca8bee1271845b42db037dc1d042dad93410ba532d378a7c79f", - "runtimeCodeHash": "0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75", - "txHash": "0x7ef90b0477e5c5d05bbd203af7d2bf15224640204e12abb07331df11425d2d00", - "proxy": true, - "implementation": { - "address": "0xfdf6de9c5603d85e1dae3d00a776f43913c9b203", - "creationCodeHash": "0x86499a1c90a73b062c0d25777379cdf52085e36c7f4ce44016adc7775ea24355", - "runtimeCodeHash": "0x85cc02c86b4ee2c1b080c6f70500f775bb0fab7960ce62444a8018f3af07af75", - "txHash": "0x218dbb4fd680db263524fc6be36462c18f3e267b87951cd86296eabd4a381183" - } - }, - "L1Staking": { - "address": "0xF55041E37E12cD407ad00CE2910B8269B01263b9", - "initArgs": [ - { - "name": "controller", - "value": "0x24CCD4D3Ac8529fF08c58F74ff6755036E616117" - }, - 
{ - "name": "minimumIndexerStake", - "value": "100000000000000000000000" - }, - { - "name": "thawingPeriod", - "value": 186092 - }, - { - "name": "protocolPercentage", - "value": 10000 - }, - { - "name": "curationPercentage", - "value": 100000 - }, - { - "name": "channelDisputeEpochs", - "value": 7 - }, - { - "name": "maxAllocationEpochs", - "value": 28 - }, - { - "name": "delegationUnbondingPeriod", - "value": 28 - }, - { - "name": "delegationRatio", - "value": 16 - }, - { - "name": "rebateAlphaNumerator", - "value": 77 - }, - { - "name": "rebateAlphaDenominator", - "value": 100 - } - ], - "creationCodeHash": "0xa02709eb59b9cca8bee1271845b42db037dc1d042dad93410ba532d378a7c79f", - "runtimeCodeHash": "0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75", - "txHash": "0x6c92edf1c385983d57be0635cf40e1d1068d778edecf2be1631f51556c731af7", - "proxy": true, - "implementation": { - "address": "0x0Cf97E609937418eBC8C209404B947cBC914F599", - "creationCodeHash": "0xefff927976deb5bcec19101657cf59fc0baf5a8858cfcfe9465c607ee8ee3465", - "runtimeCodeHash": "0x3493cd97b84aead4b9c6a39e2da80aa5e7b08aaabef1c18680bb0856916e9687", - "txHash": "0x104c3068e4d7c79cdbefe9da401197cc456d8f93cd1b8c2d42bc5abeec27f03e", - "libraries": { - "LibCobbDouglas": "0x054f94aB35ee8E92aA5a51084Fe44295844A2DEe" - } - } - }, - "RewardsManager": { - "address": "0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66", - "initArgs": [ - { - "name": "controller", - "value": "0x24CCD4D3Ac8529fF08c58F74ff6755036E616117" - }, - { - "name": "issuanceRate", - "value": "1000000012184945188" - } - ], - "creationCodeHash": "0xa02709eb59b9cca8bee1271845b42db037dc1d042dad93410ba532d378a7c79f", - "runtimeCodeHash": "0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75", - "txHash": "0xd3a4d1b3e250e606f56417fd6e43d35bc794e793b1c5be4ffbecc3a43ca1b7b6", - "proxy": true, - "implementation": { - "address": "0xE633B775790b338B8C5a9ff47aB7C5D0faf4CB7a", - "creationCodeHash": 
"0x8e942423da51ec1fbce6834a7da57619454a48aa85efcaffb9a51cff7f655b8f", - "runtimeCodeHash": "0xc231e797a4a00f5dae7a26b30c0f689ab57d9f7b47a6b1c41deb73c13f952e3a", - "txHash": "0x22dc1dec76de393492ae07901e28581a93afdf6340b0fd53f58d4be81a3e61c9" - } - }, - "DisputeManager": { - "address": "0x97307b963662cCA2f7eD50e38dCC555dfFc4FB0b", - "initArgs": [ - { - "name": "controller", - "value": "0x24CCD4D3Ac8529fF08c58F74ff6755036E616117" - }, - { - "name": "arbitrator", - "value": "0xE1FDD398329C6b74C14cf19100316f0826a492d3" - }, - { - "name": "minimumDeposit", - "value": "10000000000000000000000" - }, - { - "name": "fishermanRewardPercentage", - "value": 500000 - }, - { - "name": "slashingPercentage", - "value": 25000 - } - ], - "creationCodeHash": "0xa02709eb59b9cca8bee1271845b42db037dc1d042dad93410ba532d378a7c79f", - "runtimeCodeHash": "0xdb307489fd9a4a438b5b48909e12020b209280ad777561c0a7451655db097e75", - "txHash": "0x90cd5852f5824f76d93814ffea26040ff503c81a84c4430e3688f219f9b48465", - "proxy": true, - "implementation": { - "address": "0x444c138bf2b151f28a713b0ee320240365a5bfc2", - "creationCodeHash": "0xc00c4702d9683f70a90f0b73ce1842e66fa4c26b2cf75fb486a016bb7bac2102", - "runtimeCodeHash": "0x2bb6445bf9e12618423efe9ef64d05e14d283979829e751cd24685c1440c403f", - "txHash": "0x413cd4f8e9e70ad482500772c1f13b0be48deb42d7f2d0d5a74b56d5a6bd8a4d" - } - }, - "AllocationExchange": { - "address": "0x4a53cf3b3EdA545dc61dee0cA21eA8996C94385f", - "initArgs": [ - { - "name": "graphToken", - "value": "0xc944e90c64b2c07662a292be6244bdf05cda44a7" - }, - { - "name": "staking", - "value": "0xf55041e37e12cd407ad00ce2910b8269b01263b9" - }, - { - "name": "governor", - "value": "0x74db79268e63302d3fc69fb5a7627f7454a41732" - }, - { - "name": "authority", - "value": "0x79fd74da4c906509862c8fe93e87a9602e370bc4" - } - ], - "creationCodeHash": "0x1c7b0d7e81fc15f8ccc5b159e2cedb1f152653ebbce895b59eb74a1b26826fda", - "runtimeCodeHash": 
"0xa63c77e0724a5f679660358452e388f60379f1331e74542afb1436ffb213b960", - "txHash": "0x2ecd036c562f2503af9eaa1a9bca3729bd31ec8a91677530eefbecb398b793ba" - }, - "SubgraphNFTDescriptor": { - "address": "0x8F0B7e136891e8Bad6aa4Abcb64EeeFE29dC2Af0", - "creationCodeHash": "0x7ac0757e66857e512df199569ee11c47a61b00a8d812469b79afa5dafa98c0ed", - "runtimeCodeHash": "0x9a34ad6b202bdfa95ea85654ea2e0dd40a4b8b10847f1c3d3d805fa95a078a3d", - "txHash": "0x77d98358726575ae044ac988b98b63f537951ccae2010e7177c4a7833dce9158" - }, - "SubgraphNFT": { - "address": "0x24e36639b3A3aaA9c928a8A6f12d34F942f1ab67", - "creationCodeHash": "0x8c9929ec6293458209f9cbadd96821604765e3656fe3c7b289b99194ede15336", - "runtimeCodeHash": "0x6309a51754b6bec245685c7a81059dc28e3756f1045f18d059abc9294f454a6a", - "txHash": "0x106c31f2c24a5285c47a766422823766f1c939034513e85613d70d99ef697173" - }, - "BridgeEscrow": { - "address": "0x36aFF7001294daE4C2ED4fDEfC478a00De77F090", - "initArgs": ["0x24CCD4D3Ac8529fF08c58F74ff6755036E616117"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x218aff2c804dd3dfe5064b08cab83ffb37382ca2aea1a225c2ead02ec99f38b5", - "proxy": true, - "implementation": { - "address": "0xBcD54513aa593646d72aEA31406c633C235Ad6EA", - "creationCodeHash": "0x6a1fc897c0130a1c99221cde1938d247de13a0861111ac47ad81c691f323df1a", - "runtimeCodeHash": "0xc8e31a4ebea0c3e43ceece974071ba0b6db2bed6725190795e07a2d369d2a8ab", - "txHash": "0x92908e33b54f59ec13a0f7bd29b818c421742294b9974d73859e0bde871bafb9" - } - }, - "L1GraphTokenGateway": { - "address": "0x01cDC91B0A9bA741903aA3699BF4CE31d6C5cC06", - "initArgs": ["0x24CCD4D3Ac8529fF08c58F74ff6755036E616117"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": 
"0xd327568a286d6fcda1a6b78a14c87d660523a6900be901d6a7fbc2504faff64b", - "proxy": true, - "implementation": { - "address": "0xD41ca6A1d034D178c196DFa916f22f7D1a1B8222", - "creationCodeHash": "0x7d6f46e4801d562a8c6fc37779711cce39b3544ea62c6c6517d4b06e8604e38c", - "runtimeCodeHash": "0xa0c0a37340ee949d31c3d41b642c507c58f225c09da9ae3d378e5148cd27081a", - "txHash": "0x25f4234807c7f09b813d4e413311bbe440c8257bc050c71678f63a7abc6e643e" - } - } - }, - "5": { - "GraphProxyAdmin": { - "address": "0x6D47902c3358E0BCC06171DE935cB23D8E276fdd", - "creationCodeHash": "0x8b9a4c23135748eb0e4d0e743f7276d86264ace935d23f9aadcfccd64b482055", - "runtimeCodeHash": "0x47aa67e4a85090fe7324268b55fb7b320ee7a8937f2ad02480b71d4bb3332b13", - "txHash": "0xd4be829c13c741b8b56ca5ee7d98d86237ce44df7c11eff73df26cd87d5cab94" - }, - "BancorFormula": { - "address": "0x2DFDC3e11E035dD96A4aB30Ef67fab4Fb6EC01f2", - "creationCodeHash": "0x7ae36017eddb326ddd79c7363781366121361d42fdb201cf57c57424eede46f4", - "runtimeCodeHash": "0xed6701e196ad93718e28c2a2a44d110d9e9906085bcfe4faf4f6604b08f0116c", - "txHash": "0x97ca33e6e7d1d7d62bdec4827f208076922d9c42bf149693b36ab91429e65740" - }, - "Controller": { - "address": "0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B", - "creationCodeHash": "0x4f2082404e96b71282e9d7a8b7efd0f34996b5edac6711095911d36a57637c88", - "runtimeCodeHash": "0xe31d064a2db44ac10d41d74265b7d4482f86ee95644b9745c04f9fc91006906d", - "txHash": "0x8087bd10cc8d456a7b573bc30308785342db2b90d80f3a750931ab9cf5273b83" - }, - "EpochManager": { - "address": "0x03541c5cd35953CD447261122F93A5E7b812D697", - "initArgs": ["0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B", "554"], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0xb1c6189514b52091e35c0349dff29357a2572cd9c2f9ad7f623b2b24252826d1", - "proxy": true, - "implementation": { - "address": 
"0xb6a641879F195448F3Da10fF3b3C4541808a9342", - "creationCodeHash": "0x729aca90fcffdeede93bc42a6e089a93085ec04133e965cf0291cf6245880595", - "runtimeCodeHash": "0xce525d338b6ed471eeb36d2927a26608cca2d5cfe52bd0585945eacc55b525cf", - "txHash": "0x139630c31b6a5799231572aa0b555a44209acd79fb3df98832d80cf9a1013b58" - } - }, - "GraphToken": { - "address": "0x5c946740441C12510a167B447B7dE565C20b9E3C", - "constructorArgs": ["10000000000000000000000000000"], - "creationCodeHash": "0xa749ef173d768ffe0786529cd23238bc525f4a621a91303d8fb176533c18cec2", - "runtimeCodeHash": "0xe584408c8e04a6200bc7a50816343585ad80f699bd394b89bb96964dbc1a2a92", - "txHash": "0x0639808a47da8a5270bc89eb3009c7d29167c8f32f015648920ec5d114225540" - }, - "GraphCurationToken": { - "address": "0x8bEd0a89F18a801Da9dEA994D475DEa74f75A059", - "creationCodeHash": "0x8c076dacbf98f839a0ff25c197eafc836fc3fc1ee5183c7f157acec17678a641", - "runtimeCodeHash": "0xad138b4c4f34501f83aea6c03a49c103a9115526c993860a9acbd6caeaaf0d64", - "txHash": "0xc09739affd3d9dd43f690d3a487b1c149ad8aa50164995cfbc9de73914ff278a" - }, - "ServiceRegistry": { - "address": "0x7CF8aD279E9F26b7DAD2Be452A74068536C8231F", - "initArgs": ["0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B"], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0xf250ef40f4172e54b96047a59cfd7fc35ffabe14484ff1d518e0262195895282", - "proxy": true, - "implementation": { - "address": "0xdC7Fb3a43B9e069df5F07eDc835f60dAc3fD40BA", - "creationCodeHash": "0x45f56a7ad420cd11a8585594fb29121747d87d412161c8779ea36dfd34a48e88", - "runtimeCodeHash": "0x26aceabe83e2b757b2f000e185017528cdde2323c2129fd612180ac3192adfda", - "txHash": "0x2fdb5fa641f707809322107573ce7799711e125cc781aade99fd2948455847ab" - } - }, - "Curation": { - "address": "0xE59B4820dDE28D2c235Bd9A73aA4e8716Cb93E9B", - "initArgs": [ - "0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B", - 
"0x2DFDC3e11E035dD96A4aB30Ef67fab4Fb6EC01f2", - "0x8bEd0a89F18a801Da9dEA994D475DEa74f75A059", - "500000", - "10000", - "1000000000000000000" - ], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0xf1b1f0f28b80068bcc9fd6ef475be6324a8b23cbdb792f7344f05ce00aa997d7", - "proxy": true, - "implementation": { - "address": "0xAeaA2B058539750b740E858f97159E6856948670", - "creationCodeHash": "0x022576ab4b739ee17dab126ea7e5a6814bda724aa0e4c6735a051b38a76bd597", - "runtimeCodeHash": "0xc7b1f9bef01ef92779aab0ae9be86376c47584118c508f5b4e612a694a4aab93", - "txHash": "0x400bfb7b6c384363b859a66930590507ddca08ebedf64b20c4b5f6bc8e76e125" - } - }, - "SubgraphNFTDescriptor": { - "address": "0xE7e406b4Bfce0B78A751712BFEb1D6B0ce60e8fb", - "creationCodeHash": "0xf16e8ff11d852eea165195ac9e0dfa00f98e48f6ce3c77c469c7df9bf195b651", - "runtimeCodeHash": "0x39583196f2bcb85789b6e64692d8c0aa56f001c46f0ca3d371abbba2c695860f", - "txHash": "0xffee21f6616abd4ffdab0b930dbf44d2ba381a08c3c834798df464fd85e8047e" - }, - "SubgraphNFT": { - "address": "0x083318049968F20EfaEA48b0978EC57bbb0ECbcE", - "constructorArgs": ["0xEfc519BEd6a43a14f1BBBbA9e796C4931f7A5540"], - "creationCodeHash": "0x5de044b15df24beb8781d1ebe71f01301a6b8985183f37eb8d599aa4059a1d3e", - "runtimeCodeHash": "0x6a7751298d6ffdbcf421a3b72faab5b7d425884b04757303123758dbcfb21dfa", - "txHash": "0x8884b65a236c188e4c61cf9593be2f67b27e4f80785939336d659866cfd97aec" - }, - "L1GNS": { - "address": "0x065611D3515325aE6fe14f09AEe5Aa2C0a1f0CA7", - "initArgs": [ - "0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B", - "0x2DFDC3e11E035dD96A4aB30Ef67fab4Fb6EC01f2", - "0x083318049968F20EfaEA48b0978EC57bbb0ECbcE" - ], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": 
"0x0149f062893acb0eafcbf67acc99da99e03aab3ee2b6b40fbe523d91e0fcecd1", - "proxy": true, - "implementation": { - "address": "0xa95ee5A5f6b45Fcf85A7fa0714f462472C467818", - "creationCodeHash": "0x2e71e4aefc1e678cb9c71882c1da67fc640389337a7d6ae43f78d0f13294594a", - "runtimeCodeHash": "0xde0e02c6a36a90e11c768f40a81430b7e9cda261aa6dada14eaad392d42efc21", - "txHash": "0xbc6e9171943020d30c22197282311f003e79374e6eeeaab9c360942bdf4193f4" - } - }, - "L1Staking": { - "address": "0x35e3Cb6B317690d662160d5d02A5b364578F62c9", - "initArgs": [ - "0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B", - "100000000000000000000000", - "6646", - "10000", - "100000", - "2", - "4", - "12", - "16", - "77", - "100" - ], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0x1960be49029284756037cf3ee8afe9eeaba93de4ba84875c5eefd5d2289903bd", - "proxy": true, - "implementation": { - "address": "0x16e64AA72De0f3BDa30d3D324E967BDecb7c826a", - "creationCodeHash": "0x6828025572bcf46c755088cd0b11329db6b249b0221140e93571799125255ae1", - "runtimeCodeHash": "0x523492e8e808f27ac0240edc7359b760b1c17d0572a13e68799775b53c2a50ec", - "txHash": "0x42ff9ce1b319bbdd8619cdd999b2c3c7c3aeacc5ac7a6eddcc1c3f0a2774f4a0", - "libraries": { - "LibCobbDouglas": "0x137e60D093F679B0fF9ad922EB14aCe0F4F443cf" - } - } - }, - "RewardsManager": { - "address": "0x1246D7c4c903fDd6147d581010BD194102aD4ee2", - "initArgs": ["0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B", "1000000012184945188"], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0x108efecde4422dacf6ec7a07884cab214ba0a441fc73a6ad82ceb5c73e1c9334", - "proxy": true, - "implementation": { - "address": "0x8ECedc7631f4616D7f4074f9fC9D0368674794BE", - "creationCodeHash": 
"0x8e942423da51ec1fbce6834a7da57619454a48aa85efcaffb9a51cff7f655b8f", - "runtimeCodeHash": "0xc231e797a4a00f5dae7a26b30c0f689ab57d9f7b47a6b1c41deb73c13f952e3a", - "txHash": "0xe1b5474f03abf1b4f0c0f1fbbad215948ae28b490a21ec07e5a955f52f3969ed" - } - }, - "DisputeManager": { - "address": "0x8c344366D9269174F10bB588F16945eb47f78dc9", - "initArgs": [ - "0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B", - "0xFD01aa87BeB04D0ac764FC298aCFd05FfC5439cD", - "10000000000000000000000", - "500000", - "25000", - "25000" - ], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0xe93eba1bda0d262efabbc05d4e01b9ee197f22dd4f798e4c5fc5b1b9c137428e", - "proxy": true, - "implementation": { - "address": "0x476F0b8e5F952f0740aD3b0cb50648a7496c8388", - "creationCodeHash": "0x5b73c9b910d66426fd965ac3110e9debda1d81134c0354a7af8ec1f2ebd765f6", - "runtimeCodeHash": "0xcaf3547f0d675a1e1d2f887cf4666410bc3b084e65ad283ed3f1ff2b1bccc113", - "txHash": "0x6a90b5e2d5dcae2c94fe518ce7f6fb2ffc11e562b9feac6464dcb32e1e90c039" - } - }, - "AllocationExchange": { - "address": "0x67FBea097202f46D601D7C937b5DBb615659aDF2", - "constructorArgs": [ - "0x5c946740441C12510a167B447B7dE565C20b9E3C", - "0x35e3Cb6B317690d662160d5d02A5b364578F62c9", - "0xf1135bFF22512FF2A585b8d4489426CE660f204c", - "0x52e498aE9B8A5eE2A5Cd26805F06A9f29A7F489F" - ], - "creationCodeHash": "0x97714e1a80674ab0af90a10f2c7156cc92794ef81565fe9c7c35ecbe0025cc08", - "runtimeCodeHash": "0x5c20792fefe71126668be8ab19ab26cdb8ab9a6f73efbfa1d90f91e26459fa67", - "txHash": "0x87b35e5289792800832902206cf0ee4b9900e4d38089bd6634d10ea78729bf54" - }, - "IENS": { - "address": "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e" - }, - "IEthereumDIDRegistry": { - "address": "0xdCa7EF03e98e0DC2B855bE647C39ABe984fcF21B" - }, - "BridgeEscrow": { - "address": "0x8e4145358af77516B886D865e2EcacC0Fd832B75", - "initArgs": 
["0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x190ea3c8f731a77a8fd1cbce860f9561f233adeafe559b33201b7d21ccd298cf", - "proxy": true, - "implementation": { - "address": "0xDD569E05D54fBF5d02fE4a26aC03Ea00317A0A2e", - "creationCodeHash": "0x6a1fc897c0130a1c99221cde1938d247de13a0861111ac47ad81c691f323df1a", - "runtimeCodeHash": "0xc8e31a4ebea0c3e43ceece974071ba0b6db2bed6725190795e07a2d369d2a8ab", - "txHash": "0x369038dcc8d8e70d40782dd761a82cc453c7a4f1939284c724a5a72119e3e566" - } - }, - "L1GraphTokenGateway": { - "address": "0xc82fF7b51c3e593D709BA3dE1b3a0d233D1DEca1", - "initArgs": ["0x48eD7AfbaB432d1Fc6Ea84EEC70E745d9DAcaF3B"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x4a06731591df5c5f77c11bf8df7851234873eb6727fbbc93f5595a223f7cf3fc", - "proxy": true, - "implementation": { - "address": "0x9e8bab937Cac7c359F5e92248d10C613B3Cd7B8b", - "creationCodeHash": "0x7d6f46e4801d562a8c6fc37779711cce39b3544ea62c6c6517d4b06e8604e38c", - "runtimeCodeHash": "0xa0c0a37340ee949d31c3d41b642c507c58f225c09da9ae3d378e5148cd27081a", - "txHash": "0x517794503416be02d916d289f4e7510359d17567bec987da99319e27e5f40fc1" - } - } - }, - "1337": { - "GraphProxyAdmin": { - "address": "0x5b1869D9A4C187F2EAa108f3062412ecf0526b24", - "creationCodeHash": "0x8b9a4c23135748eb0e4d0e743f7276d86264ace935d23f9aadcfccd64b482055", - "runtimeCodeHash": "0x47aa67e4a85090fe7324268b55fb7b320ee7a8937f2ad02480b71d4bb3332b13", - "txHash": "0xeb755c878246eb261061e61335dcabb25c12aa65f17272bd5e680474ebd7af5d" - }, - "BancorFormula": { - "address": "0xCfEB869F69431e42cdB54A4F4f105C19C080A601", - "creationCodeHash": 
"0x7ae36017eddb326ddd79c7363781366121361d42fdb201cf57c57424eede46f4", - "runtimeCodeHash": "0xed6701e196ad93718e28c2a2a44d110d9e9906085bcfe4faf4f6604b08f0116c", - "txHash": "0xcf896ae348d971744d7600ede45a58ed2598caaa286ac9794acc5f81cf14933c" - }, - "Controller": { - "address": "0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B", - "creationCodeHash": "0x4f2082404e96b71282e9d7a8b7efd0f34996b5edac6711095911d36a57637c88", - "runtimeCodeHash": "0xe31d064a2db44ac10d41d74265b7d4482f86ee95644b9745c04f9fc91006906d", - "txHash": "0x2df913aa6ca0bec47227ca347677627390a00e78787f1144bcfc78673981dcd6" - }, - "EpochManager": { - "address": "0xD833215cBcc3f914bD1C9ece3EE7BF8B14f841bb", - "initArgs": ["0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B", "554"], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0x7e8cd2c928453b8a5ca041d14f6fea09de80e5e808f8d6ba480f35f10a28148a", - "proxy": true, - "implementation": { - "address": "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550", - "creationCodeHash": "0x492b44ca23b4728151292c5a7a731da511619bbf4fc0194cb3158fde2a0794ed", - "runtimeCodeHash": "0x73009e4f97f097e7b5d67e1e1b6dd41ecc8f5363eb15484019b8000a6d0cb95c", - "txHash": "0xa9e5c9e3585bb68dc538062ca4c2dbfb58c3fc80523ca97c7d0d27f4a7ca1a09" - } - }, - "GraphToken": { - "address": "0xe982E462b094850F12AF94d21D470e21bE9D0E9C", - "constructorArgs": ["10000000000000000000000000000"], - "creationCodeHash": "0xa749ef173d768ffe0786529cd23238bc525f4a621a91303d8fb176533c18cec2", - "runtimeCodeHash": "0xe584408c8e04a6200bc7a50816343585ad80f699bd394b89bb96964dbc1a2a92", - "txHash": "0xf564d09de26eaa6d80b4712a426bcf24e05c221a8ac6214d81946c24242df619" - }, - "ServiceRegistry": { - "address": "0x9b1f7F645351AF3631a656421eD2e40f2802E6c0", - "initArgs": ["0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B"], - "creationCodeHash": 
"0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0x3493336fb4bc76b11aa4e880bb5e8366d00a0b1211ce417f9125d9b97a48920c", - "proxy": true, - "implementation": { - "address": "0x0290FB167208Af455bB137780163b7B7a9a10C16", - "creationCodeHash": "0xad443a9c9bf6a6049265e253dc99658bf99e4091c939f68972c5298926d7689d", - "runtimeCodeHash": "0x495a9a8de4aed745b0521e8b24661cf26ff12a9993a0ec5ef17728271a6f8629", - "txHash": "0x69a51f8846d42a786314d56ce00b7321a6576cd8bdc0d5898dd6f3ccb1c63c87" - } - }, - "Curation": { - "address": "0xA57B8a5584442B467b4689F1144D269d096A3daF", - "initArgs": [ - "0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B", - "0xCfEB869F69431e42cdB54A4F4f105C19C080A601", - "0x59d3631c86BbE35EF041872d502F218A39FBa150", - "500000", - "10000", - "1000000000000000000" - ], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0xfd1002811e9c61acd016cfbebb0c348fedfb94402ad9c330d9bcbebcd58c3f9c", - "proxy": true, - "implementation": { - "address": "0x2612Af3A521c2df9EAF28422Ca335b04AdF3ac66", - "creationCodeHash": "0xe69ca2e0119fb769311ecd3d4de6b12fd0cedfb56eeb4c537bd3defa2adcca43", - "runtimeCodeHash": "0x364e9b3216fa3a571e8be3cdb757fa007ee8a2afe384396e4a7cda3de79ce4d9", - "txHash": "0xc3278c3fae8f2cfab00755537c9a8d6712e1e8027f46a9ef99eb3b9231620ab2" - } - }, - "L1GNS": { - "address": "0xA94B7f0465E98609391C623d0560C5720a3f2D33", - "initArgs": [ - "0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B", - "0xCfEB869F69431e42cdB54A4F4f105C19C080A601", - "0x0E696947A06550DEf604e82C26fd9E493e576337" - ], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": 
"0x07c9dee04758b140616e0e9688bd310a86115b8dd2c5ab011f1edb709b429484", - "proxy": true, - "implementation": { - "address": "0xDb56f2e9369E0D7bD191099125a3f6C370F8ed15", - "creationCodeHash": "0xfbdc6caf28aa09493e0e0032aac06cdb2be8c5f62b8c839876d62d2bb2977e3d", - "runtimeCodeHash": "0x106af7614bdb7cdf60a6a93e5c92dbee03e36c799880d9ee8e8e9585fc077f72", - "txHash": "0xb1e63211ea7b036bf35423034bc60490b3b35b199bddc85200ea926b76e16a4e" - } - }, - "L1Staking": { - "address": "0x5f8e26fAcC23FA4cbd87b8d9Dbbd33D5047abDE1", - "initArgs": [ - "0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B", - "100000000000000000000000", - "6646", - "10000", - "100000", - "2", - "4", - "12", - "16", - "77", - "100" - ], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0x81c0824918a3d2f9d196118b9ea1c8d15bdf7dd55f3d39ca99f047d38c30445f", - "proxy": true, - "implementation": { - "address": "0xFC628dd79137395F3C9744e33b1c5DE554D94882", - "creationCodeHash": "0xa6ad6904fe70424527494f1401d71972967da4e35dea7ca01858063a56550e42", - "runtimeCodeHash": "0x3146935df7968ca2b32b0610ddb25e40148a9c007d3e81b367a10342b6eed13b", - "txHash": "0xb37e221c74a2237c0d63cc61242106c426b1b46041e6e0e27467f90c4e01da88", - "libraries": { - "LibCobbDouglas": "0xb09bCc172050fBd4562da8b229Cf3E45Dc3045A6" - } - } - }, - "RewardsManager": { - "address": "0x4bf749ec68270027C5910220CEAB30Cc284c7BA2", - "initArgs": ["0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B"], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0xf4a13ad82b067ffb59e689df3cb828a5cd4eac7e316323dfbb4b05b191127ce5", - "proxy": true, - "implementation": { - "address": "0xD86C8F0327494034F60e25074420BcCF560D5610", - "creationCodeHash": 
"0x5579914062ff21ef47c68cfab1eddbef1c320ae50e769dc430b73fcb995c5095", - "runtimeCodeHash": "0xae6b7b7a6f02d5964d4a35d66c906dd0fb5a5fb00549e646a586465e97218402", - "txHash": "0x8f411197f5b89b40fd61e2a8d35a9740279cff4fb2a7c2231f3faba1b8d4f581" - } - }, - "DisputeManager": { - "address": "0x5017A545b09ab9a30499DE7F431DF0855bCb7275", - "initArgs": [ - "0x254dffcd3277C0b1660F6d42EFbB754edaBAbC2B", - "0xFFcf8FDEE72ac11b5c542428B35EEF5769C409f0", - "10000000000000000000000", - "500000", - "25000", - "25000" - ], - "creationCodeHash": "0x25a7b6cafcebb062169bc25fca9bcce8f23bd7411235859229ae3cc99b9a7d58", - "runtimeCodeHash": "0xaf2d63813a0e5059f63ec46e1b280eb9d129d5ad548f0cdd1649d9798fde10b6", - "txHash": "0xc76797d4f240e81607f679ed0f0cd483065f4c657743bacd2198fd42fe4f089b", - "proxy": true, - "implementation": { - "address": "0x7C728214be9A0049e6a86f2137ec61030D0AA964", - "creationCodeHash": "0xbcdd3847552c8819e7b65d77b6929f2b61cd5a7522d1355f2bb1a0c2a099f713", - "runtimeCodeHash": "0x2c0589b92badf53b7cb8a0570e4d28ceefff0add59eb2e75e59e4ae4f76592ff", - "txHash": "0x6773e7db3d0991ad4541cdceb64c035e3c0cc7f5e9ecf7749ba2e699b8793bcf" - } - }, - "EthereumDIDRegistry": { - "address": "0xe78A0F7E598Cc8b0Bb87894B0F60dD2a88d6a8Ab", - "creationCodeHash": "0x20cd202f7991716a84c097da5fbd365fd27f7f35f241f82c529ad7aba18b814b", - "runtimeCodeHash": "0x5f396ffd54b6cd6b3faded0f366c5d7e148cc54743926061be2dfd12a75391de", - "txHash": "0xe68b0ca45476f9d07359ee078d16b8dc9ed9769495cc87ba034bbbfbd470588b" - }, - "GraphCurationToken": { - "address": "0x59d3631c86BbE35EF041872d502F218A39FBa150", - "creationCodeHash": "0x8c076dacbf98f839a0ff25c197eafc836fc3fc1ee5183c7f157acec17678a641", - "runtimeCodeHash": "0xad138b4c4f34501f83aea6c03a49c103a9115526c993860a9acbd6caeaaf0d64", - "txHash": "0x7290a04e5649738b46213e76a426bb59bebb6af80641af8197539446eb716249" - }, - "SubgraphNFTDescriptor": { - "address": "0x630589690929E9cdEFDeF0734717a9eF3Ec7Fcfe", - "creationCodeHash": 
"0xf16e8ff11d852eea165195ac9e0dfa00f98e48f6ce3c77c469c7df9bf195b651", - "runtimeCodeHash": "0x39583196f2bcb85789b6e64692d8c0aa56f001c46f0ca3d371abbba2c695860f", - "txHash": "0x3c5bff07a071ac0737f06a1e7a91a5f80fcf86a16dc8ac232ed0a305db9d9d85" - }, - "SubgraphNFT": { - "address": "0x0E696947A06550DEf604e82C26fd9E493e576337", - "constructorArgs": ["0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1"], - "creationCodeHash": "0xc3559f8ffca442b8a3706003d3c89d11bc918398551a197bbbd66ae649cc14c4", - "runtimeCodeHash": "0x16c4bfbb2374879d3f9373178fe14170332e274a3a4e6a07f7ffc5194420584d", - "txHash": "0xa03c6e4755494c8334fa9175941cb5655be943a930950312a6e3572204d6259f" - }, - "AllocationExchange": { - "address": "0xFF6049B87215476aBf744eaA3a476cBAd46fB1cA", - "constructorArgs": [ - "0xe982E462b094850F12AF94d21D470e21bE9D0E9C", - "0x5f8e26fAcC23FA4cbd87b8d9Dbbd33D5047abDE1", - "0x3E5e9111Ae8eB78Fe1CC3bb8915d5D461F3Ef9A9", - "0xE11BA2b4D45Eaed5996Cd0823791E0C93114882d" - ], - "creationCodeHash": "0xe7db7b38369ff61ea6cb2abdaf64f94deb88703faec5fa7a33866d1144a7da5f", - "runtimeCodeHash": "0x0792084bfc42580dc14eff231a75eab772eca117894dca8f1544cf0d38df219c", - "txHash": "0xeb2ac7e11256e10591b396fff48d0526c6bab20f9d45036ba07b8e32238d8397" - }, - "L1GraphTokenGateway": { - "address": "0xA586074FA4Fe3E546A132a16238abe37951D41fE", - "creationCodeHash": "0x506b750ce67ef926070c8918e372003d0cd9d21f8198a1e5447ff65a8ca8759e", - "runtimeCodeHash": "0x6cc716875c9de6a3bdc8b53366cb7adf83f96f2254b1f3171c996ac99449bc8c", - "txHash": "0x5f49cd4389f3c59b18bf1bcc7f5bf6feaa4a5e1e3f08b66805b4e1b7329a991c" - }, - "BridgeEscrow": { - "address": "0x2D8BE6BF0baA74e0A907016679CaE9190e80dD0A", - "creationCodeHash": "0x09b0de6d1f3afb28f3008befbc5c6303a8d510c31a4364483c009f3446082175", - "runtimeCodeHash": "0x7c242c472191805935e451bae6aaf0417ff7192d0b2a76422bc1c93b2284e2d4", - "txHash": "0x1881f59227e8f77a4b28c1877d5a4b08df576e1a22785800e35aeacfb3f6958e" - } - }, - "42161": { - "GraphProxyAdmin": { - "address": 
"0x2983936aC20202a6555993448E0d5654AC8Ca5fd", - "creationCodeHash": "0x68b304ac6bce7380d5e0f6b14a122f628bffebcc75f8205cb60f0baf578b79c3", - "runtimeCodeHash": "0x8d9ba87a745cf82ab407ebabe6c1490197084d320efb6c246d94bcc80e804417", - "txHash": "0x3ff82c38ec0e08e8f4194689188edcc1e8acb1f231c14cce8f0223f4dfc6cb76" - }, - "BancorFormula": { - "address": "0xA489FDc65229D6225014C0b357BCD19af6f00eE9", - "creationCodeHash": "0x7ae36017eddb326ddd79c7363781366121361d42fdb201cf57c57424eede46f4", - "runtimeCodeHash": "0xed6701e196ad93718e28c2a2a44d110d9e9906085bcfe4faf4f6604b08f0116c", - "txHash": "0xb2bb14ba3cbd1bb31b08b86aced469745f9888710254bb3baed047f435e788c0" - }, - "Controller": { - "address": "0x0a8491544221dd212964fbb96487467291b2C97e", - "creationCodeHash": "0x798f913fbaa1b2547c917e3dc31679089ab27cba442c511c159803acdba28c15", - "runtimeCodeHash": "0x00ae0824f79c4e48d2d23a8d4e6d075f04f44f3ea30a4f4305c345bb98117c62", - "txHash": "0x2a9d5744ad0e5e2e6bb6733ae890702fed2bce906e4e8b1cc50d2d3912c58d18" - }, - "EpochManager": { - "address": "0x5A843145c43d328B9bB7a4401d94918f131bB281", - "initArgs": ["0x0a8491544221dd212964fbb96487467291b2C97e", "6646"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x4c70b8a56278452898d9eb23787a977d38141ebe48c79417c3acf6748ff921cf", - "proxy": true, - "implementation": { - "address": "0xeEDEdb3660154f439D93bfF612f7902edf07b848", - "creationCodeHash": "0x83bc0b08dbe1a9259666ec209f06223863f7bb9cfbf917a2d4b795c771a727fe", - "runtimeCodeHash": "0xed60261c6dc84ebc16830c36f3ee370a92802601d5a2fe1c3c19f5120dcbc2eb", - "txHash": "0x64fac1c567b7be735084b337a1e4ea9b990a8ffee8190485dc9b8dfcc257146c" - } - }, - "L2GraphToken": { - "address": "0x9623063377AD1B27544C965cCd7342f7EA7e88C7", - "initArgs": ["0x4528FD7868c91Ef64B9907450Ee8d82dC639612c"], - "creationCodeHash": 
"0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x8465190df853c05bbdec00ba6b66139be0e5663fd5b740bdd464ad7409ce2100", - "proxy": true, - "implementation": { - "address": "0xaFFCb96181D920FE8C0Af046C49B2c9eC98b28df", - "creationCodeHash": "0x6c4146427aafa7375a569154be95c8c931bf83aab0315706dd78bdf79c889e4c", - "runtimeCodeHash": "0x004371d1d80011906953dcba17c648503fc94b94e1e0365c8d8c706ff91f93e9", - "txHash": "0xbd7d146ce80831ed7643e9f5b5a84737da354994ae080cb3d7ff7bbc3e696b3d" - } - }, - "GraphCurationToken": { - "address": "0x47a0d56ea574419B524285d52fFe7198297D209c", - "creationCodeHash": "0x1ee42ee271cefe20c33c0de904501e618ac4b56debca67c634d0564cecea9ff2", - "runtimeCodeHash": "0x340e8f378c0117b300f3ec255bc5c3a273f9ab5bd2940fa8eb3b5065b21f86dc", - "txHash": "0x382568f1871a3d57f4d3787b255a2364e9926cb6770fdca3cde6cb04b577ecd5" - }, - "ServiceRegistry": { - "address": "0x072884c745c0A23144753335776c99BE22588f8A", - "initArgs": ["0x0a8491544221dd212964fbb96487467291b2C97e"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x54b1da3f2fdd2327abe01f75ac38a670ee16d3f47bc58641ddaef04f0b9d0f78", - "proxy": true, - "implementation": { - "address": "0xD32569dA3B89b040A0589B5b8D2c721a68472ff3", - "creationCodeHash": "0x50808e8cce93cf78a23c9e6dd7984356bd2bd93be30b358982909140dd61f6ff", - "runtimeCodeHash": "0xaef79c87f7e80107c0dc568cf1f8950459b5174ee3aa565ec487556a655e71db", - "txHash": "0xca363c6bc841b43bd896b6d2098434679884d200a28013dedb48a2c95028ce40" - } - }, - "Curation": { - "address": "0x22d78fb4bc72e191C765807f8891B5e1785C8014", - "initArgs": [ - "0x0a8491544221dd212964fbb96487467291b2C97e", - "0xA489FDc65229D6225014C0b357BCD19af6f00eE9", - "0x47a0d56ea574419B524285d52fFe7198297D209c", - "1000000", - 
"10000", - "1000000000000000000" - ], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x8f856e2090af3243349199f7991e01b1c28de7b70b0185d2370d8ada5ce9c97b", - "proxy": true, - "implementation": { - "address": "0x234071F4B1e322d1167D63503498f82cC7Fa4606", - "creationCodeHash": "0xa5fa77df71a72c5aadba812345978c291c5fa1a3a23129b6eba3a38ac85d8b5d", - "runtimeCodeHash": "0x1d265e9f658778b48a0247cfef79bfc9304d1faa1f1e085f2fea85629f68e2d5", - "txHash": "0x68d06c576b5bc472152f4ab4afa90e2c5832512fa83c69493be5da52b585f45c" - } - }, - "SubgraphNFTDescriptor": { - "address": "0x96cce9b6489744630A058324FB22e7CD02815ebe", - "creationCodeHash": "0xf16e8ff11d852eea165195ac9e0dfa00f98e48f6ce3c77c469c7df9bf195b651", - "runtimeCodeHash": "0x39583196f2bcb85789b6e64692d8c0aa56f001c46f0ca3d371abbba2c695860f", - "txHash": "0xbb01566726e1d58825cf7aa2860f0f571ff47f92b3837aad0e73e7791fbca48c" - }, - "SubgraphNFT": { - "address": "0x3FbD54f0cc17b7aE649008dEEA12ed7D2622B23f", - "constructorArgs": ["0x4528FD7868c91Ef64B9907450Ee8d82dC639612c"], - "creationCodeHash": "0xc1e58864302084de282dffe54c160e20dd96c6cfff45e00e6ebfc15e04136982", - "runtimeCodeHash": "0x7216e736a8a8754e88688fbf5c0c7e9caf35c55ecc3a0c5a597b951c56cf7458", - "txHash": "0x4334bd64938c1c5c604bde96467a8601875046569f738e6860851594c91681ff" - }, - "GNS": { - "address": "0xec9A7fb6CbC2E41926127929c2dcE6e9c5D33Bec", - "initArgs": [ - "0x0a8491544221dd212964fbb96487467291b2C97e", - "0xA489FDc65229D6225014C0b357BCD19af6f00eE9", - "0x3FbD54f0cc17b7aE649008dEEA12ed7D2622B23f" - ], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0xf7f2747d1aafd1684ffee7316e727475249cd358af559c6234089b72ffc25f5d", - "proxy": true, - "implementation": { - "address": 
"0x8Cab11d17082C67aFc3dc35D1a2E02B23dB914ab", - "creationCodeHash": "0xb0be24e926bb24420bb5a8d3f7bd0b70a545fdddbf8cb177a42478adf4435aae", - "runtimeCodeHash": "0x4cb62b9def5b691e43ed06808b18efe682fcefb7739909be0d6c87f1eda724cd", - "txHash": "0xb4bf3e0fdf9486ff24c567fecb90875a8d11efa1a1a4dba36f25d529c586852c" - } - }, - "Staking": { - "address": "0x00669A4CF01450B64E8A2A20E9b1FCB71E61eF03", - "initArgs": [ - "0x0a8491544221dd212964fbb96487467291b2C97e", - "100000000000000000000000", - "186092", - "10000", - "100000", - "7", - "28", - "28", - "16", - "77", - "100" - ], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0xa33c0d58ddaed7e3f7381a33e3d5f63e39219863019f00d54ce2fd2446076ac7", - "proxy": true, - "implementation": { - "address": "0x2787f89355924a8781Acf988f12855C6CD495A06", - "creationCodeHash": "0xa4e467ac964866579894ebd6afae491e807a16969f9f1faa95707dc25d8d201c", - "runtimeCodeHash": "0x1880c562574956c097d7f6d8f09a91d76e50bf1babd71e369f032d4b0716f0f8", - "txHash": "0x3ed3b4744df380b7f87341839f16c19234a1565f56a8c672183396678f8390c4", - "libraries": { - "LibCobbDouglas": "0x86e80908F80B40B924966B781a8e20338670B431" - } - } - }, - "RewardsManager": { - "address": "0x971B9d3d0Ae3ECa029CAB5eA1fB0F72c85e6a525", - "initArgs": ["0x0a8491544221dd212964fbb96487467291b2C97e"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x222e14cb6f49e3e7b76f6a523c1a3c24f96402676be8662bf1b94bb2250ddd0f", - "proxy": true, - "implementation": { - "address": "0xA301deAbDADF9DBd01932Aa13739c9620FAA54Fd", - "creationCodeHash": "0x8e942423da51ec1fbce6834a7da57619454a48aa85efcaffb9a51cff7f655b8f", - "runtimeCodeHash": "0xc231e797a4a00f5dae7a26b30c0f689ab57d9f7b47a6b1c41deb73c13f952e3a", - "txHash": 
"0x91c40dd8898d121a3a8d0f3f0e345f5b76157f4baab0ed9b3eb5c7ef14ab95c0" - } - }, - "DisputeManager": { - "address": "0x0Ab2B043138352413Bb02e67E626a70320E3BD46", - "initArgs": [ - "0x0a8491544221dd212964fbb96487467291b2C97e", - "0x113DC95e796836b8F0Fa71eE7fB42f221740c3B0", - "10000000000000000000000", - "500000", - "25000", - "25000" - ], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x68f08fe0a1179170c8b4c7542725d71432b4171604d7456dff824e0ec1c6cdb9", - "proxy": true, - "implementation": { - "address": "0x0E55B996EB7Bfc3175E883D02FF55a34f8C9986e", - "creationCodeHash": "0x2e77ad7a1627b6e04bece0fe18b3ab543ef4a2d6914f2e5e640b2c8175aca3a8", - "runtimeCodeHash": "0x0186afe711eff4ceea28620d091e3c6034fd15be05894119c74a38b020e3a554", - "txHash": "0xbb04391bd3353d6f2210e98ced779edcda954d971effcae7fd8676a94afa2655" - } - }, - "AllocationExchange": { - "address": "0x993F00C98D1678371a7b261Ed0E0D4b6F42d9aEE", - "constructorArgs": [ - "0x9623063377AD1B27544C965cCd7342f7EA7e88C7", - "0x00669A4CF01450B64E8A2A20E9b1FCB71E61eF03", - "0x270Ea4ea9e8A699f8fE54515E3Bb2c418952623b", - "0x79f2212de27912bCb25a452fC102C85c142E3eE3" - ], - "creationCodeHash": "0x96c5b59557c161d80f1617775a7b9537a89b0ecf2258598b3a37724be91ae80a", - "runtimeCodeHash": "0xc86fd1d67a0db0aed4cb310f977ebf3e70865e2095a167f4a103c3792146027c", - "txHash": "0x2bad6b8e5eda0026c8c38a70b925bbedd6a617a1e06952fb30e427fdbc592422" - }, - "L2GraphTokenGateway": { - "address": "0x65E1a5e8946e7E87d9774f5288f41c30a99fD302", - "initArgs": ["0x0a8491544221dd212964fbb96487467291b2C97e"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x50816047ea926423ec02b6b89efb96efcd3d7e7028ea7cf82c3da9fd1bf3869e", - "proxy": true, - "implementation": { 
- "address": "0x6f37b2AF8A0Cc74f1bFddf2E9302Cb226710127F", - "creationCodeHash": "0xbd52455bd8b14bfc27af623388fe2f9e06ddd4c4be3fc06c51558a912de91770", - "runtimeCodeHash": "0x29e47f693053f978d6b2ac0a327319591bf5b5e8a6e6c0744b8afcc0250bf667", - "txHash": "0x0eaa9d03982b88e765262a15b95548cb688ce9337a48460f39e55f8850690cbe" - } - }, - "EthereumDIDRegistry": { - "address": "0xa9AEb1c6f14f4244547B9a0946C485DA99047638", - "creationCodeHash": "0x20cd202f7991716a84c097da5fbd365fd27f7f35f241f82c529ad7aba18b814b", - "runtimeCodeHash": "0x5f396ffd54b6cd6b3faded0f366c5d7e148cc54743926061be2dfd12a75391de", - "txHash": "0xdd23b546fa3b6be0cea2339abe3023a082153693fbc7bf1bc86d190165823b39" - }, - "IEthereumDIDRegistry": { - "address": "0xa9AEb1c6f14f4244547B9a0946C485DA99047638" - } - }, - "421613": { - "GraphProxyAdmin": { - "address": "0x4037466bb242f51575d32E8B1be693b3E5Cd1386", - "creationCodeHash": "0x68b304ac6bce7380d5e0f6b14a122f628bffebcc75f8205cb60f0baf578b79c3", - "runtimeCodeHash": "0x8d9ba87a745cf82ab407ebabe6c1490197084d320efb6c246d94bcc80e804417", - "txHash": "0x9c4d5f8c0ab5a5bc36b0a063ab1ff04372ce7d917c0b200b94544b5da4f0230d" - }, - "BancorFormula": { - "address": "0x71319060b9fdeD6174b6368bE04F9A1b7c9aCe48", - "creationCodeHash": "0x7ae36017eddb326ddd79c7363781366121361d42fdb201cf57c57424eede46f4", - "runtimeCodeHash": "0xed6701e196ad93718e28c2a2a44d110d9e9906085bcfe4faf4f6604b08f0116c", - "txHash": "0x7fe8cabb7a4fe56311591aa8d68d6c82cb0d5c232fc5aaf28bed4d1ece0e42e5" - }, - "Controller": { - "address": "0x7f734E995010Aa8d28b912703093d532C37b6EAb", - "creationCodeHash": "0x798f913fbaa1b2547c917e3dc31679089ab27cba442c511c159803acdba28c15", - "runtimeCodeHash": "0x00ae0824f79c4e48d2d23a8d4e6d075f04f44f3ea30a4f4305c345bb98117c62", - "txHash": "0x6213da3e6367ef47cd6e1fe23e4d83296f16153a64236a5c91f865f2ec84c089" - }, - "EpochManager": { - "address": "0x8ECedc7631f4616D7f4074f9fC9D0368674794BE", - "initArgs": ["0x7f734E995010Aa8d28b912703093d532C37b6EAb", "554"], - 
"creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x62b0d6b8556be9443397ad1f6030fdc47b1a4a3ebcc63f34cdf4091420aec84b", - "proxy": true, - "implementation": { - "address": "0xAaB195Ed1B445A2A0E357494d9036bC746227AE2", - "creationCodeHash": "0x83bc0b08dbe1a9259666ec209f06223863f7bb9cfbf917a2d4b795c771a727fe", - "runtimeCodeHash": "0xed60261c6dc84ebc16830c36f3ee370a92802601d5a2fe1c3c19f5120dcbc2eb", - "txHash": "0xd4f8780490f63432580e3dd5b2b4d9b39e904e8b4ac5cfd23540658cbafe449d" - } - }, - "L2GraphToken": { - "address": "0x18C924BD5E8b83b47EFaDD632b7178E2Fd36073D", - "initArgs": ["0xEfc519BEd6a43a14f1BBBbA9e796C4931f7A5540"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x7ec14b524141af953959b537c1acbea9b49b12ee906563a6172123b09ab3d1f6", - "proxy": true, - "implementation": { - "address": "0x5dcAcF820D7b9F0640e8a23a5a857675A774C34a", - "creationCodeHash": "0x6c4146427aafa7375a569154be95c8c931bf83aab0315706dd78bdf79c889e4c", - "runtimeCodeHash": "0x004371d1d80011906953dcba17c648503fc94b94e1e0365c8d8c706ff91f93e9", - "txHash": "0xb748498a2ebc90e20dc8da981be832f4e00f08ea9ff289880738705e45d6aeca" - } - }, - "GraphCurationToken": { - "address": "0x2B757ad83e4ed51ecaE8D4dC9AdE8E3Fa29F7BdC", - "creationCodeHash": "0x1ee42ee271cefe20c33c0de904501e618ac4b56debca67c634d0564cecea9ff2", - "runtimeCodeHash": "0x340e8f378c0117b300f3ec255bc5c3a273f9ab5bd2940fa8eb3b5065b21f86dc", - "txHash": "0x1aa753cd01fa4505c71f6866dae35faee723d181141ed91b6e5cf3082ee90f9b" - }, - "ServiceRegistry": { - "address": "0x07ECDD4278D83Cd2425cA86256634f666b659e53", - "initArgs": ["0x7f734E995010Aa8d28b912703093d532C37b6EAb"], - "creationCodeHash": 
"0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x8a13420fdc91139297ab1497fbf5b443c156bbc7b9d2a1ac97fb9f23abde2723", - "proxy": true, - "implementation": { - "address": "0xd18D4B4e84eA4713E04060c93bD079A974BE6C4a", - "creationCodeHash": "0x50808e8cce93cf78a23c9e6dd7984356bd2bd93be30b358982909140dd61f6ff", - "runtimeCodeHash": "0xaef79c87f7e80107c0dc568cf1f8950459b5174ee3aa565ec487556a655e71db", - "txHash": "0x2d6043d89a5f5c4f3d0df0f50264ab7efebc898be0b5d358a00715ba9f657a89" - } - }, - "Curation": { - "address": "0x7080AAcC4ADF4b1E72615D6eb24CDdE40a04f6Ca", - "initArgs": [ - "0x7f734E995010Aa8d28b912703093d532C37b6EAb", - "0x71319060b9fdeD6174b6368bE04F9A1b7c9aCe48", - "0x2B757ad83e4ed51ecaE8D4dC9AdE8E3Fa29F7BdC", - "1000000", - "10000", - "1000000000000000000" - ], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x2e5744fa4eca56cf6902e27fcc0509487f39bdb0d29b9eb0181db986235289a0", - "proxy": true, - "implementation": { - "address": "0xDA6c9d39b49c3d41CaC2030c6B75b40Efea09817", - "creationCodeHash": "0xa5fa77df71a72c5aadba812345978c291c5fa1a3a23129b6eba3a38ac85d8b5d", - "runtimeCodeHash": "0x1d265e9f658778b48a0247cfef79bfc9304d1faa1f1e085f2fea85629f68e2d5", - "txHash": "0x815eda87a2599d6f2c7458c7b164e7307d05018f0dd72073a50971d424313377" - } - }, - "SubgraphNFTDescriptor": { - "address": "0x30545f313bD2eb0F85E4f808Ae4D2C016efE78b2", - "creationCodeHash": "0xf16e8ff11d852eea165195ac9e0dfa00f98e48f6ce3c77c469c7df9bf195b651", - "runtimeCodeHash": "0x39583196f2bcb85789b6e64692d8c0aa56f001c46f0ca3d371abbba2c695860f", - "txHash": "0x060839a09e89cbd47adbb8c04cc76b21a00785600a4e8b44939dd928391777e1" - }, - "SubgraphNFT": { - "address": "0x5571D8FE183AD1367dF21eE9968690f0Eabdc593", - "constructorArgs": 
["0xEfc519BEd6a43a14f1BBBbA9e796C4931f7A5540"], - "creationCodeHash": "0xc1e58864302084de282dffe54c160e20dd96c6cfff45e00e6ebfc15e04136982", - "runtimeCodeHash": "0x7216e736a8a8754e88688fbf5c0c7e9caf35c55ecc3a0c5a597b951c56cf7458", - "txHash": "0xc11917ffedda6867648fa2cb62cca1df3c0ed485a0a0885284e93a2c5d33455c" - }, - "GNS": { - "address": "0x6bf9104e054537301cC23A1023Ca30A6Df79eB21", - "initArgs": [ - "0x7f734E995010Aa8d28b912703093d532C37b6EAb", - "0x71319060b9fdeD6174b6368bE04F9A1b7c9aCe48", - "0x5571D8FE183AD1367dF21eE9968690f0Eabdc593" - ], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x3c2509730e06249d970818319bb507185d4fdea13d5600cef87928a718950c19", - "proxy": true, - "implementation": { - "address": "0x7eCb82A9Cf9B370d3fC2Ef66E38F38EDFAeaa125", - "creationCodeHash": "0xb0be24e926bb24420bb5a8d3f7bd0b70a545fdddbf8cb177a42478adf4435aae", - "runtimeCodeHash": "0x4cb62b9def5b691e43ed06808b18efe682fcefb7739909be0d6c87f1eda724cd", - "txHash": "0xf1d41fc99ed716a0c890ea62e13ee108ddcb4ecfc74efb715a4ef05605ce449b" - } - }, - "L2Staking": { - "address": "0xcd549d0C43d915aEB21d3a331dEaB9B7aF186D26", - "initArgs": [ - "0x7f734E995010Aa8d28b912703093d532C37b6EAb", - "100000000000000000000000", - "6646", - "10000", - "100000", - "2", - "4", - "12", - "16", - "77", - "100" - ], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0xc98ebdd0a80b97ef8f6305903ef6496a7781db76a5b1b3c3c3b2b10dbd9a7af5", - "proxy": true, - "implementation": { - "address": "0x8E56ee65Ed613f2AecA8898D19497D288601bdeb", - "creationCodeHash": "0x75b63ef816627315c635cae7f95917764e2cb797496280cdeaa9b3230bf7f7bc", - "runtimeCodeHash": "0x461ccf91c7c6188c94c6df430b6954dfd9c5cc2a79a5e4db21422e11b663d319", - "txHash": 
"0xb9ce53dafab3dcaad25b24d9f998888225103265bd2d84cb1545b4e06e96e3b6", - "libraries": { - "LibCobbDouglas": "0x86f0f6cd9a38A851E3AB8f110be06B77C199eC1F" - } - } - }, - "RewardsManager": { - "address": "0x5F06ABd1CfAcF7AE99530D7Fed60E085f0B15e8D", - "initArgs": ["0x7f734E995010Aa8d28b912703093d532C37b6EAb"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0xd4cfa95475e9e867fb24babd6a00a5b6b01d2267533e2412986aa1ff94d51c02", - "proxy": true, - "implementation": { - "address": "0x17A627c7d6F0A1B4A876Cffea64987B62e91c42D", - "creationCodeHash": "0x8e942423da51ec1fbce6834a7da57619454a48aa85efcaffb9a51cff7f655b8f", - "runtimeCodeHash": "0xc231e797a4a00f5dae7a26b30c0f689ab57d9f7b47a6b1c41deb73c13f952e3a", - "txHash": "0xcf601a6ab653f0028d63529fd261776b3c91df190f2bb4bf32909c95c55049a7" - } - }, - "DisputeManager": { - "address": "0x16DEF7E0108A5467A106dbD7537f8591f470342E", - "initArgs": [ - "0x7f734E995010Aa8d28b912703093d532C37b6EAb", - "0xF89688d5d44d73cc4dE880857A3940487076e5A4", - "10000000000000000000000", - "500000", - "25000", - "25000" - ], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x70188c9243c2226ac793ac8c0a9eecd76c9b44e53f7f6f97fa177a34808421a0", - "proxy": true, - "implementation": { - "address": "0x39aEdA1d6ea3B62b76C7c439beBfFCb5369a175C", - "creationCodeHash": "0x2e77ad7a1627b6e04bece0fe18b3ab543ef4a2d6914f2e5e640b2c8175aca3a8", - "runtimeCodeHash": "0x0186afe711eff4ceea28620d091e3c6034fd15be05894119c74a38b020e3a554", - "txHash": "0x4efbd28e55866c0292309964f47bd805922ad417e5980e14e055ad693024582d" - } - }, - "AllocationExchange": { - "address": "0x61809D6Cde07f27D2fcDCb67a42d0Af1988Be5e8", - "constructorArgs": [ - "0x18C924BD5E8b83b47EFaDD632b7178E2Fd36073D", - 
"0xcd549d0C43d915aEB21d3a331dEaB9B7aF186D26", - "0x05F359b1319f1Ca9b799CB6386F31421c2c49dBA", - "0xD06f366678AE139a94b2AaC2913608De568F1D03" - ], - "creationCodeHash": "0x96c5b59557c161d80f1617775a7b9537a89b0ecf2258598b3a37724be91ae80a", - "runtimeCodeHash": "0xed3d9cce65ddfa8a237d4d7d294ffdb13a082e0adcda3bbd313029cfae1365f3", - "txHash": "0x1df63329a21dca69d20e03c076dd89c350970d35319eeefab028cebbc78d29dc" - }, - "L2GraphTokenGateway": { - "address": "0xef2757855d2802bA53733901F90C91645973f743", - "initArgs": ["0x7f734E995010Aa8d28b912703093d532C37b6EAb"], - "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", - "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", - "txHash": "0x47bde4e3ad0bc077897a3de65058c4b7dd710aa447ec25942f716321cbdc590d", - "proxy": true, - "implementation": { - "address": "0xc68cd0d2ca533232Fd86D6e48b907338B2E0a74A", - "creationCodeHash": "0xbd52455bd8b14bfc27af623388fe2f9e06ddd4c4be3fc06c51558a912de91770", - "runtimeCodeHash": "0x29e47f693053f978d6b2ac0a327319591bf5b5e8a6e6c0744b8afcc0250bf667", - "txHash": "0xf68a5e1e516ee9a646f19bbe4d58336fdfcf5fc859f84cdac5e68b00bcd3a09a" - } - }, - "IEthereumDIDRegistry": { - "address": "0x8FFfcD6a85D29E9C33517aaf60b16FE4548f517E" - } - } -} +{} diff --git a/config/graph.arbitrum-goerli-scratch-4.yml b/config/graph.arbitrum-goerli-scratch-4.yml new file mode 100644 index 000000000..49a7a4e89 --- /dev/null +++ b/config/graph.arbitrum-goerli-scratch-4.yml @@ -0,0 +1,153 @@ +general: + arbitrator: &arbitrator "0xed42A803C9f0bAE74bF0E63f36FF0Ae7FF38beF7" # Arbitration Council (TODO: update) + governor: &governor "0xf6De21Ce446B47d7599BC6554Eaa9EDF05Dfe731" # Graph Council (TODO: update) + authority: &authority "0xDfaf2F953899c3Fae4aa4979727fd9F441E006b2" # Authority that signs payment vouchers + availabilityOracle: &availabilityOracle "0xC241E5A6e35432bf340B9853F025D90031a9E8ef" # Subgraph Availability Oracle (TODO: update) + 
pauseGuardian: &pauseGuardian "0x5753d3c0c08C2Cee7be69eBbd058299faB0ea966" # Protocol pause guardian (TODO: update) + allocationExchangeOwner: &allocationExchangeOwner "0x91cEc32a6975265cF96A43f4209F39274cBEc088" # Allocation Exchange owner (TODO: update) + +contracts: + Controller: + calls: + - fn: "setContractProxy" + id: "0xe6876326c1291dfcbbd3864a6816d698cd591defc7aa2153d7f9c4c04016c89f" # keccak256('Curation') + contractAddress: "${{L2Curation.address}}" + - fn: "setContractProxy" + id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') + contractAddress: "${{L2GNS.address}}" + - fn: "setContractProxy" + id: "0xf942813d07d17b56de9a9afc8de0ced6e8c053bbfdcc87b7badea4ddcf27c307" # keccak256('DisputeManager') + contractAddress: "${{DisputeManager.address}}" + - fn: "setContractProxy" + id: "0xc713c3df6d14cdf946460395d09af88993ee2b948b1a808161494e32c5f67063" # keccak256('EpochManager') + contractAddress: "${{EpochManager.address}}" + - fn: "setContractProxy" + id: "0x966f1e8d8d8014e05f6ec4a57138da9be1f7c5a7f802928a18072f7c53180761" # keccak256('RewardsManager') + contractAddress: "${{RewardsManager.address}}" + - fn: "setContractProxy" + id: "0x1df41cd916959d1163dc8f0671a666ea8a3e434c13e40faef527133b5d167034" # keccak256('Staking') + contractAddress: "${{L2Staking.address}}" + - fn: "setContractProxy" + id: "0x45fc200c7e4544e457d3c5709bfe0d520442c30bbcbdaede89e8d4a4bbc19247" # keccak256('GraphToken') + contractAddress: "${{L2GraphToken.address}}" + - fn: "setContractProxy" + id: "0xd362cac9cb75c10d67bcc0b7eeb0b1ef48bb5420b556c092d4fd7f758816fcf0" # keccak256('GraphTokenGateway') + contractAddress: "${{L2GraphTokenGateway.address}}" + - fn: "setPauseGuardian" + pauseGuardian: *pauseGuardian + - fn: "transferOwnership" + owner: *governor + GraphProxyAdmin: + calls: + - fn: "transferOwnership" + owner: *governor + ServiceRegistry: + proxy: true + init: + controller: "${{Controller.address}}" + calls: + - fn: "syncAllContracts" + 
EpochManager: + proxy: true + init: + controller: "${{Controller.address}}" + lengthInBlocks: 554 # length in hours = lengthInBlocks*13/60/60 (~13 second blocks) + L2GraphToken: + proxy: true + init: + owner: "${{Env.deployer}}" + calls: + - fn: "addMinter" + minter: "${{RewardsManager.address}}" + - fn: "renounceMinter" + - fn: "transferOwnership" + owner: *governor + L2Curation: + proxy: true + init: + controller: "${{Controller.address}}" + curationTokenMaster: "${{GraphCurationToken.address}}" + curationTaxPercentage: 10000 # in parts per million + minimumCurationDeposit: "1000000000000000000" # in wei + calls: + - fn: "syncAllContracts" + DisputeManager: + proxy: true + init: + controller: "${{Controller.address}}" + arbitrator: *arbitrator + minimumDeposit: "10000000000000000000000" # in wei + fishermanRewardPercentage: 500000 # in parts per million + idxSlashingPercentage: 25000 # in parts per million + qrySlashingPercentage: 25000 # in parts per million + calls: + - fn: "syncAllContracts" + L2GNS: + proxy: true + init: + controller: "${{Controller.address}}" + subgraphNFT: "${{SubgraphNFT.address}}" + calls: + - fn: "approveAll" + - fn: "syncAllContracts" + SubgraphNFT: + init: + governor: "${{Env.deployer}}" + calls: + - fn: "setTokenDescriptor" + tokenDescriptor: "${{SubgraphNFTDescriptor.address}}" + - fn: "setMinter" + minter: "${{L2GNS.address}}" + - fn: "transferOwnership" + owner: *governor + L2Staking: + proxy: true + init: + controller: "${{Controller.address}}" + minimumIndexerStake: "100000000000000000000000" # in wei + thawingPeriod: 6646 # in blocks + protocolPercentage: 10000 # in parts per million + curationPercentage: 100000 # in parts per million + channelDisputeEpochs: 2 # in epochs + maxAllocationEpochs: 4 # in epochs + delegationUnbondingPeriod: 12 # in epochs + delegationRatio: 16 # delegated stake to indexer stake multiplier + rebateAlphaNumerator: 77 # rebateAlphaNumerator / rebateAlphaDenominator + rebateAlphaDenominator: 100 # 
rebateAlphaNumerator / rebateAlphaDenominator + extensionImpl: "${{StakingExtension.address}}" + calls: + - fn: "setDelegationTaxPercentage" + delegationTaxPercentage: 5000 # parts per million + - fn: "setSlasher" + slasher: "${{DisputeManager.address}}" + allowed: true + - fn: "setAssetHolder" + assetHolder: "${{AllocationExchange.address}}" + allowed: true + - fn: "syncAllContracts" + RewardsManager: + proxy: true + init: + controller: "${{Controller.address}}" + calls: + - fn: "setIssuancePerBlock" + issuancePerBlock: "6036500000000000000" # per block increase of total supply, blocks in a year = 365*60*60*24/12 + - fn: "setSubgraphAvailabilityOracle" + subgraphAvailabilityOracle: *availabilityOracle + - fn: "syncAllContracts" + AllocationExchange: + init: + graphToken: "${{L2GraphToken.address}}" + staking: "${{L2Staking.address}}" + governor: *allocationExchangeOwner + authority: *authority + calls: + - fn: "approveAll" + L2GraphTokenGateway: + proxy: true + init: + controller: "${{Controller.address}}" + calls: + - fn: "syncAllContracts" + - fn: "setPauseGuardian" + pauseGuardian: *pauseGuardian diff --git a/config/graph.goerli-scratch-4.yml b/config/graph.goerli-scratch-4.yml new file mode 100644 index 000000000..28ecf0dee --- /dev/null +++ b/config/graph.goerli-scratch-4.yml @@ -0,0 +1,162 @@ +general: + arbitrator: &arbitrator "0x54d1a1020C5bc929A603DC2161BF6C71ae05553E" # Arbitration Council + governor: &governor "0x68C18C161C46D2E6097980e0D89aB35f28c365E2" # Graph Council + authority: &authority "0x142eb17fCd30Bc31Dfd69312c0f4E5E329Cc5a3C" # Authority that signs payment vouchers + availabilityOracle: &availabilityOracle "0xBB3Fbf50896943Cd05550b7d59cB6905c54053df" # Subgraph Availability Oracle + pauseGuardian: &pauseGuardian "0xF7470147bF547108c58197DbcBfD58D931e7908f" # Protocol pause guardian + allocationExchangeOwner: &allocationExchangeOwner "0xcfF86De5ccc3f27574C63E1CaBD97CdD840Ee798" # Allocation Exchange owner + +contracts: + Controller: + calls: 
+ - fn: "setContractProxy" + id: "0xe6876326c1291dfcbbd3864a6816d698cd591defc7aa2153d7f9c4c04016c89f" # keccak256('Curation') + contractAddress: "${{Curation.address}}" + - fn: "setContractProxy" + id: "0x39605a6c26a173774ca666c67ef70cf491880e5d3d6d0ca66ec0a31034f15ea3" # keccak256('GNS') + contractAddress: "${{L1GNS.address}}" + - fn: "setContractProxy" + id: "0xf942813d07d17b56de9a9afc8de0ced6e8c053bbfdcc87b7badea4ddcf27c307" # keccak256('DisputeManager') + contractAddress: "${{DisputeManager.address}}" + - fn: "setContractProxy" + id: "0xc713c3df6d14cdf946460395d09af88993ee2b948b1a808161494e32c5f67063" # keccak256('EpochManager') + contractAddress: "${{EpochManager.address}}" + - fn: "setContractProxy" + id: "0x966f1e8d8d8014e05f6ec4a57138da9be1f7c5a7f802928a18072f7c53180761" # keccak256('RewardsManager') + contractAddress: "${{RewardsManager.address}}" + - fn: "setContractProxy" + id: "0x1df41cd916959d1163dc8f0671a666ea8a3e434c13e40faef527133b5d167034" # keccak256('Staking') + contractAddress: "${{L1Staking.address}}" + - fn: "setContractProxy" + id: "0x45fc200c7e4544e457d3c5709bfe0d520442c30bbcbdaede89e8d4a4bbc19247" # keccak256('GraphToken') + contractAddress: "${{GraphToken.address}}" + - fn: "setContractProxy" + id: "0xd362cac9cb75c10d67bcc0b7eeb0b1ef48bb5420b556c092d4fd7f758816fcf0" # keccak256('GraphTokenGateway') + contractAddress: "${{L1GraphTokenGateway.address}}" + - fn: "setPauseGuardian" + pauseGuardian: *pauseGuardian + - fn: "transferOwnership" + owner: *governor + GraphProxyAdmin: + calls: + - fn: "transferOwnership" + owner: *governor + ServiceRegistry: + proxy: true + init: + controller: "${{Controller.address}}" + calls: + - fn: "syncAllContracts" + EpochManager: + proxy: true + init: + controller: "${{Controller.address}}" + lengthInBlocks: 554 # length in hours = lengthInBlocks*13/60/60 (~13 second blocks) + GraphToken: + init: + initialSupply: "10000000000000000000000000000" # in wei + calls: + - fn: "addMinter" + minter: 
"${{RewardsManager.address}}" + - fn: "addMinter" + minter: "${{L1GraphTokenGateway.address}}" + - fn: "renounceMinter" + - fn: "transferOwnership" + owner: *governor + Curation: + proxy: true + init: + controller: "${{Controller.address}}" + bondingCurve: "${{BancorFormula.address}}" + curationTokenMaster: "${{GraphCurationToken.address}}" + reserveRatio: 500000 # in parts per million + curationTaxPercentage: 10000 # in parts per million + minimumCurationDeposit: "1000000000000000000" # in wei + calls: + - fn: "syncAllContracts" + DisputeManager: + proxy: true + init: + controller: "${{Controller.address}}" + arbitrator: *arbitrator + minimumDeposit: "10000000000000000000000" # in wei + fishermanRewardPercentage: 500000 # in parts per million + idxSlashingPercentage: 25000 # in parts per million + qrySlashingPercentage: 25000 # in parts per million + calls: + - fn: "syncAllContracts" + L1GNS: + proxy: true + init: + controller: "${{Controller.address}}" + subgraphNFT: "${{SubgraphNFT.address}}" + calls: + - fn: "approveAll" + - fn: "syncAllContracts" + SubgraphNFT: + init: + governor: "${{Env.deployer}}" + calls: + - fn: "setTokenDescriptor" + tokenDescriptor: "${{SubgraphNFTDescriptor.address}}" + - fn: "setMinter" + minter: "${{L1GNS.address}}" + - fn: "transferOwnership" + owner: *governor + L1Staking: + proxy: true + init: + controller: "${{Controller.address}}" + minimumIndexerStake: "100000000000000000000000" # in wei + thawingPeriod: 6646 # in blocks + protocolPercentage: 10000 # in parts per million + curationPercentage: 100000 # in parts per million + channelDisputeEpochs: 2 # in epochs + maxAllocationEpochs: 4 # in epochs + delegationUnbondingPeriod: 12 # in epochs + delegationRatio: 16 # delegated stake to indexer stake multiplier + rebateAlphaNumerator: 77 # rebateAlphaNumerator / rebateAlphaDenominator + rebateAlphaDenominator: 100 # rebateAlphaNumerator / rebateAlphaDenominator + extensionImpl: "${{StakingExtension.address}}" + calls: + - fn: 
"setDelegationTaxPercentage" + delegationTaxPercentage: 5000 # parts per million + - fn: "setSlasher" + slasher: "${{DisputeManager.address}}" + allowed: true + - fn: "setAssetHolder" + assetHolder: "${{AllocationExchange.address}}" + allowed: true + - fn: "syncAllContracts" + RewardsManager: + proxy: true + init: + controller: "${{Controller.address}}" + calls: + - fn: "setIssuancePerBlock" + issuancePerBlock: "114693500000000000000" # per block increase of total supply, blocks in a year = 365*60*60*24/12 + - fn: "setSubgraphAvailabilityOracle" + subgraphAvailabilityOracle: *availabilityOracle + - fn: "syncAllContracts" + AllocationExchange: + init: + graphToken: "${{GraphToken.address}}" + staking: "${{L1Staking.address}}" + governor: *allocationExchangeOwner + authority: *authority + calls: + - fn: "approveAll" + L1GraphTokenGateway: + proxy: true + init: + controller: "${{Controller.address}}" + calls: + - fn: "syncAllContracts" + - fn: "setPauseGuardian" + pauseGuardian: *pauseGuardian + BridgeEscrow: + proxy: true + init: + controller: "${{Controller.address}}" + calls: + - fn: "syncAllContracts" From ab63b39eb6165cf30080b073ffa0a1331dc0f2b2 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 28 Apr 2023 19:39:37 -0300 Subject: [PATCH 111/112] chore: deploy scratch 4 on L1 --- addresses.json | 250 ++++++++++++++++++++++++++++++++++++++++- tasks/verify/verify.ts | 96 ++++++++-------- 2 files changed, 301 insertions(+), 45 deletions(-) diff --git a/addresses.json b/addresses.json index 0967ef424..0ce797cd5 100644 --- a/addresses.json +++ b/addresses.json @@ -1 +1,249 @@ -{} +{ + "1": { + "IENS": { + "address": "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e" + }, + "IEthereumDIDRegistry": { + "address": "0xdCa7EF03e98e0DC2B855bE647C39ABe984fcF21B" + } + }, + "5": { + "IENS": { + "address": "0x00000000000C2E074eC69A0dFb2997BA6C7d2e1e" + }, + "IEthereumDIDRegistry": { + "address": "0xdCa7EF03e98e0DC2B855bE647C39ABe984fcF21B" + }, + "GraphProxyAdmin": { + 
"address": "0x536dCf3e39738b949b66f212C597c07C48535c74", + "creationCodeHash": "0x68b304ac6bce7380d5e0f6b14a122f628bffebcc75f8205cb60f0baf578b79c3", + "runtimeCodeHash": "0x8d9ba87a745cf82ab407ebabe6c1490197084d320efb6c246d94bcc80e804417", + "txHash": "0x0b7dd56e3c38c31422ed5f7e76b22b412d2423fa458a2196c356487f8d1e6f59" + }, + "BancorFormula": { + "address": "0x4b481961CdbD387feeb1a56Db3Cbb20ab92f9a5C", + "creationCodeHash": "0x7ae36017eddb326ddd79c7363781366121361d42fdb201cf57c57424eede46f4", + "runtimeCodeHash": "0xed6701e196ad93718e28c2a2a44d110d9e9906085bcfe4faf4f6604b08f0116c", + "txHash": "0xdc33aa1a67df3419411fda6c2a088970a8253cb647d80e3157ac221864252bf4" + }, + "Controller": { + "address": "0x7A6F68E2d4849468db38342fe6b698D7a6acECB9", + "creationCodeHash": "0x5bde9a87bc4e8dd24d41900f0a19321c1dc6d3373d51bba093b130bb5b80a677", + "runtimeCodeHash": "0x7f0479db1d60ecf6295d92ea2359ebdd223640795613558b0594680f5d4922c9", + "txHash": "0xb8fe03ad9a3f89da4a6cd4ba5cb6cf6aee92005810f79a6bfc59a977c74645f9" + }, + "EpochManager": { + "address": "0xfAee9A2B2aD736B283D3fee3C9d374cfdbE3C646", + "initArgs": ["0x7A6F68E2d4849468db38342fe6b698D7a6acECB9", "554"], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0x339ce52f4f26def4bed78f36349f46183410b16169649919a9d1c0103f1e6fc0", + "proxy": true, + "implementation": { + "address": "0x1981e99f4Aa0262B0Ee2B4a007082dDC87155dF3", + "creationCodeHash": "0x4f08e1ffb8c54ebbca17d3c7b0e606de209d23fdfa03ec8dd7931dbb58dab936", + "runtimeCodeHash": "0xc1596cfa842862545fbe627530f3dd0e01d73194c80fa243d66246409e85146a", + "txHash": "0x54ff9440f8a691de3a6fdeb0ea3ed7335e8c9a29d4057e43eff5b4cefd10a76d" + } + }, + "GraphToken": { + "address": "0xB16cF29ccf4399C8E75F664D90CC1687Fb38CFf8", + "constructorArgs": ["10000000000000000000000000000"], + "creationCodeHash": 
"0x9c50586e9e305b3a299f1cdf92ca9bb04fad5f43b5e0f7505054d79783fd8b69", + "runtimeCodeHash": "0xfe612acbb09bdb23fe60014e890054621fd34d74bf12bd94fb73351d474cd641", + "txHash": "0xe35ffed418f042f4beddb882b7a0be82c368f9bc070b757184e328330ba4f9ab" + }, + "GraphCurationToken": { + "address": "0xF2F158a7F1438856614743D8dE3148a21BFdE26C", + "creationCodeHash": "0x1ee42ee271cefe20c33c0de904501e618ac4b56debca67c634d0564cecea9ff2", + "runtimeCodeHash": "0x340e8f378c0117b300f3ec255bc5c3a273f9ab5bd2940fa8eb3b5065b21f86dc", + "txHash": "0x8ad8ab01c267f0ee75bdc3eec7cf20e4cd8d7435f093f0879e89249aedbcf4ca" + }, + "ServiceRegistry": { + "address": "0xD469666BF64b55e44073D5321fd30065E8b03bb1", + "initArgs": ["0x7A6F68E2d4849468db38342fe6b698D7a6acECB9"], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0xf78c52a48ddf6d1f3f6f14fc2860de0bb74702bd244f57536738a3cecf85e6f5", + "proxy": true, + "implementation": { + "address": "0xdd32797046e1A6109F6C29a1aeb52216443e502E", + "creationCodeHash": "0x5e0d4c5a141e688791bdb01b2ee0a05bd3b113cb3ba4e40f2bfea44a46e76b65", + "runtimeCodeHash": "0xdb79b8abe9faccf1fa90dc2705355eb5e149613e21b01521bed28c3c53f0828c", + "txHash": "0x5fc1efc4b48640e87d0afa63fa38ae77f125184669562d383089254c8ecc7e3c" + } + }, + "Curation": { + "address": "0x217dB54Cb27d9232EBaeB89bBFd7a58Ae9659Cd0", + "initArgs": [ + "0x7A6F68E2d4849468db38342fe6b698D7a6acECB9", + "0x4b481961CdbD387feeb1a56Db3Cbb20ab92f9a5C", + "0xF2F158a7F1438856614743D8dE3148a21BFdE26C", + "500000", + "10000", + "1000000000000000000" + ], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0xd61f8345f5a293d6aa6a4e48926c73fc8287fb448bb2eff89cce7a30caa392f3", + "proxy": true, + "implementation": { + "address": 
"0x1777d3CE6eb100503302a50a430b22F366796A38", + "creationCodeHash": "0x32ec24cd745738aea9b8cb693d4a20f3cf219226da985ab92d270d3e0b5f7f65", + "runtimeCodeHash": "0xfc0db997a425bc44ee6d6f1e2fd6471227489a22c94f1b358b900134e7ce6308", + "txHash": "0x4cfcef82dd57a6744f03b2eaea9759faace3de1d3c3ac1349bd767428d063e19" + } + }, + "SubgraphNFTDescriptor": { + "address": "0xfa0F0272DB3BD27582cA16b0Aa6f197Ec36cE77a", + "creationCodeHash": "0xf16e8ff11d852eea165195ac9e0dfa00f98e48f6ce3c77c469c7df9bf195b651", + "runtimeCodeHash": "0x39583196f2bcb85789b6e64692d8c0aa56f001c46f0ca3d371abbba2c695860f", + "txHash": "0x61fca92b10805ed8f9806a44108088a9bef100e833fc4aa84a5e8fd5c65bc8e3" + }, + "SubgraphNFT": { + "address": "0x20e7185FC8B217Ec9aF8D3cce401CC5A79692bfb", + "constructorArgs": ["0xBc7f4d3a85B820fDB1058FD93073Eb6bc9AAF59b"], + "creationCodeHash": "0xc1e58864302084de282dffe54c160e20dd96c6cfff45e00e6ebfc15e04136982", + "runtimeCodeHash": "0x7216e736a8a8754e88688fbf5c0c7e9caf35c55ecc3a0c5a597b951c56cf7458", + "txHash": "0x0d650d7ca9e285facfc997ead4d4c1c5f7cb52747e735f979b756307b7f9130b" + }, + "L1GNS": { + "address": "0x2916ca838E5D481631Bf0D8ee941a5c63863b57E", + "initArgs": [ + "0x7A6F68E2d4849468db38342fe6b698D7a6acECB9", + "0x20e7185FC8B217Ec9aF8D3cce401CC5A79692bfb" + ], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0x5d69e691976763c85f5484563ec293655710019a362e182a0559b13b3b30d434", + "proxy": true, + "implementation": { + "address": "0x5e02b83e78a03B21F0F5FdAC28878D33b516d89d", + "creationCodeHash": "0x6c15ab1d0c9539b253429ff85779f2efea92d0a047d3e49b0541aa9ffc78f941", + "runtimeCodeHash": "0xb503cfbe1830cdcd6b953a08ea608f6624019fe00cae2df9375222781649539c", + "txHash": "0x44c74e54296222daecaf18adc728d868abe5f77021c66635d47a88ffbb314491" + } + }, + "StakingExtension": { + "address": "0xAe0bAd4E8Cc62fBe245CdBD3D2270Dd2A6a9B447", 
+ "creationCodeHash": "0x105ab964d46b3e9208aae1d0e5f40d7bd1450d979b9b5f56966fcd7fe3b44803", + "runtimeCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "txHash": "0x10ed8ff1e110a3c78cb4713d6bcc988d51767637ebb9d9b457bfa26bd3d767dd" + }, + "L1Staking": { + "address": "0xD6E8A5015a47E6eB04Bb9b483498d8bA1646daA5", + "initArgs": [ + "0x7A6F68E2d4849468db38342fe6b698D7a6acECB9", + "100000000000000000000000", + "6646", + "10000", + "100000", + "2", + "4", + "12", + "16", + "77", + "100", + "0xAe0bAd4E8Cc62fBe245CdBD3D2270Dd2A6a9B447" + ], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0xf7c3527ffc2a9ff09206e438ca5d08059ccdd3137c11161e2aa8cf81d8b0a38e", + "proxy": true, + "implementation": { + "address": "0xcff4AfBbb6B32A58305bc49F18c45cCe8F4f2B43", + "creationCodeHash": "0x466056027ce0d3c598846b673c6e685b0088ed1feaf48bfdca73e28357272885", + "runtimeCodeHash": "0x23380050b9ea0a1c4bb1d99f1bb18c7ad30ae9b27ba68120bf024880b26c552f", + "txHash": "0x1aef9e2dc0e530ab15a69137698ae465798fadd6a347bdf9a5b521278579afe0", + "libraries": { + "LibCobbDouglas": "0x96d79b4A390832C267FDD99332C1515547DA0FBc" + } + } + }, + "RewardsManager": { + "address": "0xcB1eD98451BCfFc76F013EA11ec9e0b118E76c2d", + "initArgs": ["0x7A6F68E2d4849468db38342fe6b698D7a6acECB9"], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0x8051b46e5d78e170139acd383e88170b7314b7b46fe284f442f302355266c48f", + "proxy": true, + "implementation": { + "address": "0x3C440DBd2552F51C0A62cc3B441a64616333B6aD", + "creationCodeHash": "0xac11d3f6f8c15eeeeaac99d1437af77c66dd3922a6b1cc157a872940ba295e69", + "runtimeCodeHash": "0x6fd1d258635a516a5711dc682e64f9a4bc10293e7fa66d5bd392eb826f8716e8", + "txHash": 
"0x2dd1c920d665e162d1dd640b279d1990a4a18ddb9348aea7b703e4af849514f8" + } + }, + "DisputeManager": { + "address": "0x8359739fb5061e721cDfCcAD1E377cd3b39bF76D", + "initArgs": [ + "0x7A6F68E2d4849468db38342fe6b698D7a6acECB9", + "0x54d1a1020C5bc929A603DC2161BF6C71ae05553E", + "10000000000000000000000", + "500000", + "25000", + "25000" + ], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0x499ed630dfa688defd616f53e3da519aa3cfe7d17c4720b2a765253f941ac5a6", + "proxy": true, + "implementation": { + "address": "0xe0c2CCe14eC2B05a27e029f45Edf8e43A1C1a2e6", + "creationCodeHash": "0x14d74392339e928eb85811679a69f535726bf4107f1d37c211b8fe6435503ec1", + "runtimeCodeHash": "0xd32c88960f4b7f126b934f9e34c8a0c286d588ba20c1f6c7b112865fa63c4f51", + "txHash": "0x5abbd1af2035ab42e9494f6baa0e06d22e29b576198a92cbc1a578cc748e6a29" + } + }, + "AllocationExchange": { + "address": "0xABb1BecEa4abA20A99bf999D715fB29bdfD98Cd6", + "constructorArgs": [ + "0xB16cF29ccf4399C8E75F664D90CC1687Fb38CFf8", + "0xD6E8A5015a47E6eB04Bb9b483498d8bA1646daA5", + "0xcfF86De5ccc3f27574C63E1CaBD97CdD840Ee798", + "0x142eb17fCd30Bc31Dfd69312c0f4E5E329Cc5a3C" + ], + "creationCodeHash": "0xd90c042ddc86f0894af49cdd7b27e7698707557ad1b343ea6da7a495f696bd9a", + "runtimeCodeHash": "0x2a67f7372f0af34a6af5099f325f5986351f12e3a7ccf65cf4eecaf5c1b1eec7", + "txHash": "0x9db5ca07c2f75b44505c89bd24315219be126c8e0eb1e1f1f55f0697a81f9a7b" + }, + "L1GraphTokenGateway": { + "address": "0x5e9006BADEf31EDACF3717F8BF1D137672811739", + "initArgs": ["0x7A6F68E2d4849468db38342fe6b698D7a6acECB9"], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0x5ccff931c7f5a3e238048dc36620fdd9be146d5e3170d96ca64c2efe885db6dd", + "proxy": true, + "implementation": { 
+ "address": "0x008AB5fdDedEfeE5C15f87FF795Bf78096765C3B", + "creationCodeHash": "0xdc62caff704ca3af7c962403e6d4b2ec79f468a0c1c2558ba7bfdacf98a50538", + "runtimeCodeHash": "0x376bf689c2e201e557aa9addefb23c00423bea75c3e54a05239eef60a7e71e69", + "txHash": "0x82b8669a35c9bc8f9b7f66228845e6dcbb397e0384b9e949a2bdbba9cec53e81" + } + }, + "BridgeEscrow": { + "address": "0xA324D97820E8349d4e403828Aa4F31c96253d29C", + "initArgs": ["0x7A6F68E2d4849468db38342fe6b698D7a6acECB9"], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0x016fd735ed829936fd570294249f7b31a9e7ae22a62bcf9825a767a5c77156c2", + "proxy": true, + "implementation": { + "address": "0x8d57C294C8aF10De48D042a97fB341dDcE616564", + "creationCodeHash": "0x0805aa5118c6c0ff32979fff0293f15117a5585164ac19e9d087f128f2ece376", + "runtimeCodeHash": "0x729d39ccb5ef0c4fafe515183be7c9327223ab9c1211862efba2aac1ec4ad3ba", + "txHash": "0xabe01da2a34082a03068efdd1d53e58325e13bedc55beb066d07c12cadcaa8cd" + } + } + }, + "42161": { + "IEthereumDIDRegistry": { + "address": "0xa9AEb1c6f14f4244547B9a0946C485DA99047638" + } + }, + "421613": { + "IEthereumDIDRegistry": { + "address": "0x8FFfcD6a85D29E9C33517aaf60b16FE4548f517E" + } + } +} diff --git a/tasks/verify/verify.ts b/tasks/verify/verify.ts index 2fa7326df..05bbf7b09 100644 --- a/tasks/verify/verify.ts +++ b/tasks/verify/verify.ts @@ -10,6 +10,7 @@ import { Wallet } from 'ethers' import fs from 'fs' import path from 'path' import { HardhatRuntimeEnvironment } from 'hardhat/types/runtime' +import { readConfig } from '../../cli/config' task('sourcify', 'Verifies contract on sourcify') .addPositionalParam('address', 'Address of the smart contract to verify', undefined, types.string) @@ -48,26 +49,29 @@ task('sourcifyAll', 'Verifies all contracts on sourcify') for (const contractName of addressBook.listEntries()) { console.log(`\n> 
Verifying contract ${contractName}...`) - - const contractPath = getContractPath(contractName) - if (contractPath) { - const contract = addressBook.getEntry(contractName) - if (contract.implementation) { - console.log('Contract is upgradeable, verifying proxy...') - + try { + const contractPath = getContractPath(contractName) + if (contractPath) { + const contract = addressBook.getEntry(contractName) + if (contract.implementation) { + console.log('Contract is upgradeable, verifying proxy...') + + await hre.run('sourcify', { + address: contract.address, + contract: 'contracts/upgrades/GraphProxy.sol:GraphProxy', + }) + } + + // Verify implementation await hre.run('sourcify', { - address: contract.address, - contract: 'contracts/upgrades/GraphProxy.sol:GraphProxy', + address: contract.implementation?.address ?? contract.address, + contract: `${contractPath}:${contractName}`, }) + } else { + console.log(`Contract ${contractName} not found.`) } - - // Verify implementation - await hre.run('sourcify', { - address: contract.implementation?.address ?? 
contract.address, - contract: `${contractPath}:${contractName}`, - }) - } else { - console.log(`Contract ${contractName} not found.`) + } catch (err) { + console.log(err) } } }) @@ -84,45 +88,49 @@ task('verifyAll', 'Verifies all contracts on etherscan') } console.log(`> Verifying all contracts on chain ${chainName}[${chainId}]...`) - const { addressBook, graphConfig } = hre.graph({ + let { addressBook, graphConfig } = hre.graph({ addressBook: args.addressBook, graphConfig: args.graphConfig, }) + graphConfig = readConfig(args.graphConfig) const accounts = await hre.ethers.getSigners() const env = await loadEnv(args, accounts[0] as unknown as Wallet) for (const contractName of addressBook.listEntries()) { console.log(`\n> Verifying contract ${contractName}...`) - - const contractConfig = getContractConfig(graphConfig, addressBook, contractName, env) - const contractPath = getContractPath(contractName) - const constructorParams = contractConfig.params.map((p) => p.value.toString()) - - if (contractPath) { - const contract = addressBook.getEntry(contractName) - - if (contract.implementation) { - console.log('Contract is upgradeable, verifying proxy...') - const proxyAdmin = addressBook.getEntry('GraphProxyAdmin') - - // Verify proxy + try { + const contractConfig = getContractConfig(graphConfig, addressBook, contractName, env) + const contractPath = getContractPath(contractName) + const constructorParams = contractConfig.params.map((p) => p.value.toString()) + + if (contractPath) { + const contract = addressBook.getEntry(contractName) + + if (contract.implementation) { + console.log('Contract is upgradeable, verifying proxy...') + const proxyAdmin = addressBook.getEntry('GraphProxyAdmin') + + // Verify proxy + await safeVerify(hre, { + address: contract.address, + contract: 'contracts/upgrades/GraphProxy.sol:GraphProxy', + constructorArgsParams: [contract.implementation.address, proxyAdmin.address], + }) + } + + // Verify implementation + console.log('Verifying 
implementation...') await safeVerify(hre, { - address: contract.address, - contract: 'contracts/upgrades/GraphProxy.sol:GraphProxy', - constructorArgsParams: [contract.implementation.address, proxyAdmin.address], + address: contract.implementation?.address ?? contract.address, + contract: `${contractPath}:${contractName}`, + constructorArgsParams: contract.implementation ? [] : constructorParams, }) + } else { + console.log(`Contract ${contractName} not found.`) } - - // Verify implementation - console.log('Verifying implementation...') - await safeVerify(hre, { - address: contract.implementation?.address ?? contract.address, - contract: `${contractPath}:${contractName}`, - constructorArgsParams: contract.implementation ? [] : constructorParams, - }) - } else { - console.log(`Contract ${contractName} not found.`) + } catch (err) { + console.log(err) } } }) From e8bc541f90b3743270b06b059fb360adea0750d7 Mon Sep 17 00:00:00 2001 From: Pablo Carranza Velez Date: Fri, 28 Apr 2023 20:21:08 -0300 Subject: [PATCH 112/112] chore: deploy scratch 4 on L2 --- addresses.json | 206 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 206 insertions(+) diff --git a/addresses.json b/addresses.json index 0ce797cd5..f28f195d3 100644 --- a/addresses.json +++ b/addresses.json @@ -244,6 +244,212 @@ "421613": { "IEthereumDIDRegistry": { "address": "0x8FFfcD6a85D29E9C33517aaf60b16FE4548f517E" + }, + "GraphProxyAdmin": { + "address": "0x932e0Fb105ee1E8e34B88b772072DF52AE25581A", + "creationCodeHash": "0x68b304ac6bce7380d5e0f6b14a122f628bffebcc75f8205cb60f0baf578b79c3", + "runtimeCodeHash": "0x8d9ba87a745cf82ab407ebabe6c1490197084d320efb6c246d94bcc80e804417", + "txHash": "0x3ac217ccebae0d16afcd763cf7e60c1c01ba036d624ee6ca64dcd7459948a87b" + }, + "Controller": { + "address": "0x532bb32D6342362eA3F50E00fFC22D698D2E80Ad", + "creationCodeHash": "0x5bde9a87bc4e8dd24d41900f0a19321c1dc6d3373d51bba093b130bb5b80a677", + "runtimeCodeHash": 
"0x7f0479db1d60ecf6295d92ea2359ebdd223640795613558b0594680f5d4922c9", + "txHash": "0xa631dcd6ba785553402b7b4215ad33d770a0705410756b2828a33a6f9a60c1af" + }, + "EpochManager": { + "address": "0x3D37efF83D521AF2E583eEfa9A23DedA5dC060A7", + "initArgs": ["0x532bb32D6342362eA3F50E00fFC22D698D2E80Ad", "554"], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0x9dc88cec2a2eb89a51f686cefd3866b956895ee5569f58b695b496fc3f0aa6d1", + "proxy": true, + "implementation": { + "address": "0x570F5E36da7afed72FD9Ae253B82EDdC4ea33b60", + "creationCodeHash": "0x4f08e1ffb8c54ebbca17d3c7b0e606de209d23fdfa03ec8dd7931dbb58dab936", + "runtimeCodeHash": "0xc1596cfa842862545fbe627530f3dd0e01d73194c80fa243d66246409e85146a", + "txHash": "0x429ae06e5be75c1a8984710860f9866e185a3196d72bec2d9197ff049af9e780" + } + }, + "L2GraphToken": { + "address": "0x429b80751fB005F36d466c6DcDFd1ea30303679d", + "initArgs": ["0x48Ed1128A24fe9053E3F0C8358eC43D86A18c121"], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0xf5446ad80bc3932618185714a1bc21acce67cf6f84298565476d974a1ec5abce", + "proxy": true, + "implementation": { + "address": "0xb24f40e15aE4cdd240a08687e860534aED274CF6", + "creationCodeHash": "0x6c4146427aafa7375a569154be95c8c931bf83aab0315706dd78bdf79c889e4c", + "runtimeCodeHash": "0x004371d1d80011906953dcba17c648503fc94b94e1e0365c8d8c706ff91f93e9", + "txHash": "0x341c034ef4f57483cf95141cdbe91229bba3582360c461cd39365bd22d6ba0f6" + } + }, + "GraphCurationToken": { + "address": "0x6abb5702bF4559b08D5bf43907e832f632dad3Ee", + "creationCodeHash": "0x1ee42ee271cefe20c33c0de904501e618ac4b56debca67c634d0564cecea9ff2", + "runtimeCodeHash": "0x340e8f378c0117b300f3ec255bc5c3a273f9ab5bd2940fa8eb3b5065b21f86dc", + "txHash": 
"0xf8ab72d269de0bb6914ddcf779e641b05b0704840671319e36008616f999aa72" + }, + "ServiceRegistry": { + "address": "0x2db3d2a63118C77159C31D19Ebc9A0F5011C1aB9", + "initArgs": ["0x532bb32D6342362eA3F50E00fFC22D698D2E80Ad"], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0xd2e8f5b1c2946e79fdc0ef8b1a13a85582b2dc50702f6a84e78151ac1a9e8ebf", + "proxy": true, + "implementation": { + "address": "0xAEbBA868Bfbf868E8Fd90f57F1B955d9EbfFE57a", + "creationCodeHash": "0x5e0d4c5a141e688791bdb01b2ee0a05bd3b113cb3ba4e40f2bfea44a46e76b65", + "runtimeCodeHash": "0xdb79b8abe9faccf1fa90dc2705355eb5e149613e21b01521bed28c3c53f0828c", + "txHash": "0xfe050af8a60c9ff4a57497e36183e2d4bfa8a88d32ca4df7a83c7e84f095453b" + } + }, + "L2Curation": { + "address": "0xfEB32cb617CCC935f1F09809ba8b2D571eBc204E", + "initArgs": [ + "0x532bb32D6342362eA3F50E00fFC22D698D2E80Ad", + "0x6abb5702bF4559b08D5bf43907e832f632dad3Ee", + "10000", + "1000000000000000000" + ], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0xa10747178d07e1ed9be1ec07addea74dbebaaba2721b6f06c79bdd949e204e6f", + "proxy": true, + "implementation": { + "address": "0x23FEcCe9B1804c036858a034c26CC241de8F602D", + "creationCodeHash": "0xfd9021fcb5d1e171ae3bae3f7c71453daa4f6348c0171aaf3dbb23e45bb890e4", + "runtimeCodeHash": "0xf30e9225f54f1d9de3ed17c14e849da0bed5c2f801031e2fa6571ae91bc63c90", + "txHash": "0x6e3607b07d3af48ccdd8b82df32c3cbbaeaa621eee218e50d68dcbdb05df9d44" + } + }, + "SubgraphNFTDescriptor": { + "address": "0xE6e3aCB1cd212c828F07375Cca024E2386760A23", + "creationCodeHash": "0xf16e8ff11d852eea165195ac9e0dfa00f98e48f6ce3c77c469c7df9bf195b651", + "runtimeCodeHash": "0x39583196f2bcb85789b6e64692d8c0aa56f001c46f0ca3d371abbba2c695860f", + 
"txHash": "0x7d74f2589b43670afba0d1ca95ee76ebbf088302e6ebc1305ccfb26e08d10584" + }, + "SubgraphNFT": { + "address": "0x95d9a3213f31Cf2D6eDa936ccE97779231E68839", + "constructorArgs": ["0x48Ed1128A24fe9053E3F0C8358eC43D86A18c121"], + "creationCodeHash": "0xc1e58864302084de282dffe54c160e20dd96c6cfff45e00e6ebfc15e04136982", + "runtimeCodeHash": "0x7216e736a8a8754e88688fbf5c0c7e9caf35c55ecc3a0c5a597b951c56cf7458", + "txHash": "0xa87e9de327a1a329b51d5f6d6db40919582918123715c76e0d0dc9910ffbd99d" + }, + "L2GNS": { + "address": "0xb674E7Db757D0e671b00D2882AD1A5d7b2288025", + "initArgs": [ + "0x532bb32D6342362eA3F50E00fFC22D698D2E80Ad", + "0x95d9a3213f31Cf2D6eDa936ccE97779231E68839" + ], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0x2afec62882349c4780539422b6a1d6c730f1f0207ed2cfc030d629c2e93d849f", + "proxy": true, + "implementation": { + "address": "0xa06D1Aab9E9dF673355A8ac3d061B612cE769Af1", + "creationCodeHash": "0xf11bec5c6d83d773f23826dc27786154709c69e9906c1d18e775e8f2deda54a1", + "runtimeCodeHash": "0xe3eab6c225eaf49a8a617582d5b44eaaab4e898a11b973093fdc0a94f9afa3f7", + "txHash": "0x7ad41dd98327ee92c02f7d2dbbd037d8daec4cdfb8a85335314f7841f98818a1" + } + }, + "StakingExtension": { + "address": "0x42262EfC7f924A37dD5e3CD01d1d159e6aBA0AB3", + "creationCodeHash": "0x105ab964d46b3e9208aae1d0e5f40d7bd1450d979b9b5f56966fcd7fe3b44803", + "runtimeCodeHash": "0xb4a55a3aaa9fe067a273172d4a20f4a05e5a53f74c86f56a757c4a355e122b3c", + "txHash": "0x3541b17e5c0c687eb824e87a95ab48608de8a2cb06ecfed754d38d5739726792" + }, + "L2Staking": { + "address": "0x87B4264E0DFb58568f6273f90089B7cC97451d3C", + "initArgs": [ + "0x532bb32D6342362eA3F50E00fFC22D698D2E80Ad", + "100000000000000000000000", + "6646", + "10000", + "100000", + "2", + "4", + "12", + "16", + "77", + "100", + "0x42262EfC7f924A37dD5e3CD01d1d159e6aBA0AB3" + ], + "creationCodeHash": 
"0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0x768d7f862ae85ff8b33a5534715d8ca85cb89de0db23ac1578dc787241acc7b0", + "proxy": true, + "implementation": { + "address": "0xbab3554ec35059D968424448353674764dde7449", + "creationCodeHash": "0x37bc0465731f38f3e40d3446fd767ea7a436647ebf321fa939d15802adca460d", + "runtimeCodeHash": "0xa5a961ebf618209d62125f82205e2fa111b665fea54d4b099cdd220d30b1f628", + "txHash": "0xb446b19658657f4584f174d2d5402225f2e22d67cac6b4edca045b6b1ea4c82d", + "libraries": { + "LibCobbDouglas": "0xA9878bB031f98b93576a303c122AcBC78b6AbBb3" + } + } + }, + "RewardsManager": { + "address": "0xa6d8E0996B3D40c64a69ff23707BeB80207a9651", + "initArgs": ["0x532bb32D6342362eA3F50E00fFC22D698D2E80Ad"], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0xbf35be16f550ec77bbaa75227ca6c9886ab6afb9316cddeaa717efb3cda25789", + "proxy": true, + "implementation": { + "address": "0xe06d92FDa818abD1496c1fa901eFB666233a3043", + "creationCodeHash": "0xac11d3f6f8c15eeeeaac99d1437af77c66dd3922a6b1cc157a872940ba295e69", + "runtimeCodeHash": "0x6fd1d258635a516a5711dc682e64f9a4bc10293e7fa66d5bd392eb826f8716e8", + "txHash": "0x9497c70a075115a77dd09db74549dc4a3f3ada0c2304bf465a6d79a8218e4114" + } + }, + "DisputeManager": { + "address": "0x3277EF801632403bb9b7CFCC30BDF2C254D5DF02", + "initArgs": [ + "0x532bb32D6342362eA3F50E00fFC22D698D2E80Ad", + "0xed42A803C9f0bAE74bF0E63f36FF0Ae7FF38beF7", + "10000000000000000000000", + "500000", + "25000", + "25000" + ], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": 
"0x8a0f457fc76162d0035776ff3060c2f1d9943c9122deba474f9f3d3113a14543", + "proxy": true, + "implementation": { + "address": "0xCcc7A56c0Cfcb69C8d18777324f6E553b2A827Ff", + "creationCodeHash": "0x14d74392339e928eb85811679a69f535726bf4107f1d37c211b8fe6435503ec1", + "runtimeCodeHash": "0xd32c88960f4b7f126b934f9e34c8a0c286d588ba20c1f6c7b112865fa63c4f51", + "txHash": "0x84fdfa6dd3fe6623372b72f7b904ccdaf5fa9b8657756641b0ceae6e81a3cd4c" + } + }, + "AllocationExchange": { + "address": "0xF8534abB7716dD17E1ac51fFebc5D99cdD996e78", + "constructorArgs": [ + "0x429b80751fB005F36d466c6DcDFd1ea30303679d", + "0x87B4264E0DFb58568f6273f90089B7cC97451d3C", + "0x91cEc32a6975265cF96A43f4209F39274cBEc088", + "0xDfaf2F953899c3Fae4aa4979727fd9F441E006b2" + ], + "creationCodeHash": "0xd90c042ddc86f0894af49cdd7b27e7698707557ad1b343ea6da7a495f696bd9a", + "runtimeCodeHash": "0x79e1d41147c62ee6511605a5d5b99cae3ad02dced2015c273774379436b9a11b", + "txHash": "0x4c00fb403b9e85887226caa1a98da9ac1ee5902fe89d49c96aea28bf717c6a09" + }, + "L2GraphTokenGateway": { + "address": "0x80a3cfaF5617F307e6eE25cAF019A0736fB83179", + "initArgs": ["0x532bb32D6342362eA3F50E00fFC22D698D2E80Ad"], + "creationCodeHash": "0xcdd28bb3db05f1267ca0f5ea29536c61841be5937ce711b813924f8ff38918cc", + "runtimeCodeHash": "0x4ca8c37c807bdfda1d6dcf441324b7ea14c6ddec5db37c20c2bf05aeae49bc0d", + "txHash": "0xc1a1c17f514aed4c088fa65ed995eabc1f2c904e4e334df0af88c46c0d8c7ee7", + "proxy": true, + "implementation": { + "address": "0xa8e813F609026c29dd5E0D3DA99a1298C1771140", + "creationCodeHash": "0xe8d9c601cbaf95e353565c31aaaff852e16217cbc6e92aeb6bc5decad64beaf0", + "runtimeCodeHash": "0xf89199d2d260441d419c35b0f5ecbb4887ede12c75e31b1ccc644cdaa101d5db", + "txHash": "0xed735b177f65670c867a228d43730e929c25074404f85988f03bf4ae1d424e84" + } } } }