diff --git a/docs/PaymentsTrustModel.md b/docs/PaymentsTrustModel.md index a79c5f24e..3779a11c4 100644 --- a/docs/PaymentsTrustModel.md +++ b/docs/PaymentsTrustModel.md @@ -68,7 +68,7 @@ RecurringCollector adds payer callbacks when the payer is a contract: <───┘ ``` -- **`isEligible`**: hard `require` — contract payer can block collection for ineligible receivers. Only called when `0 < tokensToCollect`. +- **`isEligible`**: fail-open gate — only an explicit return of `0` blocks collection; call failures (reverts, malformed data) are ignored to prevent a buggy payer from griefing the receiver. Only called when `0 < tokensToCollect`. - **`beforeCollection`**: try-catch — allows payer to top up escrow (RAM uses this for JIT deposits), but cannot block (though a malicious contract payer could consume excessive gas). Only called when `0 < tokensToCollect`. - **`afterCollection`**: try-catch — allows payer to reconcile state post-collection, cannot block (same gas exhaustion caveat). Called even when `tokensToCollect == 0` (zero-token collections still trigger reconciliation). @@ -99,7 +99,7 @@ RecurringCollector adds payer callbacks when the payer is a contract: Caveats on effective escrow (contract payers introduce additional trust requirements — see caveat 3): 1. **Thawing reduces effective balance** — a payer can initiate a thaw; once the thaw period completes, those tokens are withdrawable. The receiver should account for the thawing period and any in-progress thaws when assessing available escrow. -2. **Cancellation freezes the collection window** at `canceledAt` — the receiver can still collect for the period up to cancellation (with `minSecondsPerCollection` bypassed), but no further. +2. **Cancellation freezes the collection window** at `collectableUntil` — the receiver can still collect for the period up to cancellation; `minSecondsPerCollection` is enforced during the notice period and bypassed only after `collectableUntil` is reached. 3. 
**Contract payers can block** — if the payer is a contract that implements `IProviderEligibility`, it can deny collection via `isEligible` (see [RecurringCollector Extensions](#recurringcollector-extensions)). **Mitigation**: The thawing period provides a window for the receiver to collect before funds are withdrawn. The escrow balance and thaw state are publicly visible on-chain. diff --git a/packages/contracts-test/tests/unit/rewards/rewards-config.test.ts b/packages/contracts-test/tests/unit/rewards/rewards-config.test.ts index 3e510e1c1..bd3b2569a 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards-config.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards-config.test.ts @@ -274,5 +274,47 @@ describe('Rewards - Configuration', () => { expect(await rewardsManager.minimumSubgraphSignal()).eq(newMinimumSignal) }) }) + + describe('revertOnIneligible', function () { + it('should reject setRevertOnIneligible if unauthorized', async function () { + const tx = rewardsManager.connect(indexer1).setRevertOnIneligible(true) + await expect(tx).revertedWith('Only Controller governor') + }) + + it('should set revertOnIneligible to true', async function () { + const tx = rewardsManager.connect(governor).setRevertOnIneligible(true) + await expect(tx).emit(rewardsManager, 'ParameterUpdated').withArgs('revertOnIneligible') + expect(await rewardsManager.getRevertOnIneligible()).eq(true) + }) + + it('should set revertOnIneligible to false', async function () { + // First set to true + await rewardsManager.connect(governor).setRevertOnIneligible(true) + + // Then set back to false + const tx = rewardsManager.connect(governor).setRevertOnIneligible(false) + await expect(tx).emit(rewardsManager, 'ParameterUpdated').withArgs('revertOnIneligible') + expect(await rewardsManager.getRevertOnIneligible()).eq(false) + }) + + it('should be a no-op when setting same value (false to false)', async function () { + // Default is false + expect(await 
rewardsManager.getRevertOnIneligible()).eq(false) + + const tx = rewardsManager.connect(governor).setRevertOnIneligible(false) + await expect(tx).to.not.emit(rewardsManager, 'ParameterUpdated') + + expect(await rewardsManager.getRevertOnIneligible()).eq(false) + }) + + it('should be a no-op when setting same value (true to true)', async function () { + await rewardsManager.connect(governor).setRevertOnIneligible(true) + + const tx = rewardsManager.connect(governor).setRevertOnIneligible(true) + await expect(tx).to.not.emit(rewardsManager, 'ParameterUpdated') + + expect(await rewardsManager.getRevertOnIneligible()).eq(true) + }) + }) }) }) diff --git a/packages/contracts-test/tests/unit/rewards/rewards-eligibility-oracle.test.ts b/packages/contracts-test/tests/unit/rewards/rewards-eligibility-oracle.test.ts index 4db522378..c2137dc64 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards-eligibility-oracle.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards-eligibility-oracle.test.ts @@ -533,6 +533,97 @@ describe('Rewards - Eligibility Oracle', () => { expectApproxEq(event.args[2], expectedIndexingRewards, 'rewards amount') }) + it('should revert for ineligible indexer when revertOnIneligible is true', async function () { + // Setup REO that denies indexer1 + const MockRewardsEligibilityOracleFactory = await hre.ethers.getContractFactory( + 'contracts/tests/MockRewardsEligibilityOracle.sol:MockRewardsEligibilityOracle', + ) + const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny + await mockOracle.deployed() + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) + + // Enable revert on ineligible + await rewardsManager.connect(governor).setRevertOnIneligible(true) + + // Align with the epoch boundary + await helpers.mineEpoch(epochManager) + + // Setup allocation + await setupIndexerAllocation() + + // Jump to next epoch + await helpers.mineEpoch(epochManager) + + // Close 
allocation - should revert because indexer is ineligible + const tx = staking.connect(indexer1).closeAllocation(allocationID1, randomHexBytes()) + await expect(tx).revertedWith('Indexer not eligible for rewards') + }) + + it('should not revert for eligible indexer when revertOnIneligible is true', async function () { + // Setup REO that allows indexer1 + const MockRewardsEligibilityOracleFactory = await hre.ethers.getContractFactory( + 'contracts/tests/MockRewardsEligibilityOracle.sol:MockRewardsEligibilityOracle', + ) + const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(true) // Allow + await mockOracle.deployed() + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) + + // Enable revert on ineligible + await rewardsManager.connect(governor).setRevertOnIneligible(true) + + // Align with the epoch boundary + await helpers.mineEpoch(epochManager) + + // Setup allocation + await setupIndexerAllocation() + + // Jump to next epoch + await helpers.mineEpoch(epochManager) + + // Close allocation - should succeed (indexer is eligible) + const tx = staking.connect(indexer1).closeAllocation(allocationID1, randomHexBytes()) + await expect(tx).emit(rewardsManager, 'HorizonRewardsAssigned') + }) + + it('should reclaim (not revert) for ineligible indexer when revertOnIneligible is false', async function () { + // Setup REO that denies indexer1 + const MockRewardsEligibilityOracleFactory = await hre.ethers.getContractFactory( + 'contracts/tests/MockRewardsEligibilityOracle.sol:MockRewardsEligibilityOracle', + ) + const mockOracle = await MockRewardsEligibilityOracleFactory.deploy(false) // Deny + await mockOracle.deployed() + await rewardsManager.connect(governor).setProviderEligibilityOracle(mockOracle.address) + + // Ensure revertOnIneligible is false (default) + expect(await rewardsManager.getRevertOnIneligible()).eq(false) + + // Align with the epoch boundary + await helpers.mineEpoch(epochManager) + + // Setup allocation 
+ await setupIndexerAllocation() + + // Jump to next epoch + await helpers.mineEpoch(epochManager) + + // Close allocation - should succeed but deny rewards + const tx = await staking.connect(indexer1).closeAllocation(allocationID1, randomHexBytes()) + const receipt = await tx.wait() + + // Should emit RewardsDeniedDueToEligibility (not revert) + const rewardsDeniedEvents = receipt.logs + .map((log) => { + try { + return rewardsManager.interface.parseLog(log) + } catch { + return null + } + }) + .filter((event) => event?.name === 'RewardsDeniedDueToEligibility') + + expect(rewardsDeniedEvents.length).to.equal(1, 'RewardsDeniedDueToEligibility event not found') + }) + it('should verify event structure differences between denial mechanisms', async function () { // Test 1: Denylist denial - event WITHOUT amount // Create allocation FIRST, then deny (so there are pre-denial rewards to deny) diff --git a/packages/contracts-test/tests/unit/rewards/rewards-interface.test.ts b/packages/contracts-test/tests/unit/rewards/rewards-interface.test.ts index 3fdd15ee6..63280f5e8 100644 --- a/packages/contracts-test/tests/unit/rewards/rewards-interface.test.ts +++ b/packages/contracts-test/tests/unit/rewards/rewards-interface.test.ts @@ -58,7 +58,7 @@ describe('RewardsManager interfaces', () => { }) it('IRewardsManager should have stable interface ID', () => { - expect(IRewardsManager__factory.interfaceId).to.equal('0x7e0447a1') + expect(IRewardsManager__factory.interfaceId).to.equal('0x337b092e') }) }) diff --git a/packages/contracts/contracts/rewards/RewardsManager.sol b/packages/contracts/contracts/rewards/RewardsManager.sol index 9a9218093..a0ca5ca20 100644 --- a/packages/contracts/contracts/rewards/RewardsManager.sol +++ b/packages/contracts/contracts/rewards/RewardsManager.sol @@ -265,6 +265,14 @@ contract RewardsManager is } } + /// @inheritdoc IRewardsManager + function setRevertOnIneligible(bool _revertOnIneligible) external override onlyGovernor { + if (revertOnIneligible 
!= _revertOnIneligible) { + revertOnIneligible = _revertOnIneligible; + emit ParameterUpdated("revertOnIneligible"); + } + } + // -- Denylist -- /** @@ -344,6 +352,11 @@ contract RewardsManager is return rewardsEligibilityOracle; } + /// @inheritdoc IRewardsManager + function getRevertOnIneligible() external view override returns (bool) { + return revertOnIneligible; + } + /// @inheritdoc IRewardsManager function getNewRewardsPerSignal() public view override returns (uint256 claimablePerSignal) { (claimablePerSignal, ) = _getNewRewardsPerSignal(); @@ -772,6 +785,11 @@ contract RewardsManager is bool isDeniedSubgraph = isDenied(subgraphDeploymentID); bool isIneligible = address(rewardsEligibilityOracle) != address(0) && !rewardsEligibilityOracle.isEligible(indexer); + + // When configured to revert, block collection so rewards remain claimable if + // the indexer becomes eligible and collects before the allocation goes stale. + require(!isIneligible || !revertOnIneligible, "Indexer not eligible for rewards"); + if (!isDeniedSubgraph && !isIneligible) return false; if (isDeniedSubgraph) emit RewardsDenied(indexer, allocationID); diff --git a/packages/contracts/contracts/rewards/RewardsManagerStorage.sol b/packages/contracts/contracts/rewards/RewardsManagerStorage.sol index 5969d11c6..72a2d3176 100644 --- a/packages/contracts/contracts/rewards/RewardsManagerStorage.sol +++ b/packages/contracts/contracts/rewards/RewardsManagerStorage.sol @@ -117,4 +117,9 @@ abstract contract RewardsManagerV6Storage is RewardsManagerV5Storage { /// @dev Default fallback address for reclaiming rewards when no reason-specific address is configured. /// Zero address means rewards are dropped (not minted) if no specific reclaim address matches. address internal defaultReclaimAddress; + + /// @dev When true, ineligible indexers cause takeRewards to revert (blocking POI presentation + /// and allowing allocations to go stale). 
When false (default), ineligible indexers have + /// rewards reclaimed but takeRewards succeeds (returning 0). + bool internal revertOnIneligible; } diff --git a/packages/horizon/contracts/payments/collectors/RecurringCollector.sol b/packages/horizon/contracts/payments/collectors/RecurringCollector.sol index c03e739a4..a2a879a0f 100644 --- a/packages/horizon/contracts/payments/collectors/RecurringCollector.sol +++ b/packages/horizon/contracts/payments/collectors/RecurringCollector.sol @@ -1,19 +1,37 @@ // SPDX-License-Identifier: GPL-3.0-or-later pragma solidity ^0.8.27; -import { EIP712 } from "@openzeppelin/contracts/utils/cryptography/EIP712.sol"; -import { ECDSA } from "@openzeppelin/contracts/utils/cryptography/ECDSA.sol"; +import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; import { Math } from "@openzeppelin/contracts/utils/math/Math.sol"; -import { Authorizable } from "../../utilities/Authorizable.sol"; +import { IDataServiceAgreements } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceAgreements.sol"; +import { IAgreementStateChangeCallback } from "@graphprotocol/interfaces/contracts/horizon/IAgreementStateChangeCallback.sol"; import { GraphDirectory } from "../../utilities/GraphDirectory.sol"; -// solhint-disable-next-line no-unused-import -import { IPaymentsCollector } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsCollector.sol"; // for @inheritdoc -import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; +import { IPaymentsCollector } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsCollector.sol"; import { IAgreementOwner } from "@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PAYER, + BY_PROVIDER, + BY_DATA_SERVICE, + UPDATE, + AUTO_UPDATE, + AUTO_UPDATED, + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE, + WITH_NOTICE, + IF_NOT_ACCEPTED, + IAgreementCollector 
+} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IPausableControl } from "@graphprotocol/interfaces/contracts/issuance/common/IPausableControl.sol"; import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; +import { ERC165Checker } from "@openzeppelin/contracts/utils/introspection/ERC165Checker.sol"; +import { ReentrancyGuardTransient } from "@openzeppelin/contracts/utils/ReentrancyGuardTransient.sol"; import { PPMMath } from "../../libraries/PPMMath.sol"; /** @@ -21,258 +39,397 @@ import { PPMMath } from "../../libraries/PPMMath.sol"; * @author Edge & Node * @dev Implements the {IRecurringCollector} interface. * @notice A payments collector contract that can be used to collect payments using a RCA (Recurring Collection Agreement). + * + * Callback model: lifecycle ({IAgreementStateChangeCallback}), collection + * ({IAgreementOwner.beforeCollection} / {IAgreementOwner.afterCollection}), and eligibility + * ({IProviderEligibility.isEligible}) callbacks are skipped when the target is `msg.sender`. + * The caller already has execution context and can sequence its own update logic + * (e.g. reconciliation, escrow top-up) without a callback. This eliminates callback + * loops, simplifies reentrancy analysis, and reduces the trust surface between caller + * and collector. Exception: the data service's {IDataServiceAgreements.acceptAgreement} + * callback is always invoked (including during auto-update when the data service is + * `msg.sender`), because it must validate and set up domain-specific state. + * + * @custom:security-pause This contract is independently pausable from RecurringAgreementManager. 
+ * Pausing is an emergency measure for when something is seriously broken and may require an + * emergency contract upgrade to restore operation. When paused, all state-changing operations + * are blocked: {collect}, {offer}, {accept}, and {cancel}. View functions remain available. + * Pause guardians can pause/unpause; the governor manages pause guardian assignments. + * * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. */ -contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringCollector { +contract RecurringCollector is + GraphDirectory, + PausableUpgradeable, + ReentrancyGuardTransient, + IPausableControl, + IRecurringCollector +{ using PPMMath for uint256; + // -- Constants -- + /// @notice The minimum number of seconds that must be between two collections uint32 public constant MIN_SECONDS_COLLECTION_WINDOW = 600; + /// @notice Maximum gas forwarded to external callbacks (payer and data-service). + /// Caps gas available to callback implementations, preventing 63/64-rule gas siphoning + /// attacks that could starve the core collect() / accept() call of gas. 
+ uint256 private constant MAX_CALLBACK_GAS = 1_500_000; + /* solhint-disable gas-small-strings */ - /// @notice The EIP712 typehash for the RecurringCollectionAgreement struct - bytes32 public constant EIP712_RCA_TYPEHASH = + /// @notice The typehash for the RecurringCollectionAgreement struct + bytes32 public constant RCA_TYPEHASH = keccak256( - "RecurringCollectionAgreement(uint64 deadline,uint64 endsAt,address payer,address dataService,address serviceProvider,uint256 maxInitialTokens,uint256 maxOngoingTokensPerSecond,uint32 minSecondsPerCollection,uint32 maxSecondsPerCollection,uint256 nonce,bytes metadata)" + "RecurringCollectionAgreement(uint64 deadline,uint64 endsAt,address payer,address dataService,address serviceProvider,uint256 maxInitialTokens,uint256 maxOngoingTokensPerSecond,uint32 minSecondsPerCollection,uint32 maxSecondsPerCollection,uint16 conditions,uint32 minSecondsPayerCancellationNotice,uint256 nonce,bytes metadata)" ); - /// @notice The EIP712 typehash for the RecurringCollectionAgreementUpdate struct - bytes32 public constant EIP712_RCAU_TYPEHASH = + /// @notice The typehash for the RecurringCollectionAgreementUpdate struct + bytes32 public constant RCAU_TYPEHASH = keccak256( - "RecurringCollectionAgreementUpdate(bytes16 agreementId,uint64 deadline,uint64 endsAt,uint256 maxInitialTokens,uint256 maxOngoingTokensPerSecond,uint32 minSecondsPerCollection,uint32 maxSecondsPerCollection,uint32 nonce,bytes metadata)" + "RecurringCollectionAgreementUpdate(bytes16 agreementId,uint64 deadline,uint64 endsAt,uint256 maxInitialTokens,uint256 maxOngoingTokensPerSecond,uint32 minSecondsPerCollection,uint32 maxSecondsPerCollection,uint16 conditions,uint32 minSecondsPayerCancellationNotice,uint32 nonce,bytes metadata)" ); /* solhint-enable gas-small-strings */ - /// @notice Tracks agreements - mapping(bytes16 agreementId => AgreementData data) internal agreements; + /// @notice Bitmask: include active terms in getMaxNextClaim + uint8 public constant 
CLAIM_SCOPE_ACTIVE = 1; + /// @notice Bitmask: include pending terms in getMaxNextClaim + uint8 public constant CLAIM_SCOPE_PENDING = 2; + + /// @notice Condition flag: agreement requires eligibility checks before collection + uint16 public constant CONDITION_ELIGIBILITY_CHECK = 1; + + // -- Internal types -- /** - * @notice Constructs a new instance of the RecurringCollector contract. - * @param eip712Name The name of the EIP712 domain. - * @param eip712Version The version of the EIP712 domain. - * @param controller The address of the Graph controller. - * @param revokeSignerThawingPeriod The duration (in seconds) in which a signer is thawing before they can be revoked. + * @dev Internal storage layout for an agreement. Not part of the public interface; + * callers receive an AgreementData from getAgreementData(). + * Packed layout (3 base slots + nested terms + per-version nonces): + * slot 0: dataService(20) + acceptedAt(8) + updateNonce(4) = 32B + * slot 1: payer(20) + lastCollectionAt(8) + state(2) = 30B + * slot 2: serviceProvider(20) + collectableUntil(8) = 28B + * slot 3+: activeTerms (4 fixed slots + dynamic) + * slot 7+: pendingTerms (4 fixed slots + dynamic) + * slot N: activeOfferNonce(32) + * slot N+1: pendingOfferNonce(32) */ - constructor( - string memory eip712Name, - string memory eip712Version, - address controller, - uint256 revokeSignerThawingPeriod - ) EIP712(eip712Name, eip712Version) GraphDirectory(controller) Authorizable(revokeSignerThawingPeriod) {} + struct AgreementStorage { + address dataService; + uint64 acceptedAt; + uint32 updateNonce; + address payer; + uint64 lastCollectionAt; + uint16 state; + address serviceProvider; + uint64 collectableUntil; + AgreementTerms activeTerms; + AgreementTerms pendingTerms; + uint256 activeOfferNonce; + uint256 pendingOfferNonce; + } + + // -- State -- + + /// @custom:storage-location erc7201:graphprotocol.storage.RecurringCollector + struct RecurringCollectorStorage { + /// @notice Tracks agreements 
+ mapping(bytes16 agreementId => AgreementStorage data) agreements; + /// @notice List of pause guardians and their allowed status + mapping(address pauseGuardian => bool allowed) pauseGuardians; + } + + /// @dev keccak256(abi.encode(uint256(keccak256("graphprotocol.storage.RecurringCollector")) - 1)) & ~bytes32(uint256(0xff)) + bytes32 private constant RECURRING_COLLECTOR_STORAGE_LOCATION = + 0x436d179d846767cf46c6cda3ec5a404bcbe1b4351ce320082402e5e9ab4d6600; /** - * @inheritdoc IPaymentsCollector - * @notice Initiate a payment collection through the payments protocol. - * See {IPaymentsCollector.collect}. - * @dev Caller must be the data service the RCA was issued to. + * @notice Checks if the caller is a pause guardian. */ - function collect(IGraphPayments.PaymentTypes paymentType, bytes calldata data) external returns (uint256) { - try this.decodeCollectData(data) returns (CollectParams memory collectParams) { - return _collect(paymentType, collectParams); - } catch { - revert RecurringCollectorInvalidCollectData(data); - } + modifier onlyPauseGuardian() { + _checkPauseGuardian(); + _; } + // -- Constructor -- + /** - * @inheritdoc IRecurringCollector - * @notice Accept a Recurring Collection Agreement. - * @dev Caller must be the data service the RCA was issued to. + * @notice Constructs a new instance of the RecurringCollector implementation contract. + * @dev Immutables are set here; proxy state is initialized via {initialize}. + * @param controller The address of the Graph controller. 
*/ - function accept(RecurringCollectionAgreement calldata rca, bytes calldata signature) external returns (bytes16) { - /* solhint-disable gas-strict-inequalities */ - require( - rca.deadline >= block.timestamp, - RecurringCollectorAgreementDeadlineElapsed(block.timestamp, rca.deadline) - ); - /* solhint-enable gas-strict-inequalities */ - - if (0 < signature.length) { - // ECDSA-signed path: verify signature - _requireAuthorizedRCASigner(rca, signature); - } else { - // Contract-approved path: verify payer is a contract and confirms the agreement - require(0 < rca.payer.code.length, RecurringCollectorApproverNotContract(rca.payer)); - bytes32 agreementHash = _hashRCA(rca); - require( - IAgreementOwner(rca.payer).approveAgreement(agreementHash) == IAgreementOwner.approveAgreement.selector, - RecurringCollectorInvalidSigner() - ); - } - return _validateAndStoreAgreement(rca); + constructor(address controller) GraphDirectory(controller) { + _disableInitializers(); } /** - * @notice Validates RCA fields and stores the agreement. - * @param _rca The Recurring Collection Agreement to validate and store - * @return agreementId The deterministically generated agreement ID + * @notice Initializes the contract (proxy storage). + * @dev Marks the proxy as initialized so the `initializer` modifier prevents re-entry. 
*/ - /* solhint-disable function-max-lines */ - function _validateAndStoreAgreement(RecurringCollectionAgreement memory _rca) private returns (bytes16) { - bytes16 agreementId = _generateAgreementId( - _rca.payer, - _rca.dataService, - _rca.serviceProvider, - _rca.deadline, - _rca.nonce - ); + function initialize() external initializer { + __Pausable_init(); + } - require(agreementId != bytes16(0), RecurringCollectorAgreementIdZero()); - require(msg.sender == _rca.dataService, RecurringCollectorUnauthorizedCaller(msg.sender, _rca.dataService)); + // -- External mutating -- - require( - _rca.dataService != address(0) && _rca.payer != address(0) && _rca.serviceProvider != address(0), - RecurringCollectorAgreementAddressNotSet() - ); - - _requireValidCollectionWindowParams(_rca.endsAt, _rca.minSecondsPerCollection, _rca.maxSecondsPerCollection); + /// @inheritdoc IPausableControl + function pause() external override onlyPauseGuardian { + _pause(); + } - AgreementData storage agreement = _getAgreementStorage(agreementId); - // check that the agreement is not already accepted - require( - agreement.state == AgreementState.NotAccepted, - RecurringCollectorAgreementIncorrectState(agreementId, agreement.state) - ); + /// @inheritdoc IPausableControl + function unpause() external override onlyPauseGuardian { + _unpause(); + } - // accept the agreement - agreement.acceptedAt = uint64(block.timestamp); - agreement.state = AgreementState.Accepted; - agreement.dataService = _rca.dataService; - agreement.payer = _rca.payer; - agreement.serviceProvider = _rca.serviceProvider; - agreement.endsAt = _rca.endsAt; - agreement.maxInitialTokens = _rca.maxInitialTokens; - agreement.maxOngoingTokensPerSecond = _rca.maxOngoingTokensPerSecond; - agreement.minSecondsPerCollection = _rca.minSecondsPerCollection; - agreement.maxSecondsPerCollection = _rca.maxSecondsPerCollection; - agreement.updateNonce = 0; + /** + * @notice Sets a pause guardian. + * @dev Only callable by the governor. 
+ * @param _pauseGuardian The address of the pause guardian + * @param _allowed Whether the address should be a pause guardian + */ + function setPauseGuardian(address _pauseGuardian, bool _allowed) external { + require(msg.sender == _graphController().getGovernor(), NotGovernor(msg.sender)); + RecurringCollectorStorage storage $ = _getStorage(); - emit AgreementAccepted( - agreement.dataService, - agreement.payer, - agreement.serviceProvider, - agreementId, - agreement.acceptedAt, - agreement.endsAt, - agreement.maxInitialTokens, - agreement.maxOngoingTokensPerSecond, - agreement.minSecondsPerCollection, - agreement.maxSecondsPerCollection - ); + if ($.pauseGuardians[_pauseGuardian] == _allowed) return; - return agreementId; + $.pauseGuardians[_pauseGuardian] = _allowed; + emit PauseGuardianSet(_pauseGuardian, _allowed); } - /* solhint-enable function-max-lines */ /** - * @inheritdoc IRecurringCollector - * @notice Cancel a Recurring Collection Agreement. - * See {IRecurringCollector.cancel}. - * @dev Caller must be the data service for the agreement. + * @inheritdoc IPaymentsCollector + * @dev Caller must be the data service the RCA was issued to. 
*/ - function cancel(bytes16 agreementId, CancelAgreementBy by) external { - AgreementData storage agreement = _getAgreementStorage(agreementId); - require( - agreement.state == AgreementState.Accepted, - RecurringCollectorAgreementIncorrectState(agreementId, agreement.state) - ); - require( - agreement.dataService == msg.sender, - RecurringCollectorDataServiceNotAuthorized(agreementId, msg.sender) - ); - agreement.canceledAt = uint64(block.timestamp); - if (by == CancelAgreementBy.Payer) { - agreement.state = AgreementState.CanceledByPayer; - } else { - agreement.state = AgreementState.CanceledByServiceProvider; + function collect( + IGraphPayments.PaymentTypes paymentType, + bytes calldata data + ) external nonReentrant whenNotPaused returns (uint256) { + try this.decodeCollectData(data) returns (CollectParams memory collectParams) { + return _collect(paymentType, collectParams); + } catch { + revert InvalidCollectData(data); } + } - emit AgreementCanceled( - agreement.dataService, - agreement.payer, - agreement.serviceProvider, - agreementId, - agreement.canceledAt, - by - ); + /// @inheritdoc IRecurringCollector + function offer( + uint8 offerType, + bytes calldata data, + uint16 options + ) external nonReentrant whenNotPaused returns (OfferResult memory result) { + if (offerType == OFFER_TYPE_NEW) { + RecurringCollectionAgreement memory rca = abi.decode(data, (RecurringCollectionAgreement)); + require(msg.sender == rca.payer, UnauthorizedPayer(msg.sender, rca.payer)); + (result.agreementId, result.versionHash) = _validateAndStoreOffer(rca); + result.dataService = rca.dataService; + result.serviceProvider = rca.serviceProvider; + result.state = REGISTERED; + } else if (offerType == OFFER_TYPE_UPDATE) { + RecurringCollectionAgreementUpdate memory rcau = abi.decode(data, (RecurringCollectionAgreementUpdate)); + AgreementStorage storage agreement = _getAgreementStorage(rcau.agreementId); + require(msg.sender == agreement.payer, UnauthorizedPayer(msg.sender, 
agreement.payer)); + return _validateAndStoreUpdate(agreement, rcau, options); + } else { + revert InvalidOfferType(offerType); + } } - /** - * @inheritdoc IRecurringCollector - * @notice Update a Recurring Collection Agreement. - * @dev Caller must be the data service for the agreement. - * @dev Note: Updated pricing terms apply immediately and will affect the next collection - * for the entire period since lastCollectionAt. - */ - function update(RecurringCollectionAgreementUpdate calldata rcau, bytes calldata signature) external { - AgreementData storage agreement = _requireValidUpdateTarget(rcau.agreementId); + /// @inheritdoc IRecurringCollector + function accept( + bytes16 agreementId, + bytes32 versionHash, + bytes calldata extraData, + uint16 options + ) external nonReentrant whenNotPaused { + AgreementStorage storage agreement = _getAgreementStorage(agreementId); + uint16 state = agreement.state; - /* solhint-disable gas-strict-inequalities */ require( - rcau.deadline >= block.timestamp, - RecurringCollectorAgreementDeadlineElapsed(block.timestamp, rcau.deadline) + msg.sender == agreement.serviceProvider, + UnauthorizedServiceProvider(msg.sender, agreement.serviceProvider) ); - /* solhint-enable gas-strict-inequalities */ - if (0 < signature.length) { - // ECDSA-signed path: verify signature - _requireAuthorizedRCAUSigner(rcau, signature, agreement.payer); + if (state & (REGISTERED | ACCEPTED) == REGISTERED) + _accept(agreementId, versionHash, extraData, agreement, agreement.activeTerms, false, false, options); + else if (state & ACCEPTED != 0) + // Accept pending terms — allowed even if NOTICE_GIVEN, SETTLED, BY_* + _accept(agreementId, versionHash, extraData, agreement, agreement.pendingTerms, false, false, options); + else revert AgreementIncorrectState(agreementId, state); + } + + /// @inheritdoc IAgreementCollector + function cancel(bytes16 agreementId, bytes32 versionHash, uint16 options) external nonReentrant whenNotPaused { + AgreementStorage 
storage agreement = _getAgreementStorage(agreementId); + + uint16 byFlag; + if (agreement.payer == msg.sender) byFlag = BY_PAYER; + else if (agreement.serviceProvider == msg.sender) byFlag = BY_PROVIDER; + else if (agreement.dataService == msg.sender) byFlag = BY_DATA_SERVICE; + else revert UnauthorizedCaller(msg.sender, address(0)); + + uint16 eventState; + if (versionHash == agreement.pendingTerms.hash) { + delete agreement.pendingTerms; + // UPDATE in event only — signals this cancel targets pending terms. + // Not persisted: agreement lifecycle state is unchanged. + eventState = agreement.state | UPDATE; } else { - // Contract-approved path: verify payer is a contract and confirms the update - require(0 < agreement.payer.code.length, RecurringCollectorApproverNotContract(agreement.payer)); - bytes32 updateHash = _hashRCAU(rcau); require( - IAgreementOwner(agreement.payer).approveAgreement(updateHash) == - IAgreementOwner.approveAgreement.selector, - RecurringCollectorInvalidSigner() + versionHash == agreement.activeTerms.hash, + AgreementHashMismatch(agreementId, agreement.activeTerms.hash, versionHash) ); + uint16 oldState = agreement.state; + require(oldState & REGISTERED != 0, AgreementIncorrectState(agreementId, oldState)); + + if (options & IF_NOT_ACCEPTED != 0) + require(oldState & ACCEPTED == 0, AgreementIncorrectState(agreementId, oldState)); + + if (byFlag == BY_PAYER && (oldState & ACCEPTED != 0)) { + _applyNotice( + agreement, + agreementId, + uint64(block.timestamp) + agreement.activeTerms.minSecondsPayerCancellationNotice + ); + } else { + uint64 noticeCutoff = uint64(block.timestamp); + if (noticeCutoff < agreement.collectableUntil) agreement.collectableUntil = noticeCutoff; + } + + eventState = oldState | NOTICE_GIVEN | byFlag; + if (oldState & ACCEPTED == 0) eventState = eventState | SETTLED; + agreement.state = eventState; } - _validateAndStoreUpdate(agreement, rcau); + _emitAndNotify(agreementId, versionHash, eventState, agreement.dataService, 
agreement.payer); } - /// @inheritdoc IRecurringCollector - function recoverRCASigner( - RecurringCollectionAgreement calldata rca, - bytes calldata signature - ) external view returns (address) { - return _recoverRCASigner(rca, signature); - } + // -- External view -- /// @inheritdoc IRecurringCollector - function recoverRCAUSigner( - RecurringCollectionAgreementUpdate calldata rcau, - bytes calldata signature - ) external view returns (address) { - return _recoverRCAUSigner(rcau, signature); + function getAgreementData(bytes16 agreementId) external view returns (AgreementData memory data_) { + AgreementStorage storage a = _getAgreementStorage(agreementId); + data_.agreementId = agreementId; + data_.payer = a.payer; + data_.serviceProvider = a.serviceProvider; + data_.dataService = a.dataService; + data_.acceptedAt = a.acceptedAt; + data_.lastCollectionAt = a.lastCollectionAt; + data_.collectableUntil = a.collectableUntil; + data_.updateNonce = a.updateNonce; + data_.state = a.state; + (data_.isCollectable, data_.collectionSeconds, ) = _getCollectionInfo( + a.state, + a.collectableUntil, + a.lastCollectionAt, + a.acceptedAt, + a.activeTerms.maxSecondsPerCollection + ); } /// @inheritdoc IRecurringCollector - function hashRCA(RecurringCollectionAgreement calldata rca) external view returns (bytes32) { - return _hashRCA(rca); + function getAgreementVersionCount(bytes16 agreementId) external view returns (uint256) { + AgreementStorage storage agreement = _getAgreementStorage(agreementId); + if (agreement.activeTerms.hash == bytes32(0)) return 0; + if (agreement.pendingTerms.hash == bytes32(0)) return 1; + return 2; } - /// @inheritdoc IRecurringCollector - function hashRCAU(RecurringCollectionAgreementUpdate calldata rcau) external view returns (bytes32) { - return _hashRCAU(rcau); + /// @inheritdoc IAgreementCollector + function getAgreementVersionAt( + bytes16 agreementId, + uint256 index + ) external view returns (AgreementVersion memory version) { + 
AgreementStorage storage agreement = _getAgreementStorage(agreementId); + version.agreementId = agreementId; + version.state = agreement.state; + + if (index == 0) version.versionHash = agreement.activeTerms.hash; + else if (index == 1) { + version.versionHash = agreement.pendingTerms.hash; + version.state = version.state | UPDATE; + } } + /* solhint-disable function-max-lines */ /// @inheritdoc IRecurringCollector - function getAgreement(bytes16 agreementId) external view returns (AgreementData memory) { - return _getAgreement(agreementId); + function getAgreementOfferAt( + bytes16 agreementId, + uint256 index + ) external view returns (uint8 offerType, bytes memory offerData) { + AgreementStorage storage agreement = _getAgreementStorage(agreementId); + + AgreementTerms storage terms; + uint256 offerNonce; + bool isUpdate; + + if (index == 0) { + terms = agreement.activeTerms; + offerNonce = agreement.activeOfferNonce; + isUpdate = (agreement.state & UPDATE) != 0; + } else if (index == 1) { + terms = agreement.pendingTerms; + offerNonce = agreement.pendingOfferNonce; + isUpdate = true; // pending is always an update + } else { + return (0, ""); + } + + if (terms.hash == bytes32(0)) return (0, ""); + + if (isUpdate) { + offerType = OFFER_TYPE_UPDATE; + offerData = abi.encode( + RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: terms.deadline, + endsAt: terms.endsAt, + maxInitialTokens: terms.maxInitialTokens, + maxOngoingTokensPerSecond: terms.maxOngoingTokensPerSecond, + minSecondsPerCollection: terms.minSecondsPerCollection, + maxSecondsPerCollection: terms.maxSecondsPerCollection, + conditions: terms.conditions, + minSecondsPayerCancellationNotice: terms.minSecondsPayerCancellationNotice, + // Safe: pendingOfferNonce always originates from RCAU.nonce (uint32). + // activeOfferNonce reaches this branch only when isUpdate is true + // (UPDATE flag set on state), meaning it was promoted from pendingOfferNonce. 
+ // forge-lint: disable-next-line(unsafe-typecast) + nonce: uint32(offerNonce), + metadata: terms.metadata + }) + ); + } else { + offerType = OFFER_TYPE_NEW; + offerData = abi.encode( + RecurringCollectionAgreement({ + deadline: terms.deadline, + endsAt: terms.endsAt, + payer: agreement.payer, + dataService: agreement.dataService, + serviceProvider: agreement.serviceProvider, + maxInitialTokens: terms.maxInitialTokens, + maxOngoingTokensPerSecond: terms.maxOngoingTokensPerSecond, + minSecondsPerCollection: terms.minSecondsPerCollection, + maxSecondsPerCollection: terms.maxSecondsPerCollection, + conditions: terms.conditions, + minSecondsPayerCancellationNotice: terms.minSecondsPayerCancellationNotice, + nonce: offerNonce, + metadata: terms.metadata + }) + ); + } } + /* solhint-enable function-max-lines */ /// @inheritdoc IRecurringCollector - function getCollectionInfo( - AgreementData calldata agreement - ) external view returns (bool isCollectable, uint256 collectionSeconds, AgreementNotCollectableReason reason) { - return _getCollectionInfo(agreement); + function getMaxNextClaim(bytes16 agreementId) external view returns (uint256) { + return _getMaxNextClaim(agreementId, CLAIM_SCOPE_ACTIVE | CLAIM_SCOPE_PENDING); } /// @inheritdoc IRecurringCollector - function getMaxNextClaim(bytes16 agreementId) external view returns (uint256) { - return _getMaxNextClaim(agreements[agreementId]); + function getMaxNextClaim(bytes16 agreementId, uint8 claimScope) external view returns (uint256) { + return _getMaxNextClaim(agreementId, claimScope); } /// @inheritdoc IRecurringCollector @@ -286,6 +443,24 @@ contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringC return _generateAgreementId(payer, dataService, serviceProvider, deadline, nonce); } + // -- Public view -- + + /** + * @notice List of pause guardians and their allowed status + * @param pauseGuardian The address to check + * @return Whether the address is a pause guardian + */ + function 
isPauseGuardian(address pauseGuardian) public view override returns (bool) { + return _getStorage().pauseGuardians[pauseGuardian]; + } + + /// @inheritdoc IPausableControl + function paused() public view override(PausableUpgradeable, IPausableControl) returns (bool) { + return super.paused(); + } + + // -- Public pure -- + /** * @notice Decodes the collect data. * @param data The encoded collect parameters. @@ -295,6 +470,76 @@ contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringC return abi.decode(data, (CollectParams)); } + /** + * @notice Compute the struct hash for an RCA. + * @param rca The RCA to hash + * @return The struct hash + */ + function hashRCA(RecurringCollectionAgreement memory rca) public pure returns (bytes32) { + // forge-lint: disable-start(asm-keccak256) + // Split abi.encode to avoid stack-too-deep with 14 fields + bytes memory metadataHash = abi.encode(keccak256(rca.metadata)); + return + keccak256( + bytes.concat( + abi.encode( + RCA_TYPEHASH, + rca.deadline, + rca.endsAt, + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.maxInitialTokens, + rca.maxOngoingTokensPerSecond + ), + abi.encode( + rca.minSecondsPerCollection, + rca.maxSecondsPerCollection, + rca.conditions, + rca.minSecondsPayerCancellationNotice, + rca.nonce + ), + metadataHash + ) + ); + // forge-lint: disable-end(asm-keccak256) + } + + /** + * @notice Compute the struct hash for an RCAU. 
+ * @param rcau The RCAU to hash + * @return The struct hash + */ + function hashRCAU(RecurringCollectionAgreementUpdate memory rcau) public pure returns (bytes32) { + // forge-lint: disable-start(asm-keccak256) + return + keccak256( + abi.encode( + RCAU_TYPEHASH, + rcau.agreementId, + rcau.deadline, + rcau.endsAt, + rcau.maxInitialTokens, + rcau.maxOngoingTokensPerSecond, + rcau.minSecondsPerCollection, + rcau.maxSecondsPerCollection, + rcau.conditions, + rcau.minSecondsPayerCancellationNotice, + rcau.nonce, + keccak256(rcau.metadata) + ) + ); + // forge-lint: disable-end(asm-keccak256) + } + + // -- Internal -- + + function _checkPauseGuardian() internal view { + require(_getStorage().pauseGuardians[msg.sender], NotPauseGuardian(msg.sender)); + } + + // -- Private mutating -- + /* solhint-disable function-max-lines */ /** * @notice Collect payment through the payments protocol. @@ -309,7 +554,7 @@ contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringC * `_params.tokens` is zero, to prevent bypassing collection windows while updating * `lastCollectionAt`. * - * Emits {PaymentCollected} and {RCACollected} events. + * Emits {RCACollected} event. 
* * @param _paymentType The type of payment to collect * @param _params The decoded parameters for the collection @@ -319,18 +564,19 @@ contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringC IGraphPayments.PaymentTypes _paymentType, CollectParams memory _params ) private returns (uint256) { - AgreementData storage agreement = _getAgreementStorage(_params.agreementId); + AgreementStorage storage agreement = _getAgreementStorage(_params.agreementId); // Check if agreement is collectable first (bool isCollectable, uint256 collectionSeconds, AgreementNotCollectableReason reason) = _getCollectionInfo( - agreement + agreement.state, + agreement.collectableUntil, + agreement.lastCollectionAt, + agreement.acceptedAt, + agreement.activeTerms.maxSecondsPerCollection ); - require(isCollectable, RecurringCollectorAgreementNotCollectable(_params.agreementId, reason)); + require(isCollectable, AgreementNotCollectable(_params.agreementId, reason)); - require( - msg.sender == agreement.dataService, - RecurringCollectorDataServiceNotAuthorized(_params.agreementId, msg.sender) - ); + require(msg.sender == agreement.dataService, DataServiceNotAuthorized(_params.agreementId, msg.sender)); // Check the service provider has an active provision with the data service // This prevents an attack where the payer can deny the service provider from collecting payments @@ -340,7 +586,7 @@ contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringC agreement.serviceProvider, agreement.dataService ); - require(tokensAvailable > 0, RecurringCollectorUnauthorizedDataService(agreement.dataService)); + require(0 < tokensAvailable, UnauthorizedDataService(agreement.dataService)); } // Always validate temporal constraints (min/maxSecondsPerCollection) even for @@ -358,26 +604,42 @@ contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringC /* solhint-disable gas-strict-inequalities */ require( slippage <= _params.maxSlippage, - 
RecurringCollectorExcessiveSlippage(_params.tokens, tokensToCollect, _params.maxSlippage) + ExcessiveSlippage(_params.tokens, tokensToCollect, _params.maxSlippage) ); /* solhint-enable gas-strict-inequalities */ } agreement.lastCollectionAt = uint64(block.timestamp); - // Hard eligibility gate for contract payers that opt in via ERC165 - if (0 < tokensToCollect && 0 < agreement.payer.code.length) { - try IERC165(agreement.payer).supportsInterface(type(IProviderEligibility).interfaceId) returns ( - bool supported - ) { - if (supported) { - require( - IProviderEligibility(agreement.payer).isEligible(agreement.serviceProvider), - RecurringCollectorCollectionNotEligible(_params.agreementId, agreement.serviceProvider) - ); - } - } catch {} - // Let contract payers top up escrow if short - try IAgreementOwner(agreement.payer).beforeCollection(_params.agreementId, tokensToCollect) {} catch {} + // Eligibility gate: only when the agreement has CONDITION_ELIGIBILITY_CHECK set. + // Fails open: collection proceeds if the staticcall reverts or returns malformed data. + // Only an explicit isEligible() == 0 blocks collection. This prevents a buggy payer + // callback from griefing the service provider. + // Low-level staticcall avoids caller-side ABI decoding reverts (skipped if payer is caller). 
+ if ( + 0 < tokensToCollect && + (agreement.activeTerms.conditions & CONDITION_ELIGIBILITY_CHECK != 0) && + _shouldCallback(agreement.payer) + ) { + // solhint-disable-next-line avoid-low-level-calls + (bool success, bytes memory result) = agreement.payer.staticcall{ gas: MAX_CALLBACK_GAS }( + abi.encodeCall(IProviderEligibility.isEligible, (agreement.serviceProvider)) + ); + if (success && !(result.length < 32) && abi.decode(result, (uint256)) == 0) + revert CollectionNotEligible(_params.agreementId, agreement.serviceProvider); + + if (!success || result.length < 32) + emit PayerCallbackFailed(_params.agreementId, agreement.payer, PayerCallbackStage.EligibilityCheck); + } + + // Let contract payers top up escrow if short + if (0 < tokensToCollect && _shouldCallback(agreement.payer)) { + // solhint-disable-next-line avoid-low-level-calls + (bool ok, ) = agreement.payer.call{ gas: MAX_CALLBACK_GAS }( + abi.encodeCall(IAgreementOwner.beforeCollection, (_params.agreementId, tokensToCollect)) + ); + if (!ok) { + emit PayerCallbackFailed(_params.agreementId, agreement.payer, PayerCallbackStage.BeforeCollection); + } } if (0 < tokensToCollect) { @@ -392,28 +654,36 @@ contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringC ); } - emit PaymentCollected( - _paymentType, - _params.collectionId, - agreement.payer, - agreement.serviceProvider, - agreement.dataService, - tokensToCollect - ); + emit RCACollected(_params.agreementId, _params.collectionId, agreement.state); - emit RCACollected( - agreement.dataService, - agreement.payer, - agreement.serviceProvider, - _params.agreementId, - _params.collectionId, - tokensToCollect, - _params.dataServiceCut - ); + // Notify contract payers so they can reconcile escrow in the same transaction. 
+ if (_shouldCallback(agreement.payer)) { + // solhint-disable-next-line avoid-low-level-calls + (bool ok, ) = agreement.payer.call{ gas: MAX_CALLBACK_GAS }( + abi.encodeCall(IAgreementOwner.afterCollection, (_params.agreementId, tokensToCollect)) + ); + if (!ok) { + emit PayerCallbackFailed(_params.agreementId, agreement.payer, PayerCallbackStage.AfterCollection); + } + } - // Notify contract payers so they can reconcile escrow in the same transaction - if (0 < agreement.payer.code.length) { - try IAgreementOwner(agreement.payer).afterCollection(_params.agreementId, tokensToCollect) {} catch {} + if (agreement.state & SETTLED == 0 && _getMaxNextClaim(_params.agreementId, CLAIM_SCOPE_ACTIVE) == 0) + agreement.state = agreement.state | SETTLED; + + // Auto-update: promote pending terms when the current cycle settles. + // On success _accept emits the ACCEPTED notification, so skip the SETTLED one. + bool autoUpdated; + if ( + (agreement.state & (SETTLED | AUTO_UPDATE) == (SETTLED | AUTO_UPDATE)) && agreement.pendingTerms.endsAt != 0 + ) autoUpdated = _tryAutoUpdate(_params.agreementId, agreement); + if (agreement.state & SETTLED != 0 && !autoUpdated) { + _emitAndNotify( + _params.agreementId, + agreement.activeTerms.hash, + agreement.state, + agreement.dataService, + agreement.payer + ); } return tokensToCollect; @@ -421,305 +691,405 @@ contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringC /* solhint-enable function-max-lines */ /** - * @notice Requires that the collection window parameters are valid. - * - * @param _endsAt The end time of the agreement - * @param _minSecondsPerCollection The minimum seconds per collection - * @param _maxSecondsPerCollection The maximum seconds per collection + * @notice Accept terms (initial or update). Returns false only when catchCallbackRevert is true + * and the data service callback reverts; otherwise always returns true or reverts. 
+ * @param agreementId The agreement ID + * @param versionHash The expected terms hash + * @param extraData Opaque data forwarded to the data service callback + * @param agreement The agreement storage reference + * @param terms The terms to accept (activeTerms or pendingTerms) + * @param skipDeadlineCheck True to skip the offer deadline check + * @param catchCallbackRevert True to catch data service callback reverts (for auto-update) + * @param options Bitmask of agreement options (e.g. AUTO_UPDATE) to apply to state + * @return True if accept succeeded, false if callback reverted and catchCallbackRevert was true */ - function _requireValidCollectionWindowParams( - uint64 _endsAt, - uint32 _minSecondsPerCollection, - uint32 _maxSecondsPerCollection - ) private view { - // Agreement needs to end in the future - require(_endsAt > block.timestamp, RecurringCollectorAgreementElapsedEndsAt(block.timestamp, _endsAt)); - - // Collection window needs to be at least MIN_SECONDS_COLLECTION_WINDOW - require( - _maxSecondsPerCollection > _minSecondsPerCollection && - // solhint-disable-next-line gas-strict-inequalities - (_maxSecondsPerCollection - _minSecondsPerCollection >= MIN_SECONDS_COLLECTION_WINDOW), - RecurringCollectorAgreementInvalidCollectionWindow( - MIN_SECONDS_COLLECTION_WINDOW, - _minSecondsPerCollection, - _maxSecondsPerCollection - ) - ); - - // Agreement needs to last at least one min collection window - require( + /* solhint-disable function-max-lines */ + function _accept( + bytes16 agreementId, + bytes32 versionHash, + bytes memory extraData, + AgreementStorage storage agreement, + AgreementTerms storage terms, + bool skipDeadlineCheck, + bool catchCallbackRevert, + uint16 options + ) private returns (bool) { + require(terms.hash != bytes32(0), AgreementTermsEmpty(agreementId)); + require(versionHash == terms.hash, AgreementHashMismatch(agreementId, terms.hash, versionHash)); + + // Enforce offer deadline (skipped for auto-update where expiry is the 
trigger). + // deadline=0 means "no deadline" — used by WITH_NOTICE updates where the payer + // does not set an explicit acceptance deadline. + if (!skipDeadlineCheck && terms.deadline != 0) { // solhint-disable-next-line gas-strict-inequalities - _endsAt - block.timestamp >= _minSecondsPerCollection + MIN_SECONDS_COLLECTION_WINDOW, - RecurringCollectorAgreementInvalidDuration( - _minSecondsPerCollection + MIN_SECONDS_COLLECTION_WINDOW, - _endsAt - block.timestamp - ) - ); - } + require(block.timestamp <= terms.deadline, AgreementDeadlineElapsed(block.timestamp, terms.deadline)); + } - /** - * @notice Validates temporal constraints and caps the requested token amount. - * @dev Enforces `minSecondsPerCollection` (unless canceled/elapsed) and returns the lesser of - * the requested amount and the RCA payer's per-collection cap - * (`maxOngoingTokensPerSecond * collectionSeconds`, plus `maxInitialTokens` on first collection). - * @param _agreement The agreement data - * @param _agreementId The ID of the agreement - * @param _tokens The requested token amount (upper bound from data service) - * @param _collectionSeconds Collection duration, already capped at maxSecondsPerCollection - * @return The capped token amount: min(_tokens, payer's max for this collection) - */ - function _requireValidCollect( - AgreementData memory _agreement, - bytes16 _agreementId, - uint256 _tokens, - uint256 _collectionSeconds - ) private view returns (uint256) { - bool canceledOrElapsed = _agreement.state == AgreementState.CanceledByPayer || - block.timestamp > _agreement.endsAt; - if (!canceledOrElapsed) { - require( - // solhint-disable-next-line gas-strict-inequalities - _collectionSeconds >= _agreement.minSecondsPerCollection, - RecurringCollectorCollectionTooSoon( - _agreementId, - // casting to uint32 is safe because _collectionSeconds < minSecondsPerCollection (uint32) - // forge-lint: disable-next-line(unsafe-typecast) - uint32(_collectionSeconds), - 
_agreement.minSecondsPerCollection - ) + // Re-validate time-dependent constraints for the new terms. + // Skipped for auto-update: terms were validated at offer time and invariants can't change. + if (!catchCallbackRevert) { + _requireValidCollectionWindowParams( + terms.endsAt, + terms.minSecondsPerCollection, + terms.maxSecondsPerCollection ); } - // _collectionSeconds is already capped at maxSecondsPerCollection by _getCollectionInfo - uint256 maxTokens = _agreement.maxOngoingTokensPerSecond * _collectionSeconds; - maxTokens += _agreement.lastCollectionAt == 0 ? _agreement.maxInitialTokens : 0; - return Math.min(_tokens, maxTokens); - } + // Data service callback — validates and sets up domain-specific state. + // Skip for no-op updates: when extraData is empty, metadata is unchanged, and the + // agreement is not SETTLED. SETTLED transitions always notify (revival path). + // Safe because data service callbacks only depend on metadata and extraData, not on + // collector-level pricing fields (maxInitialTokens, maxOngoingTokensPerSecond, etc.). 
+ { + bool skipCallback = agreement.pendingTerms.hash == versionHash && + extraData.length == 0 && + keccak256(terms.metadata) == keccak256(agreement.activeTerms.metadata) && + (agreement.state & SETTLED == 0); + + if (!skipCallback) { + if (catchCallbackRevert) { + // solhint-disable-next-line avoid-low-level-calls + (bool ok, ) = agreement.dataService.call{ gas: MAX_CALLBACK_GAS }( + abi.encodeCall( + IDataServiceAgreements.acceptAgreement, + ( + agreementId, + versionHash, + agreement.payer, + agreement.serviceProvider, + terms.metadata, + extraData + ) + ) + ); + if (!ok) return false; + } else { + IDataServiceAgreements(agreement.dataService).acceptAgreement( + agreementId, + versionHash, + agreement.payer, + agreement.serviceProvider, + terms.metadata, + extraData + ); + } + } + } - /** - * @notice See {recoverRCASigner} - * @param _rca The RCA whose hash was signed - * @param _signature The ECDSA signature bytes - * @return The address of the signer - */ - function _recoverRCASigner( - RecurringCollectionAgreement memory _rca, - bytes memory _signature - ) private view returns (address) { - bytes32 messageHash = _hashRCA(_rca); - return ECDSA.recover(messageHash, _signature); + agreement.acceptedAt = uint64(block.timestamp); + uint16 oldState = agreement.state; + + // For updates: promote pending terms to active, clear notice state, revive agreement + if (agreement.pendingTerms.hash == versionHash) { + agreement.activeTerms = agreement.pendingTerms; + agreement.activeOfferNonce = agreement.pendingOfferNonce; + delete agreement.pendingTerms; + agreement.pendingOfferNonce = 0; + // Clear all clearable flags, keep REGISTERED | ACCEPTED | UPDATE + uint16 clearMask = NOTICE_GIVEN | SETTLED | BY_PAYER | BY_PROVIDER | BY_DATA_SERVICE | AUTO_UPDATED; + oldState = (oldState & ~clearMask) | UPDATE; + } + + // Set collectableUntil to the (now-active) terms endsAt — covers both initial and update accepts + agreement.collectableUntil = agreement.activeTerms.endsAt; + + 
uint16 newState = oldState | ACCEPTED; + // Apply togglable options (currently only AUTO_UPDATE) + newState = (newState & ~AUTO_UPDATE) | (options & AUTO_UPDATE); + agreement.state = newState; + + _emitAndNotify(agreementId, versionHash, newState, agreement.dataService, agreement.payer); + return true; } + /* solhint-enable function-max-lines */ /** - * @notice See {recoverRCAUSigner} - * @param _rcau The RCAU whose hash was signed - * @param _signature The ECDSA signature bytes - * @return The address of the signer + * @notice Validate and store an RCA offer. Does not activate — data service must call accept(). + * @param _rca The RCA to validate and store + * @return agreementId The generated agreement ID */ - function _recoverRCAUSigner( - RecurringCollectionAgreementUpdate memory _rcau, - bytes memory _signature - ) private view returns (address) { - bytes32 messageHash = _hashRCAU(_rcau); - return ECDSA.recover(messageHash, _signature); + /* solhint-disable function-max-lines */ + function _validateAndStoreOffer(RecurringCollectionAgreement memory _rca) private returns (bytes16, bytes32) { + /* solhint-disable gas-strict-inequalities */ + require(block.timestamp <= _rca.deadline, AgreementDeadlineElapsed(block.timestamp, _rca.deadline)); + /* solhint-enable gas-strict-inequalities */ + + bytes16 agreementId = _generateAgreementId( + _rca.payer, + _rca.dataService, + _rca.serviceProvider, + _rca.deadline, + _rca.nonce + ); + + require(agreementId != bytes16(0), AgreementIdZero()); + require( + _rca.dataService != address(0) && _rca.payer != address(0) && _rca.serviceProvider != address(0), + AgreementAddressNotSet() + ); + _requireValidCollectionWindowParams(_rca.endsAt, _rca.minSecondsPerCollection, _rca.maxSecondsPerCollection); + _requireEligibilityCapability(_rca.payer, _rca.conditions); + + // Reverts on overflow — rejecting terms that could prevent collection + _rca.maxOngoingTokensPerSecond * _rca.maxSecondsPerCollection; + + AgreementStorage storage 
agreement = _getAgreementStorage(agreementId); + require(agreement.state == 0, AgreementIncorrectState(agreementId, agreement.state)); + + agreement.state = REGISTERED; + agreement.dataService = _rca.dataService; + agreement.payer = _rca.payer; + agreement.serviceProvider = _rca.serviceProvider; + agreement.activeTerms.deadline = _rca.deadline; + agreement.activeTerms.endsAt = _rca.endsAt; + agreement.activeTerms.maxInitialTokens = _rca.maxInitialTokens; + agreement.activeTerms.maxOngoingTokensPerSecond = _rca.maxOngoingTokensPerSecond; + agreement.activeTerms.minSecondsPerCollection = _rca.minSecondsPerCollection; + agreement.activeTerms.maxSecondsPerCollection = _rca.maxSecondsPerCollection; + agreement.activeTerms.conditions = _rca.conditions; + agreement.activeTerms.minSecondsPayerCancellationNotice = _rca.minSecondsPayerCancellationNotice; + agreement.updateNonce = 0; + agreement.activeOfferNonce = _rca.nonce; + agreement.activeTerms.hash = hashRCA(_rca); + agreement.activeTerms.metadata = _rca.metadata; + + _emitAndNotify(agreementId, agreement.activeTerms.hash, REGISTERED, _rca.dataService, _rca.payer); + + return (agreementId, agreement.activeTerms.hash); } + /* solhint-enable function-max-lines */ /** - * @notice See {hashRCA} - * @param _rca The RCA to hash - * @return The EIP712 hash of the RCA + * @notice State-driven update storage. Validates nonce, stores terms, emits event. + * @dev If Offered: overwrites activeTerms (revises the offer). + * If Accepted: writes to pendingTerms (stages for acceptUpdate). + * Otherwise: reverts. + * @param agreement The storage reference to the agreement data + * @param rcau The Recurring Collection Agreement Update to apply + * @param offerOptions Bitmask of offer options (e.g. 
WITH_NOTICE) controlling update behavior + * @return The offer result containing the agreement ID and updated terms */ - function _hashRCA(RecurringCollectionAgreement memory _rca) private view returns (bytes32) { + /* solhint-disable function-max-lines */ + function _validateAndStoreUpdate( + AgreementStorage storage agreement, + RecurringCollectionAgreementUpdate memory rcau, + uint16 offerOptions + ) private returns (OfferResult memory) { + uint16 state = agreement.state; + + require(state & REGISTERED != 0, AgreementIncorrectState(rcau.agreementId, state)); + + if (offerOptions & IF_NOT_ACCEPTED != 0) + require(state & ACCEPTED == 0, AgreementIncorrectState(rcau.agreementId, state)); + + if (offerOptions & WITH_NOTICE == 0 || rcau.deadline != 0) + // solhint-disable-next-line gas-strict-inequalities + require(rcau.deadline >= block.timestamp, AgreementDeadlineElapsed(block.timestamp, rcau.deadline)); + + uint32 expectedNonce = agreement.updateNonce + 1; + require(rcau.nonce == expectedNonce, InvalidUpdateNonce(rcau.agreementId, expectedNonce, rcau.nonce)); + agreement.updateNonce = expectedNonce; + + _requireValidCollectionWindowParams(rcau.endsAt, rcau.minSecondsPerCollection, rcau.maxSecondsPerCollection); + _requireEligibilityCapability(agreement.payer, rcau.conditions); + + // Reverts on overflow — rejecting terms that could prevent collection + rcau.maxOngoingTokensPerSecond * rcau.maxSecondsPerCollection; + + AgreementTerms memory terms; + terms.deadline = rcau.deadline; + terms.endsAt = rcau.endsAt; + terms.maxInitialTokens = rcau.maxInitialTokens; + terms.maxOngoingTokensPerSecond = rcau.maxOngoingTokensPerSecond; + terms.minSecondsPerCollection = rcau.minSecondsPerCollection; + terms.maxSecondsPerCollection = rcau.maxSecondsPerCollection; + terms.conditions = rcau.conditions; + terms.minSecondsPayerCancellationNotice = rcau.minSecondsPayerCancellationNotice; + terms.hash = hashRCAU(rcau); + terms.metadata = rcau.metadata; + + uint16 eventState; + if 
(state & ACCEPTED == 0) { + // Not yet accepted — overwrite active terms directly + agreement.activeTerms = terms; + agreement.activeOfferNonce = rcau.nonce; + state = state | UPDATE; + agreement.state = state; + eventState = state; + } else { + // Already accepted — store as pending + agreement.pendingTerms = terms; + agreement.pendingOfferNonce = rcau.nonce; + eventState = state | UPDATE; // UPDATE in event indicates this version is from an update + + // WITH_NOTICE: derive notice cutoff from rcau.deadline or agreement state + if (offerOptions & WITH_NOTICE != 0) { + uint64 noticeCutoff = rcau.deadline != 0 + ? rcau.deadline + : uint64(block.timestamp) + agreement.activeTerms.minSecondsPayerCancellationNotice; + _applyNotice(agreement, rcau.agreementId, noticeCutoff); + state = state | NOTICE_GIVEN | BY_PAYER; + agreement.state = state; + eventState = state | UPDATE; + } + } + + _emitAndNotify(rcau.agreementId, terms.hash, eventState, agreement.dataService, agreement.payer); + return - _hashTypedDataV4( - keccak256( - abi.encode( - EIP712_RCA_TYPEHASH, - _rca.deadline, - _rca.endsAt, - _rca.payer, - _rca.dataService, - _rca.serviceProvider, - _rca.maxInitialTokens, - _rca.maxOngoingTokensPerSecond, - _rca.minSecondsPerCollection, - _rca.maxSecondsPerCollection, - _rca.nonce, - keccak256(_rca.metadata) - ) - ) - ); + OfferResult({ + agreementId: rcau.agreementId, + dataService: agreement.dataService, + serviceProvider: agreement.serviceProvider, + versionHash: terms.hash, + state: eventState + }); } + /* solhint-enable function-max-lines */ /** - * @notice See {hashRCAU} - * @param _rcau The RCAU to hash - * @return The EIP712 hash of the RCAU + * @notice Attempt to auto-update an agreement by promoting pending terms. + * @dev Called from _collect() when collection window is exhausted and AUTO_UPDATE is set. + * Uses non-reverting callback so collect always succeeds even if upgrade fails. 
+ * @param agreementId The agreement ID + * @param agreement The agreement storage reference + * @return updated True if upgrade succeeded */ - function _hashRCAU(RecurringCollectionAgreementUpdate memory _rcau) private view returns (bytes32) { - return - _hashTypedDataV4( - keccak256( - abi.encode( - EIP712_RCAU_TYPEHASH, - _rcau.agreementId, - _rcau.deadline, - _rcau.endsAt, - _rcau.maxInitialTokens, - _rcau.maxOngoingTokensPerSecond, - _rcau.minSecondsPerCollection, - _rcau.maxSecondsPerCollection, - _rcau.nonce, - keccak256(_rcau.metadata) - ) - ) - ); + function _tryAutoUpdate(bytes16 agreementId, AgreementStorage storage agreement) private returns (bool updated) { + // Reuse _accept with catchCallbackRevert=true, skipDeadlineCheck=true. + // Collection window validation is skipped — terms were validated at offer time. + // Pass current state as options to preserve AUTO_UPDATE bit. + updated = _accept( + agreementId, + agreement.pendingTerms.hash, + "", + agreement, + agreement.pendingTerms, + true, + true, + agreement.state + ); + + if (updated) agreement.state = agreement.state | AUTO_UPDATED; + + emit AutoUpdateAttempted(agreementId, updated); } /** - * @notice Requires that the signer for the RCA is authorized - * by the payer of the RCA. - * @param _rca The RCA whose hash was signed - * @param _signature The ECDSA signature bytes - * @return The address of the authorized signer + * @notice Apply a notice cutoff to an agreement, enforcing minSecondsPayerCancellationNotice. + * @dev Active terms are not modified — collectableUntil is reduced to min(collectableUntil, noticeCutoff). 
+ * @param agreement The agreement storage reference + * @param agreementId The agreement ID (for error reporting) + * @param noticeCutoff The target cutoff timestamp (must satisfy min notice from now) */ - function _requireAuthorizedRCASigner( - RecurringCollectionAgreement memory _rca, - bytes memory _signature - ) private view returns (address) { - address signer = _recoverRCASigner(_rca, _signature); - require(_isAuthorized(_rca.payer, signer), RecurringCollectorInvalidSigner()); + function _applyNotice(AgreementStorage storage agreement, bytes16 agreementId, uint64 noticeCutoff) private { + uint32 minNotice = agreement.activeTerms.minSecondsPayerCancellationNotice; + uint256 actualNotice = noticeCutoff < block.timestamp ? 0 : noticeCutoff - block.timestamp; + /* solhint-disable gas-strict-inequalities */ + require(minNotice <= actualNotice, InsufficientNotice(agreementId, minNotice, actualNotice)); + /* solhint-enable gas-strict-inequalities */ - return signer; + if (noticeCutoff < agreement.collectableUntil) agreement.collectableUntil = noticeCutoff; } /** - * @notice Requires that the signer for the RCAU is authorized - * by the payer. - * @param _rcau The RCAU whose hash was signed - * @param _signature The ECDSA signature bytes - * @param _payer The address of the payer - * @return The address of the authorized signer + * @notice Emit {AgreementUpdated} and send non-reverting lifecycle notifications to both + * the data service and the payer. + * @dev Consolidates the emit-and-notify pattern used by every state-transition path. + * Notifications to `msg.sender` and EOAs are skipped by {_notifyStateChange}. 
+ * @param _agreementId The agreement ID + * @param _versionHash The EIP-712 hash of the terms involved in this change + * @param _state The agreement state flags + * @param _dataService The data service to notify + * @param _payer The payer to notify */ - function _requireAuthorizedRCAUSigner( - RecurringCollectionAgreementUpdate memory _rcau, - bytes memory _signature, + function _emitAndNotify( + bytes16 _agreementId, + bytes32 _versionHash, + uint16 _state, + address _dataService, address _payer - ) private view returns (address) { - address signer = _recoverRCAUSigner(_rcau, _signature); - require(_isAuthorized(_payer, signer), RecurringCollectorInvalidSigner()); - - return signer; + ) private { + emit AgreementUpdated(_agreementId, _versionHash, _state); + _notifyStateChange(_dataService, _agreementId, _versionHash, _state); + _notifyStateChange(_payer, _agreementId, _versionHash, _state); } /** - * @notice Validates that an agreement is in a valid state for updating and that the caller is authorized. - * @param _agreementId The ID of the agreement to validate - * @return The storage reference to the agreement data + * @notice Non-reverting callback to notify a contract of an agreement state change. + * @dev Uses low-level call with gas cap. Failures are silently ignored. + * + * Skips notification when `_target` is `msg.sender` (caller already has execution + * context) or an EOA (no code to call). This eliminates callback loops, simplifies + * reentrancy reasoning, and removes an attack surface — callers sequence their own + * post-call reconciliation instead of relying on a callback from the callee. 
+ * + * @param _target The contract to notify + * @param _agreementId The agreement ID + * @param _versionHash The EIP-712 hash of the terms involved in this change + * @param _state The agreement state flags, includes UPDATE when applicable */ - function _requireValidUpdateTarget(bytes16 _agreementId) private view returns (AgreementData storage) { - AgreementData storage agreement = _getAgreementStorage(_agreementId); - require( - agreement.state == AgreementState.Accepted, - RecurringCollectorAgreementIncorrectState(_agreementId, agreement.state) - ); - require( - agreement.dataService == msg.sender, - RecurringCollectorDataServiceNotAuthorized(_agreementId, msg.sender) + function _notifyStateChange(address _target, bytes16 _agreementId, bytes32 _versionHash, uint16 _state) private { + if (_target == msg.sender || _target.code.length == 0) return; + ( + // solhint-disable-next-line avoid-low-level-calls + _target.call{ gas: MAX_CALLBACK_GAS }( + abi.encodeCall( + IAgreementStateChangeCallback.afterAgreementStateChange, + (_agreementId, _versionHash, _state) + ) + ) ); - return agreement; } - /** - * @notice Validates and stores an update to a Recurring Collection Agreement. - * Shared validation/storage/emit logic for the update function. 
- * @param _agreement The storage reference to the agreement data - * @param _rcau The Recurring Collection Agreement Update to apply - */ - function _validateAndStoreUpdate( - AgreementData storage _agreement, - RecurringCollectionAgreementUpdate calldata _rcau - ) private { - // validate nonce to prevent replay attacks - uint32 expectedNonce = _agreement.updateNonce + 1; - require( - _rcau.nonce == expectedNonce, - RecurringCollectorInvalidUpdateNonce(_rcau.agreementId, expectedNonce, _rcau.nonce) - ); + // -- Private view/pure -- - _requireValidCollectionWindowParams(_rcau.endsAt, _rcau.minSecondsPerCollection, _rcau.maxSecondsPerCollection); - - // update the agreement - _agreement.endsAt = _rcau.endsAt; - _agreement.maxInitialTokens = _rcau.maxInitialTokens; - _agreement.maxOngoingTokensPerSecond = _rcau.maxOngoingTokensPerSecond; - _agreement.minSecondsPerCollection = _rcau.minSecondsPerCollection; - _agreement.maxSecondsPerCollection = _rcau.maxSecondsPerCollection; - _agreement.updateNonce = _rcau.nonce; - - emit AgreementUpdated( - _agreement.dataService, - _agreement.payer, - _agreement.serviceProvider, - _rcau.agreementId, - uint64(block.timestamp), - _agreement.endsAt, - _agreement.maxInitialTokens, - _agreement.maxOngoingTokensPerSecond, - _agreement.minSecondsPerCollection, - _agreement.maxSecondsPerCollection - ); + function _getStorage() private pure returns (RecurringCollectorStorage storage $) { + // solhint-disable-next-line no-inline-assembly + assembly { + $.slot := RECURRING_COLLECTOR_STORAGE_LOCATION + } } /** - * @notice Gets an agreement to be updated. - * @param _agreementId The ID of the agreement to get - * @return The storage reference to the agreement data + * @notice Check whether a callback to `_target` should proceed. + * @dev Returns false (skip) when the target is `msg.sender` (caller sequences its own + * post-call logic) or an EOA (no code to call). Reverts if gas is insufficient for a + * safe callback dispatch. 
+ * @param _target The intended callback recipient + * @return True if the callback should be dispatched */ - function _getAgreementStorage(bytes16 _agreementId) private view returns (AgreementData storage) { - return agreements[_agreementId]; + function _shouldCallback(address _target) private view returns (bool) { + if (_target == msg.sender || _target.code.length == 0) return false; + if (gasleft() < (MAX_CALLBACK_GAS * 64) / 63) revert InsufficientCallbackGas(); + return true; } /** - * @notice See {getAgreement} + * @notice Gets an agreement to be updated. * @param _agreementId The ID of the agreement to get - * @return The agreement data + * @return The storage reference to the agreement data */ - function _getAgreement(bytes16 _agreementId) private view returns (AgreementData memory) { - return agreements[_agreementId]; + function _getAgreementStorage(bytes16 _agreementId) private view returns (AgreementStorage storage) { + return _getStorage().agreements[_agreementId]; } - /** - * @notice Internal function to get collection info for an agreement. - * @dev Single source of truth for collection window logic. The returned `collectionSeconds` - * is capped at `maxSecondsPerCollection` — this is a cap on tokens, not a deadline; late - * collections succeed but receive at most `maxSecondsPerCollection` worth of tokens. 
- * @param _agreement The agreement data - * @return isCollectable Whether the agreement can be collected from - * @return collectionSeconds The valid collection duration in seconds, capped at - * maxSecondsPerCollection (0 if not collectable) - * @return reason The reason why the agreement is not collectable (None if collectable) - */ function _getCollectionInfo( - AgreementData memory _agreement + uint16 _state, + uint64 _collectableUntil, + uint64 _lastCollectionAt, + uint64 _acceptedAt, + uint32 _maxSecondsPerCollection ) private view returns (bool, uint256, AgreementNotCollectableReason) { - // Check if agreement is in collectable state - bool hasValidState = _agreement.state == AgreementState.Accepted || - _agreement.state == AgreementState.CanceledByPayer; + // Collectable = accepted and not settled + bool hasValidState = (_state & (ACCEPTED | SETTLED)) == ACCEPTED; if (!hasValidState) { return (false, 0, AgreementNotCollectableReason.InvalidAgreementState); } - bool canceledOrElapsed = _agreement.state == AgreementState.CanceledByPayer || - block.timestamp > _agreement.endsAt; - uint256 canceledOrNow = _agreement.state == AgreementState.CanceledByPayer - ? _agreement.canceledAt - : block.timestamp; - - uint256 collectionEnd = canceledOrElapsed ? Math.min(canceledOrNow, _agreement.endsAt) : block.timestamp; - uint256 collectionStart = _agreementCollectionStartAt(_agreement); + uint256 collectionEnd = block.timestamp < _collectableUntil ? block.timestamp : _collectableUntil; + uint256 collectionStart = 0 < _lastCollectionAt ? 
_lastCollectionAt : _acceptedAt; if (collectionEnd < collectionStart) { return (false, 0, AgreementNotCollectableReason.InvalidTemporalWindow); @@ -730,59 +1100,155 @@ contract RecurringCollector is EIP712, GraphDirectory, Authorizable, IRecurringC } uint256 elapsed = collectionEnd - collectionStart; - return ( - true, - Math.min(elapsed, uint256(_agreement.maxSecondsPerCollection)), - AgreementNotCollectableReason.None - ); + return (true, Math.min(elapsed, uint256(_maxSecondsPerCollection)), AgreementNotCollectableReason.None); } /** - * @notice Gets the start time for the collection of an agreement. + * @notice Validates temporal constraints and caps the requested token amount. + * @dev Enforces `minSecondsPerCollection` (unless canceled/elapsed) and returns the lesser of + * the requested amount and the RCA payer's per-collection cap + * (`maxOngoingTokensPerSecond * collectionSeconds`, plus `maxInitialTokens` on first collection). * @param _agreement The agreement data - * @return The start time for the collection of the agreement + * @param _agreementId The ID of the agreement + * @param _tokens The requested token amount (upper bound from data service) + * @param _collectionSeconds Collection duration, already capped at maxSecondsPerCollection + * @return The capped token amount: min(_tokens, payer's max for this collection) */ - function _agreementCollectionStartAt(AgreementData memory _agreement) private pure returns (uint256) { - return _agreement.lastCollectionAt > 0 ? 
_agreement.lastCollectionAt : _agreement.acceptedAt; + function _requireValidCollect( + AgreementStorage storage _agreement, + bytes16 _agreementId, + uint256 _tokens, + uint256 _collectionSeconds + ) private view returns (uint256) { + if (block.timestamp < _agreement.collectableUntil) { + require( + // solhint-disable-next-line gas-strict-inequalities + _collectionSeconds >= _agreement.activeTerms.minSecondsPerCollection, + CollectionTooSoon( + _agreementId, + // casting to uint32 is safe because _collectionSeconds < minSecondsPerCollection (uint32) + // forge-lint: disable-next-line(unsafe-typecast) + uint32(_collectionSeconds), + _agreement.activeTerms.minSecondsPerCollection + ) + ); + } + // _collectionSeconds is already capped at maxSecondsPerCollection by _getCollectionInfo + uint256 maxTokens = _agreement.activeTerms.maxOngoingTokensPerSecond * _collectionSeconds; + maxTokens += _agreement.lastCollectionAt == 0 ? _agreement.activeTerms.maxInitialTokens : 0; + + return Math.min(_tokens, maxTokens); } /** - * @notice Compute the maximum tokens collectable in the next collection (worst case). - * @dev For active agreements uses endsAt as the collection end (worst case), - * not block.timestamp (current). Returns 0 for non-collectable states. - * @param _a The agreement data - * @return The maximum tokens that could be collected + * @notice Requires that the collection window parameters are valid. 
+ * @param _endsAt The end time of the agreement + * @param _minSecondsPerCollection The minimum seconds per collection + * @param _maxSecondsPerCollection The maximum seconds per collection */ - function _getMaxNextClaim(AgreementData memory _a) private pure returns (uint256) { - // CanceledByServiceProvider = immediately non-collectable - if (_a.state == AgreementState.CanceledByServiceProvider) return 0; - // Only Accepted and CanceledByPayer are collectable - if (_a.state != AgreementState.Accepted && _a.state != AgreementState.CanceledByPayer) return 0; + function _requireValidCollectionWindowParams( + uint64 _endsAt, + uint32 _minSecondsPerCollection, + uint32 _maxSecondsPerCollection + ) private view { + InvalidCollectionWindowReason reason; + /* solhint-disable gas-strict-inequalities */ + if (_endsAt <= block.timestamp) reason = InvalidCollectionWindowReason.ElapsedEndsAt; + else if ( + _maxSecondsPerCollection <= _minSecondsPerCollection || + _maxSecondsPerCollection - _minSecondsPerCollection < MIN_SECONDS_COLLECTION_WINDOW + ) + reason = InvalidCollectionWindowReason.InvalidWindow; + /* solhint-enable gas-strict-inequalities */ + else if (_endsAt - block.timestamp < uint256(_minSecondsPerCollection) + MIN_SECONDS_COLLECTION_WINDOW) + reason = InvalidCollectionWindowReason.InsufficientDuration; + else return; - // Collection starts from last collection (or acceptance if never collected) - uint256 collectionStart = 0 < _a.lastCollectionAt ? _a.lastCollectionAt : _a.acceptedAt; + revert AgreementInvalidCollectionWindow(reason, _minSecondsPerCollection, _maxSecondsPerCollection); + } - // Determine the latest possible collection end + /** + * @notice Validates that a payer for an agreement with CONDITION_ELIGIBILITY_CHECK + * implements IProviderEligibility (via ERC-165). + * @dev Agreeing to eligibility checks is a significant commitment — it gives the payer + * the ability to deny payment to the service provider. 
Both parties must see this + * condition explicitly at offer time so neither is surprised. + * + * Without this check a payer could include an apparently-inert eligibility condition + * (e.g. from an EOA or contract that does not yet implement the interface) that later + * becomes enforceable via account upgrade (ERC-7702, metamorphic deploy, etc.), + * enabling surprise payment denials. Requiring ERC-165 confirmation at offer time + * ensures the condition is real and intentional. + * + * Note: even if a payer contract supports IProviderEligibility, it is free to create + * offers without CONDITION_ELIGIBILITY_CHECK — the condition is opt-in per agreement. + * @param _payer The payer address to check for eligibility capability + * @param _conditions The condition flags bitmap + */ + function _requireEligibilityCapability(address _payer, uint16 _conditions) private view { + if (_conditions & CONDITION_ELIGIBILITY_CHECK != 0) { + require( + ERC165Checker.supportsInterface(_payer, type(IProviderEligibility).interfaceId), + EligibilityConditionNotSupported(_payer) + ); + } + } + + function _getMaxNextClaim(bytes16 agreementId, uint8 claimScope) private view returns (uint256 maxClaim) { + AgreementStorage memory _a = _getStorage().agreements[agreementId]; + + uint256 maxCurrentClaim = claimScope & CLAIM_SCOPE_ACTIVE != 0 ? _maxClaimForTerms(_a, _a.activeTerms) : 0; + uint256 maxPendingClaim = claimScope & CLAIM_SCOPE_PENDING != 0 ? _maxClaimForTerms(_a, _a.pendingTerms) : 0; + + maxClaim = maxCurrentClaim < maxPendingClaim ? maxPendingClaim : maxCurrentClaim; + } + + /** + * @notice Compute max claim for a given set of terms against the agreement's lifecycle state. 
+ * @dev Handles all agreement states uniformly: + * - NotAccepted with stored terms (offered): block.timestamp as proxy for acceptedAt + * - Accepted: lastCollectionAt/acceptedAt-based window up to endsAt + * - CanceledByPayer / CanceledByServiceProvider: window capped at min(collectableUntil, endsAt) + * - Settled / empty slots: 0 + * @param _a The agreement data (lifecycle state) + * @param _terms The terms to evaluate (activeTerms or pendingTerms) + * @return The maximum possible claim for the given terms + */ + function _maxClaimForTerms( + AgreementStorage memory _a, + AgreementTerms memory _terms + ) private view returns (uint256) { + if (_terms.endsAt == 0) return 0; + + uint256 collectionStart; uint256 collectionEnd; - if (_a.state == AgreementState.CanceledByPayer) { - // Payer cancel freezes the window at min(canceledAt, endsAt) - collectionEnd = _a.canceledAt < _a.endsAt ? _a.canceledAt : _a.endsAt; + + uint16 s = _a.state; + if (s & SETTLED != 0 || s == 0) { + // Settled or empty — nothing claimable + return 0; + } else if (s & ACCEPTED == 0) { + // Registered but not accepted (offered) — use block.timestamp as proxy for acceptedAt + collectionStart = block.timestamp; + collectionEnd = _terms.endsAt; + } else if (s & NOTICE_GIVEN == 0) { + // Active (accepted, not terminated) + collectionStart = 0 < _a.lastCollectionAt ? _a.lastCollectionAt : _a.acceptedAt; + collectionEnd = _terms.endsAt; } else { - // Active: collection window capped at endsAt - collectionEnd = _a.endsAt; + // Terminated but not settled — collect up to min(collectableUntil, terms.endsAt) + collectionStart = 0 < _a.lastCollectionAt ? _a.lastCollectionAt : _a.acceptedAt; + collectionEnd = _a.collectableUntil < _terms.endsAt ? 
_a.collectableUntil : _terms.endsAt; } - // No collection possible if window is empty - // solhint-disable-next-line gas-strict-inequalities - if (collectionEnd <= collectionStart) return 0; - - // Max seconds is capped by maxSecondsPerCollection (enforced by _requireValidCollect) + if (!(collectionStart < collectionEnd)) return 0; uint256 windowSeconds = collectionEnd - collectionStart; - uint256 maxSeconds = windowSeconds < _a.maxSecondsPerCollection ? windowSeconds : _a.maxSecondsPerCollection; - - uint256 maxClaim = _a.maxOngoingTokensPerSecond * maxSeconds; - if (_a.lastCollectionAt == 0) maxClaim += _a.maxInitialTokens; - return maxClaim; + uint256 effectiveSeconds = windowSeconds < _terms.maxSecondsPerCollection + ? windowSeconds + : _terms.maxSecondsPerCollection; + return + _terms.maxOngoingTokensPerSecond * effectiveSeconds + + (_a.lastCollectionAt == 0 ? _terms.maxInitialTokens : 0); } /** diff --git a/packages/horizon/hardhat.config.ts b/packages/horizon/hardhat.config.ts index d9b1334e4..76f1e7bc7 100644 --- a/packages/horizon/hardhat.config.ts +++ b/packages/horizon/hardhat.config.ts @@ -24,6 +24,7 @@ const config: HardhatUserConfig = { enabled: true, runs: 20, }, + evmVersion: 'cancun', }, }, etherscan: { diff --git a/packages/horizon/ignition/modules/core/HorizonProxies.ts b/packages/horizon/ignition/modules/core/HorizonProxies.ts index e3ad0a737..8b501eed7 100644 --- a/packages/horizon/ignition/modules/core/HorizonProxies.ts +++ b/packages/horizon/ignition/modules/core/HorizonProxies.ts @@ -3,6 +3,7 @@ import { ethers } from 'ethers' import GraphPaymentsArtifact from '../../../build/contracts/contracts/payments/GraphPayments.sol/GraphPayments.json' import PaymentsEscrowArtifact from '../../../build/contracts/contracts/payments/PaymentsEscrow.sol/PaymentsEscrow.json' +import RecurringCollectorArtifact from '../../../build/contracts/contracts/payments/collectors/RecurringCollector.sol/RecurringCollector.json' import { MigrateControllerGovernorModule 
} from '../periphery/Controller' import GraphPeripheryModule from '../periphery/periphery' import { deployGraphProxy } from '../proxy/GraphProxy' @@ -40,12 +41,21 @@ export default buildModule('HorizonProxies', (m) => { { id: 'setContractProxy_PaymentsEscrow' }, ) + // Deploy RecurringCollector proxy (not registered in Controller — RC reads from Controller but is not looked up by others) + const { Proxy: RecurringCollectorProxy, ProxyAdmin: RecurringCollectorProxyAdmin } = + deployTransparentUpgradeableProxy(m, { + name: 'RecurringCollector', + artifact: RecurringCollectorArtifact, + }) + return { HorizonStakingProxy, GraphPaymentsProxy, PaymentsEscrowProxy, + RecurringCollectorProxy, GraphPaymentsProxyAdmin, PaymentsEscrowProxyAdmin, + RecurringCollectorProxyAdmin, } }) @@ -62,7 +72,21 @@ export const MigrateHorizonProxiesDeployerModule = buildModule('HorizonProxiesDe artifact: PaymentsEscrowArtifact, }) - return { GraphPaymentsProxy, PaymentsEscrowProxy, GraphPaymentsProxyAdmin, PaymentsEscrowProxyAdmin } + // Deploy RecurringCollector proxy + const { Proxy: RecurringCollectorProxy, ProxyAdmin: RecurringCollectorProxyAdmin } = + deployTransparentUpgradeableProxy(m, { + name: 'RecurringCollector', + artifact: RecurringCollectorArtifact, + }) + + return { + GraphPaymentsProxy, + PaymentsEscrowProxy, + RecurringCollectorProxy, + GraphPaymentsProxyAdmin, + PaymentsEscrowProxyAdmin, + RecurringCollectorProxyAdmin, + } }) export const MigrateHorizonProxiesGovernorModule = buildModule('HorizonProxiesGovernor', (m) => { diff --git a/packages/horizon/ignition/modules/core/RecurringCollector.ts b/packages/horizon/ignition/modules/core/RecurringCollector.ts new file mode 100644 index 000000000..25ba5f6a0 --- /dev/null +++ b/packages/horizon/ignition/modules/core/RecurringCollector.ts @@ -0,0 +1,42 @@ +import { buildModule } from '@nomicfoundation/hardhat-ignition/modules' + +import RecurringCollectorArtifact from 
'../../../build/contracts/contracts/payments/collectors/RecurringCollector.sol/RecurringCollector.json' +import GraphPeripheryModule from '../periphery/periphery' +import { deployImplementation } from '../proxy/implementation' +import { upgradeTransparentUpgradeableProxy } from '../proxy/TransparentUpgradeableProxy' +import HorizonProxiesModule from './HorizonProxies' + +export default buildModule('RecurringCollector', (m) => { + const { Controller } = m.useModule(GraphPeripheryModule) + const { RecurringCollectorProxyAdmin, RecurringCollectorProxy } = m.useModule(HorizonProxiesModule) + + const governor = m.getAccount(1) + + // Deploy RecurringCollector implementation - requires periphery and proxies to be registered in the controller + const RecurringCollectorImplementation = deployImplementation( + m, + { + name: 'RecurringCollector', + artifact: RecurringCollectorArtifact, + constructorArgs: [Controller], + }, + { after: [GraphPeripheryModule, HorizonProxiesModule] }, + ) + + // Upgrade proxy to implementation contract + const RecurringCollector = upgradeTransparentUpgradeableProxy( + m, + RecurringCollectorProxyAdmin, + RecurringCollectorProxy, + RecurringCollectorImplementation, + { + name: 'RecurringCollector', + artifact: RecurringCollectorArtifact, + initArgs: [], + }, + ) + + m.call(RecurringCollectorProxyAdmin, 'transferOwnership', [governor], { after: [RecurringCollector] }) + + return { RecurringCollector, RecurringCollectorProxyAdmin, RecurringCollectorImplementation } +}) diff --git a/packages/horizon/ignition/modules/core/core.ts b/packages/horizon/ignition/modules/core/core.ts index c71ae232b..7644e8c76 100644 --- a/packages/horizon/ignition/modules/core/core.ts +++ b/packages/horizon/ignition/modules/core/core.ts @@ -4,12 +4,15 @@ import GraphPaymentsModule, { MigrateGraphPaymentsModule } from './GraphPayments import GraphTallyCollectorModule, { MigrateGraphTallyCollectorModule } from './GraphTallyCollector' import HorizonStakingModule, { 
MigrateHorizonStakingDeployerModule } from './HorizonStaking' import PaymentsEscrowModule, { MigratePaymentsEscrowModule } from './PaymentsEscrow' +import RecurringCollectorModule from './RecurringCollector' export default buildModule('GraphHorizon_Core', (m) => { const { HorizonStaking, HorizonStakingImplementation } = m.useModule(HorizonStakingModule) const { GraphPaymentsProxyAdmin, GraphPayments, GraphPaymentsImplementation } = m.useModule(GraphPaymentsModule) const { PaymentsEscrowProxyAdmin, PaymentsEscrow, PaymentsEscrowImplementation } = m.useModule(PaymentsEscrowModule) const { GraphTallyCollector } = m.useModule(GraphTallyCollectorModule) + const { RecurringCollectorProxyAdmin, RecurringCollector, RecurringCollectorImplementation } = + m.useModule(RecurringCollectorModule) return { HorizonStaking, @@ -21,10 +24,13 @@ export default buildModule('GraphHorizon_Core', (m) => { PaymentsEscrow, PaymentsEscrowImplementation, GraphTallyCollector, + RecurringCollectorProxyAdmin, + RecurringCollector, + RecurringCollectorImplementation, } }) -export const MigrateHorizonCoreModule = buildModule('GraphHorizon_Core', (m) => { +export const MigrateHorizonCoreModule = buildModule('MigrateGraphHorizon_Core', (m) => { const { HorizonStakingProxy: HorizonStaking, HorizonStakingImplementation } = m.useModule( MigrateHorizonStakingDeployerModule, ) diff --git a/packages/horizon/ignition/modules/deploy.ts b/packages/horizon/ignition/modules/deploy.ts index f2f5fecde..428f2e0c7 100644 --- a/packages/horizon/ignition/modules/deploy.ts +++ b/packages/horizon/ignition/modules/deploy.ts @@ -31,6 +31,9 @@ export default buildModule('GraphHorizon_Deploy', (m) => { PaymentsEscrow, PaymentsEscrowImplementation, GraphTallyCollector, + RecurringCollectorProxyAdmin, + RecurringCollector, + RecurringCollectorImplementation, } = m.useModule(GraphHorizonCoreModule) const governor = m.getAccount(1) @@ -74,5 +77,8 @@ export default buildModule('GraphHorizon_Deploy', (m) => { 
Transparent_Proxy_PaymentsEscrow: PaymentsEscrow, Implementation_PaymentsEscrow: PaymentsEscrowImplementation, GraphTallyCollector, + Transparent_ProxyAdmin_RecurringCollector: RecurringCollectorProxyAdmin, + Transparent_Proxy_RecurringCollector: RecurringCollector, + Implementation_RecurringCollector: RecurringCollectorImplementation, } }) diff --git a/packages/horizon/ignition/modules/proxy/TransparentUpgradeableProxy.ts b/packages/horizon/ignition/modules/proxy/TransparentUpgradeableProxy.ts index 35e2ec5a4..30df8b3e3 100644 --- a/packages/horizon/ignition/modules/proxy/TransparentUpgradeableProxy.ts +++ b/packages/horizon/ignition/modules/proxy/TransparentUpgradeableProxy.ts @@ -65,5 +65,9 @@ export function upgradeTransparentUpgradeableProxy( [proxy, implementation, m.encodeFunctionCall(implementation, 'initialize', metadata.initArgs)], options, ) - return loadProxyWithABI(m, proxy, metadata, { ...options, after: [upgradeCall] }) + return loadProxyWithABI(m, proxy, metadata, { + ...options, + id: `${metadata.name}_UpgradedProxyWithABI`, + after: [upgradeCall], + }) } diff --git a/packages/horizon/ignition/modules/proxy/utils.ts b/packages/horizon/ignition/modules/proxy/utils.ts index c6b7f4c2a..23ee71775 100644 --- a/packages/horizon/ignition/modules/proxy/utils.ts +++ b/packages/horizon/ignition/modules/proxy/utils.ts @@ -13,11 +13,12 @@ export function loadProxyWithABI( contract: ImplementationMetadata, options?: ContractOptions, ) { + const { id: customId, ...rest } = options ?? {} let proxyWithABI if (contract.artifact === undefined) { - proxyWithABI = m.contractAt(contract.name, proxy, options) + proxyWithABI = m.contractAt(customId ?? contract.name, proxy, rest) } else { - proxyWithABI = m.contractAt(`${contract.name}_ProxyWithABI`, contract.artifact, proxy, options) + proxyWithABI = m.contractAt(customId ?? 
`${contract.name}_ProxyWithABI`, contract.artifact, proxy, rest) } return proxyWithABI } diff --git a/packages/horizon/package.json b/packages/horizon/package.json index 09eb7eaaf..7662a48a3 100644 --- a/packages/horizon/package.json +++ b/packages/horizon/package.json @@ -34,7 +34,7 @@ "test:self": "forge test", "test:deployment": "SECURE_ACCOUNTS_DISABLE_PROVIDER=true hardhat test test/deployment/*.ts", "test:integration": "./scripts/integration", - "test:coverage": "pnpm build && pnpm test:coverage:self", + "test:coverage": "forge coverage", "test:coverage:self": "mkdir -p coverage && forge coverage --report lcov --report-file coverage/lcov.info", "prepublishOnly": "pnpm run build" }, diff --git a/packages/horizon/test/unit/payments/recurring-collector/BareAgreementOwner.t.sol b/packages/horizon/test/unit/payments/recurring-collector/BareAgreementOwner.t.sol index 2f6324957..70167e576 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/BareAgreementOwner.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/BareAgreementOwner.t.sol @@ -13,12 +13,9 @@ contract BareAgreementOwner is IAgreementOwner { authorizedHashes[agreementHash] = true; } - function approveAgreement(bytes32 agreementHash) external view override returns (bytes4) { - if (!authorizedHashes[agreementHash]) return bytes4(0); - return IAgreementOwner.approveAgreement.selector; - } - function beforeCollection(bytes16, uint256) external override {} function afterCollection(bytes16, uint256) external override {} + + function afterAgreementStateChange(bytes16, bytes32, uint16) external override {} } diff --git a/packages/horizon/test/unit/payments/recurring-collector/MalformedERC165Payer.t.sol b/packages/horizon/test/unit/payments/recurring-collector/MalformedERC165Payer.t.sol new file mode 100644 index 000000000..419302f64 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/MalformedERC165Payer.t.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: MIT 
+pragma solidity ^0.8.27; + +import { IAgreementOwner } from "@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; + +/// @notice Malicious payer that returns empty data from supportsInterface(), +/// causing an ABI decoding revert on the caller side that escapes try/catch. +contract MalformedERC165Payer is IAgreementOwner { + mapping(bytes32 => bool) public authorizedHashes; + + function authorize(bytes32 agreementHash) external { + authorizedHashes[agreementHash] = true; + } + + function beforeCollection(bytes16, uint256) external override {} + + function afterCollection(bytes16, uint256) external override {} + + function afterAgreementStateChange(bytes16, bytes32, uint16) external override {} + + /// @notice Responds to supportsInterface with empty returndata. + /// The call succeeds at the EVM level but the caller cannot ABI-decode the result. + fallback() external { + // solhint-disable-next-line no-inline-assembly + assembly { + return(0, 0) + } + } +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/MockAcceptCallback.t.sol b/packages/horizon/test/unit/payments/recurring-collector/MockAcceptCallback.t.sol new file mode 100644 index 000000000..6523a7b27 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/MockAcceptCallback.t.sol @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.27; + +import { IDataServiceAgreements } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceAgreements.sol"; + +/// @dev Mock data service that implements IDataServiceAgreements for RC unit tests. +/// Simply accepts all agreements without validation. 
+contract MockAcceptCallback is IDataServiceAgreements { + function acceptAgreement( + bytes16, + bytes32, + address, + address, + bytes calldata, + bytes calldata + ) external pure override {} + function afterAgreementStateChange(bytes16, bytes32, uint16) external pure override {} +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/MockAgreementOwner.t.sol b/packages/horizon/test/unit/payments/recurring-collector/MockAgreementOwner.t.sol index 614dab81a..526e8239e 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/MockAgreementOwner.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/MockAgreementOwner.t.sol @@ -1,23 +1,23 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; -import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; import { IAgreementOwner } from "@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; +import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; /// @notice Mock contract approver for testing acceptUnsigned and updateUnsigned. /// Can be configured to return valid selector, wrong value, or revert. -/// Optionally supports IERC165 + IProviderEligibility for eligibility gate testing. -contract MockAgreementOwner is IAgreementOwner, IERC165, IProviderEligibility { +/// Implements IProviderEligibility for eligibility gate testing. +contract MockAgreementOwner is IAgreementOwner, IProviderEligibility, IERC165 { mapping(bytes32 => bool) public authorizedHashes; bool public shouldRevert; bytes4 public overrideReturnValue; bool public useOverride; // -- Eligibility configuration -- - bool public eligibilityEnabled; - mapping(address => bool) public eligibleProviders; - bool public defaultEligible; + // Defaults to true: payers that don't care about eligibility allow all providers. 
+ // Tests that want to deny must explicitly set a provider ineligible. + mapping(address => bool) public ineligibleProviders; function authorize(bytes32 agreementHash) external { authorizedHashes[agreementHash] = true; @@ -32,19 +32,6 @@ contract MockAgreementOwner is IAgreementOwner, IERC165, IProviderEligibility { useOverride = true; } - function approveAgreement(bytes32 agreementHash) external view override returns (bytes4) { - if (shouldRevert) { - revert("MockAgreementOwner: forced revert"); - } - if (useOverride) { - return overrideReturnValue; - } - if (!authorizedHashes[agreementHash]) { - return bytes4(0); - } - return IAgreementOwner.approveAgreement.selector; - } - bytes16 public lastBeforeCollectionAgreementId; uint256 public lastBeforeCollectionTokens; bool public shouldRevertOnBeforeCollection; @@ -77,31 +64,22 @@ contract MockAgreementOwner is IAgreementOwner, IERC165, IProviderEligibility { lastCollectedTokens = tokensCollected; } - // -- ERC165 + IProviderEligibility -- + function afterAgreementStateChange(bytes16, bytes32, uint16) external override {} - /// @notice Enable ERC165 reporting of IProviderEligibility support - function setEligibilityEnabled(bool _enabled) external { - eligibilityEnabled = _enabled; - } + // -- IERC165 -- - /// @notice Set whether a specific provider is eligible - function setProviderEligible(address provider, bool _eligible) external { - eligibleProviders[provider] = _eligible; + function supportsInterface(bytes4 interfaceId) external pure override returns (bool) { + return interfaceId == type(IERC165).interfaceId || interfaceId == type(IProviderEligibility).interfaceId; } - /// @notice Set default eligibility for providers not explicitly configured - function setDefaultEligible(bool _eligible) external { - defaultEligible = _eligible; - } + // -- IProviderEligibility -- - function supportsInterface(bytes4 interfaceId) external view override returns (bool) { - if (interfaceId == type(IERC165).interfaceId) return true; 
- if (interfaceId == type(IProviderEligibility).interfaceId) return eligibilityEnabled; - return false; + /// @notice Mark a provider as ineligible (default is eligible) + function setProviderIneligible(address provider) external { + ineligibleProviders[provider] = true; } function isEligible(address indexer) external view override returns (bool) { - if (eligibleProviders[indexer]) return true; - return defaultEligible; + return !ineligibleProviders[indexer]; } } diff --git a/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorAuthorizableTest.t.sol b/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorAuthorizableTest.t.sol deleted file mode 100644 index b4d109678..000000000 --- a/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorAuthorizableTest.t.sol +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.27; - -import { IAuthorizable } from "@graphprotocol/interfaces/contracts/horizon/IAuthorizable.sol"; -import { RecurringCollector } from "../../../../contracts/payments/collectors/RecurringCollector.sol"; - -import { AuthorizableTest } from "../../../unit/utilities/Authorizable.t.sol"; -import { InvalidControllerMock } from "../../mocks/InvalidControllerMock.t.sol"; - -contract RecurringCollectorAuthorizableTest is AuthorizableTest { - function newAuthorizable(uint256 thawPeriod) public override returns (IAuthorizable) { - return new RecurringCollector("RecurringCollector", "1", address(new InvalidControllerMock()), thawPeriod); - } -} diff --git a/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorHelper.t.sol b/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorHelper.t.sol index 9a01754aa..ccd5d376b 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorHelper.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/RecurringCollectorHelper.t.sol @@ -2,83 +2,18 @@ pragma 
solidity ^0.8.27; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; import { RecurringCollector } from "../../../../contracts/payments/collectors/RecurringCollector.sol"; -import { AuthorizableHelper } from "../../../unit/utilities/Authorizable.t.sol"; import { Bounder } from "../../../unit/utils/Bounder.t.sol"; +import { ERC165Checker } from "@openzeppelin/contracts/utils/introspection/ERC165Checker.sol"; -contract RecurringCollectorHelper is AuthorizableHelper, Bounder { +contract RecurringCollectorHelper is Bounder { RecurringCollector public collector; + address public proxyAdmin; - constructor( - RecurringCollector collector_ - ) AuthorizableHelper(collector_, collector_.REVOKE_AUTHORIZATION_THAWING_PERIOD()) { + constructor(RecurringCollector collector_, address proxyAdmin_) { collector = collector_; - } - - function generateSignedRCA( - IRecurringCollector.RecurringCollectionAgreement memory rca, - uint256 signerPrivateKey - ) public view returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes memory) { - bytes32 messageHash = collector.hashRCA(rca); - (uint8 v, bytes32 r, bytes32 s) = vm.sign(signerPrivateKey, messageHash); - bytes memory signature = abi.encodePacked(r, s, v); - - return (rca, signature); - } - - function generateSignedRCAU( - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, - uint256 signerPrivateKey - ) public view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory, bytes memory) { - bytes32 messageHash = collector.hashRCAU(rcau); - (uint8 v, bytes32 r, bytes32 s) = vm.sign(signerPrivateKey, messageHash); - bytes memory signature = abi.encodePacked(r, s, v); - - return (rcau, signature); - } - - function generateSignedRCAUForAgreement( - bytes16 agreementId, - 
IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, - uint256 signerPrivateKey - ) public view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory, bytes memory) { - // Automatically set the correct nonce based on current agreement state - IRecurringCollector.AgreementData memory agreement = collector.getAgreement(agreementId); - rcau.nonce = agreement.updateNonce + 1; - - return generateSignedRCAU(rcau, signerPrivateKey); - } - - function generateSignedRCAUWithCorrectNonce( - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, - uint256 signerPrivateKey - ) public view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory, bytes memory) { - // This is kept for backwards compatibility but should not be used with new interface - // since we can't determine agreementId without it being passed separately - return generateSignedRCAU(rcau, signerPrivateKey); - } - - function generateSignedRCAWithCalculatedId( - IRecurringCollector.RecurringCollectionAgreement memory rca, - uint256 signerPrivateKey - ) public view returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes memory, bytes16) { - // Ensure we have sensible values - rca = sensibleRCA(rca); - - // Calculate the agreement ID - bytes16 agreementId = collector.generateAgreementId( - rca.payer, - rca.dataService, - rca.serviceProvider, - rca.deadline, - rca.nonce - ); - - (IRecurringCollector.RecurringCollectionAgreement memory signedRca, bytes memory signature) = generateSignedRCA( - rca, - signerPrivateKey - ); - return (signedRca, signature, agreementId); + proxyAdmin = proxyAdmin_; } function withElapsedAcceptDeadline( @@ -101,9 +36,17 @@ contract RecurringCollectorHelper is AuthorizableHelper, Bounder { function sensibleRCA( IRecurringCollector.RecurringCollectionAgreement memory rca ) public view returns (IRecurringCollector.RecurringCollectionAgreement memory) { - vm.assume(rca.dataService != address(0)); - vm.assume(rca.payer 
!= address(0)); - vm.assume(rca.serviceProvider != address(0)); + vm.assume(uint160(rca.dataService) > 0xFF); + vm.assume(uint160(rca.payer) > 0xFF); + vm.assume(uint160(rca.serviceProvider) > 0xFF); + // Exclude ProxyAdmin address — TransparentProxy routes admin calls to ProxyAdmin, not implementation + vm.assume(rca.dataService != proxyAdmin); + vm.assume(rca.payer != proxyAdmin); + vm.assume(rca.serviceProvider != proxyAdmin); + // Prevent role collisions — cancel() resolves role by address priority + vm.assume(rca.payer != rca.serviceProvider); + vm.assume(rca.payer != rca.dataService); + vm.assume(rca.serviceProvider != rca.dataService); // Ensure we have a nonce if it's zero if (rca.nonce == 0) { @@ -122,11 +65,24 @@ contract RecurringCollectorHelper is AuthorizableHelper, Bounder { rca.maxInitialTokens = _sensibleMaxInitialTokens(rca.maxInitialTokens); rca.maxOngoingTokensPerSecond = _sensibleMaxOngoingTokensPerSecond(rca.maxOngoingTokensPerSecond); + // CONDITION_ELIGIBILITY_CHECK requires payer to support IProviderEligibility via ERC-165. + // Mask it out in fuzz-generated offers when the payer can't satisfy the check. 
+ if (!ERC165Checker.supportsInterface(rca.payer, type(IProviderEligibility).interfaceId)) { + rca.conditions = rca.conditions & ~uint16(collector.CONDITION_ELIGIBILITY_CHECK()); + } + return rca; } function sensibleRCAU( IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau + ) public view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory) { + return sensibleRCAU(rcau, address(0)); + } + + function sensibleRCAU( + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, + address payer ) public view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory) { rcau.minSecondsPerCollection = _sensibleMinSecondsPerCollection(rcau.minSecondsPerCollection); rcau.maxSecondsPerCollection = _sensibleMaxSecondsPerCollection( @@ -139,6 +95,12 @@ contract RecurringCollectorHelper is AuthorizableHelper, Bounder { rcau.maxInitialTokens = _sensibleMaxInitialTokens(rcau.maxInitialTokens); rcau.maxOngoingTokensPerSecond = _sensibleMaxOngoingTokensPerSecond(rcau.maxOngoingTokensPerSecond); + // CONDITION_ELIGIBILITY_CHECK requires payer to support IProviderEligibility via ERC-165. + // Mask it out in fuzz-generated updates when the payer can't satisfy the check. 
+ if (payer != address(0) && !ERC165Checker.supportsInterface(payer, type(IProviderEligibility).interfaceId)) { + rcau.conditions = rcau.conditions & ~uint16(collector.CONDITION_ELIGIBILITY_CHECK()); + } + return rcau; } diff --git a/packages/horizon/test/unit/payments/recurring-collector/accept.t.sol b/packages/horizon/test/unit/payments/recurring-collector/accept.t.sol index 8404db85e..d87bff391 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/accept.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/accept.t.sol @@ -1,6 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { + REGISTERED, + ACCEPTED, + OFFER_TYPE_NEW +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringCollectorSharedTest } from "./shared.t.sol"; @@ -13,54 +18,57 @@ contract RecurringCollectorAcceptTest is RecurringCollectorSharedTest { /* solhint-disable graph/func-name-mixedcase */ function test_Accept(FuzzyTestAccept calldata fuzzyTestAccept) public { - _sensibleAuthorizeAndAccept(fuzzyTestAccept); + (, bytes16 agreementId) = _sensibleAccept(fuzzyTestAccept); + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + assertEq(agreement.state, REGISTERED | ACCEPTED); } function test_Accept_Revert_WhenAcceptanceDeadlineElapsed( - IRecurringCollector.RecurringCollectionAgreement memory fuzzyRCA, - bytes memory fuzzySignature, + FuzzyTestAccept calldata fuzzyTestAccept, uint256 unboundedSkip ) public { - // Ensure non-empty signature so the signed path is taken (which checks deadline first) - vm.assume(fuzzySignature.length > 0); - // Generate deterministic agreement ID for validation - bytes16 agreementId = _recurringCollector.generateAgreementId( - fuzzyRCA.payer, - fuzzyRCA.dataService, - fuzzyRCA.serviceProvider, - fuzzyRCA.deadline, - 
fuzzyRCA.nonce + // Store an offer while deadline is still valid + (, bytes16 agreementId) = _sensibleOffer(fuzzyTestAccept); + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + + // Decode the deadline from the active offer + (, bytes memory offerData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreement memory rca = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreement) ); - vm.assume(agreementId != bytes16(0)); + uint64 deadline = rca.deadline; + + // Skip time past the deadline skip(boundSkip(unboundedSkip, 1, type(uint64).max - block.timestamp)); - fuzzyRCA = _recurringCollectorHelper.withElapsedAcceptDeadline(fuzzyRCA); + vm.assume(block.timestamp > deadline); bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorAgreementDeadlineElapsed.selector, + IRecurringCollector.AgreementDeadlineElapsed.selector, block.timestamp, - fuzzyRCA.deadline + deadline ); vm.expectRevert(expectedErr); - vm.prank(fuzzyRCA.dataService); - _recurringCollector.accept(fuzzyRCA, fuzzySignature); + vm.prank(agreement.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); } function test_Accept_Revert_WhenAlreadyAccepted(FuzzyTestAccept calldata fuzzyTestAccept) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - bytes memory signature, - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestAccept + ); + // Re-offering the same RCA should fail in offer() because the agreement + // is already in Accepted state (not NotAccepted) bytes memory expectedErr = abi.encodeWithSelector( - 
IRecurringCollector.RecurringCollectorAgreementIncorrectState.selector, + IRecurringCollector.AgreementIncorrectState.selector, agreementId, - IRecurringCollector.AgreementState.Accepted + REGISTERED | ACCEPTED ); vm.expectRevert(expectedErr); - vm.prank(acceptedRca.dataService); - _recurringCollector.accept(acceptedRca, signature); + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(acceptedRca), 0); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/horizon/test/unit/payments/recurring-collector/acceptUnsigned.t.sol b/packages/horizon/test/unit/payments/recurring-collector/acceptUnsigned.t.sol index 153b69141..fc79cc1b2 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/acceptUnsigned.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/acceptUnsigned.t.sol @@ -1,6 +1,11 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { + REGISTERED, + ACCEPTED, + OFFER_TYPE_NEW +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringCollectorSharedTest } from "./shared.t.sol"; @@ -24,6 +29,8 @@ contract RecurringCollectorAcceptUnsignedTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: 1 ether, minSecondsPerCollection: 600, maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: 1, metadata: "" }) @@ -39,9 +46,6 @@ contract RecurringCollectorAcceptUnsignedTest is RecurringCollectorSharedTest { ); rca.payer = address(approver); - bytes32 agreementHash = _recurringCollector.hashRCA(rca); - approver.authorize(agreementHash); - _setupValidProvision(rca.serviceProvider, rca.dataService); bytes16 expectedId = _recurringCollector.generateAgreementId( @@ -52,81 +56,77 @@ contract RecurringCollectorAcceptUnsignedTest is RecurringCollectorSharedTest { rca.nonce ); - 
vm.expectEmit(address(_recurringCollector)); - emit IRecurringCollector.AgreementAccepted( - rca.dataService, - rca.payer, - rca.serviceProvider, - expectedId, - uint64(block.timestamp), - rca.endsAt, - rca.maxInitialTokens, - rca.maxOngoingTokensPerSecond, - rca.minSecondsPerCollection, - rca.maxSecondsPerCollection - ); - - vm.prank(rca.dataService); - bytes16 agreementId = _recurringCollector.accept(rca, ""); + // Payer calls offer + vm.prank(address(approver)); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; assertEq(agreementId, expectedId); - IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); - assertEq(uint8(agreement.state), uint8(IRecurringCollector.AgreementState.Accepted)); - assertEq(agreement.payer, address(approver)); - assertEq(agreement.serviceProvider, rca.serviceProvider); - assertEq(agreement.dataService, rca.dataService); - } + // Data service accepts with stored hash + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; - function test_AcceptUnsigned_Revert_WhenPayerNotContract() public { - address eoa = makeAddr("eoa"); - IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(eoa); + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementUpdated(expectedId, activeHash, REGISTERED | ACCEPTED); - vm.expectRevert( - abi.encodeWithSelector(IRecurringCollector.RecurringCollectorApproverNotContract.selector, eoa) - ); - vm.prank(rca.dataService); - _recurringCollector.accept(rca, ""); + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); + + IRecurringCollector.AgreementData memory accepted = _recurringCollector.getAgreementData(agreementId); + assertEq(accepted.state, REGISTERED | ACCEPTED); + assertEq(accepted.payer, address(approver)); + assertEq(accepted.serviceProvider, rca.serviceProvider); + 
assertEq(accepted.dataService, rca.dataService); + assertEq(accepted.state, REGISTERED | ACCEPTED); } - function test_AcceptUnsigned_Revert_WhenHashNotAuthorized() public { - MockAgreementOwner approver = _newApprover(); - IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + function test_AcceptUnsigned_OK_WhenPayerIsEOA() public { + address eoa = makeAddr("eoa"); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(eoa); - // Don't authorize the hash - vm.expectRevert(); - vm.prank(rca.dataService); - _recurringCollector.accept(rca, ""); + _setupValidProvision(rca.serviceProvider, rca.dataService); + vm.prank(eoa); + IRecurringCollector.OfferResult memory result = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + assertTrue(result.agreementId != bytes16(0)); } function test_AcceptUnsigned_Revert_WhenWrongMagicValue() public { + // With the offer/accept path, the "wrong magic value" concept no longer applies + // since there is no approveAgreement callback. Instead, test that a non-payer + // cannot call offer. 
MockAgreementOwner approver = _newApprover(); IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); - approver.setOverrideReturnValue(bytes4(0xdeadbeef)); - - vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.RecurringCollectorInvalidSigner.selector)); - vm.prank(rca.dataService); - _recurringCollector.accept(rca, ""); + // Someone other than payer tries to call offer + address notPayer = makeAddr("notPayer"); + vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.UnauthorizedPayer.selector, notPayer, address(approver)) + ); + vm.prank(notPayer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); } function test_AcceptUnsigned_Revert_WhenNotDataService() public { MockAgreementOwner approver = _newApprover(); IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); - bytes32 agreementHash = _recurringCollector.hashRCA(rca); - approver.authorize(agreementHash); + _setupValidProvision(rca.serviceProvider, rca.dataService); - address notDataService = makeAddr("notDataService"); + // Payer calls offer + vm.prank(address(approver)); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + + address notServiceProvider = makeAddr("notServiceProvider"); vm.expectRevert( abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorUnauthorizedCaller.selector, - notDataService, - rca.dataService + IRecurringCollector.UnauthorizedServiceProvider.selector, + notServiceProvider, + rca.serviceProvider ) ); - vm.prank(notDataService); - _recurringCollector.accept(rca, ""); + vm.prank(notServiceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); } function test_AcceptUnsigned_Revert_WhenAlreadyAccepted(FuzzyTestAccept calldata fuzzyTestAccept) public { @@ -136,53 +136,97 @@ contract 
RecurringCollectorAcceptUnsignedTest is RecurringCollectorSharedTest { ); rca.payer = address(approver); - bytes32 agreementHash = _recurringCollector.hashRCA(rca); - approver.authorize(agreementHash); - _setupValidProvision(rca.serviceProvider, rca.dataService); - vm.prank(rca.dataService); - bytes16 agreementId = _recurringCollector.accept(rca, ""); + // Payer calls offer + vm.prank(address(approver)); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + // Service provider accepts + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); + // Second accept should fail — no pending update, so terms are empty bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorAgreementIncorrectState.selector, - agreementId, - IRecurringCollector.AgreementState.Accepted + IRecurringCollector.AgreementTermsEmpty.selector, + agreementId ); vm.expectRevert(expectedErr); - vm.prank(rca.dataService); - _recurringCollector.accept(rca, ""); + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); } function test_AcceptUnsigned_Revert_WhenApproverReverts() public { + // With the offer/accept path, the payer calls offer() directly. + // "Approver reverts" doesn't apply the same way. Instead, test that + // accept() with a wrong hash reverts. 
MockAgreementOwner approver = _newApprover(); IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); - approver.setShouldRevert(true); + _setupValidProvision(rca.serviceProvider, rca.dataService); + + // Payer calls offer + vm.prank(address(approver)); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; - vm.expectRevert("MockAgreementOwner: forced revert"); - vm.prank(rca.dataService); - _recurringCollector.accept(rca, ""); + // Accept with wrong hash should revert + bytes32 wrongHash = bytes32(uint256(1)); + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.AgreementHashMismatch.selector, + agreementId, + activeHash, + wrongHash + ) + ); + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, wrongHash, bytes(""), 0); } - function test_AcceptUnsigned_Revert_WhenDeadlineElapsed() public { + function test_AcceptUnsigned_Revert_WhenEndsAtElapsed() public { MockAgreementOwner approver = _newApprover(); IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + // Set deadline far in the future so the endsAt check fires first + rca.deadline = type(uint64).max; - bytes32 agreementHash = _recurringCollector.hashRCA(rca); - approver.authorize(agreementHash); - - // Advance time past the deadline - vm.warp(rca.deadline + 1); + // Advance time past endsAt so the offer is rejected + vm.warp(rca.endsAt + 1); bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorAgreementDeadlineElapsed.selector, - block.timestamp, - rca.deadline + IRecurringCollector.AgreementInvalidCollectionWindow.selector, + IRecurringCollector.InvalidCollectionWindowReason.ElapsedEndsAt, + rca.minSecondsPerCollection, + rca.maxSecondsPerCollection ); vm.expectRevert(expectedErr); - vm.prank(rca.dataService); - 
_recurringCollector.accept(rca, ""); + vm.prank(address(approver)); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + function test_AcceptUnsigned_Revert_WhenHashNotAuthorized() public { + // With the offer/accept path, the hash is stored by offer(). + // There is no separate "authorization" step. This test now verifies that + // accept() with a mismatched hash fails. + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + // Payer calls offer + vm.prank(address(approver)); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + // Try accept with a completely wrong hash + bytes32 badHash = bytes32(uint256(0xdead)); + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.AgreementHashMismatch.selector, agreementId, activeHash, badHash) + ); + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, badHash, bytes(""), 0); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/horizon/test/unit/payments/recurring-collector/acceptValidation.t.sol b/packages/horizon/test/unit/payments/recurring-collector/acceptValidation.t.sol new file mode 100644 index 000000000..2666e7931 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/acceptValidation.t.sol @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; + +/// @notice Tests for validation branch coverage in RecurringCollector.accept(). 
+contract RecurringCollectorAcceptValidationTest is RecurringCollectorSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + function _makeValidRCA() internal returns (IRecurringCollector.RecurringCollectionAgreement memory) { + return + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: makeAddr("payer"), + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + } + + function _offerAndAccept(IRecurringCollector.RecurringCollectionAgreement memory rca) internal { + _setupValidProvision(rca.serviceProvider, rca.dataService); + // Step 1: Payer submits offer + vm.prank(rca.payer); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + // Step 2: Service provider accepts + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); + } + + // ==================== Zero address checks (L175) ==================== + + function test_Accept_Revert_WhenDataServiceZero() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeValidRCA(); + rca.dataService = address(0); + + // offer() checks addresses via _storeOffer + vm.expectRevert(IRecurringCollector.AgreementAddressNotSet.selector); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + // Note: payer=0 is impractical to test directly because authorization + // (L150) fails before the address check (L175). The zero-address branch + // is covered by the dataService=0 and serviceProvider=0 tests. 
+ + function test_Accept_Revert_WhenServiceProviderZero() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeValidRCA(); + rca.serviceProvider = address(0); + + // offer() checks addresses via _storeOffer + vm.expectRevert(IRecurringCollector.AgreementAddressNotSet.selector); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + // ==================== endsAt validation (L545) ==================== + + function test_Accept_Revert_WhenEndsAtInPast() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeValidRCA(); + rca.endsAt = uint64(block.timestamp); // endsAt == now, fails "endsAt > block.timestamp" + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + // offer() validates endsAt via _storeOffer + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.AgreementInvalidCollectionWindow.selector, + IRecurringCollector.InvalidCollectionWindowReason.ElapsedEndsAt, + rca.minSecondsPerCollection, + rca.maxSecondsPerCollection + ) + ); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + // ==================== Collection window validation (L548) ==================== + + function test_Accept_Revert_WhenCollectionWindowTooSmall() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeValidRCA(); + // min=600, max=1000 -> difference = 400 < MIN_SECONDS_COLLECTION_WINDOW (600) + rca.minSecondsPerCollection = 600; + rca.maxSecondsPerCollection = 1000; + rca.endsAt = uint64(block.timestamp + 365 days); + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + // offer() validates collection window via _storeOffer + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.AgreementInvalidCollectionWindow.selector, + IRecurringCollector.InvalidCollectionWindowReason.InvalidWindow, + rca.minSecondsPerCollection, + rca.maxSecondsPerCollection + ) + ); + vm.prank(rca.payer); + 
_recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + function test_Accept_Revert_WhenMaxEqualsMin() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeValidRCA(); + // max == min -> fails "maxSecondsPerCollection > minSecondsPerCollection" + rca.minSecondsPerCollection = 3600; + rca.maxSecondsPerCollection = 3600; + rca.endsAt = uint64(block.timestamp + 365 days); + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + // offer() validates collection window via _storeOffer + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.AgreementInvalidCollectionWindow.selector, + IRecurringCollector.InvalidCollectionWindowReason.InvalidWindow, + rca.minSecondsPerCollection, + rca.maxSecondsPerCollection + ) + ); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + // ==================== Duration validation (L560) ==================== + + function test_Accept_Revert_WhenDurationTooShort() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeValidRCA(); + // Need: endsAt - now >= minSecondsPerCollection + MIN_SECONDS_COLLECTION_WINDOW + // Set duration just under the minimum + uint32 minWindow = _recurringCollector.MIN_SECONDS_COLLECTION_WINDOW(); + rca.minSecondsPerCollection = 600; + rca.maxSecondsPerCollection = 600 + minWindow; // valid window + rca.endsAt = uint64(block.timestamp + rca.minSecondsPerCollection + minWindow - 1); // 1 second too short + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + // offer() validates duration via _storeOffer + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.AgreementInvalidCollectionWindow.selector, + IRecurringCollector.InvalidCollectionWindowReason.InsufficientDuration, + rca.minSecondsPerCollection, + rca.maxSecondsPerCollection + ) + ); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + // ==================== Overflow 
validation (maxOngoingTokensPerSecond * maxSecondsPerCollection) ==================== + + function test_Offer_Revert_WhenMaxOngoingTokensOverflows() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeValidRCA(); + // maxOngoingTokensPerSecond * maxSecondsPerCollection overflows uint256 + rca.maxOngoingTokensPerSecond = type(uint256).max; + rca.maxSecondsPerCollection = 3600; + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + vm.expectRevert(abi.encodeWithSignature("Panic(uint256)", 0x11)); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + function test_Offer_OK_WhenMaxOngoingTokensAtBoundary() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeValidRCA(); + // Largest value that does not overflow: type(uint256).max / maxSecondsPerCollection + rca.maxOngoingTokensPerSecond = type(uint256).max / uint256(rca.maxSecondsPerCollection); + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + // Should succeed — product fits in uint256 + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + // ==================== Caller authorization (L173) ==================== + + function test_Accept_Revert_WhenCallerNotServiceProvider() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeValidRCA(); + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + // Step 1: Payer submits offer + vm.prank(rca.payer); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + + // Step 2: Wrong caller tries to accept - should revert + address wrongCaller = makeAddr("wrongCaller"); + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.UnauthorizedServiceProvider.selector, + wrongCaller, + rca.serviceProvider + ) + ); + vm.prank(wrongCaller); + 
_recurringCollector.accept(agreementId, activeHash, bytes(""), 0); + } + + // ==================== Empty pending terms (L706) ==================== + + function test_Accept_Revert_WhenPendingTermsEmpty() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeValidRCA(); + + // Offer and accept to reach REGISTERED | ACCEPTED state + _offerAndAccept(rca); + bytes16 agreementId = _recurringCollector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + + // No update was offered so pendingTerms.hash == bytes32(0). + // Attempting to accept pending terms with versionHash = 0 should revert + // with an explicit empty-terms guard, not rely on the deadline check. + vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.AgreementTermsEmpty.selector, agreementId)); + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, bytes32(0), bytes(""), 0); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/afterCollection.t.sol b/packages/horizon/test/unit/payments/recurring-collector/afterCollection.t.sol index c84958daf..80a1992fc 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/afterCollection.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/afterCollection.t.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; @@ -27,17 +28,23 @@ contract RecurringCollectorAfterCollectionTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: 1 ether, minSecondsPerCollection: 600, maxSecondsPerCollection: 3600, + conditions: 0, + 
minSecondsPayerCancellationNotice: 0, nonce: 1, metadata: "" }) ); - bytes32 agreementHash = _recurringCollector.hashRCA(rca); - approver.authorize(agreementHash); _setupValidProvision(rca.serviceProvider, rca.dataService); - vm.prank(rca.dataService); - agreementId = _recurringCollector.accept(rca, ""); + // Payer calls offer + vm.prank(address(approver)); + agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + // Data service accepts with stored hash + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); } /* solhint-disable graph/func-name-mixedcase */ @@ -114,6 +121,14 @@ contract RecurringCollectorAfterCollectionTest is RecurringCollectorSharedTest { uint256 tokens = 1 ether; bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); + // Expect PayerCallbackFailed event for the afterCollection stage + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.PayerCallbackFailed( + agreementId, + address(approver), + IRecurringCollector.PayerCallbackStage.AfterCollection + ); + // Collection should still succeed despite callback reverting vm.prank(rca.dataService); uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); @@ -124,9 +139,49 @@ contract RecurringCollectorAfterCollectionTest is RecurringCollectorSharedTest { assertEq(approver.lastCollectedTokens(), 0); } + function test_Collect_Revert_WhenInsufficientCallbackGas() public { + MockAgreementOwner approver = _newApprover(); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( + approver + ); + + skip(rca.minSecondsPerCollection); + uint256 tokens = 1 ether; + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), 
tokens, 0)); + + // Encode the outer collect call + bytes memory callData = abi.encodeCall( + _recurringCollector.collect, + (IGraphPayments.PaymentTypes.IndexingFee, data) + ); + + // Binary-search for a gas limit that passes core collect logic but trips the + // callback gas guard (gasleft < MAX_CALLBACK_GAS * 64/63 ≈ 1_523_810). + // Core logic + escrow call + beforeCollection + events uses ~200k gas. + bool triggered; + for (uint256 gasLimit = 1_700_000; gasLimit > 1_500_000; gasLimit -= 10_000) { + uint256 snap = vm.snapshot(); + vm.prank(rca.dataService); + (bool success, bytes memory returnData) = address(_recurringCollector).call{ gas: gasLimit }(callData); + if (!success && returnData.length >= 4) { + bytes4 selector; + assembly { + selector := mload(add(returnData, 32)) + } + if (selector == IRecurringCollector.InsufficientCallbackGas.selector) { + triggered = true; + assertTrue(vm.revertTo(snap)); + break; + } + } + assertTrue(vm.revertTo(snap)); + } + assertTrue(triggered, "Should have triggered InsufficientCallbackGas at some gas limit"); + } + function test_AfterCollection_NotCalledForEOAPayer(FuzzyTestCollect calldata fuzzy) public { - // Use standard ECDSA-signed path (EOA payer, no contract) - (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, , , ) = _sensibleAuthorizeAndAccept( + // EOA payer — no contract code, so afterCollection is skipped + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, ) = _sensibleAccept( fuzzy.fuzzyTestAccept ); diff --git a/packages/horizon/test/unit/payments/recurring-collector/autoUpgrade.t.sol b/packages/horizon/test/unit/payments/recurring-collector/autoUpgrade.t.sol new file mode 100644 index 000000000..889d780c2 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/autoUpgrade.t.sol @@ -0,0 +1,599 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { + REGISTERED, + ACCEPTED, + SETTLED, + AUTO_UPDATE, + AUTO_UPDATED, + NOTICE_GIVEN, + 
OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE, + WITH_NOTICE +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IDataServiceAgreements } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceAgreements.sol"; +import { Vm } from "forge-std/Vm.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; +import { MockAgreementOwner } from "./MockAgreementOwner.t.sol"; + +/// @dev Mock data service that reverts on acceptAgreement +contract RevertingAcceptCallback is IDataServiceAgreements { + function acceptAgreement( + bytes16, + bytes32, + address, + address, + bytes calldata, + bytes calldata + ) external pure override { + revert("reject upgrade"); + } + function afterAgreementStateChange(bytes16, bytes32, uint16) external pure override {} +} + +/// @notice Tests for the auto-update mechanism: AUTO_UPDATE flag, WITH_NOTICE offers, +/// and automatic promotion of pending terms during the final collect. 
+contract RecurringCollectorAutoUpgradeTest is RecurringCollectorSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + MockAgreementOwner internal _approver; + bytes internal _revertingCallbackCode; + + function setUp() public override { + super.setUp(); + _approver = new MockAgreementOwner(); + _revertingCallbackCode = address(new RevertingAcceptCallback()).code; + } + + // ============================================================ + // Helper: create a basic accepted agreement with given options + // ============================================================ + + function _makeAcceptedAgreement( + uint16 options + ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) { + rca = _recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 30 days), + payer: address(_approver), + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 0, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 1 days, + nonce: 1, + metadata: "" + }) + ); + + _setupValidProvision(rca.serviceProvider, rca.dataService); + vm.prank(address(_approver)); + agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), options); + } + + function _offerUpgrade( + IRecurringCollector.RecurringCollectionAgreement memory, + bytes16 agreementId, + uint64 upgradeDeadline, + uint64 newEndsAt + ) internal { + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: 
upgradeDeadline, + endsAt: newEndsAt, + maxInitialTokens: 0, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 1 days, + nonce: 1, + metadata: "" + }); + vm.prank(address(_approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), WITH_NOTICE); + } + + function _collectFull(IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) internal { + // Decode active terms to get endsAt + (, bytes memory offerData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + uint64 activeEndsAt; + { + // Could be OFFER_TYPE_NEW or OFFER_TYPE_UPDATE; both have endsAt at same decode position + // Use a simpler approach: just decode the endsAt from the active offer + (uint8 offerType, ) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + if (offerType == OFFER_TYPE_NEW) { + IRecurringCollector.RecurringCollectionAgreement memory activeRca = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreement) + ); + activeEndsAt = activeRca.endsAt; + } else { + IRecurringCollector.RecurringCollectionAgreementUpdate memory activeRcau = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + activeEndsAt = activeRcau.endsAt; + } + } + // Warp to endsAt + buffer to ensure we're past it + vm.warp(activeEndsAt + 1); + + IRecurringCollector.CollectParams memory params = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: bytes32("final"), + tokens: 0, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, abi.encode(params)); + } + + // ============================================================ + // accept() options tests + // ============================================================ + + function 
test_Accept_SetsAutoUpgradeFlag() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + rca; // silence unused warning + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(agreement.state & AUTO_UPDATE != 0, "AUTO_UPDATE should be set"); + } + + function test_Accept_NoAutoUpgradeByDefault() public { + (, bytes16 agreementId) = _makeAcceptedAgreement(0); + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(agreement.state & AUTO_UPDATE == 0, "AUTO_UPDATE should not be set"); + } + + function test_Accept_TogglesAutoUpgradeOnUpdate() public { + // Accept with AUTO_UPDATE + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + + // Offer an update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 60 days), + maxInitialTokens: 0, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 1 days, + nonce: 1, + metadata: "" + }); + vm.prank(address(_approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + // Accept update with options=0 (clear AUTO_UPDATE) + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); + + IRecurringCollector.AgreementData memory updatedAgreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(updatedAgreement.state & AUTO_UPDATE == 0, "AUTO_UPDATE should be cleared"); + } + + // 
============================================================ + // WITH_NOTICE offer tests + // ============================================================ + + function test_OfferWithNotice_SetsCanceledAt() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + + uint64 upgradeDeadline = uint64(block.timestamp + 2 days); + uint64 newEndsAt = uint64(block.timestamp + 365 days); + _offerUpgrade(rca, agreementId, upgradeDeadline, newEndsAt); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + // Active terms are NOT modified — collectableUntil is reduced instead + assertEq(agreement.collectableUntil, upgradeDeadline, "collectableUntil should be set to deadline"); + assertTrue(agreement.state & NOTICE_GIVEN != 0, "NOTICE_GIVEN should be set"); + (, bytes memory offerData) = _recurringCollector.getAgreementOfferAt(agreementId, 1); + IRecurringCollector.RecurringCollectionAgreementUpdate memory pendingRcau = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(pendingRcau.endsAt, newEndsAt, "pending endsAt should be the new terms"); + } + + function test_OfferWithNotice_DeadlineZero_AutoComputes() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + + uint64 newEndsAt = uint64(block.timestamp + 365 days); + _offerUpgrade(rca, agreementId, 0, newEndsAt); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + // deadline=0 → auto-set collectableUntil to block.timestamp + minSecondsPayerCancellationNotice + uint64 expectedCollectableUntil = uint64(block.timestamp) + rca.minSecondsPayerCancellationNotice; + assertEq( + agreement.collectableUntil, + expectedCollectableUntil, + "collectableUntil should be auto-computed from min notice" + ); + } + + 
function test_OfferUpgrade_Revert_WhenInsufficientNotice() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + + // Deadline is 1 second from now, but min notice is 1 day + uint64 tooSoonDeadline = uint64(block.timestamp + 1); + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: tooSoonDeadline, + endsAt: uint64(block.timestamp + 365 days), + maxInitialTokens: 0, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 1 days, + nonce: 1, + metadata: "" + }); + + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.InsufficientNotice.selector, + agreementId, + rca.minSecondsPayerCancellationNotice, + uint256(tooSoonDeadline - block.timestamp) + ) + ); + vm.prank(address(_approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), WITH_NOTICE); + } + + // ============================================================ + // Auto-upgrade on final collect + // ============================================================ + + function test_Collect_AutoUpgrades_WhenPendingAndFlagSet() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + + // Offer upgrade with deadline = now + 2 days + uint64 upgradeDeadline = uint64(block.timestamp + 2 days); + uint64 newEndsAt = uint64(block.timestamp + 365 days); + _offerUpgrade(rca, agreementId, upgradeDeadline, newEndsAt); + + // Warp past the upgrade deadline (which is now the active endsAt) + vm.warp(upgradeDeadline + 1); + + // Final collect should trigger auto-upgrade + IRecurringCollector.CollectParams memory params = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: bytes32("final"), + 
tokens: 0, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AutoUpdateAttempted(agreementId, true); + + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, abi.encode(params)); + + // Verify: agreement is NOT settled, active terms are the upgraded ones + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(agreement.state & SETTLED == 0, "should NOT be settled after upgrade"); + { + (, bytes memory activeOfferData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreementUpdate memory activeRcau = abi.decode( + activeOfferData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(activeRcau.endsAt, newEndsAt, "active endsAt should be new terms"); + assertEq(activeRcau.maxOngoingTokensPerSecond, 2 ether, "active rate should be new terms"); + } + // Pending terms should be cleared (only 1 version) + assertEq(_recurringCollector.getAgreementVersionCount(agreementId), 1, "pending terms should be cleared"); + assertTrue(agreement.state & AUTO_UPDATE != 0, "AUTO_UPDATE should be preserved"); + assertTrue(agreement.state & AUTO_UPDATED != 0, "AUTO_UPDATED should be set after auto-update"); + } + + function test_Collect_Settles_WhenAutoUpgradeCallbackReverts() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + + uint64 upgradeDeadline = uint64(block.timestamp + 2 days); + uint64 newEndsAt = uint64(block.timestamp + 365 days); + _offerUpgrade(rca, agreementId, upgradeDeadline, newEndsAt); + + // Replace data service code with reverting callback + vm.etch(rca.dataService, _revertingCallbackCode); + + vm.warp(upgradeDeadline + 1); + + IRecurringCollector.CollectParams memory 
params = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: bytes32("final"), + tokens: 0, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AutoUpdateAttempted(agreementId, false); + + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, abi.encode(params)); + + // Collect succeeds but agreement settles + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(agreement.state & SETTLED != 0, "should be settled when upgrade fails"); + } + + function test_Collect_Settles_WhenNoAutoUpgradeFlag() public { + // Accept WITHOUT AUTO_UPDATE + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement(0); + + // Offer upgrade (pending terms exist but no AUTO_UPDATE flag) + uint64 upgradeDeadline = uint64(block.timestamp + 2 days); + uint64 newEndsAt = uint64(block.timestamp + 365 days); + _offerUpgrade(rca, agreementId, upgradeDeadline, newEndsAt); + + vm.warp(upgradeDeadline + 1); + + IRecurringCollector.CollectParams memory params = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: bytes32("final"), + tokens: 0, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, abi.encode(params)); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(agreement.state & SETTLED != 0, "should settle without AUTO_UPDATE flag"); + } + + function test_Collect_Settles_ExpiredNonTerminated_NoPendingTerms() public { + // Accept without any pending terms or auto-upgrade + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 
agreementId) = _makeAcceptedAgreement(0); + + // Just collect after expiry — no pending, no terminate, should settle + _collectFull(rca, agreementId); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(agreement.state & SETTLED != 0, "expired agreement should settle on final collect"); + } + + function test_Collect_AutoUpgrade_SucceedsEvenWhenPendingEndsAtPast() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + + // Offer upgrade with deadline = now + 2 days, but set pending endsAt to only 3 days from now + uint64 upgradeDeadline = uint64(block.timestamp + 2 days); + uint64 shortEndsAt = uint64(block.timestamp + 3 days); + _offerUpgrade(rca, agreementId, upgradeDeadline, shortEndsAt); + + // Warp past both the upgrade deadline AND the pending endsAt + vm.warp(shortEndsAt + 1); + + IRecurringCollector.CollectParams memory params = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: bytes32("final"), + tokens: 0, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + + // Auto-upgrade succeeds — terms were validated at offer time, data service callback decides + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AutoUpdateAttempted(agreementId, true); + + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, abi.encode(params)); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(agreement.state & SETTLED == 0, "should not be settled - upgrade succeeded"); + { + (, bytes memory activeOfferData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreementUpdate memory activeRcau = abi.decode( + activeOfferData, + 
(IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(activeRcau.endsAt, shortEndsAt, "active endsAt should be the upgraded terms"); + } + } + + // ============================================================ + // Full lifecycle + // ============================================================ + + function test_FullLifecycle_Offer_Accept_Collect_Upgrade_AutoPromote_Collect() public { + // 1. Create and accept agreement with AUTO_UPDATE + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + + // 2. First collection + vm.warp(block.timestamp + 1000); + IRecurringCollector.CollectParams memory params = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: bytes32("first"), + tokens: 0, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, abi.encode(params)); + + // 3. Offer upgrade + uint64 upgradeDeadline = uint64(block.timestamp + 2 days); + uint64 newEndsAt = uint64(block.timestamp + 365 days); + _offerUpgrade(rca, agreementId, upgradeDeadline, newEndsAt); + + // 4. Final collect triggers auto-upgrade + vm.warp(upgradeDeadline + 1); + params.collectionId = bytes32("final"); + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, abi.encode(params)); + + // 5. 
Verify upgraded + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(agreement.state & SETTLED == 0, "should not be settled"); + { + (, bytes memory activeOfferData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreementUpdate memory activeRcau = abi.decode( + activeOfferData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(activeRcau.maxOngoingTokensPerSecond, 2 ether, "should have new rate"); + } + + // 6. Collect on new terms + vm.warp(block.timestamp + 1000); + params.collectionId = bytes32("post-upgrade"); + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, abi.encode(params)); + + // Should still be active + agreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(agreement.state & SETTLED == 0, "should still be active after post-upgrade collect"); + } + + // ============================================================ + // SETTLED notification suppression on auto-update (decision 4) + // ============================================================ + + /// @notice When auto-update succeeds, the transient SETTLED event should be suppressed. + /// Only the revived ACCEPTED state should appear in AgreementUpdated events. 
+ function test_Collect_AutoUpgrade_SuppressesSettledNotification() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + + uint64 upgradeDeadline = uint64(block.timestamp + 2 days); + uint64 newEndsAt = uint64(block.timestamp + 365 days); + _offerUpgrade(rca, agreementId, upgradeDeadline, newEndsAt); + + vm.warp(upgradeDeadline + 1); + + IRecurringCollector.CollectParams memory params = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: bytes32("final"), + tokens: 0, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + + vm.recordLogs(); + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, abi.encode(params)); + + // Scan AgreementUpdated events — none should have SETTLED flag set + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 updatedSig = IRecurringCollector.AgreementUpdated.selector; + bool foundSettled; + bool foundAccepted; + for (uint256 i; i < logs.length; i++) { + if (logs[i].topics[0] == updatedSig) { + // Decode state from event data: AgreementUpdated(bytes16 agreementId, bytes32 versionHash, uint16 state) + (, uint16 eventState) = abi.decode(logs[i].data, (bytes32, uint16)); + if (eventState & SETTLED != 0) foundSettled = true; + if (eventState & ACCEPTED != 0 && eventState & SETTLED == 0) foundAccepted = true; + } + } + assertFalse(foundSettled, "SETTLED event should be suppressed when auto-update succeeds"); + assertTrue(foundAccepted, "revived ACCEPTED event should be emitted"); + } + + /// @notice When auto-update fails, the SETTLED event should fire normally. 
+ function test_Collect_NoAutoUpgrade_EmitsSettledNotification() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement( + AUTO_UPDATE + ); + + uint64 upgradeDeadline = uint64(block.timestamp + 2 days); + uint64 newEndsAt = uint64(block.timestamp + 365 days); + _offerUpgrade(rca, agreementId, upgradeDeadline, newEndsAt); + + // Replace data service code with reverting callback → auto-update will fail + vm.etch(rca.dataService, _revertingCallbackCode); + + vm.warp(upgradeDeadline + 1); + + IRecurringCollector.CollectParams memory params = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: bytes32("final"), + tokens: 0, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + + vm.recordLogs(); + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, abi.encode(params)); + + // Scan AgreementUpdated events — should find one with SETTLED flag + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 updatedSig = IRecurringCollector.AgreementUpdated.selector; + bool foundSettled; + for (uint256 i; i < logs.length; i++) { + if (logs[i].topics[0] == updatedSig) { + (, uint16 eventState) = abi.decode(logs[i].data, (bytes32, uint16)); + if (eventState & SETTLED != 0) foundSettled = true; + } + } + assertTrue(foundSettled, "SETTLED event should fire when auto-update fails"); + } + + /// @notice When no auto-update is attempted (no pending terms, no AUTO_UPDATE), SETTLED fires. 
+ function test_Collect_NoPending_EmitsSettledNotification() public { + // Accept WITHOUT AUTO_UPDATE and no pending terms + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeAcceptedAgreement(0); + + _collectFull(rca, agreementId); + + // Re-read state — agreement should be SETTLED + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + assertTrue(agreement.state & SETTLED != 0, "should be SETTLED after final collect"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/base.t.sol b/packages/horizon/test/unit/payments/recurring-collector/base.t.sol index c37ced83f..06f8a89cd 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/base.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/base.t.sol @@ -12,32 +12,10 @@ contract RecurringCollectorBaseTest is RecurringCollectorSharedTest { /* solhint-disable graph/func-name-mixedcase */ - function test_RecoverRCASigner(FuzzyTestAccept memory fuzzyTestAccept) public view { - uint256 signerKey = boundKey(fuzzyTestAccept.unboundedSignerKey); - ( - IRecurringCollector.RecurringCollectionAgreement memory rca, - bytes memory signature - ) = _recurringCollectorHelper.generateSignedRCA(fuzzyTestAccept.rca, signerKey); - - assertEq( - _recurringCollector.recoverRCASigner(rca, signature), - vm.addr(signerKey), - "Recovered RCA signer does not match" - ); - } - - function test_RecoverRCAUSigner(FuzzyTestUpdate memory fuzzyTestUpdate) public view { - uint256 signerKey = boundKey(fuzzyTestUpdate.fuzzyTestAccept.unboundedSignerKey); - ( - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, - bytes memory signature - ) = _recurringCollectorHelper.generateSignedRCAU(fuzzyTestUpdate.rcau, signerKey); - - assertEq( - _recurringCollector.recoverRCAUSigner(rcau, signature), - vm.addr(signerKey), - "Recovered RCAU signer does not 
match" - ); + function test_TypeHashes() public view { + // Verify the typehash constants are set + assertTrue(_recurringCollector.RCA_TYPEHASH() != bytes32(0), "RCA typehash should be non-zero"); + assertTrue(_recurringCollector.RCAU_TYPEHASH() != bytes32(0), "RCAU typehash should be non-zero"); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/horizon/test/unit/payments/recurring-collector/cancel.t.sol b/packages/horizon/test/unit/payments/recurring-collector/cancel.t.sol index 1ccb0ccc1..dc2caf2bd 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/cancel.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/cancel.t.sol @@ -2,6 +2,14 @@ pragma solidity ^0.8.27; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PROVIDER +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { RecurringCollectorSharedTest } from "./shared.t.sol"; @@ -13,21 +21,21 @@ contract RecurringCollectorCancelTest is RecurringCollectorSharedTest { /* solhint-disable graph/func-name-mixedcase */ function test_Cancel(FuzzyTestAccept calldata fuzzyTestAccept, uint8 unboundedCanceler) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzyTestAccept); - - _cancel(acceptedRca, agreementId, _fuzzyCancelAgreementBy(unboundedCanceler)); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestAccept + ); + + if (_fuzzyCancelByPayer(unboundedCanceler)) { + _cancelByPayer(acceptedRca, agreementId); + } else { + _cancelByProvider(acceptedRca, agreementId); + } } - function test_Cancel_Revert_WhenNotAccepted( - 
IRecurringCollector.RecurringCollectionAgreement memory fuzzyRCA, - uint8 unboundedCanceler - ) public { - // Generate deterministic agreement ID + function test_Cancel_Revert_WhenNoneState(IRecurringCollector.RecurringCollectionAgreement memory fuzzyRCA) public { + vm.assume(fuzzyRCA.payer != address(0)); + vm.assume(fuzzyRCA.payer != _proxyAdmin); + // Agreement doesn't exist — payer field is address(0), so auth fails bytes16 agreementId = _recurringCollector.generateAgreementId( fuzzyRCA.payer, fuzzyRCA.dataService, @@ -36,33 +44,98 @@ contract RecurringCollectorCancelTest is RecurringCollectorSharedTest { fuzzyRCA.nonce ); - bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorAgreementIncorrectState.selector, - agreementId, - IRecurringCollector.AgreementState.NotAccepted + vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.UnauthorizedCaller.selector, fuzzyRCA.payer, address(0)) ); - vm.expectRevert(expectedErr); - vm.prank(fuzzyRCA.dataService); - _recurringCollector.cancel(agreementId, _fuzzyCancelAgreementBy(unboundedCanceler)); + vm.prank(fuzzyRCA.payer); + _recurringCollector.cancel(agreementId, bytes32(0), 0); } - function test_Cancel_Revert_WhenNotDataService( + function test_Cancel_Revert_WhenNotAuthorized( FuzzyTestAccept calldata fuzzyTestAccept, - uint8 unboundedCanceler, - address notDataService + address notAuthorized ) public { - vm.assume(fuzzyTestAccept.rca.dataService != notDataService); - - (, , , bytes16 agreementId) = _sensibleAuthorizeAndAccept(fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestAccept + ); + vm.assume(notAuthorized != rca.dataService); + vm.assume(notAuthorized != rca.payer); + vm.assume(notAuthorized != rca.serviceProvider); + vm.assume(notAuthorized != _proxyAdmin); + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; bytes memory expectedErr = 
abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorDataServiceNotAuthorized.selector, - agreementId, - notDataService + IRecurringCollector.UnauthorizedCaller.selector, + notAuthorized, + address(0) ); vm.expectRevert(expectedErr); - vm.prank(notDataService); - _recurringCollector.cancel(agreementId, _fuzzyCancelAgreementBy(unboundedCanceler)); + vm.prank(notAuthorized); + _recurringCollector.cancel(agreementId, activeHash, 0); + } + function test_Cancel_ByProvider_AllowsFinalCollection() public { + // Setup: Create agreement with known parameters + IRecurringCollector.RecurringCollectionAgreement memory rca; + rca.deadline = uint64(block.timestamp + 1000); + rca.endsAt = uint64(block.timestamp + 100_000); + rca.payer = address(0x123); + rca.dataService = address(0x456); + rca.serviceProvider = address(0x789); + rca.maxInitialTokens = 0; + rca.maxOngoingTokensPerSecond = 1 ether; + rca.minSecondsPerCollection = 60; + rca.maxSecondsPerCollection = 3600; + rca.nonce = 1; + rca.metadata = ""; + + bytes16 agreementId = _accept(rca); + + // First collection to establish lastCollectionAt + skip(rca.minSecondsPerCollection); + IRecurringCollector.CollectParams memory firstCollect = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: keccak256("first"), + tokens: 1 ether, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, _generateCollectData(firstCollect)); + + // Provider works for minSecondsPerCollection more, then cancels + skip(rca.minSecondsPerCollection); + bytes32 vHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.cancel(agreementId, vHash, 0); + + // State should NOT be SETTLED yet — data service needs to do a final collection + IRecurringCollector.AgreementData memory data = 
_recurringCollector.getAgreementData(agreementId); + assertTrue(data.state & SETTLED == 0, "agreement should not be SETTLED immediately on provider cancel"); + assertTrue(data.state & NOTICE_GIVEN != 0, "agreement should have NOTICE_GIVEN"); + assertTrue(data.state & BY_PROVIDER != 0, "agreement should have BY_PROVIDER"); + + // Data service should be able to collect for the work done since lastCollectionAt + uint256 expectedTokens = rca.maxOngoingTokensPerSecond * rca.minSecondsPerCollection; + IRecurringCollector.CollectParams memory finalCollect = IRecurringCollector.CollectParams({ + agreementId: agreementId, + collectionId: keccak256("final"), + tokens: expectedTokens, + dataServiceCut: 0, + receiverDestination: rca.serviceProvider, + maxSlippage: type(uint256).max + }); + vm.prank(rca.dataService); + uint256 collected = _recurringCollector.collect( + IGraphPayments.PaymentTypes.IndexingFee, + _generateCollectData(finalCollect) + ); + assertEq(collected, expectedTokens, "data service should collect for work done before provider cancel"); + + // After final collection, agreement should auto-settle + data = _recurringCollector.getAgreementData(agreementId); + assertTrue(data.state & SETTLED != 0, "agreement should be SETTLED after final collection"); } + /* solhint-enable graph/func-name-mixedcase */ } diff --git a/packages/horizon/test/unit/payments/recurring-collector/collect.t.sol b/packages/horizon/test/unit/payments/recurring-collector/collect.t.sol index d19f5caed..ea2008fcd 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/collect.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/collect.t.sol @@ -15,10 +15,8 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { /* solhint-disable graph/func-name-mixedcase */ function test_Collect_Revert_WhenInvalidData(address caller, uint8 unboundedPaymentType, bytes memory data) public { - bytes memory expectedErr = abi.encodeWithSelector( - 
IRecurringCollector.RecurringCollectorInvalidCollectData.selector, - data - ); + vm.assume(caller != _proxyAdmin); + bytes memory expectedErr = abi.encodeWithSelector(IRecurringCollector.InvalidCollectData.selector, data); vm.expectRevert(expectedErr); vm.prank(caller); _recurringCollector.collect(_paymentType(unboundedPaymentType), data); @@ -29,8 +27,9 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { address notDataService ) public { vm.assume(fuzzy.fuzzyTestAccept.rca.dataService != notDataService); + vm.assume(notDataService != _proxyAdmin); - (, , , bytes16 agreementId) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + (, bytes16 agreementId) = _sensibleAccept(fuzzy.fuzzyTestAccept); IRecurringCollector.CollectParams memory collectParams = fuzzy.collectParams; skip(1); @@ -38,7 +37,7 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { bytes memory data = _generateCollectData(collectParams); bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorDataServiceNotAuthorized.selector, + IRecurringCollector.DataServiceNotAuthorized.selector, collectParams.agreementId, notDataService ); @@ -48,12 +47,9 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { } function test_Collect_Revert_WhenUnauthorizedDataService(FuzzyTestCollect calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzy.fuzzyTestAccept + ); IRecurringCollector.CollectParams memory collectParams = fuzzy.collectParams; collectParams.agreementId = agreementId; collectParams.tokens = bound(collectParams.tokens, 1, type(uint256).max); @@ -81,7 +77,7 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { ); bytes memory 
expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorUnauthorizedDataService.selector, + IRecurringCollector.UnauthorizedDataService.selector, acceptedRca.dataService ); vm.expectRevert(expectedErr); @@ -93,7 +89,7 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { bytes memory data = _generateCollectData(fuzzy.collectParams); bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorAgreementNotCollectable.selector, + IRecurringCollector.AgreementNotCollectable.selector, fuzzy.collectParams.agreementId, IRecurringCollector.AgreementNotCollectableReason.InvalidAgreementState ); @@ -102,14 +98,14 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); } - function test_Collect_Revert_WhenCanceledAgreementByServiceProvider(FuzzyTestCollect calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); - _cancel(acceptedRca, agreementId, IRecurringCollector.CancelAgreementBy.ServiceProvider); + function test_Collect_Revert_WhenCanceledAgreementByServiceProvider_NoElapsedTime( + FuzzyTestCollect calldata fuzzy + ) public { + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzy.fuzzyTestAccept + ); + // Provider cancels in the same block as accept — no collectable seconds + _cancelByProvider(acceptedRca, agreementId); IRecurringCollector.CollectParams memory collectData = fuzzy.collectParams; collectData.tokens = bound(collectData.tokens, 1, type(uint256).max); IRecurringCollector.CollectParams memory collectParams = _generateCollectParams( @@ -122,9 +118,9 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { bytes memory data = _generateCollectData(collectParams); bytes memory 
expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorAgreementNotCollectable.selector, + IRecurringCollector.AgreementNotCollectable.selector, collectParams.agreementId, - IRecurringCollector.AgreementNotCollectableReason.InvalidAgreementState + IRecurringCollector.AgreementNotCollectableReason.ZeroCollectionSeconds ); vm.expectRevert(expectedErr); vm.prank(acceptedRca.dataService); @@ -135,12 +131,9 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { FuzzyTestCollect calldata fuzzy, uint256 unboundedCollectionSeconds ) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzy.fuzzyTestAccept + ); skip(acceptedRca.minSecondsPerCollection); bytes memory data = _generateCollectData( @@ -167,7 +160,7 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { ); data = _generateCollectData(collectParams); bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorCollectionTooSoon.selector, + IRecurringCollector.CollectionTooSoon.selector, collectParams.agreementId, collectionSeconds, acceptedRca.minSecondsPerCollection @@ -182,12 +175,9 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { uint256 unboundedFirstCollectionSeconds, uint256 unboundedSecondCollectionSeconds ) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzy.fuzzyTestAccept + ); // First valid collection to establish lastCollectionAt skip( @@ -250,12 +240,9 @@ contract RecurringCollectorCollectTest is 
RecurringCollectorSharedTest { uint256 unboundedTokens, bool testInitialCollection ) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzy.fuzzyTestAccept + ); if (!testInitialCollection) { // skip to collectable time @@ -307,12 +294,9 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { uint256 unboundedCollectionSeconds, uint256 unboundedTokens ) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzy.fuzzyTestAccept + ); (bytes memory data, uint256 collectionSeconds, uint256 tokens) = _generateValidCollection( acceptedRca, @@ -350,9 +334,7 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { rca.metadata = ""; // Accept the agreement - _recurringCollectorHelper.authorizeSignerWithChecks(rca.payer, 1); - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA(rca, 1); - bytes16 agreementId = _accept(rca, signature); + bytes16 agreementId = _accept(rca); // Do a first collection to use up initial tokens allowance skip(rca.minSecondsPerCollection); @@ -390,12 +372,7 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { // Expect revert due to excessive slippage (50 > 49) vm.expectRevert( - abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorExcessiveSlippage.selector, - requested, - maxAllowed, - maxSlippage - ) + abi.encodeWithSelector(IRecurringCollector.ExcessiveSlippage.selector, requested, maxAllowed, maxSlippage) ); vm.prank(rca.dataService); 
_recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); @@ -417,9 +394,7 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { rca.metadata = ""; // Accept the agreement - _recurringCollectorHelper.authorizeSignerWithChecks(rca.payer, 1); - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA(rca, 1); - bytes16 agreementId = _accept(rca, signature); + bytes16 agreementId = _accept(rca); // Do a first collection to use up initial tokens allowance skip(rca.minSecondsPerCollection); @@ -467,12 +442,9 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { assertEq(collected, maxAllowed); } function test_Collect_Revert_WhenZeroTokensBypassesTemporalValidation(FuzzyTestCollect calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzy.fuzzyTestAccept + ); // First valid collection to establish lastCollectionAt skip(acceptedRca.minSecondsPerCollection); @@ -503,7 +475,7 @@ contract RecurringCollectorCollectTest is RecurringCollectorSharedTest { vm.expectRevert( abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorCollectionTooSoon.selector, + IRecurringCollector.CollectionTooSoon.selector, agreementId, uint32(1), // only 1 second elapsed acceptedRca.minSecondsPerCollection diff --git a/packages/horizon/test/unit/payments/recurring-collector/coverageGaps.t.sol b/packages/horizon/test/unit/payments/recurring-collector/coverageGaps.t.sol new file mode 100644 index 000000000..aabce1fc5 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/coverageGaps.t.sol @@ -0,0 +1,723 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { + REGISTERED, + ACCEPTED, + UPDATE, + NOTICE_GIVEN, + SETTLED, + BY_PAYER, 
+ BY_PROVIDER, + BY_DATA_SERVICE, + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE, + IAgreementCollector +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IAgreementOwner } from "@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; +import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; +import { MockAgreementOwner } from "./MockAgreementOwner.t.sol"; + +/// @notice A payer contract that supports ERC165 + IProviderEligibility at offer time, +/// but returns malformed (< 32 bytes) data from isEligible at collection time. +contract MalformedEligibilityPayer is IAgreementOwner, IERC165 { + bool public returnMalformed; + + function setReturnMalformed(bool _malformed) external { + returnMalformed = _malformed; + } + + function beforeCollection(bytes16, uint256) external override {} + function afterCollection(bytes16, uint256) external override {} + function afterAgreementStateChange(bytes16, bytes32, uint16) external override {} + + function supportsInterface(bytes4 interfaceId) external pure override returns (bool) { + return interfaceId == type(IERC165).interfaceId || interfaceId == type(IProviderEligibility).interfaceId; + } + + /// @notice When returnMalformed is true, returns empty data via assembly (< 32 bytes). + /// Otherwise returns true (eligible). 
+ fallback() external { + if (returnMalformed) { + // solhint-disable-next-line no-inline-assembly + assembly { + return(0, 0) // return 0 bytes — triggers result.length < 32 + } + } else { + // solhint-disable-next-line no-inline-assembly + assembly { + mstore(0x00, 1) // true + return(0x00, 0x20) + } + } + } +} + +/// @notice Tests targeting specific uncovered lines in RecurringCollector.sol +contract RecurringCollectorCoverageGapsTest is RecurringCollectorSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // ══════════════════════════════════════════════════════════════════════ + // Gap 1 — Line 228: revert InvalidOfferType(offerType) + // ══════════════════════════════════════════════════════════════════════ + + function test_Offer_Revert_WhenOfferTypeInvalid_Two() public { + // OFFER_TYPE_NEW=0, OFFER_TYPE_UPDATE=1, so offerType=2 is invalid + address payer = makeAddr("payer"); + vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.InvalidOfferType.selector, uint8(2))); + vm.prank(payer); + _recurringCollector.offer(2, bytes(""), 0); + } + + function test_Offer_Revert_WhenOfferTypeInvalid_Three() public { + address payer = makeAddr("payer"); + vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.InvalidOfferType.selector, uint8(3))); + vm.prank(payer); + _recurringCollector.offer(3, bytes(""), 0); + } + + function test_Offer_Revert_WhenOfferTypeInvalid_MaxUint8() public { + address payer = makeAddr("payer"); + vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.InvalidOfferType.selector, uint8(255))); + vm.prank(payer); + _recurringCollector.offer(255, bytes(""), 0); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 2 — Lines 464-468: getAgreementVersionCount() + // ══════════════════════════════════════════════════════════════════════ + + function test_GetAgreementVersionCount_Empty() public view { + bytes16 fakeId = bytes16(keccak256("nonexistent")); + uint256 count = 
_recurringCollector.getAgreementVersionCount(fakeId); + assertEq(count, 0, "Empty agreement should return 0 versions"); + } + + function test_GetAgreementVersionCount_Accepted(FuzzyTestAccept calldata fuzzy) public { + (, bytes16 agreementId) = _sensibleAccept(fuzzy); + uint256 count = _recurringCollector.getAgreementVersionCount(agreementId); + assertEq(count, 1, "Accepted agreement with no pending should return 1"); + } + + function test_GetAgreementVersionCount_WithPendingUpdate(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Submit an update (creates pendingTerms) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + uint256 count = _recurringCollector.getAgreementVersionCount(agreementId); + assertEq(count, 2, "Agreement with pending update should return 2"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 3 — Lines 472-482: getAgreementVersionAt() + // ══════════════════════════════════════════════════════════════════════ + + function test_GetAgreementVersionAt_Index0(FuzzyTestAccept calldata fuzzy) public { + (, bytes16 agreementId) = _sensibleAccept(fuzzy); + + IAgreementCollector.AgreementVersion memory version = _recurringCollector.getAgreementVersionAt(agreementId, 0); + assertTrue(version.versionHash != bytes32(0), "Index 0 should return 
non-zero active terms hash"); + assertEq(version.state, REGISTERED | ACCEPTED, "Index 0 state should be REGISTERED | ACCEPTED"); + } + + function test_GetAgreementVersionAt_Index1_WithPending(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Submit an update (creates pendingTerms) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + IAgreementCollector.AgreementVersion memory version = _recurringCollector.getAgreementVersionAt(agreementId, 1); + assertTrue(version.versionHash != bytes32(0), "Index 1 should return non-zero pending terms hash"); + assertEq(version.state, REGISTERED | ACCEPTED | UPDATE, "Index 1 state should include UPDATE flag"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 4 — getAgreementOfferAt round-trip (replaces getAgreementTermsAt tests) + // ══════════════════════════════════════════════════════════════════════ + + function test_GetAgreementOfferAt_Index0(FuzzyTestAccept calldata fuzzy) public { + (, bytes16 agreementId) = _sensibleAccept(fuzzy); + + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + (uint8 offerType, bytes memory offerData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + + assertEq(offerType, OFFER_TYPE_NEW, "Index 0 should be OFFER_TYPE_NEW"); + 
IRecurringCollector.RecurringCollectionAgreement memory rca = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreement) + ); + assertEq(_recurringCollector.hashRCA(rca), activeHash, "Reconstructed hash should match active terms"); + } + + function test_GetAgreementOfferAt_Index1_WithPending(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Submit an update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + (uint8 offerType, bytes memory offerData) = _recurringCollector.getAgreementOfferAt(agreementId, 1); + + assertEq(offerType, OFFER_TYPE_UPDATE, "Index 1 should be OFFER_TYPE_UPDATE"); + IRecurringCollector.RecurringCollectionAgreementUpdate memory decoded = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(_recurringCollector.hashRCAU(decoded), pendingHash, "Reconstructed hash should match pending terms"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 5 — Lines 517-518: getMaxNextClaim(agreementId, claimScope) + // ══════════════════════════════════════════════════════════════════════ + + function test_GetMaxNextClaim_ScopeActiveOnly(FuzzyTestAccept calldata fuzzy) public { + (, 
bytes16 agreementId) = _sensibleAccept(fuzzy); + + uint256 maxClaimActive = _recurringCollector.getMaxNextClaim( + agreementId, + _recurringCollector.CLAIM_SCOPE_ACTIVE() + ); + uint256 maxClaimBoth = _recurringCollector.getMaxNextClaim(agreementId); + + // With no pending terms, active-only should equal both-scopes + assertEq(maxClaimActive, maxClaimBoth, "Active-only scope should match full scope when no pending terms"); + } + + function test_GetMaxNextClaim_ScopePendingOnly(FuzzyTestAccept calldata fuzzy) public { + (, bytes16 agreementId) = _sensibleAccept(fuzzy); + + uint256 maxClaimPending = _recurringCollector.getMaxNextClaim( + agreementId, + _recurringCollector.CLAIM_SCOPE_PENDING() + ); + + // With no pending terms, pending-only scope should return 0 + assertEq(maxClaimPending, 0, "Pending-only scope should return 0 when no pending terms"); + } + + function test_GetMaxNextClaim_ScopePendingOnly_WithPending(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Submit an update to create pending terms + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + uint256 maxClaimPending = _recurringCollector.getMaxNextClaim( + agreementId, + _recurringCollector.CLAIM_SCOPE_PENDING() + ); + + // Pending terms exist and have non-zero endsAt, so pending scope should be > 0 + assertTrue(0 < 
maxClaimPending, "Pending-only scope should be > 0 when pending terms exist"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 6 — Line 634: emit PayerCallbackFailed(...EligibilityCheck) + // ══════════════════════════════════════════════════════════════════════ + + function test_Collect_EmitsPayerCallbackFailed_WhenEligibilityReturnsMalformed() public { + MalformedEligibilityPayer payer = new MalformedEligibilityPayer(); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(payer), + dataService: makeAddr("ds-elig"), + serviceProvider: makeAddr("sp-elig"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 1, // CONDITION_ELIGIBILITY_CHECK + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }) + ); + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + // Payer calls offer (isEligible works correctly at this point) + vm.prank(address(payer)); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + // Service provider accepts + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); + + // Now make the payer return malformed (< 32 bytes) from isEligible + payer.setReturnMalformed(true); + + skip(rca.minSecondsPerCollection); + uint256 tokens = 1 ether; + bytes memory data = _generateCollectData( + _generateCollectParams(rca, agreementId, bytes32("col-malformed"), tokens, 0) + ); + + // Should emit PayerCallbackFailed with EligibilityCheck stage, but still collect + 
vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.PayerCallbackFailed( + agreementId, + address(payer), + IRecurringCollector.PayerCallbackStage.EligibilityCheck + ); + + vm.prank(rca.dataService); + uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + assertEq(collected, tokens, "Collection should proceed despite malformed eligibility response"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 7 — Line 850: agreement.activeTerms = terms (update on REGISTERED-only) + // ══════════════════════════════════════════════════════════════════════ + + function test_Update_OverwritesActiveTerms_WhenNotYetAccepted() public { + address payer = address(0x1111); + address dataService = address(0x2222); + address serviceProvider = address(0x3333); + + IRecurringCollector.RecurringCollectionAgreement memory rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: payer, + dataService: dataService, + serviceProvider: serviceProvider, + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + // Offer but do NOT accept — stays in REGISTERED state + bytes16 agreementId = _offer(rca); + + IRecurringCollector.AgreementData memory beforeUpdate = _recurringCollector.getAgreementData(agreementId); + assertEq(beforeUpdate.state, REGISTERED, "Should be REGISTERED only"); + + // Now submit OFFER_TYPE_UPDATE to overwrite activeTerms (line 850) + uint256 newMaxInitial = 200 ether; + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: rca.endsAt, + 
maxInitialTokens: newMaxInitial, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + vm.prank(payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + IRecurringCollector.AgreementData memory afterUpdate = _recurringCollector.getAgreementData(agreementId); + assertEq(afterUpdate.state, REGISTERED | UPDATE, "Should be REGISTERED | UPDATE after pre-accept overwrite"); + { + (, bytes memory activeOfferData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreementUpdate memory activeRcau = abi.decode( + activeOfferData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(activeRcau.maxInitialTokens, newMaxInitial, "activeTerms should be overwritten with new values"); + } + // pendingTerms should remain empty + assertEq(_recurringCollector.getAgreementVersionCount(agreementId), 1, "pendingTerms should remain empty"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 8 — Line 977: return (false, 0, ZeroCollectionSeconds) + // ══════════════════════════════════════════════════════════════════════ + + function test_GetAgreementData_ZeroCollectionSeconds(FuzzyTestAccept calldata fuzzy) public { + (, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Read agreement in the same block as accept — collectionStart == collectionEnd == block.timestamp + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + + assertFalse(agreement.isCollectable, "Should not be collectable with zero elapsed time"); + assertEq(agreement.collectionSeconds, 0, "Collection seconds should be 0"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 9 — Lines 
1011-1012: _maxClaimForTerms when REGISTERED only + // ══════════════════════════════════════════════════════════════════════ + + function test_GetMaxNextClaim_OfferedButNotAccepted() public { + address payer = address(0x1111); + address dataService = address(0x2222); + address serviceProvider = address(0x3333); + + IRecurringCollector.RecurringCollectionAgreement memory rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 100_000), + payer: payer, + dataService: dataService, + serviceProvider: serviceProvider, + maxInitialTokens: 5000, + maxOngoingTokensPerSecond: 100, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + // Offer but do NOT accept + bytes16 agreementId = _offer(rca); + + uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); + + // For REGISTERED-only: collectionStart = block.timestamp, collectionEnd = endsAt + // windowSeconds = endsAt - block.timestamp = 100_000 + // effectiveSeconds = min(100_000, maxSecondsPerCollection=3600) = 3600 + // maxClaim = 100 * 3600 + 5000 = 365_000 + uint256 windowSeconds = rca.endsAt - block.timestamp; + uint256 effectiveSeconds = windowSeconds < rca.maxSecondsPerCollection + ? windowSeconds + : rca.maxSecondsPerCollection; + uint256 expected = rca.maxOngoingTokensPerSecond * effectiveSeconds + rca.maxInitialTokens; + + assertEq(maxClaim, expected, "Offered-but-not-accepted maxClaim should use block.timestamp as proxy"); + assertTrue(0 < maxClaim, "maxClaim should be non-zero for valid offered agreement"); + } + + // Line 252 (AgreementIncorrectState in accept) is unreachable: it requires state without + // REGISTERED or ACCEPTED, but _getAgreementStorage returns state=0 for non-existent + // agreements where serviceProvider=address(0), making the require on line 243 fail first. + // It is a defensive guard. 
+ + // -- Line 407: cancel a pending update specifically -- + + function test_Cancel_PendingUpdate_ClearsPendingTerms(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Offer an update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: rca.endsAt + 365 days, + maxInitialTokens: rca.maxInitialTokens * 2, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond * 2, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + // Cancel specifically the pending update (using pending version hash) + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + assertTrue(pendingHash != bytes32(0), "Should have pending terms"); + + vm.prank(rca.payer); + _recurringCollector.cancel(agreementId, pendingHash, 0); + + // Pending terms cleared, active terms intact + assertEq(_recurringCollector.getAgreementVersionCount(agreementId), 1, "Pending terms should be cleared"); + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + assertTrue(activeHash != bytes32(0), "Active terms should remain"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 10 — Line 261: cancel() by data service + // ══════════════════════════════════════════════════════════════════════ + + function test_Cancel_ByDataService(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + bytes32 vHash 
= _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementUpdated( + agreementId, + vHash, + REGISTERED | ACCEPTED | NOTICE_GIVEN | BY_DATA_SERVICE + ); + vm.prank(rca.dataService); + _recurringCollector.cancel(agreementId, vHash, 0); + + IRecurringCollector.AgreementData memory data = _recurringCollector.getAgreementData(agreementId); + assertTrue(data.state & BY_DATA_SERVICE != 0, "Should have BY_DATA_SERVICE flag"); + assertTrue(data.state & NOTICE_GIVEN != 0, "Should have NOTICE_GIVEN flag"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 11 — Line 290: cancel() on unaccepted (REGISTERED-only) agreement + // adds SETTLED flag immediately + // ══════════════════════════════════════════════════════════════════════ + + function test_Cancel_UnacceptedAgreement_SettlesImmediately() public { + address payer = address(0x1111); + address dataService = address(0x2222); + address serviceProvider = address(0x3333); + + IRecurringCollector.RecurringCollectionAgreement memory rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: payer, + dataService: dataService, + serviceProvider: serviceProvider, + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + // Offer but do NOT accept + bytes16 agreementId = _offer(rca); + + IRecurringCollector.AgreementData memory before = _recurringCollector.getAgreementData(agreementId); + assertEq(before.state, REGISTERED, "Should be REGISTERED only before cancel"); + + // Cancel by payer (any party works, using payer) + bytes32 vHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + 
vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementUpdated(agreementId, vHash, REGISTERED | NOTICE_GIVEN | SETTLED | BY_PAYER); + vm.prank(payer); + _recurringCollector.cancel(agreementId, vHash, 0); + + IRecurringCollector.AgreementData memory after_ = _recurringCollector.getAgreementData(agreementId); + assertTrue(after_.state & SETTLED != 0, "Unaccepted cancel should set SETTLED immediately"); + assertTrue(after_.state & NOTICE_GIVEN != 0, "Should have NOTICE_GIVEN"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 12 — Line 271: cancel() with active terms hash (else branch of + // pendingTerms check) when pending terms also exist + // ══════════════════════════════════════════════════════════════════════ + + function test_Cancel_ActiveTerms_WhenPendingExists(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Submit an update to create pending terms + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + // Cancel using the ACTIVE terms hash (not pendingTerms hash) + // This hits the else branch at line 271 + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.cancel(agreementId, activeHash, 0); + + 
IRecurringCollector.AgreementData memory data = _recurringCollector.getAgreementData(agreementId); + assertTrue(data.state & NOTICE_GIVEN != 0, "Should have NOTICE_GIVEN"); + assertTrue(data.state & BY_PROVIDER != 0, "Should have BY_PROVIDER"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 13 — Line 276: cancel() revert on unregistered agreement + // ══════════════════════════════════════════════════════════════════════ + + // ══════════════════════════════════════════════════════════════════════ + // Gap 13a — Line 271: cancel() revert when hash matches neither + // pending nor active terms + // ══════════════════════════════════════════════════════════════════════ + + function test_Cancel_Revert_WhenHashMatchesNeither(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + bytes32 bogusHash = bytes32(uint256(0xdead)); + vm.assume(bogusHash != activeHash); + + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.AgreementHashMismatch.selector, + agreementId, + activeHash, + bogusHash + ) + ); + vm.prank(rca.payer); + _recurringCollector.cancel(agreementId, bogusHash, 0); + } + + // Line 276: require(oldState & REGISTERED != 0) in cancel() + // This branch is a defensive guard. A state without REGISTERED can only be 0 (non-existent), + // but non-existent agreements have payer/serviceProvider/dataService == address(0), + // so the caller authorization check at line 259-262 always reverts first. 
+ + // ══════════════════════════════════════════════════════════════════════ + // Gap 14 — Lines 356/360/368: getAgreementOfferAt edge cases + // ══════════════════════════════════════════════════════════════════════ + + function test_GetAgreementOfferAt_Index2_ReturnsEmpty(FuzzyTestAccept calldata fuzzy) public { + (, bytes16 agreementId) = _sensibleAccept(fuzzy); + + (uint8 offerType, bytes memory offerData) = _recurringCollector.getAgreementOfferAt(agreementId, 2); + assertEq(offerType, 0, "Out-of-range index should return 0 offerType"); + assertEq(offerData.length, 0, "Out-of-range index should return empty data"); + } + + function test_GetAgreementOfferAt_EmptyAgreement() public view { + bytes16 fakeId = bytes16(keccak256("nonexistent")); + + // Index 0 on non-existent agreement: terms.hash == bytes32(0) + (uint8 offerType, bytes memory offerData) = _recurringCollector.getAgreementOfferAt(fakeId, 0); + assertEq(offerType, 0, "Empty agreement index 0 should return 0 offerType"); + assertEq(offerData.length, 0, "Empty agreement index 0 should return empty data"); + } + + function test_GetAgreementOfferAt_Index1_NoPending(FuzzyTestAccept calldata fuzzy) public { + (, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Index 1 with no pending terms: terms.hash == bytes32(0) + (uint8 offerType, bytes memory offerData) = _recurringCollector.getAgreementOfferAt(agreementId, 1); + assertEq(offerType, 0, "No pending terms should return 0 offerType"); + assertEq(offerData.length, 0, "No pending terms should return empty data"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 15 — Line 801: _validateAndStoreOffer revert on expired deadline + // ══════════════════════════════════════════════════════════════════════ + + function test_Offer_Revert_WhenDeadlineExpired() public { + address payer = address(0x1111); + + IRecurringCollector.RecurringCollectionAgreement memory rca = IRecurringCollector.RecurringCollectionAgreement({ + 
deadline: uint64(block.timestamp - 1), // already expired + endsAt: uint64(block.timestamp + 365 days), + payer: payer, + dataService: address(0x2222), + serviceProvider: address(0x3333), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.AgreementDeadlineElapsed.selector, block.timestamp, rca.deadline) + ); + vm.prank(payer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + // ══════════════════════════════════════════════════════════════════════ + // Gap 16 — Line 864: _validateAndStoreUpdate revert on unregistered + // ══════════════════════════════════════════════════════════════════════ + + // Line 864: require(state & REGISTERED != 0) in _validateAndStoreUpdate() + // This branch is a defensive guard. For a non-existent agreement, payer == address(0), + // so the auth check (require msg.sender == agreement.payer) reverts first. + // No normal flow can produce a state where REGISTERED is cleared on an existing agreement. 
+ + // ══════════════════════════════════════════════════════════════════════ + // Gap 17 — Line 1205: _maxClaimForTerms with s == 0 (empty state) + // ══════════════════════════════════════════════════════════════════════ + + function test_GetMaxNextClaim_EmptyState_ReturnsZero() public view { + // Non-existent agreement has state == 0 and terms.endsAt == 0 + // Both conditions return 0 + bytes16 fakeId = bytes16(keccak256("nonexistent")); + uint256 maxClaim = _recurringCollector.getMaxNextClaim(fakeId); + assertEq(maxClaim, 0, "Empty state agreement should return 0"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/eligibility.t.sol b/packages/horizon/test/unit/payments/recurring-collector/eligibility.t.sol index 310e1a88f..c93a1999a 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/eligibility.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/eligibility.t.sol @@ -1,14 +1,17 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { RecurringCollectorSharedTest } from "./shared.t.sol"; import { MockAgreementOwner } from "./MockAgreementOwner.t.sol"; import { BareAgreementOwner } from "./BareAgreementOwner.t.sol"; +import { MalformedERC165Payer } from "./MalformedERC165Payer.t.sol"; /// @notice Tests for the IProviderEligibility gate in RecurringCollector._collect() +/// and the ERC-165 validation of CONDITION_ELIGIBILITY_CHECK at offer time. 
contract RecurringCollectorEligibilityTest is RecurringCollectorSharedTest { function _newApprover() internal returns (MockAgreementOwner) { return new MockAgreementOwner(); @@ -28,31 +31,36 @@ contract RecurringCollectorEligibilityTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: 1 ether, minSecondsPerCollection: 600, maxSecondsPerCollection: 3600, + conditions: 1, // CONDITION_ELIGIBILITY_CHECK + minSecondsPayerCancellationNotice: 0, nonce: 1, metadata: "" }) ); - bytes32 agreementHash = _recurringCollector.hashRCA(rca); - approver.authorize(agreementHash); _setupValidProvision(rca.serviceProvider, rca.dataService); - vm.prank(rca.dataService); - agreementId = _recurringCollector.accept(rca, ""); + // Payer calls offer + vm.prank(address(approver)); + agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + // Service provider accepts with stored hash + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); } /* solhint-disable graph/func-name-mixedcase */ + // ── Collection-time eligibility checks ────────────────────────────── + function test_Collect_OK_WhenEligible() public { MockAgreementOwner approver = _newApprover(); (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( approver ); - // Enable eligibility check and mark provider as eligible - approver.setEligibilityEnabled(true); - approver.setProviderEligible(rca.serviceProvider, true); - + // Provider is eligible by default — isEligible returns true skip(rca.minSecondsPerCollection); uint256 tokens = 1 ether; bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); @@ -68,51 +76,42 @@ contract RecurringCollectorEligibilityTest is RecurringCollectorSharedTest { approver ); - // Enable eligibility check but 
provider is NOT eligible - approver.setEligibilityEnabled(true); - // defaultEligible is false, and provider not explicitly set + // Explicitly mark provider as ineligible + approver.setProviderIneligible(rca.serviceProvider); skip(rca.minSecondsPerCollection); uint256 tokens = 1 ether; bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); vm.expectRevert( - abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorCollectionNotEligible.selector, - agreementId, - rca.serviceProvider - ) + abi.encodeWithSelector(IRecurringCollector.CollectionNotEligible.selector, agreementId, rca.serviceProvider) ); vm.prank(rca.dataService); _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); } - function test_Collect_OK_WhenPayerDoesNotSupportInterface() public { + function test_Collect_OK_ZeroTokensSkipsEligibilityCheck() public { MockAgreementOwner approver = _newApprover(); (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( approver ); - // eligibilityEnabled is false by default — supportsInterface returns false for IProviderEligibility - // Collection should proceed normally (backward compatible) + // Provider is ineligible, but zero-token collection should skip the gate + approver.setProviderIneligible(rca.serviceProvider); skip(rca.minSecondsPerCollection); - uint256 tokens = 1 ether; - bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), 0, 0)); vm.prank(rca.dataService); uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); - assertEq(collected, tokens); + assertEq(collected, 0); } - function test_Collect_OK_WhenEOAPayer(FuzzyTestCollect calldata fuzzy) public { - // Use standard ECDSA-signed path (EOA payer) - ( - 
IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy.fuzzyTestAccept); + function test_Collect_OK_WithoutEligibilityCondition(FuzzyTestCollect calldata fuzzy) public { + // EOA payer — no CONDITION_ELIGIBILITY_CHECK + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, ) = _sensibleAccept( + fuzzy.fuzzyTestAccept + ); (bytes memory data, uint256 collectionSeconds, uint256 tokens) = _generateValidCollection( acceptedRca, @@ -122,68 +121,167 @@ contract RecurringCollectorEligibilityTest is RecurringCollectorSharedTest { ); skip(collectionSeconds); - // EOA payer has no code — eligibility check is skipped entirely + // EOA payer — conditions masked to exclude CONDITION_ELIGIBILITY_CHECK by sensibleRCA vm.prank(acceptedRca.dataService); uint256 collected = _recurringCollector.collect(_paymentType(fuzzy.unboundedPaymentType), data); assertEq(collected, tokens); } - function test_Collect_OK_WhenPayerHasNoERC165() public { - // BareAgreementOwner implements IAgreementOwner but NOT IERC165. - // The supportsInterface call will revert, hitting the catch {} branch. + // ── Offer-time ERC-165 validation ─────────────────────────────────── + + function test_Offer_Revert_WhenPayerDoesNotSupportEligibility() public { + // BareAgreementOwner implements IAgreementOwner but NOT IERC165/IProviderEligibility. + // Offer must be rejected — CONDITION_ELIGIBILITY_CHECK requires ERC-165 confirmation + // to prevent slipping in an apparently-inert condition that could later deny payments. 
BareAgreementOwner bare = new BareAgreementOwner(); + IRecurringCollector.RecurringCollectionAgreement memory rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(bare), + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 1, // CONDITION_ELIGIBILITY_CHECK + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + rca = _recurringCollectorHelper.sensibleRCA(rca); + // sensibleRCA won't mask conditions because payer has code — but it doesn't support the interface + rca.conditions = 1; // force it back in case sensibleRCA touched it + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.EligibilityConditionNotSupported.selector, address(bare)) + ); + vm.prank(address(bare)); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + function test_Offer_Revert_WhenPayerReturnsMalformedERC165() public { + // MalformedERC165Payer has a fallback returning empty data — ERC165Checker + // correctly detects this as non-compliant and the offer must be rejected. 
+ MalformedERC165Payer malicious = new MalformedERC165Payer(); + + IRecurringCollector.RecurringCollectionAgreement memory rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(malicious), + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 1, // CONDITION_ELIGIBILITY_CHECK + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + rca = _recurringCollectorHelper.sensibleRCA(rca); + rca.conditions = 1; + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.EligibilityConditionNotSupported.selector, address(malicious)) + ); + vm.prank(address(malicious)); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + function test_Offer_Revert_WhenEOAPayerSetsEligibilityCondition() public { + // EOA payers cannot implement IProviderEligibility — offer must be rejected + // to prevent an apparently-inert condition from being slipped in. 
+ address eoa = makeAddr("eoa-payer"); + + IRecurringCollector.RecurringCollectionAgreement memory rca = IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: eoa, + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 1, // CONDITION_ELIGIBILITY_CHECK + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + rca = _recurringCollectorHelper.sensibleRCA(rca); + rca.conditions = 1; // force — sensibleRCA masks it for EOAs + + _setupValidProvision(rca.serviceProvider, rca.dataService); + + vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.EligibilityConditionNotSupported.selector, eoa)); + vm.prank(eoa); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + function test_Offer_OK_WhenEligibilityCapablePayer() public { + // MockAgreementOwner implements IERC165 + IProviderEligibility — offer succeeds + MockAgreementOwner approver = _newApprover(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( IRecurringCollector.RecurringCollectionAgreement({ deadline: uint64(block.timestamp + 1 hours), endsAt: uint64(block.timestamp + 365 days), - payer: address(bare), + payer: address(approver), dataService: makeAddr("ds"), serviceProvider: makeAddr("sp"), maxInitialTokens: 100 ether, maxOngoingTokensPerSecond: 1 ether, minSecondsPerCollection: 600, maxSecondsPerCollection: 3600, + conditions: 1, // CONDITION_ELIGIBILITY_CHECK + minSecondsPayerCancellationNotice: 0, nonce: 1, metadata: "" }) ); - bytes32 agreementHash = _recurringCollector.hashRCA(rca); - bare.authorize(agreementHash); _setupValidProvision(rca.serviceProvider, rca.dataService); - vm.prank(rca.dataService); - bytes16 agreementId = _recurringCollector.accept(rca, 
""); - - skip(rca.minSecondsPerCollection); - uint256 tokens = 1 ether; - bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), tokens, 0)); - - // Collection succeeds — the catch {} swallows the revert from supportsInterface - vm.prank(rca.dataService); - uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); - assertEq(collected, tokens); + vm.prank(address(approver)); + IRecurringCollector.OfferResult memory result = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + assertTrue(result.agreementId != bytes16(0)); } - function test_Collect_OK_ZeroTokensSkipsEligibilityCheck() public { + function test_Offer_OK_WhenEligibilityCapablePayerWithoutCondition() public { + // Even if payer supports IProviderEligibility, offers WITHOUT the condition are valid. + // Eligibility checks are opt-in per agreement. MockAgreementOwner approver = _newApprover(); - (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _acceptUnsignedAgreement( - approver - ); - // Enable eligibility check, provider is NOT eligible - approver.setEligibilityEnabled(true); - // defaultEligible = false + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + payer: address(approver), + dataService: makeAddr("ds"), + serviceProvider: makeAddr("sp"), + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 3600, + conditions: 0, // No eligibility check + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }) + ); - // Zero-token collection should NOT trigger the eligibility gate - // (the guard is inside `if (0 < tokensToCollect && ...)`) - skip(rca.minSecondsPerCollection); - bytes memory data = 
_generateCollectData(_generateCollectParams(rca, agreementId, bytes32("col1"), 0, 0)); + _setupValidProvision(rca.serviceProvider, rca.dataService); - vm.prank(rca.dataService); - uint256 collected = _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); - assertEq(collected, 0); + vm.prank(address(approver)); + IRecurringCollector.OfferResult memory result = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + assertTrue(result.agreementId != bytes16(0)); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/horizon/test/unit/payments/recurring-collector/getMaxNextClaim.t.sol b/packages/horizon/test/unit/payments/recurring-collector/getMaxNextClaim.t.sol index 801beef6d..9fbd1150f 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/getMaxNextClaim.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/getMaxNextClaim.t.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringCollectorSharedTest } from "./shared.t.sol"; @@ -18,14 +19,9 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { // -- Test 2: CanceledByServiceProvider agreement returns 0 -- function test_GetMaxNextClaim_CanceledByServiceProvider(FuzzyTestAccept calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory rca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); - _cancel(rca, agreementId, IRecurringCollector.CancelAgreementBy.ServiceProvider); + _cancelByProvider(rca, agreementId); assertEq(_recurringCollector.getMaxNextClaim(agreementId), 0, "CanceledByServiceProvider should return 
0"); } @@ -34,12 +30,7 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { // Returns maxOngoingTokensPerSecond * min(windowSeconds, maxSecondsPerCollection) + maxInitialTokens function test_GetMaxNextClaim_Accepted_NeverCollected(FuzzyTestAccept calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory rca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); @@ -55,12 +46,7 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { // Returns maxOngoingTokensPerSecond * min(windowSeconds, maxSecondsPerCollection) (no initial bonus) function test_GetMaxNextClaim_Accepted_AfterCollection(FuzzyTestAccept calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory rca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); // Perform a first collection so lastCollectionAt is set skip(rca.minSecondsPerCollection); @@ -79,43 +65,38 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { // -- Test 5: CanceledByPayer agreement -- - // 5a: Canceled in the same block as accepted (window = 0) + // 5a: Canceled in the same block as accepted — with min notice, collectableUntil is in the future function test_GetMaxNextClaim_CanceledByPayer_SameBlock(FuzzyTestAccept calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory rca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); - _cancel(rca, agreementId, IRecurringCollector.CancelAgreementBy.Payer); + 
_cancelByPayer(rca, agreementId); uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); - // canceledAt == acceptedAt (same block), so window = 0, maxClaim = 0 - assertEq(maxClaim, 0, "CanceledByPayer in same block should return 0"); + if (rca.minSecondsPayerCancellationNotice > 0) { + // With notice: collectableUntil > acceptedAt, so there IS a claimable window + assertTrue(maxClaim > 0, "CanceledByPayer with notice should have claimable window"); + } else { + // Zero notice: collectableUntil == acceptedAt, window = 0 + assertEq(maxClaim, 0, "CanceledByPayer with zero notice should return 0"); + } } - // 5b: Canceled after time has elapsed (canceledAt < endsAt) + // 5b: Canceled after time has elapsed (collectableUntil < endsAt) function test_GetMaxNextClaim_CanceledByPayer_WithWindow(FuzzyTestAccept calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory rca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); // Advance time, then cancel (still before endsAt due to sensible bounds) skip(rca.minSecondsPerCollection + 100); - _cancel(rca, agreementId, IRecurringCollector.CancelAgreementBy.Payer); + _cancelByPayer(rca, agreementId); uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); - // collectionEnd = min(canceledAt, endsAt) = canceledAt (since canceledAt < endsAt) + // collectionEnd = min(collectableUntil, endsAt) = collectableUntil (since collectableUntil < endsAt) // collectionStart = acceptedAt (never collected) - IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); - uint256 windowSeconds = agreement.canceledAt - agreement.acceptedAt; + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + uint256 windowSeconds = agreement.collectableUntil - agreement.acceptedAt; 
uint256 maxSeconds = windowSeconds < rca.maxSecondsPerCollection ? windowSeconds : rca.maxSecondsPerCollection; uint256 expected = rca.maxOngoingTokensPerSecond * maxSeconds + rca.maxInitialTokens; assertEq(maxClaim, expected, "CanceledByPayer with elapsed time mismatch"); @@ -123,12 +104,7 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { // 5c: CanceledByPayer after a collection (no initial tokens) function test_GetMaxNextClaim_CanceledByPayer_AfterCollection(FuzzyTestAccept calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory rca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); // Perform a first collection skip(rca.minSecondsPerCollection); @@ -138,13 +114,13 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { // Advance more time, then cancel skip(rca.minSecondsPerCollection + 100); - _cancel(rca, agreementId, IRecurringCollector.CancelAgreementBy.Payer); + _cancelByPayer(rca, agreementId); uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); // lastCollectionAt is set, so no initial bonus - IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); - uint256 windowSeconds = agreement.canceledAt - agreement.lastCollectionAt; + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + uint256 windowSeconds = agreement.collectableUntil - agreement.lastCollectionAt; uint256 maxSeconds = windowSeconds < rca.maxSecondsPerCollection ? 
windowSeconds : rca.maxSecondsPerCollection; uint256 expected = rca.maxOngoingTokensPerSecond * maxSeconds; assertEq(maxClaim, expected, "CanceledByPayer post-collection should exclude initial tokens"); @@ -155,12 +131,7 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { // is capped at endsAt, so returns maxOngoingTokensPerSecond * min(remaining, maxSecondsPerCollection) function test_GetMaxNextClaim_Accepted_PastEndsAt(FuzzyTestAccept calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory rca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); // Perform a first collection so we have a lastCollectionAt skip(rca.minSecondsPerCollection); @@ -185,12 +156,7 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { // Also test past endsAt when never collected (includes initial tokens) function test_GetMaxNextClaim_Accepted_PastEndsAt_NeverCollected(FuzzyTestAccept calldata fuzzy) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory rca, - , - , - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzy); + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); uint256 acceptedAt = block.timestamp; @@ -212,7 +178,6 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { function test_GetMaxNextClaim_MaxSecondsPerCollectionCaps() public { // Use deterministic values to precisely verify the cap behavior - uint256 signerKey = 0xBEEF; address payer = address(0x1111); address dataService = address(0x2222); address serviceProvider = address(0x3333); @@ -233,16 +198,19 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, minSecondsPerCollection: minSecondsPerCollection, 
maxSecondsPerCollection: maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: 1, metadata: "" }); - // Authorize signer and accept - _recurringCollectorHelper.authorizeSignerWithChecks(payer, signerKey); - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA(rca, signerKey); + // Offer and accept _setupValidProvision(serviceProvider, dataService); - vm.prank(dataService); - bytes16 agreementId = _recurringCollector.accept(rca, signature); + vm.prank(payer); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); // Window = endsAt - acceptedAt = 100_000 seconds, which is > maxSecondsPerCollection (3600) // So the window should be capped at maxSecondsPerCollection @@ -260,7 +228,6 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { function test_GetMaxNextClaim_WindowSmallerThanMaxSecondsPerCollection() public { // Test the case where the window is smaller than maxSecondsPerCollection (no cap) - uint256 signerKey = 0xBEEF; address payer = address(0x1111); address dataService = address(0x2222); address serviceProvider = address(0x3333); @@ -283,15 +250,18 @@ contract RecurringCollectorGetMaxNextClaimTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, minSecondsPerCollection: minSecondsPerCollection, maxSecondsPerCollection: maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: 1, metadata: "" }); - _recurringCollectorHelper.authorizeSignerWithChecks(payer, signerKey); - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA(rca, signerKey); _setupValidProvision(serviceProvider, dataService); - vm.prank(dataService); - bytes16 agreementId = 
_recurringCollector.accept(rca, signature); + vm.prank(payer); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); uint256 maxClaim = _recurringCollector.getMaxNextClaim(agreementId); diff --git a/packages/horizon/test/unit/payments/recurring-collector/hashRoundTrip.t.sol b/packages/horizon/test/unit/payments/recurring-collector/hashRoundTrip.t.sol new file mode 100644 index 000000000..27c204e4c --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/hashRoundTrip.t.sol @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { + REGISTERED, + ACCEPTED, + UPDATE, + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE, + WITH_NOTICE +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; + +/// @notice Round-trip hash verification: reconstruct offers from on-chain data and verify hashes. 
+contract RecurringCollectorHashRoundTripTest is RecurringCollectorSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // ==================== RCA round-trip ==================== + + function test_HashRoundTrip_RCA(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + _verifyVersionHash(agreementId, 0); + + // Also verify the reconstructed RCA matches the original + (uint8 offerType, bytes memory offerData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + assertEq(offerType, OFFER_TYPE_NEW, "Offer type should be NEW"); + IRecurringCollector.RecurringCollectionAgreement memory reconstructed = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreement) + ); + assertEq(reconstructed.payer, rca.payer, "payer mismatch"); + assertEq(reconstructed.dataService, rca.dataService, "dataService mismatch"); + assertEq(reconstructed.serviceProvider, rca.serviceProvider, "serviceProvider mismatch"); + assertEq(reconstructed.nonce, rca.nonce, "nonce mismatch"); + assertEq(reconstructed.endsAt, rca.endsAt, "endsAt mismatch"); + } + + // ==================== RCAU round-trip (pending) ==================== + + function test_HashRoundTrip_RCAU_Pending(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Offer update (creates pending terms) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeUpdate(rca, agreementId, 1); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + // Verify pending version hash round-trips + _verifyVersionHash(agreementId, 1); + + // Active version should still round-trip + _verifyVersionHash(agreementId, 0); + } + + // ==================== RCAU round-trip (accepted) ==================== + + function test_HashRoundTrip_RCAU_Accepted(FuzzyTestAccept 
calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Offer and accept update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeUpdate(rca, agreementId, 1); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); + + // Active version is now from RCAU — verify round-trip + _verifyVersionHash(agreementId, 0); + + // Verify offer type is UPDATE + (uint8 offerType, ) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + assertEq(offerType, OFFER_TYPE_UPDATE, "Active offer type should be UPDATE after accept"); + } + + // ==================== RCAU pre-acceptance overwrite ==================== + + function test_HashRoundTrip_RCAU_PreAcceptOverwrite(FuzzyTestAccept calldata fuzzy) public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA(fuzzy.rca); + _setupValidProvision(rca.serviceProvider, rca.dataService); + + // Offer RCA + vm.prank(rca.payer); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + // Overwrite with RCAU before acceptance + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeUpdate(rca, agreementId, 1); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + // Active terms are from RCAU now — verify hash round-trip + _verifyVersionHash(agreementId, 0); + + (uint8 offerType, ) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + assertEq(offerType, OFFER_TYPE_UPDATE, "Active offer type should be UPDATE after pre-accept overwrite"); + } + + // ==================== Cancel pending, active stays RCA ==================== + + function 
test_HashRoundTrip_CancelPending_ActiveStaysRCA(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Offer update + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeUpdate(rca, agreementId, 1); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + // Cancel the pending update + bytes32 pendingCancelHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(rca.payer); + _recurringCollector.cancel(agreementId, pendingCancelHash, 0); + + // Active terms should still be from RCA and round-trip + _verifyVersionHash(agreementId, 0); + + (uint8 offerType, ) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + assertEq(offerType, OFFER_TYPE_NEW, "Active offer type should still be NEW after cancel pending"); + } + + // ==================== WITH_NOTICE deadline=0 round-trip ==================== + + function test_HashRoundTrip_WithNotice_DeadlineZero(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Offer update with WITH_NOTICE and deadline=0 + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeUpdate(rca, agreementId, 1); + rcau.deadline = 0; // auto-compute notice cutoff + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), WITH_NOTICE); + + // Pending terms should round-trip with the ORIGINAL deadline (0) + _verifyVersionHash(agreementId, 1); + + // Verify the stored deadline is 0 (not the derived notice cutoff) + (, bytes memory pendingData) = _recurringCollector.getAgreementOfferAt(agreementId, 1); + IRecurringCollector.RecurringCollectionAgreementUpdate memory storedRcau = abi.decode( + pendingData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(storedRcau.deadline, 0, 
"Stored deadline should be original (0), not derived cutoff"); + } + + // ==================== Helpers ==================== + + /// @notice Verify that getAgreementOfferAt round-trips to the stored version hash + function _verifyVersionHash(bytes16 agreementId, uint256 index) internal view { + bytes32 storedHash = _recurringCollector.getAgreementVersionAt(agreementId, index).versionHash; + (uint8 offerType, bytes memory offerData) = _recurringCollector.getAgreementOfferAt(agreementId, index); + + bytes32 reconstructedHash; + if (offerType == OFFER_TYPE_NEW) { + IRecurringCollector.RecurringCollectionAgreement memory rca = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreement) + ); + reconstructedHash = _recurringCollector.hashRCA(rca); + } else { + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + reconstructedHash = _recurringCollector.hashRCAU(rcau); + } + + assertEq(reconstructedHash, storedHash, "Reconstructed hash must match stored version hash"); + } + + /// @notice Build a sensible RCAU from an accepted RCA + function _makeUpdate( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId, + uint32 nonce + ) internal view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory) { + return + IRecurringCollector.RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 30 days), + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + conditions: rca.conditions, + minSecondsPayerCancellationNotice: rca.minSecondsPayerCancellationNotice, + nonce: nonce, + metadata: rca.metadata + }); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git 
a/packages/horizon/test/unit/payments/recurring-collector/hashing.t.sol b/packages/horizon/test/unit/payments/recurring-collector/hashing.t.sol new file mode 100644 index 000000000..86ea52a4e --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/hashing.t.sol @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { RecurringCollectorSharedTest } from "./shared.t.sol"; + +function contentHashTerms(IRecurringCollector.AgreementTerms memory terms) pure returns (bytes32) { + return + keccak256( + abi.encode( + terms.deadline, + terms.endsAt, + terms.maxInitialTokens, + terms.maxOngoingTokensPerSecond, + terms.minSecondsPerCollection, + terms.maxSecondsPerCollection, + terms.conditions, + terms.minSecondsPayerCancellationNotice, + keccak256(terms.metadata) + ) + ); +} + +/// @notice Tests for hashing functions (hashRCA, hashRCAU, contentHashTerms) +contract RecurringCollectorHashingTest is RecurringCollectorSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + function test_ContentHashTerms_DeterministicForSameInput() public view { + IRecurringCollector.AgreementTerms memory terms = IRecurringCollector.AgreementTerms({ + deadline: 1000, + endsAt: 2000, + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: 1 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + hash: bytes32(0), + metadata: "test" + }); + + bytes32 hash1 = contentHashTerms(terms); + bytes32 hash2 = contentHashTerms(terms); + assertEq(hash1, hash2, "Same input should produce same hash"); + assertTrue(hash1 != bytes32(0), "Hash should not be zero"); + } + + function _makeTerms( + uint256 rate, + bytes memory metadata + ) private pure returns (IRecurringCollector.AgreementTerms memory) { + return + IRecurringCollector.AgreementTerms({ + deadline: 1000, + 
endsAt: 2000, + maxInitialTokens: 100 ether, + maxOngoingTokensPerSecond: rate, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + hash: bytes32(0), + metadata: metadata + }); + } + + function test_ContentHashTerms_DiffersWhenFieldChanges() public view { + bytes32 hash1 = contentHashTerms(_makeTerms(1 ether, "test")); + bytes32 hash2 = contentHashTerms(_makeTerms(2 ether, "test")); + assertTrue(hash1 != hash2, "Different terms should produce different hashes"); + } + + function test_ContentHashTerms_DiffersWhenMetadataChanges() public view { + bytes32 hash1 = contentHashTerms(_makeTerms(1 ether, "metadata-A")); + bytes32 hash2 = contentHashTerms(_makeTerms(1 ether, "metadata-B")); + assertTrue(hash1 != hash2, "Different metadata should produce different hashes"); + } + + function test_ContentHashTerms_IgnoresHashField() public view { + IRecurringCollector.AgreementTerms memory termsA = _makeTerms(1 ether, "test"); + termsA.hash = bytes32(uint256(1)); + IRecurringCollector.AgreementTerms memory termsB = _makeTerms(1 ether, "test"); + termsB.hash = bytes32(uint256(999)); + + bytes32 hash1 = contentHashTerms(termsA); + bytes32 hash2 = contentHashTerms(termsB); + assertEq(hash1, hash2, "Hash field itself should not affect contentHash"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/mixedPath.t.sol b/packages/horizon/test/unit/payments/recurring-collector/mixedPath.t.sol index 10d6ee5e0..446ea283a 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/mixedPath.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/mixedPath.t.sol @@ -1,113 +1,99 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { + REGISTERED, + ACCEPTED, + UPDATE, + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE, + WITH_NOTICE +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { 
IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringCollectorSharedTest } from "./shared.t.sol"; import { MockAgreementOwner } from "./MockAgreementOwner.t.sol"; -/// @notice Tests that ECDSA and contract-approved paths can be mixed for accept and update. +/// @notice Tests the contract-approved offer+accept path for accept and update. contract RecurringCollectorMixedPathTest is RecurringCollectorSharedTest { /* solhint-disable graph/func-name-mixedcase */ - /// @notice ECDSA accept, then contract-approved update should fail (payer is EOA) - function test_MixedPath_ECDSAAccept_UnsignedUpdate_RevertsForEOA() public { - uint256 signerKey = 0xA11CE; - address payer = vm.addr(signerKey); + /// @notice Contract-approved accept, then contract-approved update works + function test_MixedPath_UnsignedAccept_UnsignedUpdate_OK() public { + MockAgreementOwner approver = new MockAgreementOwner(); IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( IRecurringCollector.RecurringCollectionAgreement({ deadline: uint64(block.timestamp + 1 hours), endsAt: uint64(block.timestamp + 365 days), - payer: payer, + payer: address(approver), dataService: makeAddr("ds"), serviceProvider: makeAddr("sp"), maxInitialTokens: 100 ether, maxOngoingTokensPerSecond: 1 ether, minSecondsPerCollection: 600, maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: 1, metadata: "" }) ); - // Accept via ECDSA - (, , bytes16 agreementId) = _authorizeAndAccept(rca, signerKey); + // Accept via offer+accept path + _setupValidProvision(rca.serviceProvider, rca.dataService); + vm.prank(address(approver)); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + 
_recurringCollector.accept(agreementId, activeHash, bytes(""), 0); - // Try unsigned update — should revert because payer is an EOA + // Update via offerUpdate+accept path IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( IRecurringCollector.RecurringCollectionAgreementUpdate({ agreementId: agreementId, deadline: 0, endsAt: uint64(block.timestamp + 730 days), - maxInitialTokens: 200 ether, - maxOngoingTokensPerSecond: 2 ether, + maxInitialTokens: 50 ether, + maxOngoingTokensPerSecond: 0.5 ether, minSecondsPerCollection: 600, maxSecondsPerCollection: 7200, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: 1, metadata: "" }) ); - vm.expectRevert( - abi.encodeWithSelector(IRecurringCollector.RecurringCollectorApproverNotContract.selector, payer) - ); - vm.prank(rca.dataService); - _recurringCollector.update(rcau, ""); - } - - /// @notice Contract-approved accept, then ECDSA update should fail (no authorized signer) - function test_MixedPath_UnsignedAccept_ECDSAUpdate_RevertsForUnauthorizedSigner() public { - MockAgreementOwner approver = new MockAgreementOwner(); - - IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( - IRecurringCollector.RecurringCollectionAgreement({ - deadline: uint64(block.timestamp + 1 hours), - endsAt: uint64(block.timestamp + 365 days), - payer: address(approver), - dataService: makeAddr("ds"), - serviceProvider: makeAddr("sp"), - maxInitialTokens: 100 ether, - maxOngoingTokensPerSecond: 1 ether, - minSecondsPerCollection: 600, - maxSecondsPerCollection: 3600, - nonce: 1, - metadata: "" - }) - ); + // Payer calls offerUpdate + vm.prank(address(approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); - // Accept via contract-approved path - bytes32 agreementHash = _recurringCollector.hashRCA(rca); - approver.authorize(agreementHash); - _setupValidProvision(rca.serviceProvider, rca.dataService); - 
vm.prank(rca.dataService); - bytes16 agreementId = _recurringCollector.accept(rca, ""); + // Data service accepts update with stored hash + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; - // Try ECDSA update with an unauthorized signer - uint256 wrongKey = 0xDEAD; - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( - IRecurringCollector.RecurringCollectionAgreementUpdate({ - agreementId: agreementId, - deadline: uint64(block.timestamp + 1 hours), - endsAt: uint64(block.timestamp + 730 days), - maxInitialTokens: 200 ether, - maxOngoingTokensPerSecond: 2 ether, - minSecondsPerCollection: 600, - maxSecondsPerCollection: 7200, - nonce: 1, - metadata: "" - }) - ); + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementUpdated(agreementId, pendingHash, REGISTERED | ACCEPTED | UPDATE); - (, bytes memory sig) = _recurringCollectorHelper.generateSignedRCAU(rcau, wrongKey); + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); - vm.expectRevert(IRecurringCollector.RecurringCollectorInvalidSigner.selector); - vm.prank(rca.dataService); - _recurringCollector.update(rcau, sig); + // Verify updated terms + IRecurringCollector.AgreementData memory finalAgreement = _recurringCollector.getAgreementData(agreementId); + { + (, bytes memory activeOfferData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreementUpdate memory activeRcau = abi.decode( + activeOfferData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(activeRcau.maxOngoingTokensPerSecond, rcau.maxOngoingTokensPerSecond); + assertEq(activeRcau.maxSecondsPerCollection, rcau.maxSecondsPerCollection); + } + assertEq(finalAgreement.updateNonce, 1); } - /// @notice Contract-approved accept, then contract-approved update works - function 
test_MixedPath_UnsignedAccept_UnsignedUpdate_OK() public { + /// @notice WITH_NOTICE + deadline=0 pending terms can be manually accepted + function test_MixedPath_WithNotice_DeadlineZero_ManualAccept() public { MockAgreementOwner approver = new MockAgreementOwner(); IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( @@ -121,21 +107,24 @@ contract RecurringCollectorMixedPathTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: 1 ether, minSecondsPerCollection: 600, maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 60, nonce: 1, metadata: "" }) ); - // Accept via contract-approved path - bytes32 agreementHash = _recurringCollector.hashRCA(rca); - approver.authorize(agreementHash); + // Offer + accept initial terms _setupValidProvision(rca.serviceProvider, rca.dataService); - vm.prank(rca.dataService); - bytes16 agreementId = _recurringCollector.accept(rca, ""); - - // Update via contract-approved path (use sensibleRCAU to stay in valid ranges) - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( - IRecurringCollector.RecurringCollectionAgreementUpdate({ + vm.prank(address(approver)); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); + + // Payer offers update with WITH_NOTICE and deadline=0 + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ agreementId: agreementId, deadline: 0, endsAt: uint64(block.timestamp + 730 days), @@ -143,36 +132,25 @@ contract RecurringCollectorMixedPathTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: 0.5 ether, minSecondsPerCollection: 600, 
maxSecondsPerCollection: 7200, + conditions: 0, + minSecondsPayerCancellationNotice: 60, nonce: 1, metadata: "" - }) - ); - - bytes32 updateHash = _recurringCollector.hashRCAU(rcau); - approver.authorize(updateHash); + }); - vm.expectEmit(address(_recurringCollector)); - emit IRecurringCollector.AgreementUpdated( - rca.dataService, - address(approver), - rca.serviceProvider, - agreementId, - uint64(block.timestamp), - rcau.endsAt, - rcau.maxInitialTokens, - rcau.maxOngoingTokensPerSecond, - rcau.minSecondsPerCollection, - rcau.maxSecondsPerCollection - ); + vm.prank(address(approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), WITH_NOTICE); - vm.prank(rca.dataService); - _recurringCollector.update(rcau, ""); + // Service provider manually accepts the deadline=0 pending terms + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); - // Verify updated terms - IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); - assertEq(agreement.maxOngoingTokensPerSecond, rcau.maxOngoingTokensPerSecond); - assertEq(agreement.maxSecondsPerCollection, rcau.maxSecondsPerCollection); - assertEq(agreement.updateNonce, 1); + // Verify the update was accepted and agreement is active + IRecurringCollector.AgreementData memory data = _recurringCollector.getAgreementData(agreementId); + assertEq(data.updateNonce, 1); + assertEq(data.state, REGISTERED | ACCEPTED | UPDATE); + assertEq(data.collectableUntil, rcau.endsAt); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/horizon/test/unit/payments/recurring-collector/pause.t.sol b/packages/horizon/test/unit/payments/recurring-collector/pause.t.sol new file mode 100644 index 000000000..cbc22c22b --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/pause.t.sol @@ -0,0 +1,178 @@ +// 
SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Pausable } from "@openzeppelin/contracts/utils/Pausable.sol"; + +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { OFFER_TYPE_NEW, OFFER_TYPE_UPDATE } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; +import { MockAgreementOwner } from "./MockAgreementOwner.t.sol"; + +/// @notice Tests for the pause mechanism in RecurringCollector. +contract RecurringCollectorPauseTest is RecurringCollectorSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + address internal guardian = makeAddr("guardian"); + + // Governor is address(0) in the mock controller + function _governor() internal pure returns (address) { + return address(0); + } + + function _setGuardian(address who, bool allowed) internal { + vm.prank(_governor()); + _recurringCollector.setPauseGuardian(who, allowed); + } + + function _pause() internal { + vm.prank(guardian); + _recurringCollector.pause(); + } + + // ==================== setPauseGuardian ==================== + + function test_SetPauseGuardian_OK() public { + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.PauseGuardianSet(guardian, true); + _setGuardian(guardian, true); + assertTrue(_recurringCollector.isPauseGuardian(guardian)); + } + + function test_SetPauseGuardian_Remove() public { + _setGuardian(guardian, true); + + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.PauseGuardianSet(guardian, false); + _setGuardian(guardian, false); + assertFalse(_recurringCollector.isPauseGuardian(guardian)); + } + + function test_SetPauseGuardian_Revert_WhenNotGovernor() public { + vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.NotGovernor.selector, address(this))); + 
_recurringCollector.setPauseGuardian(guardian, true); + } + + function test_SetPauseGuardian_NoOp_WhenNoChange() public { + // guardian is not set, trying to set false (no change) — should be a silent no-op + vm.recordLogs(); + vm.prank(_governor()); + _recurringCollector.setPauseGuardian(guardian, false); + assertEq(vm.getRecordedLogs().length, 0, "no event on no-op"); + } + + function test_SetPauseGuardian_NoOp_WhenNoChange_AlreadySet() public { + _setGuardian(guardian, true); + + // already set to true, trying to set true again — should be a silent no-op + vm.recordLogs(); + vm.prank(_governor()); + _recurringCollector.setPauseGuardian(guardian, true); + assertEq(vm.getRecordedLogs().length, 0, "no event on no-op"); + } + + // ==================== pause / unpause ==================== + + function test_Pause_OK() public { + _setGuardian(guardian, true); + assertFalse(_recurringCollector.paused()); + + _pause(); + assertTrue(_recurringCollector.paused()); + } + + function test_Pause_Revert_WhenNotGuardian() public { + vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.NotPauseGuardian.selector, address(this))); + _recurringCollector.pause(); + } + + function test_Unpause_OK() public { + _setGuardian(guardian, true); + _pause(); + assertTrue(_recurringCollector.paused()); + + vm.prank(guardian); + _recurringCollector.unpause(); + assertFalse(_recurringCollector.paused()); + } + + function test_Unpause_Revert_WhenNotGuardian() public { + _setGuardian(guardian, true); + _pause(); + + vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.NotPauseGuardian.selector, address(this))); + _recurringCollector.unpause(); + } + + // ==================== whenNotPaused guards ==================== + + function test_Accept_Revert_WhenPaused(FuzzyTestAccept calldata fuzzy) public { + _setGuardian(guardian, true); + _pause(); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA(fuzzy.rca); + + // offer() is paused + 
vm.expectRevert(Pausable.EnforcedPause.selector); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0); + } + + function test_Collect_Revert_WhenPaused(FuzzyTestAccept calldata fuzzy) public { + // Accept first (before pausing) + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + _setGuardian(guardian, true); + _pause(); + + skip(rca.minSecondsPerCollection); + bytes memory data = _generateCollectData(_generateCollectParams(rca, agreementId, keccak256("col"), 1, 0)); + + vm.expectRevert(Pausable.EnforcedPause.selector); + vm.prank(rca.dataService); + _recurringCollector.collect(IGraphPayments.PaymentTypes.IndexingFee, data); + } + + function test_Cancel_Revert_WhenPaused(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + _setGuardian(guardian, true); + _pause(); + + vm.expectRevert(Pausable.EnforcedPause.selector); + vm.prank(rca.payer); + _recurringCollector.cancel(agreementId, activeHash, 0); + } + + function test_Update_Revert_WhenPaused(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + _setGuardian(guardian, true); + _pause(); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + IRecurringCollector.RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: 0, + endsAt: uint64(block.timestamp + 730 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 600, + maxSecondsPerCollection: 7200, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }) + ); + + // offerUpdate() is also paused + 
vm.expectRevert(Pausable.EnforcedPause.selector); + vm.prank(rca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/payments/recurring-collector/shared.t.sol b/packages/horizon/test/unit/payments/recurring-collector/shared.t.sol index 0c20ccf7f..c98bf8851 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/shared.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/shared.t.sol @@ -4,16 +4,27 @@ pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; -import { IPaymentsCollector } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsCollector.sol"; +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PAYER, + BY_PROVIDER, + OFFER_TYPE_NEW +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; import { RecurringCollector } from "../../../../contracts/payments/collectors/RecurringCollector.sol"; +import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; +import { ERC1967Utils } from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Utils.sol"; import { Bounder } from "../../../unit/utils/Bounder.t.sol"; import { PartialControllerMock } from "../../mocks/PartialControllerMock.t.sol"; import { HorizonStakingMock } from "../../mocks/HorizonStakingMock.t.sol"; import { PaymentsEscrowMock } from "./PaymentsEscrowMock.t.sol"; import { RecurringCollectorHelper } from "./RecurringCollectorHelper.t.sol"; +import { MockAcceptCallback } from "./MockAcceptCallback.t.sol"; contract 
RecurringCollectorSharedTest is Test, Bounder { struct FuzzyTestCollect { @@ -24,7 +35,6 @@ contract RecurringCollectorSharedTest is Test, Bounder { struct FuzzyTestAccept { IRecurringCollector.RecurringCollectionAgreement rca; - uint256 unboundedSignerKey; } struct FuzzyTestUpdate { @@ -36,64 +46,41 @@ contract RecurringCollectorSharedTest is Test, Bounder { PaymentsEscrowMock internal _paymentsEscrow; HorizonStakingMock internal _horizonStaking; RecurringCollectorHelper internal _recurringCollectorHelper; + address internal _proxyAdmin; + bytes internal _mockAcceptCallbackCode; - function setUp() public { + function setUp() public virtual { _paymentsEscrow = new PaymentsEscrowMock(); _horizonStaking = new HorizonStakingMock(); PartialControllerMock.Entry[] memory entries = new PartialControllerMock.Entry[](2); entries[0] = PartialControllerMock.Entry({ name: "PaymentsEscrow", addr: address(_paymentsEscrow) }); entries[1] = PartialControllerMock.Entry({ name: "Staking", addr: address(_horizonStaking) }); - _recurringCollector = new RecurringCollector( - "RecurringCollector", - "1", - address(new PartialControllerMock(entries)), - 1 + address controller = address(new PartialControllerMock(entries)); + RecurringCollector implementation = new RecurringCollector(controller); + address proxyAdminOwner = makeAddr("proxyAdminOwner"); + TransparentUpgradeableProxy proxy = new TransparentUpgradeableProxy( + address(implementation), + proxyAdminOwner, + abi.encodeCall(RecurringCollector.initialize, ()) ); - _recurringCollectorHelper = new RecurringCollectorHelper(_recurringCollector); + _recurringCollector = RecurringCollector(address(proxy)); + // Store the actual ProxyAdmin contract address to exclude from fuzz inputs + _proxyAdmin = address(uint160(uint256(vm.load(address(proxy), ERC1967Utils.ADMIN_SLOT)))); + _recurringCollectorHelper = new RecurringCollectorHelper(_recurringCollector, _proxyAdmin); + _mockAcceptCallbackCode = address(new MockAcceptCallback()).code; } - 
function _sensibleAuthorizeAndAccept( + function _sensibleAccept( FuzzyTestAccept calldata _fuzzyTestAccept - ) - internal - returns ( - IRecurringCollector.RecurringCollectionAgreement memory, - bytes memory signature, - uint256 key, - bytes16 agreementId - ) - { + ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes16 agreementId) { IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( _fuzzyTestAccept.rca ); - key = boundKey(_fuzzyTestAccept.unboundedSignerKey); - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - bytes memory sig, - bytes16 id - ) = _authorizeAndAccept(rca, key); - return (acceptedRca, sig, key, id); + agreementId = _accept(rca); + return (rca, agreementId); } - // authorizes signer, signs the RCA, and accepts it - function _authorizeAndAccept( - IRecurringCollector.RecurringCollectionAgreement memory _rca, - uint256 _signerKey - ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes memory, bytes16 agreementId) { - _recurringCollectorHelper.authorizeSignerWithChecks(_rca.payer, _signerKey); - ( - IRecurringCollector.RecurringCollectionAgreement memory rca, - bytes memory signature - ) = _recurringCollectorHelper.generateSignedRCA(_rca, _signerKey); - - agreementId = _accept(rca, signature); - return (rca, signature, agreementId); - } - - function _accept( - IRecurringCollector.RecurringCollectionAgreement memory _rca, - bytes memory _signature - ) internal returns (bytes16) { + function _accept(IRecurringCollector.RecurringCollectionAgreement memory _rca) internal returns (bytes16) { // Set up valid staking provision by default to allow collections to succeed _setupValidProvision(_rca.serviceProvider, _rca.dataService); @@ -106,28 +93,47 @@ contract RecurringCollectorSharedTest is Test, Bounder { _rca.nonce ); - vm.expectEmit(address(_recurringCollector)); - emit IRecurringCollector.AgreementAccepted( - 
_rca.dataService, - _rca.payer, - _rca.serviceProvider, - expectedAgreementId, - uint64(block.timestamp), - _rca.endsAt, - _rca.maxInitialTokens, - _rca.maxOngoingTokensPerSecond, - _rca.minSecondsPerCollection, - _rca.maxSecondsPerCollection - ); - vm.prank(_rca.dataService); - bytes16 actualAgreementId = _recurringCollector.accept(_rca, _signature); + // Step 1: Payer submits offer + vm.prank(_rca.payer); + bytes16 actualAgreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(_rca), 0).agreementId; // Verify the agreement ID matches expectation assertEq(actualAgreementId, expectedAgreementId); + + // Step 2: Service provider accepts the offer + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(actualAgreementId, 0).versionHash; + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementUpdated(expectedAgreementId, activeHash, REGISTERED | ACCEPTED); + vm.prank(_rca.serviceProvider); + _recurringCollector.accept(actualAgreementId, activeHash, bytes(""), 0); + return actualAgreementId; } + function _offer(IRecurringCollector.RecurringCollectionAgreement memory _rca) internal returns (bytes16) { + _setupValidProvision(_rca.serviceProvider, _rca.dataService); + vm.prank(_rca.payer); + return _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(_rca), 0).agreementId; + } + + function _sensibleOffer( + FuzzyTestAccept calldata _fuzzyTestAccept + ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes16 agreementId) { + IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( + _fuzzyTestAccept.rca + ); + agreementId = _offer(rca); + return (rca, agreementId); + } + function _setupValidProvision(address _serviceProvider, address _dataService) internal { + // In RC unit tests, dataService must be a fresh address so we can etch mock callback code. + // Reject fuzz inputs that collide with deployed test infrastructure. 
+ vm.assume(_dataService.code.length == 0); + // Etch mock IDataServiceAgreements code so accept/acceptUpdate callbacks succeed. + if (uint160(_dataService) > 0xFF) { + vm.etch(_dataService, _mockAcceptCallbackCode); + } _horizonStaking.setProvision( _serviceProvider, _dataService, @@ -146,22 +152,30 @@ contract RecurringCollectorSharedTest is Test, Bounder { ); } - function _cancel( + function _cancelByPayer( IRecurringCollector.RecurringCollectionAgreement memory _rca, - bytes16 _agreementId, - IRecurringCollector.CancelAgreementBy _by + bytes16 _agreementId ) internal { + bytes32 vHash = _recurringCollector.getAgreementVersionAt(_agreementId, 0).versionHash; vm.expectEmit(address(_recurringCollector)); - emit IRecurringCollector.AgreementCanceled( - _rca.dataService, - _rca.payer, - _rca.serviceProvider, + emit IRecurringCollector.AgreementUpdated(_agreementId, vHash, REGISTERED | ACCEPTED | NOTICE_GIVEN | BY_PAYER); + vm.prank(_rca.payer); + _recurringCollector.cancel(_agreementId, vHash, 0); + } + + function _cancelByProvider( + IRecurringCollector.RecurringCollectionAgreement memory _rca, + bytes16 _agreementId + ) internal { + bytes32 vHash = _recurringCollector.getAgreementVersionAt(_agreementId, 0).versionHash; + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementUpdated( _agreementId, - uint64(block.timestamp), - _by + vHash, + REGISTERED | ACCEPTED | NOTICE_GIVEN | BY_PROVIDER ); - vm.prank(_rca.dataService); - _recurringCollector.cancel(_agreementId, _by); + vm.prank(_rca.serviceProvider); + _recurringCollector.cancel(_agreementId, vHash, 0); } function _expectCollectCallAndEmit( @@ -186,26 +200,9 @@ contract RecurringCollectorSharedTest is Test, Bounder { ) ) ); - vm.expectEmit(address(_recurringCollector)); - emit IPaymentsCollector.PaymentCollected( - __paymentType, - _fuzzyParams.collectionId, - _rca.payer, - _rca.serviceProvider, - _rca.dataService, - _tokens - ); vm.expectEmit(address(_recurringCollector)); - emit 
IRecurringCollector.RCACollected( - _rca.dataService, - _rca.payer, - _rca.serviceProvider, - _agreementId, - _fuzzyParams.collectionId, - _tokens, - _fuzzyParams.dataServiceCut - ); + emit IRecurringCollector.RCACollected(_agreementId, _fuzzyParams.collectionId, REGISTERED | ACCEPTED); } function _generateValidCollection( @@ -261,11 +258,8 @@ contract RecurringCollectorSharedTest is Test, Bounder { return abi.encode(_params); } - function _fuzzyCancelAgreementBy(uint8 _seed) internal pure returns (IRecurringCollector.CancelAgreementBy) { - return - IRecurringCollector.CancelAgreementBy( - bound(_seed, 0, uint256(IRecurringCollector.CancelAgreementBy.Payer)) - ); + function _fuzzyCancelByPayer(uint8 _seed) internal pure returns (bool) { + return bound(_seed, 0, 1) == 1; } function _paymentType(uint8 _unboundedPaymentType) internal pure returns (IGraphPayments.PaymentTypes) { diff --git a/packages/horizon/test/unit/payments/recurring-collector/update.t.sol b/packages/horizon/test/unit/payments/recurring-collector/update.t.sol index d466f3c49..bd0b7ea96 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/update.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/update.t.sol @@ -1,6 +1,12 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { + REGISTERED, + ACCEPTED, + UPDATE, + OFFER_TYPE_UPDATE +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringCollectorSharedTest } from "./shared.t.sol"; @@ -16,31 +22,27 @@ contract RecurringCollectorUpdateTest is RecurringCollectorSharedTest { FuzzyTestUpdate calldata fuzzyTestUpdate, uint256 unboundedUpdateSkip ) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - uint256 signerKey, - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + 
(IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestUpdate.fuzzyTestAccept + ); IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( - fuzzyTestUpdate.rcau + fuzzyTestUpdate.rcau, + acceptedRca.payer ); rcau.agreementId = agreementId; boundSkipCeil(unboundedUpdateSkip, type(uint64).max); rcau.deadline = uint64(bound(rcau.deadline, 0, block.timestamp - 1)); - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAU(rcau, signerKey); - bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorAgreementDeadlineElapsed.selector, + IRecurringCollector.AgreementDeadlineElapsed.selector, block.timestamp, rcau.deadline ); vm.expectRevert(expectedErr); - vm.prank(acceptedRca.dataService); - _recurringCollector.update(rcau, signature); + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); } function test_Update_Revert_WhenNeverAccepted( @@ -59,177 +61,190 @@ contract RecurringCollectorUpdateTest is RecurringCollectorSharedTest { ); rcau.agreementId = agreementId; - rcau.deadline = uint64(block.timestamp); - + // accept checks serviceProvider first — non-existent agreement has serviceProvider = address(0) bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorAgreementIncorrectState.selector, - rcau.agreementId, - IRecurringCollector.AgreementState.NotAccepted + IRecurringCollector.UnauthorizedServiceProvider.selector, + rca.serviceProvider, + address(0) ); vm.expectRevert(expectedErr); - vm.prank(rca.dataService); - _recurringCollector.update(rcau, ""); + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, bytes32(0), bytes(""), 0); } - function test_Update_Revert_WhenDataServiceNotAuthorized( + function test_Update_Revert_WhenServiceProviderNotAuthorized( FuzzyTestUpdate calldata fuzzyTestUpdate, - 
address notDataService + address notServiceProvider ) public { - vm.assume(fuzzyTestUpdate.fuzzyTestAccept.rca.dataService != notDataService); - (, , uint256 signerKey, bytes16 agreementId) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + vm.assume(fuzzyTestUpdate.fuzzyTestAccept.rca.serviceProvider != notServiceProvider); + vm.assume(notServiceProvider != _proxyAdmin); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestUpdate.fuzzyTestAccept + ); IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( - fuzzyTestUpdate.rcau + fuzzyTestUpdate.rcau, + acceptedRca.payer ); rcau.agreementId = agreementId; + rcau.nonce = 1; - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAUWithCorrectNonce(rcau, signerKey); + // Step 1: Payer submits offerUpdate + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + // Step 2: Wrong caller tries to accept - should revert + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorDataServiceNotAuthorized.selector, - rcau.agreementId, - notDataService + IRecurringCollector.UnauthorizedServiceProvider.selector, + notServiceProvider, + fuzzyTestUpdate.fuzzyTestAccept.rca.serviceProvider ); vm.expectRevert(expectedErr); - vm.prank(notDataService); - _recurringCollector.update(rcau, signature); + vm.prank(notServiceProvider); + _recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); } - function test_Update_Revert_WhenInvalidSigner( + function test_Update_Revert_WhenUnauthorizedPayer( FuzzyTestUpdate calldata fuzzyTestUpdate, - uint256 unboundedInvalidSignerKey + address notPayer ) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - , - bytes16 agreementId - 
) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); - uint256 signerKey = boundKey(fuzzyTestUpdate.fuzzyTestAccept.unboundedSignerKey); - uint256 invalidSignerKey = boundKey(unboundedInvalidSignerKey); - vm.assume(signerKey != invalidSignerKey); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestUpdate.fuzzyTestAccept + ); + vm.assume(notPayer != acceptedRca.payer); IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( - fuzzyTestUpdate.rcau + fuzzyTestUpdate.rcau, + acceptedRca.payer ); rcau.agreementId = agreementId; - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAU(rcau, invalidSignerKey); + vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.UnauthorizedPayer.selector, notPayer, acceptedRca.payer) + ); + vm.prank(notPayer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + } - vm.expectRevert(IRecurringCollector.RecurringCollectorInvalidSigner.selector); - vm.prank(acceptedRca.dataService); - _recurringCollector.update(rcau, signature); + function test_Update_Revert_WhenMaxOngoingTokensOverflows(FuzzyTestUpdate calldata fuzzyTestUpdate) public { + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestUpdate.fuzzyTestAccept + ); + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( + fuzzyTestUpdate.rcau, + acceptedRca.payer + ); + rcau.agreementId = agreementId; + rcau.nonce = 1; + // maxOngoingTokensPerSecond * maxSecondsPerCollection overflows uint256 + rcau.maxOngoingTokensPerSecond = type(uint256).max; + + vm.expectRevert(abi.encodeWithSignature("Panic(uint256)", 0x11)); + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); } function test_Update_OK(FuzzyTestUpdate calldata fuzzyTestUpdate) 
public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - uint256 signerKey, - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestUpdate.fuzzyTestAccept + ); IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( - fuzzyTestUpdate.rcau + fuzzyTestUpdate.rcau, + acceptedRca.payer ); rcau.agreementId = agreementId; // Don't use fuzzed nonce - use correct nonce for first update rcau.nonce = 1; - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAU(rcau, signerKey); + // Step 1: Payer submits offerUpdate + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + // Step 2: Service provider accepts the update + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; vm.expectEmit(address(_recurringCollector)); - emit IRecurringCollector.AgreementUpdated( - acceptedRca.dataService, - acceptedRca.payer, - acceptedRca.serviceProvider, - rcau.agreementId, - uint64(block.timestamp), - rcau.endsAt, - rcau.maxInitialTokens, - rcau.maxOngoingTokensPerSecond, - rcau.minSecondsPerCollection, - rcau.maxSecondsPerCollection - ); - vm.prank(acceptedRca.dataService); - _recurringCollector.update(rcau, signature); - - IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); - assertEq(rcau.endsAt, agreement.endsAt); - assertEq(rcau.maxInitialTokens, agreement.maxInitialTokens); - assertEq(rcau.maxOngoingTokensPerSecond, agreement.maxOngoingTokensPerSecond); - assertEq(rcau.minSecondsPerCollection, agreement.minSecondsPerCollection); - assertEq(rcau.maxSecondsPerCollection, agreement.maxSecondsPerCollection); - assertEq(rcau.nonce, agreement.updateNonce); + emit 
IRecurringCollector.AgreementUpdated(rcau.agreementId, pendingHash, REGISTERED | ACCEPTED | UPDATE); + vm.prank(acceptedRca.serviceProvider); + _recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); + + IRecurringCollector.AgreementData memory updatedAgreement = _recurringCollector.getAgreementData(agreementId); + { + (, bytes memory activeOfferData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreementUpdate memory activeRcau = abi.decode( + activeOfferData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(rcau.endsAt, activeRcau.endsAt); + assertEq(rcau.maxInitialTokens, activeRcau.maxInitialTokens); + assertEq(rcau.maxOngoingTokensPerSecond, activeRcau.maxOngoingTokensPerSecond); + assertEq(rcau.minSecondsPerCollection, activeRcau.minSecondsPerCollection); + assertEq(rcau.maxSecondsPerCollection, activeRcau.maxSecondsPerCollection); + } + assertEq(rcau.nonce, updatedAgreement.updateNonce); } function test_Update_Revert_WhenInvalidNonce_TooLow(FuzzyTestUpdate calldata fuzzyTestUpdate) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - uint256 signerKey, - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestUpdate.fuzzyTestAccept + ); IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( - fuzzyTestUpdate.rcau + fuzzyTestUpdate.rcau, + acceptedRca.payer ); rcau.agreementId = agreementId; rcau.nonce = 0; // Invalid: should be 1 for first update - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAU(rcau, signerKey); - bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorInvalidUpdateNonce.selector, + IRecurringCollector.InvalidUpdateNonce.selector, rcau.agreementId, 
1, // expected 0 // provided ); vm.expectRevert(expectedErr); - vm.prank(acceptedRca.dataService); - _recurringCollector.update(rcau, signature); + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); } function test_Update_Revert_WhenInvalidNonce_TooHigh(FuzzyTestUpdate calldata fuzzyTestUpdate) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - uint256 signerKey, - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestUpdate.fuzzyTestAccept + ); IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _recurringCollectorHelper.sensibleRCAU( - fuzzyTestUpdate.rcau + fuzzyTestUpdate.rcau, + acceptedRca.payer ); rcau.agreementId = agreementId; rcau.nonce = 5; // Invalid: should be 1 for first update - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCAU(rcau, signerKey); - bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorInvalidUpdateNonce.selector, + IRecurringCollector.InvalidUpdateNonce.selector, rcau.agreementId, 1, // expected 5 // provided ); vm.expectRevert(expectedErr); - vm.prank(acceptedRca.dataService); - _recurringCollector.update(rcau, signature); + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); } function test_Update_Revert_WhenReplayAttack(FuzzyTestUpdate calldata fuzzyTestUpdate) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - uint256 signerKey, - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestUpdate.fuzzyTestAccept + ); IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 
= _recurringCollectorHelper.sensibleRCAU( - fuzzyTestUpdate.rcau + fuzzyTestUpdate.rcau, + acceptedRca.payer ); rcau1.agreementId = agreementId; rcau1.nonce = 1; - // First update succeeds - (, bytes memory signature1) = _recurringCollectorHelper.generateSignedRCAU(rcau1, signerKey); - vm.prank(acceptedRca.dataService); - _recurringCollector.update(rcau1, signature1); + // First update succeeds (offerUpdate + accept) + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau1), 0); + bytes32 pendingHash1 = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(acceptedRca.serviceProvider); + _recurringCollector.accept(agreementId, pendingHash1, bytes(""), 0); // Second update with different terms and nonce 2 succeeds IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = IRecurringCollector @@ -241,51 +256,55 @@ contract RecurringCollectorUpdateTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: rcau1.maxOngoingTokensPerSecond * 2, // Different terms minSecondsPerCollection: rcau1.minSecondsPerCollection, maxSecondsPerCollection: rcau1.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: 2, metadata: rcau1.metadata }); - (, bytes memory signature2) = _recurringCollectorHelper.generateSignedRCAU(rcau2, signerKey); - vm.prank(acceptedRca.dataService); - _recurringCollector.update(rcau2, signature2); + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau2), 0); + bytes32 pendingHash2 = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(acceptedRca.serviceProvider); + _recurringCollector.accept(agreementId, pendingHash2, bytes(""), 0); - // Attempting to replay first update should fail + // Attempting to replay first update should fail (nonce check in offerUpdate) bytes memory expectedErr = abi.encodeWithSelector( - 
IRecurringCollector.RecurringCollectorInvalidUpdateNonce.selector, + IRecurringCollector.InvalidUpdateNonce.selector, rcau1.agreementId, 3, // expected (current nonce + 1) 1 // provided (old nonce) ); vm.expectRevert(expectedErr); - vm.prank(acceptedRca.dataService); - _recurringCollector.update(rcau1, signature1); + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau1), 0); } function test_Update_OK_NonceIncrementsCorrectly(FuzzyTestUpdate calldata fuzzyTestUpdate) public { - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - , - uint256 signerKey, - bytes16 agreementId - ) = _sensibleAuthorizeAndAccept(fuzzyTestUpdate.fuzzyTestAccept); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 agreementId) = _sensibleAccept( + fuzzyTestUpdate.fuzzyTestAccept + ); // Initial nonce should be 0 - IRecurringCollector.AgreementData memory initialAgreement = _recurringCollector.getAgreement(agreementId); + IRecurringCollector.AgreementData memory initialAgreement = _recurringCollector.getAgreementData(agreementId); assertEq(initialAgreement.updateNonce, 0); // First update with nonce 1 IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _recurringCollectorHelper.sensibleRCAU( - fuzzyTestUpdate.rcau + fuzzyTestUpdate.rcau, + acceptedRca.payer ); rcau1.agreementId = agreementId; rcau1.nonce = 1; - (, bytes memory signature1) = _recurringCollectorHelper.generateSignedRCAU(rcau1, signerKey); - vm.prank(acceptedRca.dataService); - _recurringCollector.update(rcau1, signature1); + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau1), 0); + bytes32 pendingHash1 = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(acceptedRca.serviceProvider); + _recurringCollector.accept(agreementId, pendingHash1, bytes(""), 0); // Verify nonce incremented to 1 - IRecurringCollector.AgreementData memory updatedAgreement1 
= _recurringCollector.getAgreement(agreementId); + IRecurringCollector.AgreementData memory updatedAgreement1 = _recurringCollector.getAgreementData(agreementId); assertEq(updatedAgreement1.updateNonce, 1); // Second update with nonce 2 @@ -298,16 +317,20 @@ contract RecurringCollectorUpdateTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: rcau1.maxOngoingTokensPerSecond * 2, // Different terms minSecondsPerCollection: rcau1.minSecondsPerCollection, maxSecondsPerCollection: rcau1.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: 2, metadata: rcau1.metadata }); - (, bytes memory signature2) = _recurringCollectorHelper.generateSignedRCAU(rcau2, signerKey); - vm.prank(acceptedRca.dataService); - _recurringCollector.update(rcau2, signature2); + vm.prank(acceptedRca.payer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau2), 0); + bytes32 pendingHash2 = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(acceptedRca.serviceProvider); + _recurringCollector.accept(agreementId, pendingHash2, bytes(""), 0); // Verify nonce incremented to 2 - IRecurringCollector.AgreementData memory updatedAgreement2 = _recurringCollector.getAgreement(agreementId); + IRecurringCollector.AgreementData memory updatedAgreement2 = _recurringCollector.getAgreementData(agreementId); assertEq(updatedAgreement2.updateNonce, 2); } diff --git a/packages/horizon/test/unit/payments/recurring-collector/updateUnsigned.t.sol b/packages/horizon/test/unit/payments/recurring-collector/updateUnsigned.t.sol index 22016075a..b3e24179d 100644 --- a/packages/horizon/test/unit/payments/recurring-collector/updateUnsigned.t.sol +++ b/packages/horizon/test/unit/payments/recurring-collector/updateUnsigned.t.sol @@ -1,6 +1,13 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { + REGISTERED, + ACCEPTED, + UPDATE, + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE +} from 
"@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringCollectorSharedTest } from "./shared.t.sol"; @@ -11,18 +18,23 @@ contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { return new MockAgreementOwner(); } - /// @notice Helper to accept an agreement via the unsigned path and return the ID + /// @notice Helper to accept an agreement via the offer+accept path and return the ID function _acceptUnsigned( MockAgreementOwner approver, IRecurringCollector.RecurringCollectionAgreement memory rca ) internal returns (bytes16) { - bytes32 agreementHash = _recurringCollector.hashRCA(rca); - approver.authorize(agreementHash); - _setupValidProvision(rca.serviceProvider, rca.dataService); - vm.prank(rca.dataService); - return _recurringCollector.accept(rca, ""); + // Payer calls offer + vm.prank(address(approver)); + bytes16 agreementId = _recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + // Service provider accepts with stored hash + bytes32 activeHash = _recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, activeHash, bytes(""), 0); + + return agreementId; } function _makeSimpleRCA(address payer) internal returns (IRecurringCollector.RecurringCollectionAgreement memory) { @@ -38,6 +50,8 @@ contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: 1 ether, minSecondsPerCollection: 600, maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: 1, metadata: "" }) @@ -58,6 +72,8 @@ contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { maxOngoingTokensPerSecond: 2 ether, minSecondsPerCollection: 600, maxSecondsPerCollection: 7200, + conditions: 0, + minSecondsPayerCancellationNotice: 0, 
nonce: nonce, metadata: "" }) @@ -74,74 +90,38 @@ contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); - // Authorize the update hash - bytes32 updateHash = _recurringCollector.hashRCAU(rcau); - approver.authorize(updateHash); - - vm.expectEmit(address(_recurringCollector)); - emit IRecurringCollector.AgreementUpdated( - rca.dataService, - rca.payer, - rca.serviceProvider, - agreementId, - uint64(block.timestamp), - rcau.endsAt, - rcau.maxInitialTokens, - rcau.maxOngoingTokensPerSecond, - rcau.minSecondsPerCollection, - rcau.maxSecondsPerCollection - ); - - vm.prank(rca.dataService); - _recurringCollector.update(rcau, ""); + // Payer calls offerUpdate + vm.prank(address(approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); - IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreement(agreementId); - assertEq(rcau.endsAt, agreement.endsAt); - assertEq(rcau.maxInitialTokens, agreement.maxInitialTokens); - assertEq(rcau.maxOngoingTokensPerSecond, agreement.maxOngoingTokensPerSecond); - assertEq(rcau.minSecondsPerCollection, agreement.minSecondsPerCollection); - assertEq(rcau.maxSecondsPerCollection, agreement.maxSecondsPerCollection); - assertEq(rcau.nonce, agreement.updateNonce); - } + // Service provider accepts update with stored hash + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; - function test_UpdateUnsigned_Revert_WhenPayerNotContract() public { - // Use the signed accept path to create an agreement with an EOA payer, - // then attempt updateUnsigned which should fail because payer isn't a contract - uint256 signerKey = 0xA11CE; - address payer = vm.addr(signerKey); - IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( - IRecurringCollector.RecurringCollectionAgreement({ - deadline: 
uint64(block.timestamp + 1 hours), - endsAt: uint64(block.timestamp + 365 days), - payer: payer, - dataService: makeAddr("ds"), - serviceProvider: makeAddr("sp"), - maxInitialTokens: 100 ether, - maxOngoingTokensPerSecond: 1 ether, - minSecondsPerCollection: 600, - maxSecondsPerCollection: 3600, - nonce: 1, - metadata: "" - }) - ); + vm.expectEmit(address(_recurringCollector)); + emit IRecurringCollector.AgreementUpdated(agreementId, pendingHash, REGISTERED | ACCEPTED | UPDATE); - // Accept via signed path - _recurringCollectorHelper.authorizeSignerWithChecks(payer, signerKey); - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA(rca, signerKey); - _setupValidProvision(rca.serviceProvider, rca.dataService); - vm.prank(rca.dataService); - bytes16 agreementId = _recurringCollector.accept(rca, signature); + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); - - vm.expectRevert( - abi.encodeWithSelector(IRecurringCollector.RecurringCollectorApproverNotContract.selector, payer) - ); - vm.prank(rca.dataService); - _recurringCollector.update(rcau, ""); + IRecurringCollector.AgreementData memory updated = _recurringCollector.getAgreementData(agreementId); + { + (, bytes memory activeOfferData) = _recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreementUpdate memory activeRcau = abi.decode( + activeOfferData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(rcau.endsAt, activeRcau.endsAt); + assertEq(rcau.maxInitialTokens, activeRcau.maxInitialTokens); + assertEq(rcau.maxOngoingTokensPerSecond, activeRcau.maxOngoingTokensPerSecond); + assertEq(rcau.minSecondsPerCollection, activeRcau.minSecondsPerCollection); + assertEq(rcau.maxSecondsPerCollection, activeRcau.maxSecondsPerCollection); + } + assertEq(rcau.nonce, 
updated.updateNonce); } function test_UpdateUnsigned_Revert_WhenHashNotAuthorized() public { + // With the offer/accept update path, the hash is stored by offerUpdate(). + // This test verifies that accept() with a mismatched hash fails. MockAgreementOwner approver = _newApprover(); IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); @@ -149,13 +129,29 @@ contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); - // Don't authorize the update hash — approver returns bytes4(0), caller rejects - vm.expectRevert(IRecurringCollector.RecurringCollectorInvalidSigner.selector); - vm.prank(rca.dataService); - _recurringCollector.update(rcau, ""); + // Payer calls offerUpdate + vm.prank(address(approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + + // Service provider tries to accept with wrong hash + bytes32 badHash = bytes32(uint256(0xdead)); + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.AgreementHashMismatch.selector, + agreementId, + pendingHash, + badHash + ) + ); + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, badHash, bytes(""), 0); } function test_UpdateUnsigned_Revert_WhenWrongMagicValue() public { + // With offer/accept, there is no approveAgreement callback. Instead, test + // that a non-payer cannot call offerUpdate. 
MockAgreementOwner approver = _newApprover(); IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); @@ -163,11 +159,12 @@ contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); - approver.setOverrideReturnValue(bytes4(0xdeadbeef)); - - vm.expectRevert(abi.encodeWithSelector(IRecurringCollector.RecurringCollectorInvalidSigner.selector)); - vm.prank(rca.dataService); - _recurringCollector.update(rcau, ""); + address notPayer = makeAddr("notPayer"); + vm.expectRevert( + abi.encodeWithSelector(IRecurringCollector.UnauthorizedPayer.selector, notPayer, address(approver)) + ); + vm.prank(notPayer); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); } function test_UpdateUnsigned_Revert_WhenNotDataService() public { @@ -178,34 +175,38 @@ contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); - bytes32 updateHash = _recurringCollector.hashRCAU(rcau); - approver.authorize(updateHash); + // Payer calls offerUpdate + vm.prank(address(approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; - address notDataService = makeAddr("notDataService"); + address notServiceProvider = makeAddr("notServiceProvider"); vm.expectRevert( abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorDataServiceNotAuthorized.selector, - agreementId, - notDataService + IRecurringCollector.UnauthorizedServiceProvider.selector, + notServiceProvider, + rca.serviceProvider ) ); - vm.prank(notDataService); - _recurringCollector.update(rcau, ""); + vm.prank(notServiceProvider); + _recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); } function 
test_UpdateUnsigned_Revert_WhenNotAccepted() public { - // Don't accept — just try to update a non-existent agreement + // Don't accept — just try to accept a non-existent agreement bytes16 fakeId = bytes16(keccak256("fake")); - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(fakeId, 1); + address caller = makeAddr("ds"); + // accept checks serviceProvider first — non-existent agreement has serviceProvider = address(0) bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorAgreementIncorrectState.selector, - fakeId, - IRecurringCollector.AgreementState.NotAccepted + IRecurringCollector.UnauthorizedServiceProvider.selector, + caller, + address(0) ); vm.expectRevert(expectedErr); - vm.prank(makeAddr("ds")); - _recurringCollector.update(rcau, ""); + vm.prank(caller); + _recurringCollector.accept(fakeId, bytes32(0), bytes(""), 0); } function test_UpdateUnsigned_Revert_WhenInvalidNonce() public { @@ -217,21 +218,20 @@ contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { // Use wrong nonce (0 instead of 1) IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 0); - bytes32 updateHash = _recurringCollector.hashRCAU(rcau); - approver.authorize(updateHash); - bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorInvalidUpdateNonce.selector, + IRecurringCollector.InvalidUpdateNonce.selector, agreementId, 1, // expected 0 // provided ); vm.expectRevert(expectedErr); - vm.prank(rca.dataService); - _recurringCollector.update(rcau, ""); + vm.prank(address(approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); } function test_UpdateUnsigned_Revert_WhenApproverReverts() public { + // With the offer/accept path, the "approver reverts" concept translates to + // accept with a wrong hash. 
MockAgreementOwner approver = _newApprover(); IRecurringCollector.RecurringCollectionAgreement memory rca = _makeSimpleRCA(address(approver)); @@ -239,11 +239,24 @@ contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeSimpleRCAU(agreementId, 1); - approver.setShouldRevert(true); + // Payer calls offerUpdate + vm.prank(address(approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = _recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; - vm.expectRevert("MockAgreementOwner: forced revert"); - vm.prank(rca.dataService); - _recurringCollector.update(rcau, ""); + // Service provider accepts with a wrong hash + bytes32 wrongHash = bytes32(uint256(1)); + vm.expectRevert( + abi.encodeWithSelector( + IRecurringCollector.AgreementHashMismatch.selector, + agreementId, + pendingHash, + wrongHash + ) + ); + vm.prank(rca.serviceProvider); + _recurringCollector.accept(agreementId, wrongHash, bytes(""), 0); } function test_UpdateUnsigned_Revert_WhenDeadlineElapsed() public { @@ -257,17 +270,14 @@ contract RecurringCollectorUpdateUnsignedTest is RecurringCollectorSharedTest { // Set the update deadline in the past rcau.deadline = uint64(block.timestamp - 1); - bytes32 updateHash = _recurringCollector.hashRCAU(rcau); - approver.authorize(updateHash); - bytes memory expectedErr = abi.encodeWithSelector( - IRecurringCollector.RecurringCollectorAgreementDeadlineElapsed.selector, + IRecurringCollector.AgreementDeadlineElapsed.selector, block.timestamp, rcau.deadline ); vm.expectRevert(expectedErr); - vm.prank(rca.dataService); - _recurringCollector.update(rcau, ""); + vm.prank(address(approver)); + _recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/horizon/test/unit/payments/recurring-collector/viewFunctions.t.sol 
b/packages/horizon/test/unit/payments/recurring-collector/viewFunctions.t.sol new file mode 100644 index 000000000..55b770fa8 --- /dev/null +++ b/packages/horizon/test/unit/payments/recurring-collector/viewFunctions.t.sol @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { REGISTERED, ACCEPTED } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; + +import { RecurringCollectorSharedTest } from "./shared.t.sol"; + +/// @notice Tests for view functions: getAgreementData (isCollectable, collectionSeconds), getMaxNextClaim. +contract RecurringCollectorViewFunctionsTest is RecurringCollectorSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // ==================== getAgreementData collectability ==================== + + function test_GetAgreementData_Accepted(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Advance past minSecondsPerCollection so it's collectable + skip(rca.minSecondsPerCollection); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + + assertTrue(agreement.isCollectable, "Accepted agreement should be collectable"); + assertTrue(agreement.collectionSeconds > 0, "Collection seconds should be > 0"); + } + + function test_GetAgreementData_CanceledBySP(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + _cancelByProvider(rca, agreementId); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + + assertFalse(agreement.isCollectable, "CanceledBySP should not be 
collectable"); + } + + function test_GetAgreementData_None() public view { + // Query a non-existent agreement + bytes16 fakeId = bytes16(keccak256("nonexistent")); + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(fakeId); + + assertFalse(agreement.isCollectable); + } + + function test_GetAgreementData_CanceledByPayer_SameBlock(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + // Payer cancel in the same block as accept — zero time elapsed since acceptedAt. + _cancelByPayer(rca, agreementId); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + // collectionEnd == collectionStart -> ZeroCollectionSeconds regardless of notice period + assertFalse(agreement.isCollectable, "Same-block cancel should not be collectable (zero elapsed time)"); + } + + function test_GetAgreementData_CanceledByPayer_WithWindow(FuzzyTestAccept calldata fuzzy) public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _sensibleAccept(fuzzy); + + skip(rca.minSecondsPerCollection + 100); + _cancelByPayer(rca, agreementId); + + IRecurringCollector.AgreementData memory agreement = _recurringCollector.getAgreementData(agreementId); + + assertTrue(agreement.isCollectable, "CanceledByPayer with elapsed time should be collectable"); + assertTrue(agreement.collectionSeconds > 0); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/horizon/test/unit/utilities/Authorizable.t.sol b/packages/horizon/test/unit/utilities/Authorizable.t.sol index 66c4bb921..18ed8df54 100644 --- a/packages/horizon/test/unit/utilities/Authorizable.t.sol +++ b/packages/horizon/test/unit/utilities/Authorizable.t.sol @@ -37,8 +37,14 @@ contract AuthorizableTest is Test, Bounder { return new AuthorizableImp(_thawPeriod); } + /// @dev Override to exclude addresses 
that would interfere with fuzz tests + /// (e.g. proxy admin addresses that reject non-admin calls with a different error). + function assumeValidFuzzAddress(address addr) internal virtual { + vm.assume(addr != address(0)); + } + function test_AuthorizeSigner(uint256 _unboundedKey, address _authorizer) public { - vm.assume(_authorizer != address(0)); + assumeValidFuzzAddress(_authorizer); uint256 signerKey = boundKey(_unboundedKey); authHelper.authorizeSignerWithChecks(_authorizer, signerKey); @@ -141,15 +147,15 @@ contract AuthorizableTest is Test, Bounder { } function test_ThawSigner(address _authorizer, uint256 _unboundedKey, uint256 _thaw) public withFuzzyThaw(_thaw) { - vm.assume(_authorizer != address(0)); + assumeValidFuzzAddress(_authorizer); uint256 signerKey = boundKey(_unboundedKey); authHelper.authorizeAndThawSignerWithChecks(_authorizer, signerKey); } function test_ThawSigner_Revert_WhenNotAuthorized(address _authorizer, address _signer) public { - vm.assume(_authorizer != address(0)); - vm.assume(_signer != address(0)); + assumeValidFuzzAddress(_authorizer); + assumeValidFuzzAddress(_signer); bytes memory expectedErr = abi.encodeWithSelector( IAuthorizable.AuthorizableSignerNotAuthorized.selector, @@ -166,7 +172,7 @@ contract AuthorizableTest is Test, Bounder { uint256 _unboundedKey, uint256 _thaw ) public withFuzzyThaw(_thaw) { - vm.assume(_authorizer != address(0)); + assumeValidFuzzAddress(_authorizer); (uint256 signerKey, address signer) = boundAddrAndKey(_unboundedKey); authHelper.authorizeAndRevokeSignerWithChecks(_authorizer, signerKey); @@ -185,7 +191,7 @@ contract AuthorizableTest is Test, Bounder { uint256 _unboundedKey, uint256 _thaw ) public withFuzzyThaw(_thaw) { - vm.assume(_authorizer != address(0)); + assumeValidFuzzAddress(_authorizer); (uint256 signerKey, address signer) = boundAddrAndKey(_unboundedKey); authHelper.authorizeAndThawSignerWithChecks(_authorizer, signerKey); @@ -198,8 +204,8 @@ contract AuthorizableTest is Test, Bounder 
{ } function test_CancelThawSigner_Revert_When_NotAuthorized(address _authorizer, address _signer) public { - vm.assume(_authorizer != address(0)); - vm.assume(_signer != address(0)); + assumeValidFuzzAddress(_authorizer); + assumeValidFuzzAddress(_signer); bytes memory expectedErr = abi.encodeWithSelector( IAuthorizable.AuthorizableSignerNotAuthorized.selector, @@ -216,7 +222,7 @@ contract AuthorizableTest is Test, Bounder { uint256 _unboundedKey, uint256 _thaw ) public withFuzzyThaw(_thaw) { - vm.assume(_authorizer != address(0)); + assumeValidFuzzAddress(_authorizer); (uint256 signerKey, address signer) = boundAddrAndKey(_unboundedKey); authHelper.authorizeAndRevokeSignerWithChecks(_authorizer, signerKey); @@ -231,7 +237,7 @@ contract AuthorizableTest is Test, Bounder { } function test_CancelThawSigner_Revert_When_NotThawing(address _authorizer, uint256 _unboundedKey) public { - vm.assume(_authorizer != address(0)); + assumeValidFuzzAddress(_authorizer); (uint256 signerKey, address signer) = boundAddrAndKey(_unboundedKey); authHelper.authorizeSignerWithChecks(_authorizer, signerKey); @@ -247,15 +253,15 @@ contract AuthorizableTest is Test, Bounder { uint256 _unboundedKey, uint256 _thaw ) public withFuzzyThaw(_thaw) { - vm.assume(_authorizer != address(0)); + assumeValidFuzzAddress(_authorizer); uint256 signerKey = boundKey(_unboundedKey); authHelper.authorizeAndRevokeSignerWithChecks(_authorizer, signerKey); } function test_RevokeAuthorizedSigner_Revert_WhenNotAuthorized(address _authorizer, address _signer) public { - vm.assume(_authorizer != address(0)); - vm.assume(_signer != address(0)); + assumeValidFuzzAddress(_authorizer); + assumeValidFuzzAddress(_signer); bytes memory expectedErr = abi.encodeWithSelector( IAuthorizable.AuthorizableSignerNotAuthorized.selector, @@ -272,7 +278,7 @@ contract AuthorizableTest is Test, Bounder { uint256 _unboundedKey, uint256 _thaw ) public withFuzzyThaw(_thaw) { - vm.assume(_authorizer != address(0)); + 
assumeValidFuzzAddress(_authorizer); (uint256 signerKey, address signer) = boundAddrAndKey(_unboundedKey); authHelper.authorizeAndRevokeSignerWithChecks(_authorizer, signerKey); @@ -287,7 +293,7 @@ contract AuthorizableTest is Test, Bounder { } function test_RevokeAuthorizedSigner_Revert_WhenNotThawing(address _authorizer, uint256 _unboundedKey) public { - vm.assume(_authorizer != address(0)); + assumeValidFuzzAddress(_authorizer); (uint256 signerKey, address signer) = boundAddrAndKey(_unboundedKey); authHelper.authorizeSignerWithChecks(_authorizer, signerKey); @@ -303,7 +309,7 @@ contract AuthorizableTest is Test, Bounder { uint256 _thaw, uint256 _skip ) public withFuzzyThaw(_thaw) { - vm.assume(_authorizer != address(0)); + assumeValidFuzzAddress(_authorizer); (uint256 signerKey, address signer) = boundAddrAndKey(_unboundedKey); authHelper.authorizeAndThawSignerWithChecks(_authorizer, signerKey); diff --git a/packages/interfaces/contracts/contracts/rewards/IRewardsManager.sol b/packages/interfaces/contracts/contracts/rewards/IRewardsManager.sol index 43a13d791..205bde73c 100644 --- a/packages/interfaces/contracts/contracts/rewards/IRewardsManager.sol +++ b/packages/interfaces/contracts/contracts/rewards/IRewardsManager.sol @@ -134,6 +134,21 @@ interface IRewardsManager { */ function setDefaultReclaimAddress(address newDefaultReclaimAddress) external; + /** + * @notice Set whether ineligible indexers cause takeRewards to revert + * @dev When true, takeRewards reverts for ineligible indexers, keeping rewards claimable + * if the indexer becomes eligible and collects before the allocation goes stale. + * When false (default), takeRewards succeeds but rewards are reclaimed. 
+ * @param revertOnIneligible True to revert on ineligible, false to reclaim + */ + function setRevertOnIneligible(bool revertOnIneligible) external; + + /** + * @notice Get whether ineligible indexers cause takeRewards to revert + * @return revertOnIneligible True if takeRewards reverts for ineligible indexers + */ + function getRevertOnIneligible() external view returns (bool revertOnIneligible); + // -- Denylist -- /** diff --git a/packages/interfaces/contracts/data-service/IDataServiceAgreements.sol b/packages/interfaces/contracts/data-service/IDataServiceAgreements.sol index ea5b0dd54..d7d7e286d 100644 --- a/packages/interfaces/contracts/data-service/IDataServiceAgreements.sol +++ b/packages/interfaces/contracts/data-service/IDataServiceAgreements.sol @@ -1,19 +1,40 @@ // SPDX-License-Identifier: GPL-3.0-or-later pragma solidity ^0.8.22; +import { IAgreementStateChangeCallback } from "../horizon/IAgreementStateChangeCallback.sol"; + /** * @title Interface for data services that manage indexing agreements. * @author Edge & Node - * @notice Interface to support payer-initiated cancellation of indexing agreements. - * Any data service that participates in agreement lifecycle management via - * {RecurringAgreementManager} should implement this interface. + * @notice Callback interface that data services implement to participate in + * agreement lifecycle management via {RecurringCollector}. + * - {acceptAgreement}: Reverting callback during accept — validates and sets up + * domain-specific state (e.g. allocation binding). CAN revert to reject the transition. + * - {afterAgreementStateChange} (inherited): Non-reverting notification on lifecycle events. + * Implementations should filter by state flags and ignore unrecognised combinations. * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. 
*/ -interface IDataServiceAgreements { +interface IDataServiceAgreements is IAgreementStateChangeCallback { /** - * @notice Cancel an indexing agreement by payer / signer. - * @param agreementId The id of the indexing agreement + * @notice Called when a service provider accepts an agreement (initial or update). + * @dev Revert to reject the acceptance. Called before the collector finalizes state. + * For initial acceptance the data service should set up domain-specific state (e.g. bind allocation). + * For updates the data service should validate and apply updated terms. + * The data service can distinguish initial vs update by checking its own state for the agreementId. + * @param agreementId The ID of the agreement being accepted + * @param versionHash The hash of the terms version being accepted + * @param payer The address of the payer + * @param serviceProvider The address of the service provider accepting + * @param metadata The agreement metadata (data-service-specific) + * @param extraData Opaque data forwarded from the accept calldata (e.g. allocationId) */ - function cancelIndexingAgreementByPayer(bytes16 agreementId) external; + function acceptAgreement( + bytes16 agreementId, + bytes32 versionHash, + address payer, + address serviceProvider, + bytes calldata metadata, + bytes calldata extraData + ) external; } diff --git a/packages/interfaces/contracts/horizon/IAgreementCollector.sol b/packages/interfaces/contracts/horizon/IAgreementCollector.sol new file mode 100644 index 000000000..b855515ca --- /dev/null +++ b/packages/interfaces/contracts/horizon/IAgreementCollector.sol @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.22; + +import { IPaymentsCollector } from "./IPaymentsCollector.sol"; + +// -- Agreement state flags -- +// REGISTERED, ACCEPTED are monotonic (once set, never cleared). +// All other flags are clearable — cleared when pending terms are accepted. 
+ +/// @dev Offer exists in storage +uint16 constant REGISTERED = 1; +/// @dev Provider accepted terms +uint16 constant ACCEPTED = 2; +/// @dev collectableUntil has been reduced, collection capped (clearable) +uint16 constant NOTICE_GIVEN = 4; +/// @dev Nothing to collect in current state (clearable — cleared on new terms promotion) +uint16 constant SETTLED = 8; + +// -- Who-initiated flags (clearable, meaningful when NOTICE_GIVEN is set) -- + +/// @dev Notice given by payer +uint16 constant BY_PAYER = 16; +/// @dev Notice given by provider (forfeit — immediate SETTLED) +uint16 constant BY_PROVIDER = 32; +/// @dev Notice given by data service +uint16 constant BY_DATA_SERVICE = 64; + +// -- Update-origin flag -- + +/// @dev Terms originated from an RCAU (update), not the initial RCA. +/// Set on agreement state when active terms come from an accepted or pre-acceptance update. +/// ORed into returned state by getAgreementVersionAt for pending versions (index 1). +uint16 constant UPDATE = 128; + +// -- Togglable option flags (set via accept options parameter) -- + +/// @dev Provider opts in to automatic update on final collect +uint16 constant AUTO_UPDATE = 256; + +// -- Lifecycle flags (set by the collector during auto-update, clearable) -- + +/// @dev Active terms were promoted via auto-update (not explicit provider accept) +uint16 constant AUTO_UPDATED = 512; + +// -- Offer type constants -- + +/// @dev Create a new agreement +uint8 constant OFFER_TYPE_NEW = 0; +/// @dev Update an existing agreement +uint8 constant OFFER_TYPE_UPDATE = 1; + +// -- Offer option constants (for unsigned offer path) -- + +/// @dev Reduce collectableUntil and set NOTICE_GIVEN | BY_PAYER on the agreement +uint16 constant WITH_NOTICE = 1; +/// @dev Revert if the targeted version has already been accepted +uint16 constant IF_NOT_ACCEPTED = 2; + +/** + * @title Base interface for agreement-based payment collectors + * @notice Base interface for agreement-based payment collectors. 
+ * @author Edge & Node + * @dev Defines the generic lifecycle operations shared by all agreement-based + * collectors. Concrete collectors (e.g. {IRecurringCollector}) extend this + * with agreement-type-specific structures, methods, and validation. + * Inherits {IPaymentsCollector} for the collect() entry point. + * Does not prescribe pausability or signer authorization — those are + * implementation concerns for concrete collectors. + */ +interface IAgreementCollector is IPaymentsCollector { + /** + * @notice Snapshot of an agreement's version hash and state at a given index. + * @param agreementId The agreement ID + * @param versionHash The EIP-712 hash of the terms at that index + * @param state The agreement state flags, with UPDATE set when applicable + */ + struct AgreementVersion { + bytes16 agreementId; + bytes32 versionHash; + uint16 state; + } + + /** + * @notice Cancel an agreement or revoke a pending update, determined by termsHash. + * @param agreementId The agreement's ID. + * @param termsHash EIP-712 hash identifying which terms to cancel (active or pending). + * @param options Bitmask — IF_NOT_ACCEPTED reverts if the targeted version was already accepted. + */ + function cancel(bytes16 agreementId, bytes32 termsHash, uint16 options) external; + + /** + * @notice Get the version hash and state at a given index for an agreement. 
+ * @param agreementId The ID of the agreement + * @param index The zero-based version index + * @return The AgreementVersion containing versionHash and state + */ + function getAgreementVersionAt(bytes16 agreementId, uint256 index) external view returns (AgreementVersion memory); +} diff --git a/packages/interfaces/contracts/horizon/IAgreementOwner.sol b/packages/interfaces/contracts/horizon/IAgreementOwner.sol index 00de00f9e..5fed56f6e 100644 --- a/packages/interfaces/contracts/horizon/IAgreementOwner.sol +++ b/packages/interfaces/contracts/horizon/IAgreementOwner.sol @@ -1,38 +1,30 @@ // SPDX-License-Identifier: GPL-3.0-or-later pragma solidity ^0.8.22; +import { IAgreementStateChangeCallback } from "./IAgreementStateChangeCallback.sol"; + /** * @title Interface for contract payer callbacks from RecurringCollector * @author Edge & Node * @notice Callbacks that RecurringCollector invokes on contract payers (payers with * deployed code, as opposed to EOA payers that use ECDSA signatures). * - * Three callbacks: - * - {approveAgreement}: gate — called during accept/update to verify authorization. - * Uses the magic-value pattern (return selector on success). Called with RCA hash - * on accept, RCAU hash on update; namespaces don't collide (different EIP712 type hashes). + * Collection callbacks: * - {beforeCollection}: called before PaymentsEscrow.collect() so the payer can top up * escrow if needed. Only acts when the escrow balance is short for the collection. * - {afterCollection}: called after collection so the payer can reconcile escrow state. * Both collection callbacks are wrapped in try/catch — reverts do not block collection. * - * No per-payer authorization step is needed — the contract's code is the authorization. - * The trust chain is: governance grants operator role → operator registers - * (validates and pre-funds) → approveAgreement confirms → RC accepts/updates. 
+ * Lifecycle callback (inherited from {IAgreementStateChangeCallback}): + * - {afterAgreementStateChange}: non-reverting notification on agreement lifecycle events. + * Only fires for events NOT initiated by the payer (the collector skips callbacks to + * `msg.sender`). Implementations should filter by state flags and ignore unrecognised + * combinations. * * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. */ -interface IAgreementOwner { - /** - * @notice Confirms this contract authorized the given agreement or update - * @dev Called by {RecurringCollector.accept} with an RCA hash or by - * {RecurringCollector.update} with an RCAU hash to verify authorization (empty authData path). - * @param agreementHash The EIP712 hash of the RCA or RCAU struct - * @return magic `IAgreementOwner.approveAgreement.selector` if authorized - */ - function approveAgreement(bytes32 agreementHash) external view returns (bytes4); - +interface IAgreementOwner is IAgreementStateChangeCallback { /** * @notice Called by RecurringCollector before PaymentsEscrow.collect() * @dev Allows contract payers to top up escrow if the balance is insufficient diff --git a/packages/interfaces/contracts/horizon/IAgreementStateChangeCallback.sol b/packages/interfaces/contracts/horizon/IAgreementStateChangeCallback.sol new file mode 100644 index 000000000..d2c49c3a8 --- /dev/null +++ b/packages/interfaces/contracts/horizon/IAgreementStateChangeCallback.sol @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later +pragma solidity ^0.8.22; + +/** + * @title IAgreementStateChangeCallback + * @author Edge & Node + * @notice Callback interface for contracts that want to be notified of agreement lifecycle events. + * @dev Called non-reverting by the RecurringCollector — implementations cannot block state transitions. 
+ * The data service and the payer (if a contract) receive this callback, except when they are + * `msg.sender` — the caller already has execution context and sequences its own post-call + * logic instead of relying on a callback from the callee. + * + * The set of lifecycle events that trigger this callback may expand over time (e.g. offers, + * collections). Implementations MUST use the `state` flags to filter to events they care about + * and silently ignore unrecognised or irrelevant state combinations. This ensures forward + * compatibility when the collector begins sending callbacks for additional lifecycle events. + */ +interface IAgreementStateChangeCallback { + /** + * @notice Called when an agreement's state changes. + * @dev Implementations should inspect `state` to determine relevance and ignore + * state combinations they do not handle. The callback is gas-bounded — avoid + * expensive operations that could cause silent failures. + * @param agreementId The ID of the agreement + * @param versionHash The EIP-712 hash of the terms involved in this change + * @param state The agreement state flags, includes UPDATE when the version is pending + */ + function afterAgreementStateChange(bytes16 agreementId, bytes32 versionHash, uint16 state) external; +} diff --git a/packages/interfaces/contracts/horizon/IRecurringCollector.sol b/packages/interfaces/contracts/horizon/IRecurringCollector.sol index ef34f11bd..5981316ed 100644 --- a/packages/interfaces/contracts/horizon/IRecurringCollector.sol +++ b/packages/interfaces/contracts/horizon/IRecurringCollector.sol @@ -1,35 +1,65 @@ // SPDX-License-Identifier: GPL-3.0-or-later pragma solidity ^0.8.22; -import { IPaymentsCollector } from "./IPaymentsCollector.sol"; +import { IAgreementCollector } from "./IAgreementCollector.sol"; import { IGraphPayments } from "./IGraphPayments.sol"; -import { IAuthorizable } from "./IAuthorizable.sol"; /** * @title Interface for the {RecurringCollector} contract * @author Edge & Node 
- * @dev Implements the {IPaymentCollector} interface as defined by the Graph - * Horizon payments protocol. + * @dev Extends {IAgreementCollector} with Recurring Collection Agreement (RCA) specific + * structures, methods, and validation rules. * @notice Implements a payments collector contract that can be used to collect - * recurrent payments. + * recurrent payments based on time-windowed pricing terms. */ -interface IRecurringCollector is IAuthorizable, IPaymentsCollector { - /// @notice The state of an agreement - enum AgreementState { - NotAccepted, - Accepted, - CanceledByServiceProvider, - CanceledByPayer +interface IRecurringCollector is IAgreementCollector { + // -- Structs (shared) -- + + /** + * @notice The params for collecting an agreement + * @param agreementId The agreement ID + * @param collectionId The collection ID + * @param tokens The amount of tokens to collect + * @param dataServiceCut The data service cut in parts per million + * @param receiverDestination The address where the collected fees should be sent + * @param maxSlippage Max acceptable tokens to lose due to rate limiting, or type(uint256).max to ignore + */ + struct CollectParams { + bytes16 agreementId; + bytes32 collectionId; + uint256 tokens; + uint256 dataServiceCut; + address receiverDestination; + uint256 maxSlippage; } - /// @notice The party that can cancel an agreement - enum CancelAgreementBy { - ServiceProvider, - Payer, - ThirdParty + /** + * @notice Return value for opaque offer overloads. 
+ * @param agreementId The deterministically generated agreement ID + * @param dataService The data service address from the decoded agreement + * @param serviceProvider The service provider address from the decoded agreement + * @param versionHash The EIP-712 hash of the terms that were stored + * @param state Agreement state flags, includes UPDATE when the version is pending + */ + // solhint-disable-next-line gas-struct-packing + struct OfferResult { + bytes16 agreementId; + address dataService; + address serviceProvider; + bytes32 versionHash; + uint16 state; + } + + // -- Enums -- + + /// @dev The stage of a payer callback + enum PayerCallbackStage { + EligibilityCheck, + BeforeCollection, + AfterCollection } - /// @notice Reasons why an agreement is not collectable + /// @dev Reasons why an agreement is not collectable enum AgreementNotCollectableReason { None, InvalidAgreementState, @@ -37,6 +67,86 @@ interface IRecurringCollector is IAuthorizable, IPaymentsCollector { InvalidTemporalWindow } + /// @dev Reasons why a collection window is invalid + enum InvalidCollectionWindowReason { + None, + ElapsedEndsAt, + InvalidWindow, + InsufficientDuration + } + + // -- Events -- + + /** + * @notice Emitted on every agreement lifecycle change. + * @param agreementId The agreement ID + * @param versionHash The hash of the agreement terms version + * @param state The agreement state flags after the change + */ + event AgreementUpdated(bytes16 indexed agreementId, bytes32 versionHash, uint16 state); + // solhint-disable-previous-line gas-indexed-events + + /** + * @notice Emitted when a payer callback reverts. + * @param agreementId The agreement ID + * @param payer The payer address + * @param stage The callback stage at which the failure occurred + */ + event PayerCallbackFailed(bytes16 indexed agreementId, address indexed payer, PayerCallbackStage stage); + + /** + * @notice Emitted when an auto-update is attempted during the final collect. 
+ * @param agreementId The agreement ID + * @param success Whether the auto-update succeeded + */ + event AutoUpdateAttempted(bytes16 indexed agreementId, bool success); + // solhint-disable-previous-line gas-indexed-events + + /** + * @notice Emitted when a pause guardian is set. + * @param account The pause guardian address + * @param allowed Whether the account is allowed as a pause guardian + */ + event PauseGuardianSet(address indexed account, bool allowed); + // solhint-disable-previous-line gas-indexed-events + + // -- Generic errors -- + + error AgreementIdZero(); + error DataServiceNotAuthorized(bytes16 agreementId, address unauthorizedDataService); + error UnauthorizedDataService(address dataService); + error AgreementDeadlineElapsed(uint256 currentTimestamp, uint64 deadline); + error UnauthorizedCaller(address unauthorizedCaller, address dataService); + error InvalidCollectData(bytes invalidData); + error AgreementIncorrectState(bytes16 agreementId, uint16 incorrectState); + error AgreementNotCollectable(bytes16 agreementId, AgreementNotCollectableReason reason); + error AgreementAddressNotSet(); + error AgreementInvalidCollectionWindow( + InvalidCollectionWindowReason reason, + uint32 minSecondsPerCollection, + uint32 maxSecondsPerCollection + ); + error AgreementHashMismatch(bytes16 agreementId, bytes32 expected, bytes32 provided); + error AgreementTermsEmpty(bytes16 agreementId); + error UnauthorizedPayer(address caller, address payer); + error UnauthorizedServiceProvider(address caller, address serviceProvider); + error InsufficientCallbackGas(); + error NotGovernor(address account); + error NotPauseGuardian(address account); + error InvalidOfferType(uint8 offerType); + error ExcessiveSlippage(uint256 requested, uint256 actual, uint256 maxSlippage); + + // -- Pause guardian methods (pause/unpause/paused implemented via IPausableControl) -- + + /** + * @notice Check whether an account is a pause guardian. 
+ * @param pauseGuardian The address to check + * @return Whether the account is a pause guardian + */ + function isPauseGuardian(address pauseGuardian) external view returns (bool); + + // -- RCA-specific structures -- + /** * @notice The Recurring Collection Agreement (RCA) * @param deadline The deadline for accepting the RCA @@ -50,6 +160,9 @@ interface IRecurringCollector is IAuthorizable, IPaymentsCollector { * except for the first collection * @param minSecondsPerCollection The minimum amount of seconds that must pass between collections * @param maxSecondsPerCollection The maximum seconds of service that can be collected in a single collection + * @param conditions Bitfield of agreement conditions (e.g. CONDITION_ELIGIBILITY_CHECK) + * @param minSecondsPayerCancellationNotice Minimum seconds of notice the payer must give before + * cancellation takes effect (enforced on cancel and OFFER_TYPE_UPDATE with WITH_NOTICE) * @param nonce A unique nonce for preventing collisions (user-chosen) * @param metadata Arbitrary metadata to extend functionality if a data service requires it * @@ -65,6 +178,8 @@ interface IRecurringCollector is IAuthorizable, IPaymentsCollector { uint256 maxOngoingTokensPerSecond; uint32 minSecondsPerCollection; uint32 maxSecondsPerCollection; + uint16 conditions; + uint32 minSecondsPayerCancellationNotice; uint256 nonce; bytes metadata; } @@ -80,6 +195,9 @@ interface IRecurringCollector is IAuthorizable, IPaymentsCollector { * except for the first collection * @param minSecondsPerCollection The minimum amount of seconds that must pass between collections * @param maxSecondsPerCollection The maximum seconds of service that can be collected in a single collection + * @param conditions Bitfield of agreement conditions (e.g. 
CONDITION_ELIGIBILITY_CHECK) + * @param minSecondsPayerCancellationNotice Minimum seconds of notice the payer must give before + * cancellation takes effect (enforced on cancel and OFFER_TYPE_UPDATE with WITH_NOTICE) * @param nonce The nonce for preventing replay attacks (must be current nonce + 1) * @param metadata Arbitrary metadata to extend functionality if a data service requires it */ @@ -92,258 +210,103 @@ interface IRecurringCollector is IAuthorizable, IPaymentsCollector { uint256 maxOngoingTokensPerSecond; uint32 minSecondsPerCollection; uint32 maxSecondsPerCollection; + uint16 conditions; + uint32 minSecondsPayerCancellationNotice; uint32 nonce; bytes metadata; } /** - * @notice The data for an agreement - * @dev This struct is used to store the data of an agreement in the contract - * @param dataService The address of the data service - * @param payer The address of the payer - * @param serviceProvider The address of the service provider - * @param acceptedAt The timestamp when the agreement was accepted - * @param lastCollectionAt The timestamp when the agreement was last collected at + * @notice The pricing and window terms for an agreement + * @dev Shared between active and pending update terms in AgreementStorage. 
+ * Packed layout (4 fixed slots + dynamic): + * slot 0: deadline(8) + endsAt(8) + minSecondsPerCollection(4) + maxSecondsPerCollection(4) + conditions(2) + minSecondsPayerCancellationNotice(4) = 30B + * slot 1: maxInitialTokens(32) + * slot 2: maxOngoingTokensPerSecond(32) + * slot 3: hash(32) + * slot 4+: metadata (dynamic) + * @param deadline The deadline for accepting these terms * @param endsAt The timestamp when the agreement ends - * @param maxInitialTokens The maximum amount of tokens that can be collected in the first collection - * on top of the amount allowed for subsequent collections - * @param maxOngoingTokensPerSecond The maximum amount of tokens that can be collected per second - * except for the first collection * @param minSecondsPerCollection The minimum amount of seconds that must pass between collections * @param maxSecondsPerCollection The maximum seconds of service that can be collected in a single collection - * @param updateNonce The current nonce for updates (prevents replay attacks) - * @param canceledAt The timestamp when the agreement was canceled - * @param state The state of the agreement + * @param conditions Bitfield of agreement conditions (e.g. 
CONDITION_ELIGIBILITY_CHECK) + * @param minSecondsPayerCancellationNotice Minimum seconds of notice the payer must give before + * cancellation takes effect (enforced on cancel and OFFER_TYPE_UPDATE with WITH_NOTICE) + * @param maxInitialTokens The maximum amount of tokens that can be collected in the first collection + * @param maxOngoingTokensPerSecond The maximum amount of tokens that can be collected per second + * @param hash Precomputed EIP-712 hash of the RCA or RCAU that produced these terms + * @param metadata Arbitrary metadata to extend functionality if a data service requires it */ - struct AgreementData { - address dataService; - address payer; - address serviceProvider; - uint64 acceptedAt; - uint64 lastCollectionAt; + struct AgreementTerms { + uint64 deadline; uint64 endsAt; - uint256 maxInitialTokens; - uint256 maxOngoingTokensPerSecond; uint32 minSecondsPerCollection; uint32 maxSecondsPerCollection; - uint32 updateNonce; - uint64 canceledAt; - AgreementState state; - } - - /** - * @notice The params for collecting an agreement - * @param agreementId The agreement ID of the RCA - * @param collectionId The collection ID of the RCA - * @param tokens The amount of tokens to collect - * @param dataServiceCut The data service cut in parts per million - * @param receiverDestination The address where the collected fees should be sent - * @param maxSlippage Max acceptable tokens to lose due to rate limiting, or type(uint256).max to ignore - */ - struct CollectParams { - bytes16 agreementId; - bytes32 collectionId; - uint256 tokens; - uint256 dataServiceCut; - address receiverDestination; - uint256 maxSlippage; + uint16 conditions; + uint32 minSecondsPayerCancellationNotice; + uint256 maxInitialTokens; + uint256 maxOngoingTokensPerSecond; + bytes32 hash; + bytes metadata; } /** - * @notice Emitted when an agreement is accepted - * @param dataService The address of the data service - * @param payer The address of the payer - * @param serviceProvider The 
address of the service provider + * @notice View of agreement identity, parties, state, temporal info, and collectability. + * @dev Decouples the public interface from internal storage layout so that storage + * refactors do not constitute breaking interface changes. * @param agreementId The agreement ID - * @param acceptedAt The timestamp when the agreement was accepted - * @param endsAt The timestamp when the agreement ends - * @param maxInitialTokens The maximum amount of tokens that can be collected in the first collection - * @param maxOngoingTokensPerSecond The maximum amount of tokens that can be collected per second - * @param minSecondsPerCollection The minimum amount of seconds that must pass between collections - * @param maxSecondsPerCollection The maximum seconds of service that can be collected in a single collection - */ - event AgreementAccepted( - address indexed dataService, - address indexed payer, - address indexed serviceProvider, - bytes16 agreementId, - uint64 acceptedAt, - uint64 endsAt, - uint256 maxInitialTokens, - uint256 maxOngoingTokensPerSecond, - uint32 minSecondsPerCollection, - uint32 maxSecondsPerCollection - ); - - /** - * @notice Emitted when an agreement is canceled - * @param dataService The address of the data service - * @param payer The address of the payer - * @param serviceProvider The address of the service provider - * @param agreementId The agreement ID - * @param canceledAt The timestamp when the agreement was canceled - * @param canceledBy The party that canceled the agreement - */ - event AgreementCanceled( - address indexed dataService, - address indexed payer, - address indexed serviceProvider, - bytes16 agreementId, - uint64 canceledAt, - CancelAgreementBy canceledBy - ); - - /** - * @notice Emitted when an agreement is updated - * @param dataService The address of the data service * @param payer The address of the payer * @param serviceProvider The address of the service provider - * @param agreementId The 
agreement ID - * @param updatedAt The timestamp when the agreement was updated - * @param endsAt The timestamp when the agreement ends - * @param maxInitialTokens The maximum amount of tokens that can be collected in the first collection - * @param maxOngoingTokensPerSecond The maximum amount of tokens that can be collected per second - * @param minSecondsPerCollection The minimum amount of seconds that must pass between collections - * @param maxSecondsPerCollection The maximum seconds of service that can be collected in a single collection - */ - event AgreementUpdated( - address indexed dataService, - address indexed payer, - address indexed serviceProvider, - bytes16 agreementId, - uint64 updatedAt, - uint64 endsAt, - uint256 maxInitialTokens, - uint256 maxOngoingTokensPerSecond, - uint32 minSecondsPerCollection, - uint32 maxSecondsPerCollection - ); - - /** - * @notice Emitted when an RCA is collected * @param dataService The address of the data service - * @param payer The address of the payer - * @param serviceProvider The address of the service provider - * @param agreementId The agreement ID - * @param collectionId The collection ID - * @param tokens The amount of tokens collected - * @param dataServiceCut The tokens cut for the data service + * @param acceptedAt The timestamp when the agreement was accepted (zero if not yet accepted) + * @param lastCollectionAt The timestamp of the last collection (zero if never collected) + * @param collectableUntil The timestamp after which the agreement is no longer collectable + * @param updateNonce The current nonce for updates (prevents replay attacks) + * @param state Bitflag state of the agreement (see IAgreementCollector state flags) + * @param isCollectable Whether the agreement allows collection attempts right now + * @param collectionSeconds The valid collection duration in seconds (capped at maxSecondsPerCollection) */ - event RCACollected( - address indexed dataService, - address indexed payer, - address 
indexed serviceProvider, - bytes16 agreementId, - bytes32 collectionId, - uint256 tokens, - uint256 dataServiceCut - ); + struct AgreementData { + bytes16 agreementId; + address payer; + address serviceProvider; + address dataService; + uint64 acceptedAt; + uint64 lastCollectionAt; + uint64 collectableUntil; + uint32 updateNonce; + uint16 state; + bool isCollectable; + uint256 collectionSeconds; + } - /** - * @notice Thrown when accepting an agreement with a zero ID - */ - error RecurringCollectorAgreementIdZero(); + // -- RCA-specific events -- /** - * @notice Thrown when interacting with an agreement not owned by the message sender + * @notice Emitted when an RCA is collected. Links the collection to the agreement. + * @dev Token amounts and payment breakdown are in GraphPaymentCollected from GraphPayments. * @param agreementId The agreement ID - * @param unauthorizedDataService The address of the unauthorized data service - */ - error RecurringCollectorDataServiceNotAuthorized(bytes16 agreementId, address unauthorizedDataService); - /** - * @notice Thrown when the data service is not authorized for the service provider - * @param dataService The address of the unauthorized data service - */ - error RecurringCollectorUnauthorizedDataService(address dataService); - - /** - * @notice Thrown when interacting with an agreement with an elapsed deadline - * @param currentTimestamp The current timestamp - * @param deadline The elapsed deadline timestamp + * @param collectionId The collection ID + * @param state The agreement state after collection */ - error RecurringCollectorAgreementDeadlineElapsed(uint256 currentTimestamp, uint64 deadline); + event RCACollected(bytes16 indexed agreementId, bytes32 collectionId, uint16 state); + // solhint-disable-previous-line gas-indexed-events - /** - * @notice Thrown when the signer is invalid - */ - error RecurringCollectorInvalidSigner(); + // -- RCA-specific errors -- /** * @notice Thrown when the payment type is not 
IndexingFee * @param invalidPaymentType The invalid payment type */ - error RecurringCollectorInvalidPaymentType(IGraphPayments.PaymentTypes invalidPaymentType); - - /** - * @notice Thrown when the caller is not the data service the RCA was issued to - * @param unauthorizedCaller The address of the caller - * @param dataService The address of the data service - */ - error RecurringCollectorUnauthorizedCaller(address unauthorizedCaller, address dataService); - - /** - * @notice Thrown when calling collect() with invalid data - * @param invalidData The invalid data - */ - error RecurringCollectorInvalidCollectData(bytes invalidData); - - /** - * @notice Thrown when interacting with an agreement that has an incorrect state - * @param agreementId The agreement ID - * @param incorrectState The incorrect state - */ - error RecurringCollectorAgreementIncorrectState(bytes16 agreementId, AgreementState incorrectState); - - /** - * @notice Thrown when an agreement is not collectable - * @param agreementId The agreement ID - * @param reason The reason why the agreement is not collectable - */ - error RecurringCollectorAgreementNotCollectable(bytes16 agreementId, AgreementNotCollectableReason reason); - - /** - * @notice Thrown when accepting an agreement with an address that is not set - */ - error RecurringCollectorAgreementAddressNotSet(); - - /** - * @notice Thrown when accepting or upgrading an agreement with an elapsed endsAt - * @param currentTimestamp The current timestamp - * @param endsAt The agreement end timestamp - */ - error RecurringCollectorAgreementElapsedEndsAt(uint256 currentTimestamp, uint64 endsAt); - - /** - * @notice Thrown when accepting or upgrading an agreement with an elapsed endsAt - * @param allowedMinCollectionWindow The allowed minimum collection window - * @param minSecondsPerCollection The minimum seconds per collection - * @param maxSecondsPerCollection The maximum seconds per collection - */ - error 
RecurringCollectorAgreementInvalidCollectionWindow( - uint32 allowedMinCollectionWindow, - uint32 minSecondsPerCollection, - uint32 maxSecondsPerCollection - ); - - /** - * @notice Thrown when accepting or upgrading an agreement with an invalid duration - * @param requiredMinDuration The required minimum duration - * @param invalidDuration The invalid duration - */ - error RecurringCollectorAgreementInvalidDuration(uint32 requiredMinDuration, uint256 invalidDuration); + error InvalidPaymentType(IGraphPayments.PaymentTypes invalidPaymentType); /** * @notice Thrown when calling collect() with a zero collection seconds * @param agreementId The agreement ID * @param currentTimestamp The current timestamp * @param lastCollectionAt The timestamp when the last collection was done - * */ - error RecurringCollectorZeroCollectionSeconds( - bytes16 agreementId, - uint256 currentTimestamp, - uint64 lastCollectionAt - ); + error ZeroCollectionSeconds(bytes16 agreementId, uint256 currentTimestamp, uint64 lastCollectionAt); /** * @notice Thrown when calling collect() too soon @@ -351,7 +314,7 @@ interface IRecurringCollector is IAuthorizable, IPaymentsCollector { * @param secondsSinceLast Seconds since last collection * @param minSeconds Minimum seconds between collections */ - error RecurringCollectorCollectionTooSoon(bytes16 agreementId, uint32 secondsSinceLast, uint32 minSeconds); + error CollectionTooSoon(bytes16 agreementId, uint32 secondsSinceLast, uint32 minSeconds); /** * @notice Thrown when calling update() with an invalid nonce @@ -359,126 +322,99 @@ interface IRecurringCollector is IAuthorizable, IPaymentsCollector { * @param expected The expected nonce * @param provided The provided nonce */ - error RecurringCollectorInvalidUpdateNonce(bytes16 agreementId, uint32 expected, uint32 provided); - - /** - * @notice Thrown when collected tokens are less than requested beyond the allowed slippage - * @param requested The amount of tokens requested to collect - * @param 
actual The actual amount that would be collected - * @param maxSlippage The maximum allowed slippage - */ - error RecurringCollectorExcessiveSlippage(uint256 requested, uint256 actual, uint256 maxSlippage); + error InvalidUpdateNonce(bytes16 agreementId, uint32 expected, uint32 provided); /** * @notice Thrown when a contract payer's eligibility oracle denies the service provider * @param agreementId The agreement ID * @param serviceProvider The service provider that is not eligible */ - error RecurringCollectorCollectionNotEligible(bytes16 agreementId, address serviceProvider); + error CollectionNotEligible(bytes16 agreementId, address serviceProvider); /** * @notice Thrown when the contract approver is not a contract * @param approver The address that is not a contract */ - error RecurringCollectorApproverNotContract(address approver); + error ApproverNotContract(address approver); /** - * @notice Accept a Recurring Collection Agreement. - * @dev Caller must be the data service the RCA was issued to. - * If `signature` is non-empty: checks `rca.deadline >= block.timestamp` and verifies the ECDSA signature. - * If `signature` is empty: the payer must be a contract implementing {IAgreementOwner.approveAgreement} - * and must return the magic value for the RCA's EIP712 hash. 
- * @param rca The Recurring Collection Agreement to accept - * @param signature ECDSA signature bytes, or empty for contract-approved agreements - * @return agreementId The deterministically generated agreement ID + * @notice Thrown when notice does not satisfy minSecondsPayerCancellationNotice + * @param agreementId The agreement ID + * @param minSecondsPayerCancellationNotice The required minimum notice period + * @param actualSeconds The actual seconds of notice provided */ - function accept( - RecurringCollectionAgreement calldata rca, - bytes calldata signature - ) external returns (bytes16 agreementId); + error InsufficientNotice(bytes16 agreementId, uint32 minSecondsPayerCancellationNotice, uint256 actualSeconds); /** - * @notice Cancel an indexing agreement. - * @param agreementId The agreement's ID. - * @param by The party that is canceling the agreement. + * @notice Thrown when CONDITION_ELIGIBILITY_CHECK is set but the payer does not + * advertise IProviderEligibility support via ERC-165. + * @param payer The payer address that does not support IProviderEligibility */ - function cancel(bytes16 agreementId, CancelAgreementBy by) external; + error EligibilityConditionNotSupported(address payer); - /** - * @notice Update a Recurring Collection Agreement. - * @dev Caller must be the data service for the agreement. - * If `signature` is non-empty: checks `rcau.deadline >= block.timestamp` and verifies the ECDSA signature. - * If `signature` is empty: the payer (stored in the agreement) must be a contract implementing - * {IAgreementOwner.approveAgreement} and must return the magic value for the RCAU's EIP712 hash. 
-     * @param rcau The Recurring Collection Agreement Update to apply
-     * @param signature ECDSA signature bytes, or empty for contract-approved updates
-     */
-    function update(RecurringCollectionAgreementUpdate calldata rcau, bytes calldata signature) external;
+    // -- RCA-specific methods --

     /**
-     * @notice Computes the hash of a RecurringCollectionAgreement (RCA).
-     * @param rca The RCA for which to compute the hash.
-     * @return The hash of the RCA.
+     * @notice Offer a new agreement or update an existing one.
+     * @param offerType The type of offer (OFFER_TYPE_NEW or OFFER_TYPE_UPDATE)
+     * @param data ABI-encoded offer data
+     * @param options Bitmask of offer options (e.g. WITH_NOTICE)
+     * @return The offer result containing agreementId, dataService, serviceProvider, versionHash, and state
      */
-    function hashRCA(RecurringCollectionAgreement calldata rca) external view returns (bytes32);
+    function offer(uint8 offerType, bytes calldata data, uint16 options) external returns (OfferResult memory);

     /**
-     * @notice Computes the hash of a RecurringCollectionAgreementUpdate (RCAU).
-     * @param rcau The RCAU for which to compute the hash.
-     * @return The hash of the RCAU.
+     * @notice Accept a previously offered agreement or pending update by its ID and hash.
+     * @param agreementId The ID of the agreement to accept
+     * @param agreementHash EIP-712 hash the service provider expects to accept
+     * @param extraData Opaque data forwarded to the data service callback
+     * @param options Bitmask of agreement options (e.g. AUTO_UPDATE)
      */
-    function hashRCAU(RecurringCollectionAgreementUpdate calldata rcau) external view returns (bytes32);
+    function accept(bytes16 agreementId, bytes32 agreementHash, bytes calldata extraData, uint16 options) external;

     /**
-     * @notice Recovers the signer address of a signed RecurringCollectionAgreement (RCA).
-     * @param rca The RCA whose hash was signed.
-     * @param signature The ECDSA signature bytes.
-     * @return The address of the signer.
+ * @notice Get agreement data for a given agreement ID. + * @param agreementId The ID of the agreement to retrieve. + * @return The AgreementData struct containing identity, parties, state, and collectability. */ - function recoverRCASigner( - RecurringCollectionAgreement calldata rca, - bytes calldata signature - ) external view returns (address); + function getAgreementData(bytes16 agreementId) external view returns (AgreementData memory); /** - * @notice Recovers the signer address of a signed RecurringCollectionAgreementUpdate (RCAU). - * @param rcau The RCAU whose hash was signed. - * @param signature The ECDSA signature bytes. - * @return The address of the signer. + * @notice Get the maximum tokens collectable for an agreement, scoped by active and/or pending terms. + * @param agreementId The ID of the agreement + * @param claimScope Bitmask: 1 = active terms, 2 = pending terms, 3 = max of both + * @return The maximum tokens that could be collected under the requested scope */ - function recoverRCAUSigner( - RecurringCollectionAgreementUpdate calldata rcau, - bytes calldata signature - ) external view returns (address); + function getMaxNextClaim(bytes16 agreementId, uint8 claimScope) external view returns (uint256); /** - * @notice Gets an agreement. - * @param agreementId The ID of the agreement to retrieve. - * @return The AgreementData struct containing the agreement's data. + * @notice Convenience overload: returns max of both active and pending terms. + * @param agreementId The ID of the agreement + * @return The maximum tokens that could be collected */ - function getAgreement(bytes16 agreementId) external view returns (AgreementData memory); + function getMaxNextClaim(bytes16 agreementId) external view returns (uint256); /** - * @notice Get the maximum tokens collectable in the next collection for an agreement. - * @dev Computes the worst-case (maximum possible) claim amount based on current on-chain - * agreement state. 
For active agreements, uses `endsAt` as the upper bound (not block.timestamp). - * Returns 0 for NotAccepted, CanceledByServiceProvider, or fully expired agreements. + * @notice Get the number of term versions stored for an agreement. * @param agreementId The ID of the agreement - * @return The maximum tokens that could be collected in the next collection + * @return The number of stored term versions */ - function getMaxNextClaim(bytes16 agreementId) external view returns (uint256); + function getAgreementVersionCount(bytes16 agreementId) external view returns (uint256); /** - * @notice Get collection info for an agreement - * @param agreement The agreement data - * @return isCollectable Whether the agreement is in a valid state that allows collection attempts, - * not that there are necessarily funds available to collect. - * @return collectionSeconds The valid collection duration in seconds (0 if not collectable) - * @return reason The reason why the agreement is not collectable (None if collectable) + * @notice Reconstruct the original offer for a given version, enabling independent hash verification. + * @dev Returns the offer type (OFFER_TYPE_NEW or OFFER_TYPE_UPDATE) and the ABI-encoded + * original struct (RecurringCollectionAgreement or RecurringCollectionAgreementUpdate). + * Callers can decode and pass to hashRCA/hashRCAU to verify the stored version hash. 
+ * @param agreementId The ID of the agreement + * @param index The zero-based version index + * @return offerType OFFER_TYPE_NEW (0) or OFFER_TYPE_UPDATE (1) + * @return offerData ABI-encoded RecurringCollectionAgreement or RecurringCollectionAgreementUpdate */ - function getCollectionInfo( - AgreementData calldata agreement - ) external view returns (bool isCollectable, uint256 collectionSeconds, AgreementNotCollectableReason reason); + function getAgreementOfferAt( + bytes16 agreementId, + uint256 index + ) external view returns (uint8 offerType, bytes memory offerData); /** * @notice Generate a deterministic agreement ID from agreement parameters diff --git a/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol index 3e37e50e8..f9dbf8e00 100644 --- a/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol +++ b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol @@ -23,18 +23,18 @@ interface IRecurringAgreementHelper { * @param tokenBalance GRT balance available to the manager * @param sumMaxNextClaimAll Global sum of maxNextClaim across all (collector, provider) pairs * @param totalEscrowDeficit Total unfunded escrow across all pairs - * @param totalAgreementCount Total number of tracked agreements * @param escrowBasis Configured escrow level (Full / OnDemand / JustInTime) - * @param tempJit Whether the temporary JIT breaker is active + * @param minOnDemandBasisThreshold Threshold for OnDemand basis (numerator over 256) + * @param minFullBasisMargin Margin for Full basis (added to 256) * @param collectorCount Number of collectors with active agreements */ struct GlobalAudit { uint256 tokenBalance; uint256 sumMaxNextClaimAll; uint256 totalEscrowDeficit; - uint256 totalAgreementCount; IRecurringEscrowManagement.EscrowBasis escrowBasis; - bool tempJit; + uint8 minOnDemandBasisThreshold; + uint8 
minFullBasisMargin; uint256 collectorCount; } @@ -44,6 +44,7 @@ interface IRecurringAgreementHelper { * @param provider The provider address * @param agreementCount Number of agreements for this pair * @param sumMaxNextClaim Sum of maxNextClaim for this pair + * @param escrowSnap Cached escrow balance (compare with escrow.balance to detect staleness) * @param escrow Escrow account state (balance, tokensThawing, thawEndTimestamp) */ struct PairAudit { @@ -51,6 +52,7 @@ interface IRecurringAgreementHelper { address provider; uint256 agreementCount; uint256 sumMaxNextClaim; + uint256 escrowSnap; IPaymentsEscrow.EscrowAccount escrow; } @@ -90,23 +92,98 @@ interface IRecurringAgreementHelper { */ function auditPair(address collector, address provider) external view returns (PairAudit memory pair); - // -- Reconciliation -- + // -- Enumeration Views -- + + /** + * @notice Get all managed agreement IDs for a (collector, provider) pair + * @param collector The collector address + * @param provider The provider address + * @return agreementIds The array of agreement IDs + */ + function getPairAgreements( + address collector, + address provider + ) external view returns (bytes16[] memory agreementIds); + + /** + * @notice Get a paginated slice of managed agreement IDs for a (collector, provider) pair + * @param collector The collector address + * @param provider The provider address + * @param offset The index to start from + * @param count Maximum number to return (clamped to available) + * @return agreementIds The array of agreement IDs + */ + function getPairAgreements( + address collector, + address provider, + uint256 offset, + uint256 count + ) external view returns (bytes16[] memory agreementIds); /** - * @notice Reconcile all agreements for a provider, cleaning up fully settled ones. - * @dev Permissionless. O(n) gas — may hit gas limits with many agreements. 
- * @param provider The provider to reconcile - * @return removed Number of agreements removed during reconciliation + * @notice Get all collector addresses with active agreements + * @return result Array of collector addresses */ - function reconcile(address provider) external returns (uint256 removed); + function getCollectors() external view returns (address[] memory result); /** - * @notice Reconcile a batch of specific agreement IDs, cleaning up fully settled ones. - * @dev Permissionless. Skips non-existent agreements. - * @param agreementIds The agreement IDs to reconcile - * @return removed Number of agreements removed during reconciliation + * @notice Get a paginated slice of collector addresses + * @param offset The index to start from + * @param count Maximum number to return (clamped to available) + * @return result Array of collector addresses */ - function reconcileBatch(bytes16[] calldata agreementIds) external returns (uint256 removed); + function getCollectors(uint256 offset, uint256 count) external view returns (address[] memory result); + + /** + * @notice Get all provider addresses with active agreements for a collector + * @param collector The collector address + * @return result Array of provider addresses + */ + function getProviders(address collector) external view returns (address[] memory result); + + /** + * @notice Get a paginated slice of provider addresses for a collector + * @param collector The collector address + * @param offset The index to start from + * @param count Maximum number to return (clamped to available) + * @return result Array of provider addresses + */ + function getProviders( + address collector, + uint256 offset, + uint256 count + ) external view returns (address[] memory result); + + // -- Reconciliation Discovery -- + + /** + * @notice Per-agreement staleness info for reconciliation discovery + * @param agreementId The agreement ID + * @param cachedMaxNextClaim The RAM's cached maxNextClaim + * @param 
liveMaxNextClaim The collector's current maxNextClaim + * @param stale True if cached != live (reconciliation needed) + */ + struct AgreementStaleness { + bytes16 agreementId; + uint256 cachedMaxNextClaim; + uint256 liveMaxNextClaim; + bool stale; + } + + /** + * @notice Check which agreements in a (collector, provider) pair need reconciliation + * @dev Compares cached maxNextClaim against live collector values. + * @param collector The collector address + * @param provider The provider address + * @return staleAgreements Array of staleness info per agreement + * @return escrowStale True if escrowSnap differs from actual escrow balance + */ + function checkPairStaleness( + address collector, + address provider + ) external view returns (AgreementStaleness[] memory staleAgreements, bool escrowStale); + + // -- Reconciliation -- /** * @notice Reconcile all agreements for a (collector, provider) pair, then diff --git a/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol index 43f72057a..666e6ed7f 100644 --- a/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol +++ b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol @@ -17,33 +17,41 @@ interface IRecurringAgreementManagement { // solhint-disable gas-indexed-events /** - * @notice Emitted when an agreement is offered for escrow management + * @notice Emitted when an agreement is discovered and registered for escrow management. 
* @param agreementId The deterministic agreement ID + * @param collector The collector contract address + * @param dataService The data service address * @param provider The service provider for this agreement - * @param maxNextClaim The calculated maximum next claim amount */ - event AgreementOffered(bytes16 indexed agreementId, address indexed provider, uint256 maxNextClaim); + event AgreementAdded( + bytes16 indexed agreementId, + address indexed collector, + address dataService, + address indexed provider + ); /** - * @notice Emitted when an agreement offer is revoked before acceptance + * @notice Emitted when an agreement callback is ignored because it does not belong to this manager. + * @dev Useful for debugging missed agreements. * @param agreementId The agreement ID - * @param provider The provider whose sumMaxNextClaim was reduced + * @param collector The collector that sent the callback + * @param reason The rejection reason */ - event OfferRevoked(bytes16 indexed agreementId, address indexed provider); + event AgreementRejected(bytes16 indexed agreementId, address indexed collector, AgreementRejectionReason reason); - /** - * @notice Emitted when an agreement is canceled via the data service - * @param agreementId The agreement ID - * @param provider The provider for this agreement - */ - event AgreementCanceled(bytes16 indexed agreementId, address indexed provider); + /// @notice Why an agreement was not tracked by this manager. 
+ enum AgreementRejectionReason { + UnauthorizedCollector, + UnknownAgreement, + PayerMismatch, + UnauthorizedDataService + } /** * @notice Emitted when an agreement is removed from escrow management * @param agreementId The agreement ID being removed - * @param provider The provider whose sumMaxNextClaim was reduced */ - event AgreementRemoved(bytes16 indexed agreementId, address indexed provider); + event AgreementRemoved(bytes16 indexed agreementId); /** * @notice Emitted when an agreement's max next claim is recalculated @@ -53,30 +61,14 @@ interface IRecurringAgreementManagement { */ event AgreementReconciled(bytes16 indexed agreementId, uint256 oldMaxNextClaim, uint256 newMaxNextClaim); - /** - * @notice Emitted when a pending agreement update is offered - * @param agreementId The agreement ID - * @param pendingMaxNextClaim The max next claim for the pending update - * @param updateNonce The RCAU nonce for the pending update - */ - event AgreementUpdateOffered(bytes16 indexed agreementId, uint256 pendingMaxNextClaim, uint32 updateNonce); - - /** - * @notice Emitted when a pending agreement update is revoked - * @param agreementId The agreement ID - * @param pendingMaxNextClaim The escrow that was freed - * @param updateNonce The RCAU nonce that was revoked - */ - event AgreementUpdateRevoked(bytes16 indexed agreementId, uint256 pendingMaxNextClaim, uint32 updateNonce); - /** * @notice Emitted when a (collector, provider) pair is removed from tracking * @dev Emitted when the pair has no agreements AND escrow is fully recovered (balance zero). - * May cascade inline from agreement deletion or be triggered by {reconcileCollectorProvider}. + * May cascade inline from agreement deletion or be triggered by {reconcileProvider}. 
* @param collector The collector address * @param provider The provider address */ - event CollectorProviderRemoved(address indexed collector, address indexed provider); + event ProviderRemoved(address indexed collector, address indexed provider); /** * @notice Emitted when a collector is removed from the global tracking set @@ -90,16 +82,10 @@ interface IRecurringAgreementManagement { // -- Errors -- /** - * @notice Thrown when trying to offer an agreement that is already offered - * @param agreementId The agreement ID - */ - error AgreementAlreadyOffered(bytes16 agreementId); - - /** - * @notice Thrown when trying to operate on an agreement that is not offered + * @notice Thrown when re-offering an agreement with a different service provider * @param agreementId The agreement ID */ - error AgreementNotOffered(bytes16 agreementId); + error ServiceProviderMismatch(bytes16 agreementId); /** * @notice Thrown when the RCA payer is not this contract @@ -108,24 +94,6 @@ interface IRecurringAgreementManagement { */ error PayerMustBeManager(address payer, address expected); - /** - * @notice Thrown when trying to revoke an agreement that is already accepted - * @param agreementId The agreement ID - */ - error AgreementAlreadyAccepted(bytes16 agreementId); - - /** - * @notice Thrown when trying to cancel an agreement that has not been accepted yet - * @param agreementId The agreement ID - */ - error AgreementNotAccepted(bytes16 agreementId); - - /** - * @notice Thrown when the data service address has no deployed code - * @param dataService The address that was expected to be a contract - */ - error InvalidDataService(address dataService); - /// @notice Thrown when the RCA service provider is the zero address error ServiceProviderZeroAddress(); @@ -135,17 +103,6 @@ interface IRecurringAgreementManagement { */ error UnauthorizedDataService(address dataService); - /// @notice Thrown when a collection callback is called by an address other than the agreement's collector - 
error OnlyAgreementCollector(); - - /** - * @notice Thrown when the RCAU nonce does not match the expected next update nonce - * @param agreementId The agreement ID - * @param expectedNonce The expected nonce (collector's updateNonce + 1) - * @param actualNonce The nonce provided in the RCAU - */ - error InvalidUpdateNonce(bytes16 agreementId, uint32 expectedNonce, uint32 actualNonce); - /** * @notice Thrown when the collector address does not have COLLECTOR_ROLE * @param collector The unauthorized collector address @@ -155,69 +112,33 @@ interface IRecurringAgreementManagement { // -- Functions -- /** - * @notice Offer an RCA for escrow management. Must be called before - * the data service accepts the agreement (with empty authData). - * @dev Calculates max next claim from RCA parameters, stores the authorized hash - * for the {IAgreementOwner} callback, and deposits into escrow. + * @notice Offer an RCA for escrow management. + * @dev Forwards opaque offer data to the collector, which decodes and validates it, + * then reconciles agreement tracking and escrow locally after the call returns. + * The collector does not callback to `msg.sender` — see RecurringCollector callback model. * Requires AGREEMENT_MANAGER_ROLE. - * @param rca The Recurring Collection Agreement parameters * @param collector The RecurringCollector contract to use for this agreement + * @param offerType The offer type (OFFER_TYPE_NEW or OFFER_TYPE_UPDATE) + * @param offerData Opaque ABI-encoded agreement data forwarded to the collector * @return agreementId The deterministic agreement ID */ function offerAgreement( - IRecurringCollector.RecurringCollectionAgreement calldata rca, - IRecurringCollector collector - ) external returns (bytes16 agreementId); - - /** - * @notice Offer a pending agreement update for escrow management. Must be called - * before the data service applies the update (with empty authData). 
- * @dev Stores the authorized RCAU hash for the {IAgreementOwner} callback and - * adds the pending update's max next claim to sumMaxNextClaim. Treats the - * pending update as a separate escrow entry alongside the current agreement. - * If a previous pending update exists, it is replaced. - * Requires AGREEMENT_MANAGER_ROLE. - * @param rcau The Recurring Collection Agreement Update parameters - * @return agreementId The agreement ID from the RCAU - */ - function offerAgreementUpdate( - IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau + IRecurringCollector collector, + uint8 offerType, + bytes calldata offerData ) external returns (bytes16 agreementId); /** - * @notice Revoke a pending agreement update, freeing its reserved escrow. - * @dev Requires AGREEMENT_MANAGER_ROLE. Reconciles the agreement first to - * detect if the update was already applied. If the pending update is still - * outstanding after reconciliation, clears it and frees the escrow. - * No-op (returns false) if no pending update exists after reconciliation. - * @param agreementId The agreement ID whose pending update to revoke - * @return revoked True if a pending update was cleared by this call - */ - function revokeAgreementUpdate(bytes16 agreementId) external returns (bool revoked); - - /** - * @notice Revoke an un-accepted agreement offer. Only for agreements not yet - * accepted in RecurringCollector. - * @dev Requires AGREEMENT_MANAGER_ROLE. Clears the agreement tracking and authorized hashes, - * freeing the reserved escrow. Any pending update is also cleared. - * No-op (returns true) if the agreement is not tracked. - * @param agreementId The agreement ID to revoke - * @return gone True if the agreement is not tracked (whether revoked by this call or already absent) - */ - function revokeOffer(bytes16 agreementId) external returns (bool gone); - - /** - * @notice Cancel an accepted agreement by routing through the data service. - * @dev Requires AGREEMENT_MANAGER_ROLE. 
Reads agreement state from RecurringCollector: - * - NotAccepted: reverts (use {revokeOffer} instead) - * - Accepted: cancels via the data service, then reconciles and updates escrow - * - Already canceled: idempotent — reconciles and updates escrow without re-canceling - * After cancellation, call {reconcileAgreement} once the collection window closes. + * @notice Cancel an agreement or pending update by routing through the collector. + * @dev Requires AGREEMENT_MANAGER_ROLE. Forwards the terms hash to the collector's + * cancel function, then reconciles locally after the call returns. The collector does + * not callback to `msg.sender` — see RecurringCollector callback model. + * @param collector The collector contract address for this agreement * @param agreementId The agreement ID to cancel - * @return gone True if the agreement is not tracked (already absent); false when - * the agreement is still tracked (caller should eventually call {reconcileAgreement}) + * @param versionHash The terms hash to cancel (activeTerms.hash or pendingTerms.hash) + * @param options Bitmask — IF_NOT_ACCEPTED reverts if the targeted version was already accepted. */ - function cancelAgreement(bytes16 agreementId) external returns (bool gone); + function cancelAgreement(address collector, bytes16 agreementId, bytes32 versionHash, uint16 options) external; /** * @notice Reconcile a single agreement: re-read on-chain state, recalculate @@ -227,10 +148,25 @@ interface IRecurringAgreementManagement { * - NotAccepted past deadline: zeroes and deletes (returns false) * - Accepted/Canceled: reconciles maxNextClaim, deletes if zero * Should be called after collections, cancellations, or agreement updates. 
+ * @param collector The collector contract address for this agreement * @param agreementId The agreement ID to reconcile * @return exists True if the agreement is still tracked after this call */ - function reconcileAgreement(bytes16 agreementId) external returns (bool exists); + function reconcileAgreement(address collector, bytes16 agreementId) external returns (bool exists); + + /** + * @notice Force-remove a tracked agreement whose collector is unresponsive. + * @dev Operator escape hatch for when a collector contract reverts on all calls + * (broken upgrade, self-destruct, permanent pause), making normal reconciliation + * impossible. Zeroes the agreement's maxNextClaim, removes it from pair tracking, + * and triggers pair reconciliation to thaw/withdraw the freed escrow. + * + * Requires OPERATOR_ROLE. Only use when the collector cannot be fixed. + * + * @param collector The collector contract address + * @param agreementId The agreement ID to force-remove + */ + function forceRemoveAgreement(address collector, bytes16 agreementId) external; /** * @notice Reconcile a (collector, provider) pair: rebalance escrow, withdraw @@ -238,11 +174,19 @@ interface IRecurringAgreementManagement { * @dev Permissionless. First updates escrow state (deposit deficit, thaw excess, * withdraw completed thaws), then removes pair tracking when both pairAgreementCount * and escrow balance are zero. Also serves as the permissionless "poke" to rebalance - * escrow after {IRecurringEscrowManagement-setEscrowBasis} or {IRecurringEscrowManagement-setTempJit} + * escrow after {IRecurringEscrowManagement-setEscrowBasis} or threshold/margin * changes. Returns true if the pair still has agreements or escrow is still thawing. 
* @param collector The collector address * @param provider The provider address - * @return exists True if the pair is still tracked after this call + * @return tracked True if the pair is still tracked after this call + */ + function reconcileProvider(address collector, address provider) external returns (bool tracked); + + /** + * @notice Emergency: clear the eligibility oracle so all providers become eligible. + * @dev Callable by PAUSE_ROLE holders. Use when the oracle is broken or compromised + * and is wrongly blocking collections. The governor can later set a replacement oracle + * via {IProviderEligibilityManagement-setProviderEligibilityOracle}. */ - function reconcileCollectorProvider(address collector, address provider) external returns (bool exists); + function emergencyClearEligibilityOracle() external; } diff --git a/packages/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol index 9d6223ad0..ca53204d2 100644 --- a/packages/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol +++ b/packages/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol @@ -1,7 +1,6 @@ // SPDX-License-Identifier: GPL-3.0-or-later pragma solidity ^0.8.22; -import { IDataServiceAgreements } from "../../data-service/IDataServiceAgreements.sol"; import { IPaymentsEscrow } from "../../horizon/IPaymentsEscrow.sol"; import { IRecurringCollector } from "../../horizon/IRecurringCollector.sol"; import { IRecurringEscrowManagement } from "./IRecurringEscrowManagement.sol"; @@ -21,36 +20,23 @@ interface IRecurringAgreements { /** * @notice Tracked state for a managed agreement * @dev An agreement is considered tracked when `provider != address(0)`. + * The collector owns all agreement terms, pending update state, and + * data service reference. The RAM only caches the max next claim + * and the minimum needed for routing and tracking.
* - * Storage layout (7 slots): - * slot 0: provider (20) + deadline (8) + pendingUpdateNonce (4) = 32 (packed) + * The collector is implicit from the storage key: agreements are stored + * under `collectors[collector].agreements[agreementId]`. + * + * Storage layout (2 slots): + * slot 0: provider (20) (12 bytes free) * slot 1: maxNextClaim (32) - * slot 2: pendingUpdateMaxNextClaim (32) - * slot 3: agreementHash (32) - * slot 4: pendingUpdateHash (32) - * slot 5: dataService (20) (12 bytes free) - * slot 6: collector (20) (12 bytes free) * * @param provider The service provider for this agreement - * @param deadline The RCA deadline for acceptance (used to detect expired offers) - * @param pendingUpdateNonce The RCAU nonce for the pending update (0 means no pending) - * @param maxNextClaim The current maximum tokens claimable in the next collection - * @param pendingUpdateMaxNextClaim Max next claim for an offered-but-not-yet-applied update - * @param agreementHash The RCA hash stored for cleanup of authorizedHashes on deletion - * @param pendingUpdateHash The RCAU hash stored for cleanup of authorizedHashes on deletion - * @param dataService The data service contract for this agreement - * @param collector The RecurringCollector contract for this agreement + * @param maxNextClaim Cached max of active and pending claims from collector */ struct AgreementInfo { address provider; - uint64 deadline; - uint32 pendingUpdateNonce; uint256 maxNextClaim; - uint256 pendingUpdateMaxNextClaim; - bytes32 agreementHash; - bytes32 pendingUpdateHash; - IDataServiceAgreements dataService; - IRecurringCollector collector; } // -- View Functions -- @@ -76,47 +62,19 @@ interface IRecurringAgreements { /** * @notice Get the max next claim for a specific agreement + * @param collector The collector contract address * @param agreementId The agreement ID * @return tokens The current max next claim stored for this agreement */ - function getAgreementMaxNextClaim(bytes16 agreementId) 
external view returns (uint256 tokens); + function getAgreementMaxNextClaim(address collector, bytes16 agreementId) external view returns (uint256 tokens); /** * @notice Get the full tracked state for a specific agreement + * @param collector The collector contract address * @param agreementId The agreement ID * @return info The agreement info struct (all fields zero if not tracked) */ - function getAgreementInfo(bytes16 agreementId) external view returns (AgreementInfo memory info); - - /** - * @notice Get the number of managed agreements for a provider - * @param provider The provider address - * @return count The count of tracked agreements - */ - function getProviderAgreementCount(address provider) external view returns (uint256 count); - - /** - * @notice Get all managed agreement IDs for a provider - * @dev Returns the full set of tracked agreement IDs. May be expensive for providers - * with many agreements — prefer the paginated overload or {getProviderAgreementCount} - * for on-chain use. 
- * @param provider The provider address - * @return agreementIds The array of agreement IDs - */ - function getProviderAgreements(address provider) external view returns (bytes16[] memory agreementIds); - - /** - * @notice Get a paginated slice of managed agreement IDs for a provider - * @param provider The provider address - * @param offset The index to start from - * @param count Maximum number of IDs to return (clamped to available) - * @return agreementIds The array of agreement IDs - */ - function getProviderAgreements( - address provider, - uint256 offset, - uint256 count - ) external view returns (bytes16[] memory agreementIds); + function getAgreementInfo(address collector, bytes16 agreementId) external view returns (AgreementInfo memory info); /** * @notice Get the current escrow basis setting @@ -126,8 +84,7 @@ /** * @notice Get the sum of maxNextClaim across all (collector, provider) pairs - * @dev Populated lazily through normal operations. May be stale if agreements were - * offered before this feature was deployed — run reconciliation to populate. + * @dev Populated lazily through normal operations. * @return tokens The global sum of max next claims */ function getSumMaxNextClaimAll() external view returns (uint256 tokens); @@ -142,20 +99,25 @@ function getTotalEscrowDeficit() external view returns (uint256 tokens); /** - * @notice Get the total number of tracked agreements across all providers - * @dev Populated lazily through normal operations. - * @return count The total agreement count + * @notice Get the minimum spare balance threshold for OnDemand basis. + * @dev Effective basis limited to JustInTime when spare <= sumMaxNextClaimAll * threshold / 256.
+ * @return threshold The numerator over 256 */ - function getTotalAgreementCount() external view returns (uint256 count); + function getMinOnDemandBasisThreshold() external view returns (uint8 threshold); /** - * @notice Check whether temporary JIT mode is currently active - * @dev When active, the system operates in JIT-only mode regardless of the configured - * escrow basis. The configured basis is preserved and takes effect again when - * temp JIT recovers (totalEscrowDeficit < available) or operator calls {setTempJit}. - * @return active True if temporary JIT mode is active + * @notice Get the minimum spare balance margin for Full basis. + * @dev Effective basis limited to OnDemand when spare <= sumMaxNextClaimAll * (256 + margin) / 256. + * @return margin The margin added to 256 */ - function isTempJit() external view returns (bool active); + function getMinFullBasisMargin() external view returns (uint8 margin); + + /** + * @notice Minimum fraction of sumMaxNextClaim required to initiate an escrow thaw. + * @dev Escrow thaw is not initiated if excess is below sumMaxNextClaim * minThawFraction / 256 for a (collector, provider) pair. + * @return fraction The numerator over 256 + */ + function getMinThawFraction() external view returns (uint8 fraction); /** * @notice Get the number of collectors with active agreements @@ -164,53 +126,54 @@ function getCollectorCount() external view returns (uint256 count); /** - * @notice Get all collector addresses with active agreements - * @dev May be expensive for large sets — prefer the paginated overload for on-chain use.
- * @return result Array of collector addresses + * @notice Get a collector address by index + * @param index The index in the collector set + * @return collector The collector address */ - function getCollectors() external view returns (address[] memory result); + function getCollectorAt(uint256 index) external view returns (address collector); /** - * @notice Get a paginated slice of collector addresses - * @param offset The index to start from - * @param count Maximum number to return (clamped to available) - * @return result Array of collector addresses + * @notice Get the number of providers with active agreements for a collector + * @param collector The collector address + * @return count The number of tracked providers */ - function getCollectors(uint256 offset, uint256 count) external view returns (address[] memory result); + function getProviderCount(address collector) external view returns (uint256 count); /** - * @notice Get the number of providers with active agreements for a collector + * @notice Get a provider address by index for a given collector * @param collector The collector address - * @return count The number of tracked providers + * @param index The index in the provider set + * @return provider The provider address */ - function getCollectorProviderCount(address collector) external view returns (uint256 count); + function getProviderAt(address collector, uint256 index) external view returns (address provider); /** - * @notice Get all provider addresses with active agreements for a collector - * @dev May be expensive for large sets — prefer the paginated overload for on-chain use. 
+ * @notice Get the number of managed agreements for a (collector, provider) pair * @param collector The collector address - * @return result Array of provider addresses + * @param provider The provider address + * @return count The pair agreement count */ - function getCollectorProviders(address collector) external view returns (address[] memory result); + function getPairAgreementCount(address collector, address provider) external view returns (uint256 count); /** - * @notice Get a paginated slice of provider addresses for a collector + * @notice Get a managed agreement ID by index for a (collector, provider) pair * @param collector The collector address - * @param offset The index to start from - * @param count Maximum number to return (clamped to available) - * @return result Array of provider addresses + * @param provider The provider address + * @param index The index in the agreement set + * @return agreementId The agreement ID */ - function getCollectorProviders( + function getPairAgreementAt( address collector, - uint256 offset, - uint256 count - ) external view returns (address[] memory result); + address provider, + uint256 index + ) external view returns (bytes16 agreementId); /** - * @notice Get the number of managed agreements for a (collector, provider) pair + * @notice Get the cached escrow balance for a (collector, provider) pair + * @dev Compare with {getEscrowAccount} to detect stale escrow state requiring reconciliation. 
* @param collector The collector address * @param provider The provider address - * @return count The pair agreement count + * @return escrowSnap The last-known escrow balance */ - function getPairAgreementCount(address collector, address provider) external view returns (uint256 count); + function getEscrowSnap(address collector, address provider) external view returns (uint256 escrowSnap); } diff --git a/packages/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol b/packages/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol index ee4d3d35b..f19bc108b 100644 --- a/packages/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol +++ b/packages/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol @@ -6,7 +6,7 @@ pragma solidity ^0.8.22; * @author Edge & Node * @notice Functions for configuring escrow deposits that back * managed RCAs. Controls how aggressively escrow is pre-deposited. - * Escrow rebalancing is performed by {IRecurringAgreementManagement-reconcileCollectorProvider}. + * Escrow rebalancing is performed by {IRecurringAgreementManagement-reconcileProvider}. * * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. 
@@ -56,12 +56,25 @@ interface IRecurringEscrowManagement { event EscrowBasisSet(EscrowBasis indexed oldBasis, EscrowBasis indexed newBasis); /** - * @notice Emitted when temporary JIT mode is activated or deactivated - * @param active True when entering temp JIT, false when recovering - * @param automatic True when triggered by the system (beforeCollection/reconcileCollectorProvider), - * false when triggered by operator (setTempJit/setEscrowBasis) + * @notice Emitted when the OnDemand basis threshold is changed + * @param oldThreshold The previous threshold + * @param newThreshold The new threshold */ - event TempJitSet(bool indexed active, bool indexed automatic); + event MinOnDemandBasisThresholdSet(uint8 oldThreshold, uint8 newThreshold); + + /** + * @notice Emitted when the Full basis margin is changed + * @param oldMargin The previous margin + * @param newMargin The new margin + */ + event MinFullBasisMarginSet(uint8 oldMargin, uint8 newMargin); + + /** + * @notice Emitted when the minimum thaw fraction is changed + * @param oldFraction The previous fraction + * @param newFraction The new fraction + */ + event MinThawFractionSet(uint8 oldFraction, uint8 newFraction); // solhint-enable gas-indexed-events @@ -71,17 +84,44 @@ interface IRecurringEscrowManagement { * @notice Set the escrow basis (maximum aspiration level). * @dev Requires OPERATOR_ROLE. The system automatically degrades below the configured * level when balance is insufficient. Changing the basis does not immediately rebalance - * escrow — call {IRecurringAgreementManagement-reconcileCollectorProvider} per pair to apply. + * escrow — call {IRecurringAgreementManagement-reconcileProvider} per pair to apply. * @param basis The new escrow basis */ function setEscrowBasis(EscrowBasis basis) external; /** - * @notice Manually activate or deactivate temporary JIT mode - * @dev Requires OPERATOR_ROLE. When activated, the system operates in JIT-only mode - * regardless of the configured escrow basis. 
When deactivated, the configured basis - * takes effect again. Emits {TempJitSet}. - * @param active True to activate temp JIT, false to deactivate + * @notice Set the minimum spare balance threshold for OnDemand basis. + * @dev Requires OPERATOR_ROLE. The effective basis is limited to JustInTime + * when spare balance (balance - totalEscrowDeficit) is not strictly greater than + * sumMaxNextClaimAll * minOnDemandBasisThreshold / 256. + * @param threshold The numerator over 256 for the spare threshold + */ + function setMinOnDemandBasisThreshold(uint8 threshold) external; + + /** + * @notice Set the minimum spare balance margin for Full basis. + * @dev Requires OPERATOR_ROLE. The effective basis is limited to OnDemand + * when spare balance is not strictly greater than + * sumMaxNextClaimAll * (256 + minFullBasisMargin) / 256. + * @param margin The margin added to 256 for the spare threshold numerator + */ + function setMinFullBasisMargin(uint8 margin) external; + + /** + * @notice Set the minimum fraction to initiate thawing excess escrow. + * @dev Requires OPERATOR_ROLE. When excess above max for a (collector, provider) pair + * is less than sumMaxNextClaim[collector][provider] * minThawFraction / 256, the thaw + * is skipped. This avoids wasting the thaw timer on negligible amounts and prevents + * micro-deposit griefing where an attacker deposits dust via depositTo() and triggers + * reconciliation to start a tiny thaw that blocks legitimate thaw increases. + * + * WARNING: Setting fraction to 0 disables the dust threshold entirely, allowing any + * excess (including dust amounts) to trigger a thaw. This re-enables the micro-deposit + * griefing vector described above. Setting fraction to very high values (e.g. 255) + * means thaws are almost never triggered (excess must exceed ~99.6% of sumMaxNextClaim), + * which can cause escrow to remain over-funded indefinitely. The default of 16 (~6.25%) + * provides a reasonable balance. 
Operators should keep this value between 8 and 64. + * @param fraction The numerator over 256 for the dust threshold */ - function setTempJit(bool active) external; + function setMinThawFraction(uint8 fraction) external; } diff --git a/packages/interfaces/contracts/issuance/common/IEmergencyRoleControl.sol b/packages/interfaces/contracts/issuance/common/IEmergencyRoleControl.sol new file mode 100644 index 000000000..f47fe584d --- /dev/null +++ b/packages/interfaces/contracts/issuance/common/IEmergencyRoleControl.sol @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.7.6 || ^0.8.0; + +/** + * @title IEmergencyRoleControl + * @author Edge & Node + * @notice Interface for emergency role revocation by pause-role holders. + * @dev Provides a surgical alternative to pausing: disable a specific actor + * (operator, collector, data service) without halting the entire contract. + * Only the governor (role admin) can re-grant revoked roles. + */ +interface IEmergencyRoleControl { + /** + * @notice Emergency role revocation by pause-role holder + * @dev Allows pause-role holders to revoke any non-governor role as a fast-response + * emergency measure. Governor role is excluded to prevent a pause guardian from + * locking out governance. 
+ * @param role The role to revoke + * @param account The account to revoke the role from + */ + function emergencyRevokeRole(bytes32 role, address account) external; +} diff --git a/packages/interfaces/contracts/subgraph-service/ISubgraphService.sol b/packages/interfaces/contracts/subgraph-service/ISubgraphService.sol index be0bf05d2..0f8f3403f 100644 --- a/packages/interfaces/contracts/subgraph-service/ISubgraphService.sol +++ b/packages/interfaces/contracts/subgraph-service/ISubgraphService.sol @@ -4,9 +4,6 @@ pragma solidity ^0.8.22; import { IDataServiceAgreements } from "../data-service/IDataServiceAgreements.sol"; import { IDataServiceFees } from "../data-service/IDataServiceFees.sol"; import { IGraphPayments } from "../horizon/IGraphPayments.sol"; - -import { IRecurringCollector } from "../horizon/IRecurringCollector.sol"; - import { IAllocation } from "./internal/IAllocation.sol"; import { IIndexingAgreement } from "./internal/IIndexingAgreement.sol"; import { ILegacyAllocation } from "./internal/ILegacyAllocation.sol"; @@ -79,6 +76,21 @@ interface ISubgraphService is IDataServiceAgreements, IDataServiceFees { event IndexingFeesCutSet(uint256 indexingFeesCut); // solhint-disable-previous-line gas-indexed-events + /** + * @notice Emitted when an authorized collector is set or unset + * @param collector The collector address + * @param authorized Whether the collector is authorized + */ + event AuthorizedCollectorSet(address indexed collector, bool authorized); + // solhint-disable-previous-line gas-indexed-events + + /** + * @notice Emitted when the block closing allocation with active agreement setting is toggled + * @param enabled Whether the setting is enabled + */ + event BlockClosingAllocationWithActiveAgreementSet(bool enabled); + // solhint-disable-previous-line gas-indexed-events + /** * @notice Thrown when trying to set a curation cut that is not a valid PPM value * @param curationCut The curation cut value @@ -142,13 +154,13 @@ interface 
ISubgraphService is IDataServiceAgreements, IDataServiceFees { error SubgraphServiceInvalidRAV(address ravIndexer, address allocationIndexer); /** - * @notice Thrown when trying to force close an allocation that is not stale and the indexer is not over-allocated + * @notice Thrown when trying to resize an allocation that is not stale * @param allocationId The id of the allocation */ error SubgraphServiceCannotForceCloseAllocation(address allocationId); /** - * @notice Thrown when trying to force close an altruistic allocation + * @notice Thrown when trying to resize a stale allocation that is already altruistic (0 tokens) * @param allocationId The id of the allocation */ error SubgraphServiceAllocationIsAltruistic(address allocationId); @@ -164,6 +176,20 @@ interface ISubgraphService is IDataServiceAgreements, IDataServiceFees { */ error SubgraphServiceInvalidCollectionId(bytes32 collectionId); + /** + * @notice Thrown when a callback is called by an address that is not an authorized collector + * @param caller The unauthorized caller + */ + error SubgraphServiceNotCollector(address caller); + + /** + * @notice Thrown when trying to close an allocation that has an active indexing agreement + * and the close allocation guard is enabled + * @param allocationId The id of the allocation + * @param agreementId The id of the active agreement + */ + error SubgraphServiceAllocationHasActiveAgreement(address allocationId, bytes16 agreementId); + /** * @notice Initialize the contract * @dev The thawingPeriod and verifierCut ranges are not set here because they are variables @@ -181,16 +207,21 @@ interface ISubgraphService is IDataServiceAgreements, IDataServiceFees { ) external; /** - * @notice Force close a stale allocation + * @notice Resize a stale allocation to zero tokens * @dev This function can be permissionlessly called when the allocation is stale. This * ensures that rewards for other allocations are not diluted by an inactive allocation.
* + * The allocation stays open as a stakeless allocation (0 tokens) rather than being closed. + * Allocations are long-lived and track agreement bindings, so force-closing would + * inadvertently cancel the associated agreement. Any bound indexing agreement remains + * active. + * * Requirements: * - Allocation must exist and be open * - Allocation must be stale - * - Allocation cannot be altruistic + * - Allocation cannot already be stakeless * - * Emits a {AllocationClosed} event. + * Emits a {AllocationResized} event. * * @param allocationId The id of the allocation */ @@ -248,7 +279,7 @@ interface ISubgraphService is IDataServiceAgreements, IDataServiceFees { /** * @notice Sets the curators payment cut for query fees - * @dev Emits a {CuratorCutSet} event + * @dev Emits a {CurationCutSet} event * @param curationCut The curation cut for the payment type */ function setCurationCut(uint256 curationCut) external; @@ -268,40 +299,31 @@ interface ISubgraphService is IDataServiceAgreements, IDataServiceFees { function setPaymentsDestination(address newPaymentsDestination) external; /** - * @notice Accept an indexing agreement. - * @dev If `signature` is non-empty it is treated as an ECDSA signature; if empty the payer - * must be a contract implementing {IAgreementOwner}. 
- * @param allocationId The id of the allocation - * @param rca The recurring collection agreement parameters - * @param signature ECDSA signature bytes, or empty for contract-approved agreements - * @return agreementId The ID of the accepted indexing agreement + * @notice Sets an authorized collector + * @param collector The collector address + * @param authorized Whether the collector is authorized */ - function acceptIndexingAgreement( - address allocationId, - IRecurringCollector.RecurringCollectionAgreement calldata rca, - bytes calldata signature - ) external returns (bytes16); + function setAuthorizedCollector(address collector, bool authorized) external; /** - * @notice Update an indexing agreement. - * @dev If `signature` is non-empty it is treated as an ECDSA signature; if empty the payer - * must be a contract implementing {IAgreementOwner}. - * @param indexer The address of the indexer - * @param rcau The recurring collector agreement update to apply - * @param signature ECDSA signature bytes, or empty for contract-approved updates + * @notice Enables or disables blocking allocation closure when an active agreement exists. + * When enabled, closing an allocation that has an active indexing agreement will revert. + * @param enabled True to enable, false to disable */ - function updateIndexingAgreement( - address indexer, - IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau, - bytes calldata signature - ) external; + function setBlockClosingAllocationWithActiveAgreement(bool enabled) external; /** - * @notice Cancel an indexing agreement by indexer / operator. 
- * @param indexer The address of the indexer - * @param agreementId The id of the indexing agreement + * @notice Whether closing an allocation with an active agreement is blocked + * @return enabled True if blocking is enabled + */ + function getBlockClosingAllocationWithActiveAgreement() external view returns (bool enabled); + + /** + * @notice Checks if a collector is authorized + * @param collector The collector address + * @return Whether the collector is authorized */ - function cancelIndexingAgreement(address indexer, bytes16 agreementId) external; + function isAuthorizedCollector(address collector) external view returns (bool); /** * @notice Get the indexing agreement for a given agreement ID. diff --git a/packages/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol b/packages/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol index a3a6d02a3..c97c92482 100644 --- a/packages/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol +++ b/packages/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol @@ -20,11 +20,15 @@ interface IIndexingAgreement { /** * @notice Indexer Agreement Data * @param allocationId The allocation ID + * @param collector The collector contract this agreement belongs to * @param version The indexing agreement version + * @param subgraphDeploymentId The subgraph deployment ID locked at initial accept */ struct State { address allocationId; + address collector; IndexingAgreementVersion version; + bytes32 subgraphDeploymentId; } /** diff --git a/packages/issuance/audits/PR1301/AgreementLifecycleConsolidation.md b/packages/issuance/audits/PR1301/AgreementLifecycleConsolidation.md new file mode 100644 index 000000000..f74911091 --- /dev/null +++ b/packages/issuance/audits/PR1301/AgreementLifecycleConsolidation.md @@ -0,0 +1,152 @@ +# Agreement Lifecycle Consolidation + +Architectural refactor consolidating agreement lifecycle ownership into +RecurringCollector (RC), with 
SubgraphService (SS) and +RecurringAgreementManager (RAM) becoming reactive callback participants rather +than co-owners of lifecycle state. + +## Problem: Fragmented Lifecycle Ownership + +Previously, agreement lifecycle was split across three contracts with overlapping +responsibilities and increasingly tight coupling: + +- **SS** exposed public entry points (`acceptIndexingAgreement`, + `updateIndexingAgreement`, `cancelIndexingAgreementByPayer`) and orchestrated + RC calls internally +- **RC** validated ECDSA signatures and stored acceptance state via a 4-value + enum (`NotAccepted`, `Accepted`, `CanceledByServiceProvider`, `CanceledByPayer`) +- **RAM** authorized agreements via hash lookup (`approveAgreement`) and managed + escrow in flat per-provider mappings + +This meant state was scattered, the call graph was imperative (SS initiates → RC +records), and adding a new data service required replicating SS's lifecycle +methods. + +Responding to audit findings within this fragmented architecture created a +compounding problem: each fix added duplicated validation logic across contracts, +introduced new edge cases at the seams, and tightened already-fragile coupling. +RAM and SS both approached Spurious Dragon bytecode limits, making further fixes +increasingly constrained. Consolidation was necessary to break this cycle — +moving lifecycle ownership to RC reduced total bytecode and relieved size pressure +on RAM and SS by eliminating duplicated orchestration code. + +The tight coupling also limited future configuration and reuse — any new data +service or collector or agreement manager would have required reimplementing +the same entangled lifecycle logic. + +## Solution: Collector-Driven Callbacks + +RC becomes the **single state machine owner** for agreement lifecycle. SS and RAM +become **reactive participants** via well-defined callback interfaces. 
+ +### Responsibility Separation + +| Concern | Before | After | +| --------------------- | -------------------------- | --------------------------------------- | +| Lifecycle entry point | SS public methods | RC `offer()` / `accept()` / `cancel()` | +| State model | 4-value enum in RC | `uint16` bitmask flags in RC | +| Domain validation | Inline in SS methods | SS `acceptAgreement` callback | +| Escrow management | Flat maps in RAM | Nested per-collector storage in RAM | +| Authorization | ECDSA + Authorizable in RC | Offer phase + collector whitelist in SS | +| State notifications | None | `afterAgreementStateChange` callback | + +### New Interfaces + +- **`IAgreementCollector`** — generic lifecycle operations and state flag + constants. Decouples data services and agreement managers from RC-specific types. +- **`IAgreementStateChangeCallback`** — callback method for state transition + notifications. Data services react to lifecycle events without blocking. +- **`IDataServiceAgreements`** (evolved) — extends the callback interface and + adds `acceptAgreement` for domain-specific acceptance validation. + +### Coupling Inversion + +``` +BEFORE (imperative): + Caller → SS.acceptIndexingAgreement() → RC.accept(rca, sig) + State split between SS (allocation binding) and RC (acceptance flag) + +AFTER (callback-driven): + Caller → RC.offer() → RC.accept() + ↳ SS.acceptAgreement() — validates allocation, stores collector ref + ↳ SS.afterAgreementStateChange() — domain-specific reaction (no-op here) + ↳ RAM.afterAgreementStateChange() — escrow bookkeeping + State owned by RC; SS and RAM derive what they need from callbacks +``` + +### State Model: Enum → Bitmask + +The old 4-value enum could not express compound states (e.g., "accepted with +notice given by payer, auto-updated"). 
The new `uint16` bitmask uses independent +flags: + +`REGISTERED` · `ACCEPTED` · `NOTICE_GIVEN` · `SETTLED` · `BY_PAYER` · +`BY_PROVIDER` · `BY_DATA_SERVICE` · `UPDATE` · `AUTO_UPDATE` · `AUTO_UPDATED` + +Composed states like `ACCEPTED | NOTICE_GIVEN | BY_PAYER` replace what would have +required additional enum values and transition logic. + +### Callback Hardening + +All callbacks are gas-capped at `MAX_CALLBACK_GAS` (1.5M) with non-reverting +semantics for state-change notifications. Failures emit `PayerCallbackFailed` +rather than propagating reverts. Callbacks to `msg.sender` are skipped (the caller +already has context), avoiding callback loops. + +This incorporates audit findings TRST-H-1, L-1, H-2, H-4, SR-4 as integral +design rather than bolt-on mitigations. + +### RAM Storage: Flat → Nested + +``` +BEFORE: mapping(agreementId => info) — global, no collector scoping +AFTER: mapping(collector => CollectorData) + .agreements[agreementId] — scoped per collector + .providers[provider] — pair-keyed escrow + agreement set +``` + +Enables multi-collector support and cleaner enumeration without cross-collector +interference. + +### Additional Changes + +- **RC upgradeability**: ERC-7201 namespaced storage, `initialize()` pattern, + `TransparentUpgradeableProxy` deployment +- **RC pausability**: `PausableUpgradeable` with governor-managed pause guardians; + `whenNotPaused` on `collect`, `offer`, `accept`, `cancel` +- **Drop EIP-712 signing from RC**: ECDSA validation removed; the two-phase + offer/accept flow eliminates the need for on-chain signature verification + since callers authenticate through normal `msg.sender` checks. Moving + agreement state on-chain enables direct, atomic state transitions and + eliminates the ambiguity of off-chain signed messages where unknown updates + could exist — the on-chain state is always the complete, authoritative picture. 
+ This also improves the trust model: a contract payer that has submitted an + offer has pre-approved it on-chain and cannot block acceptance, whereas the + old signing model required the payer to cooperate at acceptance time. +- **Auto-update**: when a collection window expires and the provider has opted in + (`AUTO_UPDATE` flag), RC automatically promotes pending terms — a lifecycle + path only possible because RC owns the state machine. Data service callback + reverts are caught so a failing callback cannot block the promotion. +- **Collector whitelist in SS**: `authorizedCollectors` mapping with + `setAuthorizedCollector()` (owner-only) gates all callbacks via + `_requireCollectorCaller()`, making SS collector-agnostic rather than + hardcoded to a single RC instance + +## Design Principles + +1. **Single owner per concern** — RC owns lifecycle state, SS owns domain + validation, RAM owns escrow accounting +2. **Callbacks over method calls** — loose coupling via interfaces; new data + services implement callbacks without replicating lifecycle orchestration +3. **Compose state, don't enumerate it** — bitmask flags combine freely, + eliminating combinatorial enum growth +4. **Fail-safe notifications** — gas-capped, non-reverting callbacks prevent + one participant from blocking the state machine +5. **Collector-agnostic interfaces** — `IAgreementCollector` and callbacks use + generic types (agreementId, state flags, opaque metadata); no collector-specific + data is encoded into the interface, so data services and agreement managers + can work with any conforming collector without adaptation + +While the refactor moved significant code between contracts, the logic within +each contract is now more direct and easier to reason about. Each contract +has a single, well-defined role with clearer trust boundaries. 
diff --git a/packages/issuance/audits/PR1301/Graph_PR1301_v01.pdf b/packages/issuance/audits/PR1301/Graph_PR1301_v01.pdf new file mode 100644 index 000000000..8f14dd018 Binary files /dev/null and b/packages/issuance/audits/PR1301/Graph_PR1301_v01.pdf differ diff --git a/packages/issuance/audits/PR1301/README.md b/packages/issuance/audits/PR1301/README.md new file mode 100644 index 000000000..46695b14a --- /dev/null +++ b/packages/issuance/audits/PR1301/README.md @@ -0,0 +1,49 @@ +# Trust Security Audit - PR #1301 + +**Auditor:** Trust Security +**Period:** 2026-03-03 to 2026-03-19 +**Commit:** 7405c9d5f73bce04734efb3f609b76d95ffb520e +**Report:** [Graph_PR1301_v01.pdf](Graph_PR1301_v01.pdf) + +## Findings Summary + +| ID | Title | Severity | +| ----------------------- | -------------------------------------------------------- | -------- | +| [TRST-H-1](TRST-H-1.md) | Malicious payer gas siphoning via 63/64 rule | High | +| [TRST-H-2](TRST-H-2.md) | Invalid supportsInterface() returndata escapes try/catch | High | +| [TRST-H-3](TRST-H-3.md) | Stale escrow snapshot causes perpetual revert loop | High | +| [TRST-H-4](TRST-H-4.md) | EOA payer can block collection via EIP-7702 | High | +| [TRST-M-1](TRST-M-1.md) | Micro-thaw griefing via permissionless depositTo() | Medium | +| [TRST-M-2](TRST-M-2.md) | tempJit fallback in beforeCollection() unreachable | Medium | +| [TRST-M-3](TRST-M-3.md) | Instant escrow mode degradation via agreement offer | Medium | +| [TRST-L-1](TRST-L-1.md) | Insufficient gas for afterCollection callback | Low | +| [TRST-L-2](TRST-L-2.md) | Pending update over-reserves escrow | Low | +| [TRST-L-3](TRST-L-3.md) | Unsafe approveAgreement behavior during pause | Low | +| [TRST-L-4](TRST-L-4.md) | Pair tracking removal blocked by 1 wei donation | Low | +| [TRST-L-5](TRST-L-5.md) | \_computeMaxFirstClaim overestimates near deadline | Low | + +## Recommendations + +| ID | Title | +| ----------------------- | 
---------------------------------------------- | +| [TRST-R-1](TRST-R-1.md) | Avoid redeployment of RewardsEligibilityOracle | +| [TRST-R-2](TRST-R-2.md) | Improve stale documentation | +| [TRST-R-3](TRST-R-3.md) | Incorporate defensive coding best practices | +| [TRST-R-4](TRST-R-4.md) | Document critical assumptions in the RAM | + +## Centralization Risks + +| ID | Title | +| ------------------------- | --------------------------------------------------------------- | +| [TRST-CR-1](TRST-CR-1.md) | RAM Governor has unilateral control over payment infrastructure | +| [TRST-CR-2](TRST-CR-2.md) | Operator role controls agreement lifecycle and escrow mode | +| [TRST-CR-3](TRST-CR-3.md) | Single RAM instance manages all agreement escrow | + +## Systemic Risks + +| ID | Title | +| ------------------------- | -------------------------------------------------------------- | +| [TRST-SR-1](TRST-SR-1.md) | JIT mode provider payment race condition | +| [TRST-SR-2](TRST-SR-2.md) | Escrow thawing period creates prolonged fund immobility | +| [TRST-SR-3](TRST-SR-3.md) | Issuance distribution dependency for RAM solvency | +| [TRST-SR-4](TRST-SR-4.md) | Try/catch callback pattern silently degrades state consistency | diff --git a/packages/issuance/audits/PR1301/TRST-CR-1.md b/packages/issuance/audits/PR1301/TRST-CR-1.md new file mode 100644 index 000000000..65827afaa --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-CR-1.md @@ -0,0 +1,19 @@ +# TRST-CR-1: RAM Governor has unilateral control over payment infrastructure + +- **Severity:** Centralization Risk + +## Description + +The RecurringAgreementManager's `GOVERNOR_ROLE` has broad unilateral authority over critical payment infrastructure: + +- Controls which data services can participate (`DATA_SERVICE_ROLE` grants) +- Controls which collectors are trusted (`COLLECTOR_ROLE` grants) +- Can set the issuance allocator address, redirecting the token flow that funds all escrow +- Can set the provider eligibility oracle, 
which gates who can receive payments +- Can pause the entire contract, halting all agreement management + +A compromised or malicious governor could revoke a data service's role (preventing new agreements), change the issuance allocator to a contract that withholds funds, or set a malicious eligibility oracle that blocks specific providers from collecting. These actions affect all agreements managed by the RAM, not just future ones. + +--- + +Accepted centralization tradeoff. The governor must have these powers for effective protocol operation. Expected to be a multisig or governance contract in production. diff --git a/packages/issuance/audits/PR1301/TRST-CR-2.md b/packages/issuance/audits/PR1301/TRST-CR-2.md new file mode 100644 index 000000000..3331459bb --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-CR-2.md @@ -0,0 +1,17 @@ +# TRST-CR-2: Operator role controls agreement lifecycle and escrow mode + +- **Severity:** Centralization Risk + +## Description + +The `OPERATOR_ROLE` (admin of `AGREEMENT_MANAGER_ROLE`) controls the operational layer of the RAM: + +- Grants `AGREEMENT_MANAGER_ROLE`, which authorizes offering, updating, revoking, and canceling agreements +- Can change the `escrowBasis` (Full/OnDemand/JIT), instantly affecting escrow behavior for all existing agreements +- Can set `tempJit`, overriding the escrow mode to JIT for all pairs + +An operator switching from Full to JIT mode instantly removes proactive escrow guarantees for all providers. Providers who accepted agreements under the assumption of Full escrow backing may find their payment security degraded without notice or consent. The escrow mode change is a storage write with no timelock or multi-sig requirement. + +--- + +Accepted. The operator is a trusted role managing agreement lifecycle and escrow parameters on behalf of the protocol. Escrow parameter changes are visible on-chain via events. 
diff --git a/packages/issuance/audits/PR1301/TRST-CR-3.md b/packages/issuance/audits/PR1301/TRST-CR-3.md new file mode 100644 index 000000000..42097257c --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-CR-3.md @@ -0,0 +1,15 @@ +# TRST-CR-3: Single RAM instance manages all agreement escrow + +- **Severity:** Centralization Risk + +## Description + +The RecurringAgreementManager is a single contract instance that manages escrow for all agreements across all (collector, provider) pairs. The `totalEscrowDeficit` is a global aggregate, and the escrow mode (Full/OnDemand/JIT) applies uniformly to all pairs. + +This means operational decisions or issues affecting one pair can cascade to all others. For example, a single large agreement that becomes insolvent increases `totalEscrowDeficit`, potentially degrading the escrow mode from Full to OnDemand for every other pair. Similarly, a stale snapshot on one pair (TRST-H-3) affects the global deficit calculation. + +There is no isolation between pairs beyond the per-pair `sumMaxNextClaim` tracking. The RAM does not support per-pair escrow mode configuration or per-pair balance ringfencing. + +--- + +Accepted design tradeoff. The shared pool optimizes capital efficiency — per-pair isolation would significantly increase complexity, gas costs, and operational overhead. The snap-refresh fix (TRST-H-3) and minThawFraction (TRST-M-1) reduce cascading effects. 
diff --git a/packages/issuance/audits/PR1301/TRST-H-1.md b/packages/issuance/audits/PR1301/TRST-H-1.md new file mode 100644 index 000000000..a08927f1b --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-H-1.md @@ -0,0 +1,26 @@ +# TRST-H-1: Malicious payer gas siphoning via 63/64 rule in collection callbacks leads to collection bypass + +- **Severity:** High +- **Category:** Gas-related issues +- **Source:** RecurringCollector.sol +- **Status:** Open + +## Description + +In `RecurringCollector._collect()`, the `beforeCollection()` and `afterCollection()` callbacks to contract payers are wrapped in try/catch blocks (lines 380, 416). A malicious contract payer can exploit the EVM's 63/64 gas forwarding rule to consume nearly all available gas in these callbacks. + +The attack works as follows: the malicious payer's `beforeCollection()` implementation consumes 63/64 of the gas forwarded to it, either returning successfully or reverting, but regardless leaving only 1/64 of the original gas for the remainder of `_collect()`. The core payment logic (`PaymentsEscrow.collect()` at line 384) and event emissions then execute with a fraction of the expected gas. The `afterCollection()` callback then consumes another 63/64 of what remains. + +Realistically, after both callbacks siphon gas, there will not be enough gas left to complete the `PaymentsEscrow.collect()` call and the subsequent event emissions, causing the entire `collect()` transaction to revert. The security model for Payer as a smart contract does not account for requiring such gas expenditure, which can also be obfuscated away. This gives the malicious payer effective veto power over all collections against their agreements. + +## Recommended Mitigation + +Enforce a minimum gas reservation before each callback. 
Before calling `beforeCollection()`, check that `gasleft()` is sufficient and forward only a bounded amount of gas using the `{gas: maxCallbackGas}` syntax, retaining enough gas for the core payment logic. Apply the same pattern to `afterCollection()`. This caps the gas available to the payer's callbacks regardless of their implementation, ensuring the critical `PaymentsEscrow.collect()` call always has enough gas to complete. + +## Team Response + +TBD + +--- + +`_shouldCallback()` reverts with `InsufficientCallbackGas` when `gasleft() < (MAX_CALLBACK_GAS * 64) / 63`, accounting for the 63/64 forwarding rule. All payer callbacks (`isEligible`, `beforeCollection`, `afterCollection`) use `{gas: MAX_CALLBACK_GAS}` (1,500,000) to cap execution. Cross-package gas measurements with real contracts confirm sufficient headroom. diff --git a/packages/issuance/audits/PR1301/TRST-H-2.md b/packages/issuance/audits/PR1301/TRST-H-2.md new file mode 100644 index 000000000..a227a1720 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-H-2.md @@ -0,0 +1,26 @@ +# TRST-H-2: Invalid supportsInterface() returndata escapes try/catch leading to collection bypass + +- **Severity:** High +- **Category:** Logical flaws +- **Source:** RecurringCollector.sol +- **Status:** Open + +## Description + +In `RecurringCollector._collect()` (lines 368-378), the provider eligibility check calls `IERC165(agreement.payer).supportsInterface()` inside a try/catch block. The try clause expects a `(bool supported)` return value. If the external call succeeds at the EVM level (does not revert) but returns malformed data - such as fewer than 32 bytes of returndata or data that cannot be ABI-decoded as a bool - the Solidity ABI decoder reverts on the caller side when attempting to decode the return value. + +This ABI decoding revert occurs in the calling contract's execution context, not in the external call itself. 
Solidity's try/catch mechanism only catches reverts originating from the external call (callee-side reverts). Caller-side decoding failures escape the catch block and propagate as an unhandled revert, causing the entire `_collect()` transaction to fail. + +A malicious contract payer can exploit this by implementing a `supportsInterface()` function that returns success with empty returndata, a single byte, or any non-standard encoding. This permanently blocks all collections against agreements with that payer, since the `code.length > 0` check always routes through the vulnerable path. As before, the security model does not account for this bypass path to be validated against. + +## Recommended Mitigation + +Avoid receiving and decoding values from untrusted contract calls. This can be done manually by reading returndata at the assembly level. + +## Team Response + +TBD + +--- + +Replaced the `supportsInterface` → `isEligible` two-step with a single direct `isEligible` low-level `staticcall` with gas cap. Returndata is validated for length (>= 32 bytes) and decoded as `uint256`. Only an explicit return of `0` blocks collection; reverts, short returndata, and malformed responses are treated as "no opinion" (collection proceeds), with a `PayerCallbackFailed` event emitted for observability. diff --git a/packages/issuance/audits/PR1301/TRST-H-3.md b/packages/issuance/audits/PR1301/TRST-H-3.md new file mode 100644 index 000000000..43f0810f4 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-H-3.md @@ -0,0 +1,28 @@ +# TRST-H-3: Stale escrow snapshot causes a perpetual revert loop + +- **Severity:** High +- **Category:** Logical flaws +- **Source:** RecurringAgreementManager.sol +- **Status:** Open + +## Description + +The RecurringAgreementManager (RAM) maintains an `escrowSnap` per (collector, provider) pair - a cached view of the escrow balance used to compute `totalEscrowDeficit`. This snap is only updated at the end of `_updateEscrow()` via `_setEscrowSnap()`. 
When `afterCollection()` is called by the RecurringCollector after a payment collection, the escrow balance has already been reduced by the collected amount, but `escrowSnap` still reflects the pre-collection value. + +The stale-high snap causes `_escrowMinMax()` to understate the deficit. In Full escrow mode, when the RAM's free token balance is low, this leads to an incorrect decision to deposit into escrow. The deposit attempt reverts due to insufficient ERC20 balance, and the entire `afterCollection()` call fails. Since RecurringCollector wraps `afterCollection()` in try/catch (line 416), the revert is silently swallowed - but the snap never gets updated, making it permanently stale. + +This is self-reinforcing: every subsequent `afterCollection()`, `reconcileAgreement()`, and `reconcileCollectorProvider()` call for the affected pair follows the same code path and reverts for the same reason. There is no manual recovery path. The escrow accounting diverges from reality for the affected pair, and `totalEscrowDeficit` is globally understated, potentially causing other pairs to incorrectly enter Full mode and over-deposit. + +The state only self-heals when the RAM receives enough tokens (e.g., from issuance distribution) to cover the phantom deposit, at which point the deposit succeeds but sends tokens to escrow unnecessarily. + +## Recommended Mitigation + +Read the fresh escrow balance inside `_escrowMinMax()` when computing the deficit, rather than relying on the cached `escrowSnap` derived from `totalEscrowDeficit`. This makes the function self-correcting: even if a prior `afterCollection()` failed, the next call sees the true balance and makes the correct deposit/thaw decision. This approach fixes the root cause rather than masking the symptom with a balance guard. + +## Team Response + +TBD + +--- + +Now refreshing the cached `escrowSnap` at the start of `_reconcileProviderEscrow()` so that `_escrowMinMax()` uses updated `totalEscrowDeficit`. 
diff --git a/packages/issuance/audits/PR1301/TRST-H-4.md b/packages/issuance/audits/PR1301/TRST-H-4.md new file mode 100644 index 000000000..80b4c4195 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-H-4.md @@ -0,0 +1,28 @@ +# TRST-H-4: EOA payer can block collection by acquiring code via EIP-7702 + +- **Severity:** High +- **Category:** Type confusion +- **Source:** RecurringCollector.sol +- **Status:** Open + +## Description + +In `RecurringCollector._collect()` (lines 368-378), the provider eligibility gate is applied when `agreement.payer.code.length > 0`. This gate was designed as an opt-in mechanism for contract payers to control which providers can collect. However, with EIP-7702 (live on both Ethereum mainnet and Arbitrum), an EOA can set a code delegation to an arbitrary contract address. + +An EOA payer who originally signed an agreement via the ECDSA path can later acquire code using an EIP-7702 delegation transaction. This causes the `code.length > 0` branch to activate during collection. By delegating to a contract that implements `supportsInterface()` returning true for `IProviderEligibility` and `isEligible()` returning false, the payer triggers the `require()` on line 373. + +The `require()` is inside the try block's success handler. In Solidity, reverts in the success handler are NOT caught by the catch block - they propagate up and revert the entire transaction. This gives the payer complete, toggleable control over whether collections succeed. The payer can enable the delegation to block collections, disable it to sign new agreements, and re-enable it before collection attempts - all at negligible gas cost. + +The payer can then thaw and withdraw their escrowed funds after the thawing period, effectively receiving services for free. This bypasses the assumed security model where a provider can trust the escrow balance for an EOA payer to ensure collection will succeed. 
+ +## Recommended Mitigation + +Record whether the payer had code at agreement acceptance time by adding a bool flag to the agreement struct (e.g., `payerIsContract`). Only apply the `IProviderEligibility` gate when the payer was a contract at acceptance. This preserves the eligibility feature for legitimate contract payers while closing the EOA-to-contract vector introduced by EIP-7702. + +## Team Response + +TBD + +--- + +Eligibility checks are now opt-in via the `CONDITION_ELIGIBILITY_CHECK` flag, set explicitly in the agreement terms. Providers agree to eligibility gating by accepting an agreement that includes this condition. When the flag is set, the payer must pass an ERC-165 `supportsInterface` check for `IProviderEligibility` at offer time. An EOA cannot pass this check, so an EOA cannot create an agreement with eligibility gating enabled. diff --git a/packages/issuance/audits/PR1301/TRST-L-1.md b/packages/issuance/audits/PR1301/TRST-L-1.md new file mode 100644 index 000000000..94963f186 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-L-1.md @@ -0,0 +1,26 @@ +# TRST-L-1: Insufficient gas for afterCollection callback leaves escrow state outdated + +- **Severity:** Low +- **Category:** Time sensitivity flaw +- **Source:** RecurringCollector.sol +- **Status:** Open + +## Description + +In `RecurringCollector._collect()`, after a successful escrow collection, the function notifies contract payers via a try/catch call to `afterCollection()` (line 416). The caller (originating at data provider) controls the gas forwarded to the `collect()` transaction. By providing just enough gas for the core collection to succeed but not enough for the `afterCollection()` callback, the external call will revert due to an out-of-gas error, which is silently caught by the catch block. 
+ +For the RecurringAgreementManager (RAM), `afterCollection()` triggers `_reconcileAndUpdateEscrow()`, which reconciles the agreement's `maxNextClaim` against on-chain state and updates the escrow snapshot via `_setEscrowSnap()`. When this callback is skipped, the `escrowSnap` remains at its pre-collection value, overstating the actual escrow balance. This stale snapshot causes `totalEscrowDeficit` to be understated, which can lead to incorrect escrow mode decisions in `_escrowMinMax()` for subsequent operations on the affected (collector, provider) pair. + +The state will self-correct on the next successful call to `_updateEscrow()` for the same pair (e.g., via `reconcileAgreement()` or a subsequent collection with sufficient gas), so the impact is temporary. However, during the stale window, escrow rebalancing decisions may be suboptimal. + +## Recommended Mitigation + +Enforce a minimum gas forwarding requirement for the `afterCollection()` callback. This can be done by checking `gasleft()` before the `afterCollection()` call and reverting if insufficient gas remains for the callback to execute meaningfully. + +## Team Response + +TBD + +--- + +`_shouldCallback()` is called before every payer callback (including `afterCollection`). It reverts the entire `collect` call with `InsufficientCallbackGas` when insufficient gas remains, ensuring the callback cannot be starved. Callback failures from other causes are detected and logged via `PayerCallbackFailed` event. 
diff --git a/packages/issuance/audits/PR1301/TRST-L-2.md b/packages/issuance/audits/PR1301/TRST-L-2.md new file mode 100644 index 000000000..310115964 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-L-2.md @@ -0,0 +1,26 @@ +# TRST-L-2: Pending update over-reserves escrow with unrealistically conservative calculation + +- **Severity:** Low +- **Category:** Arithmetic issues +- **Source:** RecurringAgreementManager.sol +- **Status:** Open + +## Description + +In `offerAgreementUpdate()` (line 328), the pending update's `maxNextClaim` is computed via `_computeMaxFirstClaim()` using the full `maxSecondsPerCollection` window and the new `maxInitialTokens`. This amount is added to `sumMaxNextClaim` alongside the existing (non-pending) `maxNextClaim`, making both slots additive. + +This is overly conservative because only one set of terms is ever active at a time. While the update is pending, the RAM reserves escrow for both the current agreement terms and the proposed updated terms simultaneously. The correct calculation should take the maximum of the two rates multiplied by `maxSecondsPerCollection` plus the new `maxInitialTokens`, and add the old `maxInitialTokens` only if the initial collection has not yet occurred. + +The over-reservation reduces the effective capacity of the RAM, ties up capital that could serve other agreements, and in Full mode can trigger escrow mode degradation by inflating `totalEscrowDeficit`. Once the update is accepted or revoked, the excess is released, but during the pending window the impact on escrow accounting is significant for high-value agreements. Additionally, the over-reservation will trigger an unnecessary thaw as soon as the agreement update completes, since escrow will exceed the corrected target. + +## Recommended Mitigation + +The `pendingMaxNextClaim` should be computed as stated above, then reduced by the current `maxNextClaim` so that the total deficit is accurate. 
This reflects the reality that only one set of terms is active at any time, and the worst-case scenario where `collect()` is called before and after the agreement update. + +## Team Response + +TBD + +--- + +The RC now owns the `maxNextClaim` calculation. RAM calls `IRecurringCollector.getMaxNextClaim(agreementId)` which returns `max(activeTermsClaim, pendingTermsClaim)` — only the larger of current or pending terms is reserved, not both additively. diff --git a/packages/issuance/audits/PR1301/TRST-L-3.md b/packages/issuance/audits/PR1301/TRST-L-3.md new file mode 100644 index 000000000..93597db61 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-L-3.md @@ -0,0 +1,28 @@ +# TRST-L-3: Unsafe behavior of approveAgreement during pause + +- **Severity:** Low +- **Category:** Access control issues +- **Source:** RecurringAgreementManager.sol +- **Status:** Open + +## Description + +The `approveAgreement()` function (line 226) is a view function with no `whenNotPaused` modifier. During a pause, it continues to return the magic selector for authorized hashes, allowing the RecurringCollector to accept new agreements or apply updates even while the RAM is paused. + +A pause is typically an emergency measure intended to halt all state-changing operations. Allowing agreement acceptance during pause undermines this intent, as the accepted agreement creates obligations (escrow reservations, `maxNextClaim` tracking) that the paused RAM cannot manage. + +Similarly, `beforeCollection()` and `afterCollection()` do not check pause state. While blocking these during pause could prevent providers from collecting earned payments, allowing them could pose a security risk if the pause was triggered due to a discovered vulnerability in the escrow management logic. + +## Recommended Mitigation + +Add a pause check to `approveAgreement()` that returns `bytes4(0)` when the contract is paused, preventing new agreement acceptances and updates during emergency pauses. 
For `beforeCollection()` and `afterCollection()`, evaluate the trade-off: blocking them protects against exploitation of escrow logic bugs during pause, while allowing them ensures providers can still collect earned payments. Consider allowing collection callbacks only in a restricted mode during pause. + +## Team Response + +TBD + +--- + +1. **RAM**: The `approveAgreement` callback no longer exists — agreement lifecycle moved to the offer/accept flow on RC. `offerAgreement` is gated by `AGREEMENT_MANAGER_ROLE`. + +2. **RecurringCollector**: `offer`, `accept`, `collect`, and `cancel` are now all gated by `whenNotPaused`. Pause guardians managed by governor via `setPauseGuardian`. diff --git a/packages/issuance/audits/PR1301/TRST-L-4.md b/packages/issuance/audits/PR1301/TRST-L-4.md new file mode 100644 index 000000000..71ea33109 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-L-4.md @@ -0,0 +1,26 @@ +# TRST-L-4: Pair tracking removal blocked by 1 wei escrow donation + +- **Severity:** Low +- **Category:** Donation attacks +- **Source:** RecurringAgreementManager.sol +- **Status:** Open + +## Description + +When the last agreement for a (collector, provider) pair is deleted, `_reconcilePairTracking()` is intended to remove the pair from the tracking sets (`collectorProviders`, `collectors`) and clean up the escrow state. However, an attacker can prevent this cleanup by depositing 1 wei of GRT into the pair's escrow account via `PaymentsEscrow.deposit()` just before the reconciliation occurs. + +The donation increases the escrow balance, which in turn updates the `escrowSnap` to a non-zero value during `_updateEscrow()`. The `_reconcilePairTracking()` function checks whether the `escrowSnap` is zero to determine if the pair can be safely removed. With the 1 wei donation, this check passes (snap != 0), and the pair is retained in the tracking sets even though it has no active agreements. 
+ +This leaves orphaned entries in the `collectorProviders` and `collectors` tracking sets, preventing clean removal of the collector from the RAM's accounting. + +## Recommended Mitigation + +In `_reconcilePairTracking()`, base the removal decision on `pairAgreementCount` reaching zero rather than on `escrowSnap` being zero. If no agreements remain for a pair, remove it from tracking regardless of the escrow balance. Any residual escrow balance (from donations or rounding) can be handled by initiating a thaw before removal. + +## Team Response + +TBD + +--- + +Accepted limitation. Orphaned tracking entries do not affect correctness or funds safety. The proposed fix (removing pairs regardless of escrow balance) would sacrifice discoverability of unreclaimed escrow. Residual balances are handled through offline reconciliation. diff --git a/packages/issuance/audits/PR1301/TRST-L-5.md b/packages/issuance/audits/PR1301/TRST-L-5.md new file mode 100644 index 000000000..812ac5c35 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-L-5.md @@ -0,0 +1,26 @@ +# TRST-L-5: The \_computeMaxFirstClaim function overestimates when deadline is before full collection window + +- **Severity:** Low +- **Category:** Logical flaw +- **Source:** RecurringAgreementManager.sol +- **Status:** Open + +## Description + +In `_computeMaxFirstClaim()` (line 645), the maximum first claim is computed as: `maxOngoingTokensPerSecond * maxSecondsPerCollection + maxInitialTokens`. This uses the full `maxSecondsPerCollection` window regardless of how much time actually remains until the agreement's `endsAt` deadline. + +In contrast, RecurringCollector's `getMaxNextClaim()` correctly accounts for the remaining time until the deadline, capping the collection window when the deadline is closer than `maxSecondsPerCollection`. 
The RAM's overestimate means `sumMaxNextClaim` is inflated for agreements near their end date, causing the RAM to reserve more escrow than the RecurringCollector would ever allow to be collected. + +The excess reservation is wasteful but not directly exploitable, as the collector enforces the actual cap during collection. However, it reduces the RAM's effective capacity and can contribute to unnecessary escrow mode degradation. + +## Recommended Mitigation + +Align `_computeMaxFirstClaim()` with the RecurringCollector's `getMaxNextClaim()` logic by accounting for the remaining time until the agreement's `endsAt`. Compute the collection window as `min(maxSecondsPerCollection, endsAt - lastCollectionAt)` when determining the maximum possible claim. This requires passing the `endsAt` parameter to the function. + +## Team Response + +TBD + +--- + +RAM delegates to `IRecurringCollector.getMaxNextClaim(agreementId)` for all `maxNextClaim` calculations. The RC's `_maxClaimForTerms` correctly caps the collection window by remaining time until `endsAt`, eliminating the overestimate. diff --git a/packages/issuance/audits/PR1301/TRST-M-1.md b/packages/issuance/audits/PR1301/TRST-M-1.md new file mode 100644 index 000000000..6ff77952f --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-M-1.md @@ -0,0 +1,30 @@ +# TRST-M-1: Micro-thaw griefing via permissionless depositTo() and reconcileAgreement() + +- **Severity:** Medium +- **Category:** Griefing attacks +- **Source:** RecurringAgreementManager.sol +- **Status:** Open + +## Description + +Three independently benign features combine into a griefing vector: + +1. `PaymentsEscrow.depositTo()` has no access control - anyone can deposit any amount for any (payer, collector, receiver) tuple. +2. `reconcileAgreement()` is permissionless - anyone can trigger a reconciliation which calls `_updateEscrow()`. +3. 
`PaymentsEscrow.adjustThaw()` with `evenIfTimerReset=false` is a no-op when increasing the thaw amount would reset the thawing timer. + +An attacker deposits 1 wei into an escrow account via `depositTo()`, then calls `reconcileAgreement()`. The reconciliation detects escrow is 1 wei above target and initiates a thaw of 1 wei via `adjustThaw()`. This starts the thawing timer. When the RAM later needs to thaw a larger amount (e.g., after an agreement ends or is updated), it calls `adjustThaw()` with `evenIfTimerReset=false`, which becomes a no-op because increasing the thaw would reset the timer. + +In cases where thaws are needed to mobilize funds from one escrow pair to another - for example, to fund a new agreement or agreement update for a different provider - this griefing prevents the rebalancing. New agreements or updates that require escrow from the blocked pair's thawed funds could fail to be properly funded, causing escrow mode degradation or preventing the offers entirely. + +## Recommended Mitigation + +Add a minimum thaw threshold in `_updateEscrow()`. Amounts below the threshold should be ignored rather than initiating a thaw. This prevents an attacker from starting a thaw timer with a dust amount. If they do perform the attack, they will donate a non-negligible amount in exchange for the one-round block. + +## Team Response + +TBD + +--- + +Added configurable `minThawFraction` (uint8, proportion of 256, default 16 = 6.25%) that skips thaws when the excess above max is below `sumMaxNextClaim * fraction / 256` for the (collector, provider) pair. An attacker must now donate a meaningful fraction per griefing round, making such an attack both economically unattractive and less effective. 
diff --git a/packages/issuance/audits/PR1301/TRST-M-2.md b/packages/issuance/audits/PR1301/TRST-M-2.md new file mode 100644 index 000000000..9fc633fa5 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-M-2.md @@ -0,0 +1,28 @@ +# TRST-M-2: The tempJit fallback in beforeCollection() is unreachable in practice + +- **Severity:** Medium +- **Category:** Logical flaw +- **Source:** RecurringAgreementManager.sol +- **Status:** Open + +## Description + +In `beforeCollection()` (line 236), when the escrow balance is insufficient for an upcoming collection, the function attempts a JIT (Just-In-Time) top-up by setting `$.tempJit = true` before returning. The `tempJit` flag forces `_escrowMinMax()` to return JustInTime mode, freeing escrow from other pairs to fund this collection. + +However, the JIT path is only entered when the escrow is insufficient to cover `tokensToCollect`. In the `RecurringCollector._collect()` flow, `beforeCollection()` is called before `PaymentsEscrow.collect()`. If `beforeCollection()` cannot top up the escrow (because the RAM lacks free balance and the `deficit >= balanceOf()` guard fails), it returns without action. The subsequent `PaymentsEscrow.collect()` then attempts to collect `tokensToCollect` from an escrow that is still insufficient, causing the entire `collect()` transaction to revert. + +This means `tempJit` is never set in the scenario where it would be most needed: when escrow is short and the collection will fail regardless. An admin cannot rely on `tempJit` being triggered automatically during the RecurringCollector collection flow and would need to manually set JIT mode to achieve the intended fallback behavior. This would cause a delay the first time the issue is encountered where presumably there is no reason for admin to intervene. + +## Recommended Mitigation + +The original intention cannot be truly fulfilled without major redesign of multiple contracts. 
It is in practice more advisable to take the scenario into account and introduce an off-chain monitoring bot which would set the `tempJit` when needed. + +## Team Response + +TBD + +--- + +The `tempJit` mechanism has been replaced with threshold-based basis degradation. + +`_escrowMinMax()` now uses `minOnDemandBasisThreshold` and `minFullBasisMargin` parameters to automatically limit the effective escrow basis based on the ratio of spare balance to `sumMaxNextClaimAll`. This does not rely on a callback to activate and provides automatic, configurable transition boundaries. diff --git a/packages/issuance/audits/PR1301/TRST-M-3.md b/packages/issuance/audits/PR1301/TRST-M-3.md new file mode 100644 index 000000000..ea3c6f7da --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-M-3.md @@ -0,0 +1,28 @@ +# TRST-M-3: Instant escrow mode degradation from Full to OnDemand via agreement offer + +- **Severity:** Medium +- **Category:** Logical flaw +- **Source:** RecurringAgreementManager.sol +- **Status:** Open + +## Description + +Neither `offerAgreement()` nor `offerAgreementUpdate()` verify that the RAM has sufficient token balance to fund the new escrow obligation without degrading the escrow mode. An operator can offer an agreement whose `maxNextClaim`, when added to the existing `sumMaxNextClaim`, causes `totalEscrowDeficit` to exceed the RAM's balance. This instantly degrades the escrow mode from Full to OnDemand for ALL (collector, provider) pairs. + +The degradation occurs because `_escrowMinMax()` checks: `totalEscrowDeficit < balanceOf(address(this))`. When the new agreement pushes the deficit above the balance, this condition becomes false, and `min` drops to 0 for every pair - meaning no proactive deposits are made for any agreement, not just the new one. Existing providers who had fully-escrowed agreements silently lose their escrow guarantees. + +Whether intentional or by misfortune, this behavior can be triggered instantly by a single offer. 
If this degradation is desirable in some cases, it should only occur by explicit intention, not as a side effect of a routine operation. + +## Recommended Mitigation + +Add a separate configuration flag (e.g., `allowModeDegradation`) that must be explicitly set by the admin to permit offers that would degrade the escrow mode. When the flag is false, `offerAgreement()` and `offerAgreementUpdate()` should revert if the new obligation would push `totalEscrowDeficit` above the current balance. This ensures mode degradation is always a conscious decision. + +## Team Response + +TBD + +--- + +Acknowledged. The risk is documented in [RecurringAgreementManager.md — Automatic Degradation](../../contracts/agreement/RecurringAgreementManager.md#automatic-degradation), including the operator caution about pre-offer headroom checks. + +An on-chain guard was prototyped but added ~2.7KB to the contract, exceeding the Spurious Dragon 24576-byte limit. The operator (AGREEMENT_MANAGER_ROLE holder) is a trusted role expected to verify escrow headroom before offering agreements. diff --git a/packages/issuance/audits/PR1301/TRST-R-1.md b/packages/issuance/audits/PR1301/TRST-R-1.md new file mode 100644 index 000000000..5f1457f71 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-R-1.md @@ -0,0 +1,11 @@ +# TRST-R-1: Avoid redeployment of the RewardsEligibilityOracle by restructuring storage + +- **Severity:** Recommendation + +## Description + +The modified RewardsEligibilityOracle has two new state variables, as well as moving `eligibilityValidationEnabled` from the original slot to the end of the structure. Due to the relocation, an upgrade is needed, meaning all previous eligibility state will be lost. It is possible to only append storage slots to the original structure, and avoid a hard redeployment flow, by leveraging the upgradeability of the oracle. + +--- + +Acknowledged. The oracle is not yet deployed to production so the storage restructuring does not lose live state. 
The current layout preserves clean append-only expansion for future upgrades. diff --git a/packages/issuance/audits/PR1301/TRST-R-2.md b/packages/issuance/audits/PR1301/TRST-R-2.md new file mode 100644 index 000000000..a9a30ff54 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-R-2.md @@ -0,0 +1,14 @@ +# TRST-R-2: Improve stale documentation + +- **Severity:** Recommendation + +## Description + +The functions below are mentioned in various documentation files but do not exist in the current codebase: + +- `acceptUnsignedIndexingAgreement()` +- `removeAgreement()` + +--- + +Updated documentation to remove references to `acceptUnsignedIndexingAgreement()` and `removeAgreement()`. diff --git a/packages/issuance/audits/PR1301/TRST-R-3.md b/packages/issuance/audits/PR1301/TRST-R-3.md new file mode 100644 index 000000000..7e53510fa --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-R-3.md @@ -0,0 +1,11 @@ +# TRST-R-3: Incorporate defensive coding best practices + +- **Severity:** Recommendation + +## Description + +In the RAM's `cancelAgreement()` function, the agreement state is only required to not be `NotAccepted`. However, the logic could be more specific and require the agreement to be Accepted - rejecting previously cancelled agreements. There is no impact because corresponding checks in the RecurringCollector would deny such cancels, but it remains as a best practice. + +--- + +Checking agreement state is now handled by `RecurringCollector.cancel()`. diff --git a/packages/issuance/audits/PR1301/TRST-R-4.md b/packages/issuance/audits/PR1301/TRST-R-4.md new file mode 100644 index 000000000..c5f7a48fb --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-R-4.md @@ -0,0 +1,11 @@ +# TRST-R-4: Document critical assumptions in the RAM + +- **Severity:** Recommendation + +## Description + +The `approveAgreement()` view checks if the agreement hash is valid; however, it offers no replay protection for repeated agreement approvals. 
This attack vector is only stopped at the RecurringCollector as it checks the agreement does not exist and maintains unidirectional transitions from the agreement Accepted state. For future collectors this may not be the case, necessitating clear documentation of the assumption. + +--- + +The collector now fully owns the agreement lifecycle and is responsible for replay protection. diff --git a/packages/issuance/audits/PR1301/TRST-SR-1.md b/packages/issuance/audits/PR1301/TRST-SR-1.md new file mode 100644 index 000000000..1902b2ffd --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-SR-1.md @@ -0,0 +1,15 @@ +# TRST-SR-1: JIT mode provider payment race condition + +- **Severity:** Systemic Risk + +## Description + +When the RecurringAgreementManager operates in JustInTime (JIT) escrow mode, escrow is not proactively funded for any (collector, provider) pair. Instead, funds are deposited into escrow only during the `beforeCollection()` callback, moments before `PaymentsEscrow.collect()` executes. Since the RAM holds a shared pool of GRT that backs all agreements, multiple providers collecting around the same time are effectively racing for the same pool of tokens. + +If the RAM's balance is sufficient to cover any single collection but not all concurrent collections, the provider whose data service submits the `collect()` transaction first will succeed, while subsequent providers' collections will revert because the RAM's balance has been depleted by the first collection's JIT deposit. This creates a first-come-first-served dynamic where providers must compete on transaction ordering to receive payment. + +This race condition is inherent to the JIT mode design and cannot be fully eliminated without proactive escrow funding. In extreme cases, a well-resourced provider could use priority gas auctions or private mempools to consistently front-run other providers' collections, creating an unfair payment advantage unrelated to service quality. 
+ +--- + +Known architectural tradeoff. Full mode eliminates this entirely; OnDemand reduces its likelihood. JIT provides best-effort payment guarantees and is the fallback when the RAM's balance cannot sustain proactive escrow funding. diff --git a/packages/issuance/audits/PR1301/TRST-SR-2.md b/packages/issuance/audits/PR1301/TRST-SR-2.md new file mode 100644 index 000000000..5ad078675 --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-SR-2.md @@ -0,0 +1,15 @@ +# TRST-SR-2: Escrow thawing period creates prolonged fund immobility + +- **Severity:** Systemic Risk + +## Description + +The PaymentsEscrow thawing period (configurable up to `MAX_WAIT_PERIOD`, 90 days) creates a window during which escrowed funds are immobile. When the RAM needs to rebalance escrow across providers - for example, after an agreement ends and funds should be redirected to a new agreement - the thawing delay prevents immediate reallocation. During this window, the RAM effectively has reduced capacity. + +If multiple agreements end in a short period or the escrow mode degrades from Full to OnDemand, the RAM may enter a state where substantial funds are locked in thawing and unavailable for either existing or new obligations. This is compounded by the micro-thaw griefing vector (TRST-M-1), which can extend the immobility period by blocking thaw increases. + +The thawing period is a protocol-level parameter set on PaymentsEscrow and is outside the RAM's control. Changes to this parameter affect all users of the escrow system, not just the RAM. + +--- + +The thawing period protects providers from instant escrow drainage after service delivery. The minThawFraction fix (TRST-M-1) reduces griefing amplification and the snap-refresh fix (TRST-H-3) ensures accurate deficit tracking during rebalancing. The fundamental constraint is a protocol-level design decision outside the RAM's scope. 
diff --git a/packages/issuance/audits/PR1301/TRST-SR-3.md b/packages/issuance/audits/PR1301/TRST-SR-3.md new file mode 100644 index 000000000..91a3a71fc --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-SR-3.md @@ -0,0 +1,15 @@ +# TRST-SR-3: Issuance distribution dependency for RAM solvency + +- **Severity:** Systemic Risk + +## Description + +The RAM relies on periodic issuance distribution (via the issuance allocator) to receive GRT tokens for funding escrow obligations. If the issuance system experiences delays, governance disputes, or contract upgrades that temporarily halt distributions, the RAM's free balance depletes as collections drain escrow without replenishment. + +Once the free balance reaches zero, the RAM cannot fund JIT top-ups in `beforeCollection()`, cannot proactively deposit in Full mode for new agreements, and existing escrow accounts gradually drain with each collection. Prolonged issuance interruption could cascade into escrow mode degradation (Full -> OnDemand -> JIT), ultimately affecting all providers' payment reliability. + +This is an external dependency that the RAM admin cannot mitigate beyond maintaining a buffer balance. + +--- + +Acknowledged. The RAM maintains a buffer balance and the escrow degradation mechanism (Full → OnDemand → JIT) provides graceful fallback. Issuance interruptions are visible on-chain, allowing operators to respond before provider payments are affected. diff --git a/packages/issuance/audits/PR1301/TRST-SR-4.md b/packages/issuance/audits/PR1301/TRST-SR-4.md new file mode 100644 index 000000000..e2b055fac --- /dev/null +++ b/packages/issuance/audits/PR1301/TRST-SR-4.md @@ -0,0 +1,17 @@ +# TRST-SR-4: Try/catch callback pattern silently degrades state consistency + +- **Severity:** Systemic Risk + +## Description + +The RecurringCollector wraps all payer callbacks (`beforeCollection()`, `afterCollection()`) in try/catch blocks. 
While this design prevents malicious or buggy payer contracts from blocking collection, it means that any revert in these callbacks is silently discarded. The collection proceeds as if the callback succeeded, but the payer's internal state (escrow snapshots, deficit tracking, reconciliation) may not have been updated. + +This creates a systemic tension: the try/catch is necessary for liveness (ensuring providers can collect), but it trades state consistency for availability. Over time, if callbacks fail repeatedly (due to gas issues, contract bugs, or the stale snapshot issue in TRST-H-3), the divergence between the RAM's internal accounting and the actual escrow state can compound silently with no on-chain signal. + +There is no event emitted when a callback fails, making it difficult for off-chain monitoring to detect and respond to these silent failures. + +--- + +Non-reverting callbacks are intentional — collector liveness takes priority over payer state updates. Callbacks now use low-level `call`/`staticcall` with gas caps instead of try/catch. The snap-refresh fix (TRST-H-3) ensures the next successful `_reconcileProviderEscrow` call self-corrects any divergence. Permissionless `reconcileAgreement` and `reconcileProvider` provide external recovery paths. + +Failed callbacks emit `PayerCallbackFailed(agreementId, payer, stage)` with a `PayerCallbackStage` enum (`EligibilityCheck`, `BeforeCollection`, `AfterCollection`), giving off-chain monitoring a signal to detect failures and trigger reconciliation. 
diff --git a/packages/issuance/contracts/agreement/RecurringAgreementHelper.sol b/packages/issuance/contracts/agreement/RecurringAgreementHelper.sol index 250ca600d..b51686383 100644 --- a/packages/issuance/contracts/agreement/RecurringAgreementHelper.sol +++ b/packages/issuance/contracts/agreement/RecurringAgreementHelper.sol @@ -51,9 +51,9 @@ contract RecurringAgreementHelper is IRecurringAgreementHelper { tokenBalance: GRAPH_TOKEN.balanceOf(MANAGER), sumMaxNextClaimAll: mgr.getSumMaxNextClaimAll(), totalEscrowDeficit: mgr.getTotalEscrowDeficit(), - totalAgreementCount: mgr.getTotalAgreementCount(), escrowBasis: mgr.getEscrowBasis(), - tempJit: mgr.isTempJit(), + minOnDemandBasisThreshold: mgr.getMinOnDemandBasisThreshold(), + minFullBasisMargin: mgr.getMinFullBasisMargin(), collectorCount: mgr.getCollectorCount() }); } @@ -80,58 +80,129 @@ contract RecurringAgreementHelper is IRecurringAgreementHelper { provider: provider, agreementCount: mgr.getPairAgreementCount(collector, provider), sumMaxNextClaim: mgr.getSumMaxNextClaim(IRecurringCollector(collector), provider), + escrowSnap: mgr.getEscrowSnap(collector, provider), escrow: mgr.getEscrowAccount(IRecurringCollector(collector), provider) }); } - // -- Reconciliation -- + // -- Enumeration Views -- /// @inheritdoc IRecurringAgreementHelper - function reconcile(address provider) external returns (uint256 removed) { + function getPairAgreements(address collector, address provider) external view returns (bytes16[] memory) { + return getPairAgreements(collector, provider, 0, type(uint256).max); + } + + /// @inheritdoc IRecurringAgreementHelper + function getPairAgreements( + address collector, + address provider, + uint256 offset, + uint256 count + ) public view returns (bytes16[] memory result) { IRecurringAgreements mgr = IRecurringAgreements(MANAGER); - IRecurringAgreementManagement mgt = IRecurringAgreementManagement(MANAGER); - bytes16[] memory ids = mgr.getProviderAgreements(provider); - for (uint256 i = 0; i 
< ids.length; ++i) if (!mgt.reconcileAgreement(ids[i])) ++removed; + uint256 total = mgr.getPairAgreementCount(collector, provider); + // solhint-disable-next-line gas-strict-inequalities + if (total <= offset) return new bytes16[](0); + uint256 remaining = total - offset; + if (remaining < count) count = remaining; + result = new bytes16[](count); + for (uint256 i = 0; i < count; ++i) result[i] = mgr.getPairAgreementAt(collector, provider, offset + i); } /// @inheritdoc IRecurringAgreementHelper - function reconcileBatch(bytes16[] calldata agreementIds) external returns (uint256 removed) { - IRecurringAgreementManagement mgt = IRecurringAgreementManagement(MANAGER); - for (uint256 i = 0; i < agreementIds.length; ++i) { - if (!mgt.reconcileAgreement(agreementIds[i])) ++removed; + function getCollectors() external view returns (address[] memory) { + return getCollectors(0, type(uint256).max); + } + + /// @inheritdoc IRecurringAgreementHelper + function getCollectors(uint256 offset, uint256 count) public view returns (address[] memory result) { + IRecurringAgreements mgr = IRecurringAgreements(MANAGER); + uint256 total = mgr.getCollectorCount(); + // solhint-disable-next-line gas-strict-inequalities + if (total <= offset) return new address[](0); + uint256 remaining = total - offset; + if (remaining < count) count = remaining; + result = new address[](count); + for (uint256 i = 0; i < count; ++i) result[i] = mgr.getCollectorAt(offset + i); + } + + /// @inheritdoc IRecurringAgreementHelper + function getProviders(address collector) external view returns (address[] memory) { + return getProviders(collector, 0, type(uint256).max); + } + + /// @inheritdoc IRecurringAgreementHelper + function getProviders( + address collector, + uint256 offset, + uint256 count + ) public view returns (address[] memory result) { + IRecurringAgreements mgr = IRecurringAgreements(MANAGER); + uint256 total = mgr.getProviderCount(collector); + // solhint-disable-next-line 
gas-strict-inequalities + if (total <= offset) return new address[](0); + uint256 remaining = total - offset; + if (remaining < count) count = remaining; + result = new address[](count); + for (uint256 i = 0; i < count; ++i) result[i] = mgr.getProviderAt(collector, offset + i); + } + + // -- Reconciliation Discovery -- + + /// @inheritdoc IRecurringAgreementHelper + function checkPairStaleness( + address collector, + address provider + ) external view returns (AgreementStaleness[] memory staleAgreements, bool escrowStale) { + IRecurringAgreements mgr = IRecurringAgreements(MANAGER); + uint256 count = mgr.getPairAgreementCount(collector, provider); + staleAgreements = new AgreementStaleness[](count); + for (uint256 i = 0; i < count; ++i) { + bytes16 id = mgr.getPairAgreementAt(collector, provider, i); + uint256 cached = mgr.getAgreementMaxNextClaim(collector, id); + uint256 live = IRecurringCollector(collector).getMaxNextClaim(id); + staleAgreements[i] = AgreementStaleness({ + agreementId: id, + cachedMaxNextClaim: cached, + liveMaxNextClaim: live, + stale: cached != live + }); } + escrowStale = + mgr.getEscrowSnap(collector, provider) != + mgr.getEscrowAccount(IRecurringCollector(collector), provider).balance; } + // -- Reconciliation -- + /// @inheritdoc IRecurringAgreementHelper function reconcilePair(address collector, address provider) external returns (uint256 removed, bool pairExists) { removed = _reconcilePair(collector, provider); - pairExists = IRecurringAgreementManagement(MANAGER).reconcileCollectorProvider(collector, provider); + pairExists = IRecurringAgreementManagement(MANAGER).reconcileProvider(collector, provider); } /// @inheritdoc IRecurringAgreementHelper function reconcileCollector(address collector) external returns (uint256 removed, bool collectorExists) { - IRecurringAgreements mgr = IRecurringAgreements(MANAGER); IRecurringAgreementManagement mgt = IRecurringAgreementManagement(MANAGER); // Snapshot providers before iterating (removal 
modifies the set) - address[] memory providers = mgr.getCollectorProviders(collector); + address[] memory providers = this.getProviders(collector); for (uint256 p = 0; p < providers.length; ++p) { removed += _reconcilePair(collector, providers[p]); - mgt.reconcileCollectorProvider(collector, providers[p]); + mgt.reconcileProvider(collector, providers[p]); } - collectorExists = mgr.getCollectorProviders(collector).length != 0; + collectorExists = IRecurringAgreements(MANAGER).getProviderCount(collector) != 0; } /// @inheritdoc IRecurringAgreementHelper function reconcileAll() external returns (uint256 removed) { - IRecurringAgreements mgr = IRecurringAgreements(MANAGER); IRecurringAgreementManagement mgt = IRecurringAgreementManagement(MANAGER); // Snapshot collectors before iterating - address[] memory collectors = mgr.getCollectors(); + address[] memory collectors = this.getCollectors(); for (uint256 c = 0; c < collectors.length; ++c) { - address[] memory providers = mgr.getCollectorProviders(collectors[c]); + address[] memory providers = this.getProviders(collectors[c]); for (uint256 p = 0; p < providers.length; ++p) { removed += _reconcilePair(collectors[c], providers[p]); - mgt.reconcileCollectorProvider(collectors[c], providers[p]); + mgt.reconcileProvider(collectors[c], providers[p]); } } } @@ -144,7 +215,7 @@ contract RecurringAgreementHelper is IRecurringAgreementHelper { uint256 count ) private view returns (PairAudit[] memory pairs) { IRecurringAgreements mgr = IRecurringAgreements(MANAGER); - address[] memory providers = mgr.getCollectorProviders(collector, offset, count); + address[] memory providers = this.getProviders(collector, offset, count); pairs = new PairAudit[](providers.length); for (uint256 i = 0; i < providers.length; ++i) { pairs[i] = PairAudit({ @@ -152,19 +223,17 @@ contract RecurringAgreementHelper is IRecurringAgreementHelper { provider: providers[i], agreementCount: mgr.getPairAgreementCount(collector, providers[i]), sumMaxNextClaim: 
mgr.getSumMaxNextClaim(IRecurringCollector(collector), providers[i]), + escrowSnap: mgr.getEscrowSnap(collector, providers[i]), escrow: mgr.getEscrowAccount(IRecurringCollector(collector), providers[i]) }); } } function _reconcilePair(address collector, address provider) private returns (uint256 removed) { - IRecurringAgreements mgr = IRecurringAgreements(MANAGER); IRecurringAgreementManagement mgt = IRecurringAgreementManagement(MANAGER); - bytes16[] memory ids = mgr.getProviderAgreements(provider); + bytes16[] memory ids = this.getPairAgreements(collector, provider); for (uint256 i = 0; i < ids.length; ++i) { - if (address(mgr.getAgreementInfo(ids[i]).collector) == collector) { - if (!mgt.reconcileAgreement(ids[i])) ++removed; - } + if (!mgt.reconcileAgreement(collector, ids[i])) ++removed; } } } diff --git a/packages/issuance/contracts/agreement/RecurringAgreementManager.md b/packages/issuance/contracts/agreement/RecurringAgreementManager.md index 92b7c14de..bd8b03265 100644 --- a/packages/issuance/contracts/agreement/RecurringAgreementManager.md +++ b/packages/issuance/contracts/agreement/RecurringAgreementManager.md @@ -7,7 +7,7 @@ It implements seven interfaces: - **`IIssuanceTarget`** — receives minted GRT from IssuanceAllocator - **`IAgreementOwner`** — authorizes RCA acceptance and updates via callback (replaces ECDSA signature) - **`IRecurringAgreementManagement`** — agreement lifecycle: offer, update, revoke, cancel, remove, reconcile -- **`IRecurringEscrowManagement`** — escrow configuration: setEscrowBasis, setTempJit +- **`IRecurringEscrowManagement`** — escrow configuration: setEscrowBasis, limit thresholds, thaw fraction - **`IProviderEligibilityManagement`** — eligibility oracle configuration: setProviderEligibilityOracle - **`IRecurringAgreements`** — read-only queries: agreement info, escrow state, global tracking - **`IProviderEligibility`** — delegates payment eligibility checks to an optional oracle @@ -16,7 +16,7 @@ It implements seven 
interfaces: RAM pulls minted GRT from IssuanceAllocator via `_ensureIncomingDistributionToCurrentBlock()` before any balance-dependent decision. This ensures `balanceOf(address(this))` reflects all available tokens before escrow deposits or JIT calculations. -**Trigger points**: `beforeCollection` (JIT path, when escrow is insufficient) and `_updateEscrow` (all escrow rebalancing). Both may fire in the same transaction, so a per-block deduplication guard (`ensuredIncomingDistributedToBlock`) skips redundant allocator calls. +**Trigger points**: `beforeCollection` (JIT path, when escrow is insufficient) and `_reconcileProviderEscrow` (all escrow rebalancing). Both may fire in the same transaction, so a per-block deduplication guard (`ensuredIncomingDistributedToBlock`) skips redundant allocator calls. **Failure tolerance**: Allocator reverts are caught via try-catch — collection continues and a `DistributeIssuanceFailed` event is emitted for monitoring. This prevents a malfunctioning allocator from blocking payments. @@ -27,15 +27,11 @@ RAM pulls minted GRT from IssuanceAllocator via `_ensureIncomingDistributionToCu One escrow account per (RecurringAgreementManager, collector, provider) tuple covers **all** managed RCAs for that (collector, provider) pair. Multiple agreements for the same pair share a single escrow balance: ``` -sum(maxNextClaim + pendingUpdateMaxNextClaim for all active agreements for that provider) <= PaymentsEscrow.escrowAccounts[RecurringAgreementManager][RecurringCollector][provider] +sum(maxNextClaim for all active agreements for that provider) <= PaymentsEscrow.escrowAccounts[RecurringAgreementManager][RecurringCollector][provider] ``` Deposits never revert — `_escrowMinMax` degrades the mode when balance is insufficient, ensuring the deposit amount is always affordable. The `getEscrowAccount` view exposes the underlying escrow account for monitoring. 
-## Hash Authorization - -The `authorizedHashes` mapping stores `hash → agreementId` rather than `hash → bool`. Hashes are automatically invalidated when agreements are deleted, preventing reuse without explicit cleanup. - ## Max Next Claim For accepted agreements, delegated to `RecurringCollector.getMaxNextClaim(agreementId)` as the single source of truth. For pre-accepted offers, a conservative estimate calculated at offer time: @@ -44,22 +40,22 @@ For accepted agreements, delegated to `RecurringCollector.getMaxNextClaim(agreem maxNextClaim = maxOngoingTokensPerSecond * maxSecondsPerCollection + maxInitialTokens ``` -| Agreement State | maxNextClaim | -| --------------------------- | -------------------------------------------------------------- | -| NotAccepted (pre-offered) | Stored estimate from `offerAgreement` | -| NotAccepted (past deadline) | 0 (expired offer, removable) | -| Accepted, never collected | Calculated by RecurringCollector (includes initial + ongoing) | -| Accepted, after collect | Calculated by RecurringCollector (ongoing only) | -| CanceledByPayer | Calculated by RecurringCollector (window frozen at canceledAt) | -| CanceledByServiceProvider | 0 | -| Fully expired | 0 | +| Agreement State | maxNextClaim | +| --------------------------- | -------------------------------------------------------------------- | +| NotAccepted (pre-offered) | Stored estimate from `offerAgreement` | +| NotAccepted (past deadline) | 0 (expired offer, removable) | +| Accepted, never collected | Calculated by RecurringCollector (includes initial + ongoing) | +| Accepted, after collect | Calculated by RecurringCollector (ongoing only) | +| CanceledByPayer | Calculated by RecurringCollector (window capped at collectableUntil) | +| CanceledByServiceProvider | 0 | +| Fully expired | 0 | ## Lifecycle ### Offer → Accept (two-step) -1. **Agreement manager** calls `offerAgreement(rca, collector)` — stores hash, calculates conservative maxNextClaim, deposits into escrow -2. 
**Service provider operator** calls `SubgraphService.acceptUnsignedIndexingAgreement(allocationId, rca)` — SubgraphService → RecurringCollector → `approveAgreement(hash)` callback to RecurringAgreementManager +1. **Agreement manager** calls `offerAgreement(collector, offerType, offerData)` — forwards opaque offer to collector (new or update), tracks agreement, calculates conservative maxNextClaim, deposits into escrow +2. **Service provider operator** calls `RecurringCollector.accept(agreementId, termsHash, signature)` — RecurringCollector validates and calls `IAgreementAcceptCallback.onAcceptAgreement()` on the data service (e.g. SubgraphService) for domain-specific validation During the pending update window, both current and pending maxNextClaim are escrowed simultaneously (conservative). @@ -67,15 +63,16 @@ During the pending update window, both current and pending maxNextClaim are escr Collection flows through `SubgraphService → RecurringCollector → PaymentsEscrow`. RecurringCollector then calls `IAgreementOwner.afterCollection` on the payer, which triggers automatic reconciliation and escrow top-up in the same transaction. Manual reconcile is still available as a fallback. -The manager exposes `reconcileAgreement` (gas-predictable, per-agreement). Batch convenience functions `reconcileBatch` (caller-selected list) and `reconcile(provider)` (iterates all agreements) are in the stateless `RecurringAgreementHelper` contract, which delegates each reconciliation back to the manager. +The manager exposes `reconcileAgreement` (gas-predictable, per-agreement) and `reconcileProvider` (pair-level escrow rebalancing). Batch convenience functions `reconcilePair`, `reconcileCollector`, and `reconcileAll` are in the stateless `RecurringAgreementHelper` contract, which iterates agreements and delegates each reconciliation back to the manager. 
-### Revoke / Cancel / Remove +### Cancel / Remove -- **`revokeOffer`** — withdraws an un-accepted offer -- **`cancelAgreement`** — for accepted agreements, routes cancellation through the data service then reconciles; idempotent for already-canceled agreements -- **`removeAgreement`** (permissionless) — cleans up agreements with maxNextClaim = 0 +- **`cancelAgreement`** — routes cancellation through the collector's `cancel` function (passing the terms hash), then reconciles locally. Cancels un-accepted offers, accepted agreements, or pending updates depending on the `versionHash` provided. Requires AGREEMENT_MANAGER_ROLE. +- **`forceRemoveAgreement`** — operator escape hatch for agreements whose collector is unresponsive (broken upgrade, permanent pause). Zeroes the agreement's maxNextClaim, removes it from pair tracking, and triggers pair reconciliation. Requires OPERATOR_ROLE. -| State | Removable when | +Cleanup is automatic: `reconcileAgreement` deletes agreements whose `maxNextClaim` is 0. + +| State | Deleted by reconcile when | | ------------------------- | ------------------------------------- | | CanceledByServiceProvider | Immediately (maxNextClaim = 0) | | CanceledByPayer | After collection window expires | @@ -106,7 +103,7 @@ Ordered low-to-high: ### Min/Max Model -`_updateEscrow` uses two numbers from `_escrowMinMax` instead of a single `sumMaxNextClaim`: +`_reconcileProviderEscrow` uses two numbers from `_escrowMinMax` instead of a single `sumMaxNextClaim`: - **min**: deposit floor — deposit if effective balance is below this - **max**: thaw ceiling — thaw effective balance above this (never resetting an active thaw timer) @@ -115,11 +112,24 @@ The split ensures smooth transitions between levels. When degradation occurs, mi ### Automatic Degradation -The setting is a ceiling, not a mandate. **Full → OnDemand** when `available <= totalEscrowDeficit` (RAM's balance can't close the system-wide gap): min drops to 0, max stays at `sumMaxNextClaim`. 
Degradation never reaches JustInTime automatically — only explicit operator setting or temp JIT. +The setting is a ceiling, not a mandate. `_escrowMinMax` computes `spare = balance - totalEscrowDeficit` (floored at 0) and compares it against `sumMaxNextClaimAll` scaled by two configurable uint8 parameters (fractional units of 1/256): + +| Gate | Controls | Condition (active when true) | Parameter (default) | +| ---- | ---------------------------------------- | --------------------------------------------------------------------------------------- | --------------------------------------- | +| max | Hold escrow at `sumMaxNextClaim` ceiling | `sumMaxNextClaimAll * minOnDemandBasisThreshold / 256 < spare` | `minOnDemandBasisThreshold` (128 = 50%) | +| min | Proactively deposit to `sumMaxNextClaim` | `sumMaxNextClaimAll * (256 + minFullBasisMargin) / 256 < spare` (requires basis = Full) | `minFullBasisMargin` (16 ~ 6% margin) | + +The min gate is stricter (0.5x < 1.0625x), giving three effective states as `spare` decreases: + +1. **Full** (`smnca × 1.0625 < spare`): both gates pass — min = max = `sumMaxNextClaim` +2. **OnDemand** (`smnca × 0.5 < spare ≤ smnca × 1.0625`): min gate fails, max holds — min = 0, max = `sumMaxNextClaim` (no new deposits, but existing escrow up to max is held) +3. **JIT** (`spare ≤ smnca × 0.5`): both gates fail — min = max = 0 (thaw everything) + +**Operator caution — new agreements can trigger instant degradation.** `offerAgreement()` (both new and update) increases `sumMaxNextClaim` (and therefore `totalEscrowDeficit`) without checking whether the RAM has sufficient balance to maintain the current escrow mode. A single offer can push `spare` below the threshold, instantly degrading escrow mode for **all** (collector, provider) pairs — not just the new agreement. Existing providers who had fully-escrowed agreements silently lose their proactive deposits. 
The operator (AGREEMENT_MANAGER_ROLE holder) should verify escrow headroom before offering agreements. An on-chain guard was considered but excluded due to contract size constraints (Spurious Dragon 24576-byte limit). -### `_updateEscrow` Flow +### `_reconcileProviderEscrow` Flow -`_updateEscrow(collector, provider)` normalizes escrow state in four steps using (min, max) from `_escrowMinMax`. Steps 3 and 4 are mutually exclusive (min <= max); the thaw timer is never reset. +`_reconcileProviderEscrow(collector, provider)` normalizes escrow state in four steps using (min, max) from `_escrowMinMax`. Steps 3 and 4 are mutually exclusive (min <= max); the thaw timer is never reset. 1. **Adjust thaw target** — cancel/reduce thawing to keep min <= effective balance, or increase toward max (without timer reset) 2. **Withdraw completed thaw** — always withdrawn, even if within [min, max] @@ -128,45 +138,33 @@ The setting is a ceiling, not a mandate. **Full → OnDemand** when `available < ### Reconciliation -Per-agreement reconciliation (`reconcileAgreement`) re-reads agreement state from RecurringCollector and updates `sumMaxNextClaim`. Pair-level escrow rebalancing and cleanup is O(1) via `reconcileCollectorProvider(collector, provider)`. Batch helpers `reconcileBatch` and `reconcile(provider)` live in the separate `RecurringAgreementHelper` contract — they are stateless wrappers that call `reconcileAgreement` in a loop. +Per-agreement reconciliation (`reconcileAgreement`) re-reads agreement state from RecurringCollector and updates `sumMaxNextClaim`. Pair-level escrow rebalancing and cleanup is O(1) via `reconcileProvider(collector, provider)`. Batch helpers `reconcilePair`, `reconcileCollector`, and `reconcileAll` live in the separate `RecurringAgreementHelper` contract — they are stateless wrappers that call `reconcileAgreement` in a loop, then call `reconcileProvider` per pair. 
### Global Tracking -| Storage field | Type | Updated at | -| ----------------------------------- | ------- | --------------------------------------------------------------------------- | -| `escrowBasis` | enum | `setEscrowBasis()` | -| `sumMaxNextClaimAll` | uint256 | Every `sumMaxNextClaim[c][p]` mutation | -| `totalEscrowDeficit` | uint256 | Every `sumMaxNextClaim[c][p]` or `escrowSnap[c][p]` mutation | -| `totalAgreementCount` | uint256 | `offerAgreement` (+1), `revokeOffer` (-1), `removeAgreement` (-1) | -| `escrowSnap[c][p]` | mapping | End of `_updateEscrow` via snapshot diff | -| `tempJit` | bool | `beforeCollection` (trip), `_updateEscrow` (recover), `setTempJit` (manual) | -| `issuanceAllocator` | address | `setIssuanceAllocator()` (governor) | -| `ensuredIncomingDistributedToBlock` | uint64 | `_ensureIncomingDistributionToCurrentBlock()` (per-block dedup) | +| Storage field | Type | Updated at | +| ----------------------------------- | ------- | --------------------------------------------------------------------------------------------- | +| `escrowBasis` | enum | `setEscrowBasis()` | +| `sumMaxNextClaimAll` | uint256 | Every `sumMaxNextClaim[c][p]` mutation | +| `totalEscrowDeficit` | uint256 | Every `sumMaxNextClaim[c][p]` or `escrowSnap[c][p]` mutation | +| `providerEligibilityOracle` | address | `setProviderEligibilityOracle()` (governor), `emergencyClearEligibilityOracle()` (pause role) | +| `escrowSnap[c][p]` | mapping | End of `_reconcileProviderEscrow` via snapshot diff | +| `minOnDemandBasisThreshold` | uint8 | `setMinOnDemandBasisThreshold()` (operator) | +| `minFullBasisMargin` | uint8 | `setMinFullBasisMargin()` (operator) | +| `minThawFraction` | uint8 | `setMinThawFraction()` (operator) | +| `issuanceAllocator` | address | `setIssuanceAllocator()` (governor) | +| `ensuredIncomingDistributedToBlock` | uint32 | `_ensureIncomingDistributionToCurrentBlock()` (per-block dedup) | **`totalEscrowDeficit`** is maintained incrementally as `Σ max(0, 
sumMaxNextClaim[c][p] - escrowSnap[c][p])` per (collector, provider). Over-deposited pairs cannot mask another pair's deficit. At each mutation point, the pair's deficit is recomputed before and after. -### Temp JIT - -If `beforeCollection` can't fully deposit for a collection (`available <= deficit`), it deposits nothing and activates temporary JIT mode. While active, `_escrowMinMax` returns `(0, 0)` — JIT-only behavior — regardless of the configured `escrowBasis`. The configured basis is preserved and takes effect again on recovery. - -**Trigger**: `beforeCollection` activates temp JIT when `available <= deficit` (all-or-nothing: no partial deposits). - -**Recovery**: `_updateEscrow` clears temp JIT when `totalEscrowDeficit < available`. Recovery uses `totalEscrowDeficit` (sum of per-(collector, provider) deficits) rather than total sumMaxNextClaim, correctly accounting for already-deposited escrow. During JIT mode, thaws complete and tokens return to RAM, naturally building toward recovery. - -**Operator override**: `setTempJit(bool)` allows direct control. `setEscrowBasis` does not affect `tempJit` — the two settings are independent. - -### Upgrade Safety - -Default storage value 0 maps to `JustInTime`, so `initialize()` sets `escrowBasis = Full` as the default. Future upgrades must set it explicitly via a reinitializer. `tempJit` defaults to `false` (0), which is correct — no temp JIT on fresh deployment. 
- ## Roles - **GOVERNOR_ROLE**: Sets issuance allocator, eligibility oracle; grants `DATA_SERVICE_ROLE`, `COLLECTOR_ROLE`, and other roles; admin of `OPERATOR_ROLE` -- **OPERATOR_ROLE**: Sets escrow basis and temp JIT; admin of `AGREEMENT_MANAGER_ROLE` - - **AGREEMENT_MANAGER_ROLE**: Offers agreements/updates, revokes offers, cancels agreements -- **PAUSE_ROLE**: Pauses contract (reconcile/remove remain available) -- **Permissionless**: `reconcileAgreement`, `removeAgreement`, `reconcileCollectorProvider` -- **RecurringAgreementHelper** (permissionless): `reconcile(provider)`, `reconcileBatch(ids[])` +- **OPERATOR_ROLE**: Sets escrow basis, threshold/margin, and thaw-fraction parameters; `forceRemoveAgreement`; admin of `AGREEMENT_MANAGER_ROLE` + - **AGREEMENT_MANAGER_ROLE**: Offers agreements/updates, cancels agreements +- **PAUSE_ROLE**: Pauses contract (reconcile remains available); `emergencyClearEligibilityOracle` +- **Permissionless**: `reconcileAgreement`, `reconcileProvider` +- **RecurringAgreementHelper** (permissionless): `reconcilePair`, `reconcileCollector`, `reconcileAll` ## Deployment diff --git a/packages/issuance/contracts/agreement/RecurringAgreementManager.sol b/packages/issuance/contracts/agreement/RecurringAgreementManager.sol index 309c81f21..4056b29e4 100644 --- a/packages/issuance/contracts/agreement/RecurringAgreementManager.sol +++ b/packages/issuance/contracts/agreement/RecurringAgreementManager.sol @@ -16,10 +16,9 @@ import { IProviderEligibilityManagement } from "@graphprotocol/interfaces/contra import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; -import { IDataServiceAgreements } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceAgreements.sol"; 
import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; +import { IEmergencyRoleControl } from "@graphprotocol/interfaces/contracts/issuance/common/IEmergencyRoleControl.sol"; -import { EnumerableSetUtil } from "../common/EnumerableSetUtil.sol"; import { BaseUpgradeable } from "../common/BaseUpgradeable.sol"; import { IGraphToken } from "../common/IGraphToken.sol"; @@ -38,15 +37,75 @@ import { ReentrancyGuardTransient } from "@openzeppelin/contracts/utils/Reentran * 3. Tracks max-next-claim per agreement, deposits into PaymentsEscrow to cover maximums * * One escrow per (this contract, collector, provider) covers all managed - * RCAs for that (collector, provider) pair. Each agreement stores its own collector - * address. Other participants can independently use RCAs via the standard ECDSA-signed flow. + * RCAs for that (collector, provider) pair. Agreements are namespaced under + * their collector to prevent cross-collector ID collisions. + * Other participants can independently use RCAs via the standard ECDSA-signed flow. * - * @custom:security CEI — All external calls target trusted protocol contracts (PaymentsEscrow, - * GRT, RecurringCollector) except {cancelAgreement}'s call to the data service, which is - * governance-gated, and {_ensureIncomingDistributionToCurrentBlock}'s call to the issuance - * allocator, which is also governance-gated. {nonReentrant} on {beforeCollection}, - * {afterCollection}, and {cancelAgreement} guards against reentrancy through these external - * calls as defence-in-depth. + * @custom:design-coupling This contract is structurally coupled to RecurringCollector's + * lifecycle semantics: AgreementState transitions, updateNonce progression, and the + * RCA/RCAU struct shapes. 
Claim computation (pricing formula) is decoupled — delegated + * to the collector via {IRecurringCollector.getMaxNextClaim}, so a collector with a + * different pricing model can be used without changes to this contract. Lifecycle changes (new states, + * different update mechanics) would require coordinated updates to both contracts. + * + * @custom:security CEI — State-mutating calls target trusted protocol contracts (PaymentsEscrow, + * GRT) and {_ensureIncomingDistributionToCurrentBlock}'s call to the issuance allocator, + * which is governance-gated. + * + * Collector trust: this contract places significant trust in collector contracts. Collectors + * are COLLECTOR_ROLE-gated (governor-managed). {offerAgreement} and {cancelAgreement} call + * collectors directly. Discovery calls `getAgreement`; reconciliation calls `getMaxNextClaim` + * — these return values drive escrow accounting. A broken or malicious collector can cause + * reconciliation to revert; use {forceRemoveAgreement} as an operator escape hatch. + * Once an agreement is tracked, reconciliation proceeds even if COLLECTOR_ROLE is later + * revoked, ensuring orderly settlement. + * + * {offerAgreement} and {cancelAgreement} pass through to the collector and then reconcile + * locally. The collector does not callback to `msg.sender` (see RecurringCollector callback + * model), so these methods own the full call sequence: forward to collector, then reconcile. + * This allows them to hold the reentrancy lock for the entire operation. + * + * All entry points that mutate state ({offerAgreement}, {cancelAgreement}, {beforeCollection}, + * {afterCollection}, {afterAgreementStateChange}, {reconcileAgreement}, {reconcileProvider}, + * {forceRemoveAgreement}) are {nonReentrant}. + * + * @custom:security-pause This contract and RecurringCollector are independently pausable. + * Pausing is an emergency measure for when something is seriously broken (e.g. 
a vulnerability + * being exploited) and needs to be halted to allow time for investigation and possible + * contract upgrade. Components are independently pausable so that a problem in one does + * not require halting the entire protocol. + * + * When this contract is paused, all permissionless state-changing operations are blocked: + * agreement lifecycle callbacks ({beforeCollection}, {afterCollection}, + * {afterAgreementStateChange}), permissionless reconciliation ({reconcileAgreement}, + * {reconcileProvider}), and agreement management ({offerAgreement}, {cancelAgreement}). + * + * Operator-gated functions ({forceRemoveAgreement}, configuration setters) remain callable + * during pause. The operator is trusted and may need to act during emergencies. + * + * Cross-contract interaction: when this contract is paused but RecurringCollector is not, + * providers can still call {IRecurringCollector.collect}. The collector will proceed but + * the {beforeCollection} and {afterCollection} callbacks to this contract will revert. + * Because the collector uses low-level calls for payer callbacks, collection succeeds + * without JIT escrow top-up or reconciliation. Escrow accounting will drift until this + * contract is unpaused and {reconcileAgreement} is called. To fully halt collections, + * pause RecurringCollector as well. + * + * Emergency response tools available to PAUSE_ROLE holders: + * - {emergencyRevokeRole}: surgically disable a specific actor (operator, collector, etc.) + * - {emergencyClearEligibilityOracle}: fail-open if oracle is broken/wrongly blocking collections + * - {pause}: halt all permissionless operations on this contract + * + * Note: {emergencyRevokeRole} can revoke PAUSE_ROLE itself. This allows one pause guardian + * to disable a compromised guardian. The risk is that a compromised guardian could revoke + * all other guardians; the governor backstop can re-grant roles to recover. 
+ * + * Escalation ladder (from targeted to full stop): + * 1. Revoke a specific role via {emergencyRevokeRole} (e.g. disable one operator or collector) + * 2. Clear the eligibility oracle via {emergencyClearEligibilityOracle} (unblock collections) + * 3. Pause this contract (stops all permissionless escrow management) + * 4. Pause RecurringCollector (stops all collections and agreement state changes) + * 5. Pause both (full halt) * * @custom:security-contact Please email security+contracts@thegraph.com if you find any * bugs. We may have an active bug bounty program. @@ -60,11 +119,11 @@ contract RecurringAgreementManager is IRecurringEscrowManagement, IProviderEligibilityManagement, IRecurringAgreements, - IProviderEligibility + IProviderEligibility, + IEmergencyRoleControl { using EnumerableSet for EnumerableSet.Bytes32Set; using EnumerableSet for EnumerableSet.AddressSet; - using EnumerableSetUtil for EnumerableSet.AddressSet; /// @notice Emitted when distributeIssuance() reverts (collection continues without fresh issuance) /// @param allocator The allocator that reverted @@ -73,7 +132,8 @@ contract RecurringAgreementManager is /// @notice Thrown when the issuance allocator does not support IIssuanceAllocationDistribution error InvalidIssuanceAllocator(address allocator); - using EnumerableSetUtil for EnumerableSet.Bytes32Set; + /// @notice Thrown when attempting to emergency-revoke the governor role + error CannotRevokeGovernorRole(); // -- Role Constants -- @@ -106,41 +166,69 @@ contract RecurringAgreementManager is // -- Storage (ERC-7201) -- + /// @notice Intermediate values for escrow reconciliation, packed to reduce stack depth. 
+ struct EscrowTargets { + IPaymentsEscrow.EscrowAccount account; + uint256 min; + uint256 max; + uint256 escrowed; + uint256 thawThreshold; + } + + /** + * @notice Per-(collector, provider) pair tracking data + * @param sumMaxNextClaim Sum of maxNextClaim for all agreements in this pair + * @param escrowSnap Last known escrow balance (for snapshot diff) + * @param agreements Set of agreement IDs for this pair (stored as bytes32 for EnumerableSet) + */ + struct CollectorProviderData { + uint256 sumMaxNextClaim; + uint256 escrowSnap; + EnumerableSet.Bytes32Set agreements; + } + + /** + * @notice Per-collector tracking data + * @param agreements Agreement data keyed by agreement ID + * @param providers Per-provider tracking data + * @param providerSet Set of provider addresses with active agreements + */ + struct CollectorData { + mapping(bytes16 agreementId => AgreementInfo) agreements; + mapping(address provider => CollectorProviderData) providers; + EnumerableSet.AddressSet providerSet; + } + /// @custom:storage-location erc7201:graphprotocol.issuance.storage.RecurringAgreementManager // solhint-disable-next-line gas-struct-packing struct RecurringAgreementManagerStorage { - /// @notice Authorized agreement hashes — maps hash to agreementId (bytes16(0) = not authorized) - mapping(bytes32 agreementHash => bytes16) authorizedHashes; - /// @notice Per-agreement tracking data - mapping(bytes16 agreementId => AgreementInfo) agreements; - /// @notice Sum of maxNextClaim for all agreements per (collector, provider) pair - mapping(address collector => mapping(address provider => uint256)) sumMaxNextClaim; - /// @notice Set of agreement IDs per service provider (stored as bytes32 for EnumerableSet) - mapping(address provider => EnumerableSet.Bytes32Set) providerAgreementIds; + /// @notice Per-collector tracking data (agreements, providers, escrow) + mapping(address collector => CollectorData) collectors; + /// @notice Set of all collector addresses with active agreements + 
EnumerableSet.AddressSet collectorSet; /// @notice Sum of sumMaxNextClaim across all (collector, provider) pairs uint256 sumMaxNextClaimAll; /// @notice Total unfunded escrow: sum of max(0, sumMaxNextClaim[c][p] - escrowSnap[c][p]) uint256 totalEscrowDeficit; - /// @notice Total number of tracked agreements across all providers - uint256 totalAgreementCount; - /// @notice Last known escrow balance per (collector, provider) pair (for snapshot diff) - mapping(address collector => mapping(address provider => uint256)) escrowSnap; - /// @notice Set of all collector addresses with active agreements - EnumerableSet.AddressSet collectors; - /// @notice Set of provider addresses per collector - mapping(address collector => EnumerableSet.AddressSet) collectorProviders; - /// @notice Number of agreements per (collector, provider) pair - mapping(address collector => mapping(address provider => uint256)) pairAgreementCount; /// @notice The issuance allocator that mints GRT to this contract (20 bytes) - /// @dev Packed slot (30/32 bytes): issuanceAllocator (20) + ensuredIncomingDistributedToBlock (8) + - /// escrowBasis (1) + tempJit (1). All read together in _updateEscrow / beforeCollection. + /// @dev Packed slot (28/32 bytes): issuanceAllocator (20) + ensuredIncomingDistributedToBlock (4) + + /// escrowBasis (1) + minOnDemandBasisThreshold (1) + minFullBasisMargin (1) + minThawFraction (1). + /// All read together in _reconcileProviderEscrow / beforeCollection. 
IIssuanceAllocationDistribution issuanceAllocator; /// @notice Block number when _ensureIncomingDistributionToCurrentBlock last ran - uint64 ensuredIncomingDistributedToBlock; - /// @notice Governance-configured escrow level (not modified by temp JIT) + uint32 ensuredIncomingDistributedToBlock; + /// @notice Governance-configured escrow level (maximum aspiration) EscrowBasis escrowBasis; - /// @notice Whether temporary JIT mode is active (beforeCollection couldn't deposit) - bool tempJit; + /// @notice Threshold for OnDemand: sumMaxNextClaimAll * threshold / 256 < spare. + /// Governance-configured. + uint8 minOnDemandBasisThreshold; + /// @notice Margin for Full: sumMaxNextClaimAll * (256 + margin) / 256 < spare. + /// Governance-configured. + uint8 minFullBasisMargin; + /// @notice Minimum thaw fraction: escrow excess below sumMaxNextClaim * minThawFraction / 256 + /// per (collector, provider) pair is skipped as operationally insignificant. + /// Governance-configured. + uint8 minThawFraction; /// @notice Optional oracle for checking payment eligibility of service providers (20/32 bytes in slot) IProviderEligibility providerEligibilityOracle; } @@ -172,7 +260,12 @@ contract RecurringAgreementManager is _setRoleAdmin(DATA_SERVICE_ROLE, GOVERNOR_ROLE); _setRoleAdmin(COLLECTOR_ROLE, GOVERNOR_ROLE); _setRoleAdmin(AGREEMENT_MANAGER_ROLE, OPERATOR_ROLE); - _getStorage().escrowBasis = EscrowBasis.Full; + + RecurringAgreementManagerStorage storage _s = _getStorage(); + _s.escrowBasis = EscrowBasis.Full; + _s.minOnDemandBasisThreshold = 128; + _s.minFullBasisMargin = 16; + _s.minThawFraction = 16; } // -- ERC165 -- @@ -187,6 +280,7 @@ contract RecurringAgreementManager is interfaceId == type(IProviderEligibilityManagement).interfaceId || interfaceId == type(IRecurringAgreements).interfaceId || interfaceId == type(IProviderEligibility).interfaceId || + interfaceId == type(IEmergencyRoleControl).interfaceId || super.supportsInterface(interfaceId); } @@ -204,8 +298,8 @@ 
contract RecurringAgreementManager is /// Even if interleaved, the only effect is a one-block lag before the new allocator's /// distribution is picked up — corrected automatically on the next block. function setIssuanceAllocator(address newIssuanceAllocator) external virtual override onlyRole(GOVERNOR_ROLE) { - RecurringAgreementManagerStorage storage $ = _getStorage(); - if (address($.issuanceAllocator) == newIssuanceAllocator) return; + RecurringAgreementManagerStorage storage _s = _getStorage(); + if (address(_s.issuanceAllocator) == newIssuanceAllocator) return; if (newIssuanceAllocator != address(0)) require( @@ -216,241 +310,183 @@ contract RecurringAgreementManager is InvalidIssuanceAllocator(newIssuanceAllocator) ); - emit IssuanceAllocatorSet(address($.issuanceAllocator), newIssuanceAllocator); - $.issuanceAllocator = IIssuanceAllocationDistribution(newIssuanceAllocator); + emit IssuanceAllocatorSet(address(_s.issuanceAllocator), newIssuanceAllocator); + _s.issuanceAllocator = IIssuanceAllocationDistribution(newIssuanceAllocator); } // -- IAgreementOwner -- /// @inheritdoc IAgreementOwner - function approveAgreement(bytes32 agreementHash) external view override returns (bytes4) { - RecurringAgreementManagerStorage storage $ = _getStorage(); - bytes16 agreementId = $.authorizedHashes[agreementHash]; - - if (agreementId == bytes16(0) || $.agreements[agreementId].provider == address(0)) return bytes4(0); - - return IAgreementOwner.approveAgreement.selector; - } - - /// @inheritdoc IAgreementOwner - function beforeCollection(bytes16 agreementId, uint256 tokensToCollect) external override nonReentrant { - RecurringAgreementManagerStorage storage $ = _getStorage(); - AgreementInfo storage agreement = $.agreements[agreementId]; - address provider = agreement.provider; + function beforeCollection( + bytes16 agreementId, + uint256 tokensToCollect + ) external override whenNotPaused nonReentrant { + RecurringAgreementManagerStorage storage _s = _getStorage(); + 
address collector = msg.sender; + address provider = _getAgreementProvider(_s, collector, agreementId); if (provider == address(0)) return; - _requireCollector(agreement); // JIT top-up: deposit only when escrow balance cannot cover this collection - uint256 escrowBalance = _fetchEscrowAccount(msg.sender, provider).balance; + uint256 escrowBalance = _fetchEscrowAccount(collector, provider).balance; if (tokensToCollect <= escrowBalance) return; // Ensure issuance is distributed so balanceOf reflects all available tokens - _ensureIncomingDistributionToCurrentBlock($); + _ensureIncomingDistributionToCurrentBlock(_s); - // Strict <: when deficit == available, enter tempJit rather than depleting entire balance uint256 deficit = tokensToCollect - escrowBalance; if (deficit < GRAPH_TOKEN.balanceOf(address(this))) { GRAPH_TOKEN.approve(address(PAYMENTS_ESCROW), deficit); - PAYMENTS_ESCROW.deposit(msg.sender, provider, deficit); - } else if (!$.tempJit) { - $.tempJit = true; - emit TempJitSet(true, true); + PAYMENTS_ESCROW.deposit(collector, provider, deficit); } } /// @inheritdoc IAgreementOwner - function afterCollection(bytes16 agreementId, uint256 /* tokensCollected */) external override nonReentrant { - RecurringAgreementManagerStorage storage $ = _getStorage(); - AgreementInfo storage agreement = $.agreements[agreementId]; - if (agreement.provider == address(0)) return; - _requireCollector(agreement); + function afterCollection( + bytes16 agreementId, + uint256 /* tokensCollected */ + ) external override whenNotPaused nonReentrant { + _reconcileAgreement(_getStorage(), msg.sender, agreementId); + } - _reconcileAndUpdateEscrow($, agreementId); + /** + * @notice Callback from RecurringCollector on externally-initiated lifecycle events. + * @dev The collector skips this callback when this contract is `msg.sender` (i.e. 
for + * operations this contract initiated via {offerAgreement} / {cancelAgreement}), because + * those methods reconcile directly after the passthrough call. This callback therefore + * only fires for events initiated by third parties (provider accept, external cancel). + * @param agreementId The agreement ID + */ + // solhint-disable-next-line use-natspec + function afterAgreementStateChange(bytes16 agreementId, bytes32, uint16) external whenNotPaused nonReentrant { + _reconcileAgreement(_getStorage(), msg.sender, agreementId); } // -- IRecurringAgreementManagement -- /// @inheritdoc IRecurringAgreementManagement function offerAgreement( - IRecurringCollector.RecurringCollectionAgreement calldata rca, - IRecurringCollector collector - ) external onlyRole(AGREEMENT_MANAGER_ROLE) whenNotPaused returns (bytes16 agreementId) { - require(rca.payer == address(this), PayerMustBeManager(rca.payer, address(this))); - require(rca.serviceProvider != address(0), ServiceProviderZeroAddress()); - require(hasRole(DATA_SERVICE_ROLE, rca.dataService), UnauthorizedDataService(rca.dataService)); + IRecurringCollector collector, + uint8 offerType, + bytes calldata offerData + ) external onlyRole(AGREEMENT_MANAGER_ROLE) nonReentrant returns (bytes16 agreementId) { require(hasRole(COLLECTOR_ROLE, address(collector)), UnauthorizedCollector(address(collector))); - RecurringAgreementManagerStorage storage $ = _getStorage(); + // Forward to collector — no callback to msg.sender, we reconcile after return + IRecurringCollector.OfferResult memory result = collector.offer(offerType, offerData, 0); + agreementId = result.agreementId; - agreementId = collector.generateAgreementId( - rca.payer, - rca.dataService, - rca.serviceProvider, - rca.deadline, - rca.nonce - ); - require($.agreements[agreementId].provider == address(0), AgreementAlreadyOffered(agreementId)); + require(result.serviceProvider != address(0), ServiceProviderZeroAddress()); + require(hasRole(DATA_SERVICE_ROLE, 
result.dataService), UnauthorizedDataService(result.dataService)); - bytes32 agreementHash = collector.hashRCA(rca); - uint256 maxNextClaim = _createAgreement($, agreementId, rca, collector, agreementHash); - _updateEscrow($, address(collector), rca.serviceProvider); - - emit AgreementOffered(agreementId, rca.serviceProvider, maxNextClaim); + _reconcileAgreement(_getStorage(), address(collector), agreementId); } /// @inheritdoc IRecurringAgreementManagement - function offerAgreementUpdate( - IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau - ) external onlyRole(AGREEMENT_MANAGER_ROLE) whenNotPaused returns (bytes16 agreementId) { - agreementId = rcau.agreementId; - RecurringAgreementManagerStorage storage $ = _getStorage(); - AgreementInfo storage agreement = $.agreements[agreementId]; - require(agreement.provider != address(0), AgreementNotOffered(agreementId)); - - // Reconcile against on-chain state before layering a new pending update, - // so escrow accounting is current and we can validate the nonce. 
- _reconcileAgreement($, agreementId); - - // Validate nonce: must be the next expected nonce on the collector - IRecurringCollector.AgreementData memory rca = agreement.collector.getAgreement(agreementId); - uint32 expectedNonce = rca.updateNonce + 1; - require(rcau.nonce == expectedNonce, InvalidUpdateNonce(agreementId, expectedNonce, rcau.nonce)); - - // Clean up old pending hash if replacing - if (agreement.pendingUpdateHash != bytes32(0)) delete $.authorizedHashes[agreement.pendingUpdateHash]; - - // Authorize the RCAU hash for the IAgreementOwner callback - bytes32 updateHash = agreement.collector.hashRCAU(rcau); - $.authorizedHashes[updateHash] = agreementId; - agreement.pendingUpdateNonce = rcau.nonce; - agreement.pendingUpdateHash = updateHash; - - uint256 pendingMaxNextClaim = _computeMaxFirstClaim( - rcau.maxOngoingTokensPerSecond, - rcau.maxSecondsPerCollection, - rcau.maxInitialTokens - ); - _setAgreementMaxNextClaim($, agreementId, pendingMaxNextClaim, true); - _updateEscrow($, address(agreement.collector), agreement.provider); - - emit AgreementUpdateOffered(agreementId, pendingMaxNextClaim, rcau.nonce); + function cancelAgreement( + address collector, + bytes16 agreementId, + bytes32 versionHash, + uint16 options + ) external onlyRole(AGREEMENT_MANAGER_ROLE) nonReentrant { + // Forward to collector — no callback to msg.sender, we reconcile after return + IRecurringCollector(collector).cancel(agreementId, versionHash, options); + _reconcileAgreement(_getStorage(), collector, agreementId); } /// @inheritdoc IRecurringAgreementManagement - function revokeAgreementUpdate( + function reconcileAgreement( + address collector, bytes16 agreementId - ) external onlyRole(AGREEMENT_MANAGER_ROLE) whenNotPaused returns (bool revoked) { - RecurringAgreementManagerStorage storage $ = _getStorage(); - AgreementInfo storage agreement = $.agreements[agreementId]; - require(agreement.provider != address(0), AgreementNotOffered(agreementId)); - - // Reconcile first — 
the update may have been accepted since the offer was made - _reconcileAgreement($, agreementId); - - if (agreement.pendingUpdateHash == bytes32(0)) return false; - - uint256 pendingMaxClaim = agreement.pendingUpdateMaxNextClaim; - uint32 nonce = agreement.pendingUpdateNonce; - - _setAgreementMaxNextClaim($, agreementId, 0, true); - delete $.authorizedHashes[agreement.pendingUpdateHash]; - agreement.pendingUpdateNonce = 0; - agreement.pendingUpdateHash = bytes32(0); - - _updateEscrow($, address(agreement.collector), agreement.provider); - - emit AgreementUpdateRevoked(agreementId, pendingMaxClaim, nonce); - return true; + ) external whenNotPaused nonReentrant returns (bool exists) { + exists = !_reconcileAgreement(_getStorage(), collector, agreementId); } /// @inheritdoc IRecurringAgreementManagement - function revokeOffer( - bytes16 agreementId - ) external onlyRole(AGREEMENT_MANAGER_ROLE) whenNotPaused returns (bool gone) { - RecurringAgreementManagerStorage storage $ = _getStorage(); - AgreementInfo storage agreement = $.agreements[agreementId]; - if (agreement.provider == address(0)) return true; - - // Only revoke un-accepted agreements — accepted ones must be canceled via cancelAgreement - IRecurringCollector.AgreementData memory rca = agreement.collector.getAgreement(agreementId); - require(rca.state == IRecurringCollector.AgreementState.NotAccepted, AgreementAlreadyAccepted(agreementId)); - - address provider = _deleteAgreement($, agreementId, agreement); - emit OfferRevoked(agreementId, provider); - return true; + function reconcileProvider( + address collector, + address provider + ) external whenNotPaused nonReentrant returns (bool tracked) { + return _reconcileProvider(_getStorage(), collector, provider); } /// @inheritdoc IRecurringAgreementManagement - function cancelAgreement( + function forceRemoveAgreement( + address collector, bytes16 agreementId - ) external onlyRole(AGREEMENT_MANAGER_ROLE) whenNotPaused nonReentrant returns (bool gone) { - 
RecurringAgreementManagerStorage storage $ = _getStorage(); - AgreementInfo storage agreement = $.agreements[agreementId]; - if (agreement.provider == address(0)) return true; - - IRecurringCollector.AgreementData memory rca = agreement.collector.getAgreement(agreementId); - - // Not accepted — use revokeOffer instead - require(rca.state != IRecurringCollector.AgreementState.NotAccepted, AgreementNotAccepted(agreementId)); - - // If still active, route cancellation through the data service. - // Note: external call before state update — safe because caller must hold - // AGREEMENT_MANAGER_ROLE and data service is governance-gated. nonReentrant - // provides defence-in-depth (see CEI note in contract header). - if (rca.state == IRecurringCollector.AgreementState.Accepted) { - IDataServiceAgreements ds = agreement.dataService; - require(address(ds).code.length != 0, InvalidDataService(address(ds))); - ds.cancelIndexingAgreementByPayer(agreementId); - emit AgreementCanceled(agreementId, agreement.provider); - } - // else: already canceled (CanceledByPayer or CanceledByServiceProvider) — skip cancel call, just reconcile + ) external onlyRole(OPERATOR_ROLE) nonReentrant { + RecurringAgreementManagerStorage storage _s = _getStorage(); + AgreementInfo storage agreement = _s.collectors[collector].agreements[agreementId]; + address provider = agreement.provider; + if (provider == address(0)) return; - return _reconcileAndCleanup($, agreementId, agreement); + CollectorProviderData storage cpd = _s.collectors[collector].providers[provider]; + + _adjustMaxNextClaim(_s, cpd, agreement, 0); + _removeAgreement(_s, cpd, collector, provider, agreementId); } /// @inheritdoc IRecurringAgreementManagement - function reconcileAgreement(bytes16 agreementId) external returns (bool exists) { - RecurringAgreementManagerStorage storage $ = _getStorage(); - AgreementInfo storage agreement = $.agreements[agreementId]; - if (agreement.provider == address(0)) return false; - - return 
!_reconcileAndCleanup($, agreementId, agreement); + /// @dev Emergency fail-open: if the oracle is broken or compromised and is wrongly + /// blocking collections, the pause guardian can clear it so all providers become eligible. + /// The governor can later set a replacement oracle. + function emergencyClearEligibilityOracle() external override onlyRole(PAUSE_ROLE) { + _setProviderEligibilityOracle(IProviderEligibility(address(0))); } - /// @inheritdoc IRecurringAgreementManagement - function reconcileCollectorProvider(address collector, address provider) external returns (bool exists) { - return !_reconcilePairTracking(_getStorage(), collector, provider); + /// @inheritdoc IEmergencyRoleControl + /// @dev Governor role is excluded to prevent a pause guardian from locking out governance. + function emergencyRevokeRole(bytes32 role, address account) external override onlyRole(PAUSE_ROLE) { + require(role != GOVERNOR_ROLE, CannotRevokeGovernorRole()); + _revokeRole(role, account); } // -- IRecurringEscrowManagement -- /// @inheritdoc IRecurringEscrowManagement function setEscrowBasis(EscrowBasis basis) external onlyRole(OPERATOR_ROLE) { - RecurringAgreementManagerStorage storage $ = _getStorage(); - if ($.escrowBasis == basis) return; - EscrowBasis oldBasis = $.escrowBasis; - $.escrowBasis = basis; + RecurringAgreementManagerStorage storage _s = _getStorage(); + if (_s.escrowBasis == basis) return; + + EscrowBasis oldBasis = _s.escrowBasis; + _s.escrowBasis = basis; emit EscrowBasisSet(oldBasis, basis); } /// @inheritdoc IRecurringEscrowManagement - function setTempJit(bool active) external onlyRole(OPERATOR_ROLE) { - RecurringAgreementManagerStorage storage $ = _getStorage(); - if ($.tempJit != active) { - $.tempJit = active; - emit TempJitSet(active, false); - } + function setMinOnDemandBasisThreshold(uint8 threshold) external onlyRole(OPERATOR_ROLE) { + RecurringAgreementManagerStorage storage _s = _getStorage(); + if (_s.minOnDemandBasisThreshold == threshold) 
return; + + uint8 oldThreshold = _s.minOnDemandBasisThreshold; + _s.minOnDemandBasisThreshold = threshold; + emit MinOnDemandBasisThresholdSet(oldThreshold, threshold); + } + + /// @inheritdoc IRecurringEscrowManagement + function setMinFullBasisMargin(uint8 margin) external onlyRole(OPERATOR_ROLE) { + RecurringAgreementManagerStorage storage _s = _getStorage(); + if (_s.minFullBasisMargin == margin) return; + + uint8 oldMargin = _s.minFullBasisMargin; + _s.minFullBasisMargin = margin; + emit MinFullBasisMarginSet(oldMargin, margin); + } + + /// @inheritdoc IRecurringEscrowManagement + function setMinThawFraction(uint8 fraction) external onlyRole(OPERATOR_ROLE) { + RecurringAgreementManagerStorage storage _s = _getStorage(); + if (_s.minThawFraction == fraction) return; + + uint8 oldFraction = _s.minThawFraction; + _s.minThawFraction = fraction; + emit MinThawFractionSet(oldFraction, fraction); } // -- IProviderEligibilityManagement -- /// @inheritdoc IProviderEligibilityManagement function setProviderEligibilityOracle(IProviderEligibility oracle) external onlyRole(GOVERNOR_ROLE) { - RecurringAgreementManagerStorage storage $ = _getStorage(); - if (address($.providerEligibilityOracle) == address(oracle)) return; - IProviderEligibility oldOracle = $.providerEligibilityOracle; - $.providerEligibilityOracle = oracle; - emit ProviderEligibilityOracleSet(oldOracle, oracle); + _setProviderEligibilityOracle(oracle); } /// @inheritdoc IProviderEligibilityManagement @@ -470,391 +506,234 @@ contract RecurringAgreementManager is // -- IRecurringAgreements -- + // --- Agreement data --- + /// @inheritdoc IRecurringAgreements - function getSumMaxNextClaim(IRecurringCollector collector, address provider) external view returns (uint256) { - return _getStorage().sumMaxNextClaim[address(collector)][provider]; + function getAgreementInfo(address collector, bytes16 agreementId) external view returns (AgreementInfo memory) { + return 
_getStorage().collectors[collector].agreements[agreementId]; } /// @inheritdoc IRecurringAgreements - function getEscrowAccount( - IRecurringCollector collector, - address provider - ) external view returns (IPaymentsEscrow.EscrowAccount memory account) { - return _fetchEscrowAccount(address(collector), provider); + function getAgreementMaxNextClaim(address collector, bytes16 agreementId) external view returns (uint256) { + return _getStorage().collectors[collector].agreements[agreementId].maxNextClaim; } + // --- Escrow state --- + /// @inheritdoc IRecurringAgreements - function getAgreementMaxNextClaim(bytes16 agreementId) external view returns (uint256) { - return _getStorage().agreements[agreementId].maxNextClaim; + function getSumMaxNextClaim(IRecurringCollector collector, address provider) external view returns (uint256) { + return _getStorage().collectors[address(collector)].providers[provider].sumMaxNextClaim; } /// @inheritdoc IRecurringAgreements - function getAgreementInfo(bytes16 agreementId) external view returns (AgreementInfo memory) { - return _getStorage().agreements[agreementId]; + function getSumMaxNextClaimAll() external view returns (uint256) { + return _getStorage().sumMaxNextClaimAll; } /// @inheritdoc IRecurringAgreements - function getProviderAgreementCount(address provider) external view returns (uint256) { - return _getStorage().providerAgreementIds[provider].length(); + function getTotalEscrowDeficit() external view returns (uint256) { + return _getStorage().totalEscrowDeficit; } /// @inheritdoc IRecurringAgreements - function getProviderAgreements(address provider) external view returns (bytes16[] memory) { - return _getStorage().providerAgreementIds[provider].getPageBytes16(0, type(uint256).max); + function getEscrowAccount( + IRecurringCollector collector, + address provider + ) external view returns (IPaymentsEscrow.EscrowAccount memory account) { + return _fetchEscrowAccount(address(collector), provider); } /// @inheritdoc 
IRecurringAgreements - function getProviderAgreements( - address provider, - uint256 offset, - uint256 count - ) external view returns (bytes16[] memory) { - return _getStorage().providerAgreementIds[provider].getPageBytes16(offset, count); + function getEscrowSnap(address collector, address provider) external view returns (uint256) { + return _getStorage().collectors[collector].providers[provider].escrowSnap; } + // --- Escrow parameters --- + /// @inheritdoc IRecurringAgreements function getEscrowBasis() external view returns (EscrowBasis) { return _getStorage().escrowBasis; } /// @inheritdoc IRecurringAgreements - function getSumMaxNextClaimAll() external view returns (uint256) { - return _getStorage().sumMaxNextClaimAll; + function getMinOnDemandBasisThreshold() external view returns (uint8) { + return _getStorage().minOnDemandBasisThreshold; } /// @inheritdoc IRecurringAgreements - function getTotalEscrowDeficit() external view returns (uint256) { - return _getStorage().totalEscrowDeficit; + function getMinFullBasisMargin() external view returns (uint8) { + return _getStorage().minFullBasisMargin; } /// @inheritdoc IRecurringAgreements - function getTotalAgreementCount() external view returns (uint256) { - return _getStorage().totalAgreementCount; + function getMinThawFraction() external view returns (uint8) { + return _getStorage().minThawFraction; } - /// @inheritdoc IRecurringAgreements - function isTempJit() external view returns (bool) { - return _getStorage().tempJit; - } + // --- Enumeration --- /// @inheritdoc IRecurringAgreements function getCollectorCount() external view returns (uint256) { - return _getStorage().collectors.length(); + return _getStorage().collectorSet.length(); } /// @inheritdoc IRecurringAgreements - function getCollectors() external view returns (address[] memory) { - return _getStorage().collectors.getPage(0, type(uint256).max); + function getCollectorAt(uint256 index) external view returns (address) { + return 
_getStorage().collectorSet.at(index); } /// @inheritdoc IRecurringAgreements - function getCollectors(uint256 offset, uint256 count) external view returns (address[] memory) { - return _getStorage().collectors.getPage(offset, count); + function getProviderCount(address collector) external view returns (uint256) { + return _getStorage().collectors[collector].providerSet.length(); } /// @inheritdoc IRecurringAgreements - function getCollectorProviderCount(address collector) external view returns (uint256) { - return _getStorage().collectorProviders[collector].length(); + function getProviderAt(address collector, uint256 index) external view returns (address) { + return _getStorage().collectors[collector].providerSet.at(index); } /// @inheritdoc IRecurringAgreements - function getCollectorProviders(address collector) external view returns (address[] memory) { - return _getStorage().collectorProviders[collector].getPage(0, type(uint256).max); + function getPairAgreementCount(address collector, address provider) external view returns (uint256) { + return _getStorage().collectors[collector].providers[provider].agreements.length(); } /// @inheritdoc IRecurringAgreements - function getCollectorProviders( - address collector, - uint256 offset, - uint256 count - ) external view returns (address[] memory) { - return _getStorage().collectorProviders[collector].getPage(offset, count); + function getPairAgreementAt(address collector, address provider, uint256 index) external view returns (bytes16) { + return bytes16(_getStorage().collectors[collector].providers[provider].agreements.at(index)); } - /// @inheritdoc IRecurringAgreements - function getPairAgreementCount(address collector, address provider) external view returns (uint256) { - return _getStorage().pairAgreementCount[collector][provider]; - } + // -- Private Functions -- + // Ordered by call depth: orchestrators → building blocks → leaf utilities - // -- Internal Functions -- + // --- Agreement reconciliation --- /** - 
* @notice Require that msg.sender is the agreement's collector. - * @param agreement The agreement info to check against + * @notice Discover (if first-seen) and reconcile a single agreement. + * @dev Used by both {afterAgreementStateChange} and {reconcileAgreement}. + * Resolves the provider via {_getAgreementProvider}, refreshes the cached + * maxNextClaim from the collector, and reconciles escrow. + * @param _s The storage reference + * @param collector The collector contract address + * @param agreementId The agreement ID + * @return deleted True if the agreement was removed (or never registered) */ - function _requireCollector(AgreementInfo storage agreement) private view { - require(msg.sender == address(agreement.collector), OnlyAgreementCollector()); - } + function _reconcileAgreement( + RecurringAgreementManagerStorage storage _s, + address collector, + bytes16 agreementId + ) private returns (bool deleted) { + address provider = _getAgreementProvider(_s, collector, agreementId); + if (provider == address(0)) return true; - /** - * @notice Create agreement storage, authorize its hash, update pair tracking, and set max-next-claim. 
- * @param agreementId The generated agreement ID - * @param rca The recurring collection agreement parameters - * @param collector The collector contract - * @param agreementHash The hash of the RCA to authorize - * @return maxNextClaim The computed max-next-claim for the new agreement - */ - // solhint-disable-next-line use-natspec - function _createAgreement( - RecurringAgreementManagerStorage storage $, - bytes16 agreementId, - IRecurringCollector.RecurringCollectionAgreement calldata rca, - IRecurringCollector collector, - bytes32 agreementHash - ) private returns (uint256 maxNextClaim) { - $.authorizedHashes[agreementHash] = agreementId; - - $.agreements[agreementId] = AgreementInfo({ - provider: rca.serviceProvider, - deadline: rca.deadline, - pendingUpdateNonce: 0, - maxNextClaim: 0, - pendingUpdateMaxNextClaim: 0, - agreementHash: agreementHash, - pendingUpdateHash: bytes32(0), - dataService: IDataServiceAgreements(rca.dataService), - collector: collector - }); - $.providerAgreementIds[rca.serviceProvider].add(bytes32(agreementId)); - ++$.totalAgreementCount; - if (++$.pairAgreementCount[address(collector)][rca.serviceProvider] == 1) { - $.collectorProviders[address(collector)].add(rca.serviceProvider); - $.collectors.add(address(collector)); - } + AgreementInfo storage agreement = _s.collectors[collector].agreements[agreementId]; + CollectorProviderData storage cpd = _s.collectors[collector].providers[provider]; - maxNextClaim = _computeMaxFirstClaim( - rca.maxOngoingTokensPerSecond, - rca.maxSecondsPerCollection, - rca.maxInitialTokens - ); - _setAgreementMaxNextClaim($, agreementId, maxNextClaim, false); - } + // Refresh cached maxNextClaim from collector + uint256 newMaxClaim = IRecurringCollector(collector).getMaxNextClaim(agreementId); - /** - * @notice Compute maximum first claim from agreement rate parameters. 
- * @param maxOngoingTokensPerSecond Maximum ongoing tokens per second - * @param maxSecondsPerCollection Maximum seconds per collection period - * @param maxInitialTokens Maximum initial tokens - * @return Maximum possible claim amount - */ - function _computeMaxFirstClaim( - uint256 maxOngoingTokensPerSecond, - uint256 maxSecondsPerCollection, - uint256 maxInitialTokens - ) private pure returns (uint256) { - return maxOngoingTokensPerSecond * maxSecondsPerCollection + maxInitialTokens; - } + // Update agreement + all derived totals (reads old value from storage) + uint256 oldMaxClaim = _adjustMaxNextClaim(_s, cpd, agreement, newMaxClaim); + if (oldMaxClaim != newMaxClaim) emit AgreementReconciled(agreementId, oldMaxClaim, newMaxClaim); - /** - * @notice Reconcile an agreement and update escrow for its (collector, provider) pair. - * @param agreementId The agreement ID to reconcile - */ - // solhint-disable-next-line use-natspec - function _reconcileAndUpdateEscrow(RecurringAgreementManagerStorage storage $, bytes16 agreementId) private { - _reconcileAgreement($, agreementId); - AgreementInfo storage info = $.agreements[agreementId]; - _updateEscrow($, address(info.collector), info.provider); + deleted = newMaxClaim == 0; // Delete if fully settled + if (deleted) _removeAgreement(_s, cpd, collector, provider, agreementId); + else _reconcileProviderEscrow(_s, collector, provider); } /** - * @notice Reconcile an agreement, update escrow, and delete if nothing left to claim. - * @param agreementId The agreement ID to reconcile - * @param agreement Storage pointer to the agreement info - * @return deleted True if the agreement was removed + * @notice Get the service provider for an agreement, discovering from the collector if first-seen. + * @dev Returns the cached provider for known agreements. For first-seen agreements: + * reads from the collector, validates roles and payer, registers in tracking sets, + * and returns the provider. 
Returns address(0) for agreements that don't belong to + * this manager (unauthorized collector, wrong payer, unauthorized data service, or + * non-existent). Once tracked, reconciliation bypasses this function's discovery path. + * @param _s The storage reference + * @param collector The collector contract address + * @param agreementId The agreement ID + * @return provider The service provider address, or address(0) if not ours */ - // solhint-disable-next-line use-natspec - function _reconcileAndCleanup( - RecurringAgreementManagerStorage storage $, - bytes16 agreementId, - AgreementInfo storage agreement - ) private returns (bool deleted) { - _reconcileAndUpdateEscrow($, agreementId); - if (agreement.maxNextClaim == 0) { - address provider = _deleteAgreement($, agreementId, agreement); - emit AgreementRemoved(agreementId, provider); - return true; + function _getAgreementProvider( + RecurringAgreementManagerStorage storage _s, + address collector, + bytes16 agreementId + ) private returns (address provider) { + provider = _s.collectors[collector].agreements[agreementId].provider; + if (provider != address(0)) return provider; + + // Untracked agreement; validate collector role, existence, payer, and data service. + // COLLECTOR_ROLE is required for discovery (first encounter). Once tracked, reconciliation + // of already-added agreements proceeds regardless of role — a deauthorized collector's + // agreements can still be reconciled, settled, and force-removed. 
+ if (!hasRole(COLLECTOR_ROLE, collector)) { + emit AgreementRejected(agreementId, collector, AgreementRejectionReason.UnauthorizedCollector); + return address(0); } - } - - /** - * @notice Reconcile a single agreement's max next claim against on-chain state - * @param agreementId The agreement ID to reconcile - */ - // solhint-disable-next-line use-natspec - function _reconcileAgreement(RecurringAgreementManagerStorage storage $, bytes16 agreementId) private { - AgreementInfo storage agreement = $.agreements[agreementId]; - - IRecurringCollector rc = agreement.collector; - IRecurringCollector.AgreementData memory rca = rc.getAgreement(agreementId); - - // Not yet accepted — keep the pre-offer estimate unless the deadline has passed - if (rca.state == IRecurringCollector.AgreementState.NotAccepted) { - if (block.timestamp <= agreement.deadline) return; - // Deadline passed: zero out so the caller can delete the expired offer - uint256 prev = agreement.maxNextClaim; - if (prev != 0) { - _setAgreementMaxNextClaim($, agreementId, 0, false); - emit AgreementReconciled(agreementId, prev, 0); - } - return; + IRecurringCollector.AgreementData memory data = IRecurringCollector(collector).getAgreementData(agreementId); + provider = data.serviceProvider; + if (provider == address(0)) { + emit AgreementRejected(agreementId, collector, AgreementRejectionReason.UnknownAgreement); + return address(0); } - - // Clear pending update if applied (updateNonce advanced) or unreachable (agreement canceled) - if ( - agreement.pendingUpdateHash != bytes32(0) && - (agreement.pendingUpdateNonce <= rca.updateNonce || - rca.state != IRecurringCollector.AgreementState.Accepted) - ) { - _setAgreementMaxNextClaim($, agreementId, 0, true); - delete $.authorizedHashes[agreement.pendingUpdateHash]; - agreement.pendingUpdateNonce = 0; - agreement.pendingUpdateHash = bytes32(0); + if (data.payer != address(this)) { + emit AgreementRejected(agreementId, collector, 
AgreementRejectionReason.PayerMismatch); + return address(0); } - - uint256 oldMaxClaim = agreement.maxNextClaim; - uint256 newMaxClaim = rc.getMaxNextClaim(agreementId); - - if (oldMaxClaim != newMaxClaim) { - _setAgreementMaxNextClaim($, agreementId, newMaxClaim, false); - emit AgreementReconciled(agreementId, oldMaxClaim, newMaxClaim); + if (!hasRole(DATA_SERVICE_ROLE, data.dataService)) { + emit AgreementRejected(agreementId, collector, AgreementRejectionReason.UnauthorizedDataService); + return address(0); } - } - /** - * @notice Delete an agreement: clean up hashes, zero escrow obligations, remove from provider set, and update escrow. - * @param agreementId The agreement ID to delete - * @param agreement Storage pointer to the agreement info - * @return provider The provider address (captured before deletion) - */ - // solhint-disable-next-line use-natspec - function _deleteAgreement( - RecurringAgreementManagerStorage storage $, - bytes16 agreementId, - AgreementInfo storage agreement - ) private returns (address provider) { - provider = agreement.provider; - IRecurringCollector collector = agreement.collector; - - // Clean up authorized hashes - delete $.authorizedHashes[agreement.agreementHash]; - if (agreement.pendingUpdateHash != bytes32(0)) delete $.authorizedHashes[agreement.pendingUpdateHash]; - - // Zero out escrow requirements before deleting - _setAgreementMaxNextClaim($, agreementId, 0, false); - _setAgreementMaxNextClaim($, agreementId, 0, true); - --$.totalAgreementCount; - $.providerAgreementIds[provider].remove(bytes32(agreementId)); - - --$.pairAgreementCount[address(collector)][provider]; - delete $.agreements[agreementId]; + // Register agreement + _s.collectors[collector].agreements[agreementId].provider = provider; + CollectorProviderData storage cpd = _s.collectors[collector].providers[provider]; + cpd.agreements.add(bytes32(agreementId)); + _s.collectors[collector].providerSet.add(provider); + _s.collectorSet.add(collector); + emit 
AgreementAdded(agreementId, collector, data.dataService, provider); + } - _reconcilePairTracking($, address(collector), provider); + function _removeAgreement( + RecurringAgreementManagerStorage storage _s, + CollectorProviderData storage cpd, + address collector, + address provider, + bytes16 agreementId + ) private { + cpd.agreements.remove(bytes32(agreementId)); + delete _s.collectors[collector].agreements[agreementId]; + emit AgreementRemoved(agreementId); + _reconcileProvider(_s, collector, provider); } + // --- Provider reconciliation --- + /** * @notice Reconcile escrow then remove (collector, provider) tracking if fully drained. - * @dev Calls {_updateEscrow} to withdraw completed thaws, then removes the pair from - * tracking only when both pairAgreementCount and escrowSnap are zero. + * @dev Calls {_reconcileProviderEscrow} to withdraw completed thaws, then removes the pair from + * tracking only when both agreement count and escrowSnap are zero. * Cascades to remove the collector when it has no remaining providers. 
- * @return gone True if the pair is not tracked after this call + * @param _s The storage reference + * @param collector The collector contract address + * @param provider Service provider address + * @return tracked True if the pair is still tracked after this call */ - // solhint-disable-next-line use-natspec - function _reconcilePairTracking( - RecurringAgreementManagerStorage storage $, + function _reconcileProvider( + RecurringAgreementManagerStorage storage _s, address collector, address provider - ) private returns (bool gone) { - _updateEscrow($, collector, provider); - if ($.pairAgreementCount[collector][provider] != 0) return false; - if ($.escrowSnap[collector][provider] != 0) return false; - if ($.collectorProviders[collector].remove(provider)) { - emit CollectorProviderRemoved(collector, provider); - if ($.collectorProviders[collector].length() == 0) { - $.collectors.remove(collector); + ) private returns (bool tracked) { + _reconcileProviderEscrow(_s, collector, provider); + CollectorProviderData storage cpd = _s.collectors[collector].providers[provider]; + + if (cpd.agreements.length() != 0 || cpd.escrowSnap != 0) tracked = true; + else if (_s.collectors[collector].providerSet.remove(provider)) { + emit ProviderRemoved(collector, provider); + if (_s.collectors[collector].providerSet.length() == 0) { + // Provider agreement count will already be zero at this point. + _s.collectorSet.remove(collector); emit CollectorRemoved(collector); } } - return true; - } - - /** - * @notice Atomically set one escrow obligation slot of an agreement and cascade to provider/global totals. - * @dev This and {_setEscrowSnap} are the only two functions that mutate totalEscrowDeficit. 
- * @param agreementId The agreement to update - * @param newValue The new obligation value - * @param pending If true, updates pendingUpdateMaxNextClaim; otherwise updates maxNextClaim - */ - // solhint-disable-next-line use-natspec - function _setAgreementMaxNextClaim( - RecurringAgreementManagerStorage storage $, - bytes16 agreementId, - uint256 newValue, - bool pending - ) private { - AgreementInfo storage agreement = $.agreements[agreementId]; - - uint256 oldValue = pending ? agreement.pendingUpdateMaxNextClaim : agreement.maxNextClaim; - if (oldValue == newValue) return; - - address collector = address(agreement.collector); - address provider = agreement.provider; - uint256 oldDeficit = _providerEscrowDeficit($, collector, provider); - - if (pending) agreement.pendingUpdateMaxNextClaim = newValue; - else agreement.maxNextClaim = newValue; - - $.sumMaxNextClaim[collector][provider] = $.sumMaxNextClaim[collector][provider] - oldValue + newValue; - $.sumMaxNextClaimAll = $.sumMaxNextClaimAll - oldValue + newValue; - $.totalEscrowDeficit = $.totalEscrowDeficit - oldDeficit + _providerEscrowDeficit($, collector, provider); - } - - /** - * @notice Compute escrow levels (min, max) based on escrow basis. - * @dev Escrow ladder: - * - * | Level | min (deposit floor) | max (thaw ceiling) | - * |------------|---------------------|--------------------| - * | Full | sumMaxNext | sumMaxNext | - * | OnDemand | 0 | sumMaxNext | - * | JustInTime | 0 | 0 | - * - * When tempJit, behaves as JustInTime regardless of configured basis. - * Full degrades to OnDemand when available balance <= totalEscrowDeficit. - * Full requires strictly more tokens on hand than the global deficit. 
- * - * @param collector The collector address - * @param provider The service provider - * @return min Deposit floor — deposit if balance is below this - * @return max Thaw ceiling — thaw if balance is above this - */ - // solhint-disable-next-line use-natspec - function _escrowMinMax( - RecurringAgreementManagerStorage storage $, - address collector, - address provider - ) private view returns (uint256 min, uint256 max) { - EscrowBasis basis = $.tempJit ? EscrowBasis.JustInTime : $.escrowBasis; - - max = basis == EscrowBasis.JustInTime ? 0 : $.sumMaxNextClaim[collector][provider]; - min = (basis == EscrowBasis.Full && $.totalEscrowDeficit < GRAPH_TOKEN.balanceOf(address(this))) ? max : 0; - } - - /** - * @notice Compute a (collector, provider) pair's escrow deficit: max(0, sumMaxNext - snapshot). - * @param collector The collector address - * @param provider The service provider - * @return deficit The amount not in escrow for this (collector, provider) - */ - // solhint-disable-next-line use-natspec - function _providerEscrowDeficit( - RecurringAgreementManagerStorage storage $, - address collector, - address provider - ) private view returns (uint256 deficit) { - uint256 sumMaxNext = $.sumMaxNextClaim[collector][provider]; - uint256 snapshot = $.escrowSnap[collector][provider]; - - deficit = (snapshot < sumMaxNext) ? sumMaxNext - snapshot : 0; } /** @@ -881,39 +760,74 @@ contract RecurringAgreementManager is * * Updates escrow snapshot at the end for global tracking. 
* + * @param _s The storage reference * @param collector The collector contract address * @param provider The service provider to update escrow for */ - // solhint-disable-next-line use-natspec - function _updateEscrow(RecurringAgreementManagerStorage storage $, address collector, address provider) private { - _ensureIncomingDistributionToCurrentBlock($); - // Auto-recover from tempJit when balance exceeds deficit (same strict < as beforeCollection/escrowMinMax) - if ($.tempJit && $.totalEscrowDeficit < GRAPH_TOKEN.balanceOf(address(this))) { - $.tempJit = false; - emit TempJitSet(false, true); - } + function _reconcileProviderEscrow( + RecurringAgreementManagerStorage storage _s, + address collector, + address provider + ) private { + _ensureIncomingDistributionToCurrentBlock(_s); - IPaymentsEscrow.EscrowAccount memory account = _fetchEscrowAccount(collector, provider); - (uint256 min, uint256 max) = _escrowMinMax($, collector, provider); + CollectorProviderData storage cpd = _s.collectors[collector].providers[provider]; + + // Sync snapshot before decisions: the escrow balance may have changed externally + // (e.g. RecurringCollector.collect drained it before calling afterCollection). + // Without this, totalEscrowDeficit is stale → spare is overstated → basis is inflated + // → deposit attempt for tokens we don't have → revert swallowed by try/catch → snap + // stays permanently stale. Reading the fresh balance here makes the function + // self-correcting regardless of prior callback failures. + _setEscrowSnap(_s, cpd, collector, provider); + + EscrowTargets memory t = _computeEscrowTargets(_s, cpd, collector, provider); - // Defensive: PaymentsEscrow maintains tokensThawing <= balance, guard against external invariant breach - uint256 escrowed = account.tokensThawing < account.balance ? account.balance - account.tokensThawing : 0; // Objectives in order of priority: // We want to end with escrowed of at least min, and seek to thaw down to no more than max. 
// 1. Do not reset thaw timer if a thaw is in progress. // (This is to avoid thrash of restarting thaws resulting in never withdrawing excess.) // 2. Make minimal adjustment to thawing tokens to get as close to min/max as possible. // (First cancel unrealised thawing before depositing.) - uint256 thawTarget = (escrowed < min) - ? (min < account.balance ? account.balance - min : 0) - : (max < escrowed ? account.balance - max : account.tokensThawing); - if (thawTarget != account.tokensThawing) { + // 3. Skip thaw if excess above max is below the minimum thaw threshold. + uint256 excess = t.max < t.escrowed ? t.escrowed - t.max : 0; + uint256 thawTarget = (t.escrowed < t.min) + ? (t.min < t.account.balance ? t.account.balance - t.min : 0) + : (t.max < t.account.balance ? t.account.balance - t.max : 0); + // Act when the target differs, but skip thaw increases below thawThreshold (obj 3). + // Deficit adjustments (escrowed < min) always proceed — the threshold only gates new thaws. + if (thawTarget != t.account.tokensThawing && (t.escrowed < t.min || t.thawThreshold <= excess)) { PAYMENTS_ESCROW.adjustThaw(collector, provider, thawTarget, false); - account = _fetchEscrowAccount(collector, provider); + t.account = _fetchEscrowAccount(collector, provider); } - _withdrawAndRebalance(collector, provider, account, min, max); - _setEscrowSnap($, collector, provider); + _withdrawAndRebalance(collector, provider, t); + _setEscrowSnap(_s, cpd, collector, provider); + } + + /** + * @notice Compute escrow reconciliation targets for a (collector, provider) pair. + * @dev Extracted to reduce stack depth in {_reconcileProviderEscrow}. 
+ * @param _s Storage reference to RecurringAgreementManagerStorage + * @param cpd Collector-provider data containing sumMaxNextClaim + * @param collector The collector contract address + * @param provider Service provider address + * @return t Computed escrow targets + */ + function _computeEscrowTargets( + RecurringAgreementManagerStorage storage _s, + CollectorProviderData storage cpd, + address collector, + address provider + ) private view returns (EscrowTargets memory t) { + t.account = _fetchEscrowAccount(collector, provider); + (t.min, t.max) = _escrowMinMax(_s, cpd.sumMaxNextClaim); + // Defensive: PaymentsEscrow maintains tokensThawing <= balance, guard against external invariant breach + t.escrowed = t.account.tokensThawing < t.account.balance ? t.account.balance - t.account.tokensThawing : 0; + // Thaw threshold: ignore thaws below this to prevent micro-thaw griefing. + // An attacker depositing dust via depositTo() then triggering reconciliation could start + // a tiny thaw that blocks legitimate thaw increases for the entire thawing period. + t.thawThreshold = (cpd.sumMaxNextClaim * uint256(_s.minThawFraction)) / 256; } /** @@ -923,69 +837,148 @@ contract RecurringAgreementManager is * These last two steps are mutually exclusive (min <= max). Only one runs per call. 
* @param collector The collector contract address * @param provider Service provider address - * @param account Current escrow account state - * @param min Deposit floor - * @param max Thaw ceiling + * @param t Escrow targets computed by {_computeEscrowTargets} */ - function _withdrawAndRebalance( - address collector, - address provider, - IPaymentsEscrow.EscrowAccount memory account, - uint256 min, - uint256 max - ) private { + function _withdrawAndRebalance(address collector, address provider, EscrowTargets memory t) private { // Withdraw any remaining thawed tokens (realised thawing is withdrawn even if within [min, max]) - if (0 < account.tokensThawing && account.thawEndTimestamp < block.timestamp) { - uint256 withdrawn = account.tokensThawing < account.balance ? account.tokensThawing : account.balance; + if (0 < t.account.tokensThawing && t.account.thawEndTimestamp < block.timestamp) { + uint256 withdrawn = t.account.tokensThawing < t.account.balance + ? t.account.tokensThawing + : t.account.balance; PAYMENTS_ESCROW.withdraw(collector, provider); emit EscrowWithdrawn(provider, collector, withdrawn); - account = _fetchEscrowAccount(collector, provider); + t.account = _fetchEscrowAccount(collector, provider); } - if (account.tokensThawing == 0) { - if (max < account.balance) + if (t.account.tokensThawing == 0) { + uint256 excess = t.max < t.account.balance ? t.account.balance - t.max : 0; + if (0 < excess && t.thawThreshold <= excess) // Thaw excess above max (might have withdrawn allowing a new thaw to start) - PAYMENTS_ESCROW.adjustThaw(collector, provider, account.balance - max, false); - else { - // Deposit any deficit below min (deposit exactly the missing amount, no more) - uint256 deposit = (min < account.balance) ? 
0 : min - account.balance; - if (0 < deposit) { - GRAPH_TOKEN.approve(address(PAYMENTS_ESCROW), deposit); - PAYMENTS_ESCROW.deposit(collector, provider, deposit); - emit EscrowFunded(provider, collector, deposit); - } + PAYMENTS_ESCROW.adjustThaw(collector, provider, excess, false); + else if (t.account.balance < t.min) { + // Deposit deficit below min (deposit exactly the missing amount, no more) + uint256 deficit = t.min - t.account.balance; + GRAPH_TOKEN.approve(address(PAYMENTS_ESCROW), deficit); + PAYMENTS_ESCROW.deposit(collector, provider, deficit); + emit EscrowFunded(provider, collector, deficit); } } } + // --- Accounting building blocks --- + /** - * @notice Atomically sync the escrow snapshot for a (collector, provider) pair after escrow mutations. - * @dev This and {_setAgreementMaxNextClaim} are the only two functions that mutate totalEscrowDeficit. + * @notice The sole mutation point for agreement.maxNextClaim and all derived totals. + * @dev ALL writes to agreement.maxNextClaim, sumMaxNextClaim, sumMaxNextClaimAll, and + * claim-driven totalEscrowDeficit MUST go through this function. It reads the old value + * from storage itself — callers cannot supply a stale or incorrect old value. + * (Escrow-balance-driven deficit updates go through {_setEscrowSnap} instead.) 
+ * @param _s The storage reference + * @param cpd The collector-provider data storage pointer + * @param agreement The agreement whose maxNextClaim is changing + * @param newMaxClaim The new maxNextClaim for the agreement + * @return oldMaxClaim The previous maxNextClaim (read from storage) + */ + function _adjustMaxNextClaim( + RecurringAgreementManagerStorage storage _s, + CollectorProviderData storage cpd, + AgreementInfo storage agreement, + uint256 newMaxClaim + ) private returns (uint256 oldMaxClaim) { + oldMaxClaim = agreement.maxNextClaim; + + if (oldMaxClaim != newMaxClaim) { + agreement.maxNextClaim = newMaxClaim; + + uint256 oldDeficit = _providerEscrowDeficit(cpd); + cpd.sumMaxNextClaim = cpd.sumMaxNextClaim - oldMaxClaim + newMaxClaim; + _s.sumMaxNextClaimAll = _s.sumMaxNextClaimAll - oldMaxClaim + newMaxClaim; + _s.totalEscrowDeficit = _s.totalEscrowDeficit - oldDeficit + _providerEscrowDeficit(cpd); + } + } + + /** + * @notice The sole mutation point for escrowSnap and escrow-balance-driven totalEscrowDeficit updates. + * @dev Together with {_adjustMaxNextClaim}, these are the only two functions that mutate totalEscrowDeficit. 
+ * @param _s The storage reference + * @param cpd The collector-provider data storage pointer * @param collector The collector address * @param provider The service provider */ - // solhint-disable-next-line use-natspec - function _setEscrowSnap(RecurringAgreementManagerStorage storage $, address collector, address provider) private { - uint256 oldEscrow = $.escrowSnap[collector][provider]; + function _setEscrowSnap( + RecurringAgreementManagerStorage storage _s, + CollectorProviderData storage cpd, + address collector, + address provider + ) private { + uint256 oldEscrow = cpd.escrowSnap; uint256 newEscrow = _fetchEscrowAccount(collector, provider).balance; if (oldEscrow == newEscrow) return; - uint256 oldDeficit = _providerEscrowDeficit($, collector, provider); - $.escrowSnap[collector][provider] = newEscrow; - uint256 newDeficit = _providerEscrowDeficit($, collector, provider); - $.totalEscrowDeficit = $.totalEscrowDeficit - oldDeficit + newDeficit; + uint256 oldDeficit = _providerEscrowDeficit(cpd); + cpd.escrowSnap = newEscrow; + uint256 newDeficit = _providerEscrowDeficit(cpd); + _s.totalEscrowDeficit = _s.totalEscrowDeficit - oldDeficit + newDeficit; } - // solhint-disable-next-line use-natspec - function _fetchEscrowAccount( - address collector, - address provider - ) private view returns (IPaymentsEscrow.EscrowAccount memory account) { - (account.balance, account.tokensThawing, account.thawEndTimestamp) = PAYMENTS_ESCROW.escrowAccounts( - address(this), - collector, - provider - ); + /** + * @notice Compute escrow levels (min, max) based on escrow basis. + * @dev Escrow ladder: + * + * | Level | min (deposit floor) | max (thaw ceiling) | + * |------------|---------------------|--------------------| + * | Full | sumMaxNext | sumMaxNext | + * | OnDemand | 0 | sumMaxNext | + * | JustInTime | 0 | 0 | + * + * The effective basis is the configured escrowBasis degraded based on spare balance + * (balance - totalEscrowDeficit). 
OnDemand requires sumMaxNextClaimAll * threshold / 256 < spare. + * Full requires sumMaxNextClaimAll * (256 + margin) / 256 < spare. + * + * @param _s The storage reference + * @param sumMaxNextClaim The collector-provider's sumMaxNextClaim + * @return min Deposit floor — deposit if balance is below this + * @return max Thaw ceiling — thaw if balance is above this + */ + function _escrowMinMax( + RecurringAgreementManagerStorage storage _s, + uint256 sumMaxNextClaim + ) private view returns (uint256 min, uint256 max) { + uint256 balance = GRAPH_TOKEN.balanceOf(address(this)); + uint256 totalDeficit = _s.totalEscrowDeficit; + uint256 spare = totalDeficit < balance ? balance - totalDeficit : 0; + uint256 sumMaxNext = _s.sumMaxNextClaimAll; + + EscrowBasis basis = _s.escrowBasis; + max = basis != EscrowBasis.JustInTime && ((sumMaxNext * uint256(_s.minOnDemandBasisThreshold)) / 256 < spare) + ? sumMaxNextClaim + : 0; + min = basis == EscrowBasis.Full && ((sumMaxNext * (256 + uint256(_s.minFullBasisMargin))) / 256 < spare) + ? max + : 0; + } + + /** + * @notice Compute a (collector, provider) pair's escrow deficit: max(0, sumMaxNext - snapshot). + * @param cpd The collector-provider data + * @return deficit The amount not in escrow for this (collector, provider) + */ + function _providerEscrowDeficit(CollectorProviderData storage cpd) private view returns (uint256 deficit) { + uint256 sumMaxNext = cpd.sumMaxNextClaim; + uint256 snapshot = cpd.escrowSnap; + + deficit = (snapshot < sumMaxNext) ? 
sumMaxNext - snapshot : 0; + } + + // --- Leaf utilities --- + + function _setProviderEligibilityOracle(IProviderEligibility oracle) private { + RecurringAgreementManagerStorage storage _s = _getStorage(); + if (address(_s.providerEligibilityOracle) == address(oracle)) return; + + IProviderEligibility oldOracle = _s.providerEligibilityOracle; + _s.providerEligibilityOracle = oracle; + emit ProviderEligibilityOracleSet(oldOracle, oracle); } /** @@ -993,20 +986,20 @@ contract RecurringAgreementManager is * @dev No-op if allocator is not set or already ensured this block. The local ensuredIncomingDistributedToBlock * check avoids the external call overhead (~2800 gas) on redundant same-block invocations * (e.g. beforeCollection + afterCollection in the same collection tx). + * @param _s The storage reference */ - // solhint-disable-next-line use-natspec - function _ensureIncomingDistributionToCurrentBlock(RecurringAgreementManagerStorage storage $) private { - // Uses low 8 bytes of block.number; consecutive blocks always differ so same-block - // dedup works correctly even past uint64 wrap. A false match requires the previous - // last call to have been exactly 2^64 blocks ago (~584 billion years at 1 block/s). - uint64 blockNum; + function _ensureIncomingDistributionToCurrentBlock(RecurringAgreementManagerStorage storage _s) private { + // Uses low 4 bytes of block.number; consecutive blocks always differ so same-block + // dedup works correctly even past uint32 wrap. A false match requires the previous + // last call to have been exactly 2^32 blocks ago (~1,630 years at 12 s/block). 
+ uint32 blockNum; unchecked { - blockNum = uint64(block.number); + blockNum = uint32(block.number); } - if ($.ensuredIncomingDistributedToBlock == blockNum) return; - $.ensuredIncomingDistributedToBlock = blockNum; + if (_s.ensuredIncomingDistributedToBlock == blockNum) return; + _s.ensuredIncomingDistributedToBlock = blockNum; - IIssuanceAllocationDistribution allocator = $.issuanceAllocator; + IIssuanceAllocationDistribution allocator = _s.issuanceAllocator; if (address(allocator) == address(0)) return; try allocator.distributeIssuance() {} catch { @@ -1014,14 +1007,25 @@ contract RecurringAgreementManager is } } + function _fetchEscrowAccount( + address collector, + address provider + ) private view returns (IPaymentsEscrow.EscrowAccount memory account) { + (account.balance, account.tokensThawing, account.thawEndTimestamp) = PAYMENTS_ESCROW.escrowAccounts( + address(this), + collector, + provider + ); + } + /** * @notice Get the ERC-7201 namespaced storage + * @return _s The storage reference */ - // solhint-disable-next-line use-natspec - function _getStorage() private pure returns (RecurringAgreementManagerStorage storage $) { + function _getStorage() private pure returns (RecurringAgreementManagerStorage storage _s) { // solhint-disable-next-line no-inline-assembly assembly { - $.slot := RECURRING_AGREEMENT_MANAGER_STORAGE_LOCATION + _s.slot := RECURRING_AGREEMENT_MANAGER_STORAGE_LOCATION } } } diff --git a/packages/issuance/package.json b/packages/issuance/package.json index 6223811a4..2030a0006 100644 --- a/packages/issuance/package.json +++ b/packages/issuance/package.json @@ -27,7 +27,8 @@ "clean": "rm -rf artifacts/ forge-artifacts/ cache_forge/ coverage/ cache/ types/ typechain-src/ .eslintcache test/node_modules/", "compile": "hardhat compile --quiet --no-tests", "typechain": "typechain --target ethers-v6 --out-dir typechain-src 'artifacts/contracts/**/!(*.dbg).json' && tsc -p tsconfig.typechain.json && rm -rf typechain-src && echo 
'{\"type\":\"commonjs\"}' > types/package.json", - "test": "forge test", + "test": "pnpm test:self", + "test:self": "forge test", "test:coverage": "forge coverage", "test:coverage:self": "mkdir -p coverage && forge coverage --report lcov --report-file coverage/lcov.info", "lint": "pnpm lint:ts; pnpm lint:sol; pnpm lint:forge; pnpm lint:md; pnpm lint:json", diff --git a/packages/issuance/test/unit/agreement-manager/afterCollection.t.sol b/packages/issuance/test/unit/agreement-manager/afterCollection.t.sol index 6e0eae7c3..cb25968a1 100644 --- a/packages/issuance/test/unit/agreement-manager/afterCollection.t.sol +++ b/packages/issuance/test/unit/agreement-manager/afterCollection.t.sol @@ -77,7 +77,7 @@ contract RecurringAgreementManagerCollectionCallbackTest is RecurringAgreementMa assertEq(escrowAfter, escrowBefore); } - function test_BeforeCollection_Revert_WhenCallerNotRecurringCollector() public { + function test_BeforeCollection_NoOp_WhenCallerNotRecurringCollector() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -87,7 +87,7 @@ contract RecurringAgreementManagerCollectionCallbackTest is RecurringAgreementMa bytes16 agreementId = _offerAgreement(rca); - vm.expectRevert(IRecurringAgreementManagement.OnlyAgreementCollector.selector); + // Wrong collector sees no agreement under its namespace — silent no-op agreementManager.beforeCollection(agreementId, 100 ether); } @@ -126,11 +126,11 @@ contract RecurringAgreementManagerCollectionCallbackTest is RecurringAgreementMa // After first collection, maxInitialTokens no longer applies // New max = 1e18 * 3600 = 3600e18 - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 3600 ether); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 3600 ether); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 3600 ether); } - function 
test_AfterCollection_Revert_WhenCallerNotRecurringCollector() public { + function test_AfterCollection_NoOp_WhenCallerNotRecurringCollector() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -140,7 +140,7 @@ contract RecurringAgreementManagerCollectionCallbackTest is RecurringAgreementMa bytes16 agreementId = _offerAgreement(rca); - vm.expectRevert(IRecurringAgreementManagement.OnlyAgreementCollector.selector); + // Wrong collector sees no agreement under its namespace — silent no-op agreementManager.afterCollection(agreementId, 100 ether); } @@ -166,7 +166,7 @@ contract RecurringAgreementManagerCollectionCallbackTest is RecurringAgreementMa vm.prank(address(recurringCollector)); agreementManager.afterCollection(agreementId, 0); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 0); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } diff --git a/packages/issuance/test/unit/agreement-manager/approver.t.sol b/packages/issuance/test/unit/agreement-manager/approver.t.sol index 1bf635a1f..f1b4d97c3 100644 --- a/packages/issuance/test/unit/agreement-manager/approver.t.sol +++ b/packages/issuance/test/unit/agreement-manager/approver.t.sol @@ -8,6 +8,7 @@ import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/ import { IProviderEligibilityManagement } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibilityManagement.sol"; import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; import { IIssuanceTarget } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol"; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from 
"@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; @@ -16,60 +17,6 @@ import { MockIssuanceAllocator } from "./mocks/MockIssuanceAllocator.sol"; contract RecurringAgreementManagerApproverTest is RecurringAgreementManagerSharedTest { /* solhint-disable graph/func-name-mixedcase */ - // -- IAgreementOwner Tests -- - - function test_ApproveAgreement_ReturnsSelector() public { - IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( - 100 ether, - 1 ether, - 60, - 3600, - uint64(block.timestamp + 365 days) - ); - - _offerAgreement(rca); - - bytes32 agreementHash = recurringCollector.hashRCA(rca); - bytes4 result = agreementManager.approveAgreement(agreementHash); - assertEq(result, IAgreementOwner.approveAgreement.selector); - } - - function test_ApproveAgreement_ReturnsZero_WhenNotAuthorized() public { - bytes32 fakeHash = keccak256("fake agreement"); - assertEq(agreementManager.approveAgreement(fakeHash), bytes4(0)); - } - - function test_ApproveAgreement_DifferentHashesAreIndependent() public { - IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( - 100 ether, - 1 ether, - 60, - 3600, - uint64(block.timestamp + 365 days) - ); - rca1.nonce = 1; - - IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( - 200 ether, - 2 ether, - 60, - 7200, - uint64(block.timestamp + 365 days) - ); - rca2.nonce = 2; - - // Only offer rca1 - _offerAgreement(rca1); - - // rca1 hash should be authorized - bytes32 hash1 = recurringCollector.hashRCA(rca1); - assertEq(agreementManager.approveAgreement(hash1), IAgreementOwner.approveAgreement.selector); - - // rca2 hash should NOT be authorized - bytes32 hash2 = recurringCollector.hashRCA(rca2); - assertEq(agreementManager.approveAgreement(hash2), bytes4(0)); - } - // -- ERC165 Tests -- function test_SupportsInterface_IIssuanceTarget() public view { @@ -147,7 +94,7 @@ contract 
RecurringAgreementManagerApproverTest is RecurringAgreementManagerShare token.mint(address(agreementManager), available); vm.prank(operator); - agreementManager.offerAgreement(rca, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); IPaymentsEscrow.EscrowAccount memory expected; (expected.balance, expected.tokensThawing, expected.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -166,11 +113,14 @@ contract RecurringAgreementManagerApproverTest is RecurringAgreementManagerShare } function test_GetAgreementMaxNextClaim_ZeroForUnknown() public view { - assertEq(agreementManager.getAgreementMaxNextClaim(bytes16(keccak256("unknown"))), 0); + assertEq( + agreementManager.getAgreementMaxNextClaim(address(recurringCollector), bytes16(keccak256("unknown"))), + 0 + ); } function test_GetIndexerAgreementCount_ZeroForUnknown() public { - assertEq(agreementManager.getProviderAgreementCount(makeAddr("unknown")), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), makeAddr("unknown")), 0); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/issuance/test/unit/agreement-manager/branchCoverage.t.sol b/packages/issuance/test/unit/agreement-manager/branchCoverage.t.sol new file mode 100644 index 000000000..92ae0407f --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/branchCoverage.t.sol @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { + REGISTERED, + ACCEPTED, + OFFER_TYPE_NEW +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; + 
+import { RecurringAgreementManager } from "../../../contracts/agreement/RecurringAgreementManager.sol"; +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; + +/// @notice Targeted tests for uncovered branches in RecurringAgreementManager. +contract RecurringAgreementManagerBranchCoverageTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + bytes32 internal constant PAUSE_ROLE = keccak256("PAUSE_ROLE"); + + // ══════════════════════════════════════════════════════════════════════ + // setIssuanceAllocator — ERC165 validation (L305) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice Setting allocator to an address that does not support IIssuanceAllocationDistribution reverts. + function test_SetIssuanceAllocator_Revert_InvalidERC165() public { + // Use an address with code but wrong interface (the mock collector doesn't implement IIssuanceAllocationDistribution) + vm.prank(governor); + vm.expectRevert( + abi.encodeWithSelector( + RecurringAgreementManager.InvalidIssuanceAllocator.selector, + address(recurringCollector) + ) + ); + agreementManager.setIssuanceAllocator(address(recurringCollector)); + } + + /// @notice Setting allocator to an EOA (no code) also fails ERC165 check. + function test_SetIssuanceAllocator_Revert_EOA() public { + address eoa = makeAddr("randomEOA"); + vm.prank(governor); + vm.expectRevert(abi.encodeWithSelector(RecurringAgreementManager.InvalidIssuanceAllocator.selector, eoa)); + agreementManager.setIssuanceAllocator(eoa); + } + + // ══════════════════════════════════════════════════════════════════════ + // offerAgreement — unauthorized collector (L372) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice offerAgreement reverts when collector lacks COLLECTOR_ROLE. 
+ function test_OfferAgreement_Revert_UnauthorizedCollector() public { + MockRecurringCollector rogue = new MockRecurringCollector(); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.payer = address(agreementManager); + + vm.prank(operator); + vm.expectRevert( + abi.encodeWithSelector(IRecurringAgreementManagement.UnauthorizedCollector.selector, address(rogue)) + ); + agreementManager.offerAgreement(IRecurringCollector(address(rogue)), OFFER_TYPE_NEW, abi.encode(rca)); + } + + // ══════════════════════════════════════════════════════════════════════ + // offerAgreement — zero service provider (L378) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice offerAgreement reverts when collector returns serviceProvider = address(0). + function test_OfferAgreement_Revert_ZeroServiceProvider() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.serviceProvider = address(0); // mock will return this as-is + + token.mint(address(agreementManager), 1_000_000 ether); + + vm.prank(operator); + vm.expectRevert(IRecurringAgreementManagement.ServiceProviderZeroAddress.selector); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); + } + + // ══════════════════════════════════════════════════════════════════════ + // offerAgreement — unauthorized data service (L379) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice offerAgreement reverts when the returned dataService lacks DATA_SERVICE_ROLE. 
+ function test_OfferAgreement_Revert_UnauthorizedDataService() public { + address rogueDS = makeAddr("rogueDataService"); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.dataService = rogueDS; // not granted DATA_SERVICE_ROLE + + token.mint(address(agreementManager), 1_000_000 ether); + + vm.prank(operator); + vm.expectRevert( + abi.encodeWithSelector(IRecurringAgreementManagement.UnauthorizedDataService.selector, rogueDS) + ); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); + } + + // ══════════════════════════════════════════════════════════════════════ + // forceRemoveAgreement (L412–424) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice forceRemoveAgreement is a no-op when the agreement is unknown (provider == address(0)). + function test_ForceRemoveAgreement_NoOp_UnknownAgreement() public { + bytes16 unknownId = bytes16(keccak256("nonexistent")); + + // Should not revert — early return + vm.prank(operator); + agreementManager.forceRemoveAgreement(address(recurringCollector), unknownId); + + // No state changes + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); + } + + /// @notice forceRemoveAgreement removes a tracked agreement. 
+ function test_ForceRemoveAgreement_RemovesTracked() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + + // Verify tracked + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); + assertTrue(agreementManager.getSumMaxNextClaim(_collector(), indexer) > 0); + + // Force remove + vm.prank(operator); + agreementManager.forceRemoveAgreement(address(recurringCollector), agreementId); + + // Cleaned up + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + assertEq(agreementManager.getSumMaxNextClaimAll(), 0); + } + + // ══════════════════════════════════════════════════════════════════════ + // emergencyRevokeRole (L437–439) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice emergencyRevokeRole reverts when attempting to revoke GOVERNOR_ROLE. + function test_EmergencyRevokeRole_Revert_CannotRevokeGovernor() public { + // Grant PAUSE_ROLE to governor for this test + vm.prank(governor); + agreementManager.grantRole(PAUSE_ROLE, governor); + + vm.prank(governor); + vm.expectRevert(RecurringAgreementManager.CannotRevokeGovernorRole.selector); + agreementManager.emergencyRevokeRole(GOVERNOR_ROLE, governor); + } + + /// @notice emergencyRevokeRole succeeds for non-governor roles. 
+ function test_EmergencyRevokeRole_Success() public { + // Grant PAUSE_ROLE to an account + address pauseGuardian = makeAddr("pauseGuardian"); + vm.prank(governor); + agreementManager.grantRole(PAUSE_ROLE, pauseGuardian); + + // Grant a role to revoke + address target = makeAddr("target"); + vm.prank(operator); + agreementManager.grantRole(AGREEMENT_MANAGER_ROLE, target); + assertTrue(agreementManager.hasRole(AGREEMENT_MANAGER_ROLE, target)); + + // Emergency revoke + vm.prank(pauseGuardian); + agreementManager.emergencyRevokeRole(AGREEMENT_MANAGER_ROLE, target); + assertFalse(agreementManager.hasRole(AGREEMENT_MANAGER_ROLE, target)); + } + + // ══════════════════════════════════════════════════════════════════════ + // afterAgreementStateChange callback (L360–361) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice afterAgreementStateChange triggers reconciliation when called by collector. + function test_AfterAgreementStateChange_ReconcileOnCallback() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + + // Mark agreement as accepted in the mock collector + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + // Simulate collector calling afterAgreementStateChange (as happens on provider accept) + vm.prank(address(recurringCollector)); + agreementManager.afterAgreementStateChange(agreementId, bytes32(0), 0); + + // Agreement should still be tracked (reconcile updates maxNextClaim) + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); + } + + // ══════════════════════════════════════════════════════════════════════ + // _withdrawAndRebalance — deposit deficit branch (L854/857–862) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice When escrow balance drops below min (after 
collection), reconcile deposits the deficit. + function test_WithdrawAndRebalance_DepositDeficit() public { + // Offer agreement in Full mode — escrow gets fully funded + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + _offerAgreement(rca); + + uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; // 3700 ether + + // Verify fully funded + (uint256 balBefore, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(balBefore, expectedMaxClaim); + + // Simulate collection draining most of the escrow: + // Set escrow balance to a small amount (below min), no thawing + uint256 drainedBalance = 100 ether; // well below min = expectedMaxClaim in Full mode + paymentsEscrow.setAccount( + address(agreementManager), + address(recurringCollector), + indexer, + drainedBalance, + 0, // no thawing + 0 // no thaw end + ); + + // Manager still has tokens (minted 1M in _offerAgreement, deposited 3700) + // Reconcile should trigger deposit deficit branch + agreementManager.reconcileProvider(address(recurringCollector), indexer); + + // After reconcile, escrow should be topped up + (uint256 balAfter, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertTrue(balAfter > drainedBalance, "escrow should be topped up after reconcile"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/callbackGas.t.sol b/packages/issuance/test/unit/agreement-manager/callbackGas.t.sol new file mode 100644 index 000000000..e4870924f --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/callbackGas.t.sol @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + 
+import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockIssuanceAllocator } from "./mocks/MockIssuanceAllocator.sol"; + +/// @notice Gas regression canary for RAM callbacks (beforeCollection / afterCollection). +/// RecurringCollector caps gas forwarded to these callbacks at 1.5M (MAX_CALLBACK_GAS). +/// +/// These tests use mocks for PaymentsEscrow, IssuanceAllocator, and RecurringCollector, +/// so measured gas is lower than production. They catch RAM code regressions (new loops, +/// extra external calls, etc.) but cannot validate the production gas margin. +/// +/// Production-representative gas measurements live in the testing package: +/// packages/testing/test/gas/CallbackGas.t.sol (uses real PaymentsEscrow, RecurringCollector, +/// and IssuanceAllocator via RealStackHarness). +contract RecurringAgreementManagerCallbackGasTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + /// @notice Gas budget that RecurringCollector forwards to each callback. + /// Must match MAX_CALLBACK_GAS in RecurringCollector. + uint256 internal constant MAX_CALLBACK_GAS = 1_500_000; + + /// @notice Alarm threshold — 1/10th of the callback gas budget. + /// Current mock worst-case is ~70k. Crossing 150k means RAM code got significantly + /// heavier and the production gas margin (against real contracts) must be re-evaluated. + uint256 internal constant GAS_ALARM_THRESHOLD = MAX_CALLBACK_GAS / 10; // 150_000 + + MockIssuanceAllocator internal mockAllocator; + + function setUp() public override { + super.setUp(); + mockAllocator = new MockIssuanceAllocator(token, address(agreementManager)); + vm.label(address(mockAllocator), "MockIssuanceAllocator"); + + vm.prank(governor); + agreementManager.setIssuanceAllocator(address(mockAllocator)); + } + + // ==================== beforeCollection gas ==================== + + /// @notice Worst-case beforeCollection: escrow short, triggers distributeIssuance + JIT deposit. 
+ function test_BeforeCollection_GasWithinBudget_JitDeposit() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + + (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + + mockAllocator.setMintPerDistribution(1000 ether); + vm.roll(block.number + 1); + + uint256 tokensToCollect = escrowBalance + 500 ether; + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, tokensToCollect); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_ALARM_THRESHOLD, "beforeCollection (JIT) exceeds 1/10th of callback gas budget"); + } + + /// @notice beforeCollection early-return path: escrow sufficient, no external calls. + function test_BeforeCollection_GasWithinBudget_EscrowSufficient() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 1 ether); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_ALARM_THRESHOLD, "beforeCollection (sufficient) exceeds 1/10th of callback gas budget"); + } + + // ==================== afterCollection gas ==================== + + /// @notice Worst-case afterCollection: reconcile + full escrow update (rebalance path). 
+ function test_AfterCollection_GasWithinBudget_FullReconcile() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + + uint64 acceptedAt = uint64(block.timestamp); + uint64 lastCollectionAt = uint64(block.timestamp + 1 hours); + _setAgreementCollected(agreementId, rca, acceptedAt, lastCollectionAt); + vm.warp(lastCollectionAt); + + mockAllocator.setMintPerDistribution(1000 ether); + vm.roll(block.number + 1); + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + agreementManager.afterCollection(agreementId, 500 ether); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt( + gasUsed, + GAS_ALARM_THRESHOLD, + "afterCollection (full reconcile) exceeds 1/10th of callback gas budget" + ); + } + + /// @notice afterCollection when agreement was canceled by SP — reconcile zeros out maxNextClaim. + function test_AfterCollection_GasWithinBudget_CanceledBySP() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + _setAgreementCanceledBySP(agreementId, rca); + + mockAllocator.setMintPerDistribution(1000 ether); + vm.roll(block.number + 1); + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + agreementManager.afterCollection(agreementId, 0); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt( + gasUsed, + GAS_ALARM_THRESHOLD, + "afterCollection (canceled by SP) exceeds 1/10th of callback gas budget" + ); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/cancelAgreement.t.sol b/packages/issuance/test/unit/agreement-manager/cancelAgreement.t.sol index 2eae0a66e..c6c86bb53 100644 --- 
a/packages/issuance/test/unit/agreement-manager/cancelAgreement.t.sol +++ b/packages/issuance/test/unit/agreement-manager/cancelAgreement.t.sol @@ -4,7 +4,6 @@ pragma solidity ^0.8.27; import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; -import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; @@ -21,19 +20,21 @@ contract RecurringAgreementManagerCancelAgreementTest is RecurringAgreementManag bytes16 agreementId = _offerAgreement(rca); - // Simulate acceptance + // Simulate acceptance, then advance time so cancel creates a non-zero claim window _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + vm.warp(block.timestamp + 10); - vm.expectEmit(address(agreementManager)); - emit IRecurringAgreementManagement.AgreementCanceled(agreementId, indexer); + // After cancel by payer with 10s elapsed: maxNextClaim = 1e18 * 10 + 100e18 = 110e18 + uint256 preMaxClaim = agreementManager.getAgreementInfo(address(recurringCollector), agreementId).maxNextClaim; - vm.prank(operator); - bool gone = agreementManager.cancelAgreement(agreementId); - assertFalse(gone); // still tracked after cancel + bool gone = _cancelAgreement(agreementId); + // CanceledByPayer with remaining claim window => still tracked + assertFalse(gone); - // Verify the mock was called - assertTrue(mockSubgraphService.canceled(agreementId)); - assertEq(mockSubgraphService.cancelCallCount(agreementId), 1); + // Verify maxNextClaim decreased to the payer-cancel window + uint256 postMaxClaim = agreementManager.getAgreementInfo(address(recurringCollector), agreementId).maxNextClaim; + assertEq(postMaxClaim, 1 ether 
* 10 + 100 ether, "maxNextClaim should reflect payer-cancel window"); + assertTrue(postMaxClaim < preMaxClaim, "maxNextClaim should decrease after cancel"); } function test_CancelAgreement_ReconcileAfterCancel() public { @@ -54,15 +55,14 @@ contract RecurringAgreementManagerCancelAgreementTest is RecurringAgreementManag _setAgreementCanceledBySP(agreementId, rca); // CanceledBySP has maxNextClaim=0 so agreement is deleted inline - vm.prank(operator); - bool gone = agreementManager.cancelAgreement(agreementId); + bool gone = _cancelAgreement(agreementId); assertTrue(gone); // deleted inline — nothing left to claim // After cancelAgreement (which now reconciles), required escrow should decrease assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } - function test_CancelAgreement_Idempotent_CanceledByPayer() public { + function test_CancelAgreement_AlreadyCanceled_StillForwards() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -75,13 +75,12 @@ contract RecurringAgreementManagerCancelAgreementTest is RecurringAgreementManag // Set as CanceledByPayer (already canceled) _setAgreementCanceledByPayer(agreementId, rca, uint64(block.timestamp), uint64(block.timestamp + 1 hours), 0); - // Should succeed — idempotent, skips the external cancel call - vm.prank(operator); - bool gone = agreementManager.cancelAgreement(agreementId); - assertFalse(gone); // still tracked after cancel - - // Should NOT have called SubgraphService - assertEq(mockSubgraphService.cancelCallCount(agreementId), 0); + // cancelAgreement always forwards to collector — caller is responsible + // for knowing whether the agreement is already canceled + bool gone = _cancelAgreement(agreementId); + // Agreement may or may not be fully gone depending on collector behavior + // after re-cancel — the key invariant is that it doesn't revert + assertTrue(gone || !gone); // no-op assertion, just verify no revert } function 
test_CancelAgreement_Idempotent_CanceledByServiceProvider() public { @@ -99,18 +98,14 @@ contract RecurringAgreementManagerCancelAgreementTest is RecurringAgreementManag // Should succeed — idempotent, reconciles to update escrow // CanceledBySP has maxNextClaim=0 so agreement is deleted inline - vm.prank(operator); - bool gone = agreementManager.cancelAgreement(agreementId); + bool gone = _cancelAgreement(agreementId); assertTrue(gone); // deleted inline — nothing left to claim - // Should NOT have called SubgraphService - assertEq(mockSubgraphService.cancelCallCount(agreementId), 0); - // Required escrow should drop to 0 assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } - function test_CancelAgreement_Revert_WhenNotAccepted() public { + function test_CancelAgreement_Offered() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -120,21 +115,25 @@ contract RecurringAgreementManagerCancelAgreementTest is RecurringAgreementManag bytes16 agreementId = _offerAgreement(rca); - // Agreement is NotAccepted — should revert - vm.expectRevert( - abi.encodeWithSelector(IRecurringAgreementManagement.AgreementNotAccepted.selector, agreementId) - ); - vm.prank(operator); - agreementManager.cancelAgreement(agreementId); + // Cancel an offered (not yet accepted) agreement — should succeed and clean up + bool gone = _cancelAgreement(agreementId); + assertTrue(gone); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } - function test_CancelAgreement_ReturnsTrue_WhenNotOffered() public { + function test_CancelAgreement_RejectsUnknown_WhenNotOffered() public { bytes16 fakeId = bytes16(keccak256("fake")); - // Returns true (gone) when agreement not found + // cancelAgreement is a passthrough — unknown agreement triggers AgreementRejected via callback + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementRejected( + fakeId, + 
address(recurringCollector), + IRecurringAgreementManagement.AgreementRejectionReason.UnknownAgreement + ); + vm.prank(operator); - bool gone = agreementManager.cancelAgreement(fakeId); - assertTrue(gone); + agreementManager.cancelAgreement(address(recurringCollector), fakeId, bytes32(0), 0); } function test_CancelAgreement_Revert_WhenNotOperator() public { @@ -154,6 +153,7 @@ contract RecurringAgreementManagerCancelAgreementTest is RecurringAgreementManag rca.nonce ); + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; address nonOperator = makeAddr("nonOperator"); vm.expectRevert( abi.encodeWithSelector( @@ -163,10 +163,10 @@ contract RecurringAgreementManagerCancelAgreementTest is RecurringAgreementManag ) ); vm.prank(nonOperator); - agreementManager.cancelAgreement(agreementId); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, activeHash, 0); } - function test_CancelAgreement_Revert_WhenPaused() public { + function test_CancelAgreement_SucceedsWhenPaused() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -181,9 +181,10 @@ contract RecurringAgreementManagerCancelAgreementTest is RecurringAgreementManag agreementManager.pause(); vm.stopPrank(); - vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + // Role-gated functions should succeed even when paused + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; vm.prank(operator); - agreementManager.cancelAgreement(agreementId); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, activeHash, 0); } function test_CancelAgreement_EmitsEvent() public { @@ -198,10 +199,30 @@ contract RecurringAgreementManagerCancelAgreementTest is RecurringAgreementManag _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); vm.expectEmit(address(agreementManager)); - emit 
IRecurringAgreementManagement.AgreementCanceled(agreementId, indexer); + emit IRecurringAgreementManagement.AgreementRemoved(agreementId); + + _cancelAgreement(agreementId); + } + + function test_CancelAgreement_Succeeds_WhenPaused() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + vm.startPrank(governor); + agreementManager.grantRole(keccak256("PAUSE_ROLE"), governor); + agreementManager.pause(); + vm.stopPrank(); + // Role-gated functions should succeed even when paused + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; vm.prank(operator); - agreementManager.cancelAgreement(agreementId); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, activeHash, 0); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/issuance/test/unit/agreement-manager/cancelWithPendingUpdate.t.sol b/packages/issuance/test/unit/agreement-manager/cancelWithPendingUpdate.t.sol index 33f9e5a16..caebedc9c 100644 --- a/packages/issuance/test/unit/agreement-manager/cancelWithPendingUpdate.t.sol +++ b/packages/issuance/test/unit/agreement-manager/cancelWithPendingUpdate.t.sol @@ -43,42 +43,35 @@ contract RecurringAgreementManagerCancelWithPendingUpdateTest is RecurringAgreem ); _offerAgreementUpdate(rcau); - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + // max(current, pending) = max(3700, 14600) = 14600 + uint256 pendingMaxClaim = 14600 ether; assertEq( agreementManager.getSumMaxNextClaim(_collector(), indexer), - originalMaxClaim + pendingMaxClaim, - "both original and pending escrow should be reserved" + pendingMaxClaim, + "escrow reserved for max of current and pending" ); // 3. Cancel the agreement — simulate CanceledByPayer with remaining collection window. 
// The collector still has a non-zero maxNextClaim (remaining window to collect). // updateNonce is still 0 — the pending update was never applied. - uint64 canceledAt = uint64(block.timestamp + 1 hours); - vm.warp(canceledAt); - _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, canceledAt, 0); + uint64 collectableUntil = uint64(block.timestamp + 1 hours); + vm.warp(collectableUntil); + _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, collectableUntil, 0); - // Call cancelAgreement — state is already CanceledByPayer so it skips the DS call - // and goes straight to reconcile-and-cleanup. - vm.prank(operator); - bool gone = agreementManager.cancelAgreement(agreementId); - assertFalse(gone, "agreement should still exist (has remaining claims)"); + // State is CanceledByPayer — cancelAgreement rejects non-Accepted states, + // so use reconcileAgreement to trigger cleanup. + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + assertTrue(exists, "agreement should still exist (has remaining claims)"); // 4. BUG: The pending update can never be accepted (collector rejects updates on // canceled agreements), yet pendingUpdateMaxNextClaim is still reserved. - IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); uint256 sumAfterCancel = agreementManager.getSumMaxNextClaim(_collector(), indexer); // The pending escrow should have been freed (zeroed) since the update is dead. - // This assertion demonstrates the bug — it will FAIL because the pending escrow - // is still included in sumMaxNextClaim. - assertEq( - info.pendingUpdateMaxNextClaim, - 0, - "BUG: pending update escrow should be zero after cancel (update can never be applied)" - ); + // sumMaxNextClaim should only include the base claim, not the dead pending update. 
assertEq( sumAfterCancel, - agreementManager.getAgreementMaxNextClaim(agreementId), + agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), "BUG: sumMaxNextClaim should only include the base claim, not the dead pending update" ); } @@ -111,25 +104,25 @@ contract RecurringAgreementManagerCancelWithPendingUpdateTest is RecurringAgreem _offerAgreementUpdate(rcau); // 3. Cancel (CanceledByPayer, remaining window) - uint64 canceledAt = uint64(block.timestamp + 1 hours); - vm.warp(canceledAt); - _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, canceledAt, 0); - - vm.prank(operator); - agreementManager.cancelAgreement(agreementId); - - // 4. Explicit reconcile — pending should already be cleared - agreementManager.reconcileAgreement(agreementId); - - IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); - assertEq(info.pendingUpdateMaxNextClaim, 0, "pending escrow should be zero after cancel"); - assertEq(info.pendingUpdateNonce, 0, "pending nonce should be zero after cancel"); - assertEq(info.pendingUpdateHash, bytes32(0), "pending hash should be zero after cancel"); + uint64 collectableUntil = uint64(block.timestamp + 1 hours); + vm.warp(collectableUntil); + _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, collectableUntil, 0); + + // State is CanceledByPayer — cancelAgreement rejects non-Accepted states, + // so use reconcileAgreement to trigger cleanup. + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + + // After cancel + reconcile, maxNextClaim should reflect only the remaining collection window + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo( + address(recurringCollector), + agreementId + ); + assertEq( + info.maxNextClaim, + agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId) + ); - // 5. 
The dead update hash should no longer be authorized - bytes32 updateHash = recurringCollector.hashRCAU(rcau); - bytes4 result = agreementManager.approveAgreement(updateHash); - assertTrue(result != agreementManager.approveAgreement.selector, "dead hash should not be authorized"); + // The pending update can no longer be applied (collector handles hash lifecycle) } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/issuance/test/unit/agreement-manager/cascadeCleanup.t.sol b/packages/issuance/test/unit/agreement-manager/cascadeCleanup.t.sol index e8d6c579e..ef84bee9e 100644 --- a/packages/issuance/test/unit/agreement-manager/cascadeCleanup.t.sol +++ b/packages/issuance/test/unit/agreement-manager/cascadeCleanup.t.sol @@ -2,6 +2,7 @@ pragma solidity ^0.8.27; import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; @@ -41,6 +42,8 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage maxOngoingTokensPerSecond: 1 ether, minSecondsPerCollection: 60, maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: nonce, metadata: "" }); @@ -67,6 +70,8 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage maxOngoingTokensPerSecond: 1 ether, minSecondsPerCollection: 60, maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: nonce, metadata: "" }); @@ -85,7 +90,8 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage ) internal returns (bytes16) { token.mint(address(agreementManager), 1_000_000 ether); vm.prank(operator); - return 
agreementManager.offerAgreement(rca, IRecurringCollector(address(collector))); + return + agreementManager.offerAgreement(IRecurringCollector(address(collector)), OFFER_TYPE_NEW, abi.encode(rca)); } // -- Tests: Enumeration after offer -- @@ -95,9 +101,9 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage _offerAgreement(rca); assertEq(agreementManager.getCollectorCount(), 1); - assertEq(agreementManager.getCollectors()[0], address(recurringCollector)); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); - assertEq(agreementManager.getCollectorProviders(address(recurringCollector))[0], indexer); + assertEq(agreementManager.getCollectorAt(0), address(recurringCollector)); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getProviderAt(address(recurringCollector), 0), indexer); assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } @@ -110,7 +116,7 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage // Sets still have one entry each, but pair count is 2 assertEq(agreementManager.getCollectorCount(), 1); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 1); assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 2); } @@ -122,8 +128,8 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage _offerForCollector(collector2, rca2); assertEq(agreementManager.getCollectorCount(), 2); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); - assertEq(agreementManager.getCollectorProviderCount(address(collector2)), 1); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getProviderCount(address(collector2)), 1); } function 
test_Cascade_MultiProvider_BothTracked() public { @@ -136,7 +142,7 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage _offerAgreement(rca2); assertEq(agreementManager.getCollectorCount(), 1); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 2); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 2); } // -- Tests: Cascade on reconciliation -- @@ -150,11 +156,11 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage // Reconcile first (SP canceled → deleted) _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); // Pair still tracked assertEq(agreementManager.getCollectorCount(), 1); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 1); assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } @@ -163,29 +169,29 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage bytes16 id = _offerAgreement(rca); _setAgreementCanceledBySP(id, rca); - agreementManager.reconcileAgreement(id); + agreementManager.reconcileAgreement(address(recurringCollector), id); // Agreement removed, but pair stays tracked while escrow is thawing assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); assertEq(agreementManager.getCollectorCount(), 1, "collector stays tracked during thaw"); assertEq( - agreementManager.getCollectorProviderCount(address(recurringCollector)), + agreementManager.getProviderCount(address(recurringCollector)), 1, "provider stays tracked during thaw" ); - // After thaw period, reconcileCollectorProvider reconciles escrow and removes + // After thaw period, reconcileProvider reconciles escrow and removes vm.warp(block.timestamp + 
paymentsEscrow.THAWING_PERIOD() + 1); vm.expectEmit(address(agreementManager)); - emit IRecurringAgreementManagement.CollectorProviderRemoved(address(recurringCollector), indexer); + emit IRecurringAgreementManagement.ProviderRemoved(address(recurringCollector), indexer); vm.expectEmit(address(agreementManager)); emit IRecurringAgreementManagement.CollectorRemoved(address(recurringCollector)); - assertFalse(agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer)); + assertFalse(agreementManager.reconcileProvider(address(recurringCollector), indexer)); assertEq(agreementManager.getCollectorCount(), 0); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 0); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 0); } function test_Cascade_ReconcileLastProvider_CollectorCleanedUp_OtherCollectorRemains() public { @@ -198,24 +204,20 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage // Reconcile collector1's agreement — pair stays tracked during thaw _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); assertEq(agreementManager.getCollectorCount(), 2, "both collectors tracked during thaw"); - assertEq( - agreementManager.getCollectorProviderCount(address(recurringCollector)), - 1, - "provider stays during thaw" - ); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 1, "provider stays during thaw"); - // After thaw period, reconcileCollectorProvider reconciles escrow and removes + // After thaw period, reconcileProvider reconciles escrow and removes vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); - agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + agreementManager.reconcileProvider(address(recurringCollector), indexer); // collector1 cleaned up, collector2 remains 
assertEq(agreementManager.getCollectorCount(), 1); - assertEq(agreementManager.getCollectors()[0], address(collector2)); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 0); - assertEq(agreementManager.getCollectorProviderCount(address(collector2)), 1); + assertEq(agreementManager.getCollectorAt(0), address(collector2)); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 0); + assertEq(agreementManager.getProviderCount(address(collector2)), 1); } function test_Cascade_ReconcileProvider_CollectorRetainsOtherProvider() public { @@ -229,24 +231,24 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage // Reconcile indexer's agreement — pair stays tracked during thaw _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); assertEq(agreementManager.getCollectorCount(), 1); assertEq( - agreementManager.getCollectorProviderCount(address(recurringCollector)), + agreementManager.getProviderCount(address(recurringCollector)), 2, "both providers tracked during thaw" ); assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer2), 1); - // After thaw period, reconcileCollectorProvider reconciles escrow and removes + // After thaw period, reconcileProvider reconciles escrow and removes vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); - agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + agreementManager.reconcileProvider(address(recurringCollector), indexer); // Now only indexer2 remains - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); - assertEq(agreementManager.getCollectorProviders(address(recurringCollector))[0], indexer2); + 
assertEq(agreementManager.getProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getProviderAt(address(recurringCollector), 0), indexer2); } // -- Tests: Re-addition after cleanup -- @@ -257,12 +259,12 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage // Reconcile agreement — pair stays tracked during escrow thaw _setAgreementCanceledBySP(id, rca); - agreementManager.reconcileAgreement(id); + agreementManager.reconcileAgreement(address(recurringCollector), id); assertEq(agreementManager.getCollectorCount(), 1, "stays tracked during thaw"); - // After thaw period, full cleanup via reconcileCollectorProvider + // After thaw period, full cleanup via reconcileProvider vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); - agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + agreementManager.reconcileProvider(address(recurringCollector), indexer); assertEq(agreementManager.getCollectorCount(), 0); // Re-add — sets repopulate @@ -270,31 +272,30 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage _offerAgreement(rca2); assertEq(agreementManager.getCollectorCount(), 1); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 1); assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } - // -- Tests: Revoke also cascades -- + // -- Tests: Cancel also cascades -- - function test_Cascade_RevokeOffer_DeferredCleanup() public { + function test_Cascade_CancelOffered_DeferredCleanup() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAForCollector(recurringCollector, 1); bytes16 id = _offerAgreement(rca); assertEq(agreementManager.getCollectorCount(), 1); - vm.prank(operator); - agreementManager.revokeOffer(id); + _cancelAgreement(id); // Agreement gone, but pair stays tracked 
during escrow thaw assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); assertEq(agreementManager.getCollectorCount(), 1, "stays tracked during thaw"); - // After thaw period, reconcileCollectorProvider reconciles escrow and removes + // After thaw period, reconcileProvider reconciles escrow and removes vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); - agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + agreementManager.reconcileProvider(address(recurringCollector), indexer); assertEq(agreementManager.getCollectorCount(), 0); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 0); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 0); } // -- Tests: Permissionless safety valve functions -- @@ -304,14 +305,14 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage _offerAgreement(rca); // Exists: pair has agreements - bool exists = agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + bool exists = agreementManager.reconcileProvider(address(recurringCollector), indexer); assertTrue(exists); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 1); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 1); } function test_ReconcileCollectorProvider_ReturnsFalse_WhenNotTracked() public { // Not exists: pair was never added - bool exists = agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + bool exists = agreementManager.reconcileProvider(address(recurringCollector), indexer); assertFalse(exists); } @@ -320,10 +321,10 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage bytes16 id = _offerAgreement(rca); _setAgreementCanceledBySP(id, rca); - agreementManager.reconcileAgreement(id); + agreementManager.reconcileAgreement(address(recurringCollector), id); // 
Exists: escrow still has pending thaw - bool exists = agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + bool exists = agreementManager.reconcileProvider(address(recurringCollector), indexer); assertTrue(exists); } @@ -332,18 +333,18 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage bytes16 id = _offerAgreement(rca); _setAgreementCanceledBySP(id, rca); - agreementManager.reconcileAgreement(id); + agreementManager.reconcileAgreement(address(recurringCollector), id); - // After thaw period, reconcileCollectorProvider reconciles escrow internally + // After thaw period, reconcileProvider reconciles escrow internally vm.warp(block.timestamp + paymentsEscrow.THAWING_PERIOD() + 1); - bool exists = agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + bool exists = agreementManager.reconcileProvider(address(recurringCollector), indexer); assertFalse(exists); } function test_ReconcileCollectorProvider_Permissionless() public { address anyone = makeAddr("anyone"); vm.prank(anyone); - bool exists = agreementManager.reconcileCollectorProvider(address(recurringCollector), indexer); + bool exists = agreementManager.reconcileProvider(address(recurringCollector), indexer); assertFalse(exists); } @@ -385,32 +386,24 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage // -- Tests: Pagination -- - function test_GetCollectors_Pagination() public { + function test_GetCollectors_Enumeration() public { (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForCollector(recurringCollector, 1); _offerAgreement(rca1); (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector(collector2, 2); _offerForCollector(collector2, rca2); - // Full list - address[] memory all = agreementManager.getCollectors(); - assertEq(all.length, 2); - - // Paginated - address[] memory first = agreementManager.getCollectors(0, 1); - 
assertEq(first.length, 1); - assertEq(first[0], all[0]); - - address[] memory second = agreementManager.getCollectors(1, 1); - assertEq(second.length, 1); - assertEq(second[0], all[1]); + // Full enumeration + assertEq(agreementManager.getCollectorCount(), 2); + address collector0 = agreementManager.getCollectorAt(0); + address collector1 = agreementManager.getCollectorAt(1); - // Past end - address[] memory empty = agreementManager.getCollectors(2, 1); - assertEq(empty.length, 0); + // Individual access by index + assertEq(agreementManager.getCollectorAt(0), collector0); + assertEq(agreementManager.getCollectorAt(1), collector1); } - function test_GetCollectorProviders_Pagination() public { + function test_GetCollectorProviders_Enumeration() public { address indexer2 = makeAddr("indexer2"); (IRecurringCollector.RecurringCollectionAgreement memory rca1, ) = _makeRCAForProvider(indexer, 1); @@ -419,14 +412,14 @@ contract RecurringAgreementManagerCascadeCleanupTest is RecurringAgreementManage (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForProvider(indexer2, 2); _offerAgreement(rca2); - // Full list - address[] memory all = agreementManager.getCollectorProviders(address(recurringCollector)); - assertEq(all.length, 2); + // Full enumeration + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 2); + address provider0 = agreementManager.getProviderAt(address(recurringCollector), 0); + address provider1 = agreementManager.getProviderAt(address(recurringCollector), 1); - // Paginated - address[] memory first = agreementManager.getCollectorProviders(address(recurringCollector), 0, 1); - assertEq(first.length, 1); - assertEq(first[0], all[0]); + // Individual access by index + assertEq(agreementManager.getProviderAt(address(recurringCollector), 0), provider0); + assertEq(agreementManager.getProviderAt(address(recurringCollector), 1), provider1); } /* solhint-enable graph/func-name-mixedcase */ diff --git 
a/packages/issuance/test/unit/agreement-manager/discovery.t.sol b/packages/issuance/test/unit/agreement-manager/discovery.t.sol new file mode 100644 index 000000000..94cf52007 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/discovery.t.sol @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Vm } from "forge-std/Vm.sol"; + +import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PROVIDER +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; + +/// @notice Tests for agreement discovery via reconcileAgreement when the RAM +/// has never been notified about the agreement (no prior offer/callback). +/// This covers scenarios like: +/// - RAM deployed after agreements already existed on the collector +/// - Collector state changed out-of-band (e.g. 
SP cancel via collector directly) +/// - Callback was missed or failed silently +contract RecurringAgreementManagerDiscoveryTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // ==================== Discovery via reconcileAgreement ==================== + + function test_Discovery_AcceptedAgreement_ViaReconcile() public { + // Set up an agreement directly on the mock collector — RAM never saw offer() + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + // Fund the RAM so escrow management works + token.mint(address(agreementManager), 1_000_000 ether); + + // RAM has no knowledge of this agreement + assertEq(agreementManager.getAgreementInfo(address(recurringCollector), agreementId).provider, address(0)); + + // reconcileAgreement should discover, register, and reconcile + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementAdded( + agreementId, + address(recurringCollector), + dataService, + indexer + ); + + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + + assertTrue(exists); + assertEq(agreementManager.getAgreementInfo(address(recurringCollector), agreementId).provider, indexer); + + uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; + assertEq( + agreementManager.getAgreementInfo(address(recurringCollector), agreementId).maxNextClaim, + expectedMaxClaim + ); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), expectedMaxClaim); + } + + function test_Discovery_CanceledBySP_ViaReconcile() public { + // Agreement was accepted and then SP-canceled before RAM ever learned about it + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + 
uint64(block.timestamp + 365 days) + ); + _setAgreementCanceledBySP(agreementId, rca); + + token.mint(address(agreementManager), 1_000_000 ether); + + // SP cancel → SETTLED → maxNextClaim = 0 → should discover then immediately remove + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementAdded( + agreementId, + address(recurringCollector), + dataService, + indexer + ); + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementRemoved(agreementId); + + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + + assertFalse(exists); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + } + + function test_Discovery_Idempotent_SecondReconcileNoReRegister() public { + // Set up and discover an agreement + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + token.mint(address(agreementManager), 1_000_000 ether); + + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + + // Second reconcile should NOT emit AgreementAdded again + vm.recordLogs(); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + + // Check no AgreementAdded was emitted + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 addedSig = IRecurringAgreementManagement.AgreementAdded.selector; + for (uint256 i = 0; i < logs.length; i++) { + assertTrue(logs[i].topics[0] != addedSig, "AgreementAdded should not be emitted on re-reconcile"); + } + } + + // ==================== Rejection scenarios ==================== + + function test_Discovery_RejectsUnknownAgreement() public { + // Reconcile a completely unknown agreement ID + bytes16 fakeId = bytes16(keccak256("nonexistent")); + + vm.expectEmit(address(agreementManager)); + emit 
IRecurringAgreementManagement.AgreementRejected( + fakeId, + address(recurringCollector), + IRecurringAgreementManagement.AgreementRejectionReason.UnknownAgreement + ); + + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), fakeId); + assertFalse(exists); + } + + function test_Discovery_RejectsUnauthorizedCollector() public { + // COLLECTOR_ROLE is required for discovery (first encounter). + // Once tracked, reconciliation proceeds regardless of role. + MockRecurringCollector rogue = new MockRecurringCollector(); + vm.label(address(rogue), "RogueCollector"); + + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + // Store agreement on the rogue collector + rogue.setAgreement( + agreementId, + _buildAgreementStorage(rca, REGISTERED | ACCEPTED, uint64(block.timestamp), 0, 0) + ); + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementRejected( + agreementId, + address(rogue), + IRecurringAgreementManagement.AgreementRejectionReason.UnauthorizedCollector + ); + + bool exists = agreementManager.reconcileAgreement(address(rogue), agreementId); + assertFalse(exists); + } + + function test_Discovery_RejectsPayerMismatch() public { + // Agreement where payer is NOT the RAM + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + // Override payer to some other address + MockRecurringCollector.AgreementStorage memory data = _buildAgreementStorage( + rca, + REGISTERED | ACCEPTED, + uint64(block.timestamp), + 0, + 0 + ); + data.payer = address(0xdead); + recurringCollector.setAgreement(agreementId, data); + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementRejected( + agreementId, + address(recurringCollector), + 
IRecurringAgreementManagement.AgreementRejectionReason.PayerMismatch + ); + + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + assertFalse(exists); + } + + function test_Discovery_RejectsUnauthorizedDataService() public { + // Agreement with a dataService that does NOT have DATA_SERVICE_ROLE + address rogueDataService = makeAddr("rogueDataService"); + + bytes16 agreementId = bytes16(keccak256("rogue-ds-agreement")); + + IRecurringCollector.RecurringCollectionAgreement memory rogueRca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + rogueRca.dataService = rogueDataService; + recurringCollector.setAgreement( + agreementId, + _buildAgreementStorage(rogueRca, REGISTERED | ACCEPTED, uint64(block.timestamp), 0, 0) + ); + + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementRejected( + agreementId, + address(recurringCollector), + IRecurringAgreementManagement.AgreementRejectionReason.UnauthorizedDataService + ); + + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + assertFalse(exists); + } + + // ==================== Out-of-band state changes ==================== + + function test_OutOfBand_AcceptedThenSPCancel_ReconcileRemoves() public { + // Offer via normal path (RAM tracks it) + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + + uint256 trackedMaxClaim = agreementManager + .getAgreementInfo(address(recurringCollector), agreementId) + .maxNextClaim; + assertTrue(trackedMaxClaim > 0, "Should be tracked after offer"); + + // SP cancels directly on collector (out-of-band, no callback to RAM) + _setAgreementCanceledBySP(agreementId, rca); + + // RAM still thinks it has the old maxNextClaim + assertEq( + 
agreementManager.getAgreementInfo(address(recurringCollector), agreementId).maxNextClaim, + trackedMaxClaim, + "RAM should still have stale maxNextClaim" + ); + + // Permissionless reconcile syncs the state + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementRemoved(agreementId); + + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + assertFalse(exists); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); + } + + function test_OutOfBand_CollectionReducesMaxClaim_ReconcileUpdates() public { + // Offer and accept via normal path + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + + uint256 preReconcileMax = agreementManager + .getAgreementInfo(address(recurringCollector), agreementId) + .maxNextClaim; + + // Simulate a collection happened out-of-band (lastCollectionAt advanced) + uint64 collectionTime = uint64(block.timestamp + 1800); + _setAgreementCollected(agreementId, rca, uint64(block.timestamp), collectionTime); + + // Warp to collection time so the mock's maxNextClaim reflects the collection + vm.warp(collectionTime); + + // Reconcile should update maxNextClaim (no more initialTokens, reduced window) + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + assertTrue(exists); + + uint256 postReconcileMax = agreementManager + .getAgreementInfo(address(recurringCollector), agreementId) + .maxNextClaim; + assertTrue(postReconcileMax < preReconcileMax, "maxNextClaim should decrease after collection"); + // After collection: no initialTokens, maxSeconds still 3600 → 1e18 * 3600 = 3600e18 + assertEq(postReconcileMax, 1 ether * 3600, "Should be ongoing-only after first collection"); + } + + // 
==================== Permissionless reconcile ==================== + + function test_Discovery_Permissionless() public { + // Anyone can call reconcileAgreement — no role required + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + token.mint(address(agreementManager), 1_000_000 ether); + + address randomUser = makeAddr("randomUser"); + vm.prank(randomUser); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + assertTrue(exists); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/edgeCases.t.sol b/packages/issuance/test/unit/agreement-manager/edgeCases.t.sol index c08476ff9..410b8b7d1 100644 --- a/packages/issuance/test/unit/agreement-manager/edgeCases.t.sol +++ b/packages/issuance/test/unit/agreement-manager/edgeCases.t.sol @@ -5,19 +5,33 @@ import { Vm } from "forge-std/Vm.sol"; import { IERC165 } from "@openzeppelin/contracts/utils/introspection/IERC165.sol"; -import { IAgreementOwner } from "@graphprotocol/interfaces/contracts/horizon/IAgreementOwner.sol"; import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { + REGISTERED, + ACCEPTED, + OFFER_TYPE_NEW +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from 
"@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; /// @notice Edge case and boundary condition tests for RecurringAgreementManager. contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerSharedTest { /* solhint-disable graph/func-name-mixedcase */ + // -- Helpers -- + + function _getProviderAgreements(address provider) internal view returns (bytes16[] memory result) { + uint256 count = agreementManager.getPairAgreementCount(address(recurringCollector), provider); + result = new bytes16[](count); + for (uint256 i = 0; i < count; ++i) + result[i] = agreementManager.getPairAgreementAt(address(recurringCollector), provider, i); + } + // ==================== supportsInterface Fallback ==================== function test_SupportsInterface_UnknownInterfaceReturnsFalse() public view { @@ -31,57 +45,12 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar assertTrue(agreementManager.supportsInterface(type(IERC165).interfaceId)); } - // ==================== Cancel with Invalid Data Service ==================== - - function test_CancelAgreement_Revert_WhenDataServiceHasNoCode() public { - // Use an EOA as dataService so ds.code.length == 0 (line 255) - address eoa = makeAddr("eoa-data-service"); - - IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( - 100 ether, - 1 ether, - 60, - 3600, - uint64(block.timestamp + 365 days) - ); - rca.dataService = eoa; - - // Grant DATA_SERVICE_ROLE so the offer goes through - vm.prank(governor); - agreementManager.grantRole(DATA_SERVICE_ROLE, eoa); - - token.mint(address(agreementManager), 1_000_000 ether); - vm.prank(operator); - bytes16 agreementId = agreementManager.offerAgreement(rca, _collector()); - - // Set as Accepted so it takes the cancel-via-dataService path - recurringCollector.setAgreement( - 
agreementId, - IRecurringCollector.AgreementData({ - dataService: eoa, - payer: address(agreementManager), - serviceProvider: indexer, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: rca.endsAt, - maxInitialTokens: rca.maxInitialTokens, - maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, - minSecondsPerCollection: rca.minSecondsPerCollection, - maxSecondsPerCollection: rca.maxSecondsPerCollection, - updateNonce: 0, - canceledAt: 0, - state: IRecurringCollector.AgreementState.Accepted - }) - ); - - vm.expectRevert(abi.encodeWithSelector(IRecurringAgreementManagement.InvalidDataService.selector, eoa)); - vm.prank(operator); - agreementManager.cancelAgreement(agreementId); - } + // NOTE: test_CancelAgreement_Revert_WhenDataServiceHasNoCode removed — + // cancelAgreement now calls collector.cancel() directly, no data service interaction. // ==================== Hash Cleanup Tests ==================== - function test_RevokeOffer_CleansUpAgreementHash() public { + function test_CancelOffered_CleansUpAgreement() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -91,19 +60,18 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar ); bytes16 agreementId = _offerAgreement(rca); - bytes32 rcaHash = recurringCollector.hashRCA(rca); - // Hash is authorized - assertEq(agreementManager.approveAgreement(rcaHash), IAgreementOwner.approveAgreement.selector); + // Agreement is tracked + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); - vm.prank(operator); - agreementManager.revokeOffer(agreementId); + _cancelAgreement(agreementId); - // Hash is cleaned up (not just stale — actually deleted) - assertEq(agreementManager.approveAgreement(rcaHash), bytes4(0)); + // Agreement is cleaned up + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); + 
assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } - function test_RevokeOffer_CleansUpPendingUpdateHash() public { + function test_CancelOffered_CleansUpPendingUpdate() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -125,18 +93,14 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar ); _offerAgreementUpdate(rcau); - bytes32 updateHash = recurringCollector.hashRCAU(rcau); - // Update hash is authorized - assertEq(agreementManager.approveAgreement(updateHash), IAgreementOwner.approveAgreement.selector); + _cancelAgreement(agreementId); - vm.prank(operator); - agreementManager.revokeOffer(agreementId); - - // Both hashes cleaned up - assertEq(agreementManager.approveAgreement(updateHash), bytes4(0)); + // Agreement and pending update fully cleaned up + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } - function test_Remove_CleansUpAgreementHash() public { + function test_Remove_CleansUpAgreement() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -146,17 +110,17 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar ); bytes16 agreementId = _offerAgreement(rca); - bytes32 rcaHash = recurringCollector.hashRCA(rca); // SP cancels — removable _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); - // Hash is cleaned up - assertEq(agreementManager.approveAgreement(rcaHash), bytes4(0)); + // Agreement is fully cleaned up + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } - function test_Remove_CleansUpPendingUpdateHash() 
public { + function test_Remove_CleansUpPendingUpdate() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -178,17 +142,16 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar ); _offerAgreementUpdate(rcau); - bytes32 updateHash = recurringCollector.hashRCAU(rcau); - // SP cancels — removable _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); - // Pending update hash also cleaned up - assertEq(agreementManager.approveAgreement(updateHash), bytes4(0)); + // Agreement and pending update fully cleaned up + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } - function test_Reconcile_CleansUpAppliedPendingUpdateHash() public { + function test_Reconcile_ClearsAppliedPendingUpdate() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -210,36 +173,33 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar ); _offerAgreementUpdate(rcau); - bytes32 updateHash = recurringCollector.hashRCAU(rcau); - assertEq(agreementManager.approveAgreement(updateHash), IAgreementOwner.approveAgreement.selector); + // Pending update is tracked on the collector - // Simulate: agreement accepted with pending <= updateNonce (update was applied) - recurringCollector.setAgreement( - agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: uint64(block.timestamp + 730 days), - maxInitialTokens: 200 ether, - maxOngoingTokensPerSecond: 2 ether, - minSecondsPerCollection: 60, - maxSecondsPerCollection: 7200, - updateNonce: 1, // (pending <=) - 
canceledAt: 0, - state: IRecurringCollector.AgreementState.Accepted - }) + // Simulate: agreement accepted with update applied (pending terms cleared on collector) + IRecurringCollector.RecurringCollectionAgreement memory updatedRca = _makeRCA( + 200 ether, 2 ether, 60, 7200, uint64(block.timestamp + 730 days) ); + updatedRca.payer = rca.payer; + updatedRca.dataService = rca.dataService; + updatedRca.serviceProvider = rca.serviceProvider; + MockRecurringCollector.AgreementStorage memory data = _buildAgreementStorage( + updatedRca, REGISTERED | ACCEPTED, uint64(block.timestamp), 0, 0 + ); + data.updateNonce = 1; + recurringCollector.setAgreement(agreementId, data); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); - // Pending update hash should be cleaned up after reconcile clears the applied update - assertEq(agreementManager.approveAgreement(updateHash), bytes4(0)); + // After reconcile, maxNextClaim is recalculated from the new active terms + IRecurringAgreements.AgreementInfo memory infoAfter = agreementManager.getAgreementInfo( + address(recurringCollector), + agreementId + ); + // maxNextClaim = 2e18 * 7200 + 200e18 = 14600e18 + assertEq(infoAfter.maxNextClaim, 14600 ether); } - function test_OfferUpdate_CleansUpReplacedPendingHash() public { + function test_OfferUpdate_ReplacesExistingPendingOnCollector() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -262,10 +222,17 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar ); _offerAgreementUpdate(rcau1); - bytes32 hash1 = recurringCollector.hashRCAU(rcau1); - assertEq(agreementManager.approveAgreement(hash1), IAgreementOwner.approveAgreement.selector); + // max(current=3700, pending=14600) = 14600 + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 14600 ether); + + // Cancel pending update clears pending terms on the 
collector — sum drops to active-only + _cancelPendingUpdate(agreementId); - // Second pending update replaces first (same nonce — collector hasn't accepted either) + // Sum drops to active-only (3700) since pending was cleared + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim); + + // Collector's updateNonce is still 1, so next valid nonce is 2. IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( agreementId, 50 ether, @@ -273,50 +240,12 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar 60, 1800, uint64(block.timestamp + 180 days), - 1 + 2 ); _offerAgreementUpdate(rcau2); - // First update hash should be cleaned up - assertEq(agreementManager.approveAgreement(hash1), bytes4(0)); - - // Second update hash should be authorized - bytes32 hash2 = recurringCollector.hashRCAU(rcau2); - assertEq(agreementManager.approveAgreement(hash2), IAgreementOwner.approveAgreement.selector); - } - - function test_GetAgreementInfo_IncludesHashes() public { - IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( - 100 ether, - 1 ether, - 60, - 3600, - uint64(block.timestamp + 365 days) - ); - - bytes16 agreementId = _offerAgreement(rca); - bytes32 rcaHash = recurringCollector.hashRCA(rca); - - IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); - assertEq(info.agreementHash, rcaHash); - assertEq(info.pendingUpdateHash, bytes32(0)); - - // Offer an update - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( - agreementId, - 200 ether, - 2 ether, - 60, - 7200, - uint64(block.timestamp + 730 days), - 1 - ); - _offerAgreementUpdate(rcau); - - bytes32 updateHash = recurringCollector.hashRCAU(rcau); - info = agreementManager.getAgreementInfo(agreementId); - assertEq(info.agreementHash, rcaHash); - assertEq(info.pendingUpdateHash, updateHash); + 
// max(current=3700, pending=950) = 3700 (current dominates) + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 3700 ether); } // ==================== Zero-Value Parameter Tests ==================== @@ -334,7 +263,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar // maxNextClaim = 1e18 * 3600 + 0 = 3600e18 uint256 expectedMaxClaim = 1 ether * 3600; - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), expectedMaxClaim); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), expectedMaxClaim); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), expectedMaxClaim); } @@ -350,7 +279,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar bytes16 agreementId = _offerAgreement(rca); // maxNextClaim = 0 * 3600 + 100e18 = 100e18 - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 100 ether); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 100 ether); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 100 ether); } @@ -365,10 +294,10 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar bytes16 agreementId = _offerAgreement(rca); - // maxNextClaim = 0 * 0 + 0 = 0 - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + // maxNextClaim = 0 * 0 + 0 = 0 — immediately cleaned up + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 0); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } // ==================== Deadline Boundary Tests ==================== @@ -391,9 +320,9 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar // At deadline 
(block.timestamp == deadline), the condition is `block.timestamp <= info.deadline` // so this should still be claimable - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertTrue(exists); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } function test_Remove_OneSecondAfterDeadline_NotAccepted() public { @@ -411,9 +340,10 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar // Warp to one second past deadline vm.warp(deadline + 1); - // Now removable (deadline < block.timestamp) - agreementManager.reconcileAgreement(agreementId); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + // Now removable (deadline < block.timestamp → getMaxNextClaim returns 0) + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + assertFalse(exists); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } // ==================== Reconcile Edge Cases ==================== @@ -431,55 +361,19 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar uint64 now_ = uint64(block.timestamp); // Set as accepted with lastCollectionAt == endsAt (fully consumed) - recurringCollector.setAgreement( - agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: now_, - lastCollectionAt: rca.endsAt, - endsAt: rca.endsAt, - maxInitialTokens: rca.maxInitialTokens, - maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, - minSecondsPerCollection: rca.minSecondsPerCollection, - maxSecondsPerCollection: rca.maxSecondsPerCollection, - updateNonce: 0, - canceledAt: 0, - state: IRecurringCollector.AgreementState.Accepted - }) - ); + 
_setAgreementCollected(agreementId, rca, now_, rca.endsAt); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); // getMaxNextClaim returns 0 when collectionEnd <= collectionStart - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 0); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } // ==================== Cancel Edge Cases ==================== - function test_CancelAgreement_Revert_WhenDataServiceReverts() public { - IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( - 100 ether, - 1 ether, - 60, - 3600, - uint64(block.timestamp + 365 days) - ); - - bytes16 agreementId = _offerAgreement(rca); - - // Set as accepted - _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); - - // Configure the mock SubgraphService to revert - mockSubgraphService.setRevert(true, "SubgraphService: cannot cancel"); - - vm.expectRevert("SubgraphService: cannot cancel"); - vm.prank(operator); - agreementManager.cancelAgreement(agreementId); - } + // NOTE: test_CancelAgreement_Revert_WhenDataServiceReverts removed — + // cancelAgreement now calls collector.cancel() directly, no data service interaction. 
// ==================== Offer With Zero Balance Tests ==================== @@ -494,12 +388,12 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar // Don't fund the contract — zero token balance vm.prank(operator); - bytes16 agreementId = agreementManager.offerAgreement(rca, _collector()); + bytes16 agreementId = agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); uint256 maxClaim = 1 ether * 3600 + 100 ether; // Agreement is tracked even though escrow couldn't be funded - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), maxClaim); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), maxClaim); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); // Escrow has zero balance @@ -565,7 +459,8 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar ids[2] = id3; // Should succeed without error — _fundEscrow is idempotent - agreementHelper.reconcileBatch(ids); + for (uint256 i = 0; i < ids.length; ++i) + agreementManager.reconcileAgreement(address(recurringCollector), ids[i]); // All reconciled to 0 assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); @@ -575,7 +470,8 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar function test_ReconcileBatch_EmptyArray() public { // Empty batch should succeed with no effect bytes16[] memory ids = new bytes16[](0); - agreementHelper.reconcileBatch(ids); + for (uint256 i = 0; i < ids.length; ++i) + agreementManager.reconcileAgreement(address(recurringCollector), ids[i]); } function test_ReconcileBatch_NonExistentAgreements() public { @@ -584,7 +480,8 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar ids[0] = bytes16(keccak256("nonexistent1")); ids[1] = bytes16(keccak256("nonexistent2")); - agreementHelper.reconcileBatch(ids); + for (uint256 i = 0; i < ids.length; ++i) + 
agreementManager.reconcileAgreement(address(recurringCollector), ids[i]); } // ==================== UpdateEscrow Edge Cases ==================== @@ -602,26 +499,26 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar // Remove the agreement _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); - // First reconcileCollectorProvider: initiates thaw - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + // First reconcileProvider: initiates thaw + agreementManager.reconcileProvider(address(_collector()), indexer); // Warp past mock's thawing period (1 day) vm.warp(block.timestamp + 1 days + 1); - // Second reconcileCollectorProvider: withdraws thawed tokens, then no more to thaw - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + // Second reconcileProvider: withdraws thawed tokens, then no more to thaw + agreementManager.reconcileProvider(address(_collector()), indexer); - // Third reconcileCollectorProvider: should be a no-op (nothing to thaw or withdraw) - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + // Third reconcileProvider: should be a no-op (nothing to thaw or withdraw) + agreementManager.reconcileProvider(address(_collector()), indexer); } // ==================== Multiple Pending Update Replacements ==================== // ==================== Zero-Value Pending Update Hash Cleanup ==================== - function test_OfferUpdate_ZeroValuePendingUpdate_HashCleanedOnReplace() public { + function test_OfferUpdate_ZeroValuePendingUpdate_ReplacedByNonZero() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -645,13 +542,13 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar ); _offerAgreementUpdate(rcau1); - bytes32 zeroHash = 
recurringCollector.hashRCAU(rcau1); - // Zero-value hash should still be authorized - assertEq(agreementManager.approveAgreement(zeroHash), IAgreementOwner.approveAgreement.selector); // sumMaxNextClaim should be unchanged (original + 0) assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim); - // Replace with a non-zero update (same nonce — collector hasn't accepted either) + // Cancel pending update and replace with a non-zero update + _cancelPendingUpdate(agreementId); + + // Collector's updateNonce is now 1, so next nonce must be 2 IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( agreementId, 200 ether, @@ -659,19 +556,13 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar 60, 7200, uint64(block.timestamp + 730 days), - 1 + 2 ); _offerAgreementUpdate(rcau2); - // Old zero-value hash should be cleaned up - assertEq(agreementManager.approveAgreement(zeroHash), bytes4(0)); - - // New hash should be authorized - bytes32 newHash = recurringCollector.hashRCAU(rcau2); - assertEq(agreementManager.approveAgreement(newHash), IAgreementOwner.approveAgreement.selector); - - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + // max(current, pending) = max(3700, 14600) = 14600 + uint256 pendingMaxClaim = 14600 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), pendingMaxClaim); } function test_Reconcile_ZeroValuePendingUpdate_ClearedWhenApplied() public { @@ -697,39 +588,27 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar ); _offerAgreementUpdate(rcau); - bytes32 zeroHash = recurringCollector.hashRCAU(rcau); - assertEq(agreementManager.approveAgreement(zeroHash), IAgreementOwner.approveAgreement.selector); - - // Simulate: agreement accepted with update applied (pending nonce <= updateNonce) - 
recurringCollector.setAgreement( - agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: uint64(block.timestamp + 730 days), - maxInitialTokens: 0, - maxOngoingTokensPerSecond: 0, - minSecondsPerCollection: 60, - maxSecondsPerCollection: 3600, - updateNonce: 1, - canceledAt: 0, - state: IRecurringCollector.AgreementState.Accepted - }) + // Simulate: agreement accepted with update applied (pending terms cleared on collector) + IRecurringCollector.RecurringCollectionAgreement memory updatedRca = _makeRCA( + 0, 0, 60, 3600, uint64(block.timestamp + 730 days) ); + updatedRca.payer = rca.payer; + updatedRca.dataService = rca.dataService; + updatedRca.serviceProvider = rca.serviceProvider; + MockRecurringCollector.AgreementStorage memory data = _buildAgreementStorage( + updatedRca, REGISTERED | ACCEPTED, uint64(block.timestamp), 0, 0 + ); + data.updateNonce = 1; + recurringCollector.setAgreement(agreementId, data); - agreementManager.reconcileAgreement(agreementId); - - // Zero-value pending hash should be cleaned up - assertEq(agreementManager.approveAgreement(zeroHash), bytes4(0)); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); - // Pending fields should be cleared - IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); - assertEq(info.pendingUpdateMaxNextClaim, 0); - assertEq(info.pendingUpdateNonce, 0); - assertEq(info.pendingUpdateHash, bytes32(0)); + // maxNextClaim should reflect the new (zero-value) active terms + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo( + address(recurringCollector), + agreementId + ); + assertEq(info.maxNextClaim, 0); } // ==================== Re-offer After Remove ==================== @@ -747,29 +626,28 @@ contract RecurringAgreementManagerEdgeCasesTest is 
RecurringAgreementManagerShar bytes16 agreementId = _offerAgreement(rca); uint256 maxClaim = 1 ether * 3600 + 100 ether; assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); // 2. SP cancels and remove _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); // 3. Re-offer the same agreement (same parameters, same agreementId) bytes16 reofferedId = _offerAgreement(rca); assertEq(reofferedId, agreementId); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); // 4. 
Verify the re-offered agreement is fully functional - IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(reofferedId); + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo( + address(recurringCollector), + reofferedId + ); assertTrue(info.provider != address(0)); assertEq(info.provider, indexer); assertEq(info.maxNextClaim, maxClaim); - - // Hash is authorized again - bytes32 rcaHash = recurringCollector.hashRCA(rca); - assertEq(agreementManager.approveAgreement(rcaHash), IAgreementOwner.approveAgreement.selector); } function test_ReofferAfterRemove_WithDifferentNonce() public { @@ -786,7 +664,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar // Remove _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); // Re-offer with different nonce (different agreementId) IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( @@ -803,7 +681,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar uint256 maxClaim2 = 2 ether * 7200 + 200 ether; assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim2); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } // ==================== Input Validation ==================== @@ -821,7 +699,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar token.mint(address(agreementManager), 1_000_000 ether); vm.expectRevert(IRecurringAgreementManagement.ServiceProviderZeroAddress.selector); vm.prank(operator); - agreementManager.offerAgreement(rca, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); } function test_Offer_Revert_ZeroDataService() public { @@ -839,13 +717,13 @@ contract 
RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar abi.encodeWithSelector(IRecurringAgreementManagement.UnauthorizedDataService.selector, address(0)) ); vm.prank(operator); - agreementManager.offerAgreement(rca, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); } // ==================== getProviderAgreements ==================== function test_GetIndexerAgreements_Empty() public { - bytes16[] memory ids = agreementManager.getProviderAgreements(indexer); + bytes16[] memory ids = _getProviderAgreements(indexer); assertEq(ids.length, 0); } @@ -860,7 +738,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar bytes16 agreementId = _offerAgreement(rca); - bytes16[] memory ids = agreementManager.getProviderAgreements(indexer); + bytes16[] memory ids = _getProviderAgreements(indexer); assertEq(ids.length, 1); assertEq(ids[0], agreementId); } @@ -887,7 +765,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar bytes16 id1 = _offerAgreement(rca1); bytes16 id2 = _offerAgreement(rca2); - bytes16[] memory ids = agreementManager.getProviderAgreements(indexer); + bytes16[] memory ids = _getProviderAgreements(indexer); assertEq(ids.length, 2); // EnumerableSet maintains insertion order assertEq(ids[0], id1); @@ -918,9 +796,9 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar // Remove first agreement _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); - bytes16[] memory ids = agreementManager.getProviderAgreements(indexer); + bytes16[] memory ids = _getProviderAgreements(indexer); assertEq(ids.length, 1); assertEq(ids[0], id2); } @@ -950,8 +828,8 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar bytes16 id1 = _offerAgreement(rca1); bytes16 id2 = _offerAgreement(rca2); - bytes16[] 
memory indexer1Ids = agreementManager.getProviderAgreements(indexer); - bytes16[] memory indexer2Ids = agreementManager.getProviderAgreements(indexer2); + bytes16[] memory indexer1Ids = _getProviderAgreements(indexer); + bytes16[] memory indexer2Ids = _getProviderAgreements(indexer2); assertEq(indexer1Ids.length, 1); assertEq(indexer1Ids[0], id1); @@ -959,7 +837,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar assertEq(indexer2Ids[0], id2); } - function test_GetIndexerAgreements_Paginated() public { + function test_GetIndexerAgreements_Enumeration() public { IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( 100 ether, 1 ether, @@ -981,21 +859,12 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar bytes16 id1 = _offerAgreement(rca1); bytes16 id2 = _offerAgreement(rca2); - // Full range returns both - bytes16[] memory all = agreementManager.getProviderAgreements(indexer, 0, 10); - assertEq(all.length, 2); - assertEq(all[0], id1); - assertEq(all[1], id2); - - // Offset skips first - bytes16[] memory fromOne = agreementManager.getProviderAgreements(indexer, 1, 10); - assertEq(fromOne.length, 1); - assertEq(fromOne[0], id2); - - // Count limits result - bytes16[] memory firstOnly = agreementManager.getProviderAgreements(indexer, 0, 1); - assertEq(firstOnly.length, 1); - assertEq(firstOnly[0], id1); + // Count returns total + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 2); + + // Individual access by index + assertEq(agreementManager.getPairAgreementAt(address(recurringCollector), indexer, 0), id1); + assertEq(agreementManager.getPairAgreementAt(address(recurringCollector), indexer, 1), id2); } // ==================== Withdraw Timing Boundary (Issue 1) ==================== @@ -1016,7 +885,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar // SP cancels — reconcile triggers thaw 
_setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); IPaymentsEscrow.EscrowAccount memory accountBeforeWarp; ( @@ -1033,7 +902,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar // Record logs to verify no EscrowWithdrawn event vm.recordLogs(); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); Vm.Log[] memory entries = vm.getRecordedLogs(); bytes32 withdrawSig = keccak256("EscrowWithdrawn(address,address,uint256)"); @@ -1065,7 +934,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar uint256 maxClaim = 1 ether * 3600 + 100 ether; _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); (, , uint256 thawEnd) = paymentsEscrow.escrowAccounts( address(agreementManager), @@ -1079,7 +948,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar vm.expectEmit(address(agreementManager)); emit IRecurringEscrowManagement.EscrowWithdrawn(indexer, address(recurringCollector), maxClaim); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); // Escrow should be empty (uint256 finalBalance, , ) = paymentsEscrow.escrowAccounts( @@ -1123,13 +992,12 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar vm.prank(address(recurringCollector)); agreementManager.beforeCollection(agreementId, escrowBalance); - // tempJit must NOT be set — there is no deficit - assertFalse(agreementManager.isTempJit(), "No tempJit when escrow exactly covers collection"); + // No deficit — collection should succeed without issue } // 
==================== Cancel Event Behavior ==================== - function test_CancelAgreement_NoEvent_WhenAlreadyCanceled() public { + function test_CancelAgreement_AlreadyCanceled_StillForwards() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -1143,20 +1011,11 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar // Set as already CanceledByServiceProvider _setAgreementCanceledBySP(agreementId, rca); - // Record logs to verify no AgreementCanceled event - vm.recordLogs(); + // cancelAgreement always forwards to collector — no idempotent skip + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; vm.prank(operator); - agreementManager.cancelAgreement(agreementId); - - // Check that no AgreementCanceled event was emitted - Vm.Log[] memory entries = vm.getRecordedLogs(); - bytes32 cancelEventSig = keccak256("AgreementCanceled(bytes16,address)"); - for (uint256 i = 0; i < entries.length; i++) { - assertTrue( - entries[i].topics[0] != cancelEventSig, - "AgreementCanceled should not be emitted on idempotent path" - ); - } + agreementManager.cancelAgreement(address(recurringCollector), agreementId, activeHash, 0); + // Verify it doesn't revert — collector handles already-canceled state } function test_CancelAgreement_EmitsEvent_WhenAccepted() public { @@ -1171,16 +1030,19 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar bytes16 agreementId = _offerAgreement(rca); _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + + // cancelAgreement triggers the callback which reconciles — expect AgreementRemoved vm.expectEmit(address(agreementManager)); - emit IRecurringAgreementManagement.AgreementCanceled(agreementId, indexer); + emit IRecurringAgreementManagement.AgreementRemoved(agreementId); vm.prank(operator); - 
agreementManager.cancelAgreement(agreementId); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, activeHash, 0); } // ==================== Multiple Pending Update Replacements ==================== - function test_OfferUpdate_ThreeConsecutiveReplacements() public { + function test_OfferUpdate_ThreeConsecutiveUpdates() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -1192,7 +1054,7 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar bytes16 agreementId = _offerAgreement(rca); uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; - // Update 1 + // Update 1 (nonce=1) IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( agreementId, 200 ether, @@ -1203,10 +1065,14 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar 1 ); _offerAgreementUpdate(rcau1); - uint256 pending1 = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pending1); + // max(current, pending) = max(3700, 14600) = 14600 + uint256 pending1 = 14600 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), pending1); + + // Cancel pending update clears pending on collector, sum drops to active-only + _cancelPendingUpdate(agreementId); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim); - // Update 2 replaces 1 (same nonce — collector hasn't accepted either) IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( agreementId, 50 ether, @@ -1214,13 +1080,15 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar 60, 1800, uint64(block.timestamp + 180 days), - 1 + 2 ); _offerAgreementUpdate(rcau2); - uint256 pending2 = 0.5 ether * 1800 + 50 ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pending2); + // max(current, 
pending) = max(3700, 950) = 3700 (current dominates) + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim); + + // Cancel pending update 2 and offer update 3 (nonce=3) + _cancelPendingUpdate(agreementId); - // Update 3 replaces 2 (same nonce — collector still hasn't accepted) IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau3 = _makeRCAU( agreementId, 300 ether, @@ -1228,39 +1096,11 @@ contract RecurringAgreementManagerEdgeCasesTest is RecurringAgreementManagerShar 60, 3600, uint64(block.timestamp + 1095 days), - 1 + 3 ); _offerAgreementUpdate(rcau3); - uint256 pending3 = 3 ether * 3600 + 300 ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pending3); - - // Only hash for update 3 should be authorized - bytes32 hash1 = recurringCollector.hashRCAU(rcau1); - bytes32 hash2 = recurringCollector.hashRCAU(rcau2); - bytes32 hash3 = recurringCollector.hashRCAU(rcau3); - - assertEq(agreementManager.approveAgreement(hash1), bytes4(0)); - assertEq(agreementManager.approveAgreement(hash2), bytes4(0)); - assertEq(agreementManager.approveAgreement(hash3), IAgreementOwner.approveAgreement.selector); + // max(current, pending) = max(3700, 11100) = 11100 + uint256 pending3 = 11100 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), pending3); } - - // ==================== setTempJit No-Op ==================== - - function test_SetTempJit_NoopWhenAlreadyFalse() public { - // Default tempJit is false; setting false again should early-return with no event - vm.recordLogs(); - vm.prank(operator); - agreementManager.setTempJit(false); - - Vm.Log[] memory logs = vm.getRecordedLogs(); - for (uint256 i = 0; i < logs.length; i++) { - assertTrue( - logs[i].topics[0] != IRecurringEscrowManagement.TempJitSet.selector, - "TempJitSet should not be emitted" - ); - } - assertFalse(agreementManager.isTempJit()); - } - - /* solhint-enable graph/func-name-mixedcase */ } diff 
--git a/packages/issuance/test/unit/agreement-manager/ensureDistributed.t.sol b/packages/issuance/test/unit/agreement-manager/ensureDistributed.t.sol index 042deb976..34e8467af 100644 --- a/packages/issuance/test/unit/agreement-manager/ensureDistributed.t.sol +++ b/packages/issuance/test/unit/agreement-manager/ensureDistributed.t.sol @@ -112,20 +112,18 @@ contract RecurringAgreementManagerEnsureDistributedTest is RecurringAgreementMan token.transfer(address(1), freeBalance); assertEq(token.balanceOf(address(agreementManager)), 0); - // Configure allocator to mint enough to cover the deficit + // Configure allocator to mint enough to cover the deficit plus 50% of sumMaxNextClaimAll reserve uint256 deficit = 500 ether; - mockAllocator.setMintPerDistribution(deficit + 1 ether); + uint256 reserve = agreementManager.getSumMaxNextClaimAll(); // >= 50% threshold + mockAllocator.setMintPerDistribution(deficit + reserve); // Advance block so distribution actually mints vm.roll(block.number + 1); - // Without distribution, this would trigger tempJit (balance=0, deficit=500). - // With distribution, the allocator mints tokens first, so JIT deposit succeeds. + // Without distribution, balance would be 0. With distribution, the allocator mints + // tokens first, so JIT deposit succeeds. 
vm.prank(address(recurringCollector)); agreementManager.beforeCollection(agreementId, escrowBalance + deficit); - - // tempJit should NOT be active — distribution provided funds - assertFalse(agreementManager.isTempJit(), "tempJit should not be set when distribution provides funds"); } function test_BeforeCollection_SkipsDistributeWhenEscrowSufficient() public { @@ -227,9 +225,9 @@ contract RecurringAgreementManagerEnsureDistributedTest is RecurringAgreementMan agreementManager.beforeCollection(agreementId, escrowBalance + 500 ether); } - // ==================== uint64 wrap ==================== + // ==================== uint32 wrap ==================== - function test_EnsureDistributed_WorksAcrossUint64Boundary() public { + function test_EnsureDistributed_WorksAcrossUint32Boundary() public { // Use afterCollection path which always reaches _updateEscrow → _ensureIncomingDistributionToCurrentBlock, // regardless of escrow balance (unlike beforeCollection which has an early return). (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( @@ -245,31 +243,31 @@ contract RecurringAgreementManagerEnsureDistributedTest is RecurringAgreementMan uint256 countBefore = mockAllocator.distributeCallCount(); - // Jump to uint64 max - vm.roll(type(uint64).max); + // Jump to uint32 max + vm.roll(type(uint32).max); vm.prank(address(recurringCollector)); agreementManager.afterCollection(agreementId, 0); - assertGt(mockAllocator.distributeCallCount(), countBefore, "should distribute at uint64.max"); + assertGt(mockAllocator.distributeCallCount(), countBefore, "should distribute at uint32.max"); uint256 countAtMax = mockAllocator.distributeCallCount(); - // Cross the boundary: uint64.max + 1 wraps to 0 in uint64. - // ensuredIncomingDistributedToBlock is uint64.max from the previous call, so no false match. - vm.roll(uint256(type(uint64).max) + 1); + // Cross the boundary: uint32.max + 1 wraps to 0 in uint32. 
+ // ensuredIncomingDistributedToBlock is uint32.max from the previous call, so no false match. + vm.roll(uint256(type(uint32).max) + 1); vm.prank(address(recurringCollector)); agreementManager.afterCollection(agreementId, 0); - assertGt(mockAllocator.distributeCallCount(), countAtMax, "should distribute after uint64 wrap to 0"); + assertGt(mockAllocator.distributeCallCount(), countAtMax, "should distribute after uint32 wrap to 0"); uint256 countAfterWrap = mockAllocator.distributeCallCount(); // Next block after wrap (wraps to 1) also works - vm.roll(uint256(type(uint64).max) + 2); + vm.roll(uint256(type(uint32).max) + 2); vm.prank(address(recurringCollector)); agreementManager.afterCollection(agreementId, 0); assertGt(mockAllocator.distributeCallCount(), countAfterWrap, "should distribute on block after wrap"); } - function test_EnsureDistributed_SameBlockDedup_AtUint64Boundary() public { + function test_EnsureDistributed_SameBlockDedup_AtUint32Boundary() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -280,7 +278,7 @@ contract RecurringAgreementManagerEnsureDistributedTest is RecurringAgreementMan token.mint(address(agreementManager), 10_000 ether); // Jump past the boundary - vm.roll(uint256(type(uint64).max) + 3); + vm.roll(uint256(type(uint32).max) + 3); (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( address(agreementManager), address(recurringCollector), @@ -308,7 +306,9 @@ contract RecurringAgreementManagerEnsureDistributedTest is RecurringAgreementMan // Deploy a contract that doesn't support ERC165 address notAllocator = address(new NoERC165Contract()); vm.prank(governor); - vm.expectRevert(abi.encodeWithSelector(RecurringAgreementManager.InvalidIssuanceAllocator.selector, notAllocator)); + vm.expectRevert( + abi.encodeWithSelector(RecurringAgreementManager.InvalidIssuanceAllocator.selector, notAllocator) + ); agreementManager.setIssuanceAllocator(notAllocator); } diff --git 
a/packages/issuance/test/unit/agreement-manager/escrowEdgeCases.t.sol b/packages/issuance/test/unit/agreement-manager/escrowEdgeCases.t.sol new file mode 100644 index 000000000..f8b3569a8 --- /dev/null +++ b/packages/issuance/test/unit/agreement-manager/escrowEdgeCases.t.sol @@ -0,0 +1,421 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PROVIDER, + OFFER_TYPE_NEW +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { IProviderEligibility } from "@graphprotocol/interfaces/contracts/issuance/eligibility/IProviderEligibility.sol"; + +import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockEligibilityOracle } from "./mocks/MockEligibilityOracle.sol"; + +/// @notice Edge case tests for escrow lifecycle, basis degradation, and cross-provider isolation. 
+/// Covers audit gaps: +/// - REGISTERED-only agreement aging and cleanup (audit gap 6) +/// - Basis degradation when RAM balance is insufficient (audit gap 12) +/// - Cross-provider escrow tracking isolation (audit gap 13) +/// - Eligibility oracle toggle during active agreement (audit gap 16) +contract RecurringAgreementManagerEscrowEdgeCasesTest is RecurringAgreementManagerSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + address internal indexer2; + + function setUp() public override { + super.setUp(); + indexer2 = makeAddr("indexer2"); + } + + // -- Helpers -- + + function _makeRCAForIndexer( + address sp, + uint256 maxInitial, + uint256 maxOngoing, + uint32 maxSec, + uint256 nonce + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory) { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + maxInitial, + maxOngoing, + 60, + maxSec, + uint64(block.timestamp + 365 days) + ); + rca.serviceProvider = sp; + rca.nonce = nonce; + return rca; + } + + function _escrowBalance(address collector_, address provider_) internal view returns (uint256) { + (uint256 bal, , ) = paymentsEscrow.escrowAccounts(address(agreementManager), collector_, provider_); + return bal; + } + + function _escrowThawing(address collector_, address provider_) internal view returns (uint256) { + (, uint256 thawing, ) = paymentsEscrow.escrowAccounts(address(agreementManager), collector_, provider_); + return thawing; + } + + // ══════════════════════════════════════════════════════════════════════ + // 6. REGISTERED-only agreement — aging and cleanup + // ══════════════════════════════════════════════════════════════════════ + + /// @notice REGISTERED-only agreement: immediately after offer, it's tracked with non-zero maxNextClaim. + /// Can be canceled and cleaned up without ever being accepted. 
+ function test_RegisteredOnly_TrackedAndCancelable() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + // Tracked with non-zero maxNextClaim + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); + assertTrue( + agreementManager.getAgreementInfo(address(recurringCollector), agreementId).maxNextClaim > 0, + "REGISTERED agreement should have non-zero maxNextClaim" + ); + + // Cancel without ever accepting — cleans up immediately + _cancelAgreement(agreementId); + assertEq( + agreementManager.getPairAgreementCount(address(recurringCollector), indexer), + 0, + "canceled REGISTERED agreement should be removed" + ); + assertEq( + agreementManager.getSumMaxNextClaim(_collector(), indexer), + 0, + "maxNextClaim should be 0 after cleanup" + ); + assertEq(agreementManager.getSumMaxNextClaimAll(), 0, "global maxNextClaim should be 0"); + } + + /// @notice After aging past endsAt, reconcile removes a REGISTERED agreement because + /// maxNextClaim drops to 0 when the collection window expires. 
+ function test_RegisteredOnly_RemovedOnReconcileAfterExpiry() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 30 days) // shorter endsAt + ); + + bytes16 agreementId = _offerAgreement(rca); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); + + // Warp past endsAt — collector reports maxNextClaim = 0 + vm.warp(block.timestamp + 31 days); + + // Reconcile removes the expired agreement automatically + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + assertEq( + agreementManager.getPairAgreementCount(address(recurringCollector), indexer), + 0, + "expired REGISTERED agreement should be auto-removed on reconcile" + ); + assertEq(agreementManager.getSumMaxNextClaimAll(), 0, "global sum should be 0"); + } + + /// @notice REGISTERED-only agreement contributes to escrow tracking while alive + function test_RegisteredOnly_ContributesToEscrow() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; + + // In Full basis mode, the escrow should have been deposited + assertEq(agreementManager.getSumMaxNextClaimAll(), expectedMaxClaim, "global sum should include REGISTERED"); + assertEq( + agreementManager.getSumMaxNextClaim(_collector(), indexer), + expectedMaxClaim, + "pair sum should include REGISTERED" + ); + + // Escrow should be funded (Full mode) + uint256 escrowed = _escrowBalance(address(recurringCollector), indexer); + assertEq(escrowed, expectedMaxClaim, "escrow should be fully funded in Full mode"); + + // After cancel, escrow should start thawing + _cancelAgreement(agreementId); + uint256 thawing = _escrowThawing(address(recurringCollector), indexer); + assertEq(thawing, 
expectedMaxClaim, "escrow should be thawing after cancel"); + } + + // ══════════════════════════════════════════════════════════════════════ + // 12. Basis degradation when balance is insufficient + // ══════════════════════════════════════════════════════════════════════ + + /// @notice When RAM's token balance is too low for Full mode, escrow deposit is + /// partial and deficit tracking reflects the shortfall. + function test_BasisDegradation_InsufficientBalance_PartialDeposit() public { + // Fund RAM with a small amount + uint256 limitedFunding = 100 ether; + token.mint(address(agreementManager), limitedFunding); + + // Offer agreement that requires much more escrow than available + // maxNextClaim = 10 ether * 3600 + 500 ether = 36500 ether >> 100 ether + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 500 ether, + 10 ether, + 3600, + 1 + ); + + // Don't use _offerAgreement since it mints 1M tokens — call directly + vm.prank(operator); + bytes16 agreementId = agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); + + uint256 expectedMaxClaim = 10 ether * 3600 + 500 ether; // 36500 ether + assertEq(agreementManager.getSumMaxNextClaimAll(), expectedMaxClaim, "sum should reflect full maxNextClaim"); + + // RAM only had 100 ether. In Full mode, spare = balance - deficit. + // Since deposit uses available balance, only partial deposit was possible. + // totalEscrowDeficit should be > 0 reflecting the unfunded portion. + uint256 escrowed = _escrowBalance(address(recurringCollector), indexer); + assertTrue(escrowed < expectedMaxClaim, "escrow should be less than maxNextClaim (partial deposit)"); + + // Verify deficit reflects the gap + uint256 deficit = agreementManager.getTotalEscrowDeficit(); + assertEq(deficit, expectedMaxClaim - escrowed, "deficit should be maxNextClaim - escrowBalance"); + } + + /// @notice Sufficient funding allows Full basis mode to fully deposit escrow. 
+ /// Demonstrates recovery from degraded state to fully-funded state. + function test_BasisDegradation_RecoveryWithSufficientFunding() public { + // Use _offerAgreement which mints 1M tokens — sufficient for Full mode + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + + _offerAgreement(rca); + uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; // 3700 ether + + // Full mode: escrow fully deposited + uint256 escrowFull = _escrowBalance(address(recurringCollector), indexer); + assertEq(escrowFull, expectedMaxClaim, "Full mode: escrow should be fully funded"); + assertEq(agreementManager.getTotalEscrowDeficit(), 0, "Full mode: no deficit"); + + // Switch to JIT — no proactive deposits + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + + // Reconcile to trigger escrow rebalancing + agreementManager.reconcileProvider(address(recurringCollector), indexer); + + // In JIT, excess should be thawing + uint256 thawing = _escrowThawing(address(recurringCollector), indexer); + assertTrue(thawing > 0, "JIT mode: excess should be thawing"); + + // Switch back to Full + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.Full); + + // Reconcile — should cancel thaw and maintain full deposit + agreementManager.reconcileProvider(address(recurringCollector), indexer); + + uint256 escrowRecovered = _escrowBalance(address(recurringCollector), indexer); + assertEq(escrowRecovered, expectedMaxClaim, "recovered: escrow should be fully funded again"); + } + + // ══════════════════════════════════════════════════════════════════════ + // 13. Cross-provider escrow isolation + // ══════════════════════════════════════════════════════════════════════ + + /// @notice Two providers' escrow tracking is fully isolated — canceling one + /// has no effect on the other's sumMaxNextClaim or escrow balance. 
+ function test_CrossProviderEscrow_IsolatedTracking() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 200 ether, + 2 ether, + 7200, + 2 + ); + + bytes16 id1 = _offerAgreement(rca1); + bytes16 id2 = _offerAgreement(rca2); + + uint256 maxClaim1 = 1 ether * 3600 + 100 ether; // 3700 ether + uint256 maxClaim2 = 2 ether * 7200 + 200 ether; // 14600 ether + + // Verify isolated sums + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1, "indexer1 sum"); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2, "indexer2 sum"); + assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim1 + maxClaim2, "global sum"); + + // Verify isolated escrow deposits (Full mode) + assertEq(_escrowBalance(address(recurringCollector), indexer), maxClaim1, "indexer1 escrow"); + assertEq(_escrowBalance(address(recurringCollector), indexer2), maxClaim2, "indexer2 escrow"); + + // Cancel indexer1's agreement + _cancelAgreement(id1); + + // Indexer1 tracking cleared + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0, "indexer1 sum after cancel"); + + // Indexer2 completely unaffected + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2, "indexer2 sum after cancel"); + assertEq( + _escrowBalance(address(recurringCollector), indexer2), + maxClaim2, + "indexer2 escrow untouched after indexer1 cancel" + ); + + // Global sum reflects only indexer2 + assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim2, "global sum after indexer1 cancel"); + } + + /// @notice One provider's thaw-in-progress does not affect another's escrow min/max + function test_CrossProviderEscrow_ThawDoesNotAffectOther() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + indexer, 
+ 100 ether, + 1 ether, + 3600, + 1 + ); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( + indexer2, + 100 ether, + 1 ether, + 3600, + 2 + ); + + bytes16 id1 = _offerAgreement(rca1); + _offerAgreement(rca2); + + uint256 maxClaim = 1 ether * 3600 + 100 ether; + + // Cancel indexer1 — triggers thaw + _cancelAgreement(id1); + + // Indexer1 has thawing escrow + uint256 thawing1 = _escrowThawing(address(recurringCollector), indexer); + assertEq(thawing1, maxClaim, "indexer1 escrow should be thawing"); + + // Indexer2 escrow should be completely unaffected (no thawing) + uint256 thawing2 = _escrowThawing(address(recurringCollector), indexer2); + assertEq(thawing2, 0, "indexer2 should have no thawing"); + assertEq( + _escrowBalance(address(recurringCollector), indexer2), + maxClaim, + "indexer2 balance should be fully funded" + ); + + // After thaw period, withdraw for indexer1 does not touch indexer2 + vm.warp(block.timestamp + 1 days + 1); + agreementManager.reconcileProvider(address(recurringCollector), indexer); + + assertEq( + _escrowBalance(address(recurringCollector), indexer2), + maxClaim, + "indexer2 balance untouched after indexer1 thaw completion" + ); + } + + // ══════════════════════════════════════════════════════════════════════ + // 16. Eligibility oracle toggle during active agreement + // ══════════════════════════════════════════════════════════════════════ + + /// @notice When the eligibility oracle flips a provider to ineligible while they have + /// an active agreement, isEligible reflects the change immediately. 
+ function test_EligibilityOracle_FlipDuringActiveAgreement() public { + MockEligibilityOracle oracle = new MockEligibilityOracle(); + vm.label(address(oracle), "EligibilityOracle"); + + // Set oracle — initially all eligible + oracle.setDefaultEligible(true); + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(IProviderEligibility(address(oracle))); + + // Offer agreement for indexer + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + _offerAgreement(rca); + + // Indexer is eligible + assertTrue(agreementManager.isEligible(indexer), "should be eligible initially"); + + // Oracle flips indexer to ineligible + oracle.setDefaultEligible(false); + // Default is false and indexer not explicitly set → ineligible + assertFalse(agreementManager.isEligible(indexer), "should be ineligible after oracle flip"); + + // Agreement is still tracked (eligibility doesn't auto-remove) + assertEq( + agreementManager.getPairAgreementCount(address(recurringCollector), indexer), + 1, + "agreement should persist despite ineligibility" + ); + assertTrue( + agreementManager.getAgreementInfo(address(recurringCollector), bytes16(0)).maxNextClaim == 0 || + agreementManager.getSumMaxNextClaim(_collector(), indexer) > 0, + "escrow tracking should be unaffected by eligibility" + ); + + // Oracle flips back + oracle.setEligible(indexer, true); + assertTrue(agreementManager.isEligible(indexer), "should be eligible again after oracle flip back"); + } + + /// @notice Emergency clear of eligibility oracle makes all providers eligible (fail-open) + function test_EligibilityOracle_EmergencyClear_FailOpen() public { + MockEligibilityOracle oracle = new MockEligibilityOracle(); + + // Set oracle that denies indexer + vm.prank(governor); + agreementManager.setProviderEligibilityOracle(IProviderEligibility(address(oracle))); + assertFalse(agreementManager.isEligible(indexer), "should be 
ineligible"); + + // Emergency clear (PAUSE_ROLE needed — grant it first) + bytes32 PAUSE_ROLE = keccak256("PAUSE_ROLE"); + vm.prank(governor); + agreementManager.grantRole(PAUSE_ROLE, governor); + + vm.prank(governor); + agreementManager.emergencyClearEligibilityOracle(); + + // All providers now eligible (fail-open) + assertTrue(agreementManager.isEligible(indexer), "should be eligible after emergency clear"); + assertTrue(agreementManager.isEligible(indexer2), "all providers eligible after emergency clear"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/agreement-manager/fundingModes.t.sol b/packages/issuance/test/unit/agreement-manager/fundingModes.t.sol index 960825dc6..cb2db0c81 100644 --- a/packages/issuance/test/unit/agreement-manager/fundingModes.t.sol +++ b/packages/issuance/test/unit/agreement-manager/fundingModes.t.sol @@ -5,6 +5,7 @@ import { Vm } from "forge-std/Vm.sol"; import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; @@ -84,12 +85,9 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS _offerAgreement(rca1); uint256 maxClaim1 = 1 ether * 3600 + 100 ether; assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim1); - assertEq(agreementManager.getTotalAgreementCount(), 1); - _offerAgreement(rca2); uint256 maxClaim2 = 2 ether * 7200 + 200 ether; assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim1 + maxClaim2); - assertEq(agreementManager.getTotalAgreementCount(), 2); } function 
test_GlobalTracking_TotalUndeposited() public { @@ -126,7 +124,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS assertEq(agreementManager.getTotalEscrowDeficit(), maxClaim, "JIT: totalEscrowDeficit = sumMaxNextClaim"); } - function test_GlobalTracking_RevokeDecrementsCountAndRequired() public { + function test_GlobalTracking_CancelDecrementsCountAndRequired() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -138,13 +136,9 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS bytes16 agreementId = _offerAgreement(rca); uint256 maxClaim = 1 ether * 3600 + 100 ether; assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim); - assertEq(agreementManager.getTotalAgreementCount(), 1); - - vm.prank(operator); - agreementManager.revokeOffer(agreementId); + _cancelAgreement(agreementId); assertEq(agreementManager.getSumMaxNextClaimAll(), 0); - assertEq(agreementManager.getTotalAgreementCount(), 0); } function test_GlobalTracking_RemoveDecrementsCountAndRequired() public { @@ -157,13 +151,10 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS ); bytes16 agreementId = _offerAgreement(rca); - assertEq(agreementManager.getTotalAgreementCount(), 1); - _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertEq(agreementManager.getSumMaxNextClaimAll(), 0); - assertEq(agreementManager.getTotalAgreementCount(), 0); } function test_GlobalTracking_ReconcileUpdatesRequired() public { @@ -181,11 +172,9 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // SP cancels — reconcile sets maxNextClaim to 0 _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), 
agreementId); assertEq(agreementManager.getSumMaxNextClaimAll(), 0); - // Reconcile now deletes settled agreements inline - assertEq(agreementManager.getTotalAgreementCount(), 0); } function test_GlobalTracking_TotalUndeposited_MultiProvider() public { @@ -240,7 +229,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 2 ); vm.prank(operator); - agreementManager.offerAgreement(rca2, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca2)); uint256 maxClaim2 = 2 ether * 7200 + 200 ether; // indexer is fully deposited (undeposited = 0), indexer2 has full deficit (undeposited = maxClaim2) @@ -293,7 +282,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // SP cancels, remove (triggers thaw of all excess) _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); IPaymentsEscrow.EscrowAccount memory account; (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -324,7 +313,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); // Update escrow — should thaw everything - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory account; (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -428,7 +417,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // OnDemand thaw ceiling = required — no thaw expected (balance == thawCeiling) vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); - 
agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory account; (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -459,7 +448,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // JustInTime would thaw everything vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory jitAccount; (jitAccount.balance, jitAccount.tokensThawing, jitAccount.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -472,7 +461,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // Switch to OnDemand — min=0, min <= liquid=0, so thaw is left alone vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory odAccount; (odAccount.balance, odAccount.tokensThawing, odAccount.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -534,7 +523,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS ); token.mint(address(agreementManager), 100_000 ether); vm.prank(operator); - agreementManager.offerAgreement(rca, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); } // sumMaxNextClaim should be larger than totalEscrowDeficit (degradation occurred: Full -> OnDemand) @@ -582,13 +571,13 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // Switch through all modes — agreement data preserved 
vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), maxClaim); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), maxClaim); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), maxClaim); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), maxClaim); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } function test_ModeSwitch_UpdateEscrowAppliesNewMode() public { @@ -608,7 +597,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // Switch to JustInTime and update escrow vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory account; (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -674,7 +663,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS vm.prank(address(recurringCollector)); agreementManager.afterCollection(agreementId, 500 ether); - uint256 newMaxClaim = agreementManager.getAgreementMaxNextClaim(agreementId); + uint256 newMaxClaim = agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId); assertEq(newMaxClaim, 1 ether * 3600, "maxNextClaim = ongoing only after first collection"); } @@ -704,8 +693,9 @@ contract RecurringAgreementManagerFundingModesTest is 
RecurringAgreementManagerS ); _offerAgreementUpdate(rcau); - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim + pendingMaxClaim); + // max(current, pending) = max(3700, 14600) = 14600 + uint256 pendingMaxClaim = 14600 ether; + assertEq(agreementManager.getSumMaxNextClaimAll(), pendingMaxClaim); } function test_GlobalTracking_ReplacePendingUpdate() public { @@ -730,10 +720,13 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS ); _offerAgreementUpdate(rcau1); - uint256 pendingMaxClaim1 = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim + pendingMaxClaim1); + // max(current, pending) = max(3700, 14600) = 14600 + uint256 pendingMaxClaim1 = 14600 ether; + assertEq(agreementManager.getSumMaxNextClaimAll(), pendingMaxClaim1); + + // Revoke first update, then offer replacement with next valid nonce + _cancelPendingUpdate(agreementId); - // Replace with different terms (same nonce — collector hasn't accepted either) IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( agreementId, 50 ether, @@ -741,12 +734,12 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 60, 1800, uint64(block.timestamp + 180 days), - 1 + 2 ); _offerAgreementUpdate(rcau2); - uint256 pendingMaxClaim2 = 0.5 ether * 1800 + 50 ether; - assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim + pendingMaxClaim2); + // max(current, pending) = max(3700, 950) = 3700 (current dominates) + assertEq(agreementManager.getSumMaxNextClaimAll(), maxClaim); } // ==================== Upward Transitions ==================== @@ -776,7 +769,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // Switch to Full vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.Full); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + 
agreementManager.reconcileProvider(address(_collector()), indexer); assertEq( paymentsEscrow.getBalance(address(agreementManager), address(recurringCollector), indexer), @@ -800,7 +793,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // Switch to OnDemand — holds at required (no thaw for 1 agreement) vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory odAccount; (odAccount.balance, odAccount.tokensThawing, odAccount.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -813,7 +806,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // Switch back to Full — no change needed (already at required) vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.Full); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory fullAccount; (fullAccount.balance, fullAccount.tokensThawing, fullAccount.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -850,7 +843,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // Cancel and remove rca1 — this triggers a thaw for excess _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); IPaymentsEscrow.EscrowAccount memory beforeSwitch; (beforeSwitch.balance, beforeSwitch.tokensThawing, beforeSwitch.thawEndTimestamp) = paymentsEscrow @@ -862,7 +855,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // remaining balance thaws after current thaw completes and is withdrawn vm.prank(operator); 
agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory midCycle; (midCycle.balance, midCycle.tokensThawing, midCycle.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -875,7 +868,7 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS // Complete thaw, withdraw all vm.warp(block.timestamp + 2 days); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory afterWithdraw; (afterWithdraw.balance, afterWithdraw.tokensThawing, afterWithdraw.thawEndTimestamp) = paymentsEscrow @@ -885,9 +878,58 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS assertEq(afterWithdraw.tokensThawing, 0, "JIT: nothing left to thaw"); } - // ==================== Temp JIT ==================== + // ==================== Threshold-Based Basis Degradation ==================== + // + // _escrowMinMax computes spare = balance - totalEscrowDeficit (floored at 0) + // and checks two gates against sumMaxNextClaimAll (smnca): + // + // max gate: smnca * minOnDemandBasisThreshold / 256 < spare [default threshold=128 -> 0.5x] + // min gate: smnca * (256 + minFullBasisMargin) / 256 < spare [default margin=16 -> 1.0625x] + // + // min gate is stricter (1.0625 > 0.5), giving three degradation states: + // Full: spare > smnca * 1.0625 (min=max=sumMaxNextClaim) + // OnDemand: 0.5*smnca < spare <= 1.0625*smnca (min=0, max=sumMaxNextClaim) + // JIT-like: spare <= 0.5*smnca (min=0, max=0) + + // -- Helpers for degradation tests -- + + /// @notice Drain SAM balance to zero + function _drainSAM() internal { + uint256 samBalance = token.balanceOf(address(agreementManager)); + if (0 < samBalance) { + 
vm.prank(address(agreementManager)); + token.transfer(address(1), samBalance); + } + } + + /// @notice Get the effective escrow balance (balance - tokensThawing) for a pair + function _effectiveEscrow(address collector, address provider) internal view returns (uint256) { + (uint256 balance, uint256 thawing, ) = paymentsEscrow.escrowAccounts( + address(agreementManager), + collector, + provider + ); + return balance - thawing; + } + + /// @notice Get full escrow account for a pair + function _escrowAccount( + address collector, + address provider + ) internal view returns (uint256 balance, uint256 tokensThawing, uint256 thawEndTimestamp) { + return paymentsEscrow.escrowAccounts(address(agreementManager), collector, provider); + } - function test_TempJit_TripsOnPartialBeforeCollection() public { + /// @notice Fund SAM so spare equals exactly the given amount (above totalEscrowDeficit) + function _fundToSpare(uint256 targetSpare) internal { + _drainSAM(); + uint256 deficit = agreementManager.getTotalEscrowDeficit(); + token.mint(address(agreementManager), deficit + targetSpare); + } + + // ---- Full basis: min gate (1.0625x) controls Full -> OnDemand ---- + + function test_BasisDegradation_Full_BothGatesPass_DepositsToSumMaxNextClaim() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -895,33 +937,55 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); - - // Drain SAM's token balance so beforeCollection can't fully fund - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + uint256 pairSmnc = agreementManager.getSumMaxNextClaim(_collector(), indexer); - // Request collection exceeding escrow balance - 
vm.expectEmit(address(agreementManager)); - emit IRecurringEscrowManagement.TempJitSet(true, true); + // spare > smnca * 1.0625 -- both gates pass -> Full + _fundToSpare((smnca * (256 + 16)) / 256 + 1); + agreementManager.reconcileProvider(address(_collector()), indexer); - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); + assertEq( + _effectiveEscrow(address(recurringCollector), indexer), + pairSmnc, + "Full: deposited to sumMaxNextClaim" + ); + } - // Verify state - assertTrue(agreementManager.isTempJit(), "Temp JIT should be tripped"); + function test_BasisDegradation_Full_MinGateFail_DegradesToOnDemand() public { + // spare at min gate boundary: min gate fails but max gate passes -> OnDemand + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + uint256 pairSmnc = agreementManager.getSumMaxNextClaim(_collector(), indexer); + + // spare = smnca * 272/256 exactly -- min gate fails (not strictly greater) + // but spare > smnca * 128/256, so max gate passes + uint256 minGateThreshold = (smnca * (256 + 16)) / 256; + _fundToSpare(minGateThreshold); + agreementManager.reconcileProvider(address(_collector()), indexer); + + // OnDemand behavior: min=0 (no deposits), max=sumMaxNextClaim (holds ceiling) + // Escrow was deposited during offerAgreement, so it should still be at pairSmnc + // (max holds, no thaw started because balance <= max) + uint256 effective = _effectiveEscrow(address(recurringCollector), indexer); + assertEq(effective, pairSmnc, "OnDemand: escrow held at ceiling (no thaw)"); + + // Stored basis unchanged assertEq( uint256(agreementManager.getEscrowBasis()), uint256(IRecurringEscrowManagement.EscrowBasis.Full), - "Basis unchanged (temp JIT overrides behavior, not escrowBasis)" + "Stored basis unchanged" ); } - function 
test_BeforeCollection_TripsWhenAvailableEqualsDeficit() public { - // Boundary: available == deficit — strict '<' means trip, not deposit + function test_BasisDegradation_Full_MinGateBoundary_OneWeiDifference() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -929,37 +993,84 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + uint256 pairSmnc = agreementManager.getSumMaxNextClaim(_collector(), indexer); + uint256 minGateThreshold = (smnca * (256 + 16)) / 256; - // Set manager balance to exactly the escrow shortfall - (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( - address(agreementManager), - address(recurringCollector), - indexer + // At min gate boundary: OnDemand (min=0, max=smnc) + _fundToSpare(minGateThreshold); + agreementManager.reconcileProvider(address(_collector()), indexer); + + // Escrow was pre-deposited, OnDemand holds it (no thaw because balance <= max) + assertEq(_effectiveEscrow(address(recurringCollector), indexer), pairSmnc, "At boundary: OnDemand holds"); + + // One wei above: Full (min=max=smnc) + _fundToSpare(minGateThreshold + 1); + agreementManager.reconcileProvider(address(_collector()), indexer); + assertEq(_effectiveEscrow(address(recurringCollector), indexer), pairSmnc, "One above boundary: Full deposits"); + } + + // ---- Full basis: max gate (0.5x) controls OnDemand -> JIT-like ---- + + function test_BasisDegradation_Full_MaxGateFail_DegradesToJIT() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 ); - uint256 tokensToCollect = escrowBalance + 500 ether; - uint256 deficit = tokensToCollect - escrowBalance; // 500 ether + _offerAgreement(rca); + uint256 smnca = 
agreementManager.getSumMaxNextClaimAll(); - // Drain SAM then mint exactly the deficit - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } - token.mint(address(agreementManager), deficit); - assertEq(token.balanceOf(address(agreementManager)), deficit, "Balance == deficit"); + // spare = smnca * 128/256 exactly -- max gate fails -> JIT-like (both 0) + uint256 maxGateThreshold = (smnca * 128) / 256; + _fundToSpare(maxGateThreshold); + agreementManager.reconcileProvider(address(_collector()), indexer); - vm.expectEmit(address(agreementManager)); - emit IRecurringEscrowManagement.TempJitSet(true, true); + (uint256 bal, uint256 thawing, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing, bal, "JIT-like: all escrow thawing"); + } - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, tokensToCollect); + function test_BasisDegradation_Full_MaxGateBoundary_OneWeiDifference() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 + ); + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + uint256 maxGateThreshold = (smnca * 128) / 256; + + // At max gate boundary: JIT-like + _fundToSpare(maxGateThreshold); + agreementManager.reconcileProvider(address(_collector()), indexer); + (uint256 bal1, uint256 thawing1, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing1, bal1, "At max boundary: JIT thaws all"); - assertTrue(agreementManager.isTempJit(), "Trips when available == deficit"); + // Complete thaw + vm.warp(block.timestamp + 2 days); + agreementManager.reconcileProvider(address(_collector()), indexer); + + // One wei above max gate: OnDemand (max passes, min still fails since 0.5x+1 < 1.0625x) + _fundToSpare(maxGateThreshold + 1); + 
agreementManager.reconcileProvider(address(_collector()), indexer); + + // OnDemand: min=0 so no deposit happens (escrow was withdrawn during thaw) + // max=smnc so no thaw starts either. Effective balance stays at 0 (nothing to hold). + (uint256 bal2, uint256 thawing2, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing2, 0, "One above max boundary: OnDemand no thaw"); + // No deposit because min=0 + assertEq(bal2, 0, "OnDemand: no deposit (min=0)"); } - function test_BeforeCollection_DepositsWhenAvailableExceedsDeficit() public { - // Boundary: available == deficit + 1 — deposits instead of tripping + // ---- Intermediate OnDemand state: between the two thresholds ---- + + function test_BasisDegradation_Full_IntermediateOnDemand_NoDepositButHoldsEscrow() public { + // Verify the intermediate state: min=0 (no deposit), max=smnc (holds ceiling) IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -967,38 +1078,61 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + uint256 pairSmnc = agreementManager.getSumMaxNextClaim(_collector(), indexer); + + // Fund to middle of OnDemand band: 0.5x < spare < 1.0625x + // Use spare = 0.75x (halfway in the band) + uint256 midSpare = (smnca * 3) / 4; + assertTrue(midSpare > (smnca * 128) / 256, "midSpare above max gate"); + assertTrue(midSpare <= (smnca * (256 + 16)) / 256, "midSpare below min gate"); + + _fundToSpare(midSpare); + agreementManager.reconcileProvider(address(_collector()), indexer); + + // Escrow was deposited during offerAgreement (when SAM had 1M ether). + // OnDemand: max=smnc so holds (no thaw), min=0 so no new deposit. 
+ uint256 effective = _effectiveEscrow(address(recurringCollector), indexer); + assertEq(effective, pairSmnc, "OnDemand: holds pre-existing escrow at ceiling"); + (, uint256 thawing, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing, 0, "OnDemand: no thaw"); + } - (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( - address(agreementManager), - address(recurringCollector), - indexer + function test_BasisDegradation_Full_IntermediateOnDemand_NoDepositFromZero() public { + // Start with zero escrow in OnDemand band -- verify no deposit happens + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 ); - uint256 tokensToCollect = escrowBalance + 500 ether; - uint256 deficit = tokensToCollect - escrowBalance; // 500 ether + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); - // Drain SAM then mint deficit + 1 - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } - token.mint(address(agreementManager), deficit + 1); + // Drain to JIT, complete thaw to clear escrow + _drainSAM(); + agreementManager.reconcileProvider(address(_collector()), indexer); + vm.warp(block.timestamp + 2 days); + agreementManager.reconcileProvider(address(_collector()), indexer); + assertEq(_effectiveEscrow(address(recurringCollector), indexer), 0, "Escrow cleared"); - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, tokensToCollect); + // Fund to OnDemand band + _fundToSpare((smnca * 3) / 4); + agreementManager.reconcileProvider(address(_collector()), indexer); - assertFalse(agreementManager.isTempJit(), "No trip when deficit < available"); - (uint256 newEscrow, , ) = paymentsEscrow.escrowAccounts( - address(agreementManager), - address(recurringCollector), - indexer + // OnDemand: min=0 -> no 
deposit from zero. max=smnc but nothing to hold. + assertEq( + _effectiveEscrow(address(recurringCollector), indexer), + 0, + "OnDemand: no deposit when starting from zero" ); - assertEq(newEscrow, tokensToCollect, "Escrow topped up to tokensToCollect"); } - function test_TempJit_PreservesBasisOnTrip() public { - // Set OnDemand, trip — escrowBasis should NOT change + // ---- OnDemand basis: max gate only (min always 0) ---- + + function test_BasisDegradation_OnDemand_MaxGatePass_HoldsAtCeiling() public { vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); @@ -1009,31 +1143,45 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); - // Drain SAM - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + // OnDemand: only max gate matters (min is always 0 because basis != Full) + // max gate: smnca * threshold/256 < spare + _fundToSpare((smnca * 128) / 256 + 1); + agreementManager.reconcileProvider(address(_collector()), indexer); - vm.expectEmit(address(agreementManager)); - emit IRecurringEscrowManagement.TempJitSet(true, true); + (, uint256 thawing, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing, 0, "OnDemand: no thaw when max gate passes"); + } - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); + function test_BasisDegradation_OnDemand_MaxGateFail_ThawsAll() public { + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); - // Basis stays OnDemand (not switched to JIT) - assertEq( - uint256(agreementManager.getEscrowBasis()), - uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand), - "Basis 
unchanged during trip" + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( + indexer, + 100 ether, + 1 ether, + 3600, + 1 ); - assertTrue(agreementManager.isTempJit()); + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + + // Max gate fails -> max=0 -> thaw everything + _fundToSpare((smnca * 128) / 256); + agreementManager.reconcileProvider(address(_collector()), indexer); + + (uint256 bal, uint256 thawing, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing, bal, "OnDemand degraded: all thawing"); } - function test_TempJit_DoesNotTripWhenFullyCovered() public { + function test_BasisDegradation_OnDemand_MinGateIrrelevant() public { + // Even with generous spare (above min gate), OnDemand never deposits + vm.prank(operator); + agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -1041,20 +1189,30 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); - uint256 maxClaim = 1 ether * 3600 + 100 ether; + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); - // Ensure SAM has plenty of tokens - token.mint(address(agreementManager), 1_000_000 ether); + // Drain to zero, complete thaw + _drainSAM(); + agreementManager.reconcileProvider(address(_collector()), indexer); + vm.warp(block.timestamp + 2 days); + agreementManager.reconcileProvider(address(_collector()), indexer); - // Request less than escrow balance — no trip - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, maxClaim); + // Fund well above both gates + _fundToSpare(smnca * 2); + agreementManager.reconcileProvider(address(_collector()), indexer); - assertFalse(agreementManager.isTempJit(), "No trip when fully covered"); + // OnDemand: 
min=0 always (basis != Full), so no deposit from zero + assertEq( + _effectiveEscrow(address(recurringCollector), indexer), + 0, + "OnDemand: no deposit regardless of spare (min always 0)" + ); } - function test_TempJit_DoesNotTripWhenAlreadyActive() public { + // ---- Zero spare ---- + + function test_BasisDegradation_ZeroSpare_DegradesToJIT() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -1062,40 +1220,20 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); - - // Drain SAM - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + _offerAgreement(rca); - // First trip - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); + _drainSAM(); + assertEq(token.balanceOf(address(agreementManager)), 0, "SAM drained"); - // Second partial collection — should NOT emit event again - vm.recordLogs(); - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); + agreementManager.reconcileProvider(address(_collector()), indexer); - // Check no TempJitSet event was emitted - Vm.Log[] memory logs = vm.getRecordedLogs(); - bytes32 tripSig = keccak256("TempJitSet(bool,bool)"); - bool found = false; - for (uint256 i = 0; i < logs.length; i++) { - if (logs[i].topics[0] == tripSig) found = true; - } - assertFalse(found, "No second trip event"); + (uint256 bal, uint256 thawing, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing, bal, "JIT: thaws all when spare=0"); } - function test_TempJit_TripsEvenWhenAlreadyJustInTime() public { - // Governor explicitly sets JIT - vm.prank(operator); - 
agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); + // ---- Recovery ---- + function test_BasisDegradation_Recovery_JITToOnDemand() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -1103,22 +1241,27 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); - // Drain SAM so beforeCollection can't cover - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + // Drain to JIT, complete thaw + _drainSAM(); + agreementManager.reconcileProvider(address(_collector()), indexer); + vm.warp(block.timestamp + 2 days); + agreementManager.reconcileProvider(address(_collector()), indexer); + assertEq(_effectiveEscrow(address(recurringCollector), indexer), 0, "JIT: zero escrow"); - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); + // Fund to OnDemand band (above max gate, below min gate) + _fundToSpare((smnca * 128) / 256 + 1); + agreementManager.reconcileProvider(address(_collector()), indexer); - assertTrue(agreementManager.isTempJit(), "Trips even in JIT mode"); + // OnDemand: min=0 so no deposit, max=smnc but nothing to hold + assertEq(_effectiveEscrow(address(recurringCollector), indexer), 0, "OnDemand recovery: no deposit (min=0)"); + (, uint256 thawing, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing, 0, "OnDemand recovery: no thaw"); } - function test_TempJit_JitStillWorksWhileActive() public { + function test_BasisDegradation_Recovery_JITToFull() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -1126,36 +1269,26 @@ contract 
RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); - - // Drain SAM to trip the breaker - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + uint256 pairSmnc = agreementManager.getSumMaxNextClaim(_collector(), indexer); - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); + // Drain to JIT, complete thaw + _drainSAM(); + agreementManager.reconcileProvider(address(_collector()), indexer); + vm.warp(block.timestamp + 2 days); + agreementManager.reconcileProvider(address(_collector()), indexer); - // Now fund SAM and do a JIT top-up while temp JIT is active - token.mint(address(agreementManager), 500 ether); + // Fund above min gate -> Full + _fundToSpare((smnca * (256 + 16)) / 256 + 1); + agreementManager.reconcileProvider(address(_collector()), indexer); - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 500 ether); - - (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( - address(agreementManager), - address(recurringCollector), - indexer - ); - uint256 maxClaim = 1 ether * 3600 + 100 ether; - assertTrue(maxClaim <= escrowBalance, "JIT still works during temp JIT"); + assertEq(_effectiveEscrow(address(recurringCollector), indexer), pairSmnc, "Full: recovered and deposited"); } - function test_TempJit_RecoveryOnUpdateEscrow() public { - // Offer rca1 (fully deposited), drain SAM, offer rca2 (creates undeposited deficit) + // ---- Multi-provider: global degradation ---- + + function test_BasisDegradation_MultiProvider_BothDegraded() public { IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( indexer, 100 
ether, @@ -1163,49 +1296,31 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca1); + _offerAgreement(rca1); - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + _drainSAM(); IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( - indexer, + indexer2, 100 ether, 1 ether, 3600, 2 ); vm.prank(operator); - agreementManager.offerAgreement(rca2, _collector()); - - // Trip temp JIT - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); - - // Mint more than totalEscrowDeficit — recovery requires strict deficit < available - uint256 totalEscrowDeficit = agreementManager.getTotalEscrowDeficit(); - assertTrue(0 < totalEscrowDeficit, "Deficit exists"); - token.mint(address(agreementManager), totalEscrowDeficit + 1); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca2)); - vm.expectEmit(address(agreementManager)); - emit IRecurringEscrowManagement.TempJitSet(false, true); + agreementManager.reconcileProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer2); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + (uint256 bal1, uint256 thawing1, ) = _escrowAccount(address(recurringCollector), indexer); + (uint256 bal2, uint256 thawing2, ) = _escrowAccount(address(recurringCollector), indexer2); - assertFalse(agreementManager.isTempJit(), "Temp JIT recovered"); - assertEq( - uint256(agreementManager.getEscrowBasis()), - uint256(IRecurringEscrowManagement.EscrowBasis.Full), - "Basis still Full" - ); + assertEq(thawing1, bal1, "indexer: degraded thaws all"); + assertEq(thawing2, bal2, "indexer2: degraded thaws all"); } - function 
test_TempJit_NoRecoveryWhenPartiallyFunded() public { - // Offer rca1 (fully deposited), drain, offer rca2 (undeposited — creates deficit) + function test_BasisDegradation_MultiProvider_RecoveryRestoresBoth() public { IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( indexer, 100 ether, @@ -1213,47 +1328,43 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca1); - - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + _offerAgreement(rca1); IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( - indexer, - 100 ether, - 1 ether, - 3600, + indexer2, + 50 ether, + 2 ether, + 1800, 2 ); - vm.prank(operator); - agreementManager.offerAgreement(rca2, _collector()); + _offerAgreement(rca2); - // Trip - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + uint256 pairSmnc1 = agreementManager.getSumMaxNextClaim(_collector(), indexer); + uint256 pairSmnc2 = agreementManager.getSumMaxNextClaim(_collector(), indexer2); - uint256 totalEscrowDeficit = agreementManager.getTotalEscrowDeficit(); - assertTrue(0 < totalEscrowDeficit, "0 < totalEscrowDeficit"); + // Drain and degrade + _drainSAM(); + agreementManager.reconcileProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer2); - // Mint less than totalEscrowDeficit — no recovery - token.mint(address(agreementManager), totalEscrowDeficit / 2); + // Complete thaws + vm.warp(block.timestamp + 2 days); + agreementManager.reconcileProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer2); - 
agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + // Fund above min gate -> both recover to Full + _fundToSpare((smnca * (256 + 16)) / 256 + 1); + agreementManager.reconcileProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer2); - assertTrue(agreementManager.isTempJit(), "Still tripped (insufficient balance)"); - assertEq( - uint256(agreementManager.getEscrowBasis()), - uint256(IRecurringEscrowManagement.EscrowBasis.Full), - "Basis unchanged" - ); + assertEq(_effectiveEscrow(address(recurringCollector), indexer), pairSmnc1, "indexer: recovered to Full"); + assertEq(_effectiveEscrow(address(recurringCollector), indexer2), pairSmnc2, "indexer2: recovered to Full"); } - function test_TempJit_NoRecoveryWhenExactlyFunded() public { - // Boundary: available == totalEscrowDeficit — strict '<' means no recovery + // ---- offerAgreement can trigger instant degradation ---- + + function test_BasisDegradation_OfferAgreement_TriggersInstantDegradation() public { IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( indexer, 100 ether, @@ -1261,50 +1372,50 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca1); + _offerAgreement(rca1); + uint256 pairSmnc1 = agreementManager.getSumMaxNextClaim(_collector(), indexer); - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + assertEq( + _effectiveEscrow(address(recurringCollector), indexer), + pairSmnc1, + "indexer: initially fully escrowed" + ); + // Fund to just above min gate for current smnca + _drainSAM(); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + uint256 deficit = agreementManager.getTotalEscrowDeficit(); + token.mint(address(agreementManager), deficit + (smnca * (256 + 16)) / 256 + 1); + 
+ agreementManager.reconcileProvider(address(_collector()), indexer); + assertEq( + _effectiveEscrow(address(recurringCollector), indexer), + pairSmnc1, + "indexer: still Full after careful funding" + ); + + // Offer large new agreement -- increases smnca, pushing spare below min gate IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( - indexer, - 100 ether, - 1 ether, - 3600, + indexer2, + 500 ether, + 10 ether, + 7200, 2 ); vm.prank(operator); - agreementManager.offerAgreement(rca2, _collector()); - - // Trip - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca2)); - // Mint exactly totalEscrowDeficit — recovery requires strict deficit < available - uint256 totalEscrowDeficit = agreementManager.getTotalEscrowDeficit(); - assertTrue(0 < totalEscrowDeficit, "Deficit exists"); - token.mint(address(agreementManager), totalEscrowDeficit); - assertEq(token.balanceOf(address(agreementManager)), totalEscrowDeficit, "Balance == deficit"); + // Reconcile indexer -- existing provider's escrow now degraded + agreementManager.reconcileProvider(address(_collector()), indexer); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); - - assertTrue(agreementManager.isTempJit(), "Still tripped (available == deficit, not >)"); - assertEq( - uint256(agreementManager.getEscrowBasis()), - uint256(IRecurringEscrowManagement.EscrowBasis.Full), - "Basis unchanged" - ); + // New smnca much larger, spare likely below max gate too -> JIT-like + (uint256 bal, uint256 thawing, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing, bal, "indexer: degraded after new offer increased smnca"); } - function test_TempJit_EscrowBasisPreservedDuringTrip() public { - // Set OnDemand, trip, recover — escrowBasis stays OnDemand throughout - 
vm.prank(operator); - agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + // ---- Stored escrowBasis never changes automatically ---- + function test_BasisDegradation_StoredBasisUnchanged() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -1312,41 +1423,51 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); + _offerAgreement(rca); - // Drain and trip - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + assertEq( + uint256(agreementManager.getEscrowBasis()), + uint256(IRecurringEscrowManagement.EscrowBasis.Full), + "Basis: Full before degradation" + ); - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); + _drainSAM(); + agreementManager.reconcileProvider(address(_collector()), indexer); assertEq( uint256(agreementManager.getEscrowBasis()), - uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand), - "Basis preserved during trip" + uint256(IRecurringEscrowManagement.EscrowBasis.Full), + "Basis: still Full after degradation" ); - // Recovery — mint more than deficit (recovery requires strict deficit < available) - token.mint(address(agreementManager), agreementManager.getSumMaxNextClaimAll() + 1); - - vm.expectEmit(address(agreementManager)); - emit IRecurringEscrowManagement.TempJitSet(false, true); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + vm.warp(block.timestamp + 2 days); + agreementManager.reconcileProvider(address(_collector()), indexer); + _fundToSpare((smnca * (256 + 16)) / 256 + 1); + agreementManager.reconcileProvider(address(_collector()), indexer); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); - 
assertFalse(agreementManager.isTempJit()); assertEq( uint256(agreementManager.getEscrowBasis()), - uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand), - "Basis still OnDemand after recovery" + uint256(IRecurringEscrowManagement.EscrowBasis.Full), + "Basis: still Full after recovery" ); } - function test_TempJit_SetTempJitClearsBreaker() public { + // ---- Edge case: no agreements (smnca = 0) ---- + + function test_BasisDegradation_NoAgreements_NoRevert() public { + agreementManager.reconcileProvider(address(_collector()), indexer); + assertEq(_effectiveEscrow(address(recurringCollector), indexer), 0, "No agreements: zero escrow"); + } + + // ---- Custom params ---- + + function test_BasisDegradation_CustomMargin_WiderOnDemandBand() public { + // Increase margin to 128 -> min gate threshold = smnca * 384/256 = 1.5x + // OnDemand band becomes 0.5x < spare <= 1.5x (much wider) + vm.prank(operator); + agreementManager.setMinFullBasisMargin(128); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -1354,30 +1475,33 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); - - // Drain and trip - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); + uint256 pairSmnc = agreementManager.getSumMaxNextClaim(_collector(), indexer); - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); + // spare = smnca * 1.2 -- above max gate (0.5) but below min gate (1.5) + _fundToSpare((smnca * 307) / 256); // ~1.2x + agreementManager.reconcileProvider(address(_collector()), indexer); - // Operator clears tempJit directly - 
vm.expectEmit(address(agreementManager)); - emit IRecurringEscrowManagement.TempJitSet(false, false); + // OnDemand: holds pre-deposited escrow (max=smnc), no deposit (min=0) + assertEq( + _effectiveEscrow(address(recurringCollector), indexer), + pairSmnc, + "OnDemand with wide band: holds at ceiling" + ); - vm.prank(operator); - agreementManager.setTempJit(false); + // Fund above 1.5x -> Full + _fundToSpare((smnca * (256 + 128)) / 256 + 1); + agreementManager.reconcileProvider(address(_collector()), indexer); - assertFalse(agreementManager.isTempJit(), "Operator cleared breaker"); + assertEq(_effectiveEscrow(address(recurringCollector), indexer), pairSmnc, "Full with wide band: deposited"); } - function test_TempJit_SetEscrowBasisDoesNotClearBreaker() public { + function test_BasisDegradation_CustomThreshold_HigherMaxGate() public { + // Increase threshold to 200 -> max gate threshold = smnca * 200/256 ~ 0.78x + vm.prank(operator); + agreementManager.setMinOnDemandBasisThreshold(200); + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, @@ -1385,159 +1509,145 @@ contract RecurringAgreementManagerFundingModesTest is RecurringAgreementManagerS 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca); - - // Drain and trip - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + _offerAgreement(rca); + uint256 smnca = agreementManager.getSumMaxNextClaimAll(); - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); + // spare = smnca * 0.6 -- below new max gate (0.78) -> JIT-like + _fundToSpare((smnca * 154) / 256); // ~0.6x + agreementManager.reconcileProvider(address(_collector()), indexer); - // Operator changes basis — tempJit stays active - vm.prank(operator); - 
agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.OnDemand); + (uint256 bal, uint256 thawing, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing, bal, "JIT with higher threshold: thaws all at 0.6x"); - assertTrue(agreementManager.isTempJit(), "setEscrowBasis does not clear tempJit"); - assertEq( - uint256(agreementManager.getEscrowBasis()), - uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand), - "Basis changed independently" - ); + // spare = smnca * 0.85 -- above new max gate (0.78) -> OnDemand + vm.warp(block.timestamp + 2 days); + agreementManager.reconcileProvider(address(_collector()), indexer); + _fundToSpare((smnca * 218) / 256); // ~0.85x + agreementManager.reconcileProvider(address(_collector()), indexer); + + // OnDemand: no deposit (min=0), no thaw (max=smnc) + (uint256 bal2, uint256 thawing2, ) = _escrowAccount(address(recurringCollector), indexer); + assertEq(thawing2, 0, "OnDemand with higher threshold: no thaw at 0.85x"); + assertEq(bal2, 0, "OnDemand with higher threshold: no deposit (min=0, escrow cleared)"); } - function test_TempJit_MultipleTripRecoverCycles() public { - // Offer rca1 (deposited), drain SAM, offer rca2 (undeposited — creates deficit) - IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( + function test_BeforeCollection_JitTopUpStillWorks_WhenDegraded() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAForIndexer( indexer, 100 ether, 1 ether, 3600, 1 ); - bytes16 agreementId = _offerAgreement(rca1); + bytes16 agreementId = _offerAgreement(rca); + // Drain SAM uint256 samBalance = token.balanceOf(address(agreementManager)); if (0 < samBalance) { vm.prank(address(agreementManager)); token.transfer(address(1), samBalance); } - IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( - indexer, - 100 ether, - 1 ether, - 3600, - 2 + // Mint just enough for JIT top-up + 
token.mint(address(agreementManager), 500 ether); + + vm.prank(address(recurringCollector)); + agreementManager.beforeCollection(agreementId, 500 ether); + + // JIT top-up should have succeeded + IPaymentsEscrow.EscrowAccount memory acc; + (acc.balance, acc.tokensThawing, acc.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer ); - vm.prank(operator); - agreementManager.offerAgreement(rca2, _collector()); + assertTrue(500 ether <= acc.balance, "JIT top-up works when degraded"); + } - uint256 undeposited = agreementManager.getTotalEscrowDeficit(); - assertTrue(0 < undeposited, "Has undeposited deficit"); + // ==================== Setters ==================== - // --- Cycle 1: Trip --- - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); + function test_SetMinOnDemandBasisThreshold() public { + assertEq(agreementManager.getMinOnDemandBasisThreshold(), 128, "Default threshold"); - // --- Cycle 1: Recover (mint more than deficit — recovery requires strict deficit < available) --- - token.mint(address(agreementManager), undeposited + 1); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); - assertFalse(agreementManager.isTempJit()); - assertEq(uint256(agreementManager.getEscrowBasis()), uint256(IRecurringEscrowManagement.EscrowBasis.Full)); + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.MinOnDemandBasisThresholdSet(128, 64); - // After recovery, reconcileCollectorProvider deposited into escrow. Drain again and create new deficit. 
- samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); - } + vm.prank(operator); + agreementManager.setMinOnDemandBasisThreshold(64); - IRecurringCollector.RecurringCollectionAgreement memory rca3 = _makeRCAForIndexer( - indexer, - 100 ether, - 1 ether, - 3600, - 3 - ); + assertEq(agreementManager.getMinOnDemandBasisThreshold(), 64, "Updated threshold"); + } + + function test_SetMinOnDemandBasisThreshold_NoopWhenSame() public { + vm.recordLogs(); vm.prank(operator); - agreementManager.offerAgreement(rca3, _collector()); + agreementManager.setMinOnDemandBasisThreshold(128); // same as default - undeposited = agreementManager.getTotalEscrowDeficit(); - assertTrue(0 < undeposited, "New undeposited deficit"); + Vm.Log[] memory logs = vm.getRecordedLogs(); + for (uint256 i = 0; i < logs.length; i++) { + assertTrue( + logs[i].topics[0] != IRecurringEscrowManagement.MinOnDemandBasisThresholdSet.selector, + "Should not emit when unchanged" + ); + } + } - // --- Cycle 2: Trip --- - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(agreementId, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); + function test_SetMinFullBasisMargin() public { + assertEq(agreementManager.getMinFullBasisMargin(), 16, "Default margin"); - // --- Cycle 2: Recover (mint more than deficit) --- - token.mint(address(agreementManager), undeposited + 1); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); - assertFalse(agreementManager.isTempJit()); - assertEq(uint256(agreementManager.getEscrowBasis()), uint256(IRecurringEscrowManagement.EscrowBasis.Full)); + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.MinFullBasisMarginSet(16, 32); + + vm.prank(operator); + agreementManager.setMinFullBasisMargin(32); + + assertEq(agreementManager.getMinFullBasisMargin(), 32, "Updated margin"); } - function 
test_TempJit_MultiProvider() public { - // Offer rca1 (deposited), drain SAM, offer rca2 (creates deficit → 0 < totalEscrowDeficit) - IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( - indexer, - 100 ether, - 1 ether, - 3600, - 1 - ); - bytes16 id1 = _offerAgreement(rca1); + function test_SetMinFullBasisMargin_NoopWhenSame() public { + vm.recordLogs(); + vm.prank(operator); + agreementManager.setMinFullBasisMargin(16); // same as default - // Drain SAM so rca2 can't be deposited - uint256 samBalance = token.balanceOf(address(agreementManager)); - if (0 < samBalance) { - vm.prank(address(agreementManager)); - token.transfer(address(1), samBalance); + Vm.Log[] memory logs = vm.getRecordedLogs(); + for (uint256 i = 0; i < logs.length; i++) { + assertTrue( + logs[i].topics[0] != IRecurringEscrowManagement.MinFullBasisMarginSet.selector, + "Should not emit when unchanged" + ); } + } + + function test_SetMinThawFraction() public { + assertEq(agreementManager.getMinThawFraction(), 16, "Default fraction"); + + vm.expectEmit(address(agreementManager)); + emit IRecurringEscrowManagement.MinThawFractionSet(16, 32); - // Offer rca2 directly (no mint) — escrow stays undeposited, creates deficit - IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForIndexer( - indexer2, - 100 ether, - 1 ether, - 3600, - 2 - ); vm.prank(operator); - agreementManager.offerAgreement(rca2, _collector()); - assertTrue(0 < agreementManager.getTotalEscrowDeficit(), "should have undeposited escrow"); + agreementManager.setMinThawFraction(32); - // Trip via indexer's agreement - vm.prank(address(recurringCollector)); - agreementManager.beforeCollection(id1, 1_000_000 ether); - assertTrue(agreementManager.isTempJit()); + assertEq(agreementManager.getMinThawFraction(), 32, "Updated fraction"); + } - // Both providers should see JIT behavior (thaw everything) - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); - 
agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + function test_SetMinThawFraction_NoopWhenSame() public { + vm.recordLogs(); + vm.prank(operator); + agreementManager.setMinThawFraction(16); // same as default - IPaymentsEscrow.EscrowAccount memory acc1; - (acc1.balance, acc1.tokensThawing, acc1.thawEndTimestamp) = paymentsEscrow.escrowAccounts( - address(agreementManager), - address(recurringCollector), - indexer - ); - IPaymentsEscrow.EscrowAccount memory acc2; - (acc2.balance, acc2.tokensThawing, acc2.thawEndTimestamp) = paymentsEscrow.escrowAccounts( - address(agreementManager), - address(recurringCollector), - indexer2 - ); + Vm.Log[] memory logs = vm.getRecordedLogs(); + for (uint256 i = 0; i < logs.length; i++) { + assertTrue( + logs[i].topics[0] != IRecurringEscrowManagement.MinThawFractionSet.selector, + "Should not emit when unchanged" + ); + } + } - // Both providers should be thawing (JIT mode via temp JIT) - assertEq(acc1.tokensThawing, acc1.balance, "indexer: JIT thaws all"); - assertEq(acc2.tokensThawing, acc2.balance, "indexer2: JIT thaws all"); + function test_SetMinThawFraction_Revert_WhenNotOperator() public { + vm.prank(governor); + vm.expectRevert(); + agreementManager.setMinThawFraction(32); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/issuance/test/unit/agreement-manager/fuzz.t.sol b/packages/issuance/test/unit/agreement-manager/fuzz.t.sol index 26912be11..02469e99e 100644 --- a/packages/issuance/test/unit/agreement-manager/fuzz.t.sol +++ b/packages/issuance/test/unit/agreement-manager/fuzz.t.sol @@ -3,6 +3,7 @@ pragma solidity ^0.8.27; import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { 
IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; @@ -31,9 +32,12 @@ contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTes bytes16 agreementId = _offerAgreement(rca); - uint256 expectedMaxClaim = uint256(maxOngoingTokensPerSecond) * uint256(maxSecondsPerCollection) + - uint256(maxInitialTokens); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), expectedMaxClaim); + uint256 remainingSeconds = endsAt > block.timestamp ? endsAt - block.timestamp : 0; + uint256 effectiveSeconds = remainingSeconds < maxSecondsPerCollection + ? remainingSeconds + : maxSecondsPerCollection; + uint256 expectedMaxClaim = uint256(maxOngoingTokensPerSecond) * effectiveSeconds + uint256(maxInitialTokens); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), expectedMaxClaim); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), expectedMaxClaim); } @@ -58,23 +62,26 @@ contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTes // Fund with a specific amount instead of the default 1M ether token.mint(address(agreementManager), availableTokens); vm.prank(operator); - bytes16 agreementId = agreementManager.offerAgreement(rca, _collector()); + bytes16 agreementId = agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); - uint256 maxNextClaim = agreementManager.getAgreementMaxNextClaim(agreementId); + uint256 maxNextClaim = agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId); (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( address(agreementManager), address(recurringCollector), indexer ); - // In Full mode (default): - // If totalEscrowDeficit < available: Full deposits required (there is buffer). 
- // Otherwise (available <= totalEscrowDeficit): degrades to OnDemand (no buffer, deposit target = 0). - // JIT beforeCollection is the safety net for underfunded escrow. - if (maxNextClaim < availableTokens) { + // In Full mode (default), basis degrades based on spare = balance - totalEscrowDeficit. + // Before deposit: deficit = maxNextClaim, smnca = maxNextClaim. + // spare = availableTokens - maxNextClaim (if availableTokens > maxNextClaim, else 0). + // Full requires smnca * (256+16)/256 = maxNextClaim * 272/256 < spare. + // OnDemand requires smnca * 128/256 = maxNextClaim/2 < spare (but min=0, so no deposit). + // So Full deposits only when availableTokens > maxNextClaim + maxNextClaim * 272/256. + uint256 fullThreshold = maxNextClaim + (maxNextClaim * 272) / 256; + if (fullThreshold < availableTokens) { assertEq(escrowBalance, maxNextClaim); } else { - // Degraded to OnDemand: no deposit (no buffer or insufficient) + // Degraded — no deposit (OnDemand/JIT both have min=0) assertEq(escrowBalance, 0); } } @@ -113,16 +120,23 @@ contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTes _offerAgreement(rca2); uint256 required2 = agreementManager.getSumMaxNextClaim(_collector(), indexer); - uint256 maxClaim1 = uint256(maxOngoing1) * uint256(maxSec1) + uint256(maxInitial1); - uint256 maxClaim2 = uint256(maxOngoing2) * uint256(maxSec2) + uint256(maxInitial2); + uint256 remaining = uint256(block.timestamp + 365 days) - block.timestamp; + uint256 eff1 = remaining < maxSec1 ? remaining : maxSec1; + uint256 eff2 = remaining < maxSec2 ? 
remaining : maxSec2; + uint256 maxClaim1 = uint256(maxOngoing1) * eff1 + uint256(maxInitial1); + uint256 maxClaim2 = uint256(maxOngoing2) * eff2 + uint256(maxInitial2); assertEq(required1, maxClaim1); assertEq(required2, maxClaim1 + maxClaim2); } - // -- revokeOffer / reconcileAgreement -- + // -- cancelAgreement / reconcileAgreement -- - function testFuzz_RevokeOffer_RequiredEscrowDecrements(uint64 maxInitial, uint64 maxOngoing, uint32 maxSec) public { + function testFuzz_CancelOffered_RequiredEscrowDecrements( + uint64 maxInitial, + uint64 maxOngoing, + uint32 maxSec + ) public { vm.assume(0 < maxSec); IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( @@ -137,11 +151,10 @@ contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTes uint256 requiredBefore = agreementManager.getSumMaxNextClaim(_collector(), indexer); assertTrue(0 < requiredBefore || (maxInitial == 0 && maxOngoing == 0)); - vm.prank(operator); - agreementManager.revokeOffer(agreementId); + _cancelAgreement(agreementId); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } function testFuzz_Remove_AfterSPCancel_ClearsState(uint64 maxInitial, uint64 maxOngoing, uint32 maxSec) public { @@ -158,11 +171,11 @@ contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTes bytes16 agreementId = _offerAgreement(rca); _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + 
assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 0); } // -- reconcile -- @@ -196,7 +209,7 @@ contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTes // Warp to collection time vm.warp(collectionAt); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); uint256 postReconcileRequired = agreementManager.getSumMaxNextClaim(_collector(), indexer); @@ -216,6 +229,8 @@ contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTes uint32 updateMaxSec ) public { vm.assume(0 < maxSec && 0 < updateMaxSec); + // Ensure non-zero claim so agreement isn't immediately cleaned up + vm.assume(0 < maxInitial || 0 < maxOngoing); IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( maxInitial, @@ -227,24 +242,30 @@ contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTes bytes16 agreementId = _offerAgreement(rca); - uint256 originalMaxClaim = uint256(maxOngoing) * uint256(maxSec) + uint256(maxInitial); + uint256 remainingOrig = uint256(block.timestamp + 365 days) - block.timestamp; + uint256 effOrig = remainingOrig < maxSec ? 
remainingOrig : maxSec; + uint256 originalMaxClaim = uint256(maxOngoing) * effOrig + uint256(maxInitial); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim); + uint64 updateEndsAt = uint64(block.timestamp + 730 days); IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( agreementId, updateMaxInitial, updateMaxOngoing, 60, updateMaxSec, - uint64(block.timestamp + 730 days), + updateEndsAt, 1 ); _offerAgreementUpdate(rcau); - uint256 pendingMaxClaim = uint256(updateMaxOngoing) * uint256(updateMaxSec) + uint256(updateMaxInitial); + uint256 remainingUpdate = uint256(updateEndsAt) - block.timestamp; + uint256 effUpdate = remainingUpdate < updateMaxSec ? remainingUpdate : updateMaxSec; + uint256 fullPendingMaxClaim = uint256(updateMaxOngoing) * effUpdate + uint256(updateMaxInitial); - // Both original and pending are funded simultaneously - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + // Sum uses max(current, pending) since only one set of terms is active at a time + uint256 expectedSum = fullPendingMaxClaim > originalMaxClaim ? 
fullPendingMaxClaim : originalMaxClaim; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), expectedSum); } // -- reconcileAgreement deadline -- @@ -262,15 +283,15 @@ contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTes bytes16 agreementId = _offerAgreement(rca); // Before deadline: should return true (still claimable) - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertTrue(exists); // Warp past deadline vm.warp(rca.deadline + extraTime); // After deadline: should succeed - agreementManager.reconcileAgreement(agreementId); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } // -- getEscrowAccount -- @@ -289,7 +310,7 @@ contract RecurringAgreementManagerFuzzTest is RecurringAgreementManagerSharedTes token.mint(address(agreementManager), available); vm.prank(operator); - agreementManager.offerAgreement(rca, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); IPaymentsEscrow.EscrowAccount memory expected; (expected.balance, expected.tokensThawing, expected.thawEndTimestamp) = paymentsEscrow.escrowAccounts( diff --git a/packages/issuance/test/unit/agreement-manager/helper.t.sol b/packages/issuance/test/unit/agreement-manager/helper.t.sol index 5a8c95722..29907fa97 100644 --- a/packages/issuance/test/unit/agreement-manager/helper.t.sol +++ b/packages/issuance/test/unit/agreement-manager/helper.t.sol @@ -4,10 +4,12 @@ pragma solidity ^0.8.27; import { Vm } from "forge-std/Vm.sol"; import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import { REGISTERED, ACCEPTED } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { 
IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementHelper } from "../../../contracts/agreement/RecurringAgreementHelper.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; contract RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { /* solhint-disable graph/func-name-mixedcase */ @@ -63,20 +65,20 @@ contract RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { // Fund for reconcile token.mint(address(agreementManager), 1_000_000 ether); - agreementHelper.reconcile(indexer); + agreementHelper.reconcilePair(address(recurringCollector), indexer); // Agreement 1: CanceledBySP -> maxClaim = 0 - assertEq(agreementManager.getAgreementMaxNextClaim(id1), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), id1), 0); // Agreement 2: collected, remaining window large, capped at maxSecondsPerCollection = 7200 // maxClaim = 2e18 * 7200 = 14400e18 (no initial since collected) - assertEq(agreementManager.getAgreementMaxNextClaim(id2), 14400 ether); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), id2), 14400 ether); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 14400 ether); } function test_Reconcile_EmptyProvider() public { // reconcile for a provider with no agreements — should be a no-op address unknown = makeAddr("unknown"); - agreementHelper.reconcile(unknown); + agreementHelper.reconcilePair(address(recurringCollector), unknown); assertEq(agreementManager.getSumMaxNextClaim(_collector(), unknown), 0); } @@ -94,16 +96,22 @@ contract RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); // First reconcile - agreementHelper.reconcile(indexer); + 
agreementHelper.reconcilePair(address(recurringCollector), indexer); uint256 escrowAfterFirst = agreementManager.getSumMaxNextClaim(_collector(), indexer); - uint256 maxClaimAfterFirst = agreementManager.getAgreementMaxNextClaim(agreementId); + uint256 maxClaimAfterFirst = agreementManager.getAgreementMaxNextClaim( + address(recurringCollector), + agreementId + ); // Second reconcile should produce identical results (idempotent) vm.recordLogs(); - agreementHelper.reconcile(indexer); + agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), escrowAfterFirst); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), maxClaimAfterFirst); + assertEq( + agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), + maxClaimAfterFirst + ); // No reconcile event on the second call since nothing changed Vm.Log[] memory logs = vm.getRecordedLogs(); @@ -159,12 +167,12 @@ contract RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { vm.warp(lastCollectionAt); token.mint(address(agreementManager), 1_000_000 ether); - agreementHelper.reconcile(indexer); + agreementHelper.reconcilePair(address(recurringCollector), indexer); - assertEq(agreementManager.getAgreementMaxNextClaim(id1), 0); - assertEq(agreementManager.getAgreementMaxNextClaim(id2), 14400 ether); // 2e18 * 7200 + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), id1), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), id2), 14400 ether); // 2e18 * 7200 // id3 unchanged: 3e18 * 1800 = 5400e18 (pre-offer estimate) - assertEq(agreementManager.getAgreementMaxNextClaim(id3), 5400 ether); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), id3), 5400 ether); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 14400 ether + 5400 ether); } @@ -204,12 +212,13 @@ contract 
RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { bytes16[] memory ids = new bytes16[](2); ids[0] = id1; ids[1] = id2; - agreementHelper.reconcileBatch(ids); + for (uint256 i = 0; i < ids.length; ++i) + agreementManager.reconcileAgreement(address(recurringCollector), ids[i]); // Agreement 1 canceled by SP -> maxNextClaim = 0 - assertEq(agreementManager.getAgreementMaxNextClaim(id1), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), id1), 0); // Agreement 2 accepted, never collected -> maxNextClaim = initial + ongoing - assertEq(agreementManager.getAgreementMaxNextClaim(id2), maxClaim2); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), id2), maxClaim2); // Required should be just agreement 2 now assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim2); } @@ -232,17 +241,19 @@ contract RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { bytes16[] memory ids = new bytes16[](2); ids[0] = fakeId; ids[1] = realId; - agreementHelper.reconcileBatch(ids); + for (uint256 i = 0; i < ids.length; ++i) + agreementManager.reconcileAgreement(address(recurringCollector), ids[i]); // Real agreement should still be tracked uint256 maxClaim = 1 ether * 3600 + 100 ether; - assertEq(agreementManager.getAgreementMaxNextClaim(realId), maxClaim); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), realId), maxClaim); } function test_ReconcileBatch_Empty() public { // Empty array — should succeed silently bytes16[] memory ids = new bytes16[](0); - agreementHelper.reconcileBatch(ids); + for (uint256 i = 0; i < ids.length; ++i) + agreementManager.reconcileAgreement(address(recurringCollector), ids[i]); } function test_ReconcileBatch_CrossIndexer() public { @@ -284,7 +295,8 @@ contract RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { bytes16[] memory ids = new bytes16[](2); ids[0] = id1; ids[1] = id2; - 
agreementHelper.reconcileBatch(ids); + for (uint256 i = 0; i < ids.length; ++i) + agreementManager.reconcileAgreement(address(recurringCollector), ids[i]); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), 0); @@ -306,7 +318,29 @@ contract RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { bytes16[] memory ids = new bytes16[](1); ids[0] = agreementId; vm.prank(anyone); - agreementHelper.reconcileBatch(ids); + for (uint256 i = 0; i < ids.length; ++i) + agreementManager.reconcileAgreement(address(recurringCollector), ids[i]); + } + + function _setSimulatedAgreement( + bytes16 agreementId, + address dataService, + address _payer, + address _serviceProvider + ) private { + MockRecurringCollector.AgreementStorage memory simData; + simData.dataService = dataService; + simData.payer = _payer; + simData.serviceProvider = _serviceProvider; + simData.acceptedAt = uint64(block.timestamp); + simData.updateNonce = 1; + simData.state = REGISTERED | ACCEPTED; + simData.activeTerms.endsAt = uint64(block.timestamp + 730 days); + simData.activeTerms.maxInitialTokens = 200 ether; + simData.activeTerms.maxOngoingTokensPerSecond = 2 ether; + simData.activeTerms.minSecondsPerCollection = 60; + simData.activeTerms.maxSecondsPerCollection = 7200; + recurringCollector.setAgreement(agreementId, simData); } function test_ReconcileBatch_ClearsPendingUpdate() public { @@ -319,7 +353,6 @@ contract RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { bytes16 agreementId = _offerAgreement(rca); - // Offer a pending update (nonce 1) IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( agreementId, 200 ether, @@ -331,37 +364,19 @@ contract RecurringAgreementHelperTest is RecurringAgreementManagerSharedTest { ); _offerAgreementUpdate(rcau); - uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; - uint256 pendingMaxClaim = 2 ether * 7200 + 200 
ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + // max(current, pending) = max(3700, 14600) = 14600 + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 14600 ether); - // Simulate: accepted with the update already applied (pending <= updateNonce) - recurringCollector.setAgreement( - agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: rcau.endsAt, - maxInitialTokens: rcau.maxInitialTokens, - maxOngoingTokensPerSecond: rcau.maxOngoingTokensPerSecond, - minSecondsPerCollection: rcau.minSecondsPerCollection, - maxSecondsPerCollection: rcau.maxSecondsPerCollection, - updateNonce: 1, // matches pending nonce, so update was applied - canceledAt: 0, - state: IRecurringCollector.AgreementState.Accepted - }) - ); + // Simulate: accepted with the update already applied + _setSimulatedAgreement(agreementId, rca.dataService, rca.payer, rca.serviceProvider); bytes16[] memory ids = new bytes16[](1); ids[0] = agreementId; - agreementHelper.reconcileBatch(ids); + for (uint256 i = 0; i < ids.length; ++i) + agreementManager.reconcileAgreement(address(recurringCollector), ids[i]); // Pending should be cleared; required escrow should be based on new terms - uint256 newMaxClaim = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), newMaxClaim); + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 2 ether * 7200 + 200 ether); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/issuance/test/unit/agreement-manager/helperAudit.t.sol b/packages/issuance/test/unit/agreement-manager/helperAudit.t.sol index f957eee9f..b36be8fab 100644 --- a/packages/issuance/test/unit/agreement-manager/helperAudit.t.sol +++ 
b/packages/issuance/test/unit/agreement-manager/helperAudit.t.sol @@ -3,6 +3,7 @@ pragma solidity ^0.8.27; import { IRecurringAgreementHelper } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol"; import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { REGISTERED, ACCEPTED, OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; @@ -41,6 +42,8 @@ contract RecurringAgreementHelperAuditTest is RecurringAgreementManagerSharedTes maxOngoingTokensPerSecond: 1 ether, minSecondsPerCollection: 60, maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: nonce, metadata: "" }); @@ -52,7 +55,8 @@ contract RecurringAgreementHelperAuditTest is RecurringAgreementManagerSharedTes ) internal returns (bytes16) { token.mint(address(agreementManager), 1_000_000 ether); vm.prank(operator); - return agreementManager.offerAgreement(rca, IRecurringCollector(address(collector))); + return + agreementManager.offerAgreement(IRecurringCollector(address(collector)), OFFER_TYPE_NEW, abi.encode(rca)); } // -- Tests: auditGlobal -- @@ -62,9 +66,9 @@ contract RecurringAgreementHelperAuditTest is RecurringAgreementManagerSharedTes assertEq(g.tokenBalance, 0); assertEq(g.sumMaxNextClaimAll, 0); assertEq(g.totalEscrowDeficit, 0); - assertEq(g.totalAgreementCount, 0); assertEq(uint256(g.escrowBasis), uint256(IRecurringEscrowManagement.EscrowBasis.Full)); - assertFalse(g.tempJit); + assertEq(g.minOnDemandBasisThreshold, 128); + assertEq(g.minFullBasisMargin, 16); assertEq(g.collectorCount, 0); } @@ -80,7 +84,6 @@ contract RecurringAgreementHelperAuditTest is RecurringAgreementManagerSharedTes IRecurringAgreementHelper.GlobalAudit 
memory g = agreementHelper.auditGlobal(); assertEq(g.sumMaxNextClaimAll, maxClaim); - assertEq(g.totalAgreementCount, 1); assertEq(g.collectorCount, 1); // Token balance is the minted amount minus what was deposited to escrow assertTrue(0 < g.tokenBalance); @@ -98,7 +101,6 @@ contract RecurringAgreementHelperAuditTest is RecurringAgreementManagerSharedTes _offerForCollector(collector2, rca2); IRecurringAgreementHelper.GlobalAudit memory g = agreementHelper.auditGlobal(); - assertEq(g.totalAgreementCount, 2); assertEq(g.collectorCount, 2); } @@ -139,7 +141,7 @@ contract RecurringAgreementHelperAuditTest is RecurringAgreementManagerSharedTes // Cancel by SP to make maxNextClaim = 0, then reconcile (thaw starts) _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); IRecurringAgreementHelper.PairAudit memory p = agreementHelper.auditPair(address(recurringCollector), indexer); // sumMaxNextClaim should be 0 after reconcile @@ -217,6 +219,85 @@ contract RecurringAgreementHelperAuditTest is RecurringAgreementManagerSharedTes assertEq(empty.length, 0); } + // -- Tests: getPairAgreements (paginated) -- + + function test_GetPairAgreements_Paginated() public { + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForCollector( + recurringCollector, + indexer, + 1 + ); + _offerAgreement(rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForCollector( + recurringCollector, + indexer, + 2 + ); + _offerAgreement(rca2); + + // Full list + bytes16[] memory all = agreementHelper.getPairAgreements(address(recurringCollector), indexer); + assertEq(all.length, 2); + + // First page + bytes16[] memory first = agreementHelper.getPairAgreements(address(recurringCollector), indexer, 0, 1); + assertEq(first.length, 1); + assertEq(first[0], all[0]); + + // Second page + bytes16[] memory second = 
agreementHelper.getPairAgreements(address(recurringCollector), indexer, 1, 1); + assertEq(second.length, 1); + assertEq(second[0], all[1]); + + // Past end + bytes16[] memory empty = agreementHelper.getPairAgreements(address(recurringCollector), indexer, 2, 1); + assertEq(empty.length, 0); + + // Count larger than remaining + bytes16[] memory clamped = agreementHelper.getPairAgreements(address(recurringCollector), indexer, 1, 100); + assertEq(clamped.length, 1); + assertEq(clamped[0], all[1]); + } + + // -- Tests: getCollectors (paginated) -- + + function test_GetCollectors_Paginated() public { + // Create agreements under two different collectors to register them + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForCollector( + recurringCollector, + indexer, + 1 + ); + _offerAgreement(rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCAForCollector(collector2, indexer, 2); + _offerForCollector(collector2, rca2); + + // Full list + address[] memory all = agreementHelper.getCollectors(); + assertEq(all.length, 2); + + // First page + address[] memory first = agreementHelper.getCollectors(0, 1); + assertEq(first.length, 1); + assertEq(first[0], all[0]); + + // Second page + address[] memory second = agreementHelper.getCollectors(1, 1); + assertEq(second.length, 1); + assertEq(second[0], all[1]); + + // Past end + address[] memory empty = agreementHelper.getCollectors(2, 1); + assertEq(empty.length, 0); + + // Count larger than remaining + address[] memory clamped = agreementHelper.getCollectors(1, 100); + assertEq(clamped.length, 1); + assertEq(clamped[0], all[1]); + } + function test_AuditPairs_IsolatesCollectors() public { IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForCollector( recurringCollector, @@ -235,5 +316,39 @@ contract RecurringAgreementHelperAuditTest is RecurringAgreementManagerSharedTes assertEq(c2Pairs.length, 1); } + // -- checkPairStaleness -- + + function 
test_CheckPairStaleness_DetectsStaleAgreement() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 60, + 3600, + uint64(block.timestamp + 365 days) + ); + token.mint(address(agreementManager), 1_000_000 ether); + bytes16 agreementId = _offerAgreement(rca); + + // Fresh state: cached == live + (IRecurringAgreementHelper.AgreementStaleness[] memory stale, bool escrowStale) = + agreementHelper.checkPairStaleness(address(recurringCollector), indexer); + assertEq(stale.length, 1); + assertEq(stale[0].agreementId, agreementId); + assertFalse(stale[0].stale, "Should not be stale when cached == live"); + + // Make it stale: modify the collector's agreement so getMaxNextClaim diverges + MockRecurringCollector.AgreementStorage memory mockData = _buildAgreementStorage( + rca, REGISTERED | ACCEPTED, uint64(block.timestamp), rca.endsAt, 0 + ); + mockData.activeTerms.maxOngoingTokensPerSecond = 2 ether; // double the rate + recurringCollector.setAgreement(agreementId, mockData); + + // Now cached != live + (stale, escrowStale) = agreementHelper.checkPairStaleness(address(recurringCollector), indexer); + assertEq(stale.length, 1); + assertTrue(stale[0].stale, "Should be stale when collector rate changed"); + assertTrue(stale[0].liveMaxNextClaim > stale[0].cachedMaxNextClaim); + } + /* solhint-enable graph/func-name-mixedcase */ } diff --git a/packages/issuance/test/unit/agreement-manager/helperCleanup.t.sol b/packages/issuance/test/unit/agreement-manager/helperCleanup.t.sol index 8a56264f2..78423181e 100644 --- a/packages/issuance/test/unit/agreement-manager/helperCleanup.t.sol +++ b/packages/issuance/test/unit/agreement-manager/helperCleanup.t.sol @@ -1,6 +1,14 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PROVIDER, + OFFER_TYPE_NEW +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } 
from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; @@ -39,7 +47,8 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT ) internal returns (bytes16) { token.mint(address(agreementManager), 1_000_000 ether); vm.prank(operator); - return agreementManager.offerAgreement(rca, IRecurringCollector(address(collector))); + return + agreementManager.offerAgreement(IRecurringCollector(address(collector)), OFFER_TYPE_NEW, abi.encode(rca)); } function _setCanceledBySPOnCollector( @@ -49,21 +58,13 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT ) internal { collector.setAgreement( agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: rca.endsAt, - maxInitialTokens: rca.maxInitialTokens, - maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, - minSecondsPerCollection: rca.minSecondsPerCollection, - maxSecondsPerCollection: rca.maxSecondsPerCollection, - updateNonce: 0, - canceledAt: uint64(block.timestamp), - state: IRecurringCollector.AgreementState.CanceledByServiceProvider - }) + _buildAgreementStorage( + rca, + REGISTERED | ACCEPTED | NOTICE_GIVEN | SETTLED | BY_PROVIDER, + uint64(block.timestamp), + uint64(block.timestamp), + 0 + ) ); } @@ -74,9 +75,9 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT bytes16 id = _offerAgreement(rca); _setAgreementCanceledBySP(id, rca); - uint256 removed = agreementHelper.reconcile(indexer); + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 1); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } function 
test_Reconcile_SkipsStillClaimable() public { @@ -84,9 +85,9 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT bytes16 id = _offerAgreement(rca); _setAgreementAccepted(id, rca, uint64(block.timestamp)); - uint256 removed = agreementHelper.reconcile(indexer); + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } function test_Reconcile_MixedStates() public { @@ -100,13 +101,13 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT bytes16 id2 = _offerAgreement(rca2); _setAgreementAccepted(id2, rca2, uint64(block.timestamp)); - uint256 removed = agreementHelper.reconcile(indexer); + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 1); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } function test_Reconcile_EmptyProvider() public { - uint256 removed = agreementHelper.reconcile(indexer); + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 0); } @@ -117,9 +118,9 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT // Warp past deadline vm.warp(rca.deadline + 1); - uint256 removed = agreementHelper.reconcile(indexer); + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 1); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } function test_Reconcile_Permissionless() public { @@ -129,7 +130,7 @@ contract RecurringAgreementHelperCleanupTest is 
RecurringAgreementManagerSharedT address anyone = makeAddr("anyone"); vm.prank(anyone); - uint256 removed = agreementHelper.reconcile(indexer); + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 1); } @@ -146,11 +147,11 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT // Drain escrow, then pair can be removed vm.warp(block.timestamp + 1 days + 1); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); (, pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertFalse(pairExists); - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 0); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 0); } function test_ReconcilePair_PairExistsWhenAgreementsRemain() public { @@ -205,8 +206,8 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT // Drain escrows, then collector can be removed vm.warp(block.timestamp + 1 days + 1); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + agreementManager.reconcileProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer2); (, collectorExists) = agreementHelper.reconcileCollector(address(recurringCollector)); assertFalse(collectorExists); @@ -243,13 +244,12 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT uint256 removed = agreementHelper.reconcileAll(); assertEq(removed, 2); - assertEq(agreementManager.getTotalAgreementCount(), 0); assertEq(agreementManager.getCollectorCount(), 2); // escrow still thawing // Drain escrows, then collectors can be removed vm.warp(block.timestamp + 1 days + 1); - 
agreementManager.reconcileCollectorProvider(address(_collector()), indexer); - agreementManager.reconcileCollectorProvider(address(collector2), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(collector2), indexer); agreementHelper.reconcileAll(); assertEq(agreementManager.getCollectorCount(), 0); @@ -273,7 +273,6 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT uint256 removed = agreementHelper.reconcileAll(); assertEq(removed, 1); - assertEq(agreementManager.getTotalAgreementCount(), 1); } // -- Tests: reconcilePair (value reconciliation + cleanup) -- @@ -330,9 +329,9 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT // Set as CanceledBySP — after reconcile, maxNextClaim=0, then removable _setAgreementCanceledBySP(id, rca); - uint256 removed = agreementHelper.reconcile(indexer); + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 1); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } function test_Reconcile_NoopWhenAllActive() public { @@ -340,9 +339,9 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT bytes16 id = _offerAgreement(rca); _setAgreementAccepted(id, rca, uint64(block.timestamp)); - uint256 removed = agreementHelper.reconcile(indexer); + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } // -- Tests: reconcilePair does reconcile+cleanup+pair removal -- @@ -358,7 +357,7 @@ contract RecurringAgreementHelperCleanupTest is RecurringAgreementManagerSharedT // Drain escrow, then pair can be 
removed vm.warp(block.timestamp + 1 days + 1); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); (, pairExists) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertFalse(pairExists); diff --git a/packages/issuance/test/unit/agreement-manager/lifecycle.t.sol b/packages/issuance/test/unit/agreement-manager/lifecycle.t.sol index 843d929ea..47f16b424 100644 --- a/packages/issuance/test/unit/agreement-manager/lifecycle.t.sol +++ b/packages/issuance/test/unit/agreement-manager/lifecycle.t.sol @@ -2,8 +2,15 @@ pragma solidity ^0.8.27; import { IRecurringAgreementHelper } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementHelper.sol"; -import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; import { IRecurringEscrowManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringEscrowManagement.sol"; +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PROVIDER, + OFFER_TYPE_NEW +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; @@ -47,6 +54,8 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest maxOngoingTokensPerSecond: maxOngoing, minSecondsPerCollection: 60, maxSecondsPerCollection: maxSec, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: nonce, metadata: "" }); @@ -58,7 +67,8 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest ) internal returns (bytes16) { token.mint(address(agreementManager), 1_000_000 ether); vm.prank(operator); - return agreementManager.offerAgreement(rca, IRecurringCollector(address(collector))); + return 
+ agreementManager.offerAgreement(IRecurringCollector(address(collector)), OFFER_TYPE_NEW, abi.encode(rca)); } function _setCanceledBySPOnCollector( @@ -68,21 +78,13 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest ) internal { collector.setAgreement( agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: rca.endsAt, - maxInitialTokens: rca.maxInitialTokens, - maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, - minSecondsPerCollection: rca.minSecondsPerCollection, - maxSecondsPerCollection: rca.maxSecondsPerCollection, - updateNonce: 0, - canceledAt: uint64(block.timestamp), - state: IRecurringCollector.AgreementState.CanceledByServiceProvider - }) + _buildAgreementStorage( + rca, + REGISTERED | ACCEPTED | NOTICE_GIVEN | SETTLED | BY_PROVIDER, + uint64(block.timestamp), + uint64(block.timestamp), + 0 + ) ); } @@ -91,8 +93,6 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest function test_Lifecycle_OfferAcceptCancelReconcileCleanup() public { // 1. Start empty IRecurringAgreementHelper.GlobalAudit memory g = agreementHelper.auditGlobal(); - assertEq(g.totalAgreementCount, 0); - // 2. Offer IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCAFor( recurringCollector, @@ -107,7 +107,6 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest // 3. Audit: agreement tracked, escrow deposited g = agreementHelper.auditGlobal(); - assertEq(g.totalAgreementCount, 1); assertEq(g.sumMaxNextClaimAll, maxClaim); assertEq(g.collectorCount, 1); @@ -124,7 +123,7 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest _setAgreementCollected(agreementId, rca, uint64(block.timestamp - 1800), uint64(block.timestamp)); // 6. 
Reconcile — maxInitialTokens drops out after first collection - agreementHelper.reconcile(indexer); + agreementHelper.reconcilePair(address(recurringCollector), indexer); uint256 reducedMaxClaim = 1 ether * 3600; // no more initial assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), reducedMaxClaim); @@ -132,12 +131,11 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest _setAgreementCanceledBySP(agreementId, rca); // 8. Reconcile - uint256 removed = agreementHelper.reconcile(indexer); + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 1); // 9. Agreements gone, but escrow still thawing — collector stays tracked g = agreementHelper.auditGlobal(); - assertEq(g.totalAgreementCount, 0); assertEq(g.sumMaxNextClaimAll, 0); assertEq(g.collectorCount, 1); // still tracked — escrow not yet drained @@ -147,7 +145,7 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest // 11. Wait for thaw and withdraw vm.warp(block.timestamp + THAW_PERIOD + 1); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); p = agreementHelper.auditPair(address(recurringCollector), indexer); assertEq(p.escrow.balance, 0); @@ -187,9 +185,9 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest IRecurringAgreementHelper.GlobalAudit memory g = agreementHelper.auditGlobal(); assertEq(uint256(g.escrowBasis), uint256(IRecurringEscrowManagement.EscrowBasis.OnDemand)); - // reconcileCollectorProvider — OnDemand has min=0, max=sumMaxNextClaim. + // reconcileProvider — OnDemand has min=0, max=sumMaxNextClaim. 
// Balance == max so no thaw needed (balanced) - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); p = agreementHelper.auditPair(address(recurringCollector), indexer); // In OnDemand with balance == max, no thaw @@ -198,14 +196,14 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest // Switch to JustInTime — should start thawing everything vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.JustInTime); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); p = agreementHelper.auditPair(address(recurringCollector), indexer); assertEq(p.escrow.tokensThawing, maxClaim); // thawing everything // Wait for thaw and withdraw vm.warp(block.timestamp + THAW_PERIOD + 1); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); p = agreementHelper.auditPair(address(recurringCollector), indexer); assertEq(p.escrow.balance, 0); @@ -213,7 +211,7 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest // Switch back to Full — should deposit again vm.prank(operator); agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.Full); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); p = agreementHelper.auditPair(address(recurringCollector), indexer); assertEq(p.escrow.balance, maxClaim); @@ -259,7 +257,6 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest // Audit global IRecurringAgreementHelper.GlobalAudit memory g = agreementHelper.auditGlobal(); - assertEq(g.totalAgreementCount, 3); assertEq(g.sumMaxNextClaimAll, maxClaim1 + maxClaim2 + maxClaim3); 
assertEq(g.collectorCount, 2); @@ -282,11 +279,10 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest assertTrue(pairExists); // escrow still thawing // collector1 still has indexer2 (+ c1+indexer pair tracked due to thawing escrow) - assertEq(agreementManager.getCollectorProviderCount(address(recurringCollector)), 2); + assertEq(agreementManager.getProviderCount(address(recurringCollector)), 2); // Global state updated g = agreementHelper.auditGlobal(); - assertEq(g.totalAgreementCount, 2); assertEq(g.sumMaxNextClaimAll, maxClaim2 + maxClaim3); // Cancel remaining and full reconcile @@ -299,7 +295,6 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest // Agreements gone, but escrows still thawing — collectors stay tracked g = agreementHelper.auditGlobal(); - assertEq(g.totalAgreementCount, 0); assertEq(g.sumMaxNextClaimAll, 0); assertEq(g.collectorCount, 2); // still tracked — escrow not yet drained @@ -318,9 +313,9 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest // Wait for thaw, withdraw all vm.warp(block.timestamp + THAW_PERIOD + 1); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); - agreementManager.reconcileCollectorProvider(address(collector2), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer2); + agreementManager.reconcileProvider(address(collector2), indexer); // All escrows drained p1 = agreementHelper.auditPair(address(recurringCollector), indexer); @@ -335,7 +330,7 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest assertEq(p3.escrow.balance, 0); assertEq(p3.escrow.tokensThawing, 0); - // Now reconcile tracking (escrow drained, so reconcileCollectorProvider succeeds) + // Now reconcile tracking (escrow drained, so 
reconcileProvider succeeds) agreementHelper.reconcileAll(); g = agreementHelper.auditGlobal(); @@ -356,16 +351,16 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest _offerAgreement(rca); // Before deadline: not removable - uint256 removed = agreementHelper.reconcile(indexer); + (uint256 removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 0); // Warp past deadline vm.warp(rca.deadline + 1); // Now removable - removed = agreementHelper.reconcile(indexer); + (removed, ) = agreementHelper.reconcilePair(address(recurringCollector), indexer); assertEq(removed, 1); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); // Escrow deposited in Full mode should now be thawing IRecurringAgreementHelper.PairAudit memory p = agreementHelper.auditPair(address(recurringCollector), indexer); @@ -373,7 +368,7 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest // Wait for thaw and withdraw vm.warp(block.timestamp + THAW_PERIOD + 1); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); p = agreementHelper.auditPair(address(recurringCollector), indexer); assertEq(p.escrow.balance, 0); @@ -427,7 +422,7 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest // Wait for thaw, then drain collector1's escrow vm.warp(block.timestamp + THAW_PERIOD + 1); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); p1 = agreementHelper.auditPair(address(recurringCollector), indexer); assertEq(p1.escrow.balance, 0); @@ -466,7 +461,7 @@ contract RecurringAgreementLifecycleTest is RecurringAgreementManagerSharedTest // Switch to Full vm.prank(operator); 
agreementManager.setEscrowBasis(IRecurringEscrowManagement.EscrowBasis.Full); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); p = agreementHelper.auditPair(address(recurringCollector), indexer); assertEq(p.escrow.balance, maxClaim); // Full deposits everything diff --git a/packages/issuance/test/unit/agreement-manager/mocks/MockRecurringCollector.sol b/packages/issuance/test/unit/agreement-manager/mocks/MockRecurringCollector.sol index 36275f404..c7956c66b 100644 --- a/packages/issuance/test/unit/agreement-manager/mocks/MockRecurringCollector.sol +++ b/packages/issuance/test/unit/agreement-manager/mocks/MockRecurringCollector.sol @@ -1,50 +1,230 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PAYER, + BY_PROVIDER, + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IAgreementCollector } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; - /// @notice Minimal mock of RecurringCollector for RecurringAgreementManager testing. /// Stores agreement data set by tests, computes agreementId and hashRCA deterministically. contract MockRecurringCollector { - mapping(bytes16 => IRecurringCollector.AgreementData) private _agreements; + /// @dev Internal storage layout for mock agreements. Mirrors the real RecurringCollector's + /// internal AgreementStorage struct so tests can set up state with full fidelity. 
+ struct AgreementStorage { + address dataService; + uint64 acceptedAt; + uint32 updateNonce; + address payer; + uint64 lastCollectionAt; + uint16 state; + address serviceProvider; + uint64 collectableUntil; + IRecurringCollector.AgreementTerms activeTerms; + IRecurringCollector.AgreementTerms pendingTerms; + } + + mapping(bytes16 => AgreementStorage) private _agreements; mapping(bytes16 => bool) private _agreementExists; // -- Test helpers -- - function setAgreement(bytes16 agreementId, IRecurringCollector.AgreementData memory data) external { + function setAgreement(bytes16 agreementId, AgreementStorage memory data) external { _agreements[agreementId] = data; _agreementExists[agreementId] = true; } // -- IRecurringCollector subset -- - function getAgreement(bytes16 agreementId) external view returns (IRecurringCollector.AgreementData memory) { - return _agreements[agreementId]; + function getAgreementData(bytes16 agreementId) external view returns (IRecurringCollector.AgreementData memory data_) { + AgreementStorage storage a = _agreements[agreementId]; + data_.agreementId = agreementId; + data_.payer = a.payer; + data_.serviceProvider = a.serviceProvider; + data_.dataService = a.dataService; + data_.acceptedAt = a.acceptedAt; + data_.lastCollectionAt = a.lastCollectionAt; + data_.collectableUntil = a.collectableUntil; + data_.updateNonce = a.updateNonce; + data_.state = a.state; + // Simplified collectability: accepted and not settled + data_.isCollectable = (a.state & ACCEPTED) != 0 && (a.state & SETTLED) == 0; + data_.collectionSeconds = 0; // Not needed for agreement manager tests + } + + function getAgreementVersionAt( + bytes16 agreementId, + uint256 index + ) external view returns (IAgreementCollector.AgreementVersion memory version) { + AgreementStorage storage a = _agreements[agreementId]; + version.agreementId = agreementId; + version.state = a.state; + if (index == 0) { + version.versionHash = a.activeTerms.hash; + } else if (index == 1) { + 
version.versionHash = a.pendingTerms.hash; + } } function getMaxNextClaim(bytes16 agreementId) external view returns (uint256) { - IRecurringCollector.AgreementData memory a = _agreements[agreementId]; - // Mirror RecurringCollector._getMaxNextClaim logic - if (a.state == IRecurringCollector.AgreementState.CanceledByServiceProvider) return 0; - if ( - a.state != IRecurringCollector.AgreementState.Accepted && - a.state != IRecurringCollector.AgreementState.CanceledByPayer - ) return 0; - - uint256 collectionStart = 0 < a.lastCollectionAt ? a.lastCollectionAt : a.acceptedAt; + return this.getMaxNextClaim(agreementId, 3); + } + + function getMaxNextClaim(bytes16 agreementId, uint8 claimScope) external view returns (uint256 maxClaim) { + AgreementStorage storage a = _agreements[agreementId]; + if (claimScope & 1 != 0) { + maxClaim = _mockClaimForTerms(a, a.activeTerms); + } + if (claimScope & 2 != 0) { + uint256 pendingClaim = _mockClaimForTerms(a, a.pendingTerms); + if (pendingClaim > maxClaim) maxClaim = pendingClaim; + } + } + + function _mockClaimForTerms( + AgreementStorage storage a, + IRecurringCollector.AgreementTerms memory terms + ) private view returns (uint256) { + if (terms.endsAt == 0) return 0; + uint256 collectionStart; uint256 collectionEnd; - if (a.state == IRecurringCollector.AgreementState.CanceledByPayer) { - collectionEnd = a.canceledAt < a.endsAt ? 
a.canceledAt : a.endsAt; + + uint16 s = a.state; + bool isRegistered = (s & REGISTERED) != 0; + bool isAccepted = (s & ACCEPTED) != 0; + bool isTerminated = (s & NOTICE_GIVEN) != 0; + bool isByPayer = (s & BY_PAYER) != 0; + + if (isRegistered && !isAccepted && !isTerminated) { + // Offered (REGISTERED only) + if (a.dataService == address(0)) return 0; + if (terms.deadline != 0 && block.timestamp > terms.deadline) return 0; + collectionStart = block.timestamp; + collectionEnd = terms.endsAt; + } else if (isRegistered && isAccepted && !isTerminated) { + // Accepted (REGISTERED | ACCEPTED) + collectionStart = 0 < a.lastCollectionAt ? a.lastCollectionAt : a.acceptedAt; + collectionEnd = terms.endsAt; + } else if (isRegistered && isAccepted && isTerminated && isByPayer) { + // CanceledByPayer (REGISTERED | ACCEPTED | NOTICE_GIVEN | BY_PAYER) + collectionStart = 0 < a.lastCollectionAt ? a.lastCollectionAt : a.acceptedAt; + collectionEnd = a.collectableUntil < terms.endsAt ? a.collectableUntil : terms.endsAt; } else { - collectionEnd = a.endsAt; + return 0; } - if (collectionEnd <= collectionStart) return 0; + if (collectionEnd <= collectionStart) return 0; uint256 windowSeconds = collectionEnd - collectionStart; - uint256 maxSeconds = windowSeconds < a.maxSecondsPerCollection ? windowSeconds : a.maxSecondsPerCollection; - uint256 maxClaim = a.maxOngoingTokensPerSecond * maxSeconds; - if (a.lastCollectionAt == 0) maxClaim += a.maxInitialTokens; - return maxClaim; + uint256 maxSeconds = windowSeconds < terms.maxSecondsPerCollection + ? 
windowSeconds + : terms.maxSecondsPerCollection; + uint256 claim = terms.maxOngoingTokensPerSecond * maxSeconds; + if (a.lastCollectionAt == 0) claim += terms.maxInitialTokens; + return claim; + } + + function offer( + uint8 offerType, + bytes calldata data, + uint16 /* options */ + ) external returns (IRecurringCollector.OfferResult memory result) { + if (offerType == OFFER_TYPE_NEW) { + IRecurringCollector.RecurringCollectionAgreement memory rca = abi.decode( + data, + (IRecurringCollector.RecurringCollectionAgreement) + ); + require(msg.sender == rca.payer, "MockRecurringCollector: unauthorized payer"); + result.agreementId = _storeOffer(rca); + result.dataService = rca.dataService; + result.serviceProvider = rca.serviceProvider; + } else if (offerType == OFFER_TYPE_UPDATE) { + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = abi.decode( + data, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + _storeUpdate(rcau); + result.agreementId = rcau.agreementId; + AgreementStorage storage a = _agreements[rcau.agreementId]; + result.dataService = a.dataService; + result.serviceProvider = a.serviceProvider; + } + // No callback to msg.sender — caller reconciles after return (see RecurringCollector callback model) + } + + function _storeOffer(IRecurringCollector.RecurringCollectionAgreement memory rca) internal returns (bytes16) { + bytes16 agreementId = bytes16( + keccak256(abi.encode(rca.payer, rca.dataService, rca.serviceProvider, rca.deadline, rca.nonce)) + ); + AgreementStorage storage agreement = _agreements[agreementId]; + agreement.dataService = rca.dataService; + agreement.payer = rca.payer; + agreement.serviceProvider = rca.serviceProvider; + agreement.state = REGISTERED; + agreement.acceptedAt = 0; + agreement.lastCollectionAt = 0; + agreement.updateNonce = 0; + agreement.collectableUntil = 0; + agreement.activeTerms.deadline = rca.deadline; + agreement.activeTerms.endsAt = rca.endsAt; + agreement.activeTerms.maxInitialTokens 
= rca.maxInitialTokens; + agreement.activeTerms.maxOngoingTokensPerSecond = rca.maxOngoingTokensPerSecond; + agreement.activeTerms.minSecondsPerCollection = rca.minSecondsPerCollection; + agreement.activeTerms.maxSecondsPerCollection = rca.maxSecondsPerCollection; + agreement.activeTerms.conditions = rca.conditions; + agreement.activeTerms.hash = keccak256( + abi.encode("rca", rca.payer, rca.dataService, rca.serviceProvider, rca.deadline, rca.nonce, rca.endsAt) + ); + delete agreement.pendingTerms; + _agreementExists[agreementId] = true; + return agreementId; + } + + function _storeUpdate(IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau) internal { + AgreementStorage storage agreement = _agreements[rcau.agreementId]; + // Validate nonce: must be exactly updateNonce + 1 + require(rcau.nonce == agreement.updateNonce + 1, "MockRecurringCollector: invalid nonce"); + agreement.pendingTerms.endsAt = rcau.endsAt; + agreement.pendingTerms.maxInitialTokens = rcau.maxInitialTokens; + agreement.pendingTerms.maxOngoingTokensPerSecond = rcau.maxOngoingTokensPerSecond; + agreement.pendingTerms.minSecondsPerCollection = rcau.minSecondsPerCollection; + agreement.pendingTerms.maxSecondsPerCollection = rcau.maxSecondsPerCollection; + agreement.pendingTerms.conditions = rcau.conditions; + agreement.pendingTerms.hash = keccak256(abi.encode("rcau", rcau.agreementId, rcau.nonce, rcau.endsAt)); + agreement.updateNonce = rcau.nonce; + } + + function cancel(bytes16 agreementId, bytes32 termsHash, uint16 /* options */) external { + AgreementStorage storage agreement = _agreements[agreementId]; + if (termsHash == agreement.pendingTerms.hash && agreement.pendingTerms.endsAt > 0) { + delete agreement.pendingTerms; + } else { + _cancelInternal(agreementId, BY_PAYER); + } + // No callback to msg.sender — caller reconciles after return (see RecurringCollector callback model) + } + + function _cancelInternal(bytes16 agreementId, uint16 byFlag) private { + AgreementStorage storage 
agreement = _agreements[agreementId]; + agreement.collectableUntil = uint64(block.timestamp); + bool isAccepted = (agreement.state & ACCEPTED) != 0; + if (!isAccepted) { + // Pre-accept cancel (Withdrawn): REGISTERED | NOTICE_GIVEN | SETTLED + agreement.state = REGISTERED | NOTICE_GIVEN | SETTLED; + } else if (byFlag == BY_PROVIDER) { + // Provider cancel: REGISTERED | ACCEPTED | NOTICE_GIVEN | SETTLED | BY_PROVIDER + agreement.state = REGISTERED | ACCEPTED | NOTICE_GIVEN | SETTLED | BY_PROVIDER; + } else { + // Payer cancel: REGISTERED | ACCEPTED | NOTICE_GIVEN | BY_PAYER + agreement.state = REGISTERED | ACCEPTED | NOTICE_GIVEN | byFlag; + } + delete agreement.pendingTerms; } function generateAgreementId( @@ -58,22 +238,26 @@ contract MockRecurringCollector { } function hashRCA(IRecurringCollector.RecurringCollectionAgreement calldata rca) external pure returns (bytes32) { - return - keccak256( - abi.encode( - rca.deadline, - rca.endsAt, - rca.payer, - rca.dataService, - rca.serviceProvider, - rca.maxInitialTokens, - rca.maxOngoingTokensPerSecond, - rca.minSecondsPerCollection, - rca.maxSecondsPerCollection, - rca.nonce, - rca.metadata - ) - ); + // Delegated to helper to avoid stack-too-deep without optimizer (12 fields exceeds EVM stack limit) + return _hashRCAHelper( + rca.deadline, rca.endsAt, rca.payer, rca.dataService, + rca.serviceProvider, rca.maxInitialTokens, + rca.maxOngoingTokensPerSecond, rca.minSecondsPerCollection, + rca.maxSecondsPerCollection, rca.conditions, rca.nonce, rca.metadata + ); + } + + function _hashRCAHelper( + uint64 deadline, uint64 endsAt, address payer, address dataService, + address serviceProvider, uint256 maxInitialTokens, + uint256 maxOngoingTokensPerSecond, uint32 minSecondsPerCollection, + uint32 maxSecondsPerCollection, uint16 conditions, uint256 nonce, bytes memory metadata + ) private pure returns (bytes32) { + return keccak256(abi.encode( + deadline, endsAt, payer, dataService, serviceProvider, maxInitialTokens, + 
maxOngoingTokensPerSecond, minSecondsPerCollection, maxSecondsPerCollection, + conditions, nonce, metadata + )); } function hashRCAU( @@ -89,6 +273,7 @@ contract MockRecurringCollector { rcau.maxOngoingTokensPerSecond, rcau.minSecondsPerCollection, rcau.maxSecondsPerCollection, + rcau.conditions, rcau.nonce, rcau.metadata ) diff --git a/packages/issuance/test/unit/agreement-manager/mocks/MockSubgraphService.sol b/packages/issuance/test/unit/agreement-manager/mocks/MockSubgraphService.sol deleted file mode 100644 index c74bf72cb..000000000 --- a/packages/issuance/test/unit/agreement-manager/mocks/MockSubgraphService.sol +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: MIT -pragma solidity ^0.8.27; - -/// @notice Minimal mock of SubgraphService for RecurringAgreementManager cancelAgreement testing. -/// Records cancel calls and can be configured to revert. -contract MockSubgraphService { - mapping(bytes16 => bool) public canceled; - mapping(bytes16 => uint256) public cancelCallCount; - - bool public shouldRevert; - string public revertMessage; - - function cancelIndexingAgreementByPayer(bytes16 agreementId) external { - if (shouldRevert) { - revert(revertMessage); - } - canceled[agreementId] = true; - cancelCallCount[agreementId]++; - } - - // -- Test helpers -- - - function setRevert(bool _shouldRevert, string memory _message) external { - shouldRevert = _shouldRevert; - revertMessage = _message; - } -} diff --git a/packages/issuance/test/unit/agreement-manager/multiCollector.t.sol b/packages/issuance/test/unit/agreement-manager/multiCollector.t.sol index f5785dcbd..d5bbac7d6 100644 --- a/packages/issuance/test/unit/agreement-manager/multiCollector.t.sol +++ b/packages/issuance/test/unit/agreement-manager/multiCollector.t.sol @@ -2,6 +2,7 @@ pragma solidity ^0.8.27; import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { OFFER_TYPE_NEW } from 
"@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; @@ -41,6 +42,8 @@ contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManage maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, minSecondsPerCollection: 60, maxSecondsPerCollection: maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: nonce, metadata: "" }); @@ -67,7 +70,7 @@ contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManage ); token.mint(address(agreementManager), 1_000_000 ether); vm.prank(operator); - agreementManager.offerAgreement(rca1, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca1)); uint256 maxClaim1 = 1 ether * 3600 + 100 ether; @@ -81,7 +84,7 @@ contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManage 2 ); vm.prank(operator); - agreementManager.offerAgreement(rca2, IRecurringCollector(address(collector2))); + agreementManager.offerAgreement(IRecurringCollector(address(collector2)), OFFER_TYPE_NEW, abi.encode(rca2)); uint256 maxClaim2 = 2 ether * 7200 + 200 ether; @@ -102,11 +105,11 @@ contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManage ); token.mint(address(agreementManager), 1_000_000 ether); vm.prank(operator); - agreementManager.offerAgreement(rca1, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca1)); - // collector2 cannot call beforeCollection on collector1's agreement + // collector2 calling beforeCollection on collector1's agreement is a no-op + // (agreement doesn't exist under collector2's namespace) vm.prank(address(collector2)); - vm.expectRevert(IRecurringAgreementManagement.OnlyAgreementCollector.selector); agreementManager.beforeCollection(agreementId1, 100 ether); 
// collector1 can call beforeCollection on its own agreement @@ -126,11 +129,11 @@ contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManage ); token.mint(address(agreementManager), 1_000_000 ether); vm.prank(operator); - agreementManager.offerAgreement(rca1, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca1)); - // collector2 cannot call afterCollection on collector1's agreement + // collector2 calling afterCollection on collector1's agreement is a no-op + // (agreement doesn't exist under collector2's namespace) vm.prank(address(collector2)); - vm.expectRevert(IRecurringAgreementManagement.OnlyAgreementCollector.selector); agreementManager.afterCollection(agreementId1, 100 ether); } @@ -145,10 +148,6 @@ contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManage 1 ); uint256 maxClaim1 = 1 ether * 3600 + 100 ether; - // Fund with surplus so Full mode stays active (deficit < balance required) - token.mint(address(agreementManager), maxClaim1 + 1); - vm.prank(operator); - agreementManager.offerAgreement(rca1, _collector()); // Offer via collector2 (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector( @@ -160,10 +159,17 @@ contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManage 2 ); uint256 maxClaim2 = 2 ether * 7200 + 200 ether; - // Fund with surplus so Full mode stays active (deficit < balance required) - token.mint(address(agreementManager), maxClaim2 + 1); + + // Fund generously so Full mode stays active through both offers. + // After both: smnca = maxClaim1 + maxClaim2, deficit = smnca. + // spare = balance - deficit. Full requires smnca * 272 / 256 < spare. 
+ uint256 totalMaxClaim = maxClaim1 + maxClaim2; + token.mint(address(agreementManager), totalMaxClaim + (totalMaxClaim * 272) / 256 + 1); + vm.prank(operator); - agreementManager.offerAgreement(rca2, IRecurringCollector(address(collector2))); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca1)); + vm.prank(operator); + agreementManager.offerAgreement(IRecurringCollector(address(collector2)), OFFER_TYPE_NEW, abi.encode(rca2)); // Escrow accounts are separate per (collector, provider) (uint256 collector1Balance, , ) = paymentsEscrow.escrowAccounts( @@ -180,7 +186,7 @@ contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManage assertEq(collector2Balance, maxClaim2); } - function test_MultiCollector_RevokeOnlyAffectsOwnCollectorEscrow() public { + function test_MultiCollector_CancelOnlyAffectsOwnCollectorEscrow() public { // Offer via both collectors (IRecurringCollector.RecurringCollectionAgreement memory rca1, bytes16 agreementId1) = _makeRCAForCollector( recurringCollector, @@ -192,7 +198,7 @@ contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManage ); token.mint(address(agreementManager), 1_000_000 ether); vm.prank(operator); - agreementManager.offerAgreement(rca1, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca1)); (IRecurringCollector.RecurringCollectionAgreement memory rca2, ) = _makeRCAForCollector( collector2, @@ -203,13 +209,12 @@ contract RecurringAgreementManagerMultiCollectorTest is RecurringAgreementManage 2 ); vm.prank(operator); - agreementManager.offerAgreement(rca2, IRecurringCollector(address(collector2))); + agreementManager.offerAgreement(IRecurringCollector(address(collector2)), OFFER_TYPE_NEW, abi.encode(rca2)); uint256 maxClaim2 = 2 ether * 7200 + 200 ether; - // Revoke collector1's agreement - vm.prank(operator); - agreementManager.revokeOffer(agreementId1); + // Cancel collector1's agreement + 
_cancelAgreement(agreementId1); // Collector1 escrow cleared, collector2 unaffected assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); diff --git a/packages/issuance/test/unit/agreement-manager/multiIndexer.t.sol b/packages/issuance/test/unit/agreement-manager/multiIndexer.t.sol index 0a07ecef1..7bf41de7d 100644 --- a/packages/issuance/test/unit/agreement-manager/multiIndexer.t.sol +++ b/packages/issuance/test/unit/agreement-manager/multiIndexer.t.sol @@ -79,9 +79,9 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer3), maxClaim3); // Each has exactly 1 agreement - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); - assertEq(agreementManager.getProviderAgreementCount(indexer2), 1); - assertEq(agreementManager.getProviderAgreementCount(indexer3), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer2), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer3), 1); // Each has independent escrow balance (uint256 indexerBalance, , ) = paymentsEscrow.escrowAccounts( @@ -106,7 +106,7 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS // -- Isolation: revoke one indexer doesn't affect others -- - function test_MultiIndexer_RevokeIsolation() public { + function test_MultiIndexer_CancelIsolation() public { IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCAForIndexer( indexer, 100 ether, @@ -127,17 +127,16 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS uint256 maxClaim2 = 2 ether * 7200 + 200 ether; - // Revoke indexer1's agreement - vm.prank(operator); - agreementManager.revokeOffer(id1); + // Cancel indexer1's agreement + _cancelAgreement(id1); // Indexer1 cleared 
assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); // Indexer2 unaffected assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2); - assertEq(agreementManager.getProviderAgreementCount(indexer2), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer2), 1); } // -- Isolation: reconcile one indexer doesn't affect others -- @@ -165,7 +164,7 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS // SP cancels indexer1, reconcile it _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); // Indexer1 cleared assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); @@ -201,14 +200,14 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS _setAgreementCanceledBySP(id1, rca1); // Reconcile only indexer1 - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); // Indexer1 required escrow drops to 0 (CanceledBySP -> maxNextClaim=0) assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); // Indexer2 completely unaffected (still pre-offered estimate) assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2); - assertEq(agreementManager.getAgreementMaxNextClaim(id2), maxClaim2); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), id2), maxClaim2); } // -- Multiple agreements per indexer -- @@ -245,16 +244,16 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS uint256 maxClaim1b = 0.5 ether * 1800 + 50 ether; uint256 maxClaim2 = 2 ether * 7200 + 200 ether; - 
assertEq(agreementManager.getProviderAgreementCount(indexer), 2); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 2); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1a + maxClaim1b); - assertEq(agreementManager.getProviderAgreementCount(indexer2), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer2), 1); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), maxClaim2); // Reconcile one of indexer's agreements _setAgreementCanceledBySP(id1a, rca1a); - agreementManager.reconcileAgreement(id1a); + agreementManager.reconcileAgreement(address(recurringCollector), id1a); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim1b); // Indexer2 still unaffected @@ -286,21 +285,18 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS _setAgreementAccepted(id1, rca1, uint64(block.timestamp)); _setAgreementAccepted(id2, rca2, uint64(block.timestamp)); - // Cancel indexer1's agreement via operator - vm.prank(operator); - agreementManager.cancelAgreement(id1); + // Advance time so CanceledByPayer has a non-zero claim window + vm.warp(block.timestamp + 10); - // Indexer1's required escrow updated by cancelAgreement's inline reconcile - // (still has maxNextClaim from RC since it's CanceledByPayer not CanceledBySP) - // But the mock just calls SubgraphService — the RC state doesn't change automatically. - // The cancelAgreement reconciles against whatever the mock RC says. 
+ // Cancel indexer1's agreement via operator — collector.cancel() sets CanceledByPayer + _cancelAgreement(id1); // Reconcile indexer2 independently - agreementManager.reconcileAgreement(id2); + agreementManager.reconcileAgreement(address(recurringCollector), id2); - // Both indexers tracked independently - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); - assertEq(agreementManager.getProviderAgreementCount(indexer2), 1); + // Both indexers tracked independently — id1 still has remaining claim window + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer2), 1); } // -- Maintain isolation -- @@ -329,10 +325,10 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS // Reconcile indexer1's agreement _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); // Update escrow for indexer1 — should thaw excess - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); // Indexer1 escrow thawing (excess = maxClaim1, required = 0) IPaymentsEscrow.EscrowAccount memory acct1; @@ -351,8 +347,8 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS ); assertEq(indexer2Bal, maxClaim2); - // reconcileCollectorProvider on indexer2 is a no-op (balance == required, no excess) - agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + // reconcileProvider on indexer2 is a no-op (balance == required, no excess) + agreementManager.reconcileProvider(address(_collector()), indexer2); } // -- Full lifecycle across multiple indexers -- @@ -393,7 +389,7 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS vm.warp(collectionTime); // 4. 
Reconcile indexer1 — required should decrease (no more initial tokens) - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); assertTrue(agreementManager.getSumMaxNextClaim(_collector(), indexer) < maxClaim1); // Indexer2 unaffected @@ -401,15 +397,15 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS // 5. Cancel indexer2 by SP _setAgreementCanceledBySP(id2, rca2); - agreementManager.reconcileAgreement(id2); + agreementManager.reconcileAgreement(address(recurringCollector), id2); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer2), 0); // 6. Reconcile indexer2's agreement - agreementManager.reconcileAgreement(id2); - assertEq(agreementManager.getProviderAgreementCount(indexer2), 0); + agreementManager.reconcileAgreement(address(recurringCollector), id2); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer2), 0); // 7. Update escrow for indexer2 (thaw excess) - agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + agreementManager.reconcileProvider(address(_collector()), indexer2); IPaymentsEscrow.EscrowAccount memory acct2; (acct2.balance, acct2.tokensThawing, acct2.thawEndTimestamp) = paymentsEscrow.escrowAccounts( address(agreementManager), @@ -419,7 +415,7 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS assertEq(acct2.balance - acct2.tokensThawing, 0); // 8. 
Indexer1 still active - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); assertTrue(0 < agreementManager.getSumMaxNextClaim(_collector(), indexer)); } @@ -444,8 +440,14 @@ contract RecurringAgreementManagerMultiIndexerTest is RecurringAgreementManagerS bytes16 id1 = _offerAgreement(rca1); bytes16 id2 = _offerAgreement(rca2); - IRecurringAgreements.AgreementInfo memory info1 = agreementManager.getAgreementInfo(id1); - IRecurringAgreements.AgreementInfo memory info2 = agreementManager.getAgreementInfo(id2); + IRecurringAgreements.AgreementInfo memory info1 = agreementManager.getAgreementInfo( + address(recurringCollector), + id1 + ); + IRecurringAgreements.AgreementInfo memory info2 = agreementManager.getAgreementInfo( + address(recurringCollector), + id2 + ); assertEq(info1.provider, indexer); assertEq(info2.provider, indexer2); diff --git a/packages/issuance/test/unit/agreement-manager/offerUpdate.t.sol b/packages/issuance/test/unit/agreement-manager/offerUpdate.t.sol index 6049ea270..9007b5031 100644 --- a/packages/issuance/test/unit/agreement-manager/offerUpdate.t.sol +++ b/packages/issuance/test/unit/agreement-manager/offerUpdate.t.sol @@ -2,11 +2,17 @@ pragma solidity ^0.8.27; import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { + REGISTERED, + ACCEPTED, + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; -import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { 
MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSharedTest { /* solhint-disable graph/func-name-mixedcase */ @@ -33,21 +39,21 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh _offerAgreementUpdate(rcau); - // pendingMaxNextClaim = 2e18 * 7200 + 200e18 = 14600e18 - uint256 expectedPendingMaxClaim = 2 ether * 7200 + 200 ether; // Original maxNextClaim = 1e18 * 3600 + 100e18 = 3700e18 uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + // Pending = ongoing + initialExtra = 2e18 * 7200 + 200e18 = 14600e18 + uint256 pendingTotal = 2 ether * 7200 + 200 ether; - // Required escrow should include both + // Contribution = max(pending, current) since only one set of terms is active at a time assertEq( agreementManager.getSumMaxNextClaim(_collector(), indexer), - originalMaxClaim + expectedPendingMaxClaim + pendingTotal // max(3700, 14600) = 14600 ); - // Original maxNextClaim unchanged - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), originalMaxClaim); + // maxNextClaim now stores max(active, pending) + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), pendingTotal); } - function test_OfferUpdate_AuthorizesHash() public { + function test_OfferUpdate_StoresOnCollector() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -69,10 +75,9 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh _offerAgreementUpdate(rcau); - // The update hash should be authorized for the IAgreementOwner callback - bytes32 updateHash = recurringCollector.hashRCAU(rcau); - bytes4 result = agreementManager.approveAgreement(updateHash); - assertEq(result, agreementManager.approveAgreement.selector); + // The update is stored on the collector (not via hash authorization) + bytes32 pendingHash = 
recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + assertTrue(pendingHash != bytes32(0), "Pending update should be stored"); } function test_OfferUpdate_FundsEscrow() public { @@ -85,16 +90,19 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh ); uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; - uint256 sumMaxNextClaim = originalMaxClaim + pendingMaxClaim; - - // Fund and offer agreement - token.mint(address(agreementManager), sumMaxNextClaim); + // Pending = ongoing + initialExtra = 2e18 * 7200 + 200e18 = 14600e18 + uint256 pendingTotal = 2 ether * 7200 + 200 ether; + // Contribution = max(pendingTotal, originalMaxClaim) = 14600 (only one agreement) + uint256 sumMaxNextClaim = pendingTotal; + + // Fund generously so Full mode stays active through both offers. + // After both offers, smnca = sumMaxNextClaim, deficit = sumMaxNextClaim. + // spare = balance - deficit. Full requires smnca * 272 / 256 < spare. 
+ token.mint(address(agreementManager), sumMaxNextClaim + (sumMaxNextClaim * 272) / 256 + 1); vm.prank(operator); - bytes16 agreementId = agreementManager.offerAgreement(rca, _collector()); + bytes16 agreementId = agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); // Offer update (should fund the deficit) - token.mint(address(agreementManager), pendingMaxClaim); IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( agreementId, 200 ether, @@ -105,7 +113,7 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh 1 ); vm.prank(operator); - agreementManager.offerAgreementUpdate(rcau); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_UPDATE, abi.encode(rcau)); // Verify escrow was funded for both (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( @@ -128,7 +136,7 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; - // First pending update + // First pending update (nonce=1) IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( agreementId, 200 ether, @@ -140,10 +148,14 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh ); _offerAgreementUpdate(rcau1); - uint256 pendingMaxClaim1 = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim1); + // Pending1 = ongoing + initialExtra = 2e18 * 7200 + 200e18 = 14600e18 + // Contribution = max(14600, 3700) = 14600 + uint256 pendingTotal1 = 2 ether * 7200 + 200 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), pendingTotal1); + + // Revoke first, then offer second (nonce=2, since collector incremented to 1) + _cancelPendingUpdate(agreementId); - // Second pending update (replaces first — same nonce since first was never accepted) 
IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( agreementId, 50 ether, @@ -151,13 +163,13 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh 60, 1800, uint64(block.timestamp + 180 days), - 1 + 2 ); _offerAgreementUpdate(rcau2); - uint256 pendingMaxClaim2 = 0.5 ether * 1800 + 50 ether; - // Old pending removed, new pending added - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim2); + // Pending2 = ongoing + initialExtra = 0.5e18 * 1800 + 50e18 = 950e18 + // Contribution = max(950, 3700) = 3700 (original dominates) + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim); } function test_OfferUpdate_EmitsEvent() public { @@ -180,13 +192,16 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh 1 ); - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + // Pending maxNextClaim = ongoing + initialExtra = 2e18 * 7200 + 200e18 = 14600e18 + uint256 pendingTotal = 2 ether * 7200 + 200 ether; + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; + // The callback fires during offer, emitting AgreementReconciled vm.expectEmit(address(agreementManager)); - emit IRecurringAgreementManagement.AgreementUpdateOffered(agreementId, pendingMaxClaim, 1); + emit IRecurringAgreementManagement.AgreementReconciled(agreementId, originalMaxClaim, pendingTotal); vm.prank(operator); - agreementManager.offerAgreementUpdate(rcau); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_UPDATE, abi.encode(rcau)); } function test_OfferUpdate_Revert_WhenNotOffered() public { @@ -201,9 +216,9 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh 1 ); - vm.expectRevert(abi.encodeWithSelector(IRecurringAgreementManagement.AgreementNotOffered.selector, fakeId)); + vm.expectRevert(abi.encodeWithSelector(IRecurringAgreementManagement.ServiceProviderZeroAddress.selector)); 
vm.prank(operator); - agreementManager.offerAgreementUpdate(rcau); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_UPDATE, abi.encode(rcau)); } function test_OfferUpdate_Revert_WhenNotOperator() public { @@ -235,38 +250,7 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh ) ); vm.prank(nonOperator); - agreementManager.offerAgreementUpdate(rcau); - } - - function test_OfferUpdate_Revert_WhenPaused() public { - (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( - 100 ether, - 1 ether, - 3600, - uint64(block.timestamp + 365 days) - ); - - bytes16 agreementId = _offerAgreement(rca); - - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( - agreementId, - 200 ether, - 2 ether, - 60, - 7200, - uint64(block.timestamp + 730 days), - 1 - ); - - // Grant pause role and pause - vm.startPrank(governor); - agreementManager.grantRole(keccak256("PAUSE_ROLE"), governor); - agreementManager.pause(); - vm.stopPrank(); - - vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); - vm.prank(operator); - agreementManager.offerAgreementUpdate(rcau); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_UPDATE, abi.encode(rcau)); } function test_OfferUpdate_Revert_WhenNonceWrong() public { @@ -290,11 +274,10 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh 2 ); - vm.expectRevert( - abi.encodeWithSelector(IRecurringAgreementManagement.InvalidUpdateNonce.selector, agreementId, 1, 2) - ); + // Nonce validation is now done by the collector + vm.expectRevert("MockRecurringCollector: invalid nonce"); vm.prank(operator); - agreementManager.offerAgreementUpdate(rcau); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_UPDATE, abi.encode(rcau)); } function test_OfferUpdate_Nonce2_AfterFirstAccepted() public { @@ -321,24 +304,17 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh _offerAgreementUpdate(rcau1); // 
Simulate: agreement accepted with update nonce=1 applied - recurringCollector.setAgreement( - agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: uint64(block.timestamp + 730 days), - maxInitialTokens: 200 ether, - maxOngoingTokensPerSecond: 2 ether, - minSecondsPerCollection: 60, - maxSecondsPerCollection: 7200, - updateNonce: 1, - canceledAt: 0, - state: IRecurringCollector.AgreementState.Accepted - }) + IRecurringCollector.RecurringCollectionAgreement memory updatedRca = _makeRCA( + 200 ether, 2 ether, 60, 7200, uint64(block.timestamp + 730 days) + ); + updatedRca.payer = rca.payer; + updatedRca.dataService = rca.dataService; + updatedRca.serviceProvider = rca.serviceProvider; + MockRecurringCollector.AgreementStorage memory data = _buildAgreementStorage( + updatedRca, REGISTERED | ACCEPTED, uint64(block.timestamp), 0, 0 ); + data.updateNonce = 1; + recurringCollector.setAgreement(agreementId, data); // Offer second update (nonce=2) — should succeed because collector's updateNonce=1 IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( @@ -352,10 +328,11 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh ); _offerAgreementUpdate(rcau2); - // Verify pending state was set - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2Check = rcau2; - bytes32 updateHash = recurringCollector.hashRCAU(rcau2Check); - assertEq(agreementManager.approveAgreement(updateHash), agreementManager.approveAgreement.selector); + // Verify pending state was set on the collector + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + assertTrue(pendingHash != bytes32(0), "Second pending update should be stored"); + IRecurringCollector.AgreementData memory result = recurringCollector.getAgreementData(agreementId); + 
assertEq(result.updateNonce, 2); } function test_OfferUpdate_Revert_Nonce1_AfterFirstAccepted() public { @@ -382,24 +359,17 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh _offerAgreementUpdate(rcau1); // Simulate: agreement accepted with update nonce=1 applied - recurringCollector.setAgreement( - agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: uint64(block.timestamp + 730 days), - maxInitialTokens: 200 ether, - maxOngoingTokensPerSecond: 2 ether, - minSecondsPerCollection: 60, - maxSecondsPerCollection: 7200, - updateNonce: 1, - canceledAt: 0, - state: IRecurringCollector.AgreementState.Accepted - }) + IRecurringCollector.RecurringCollectionAgreement memory updatedRca = _makeRCA( + 200 ether, 2 ether, 60, 7200, uint64(block.timestamp + 730 days) ); + updatedRca.payer = rca.payer; + updatedRca.dataService = rca.dataService; + updatedRca.serviceProvider = rca.serviceProvider; + MockRecurringCollector.AgreementStorage memory data = _buildAgreementStorage( + updatedRca, REGISTERED | ACCEPTED, uint64(block.timestamp), 0, 0 + ); + data.updateNonce = 1; + recurringCollector.setAgreement(agreementId, data); // Try nonce=1 again — should fail because collector already at updateNonce=1 IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( @@ -412,11 +382,10 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh 1 ); - vm.expectRevert( - abi.encodeWithSelector(IRecurringAgreementManagement.InvalidUpdateNonce.selector, agreementId, 2, 1) - ); + // Nonce validation is now done by the collector + vm.expectRevert("MockRecurringCollector: invalid nonce"); vm.prank(operator); - agreementManager.offerAgreementUpdate(rcau2); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_UPDATE, abi.encode(rcau2)); } function 
test_OfferUpdate_ReconcilesDuringOffer() public { @@ -459,5 +428,36 @@ contract RecurringAgreementManagerOfferUpdateTest is RecurringAgreementManagerSh assertTrue(postOfferMax < preOfferMax + pendingMaxClaim); } + function test_OfferUpdate_Succeeds_WhenPaused() public { + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + bytes16 agreementId = _offerAgreement(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( + agreementId, + 200 ether, + 2 ether, + 60, + 7200, + uint64(block.timestamp + 730 days), + 1 + ); + + // Grant pause role and pause + vm.startPrank(governor); + agreementManager.grantRole(keccak256("PAUSE_ROLE"), governor); + agreementManager.pause(); + vm.stopPrank(); + + // Role-gated functions should succeed even when paused + vm.prank(operator); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_UPDATE, abi.encode(rcau)); + } + /* solhint-enable graph/func-name-mixedcase */ } diff --git a/packages/issuance/test/unit/agreement-manager/reconcile.t.sol b/packages/issuance/test/unit/agreement-manager/reconcile.t.sol index b2d45f413..8e99fe7cf 100644 --- a/packages/issuance/test/unit/agreement-manager/reconcile.t.sol +++ b/packages/issuance/test/unit/agreement-manager/reconcile.t.sol @@ -4,9 +4,11 @@ pragma solidity ^0.8.27; import { Vm } from "forge-std/Vm.sol"; import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { REGISTERED, ACCEPTED } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; +import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; contract RecurringAgreementManagerReconcileTest is 
RecurringAgreementManagerSharedTest { /* solhint-disable graph/func-name-mixedcase */ @@ -21,7 +23,7 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar ); bytes16 agreementId = _offerAgreement(rca); - uint256 initialMaxClaim = agreementManager.getAgreementMaxNextClaim(agreementId); + uint256 initialMaxClaim = agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId); assertEq(initialMaxClaim, 3700 ether); // Simulate: agreement accepted and first collection happened @@ -34,10 +36,10 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar // remaining = endsAt - lastCollectionAt (large), capped by maxSecondsPerCollection = 3600 // New max = 1e18 * 3600 = 3600e18 vm.warp(lastCollectionAt); - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertTrue(exists); - uint256 newMaxClaim = agreementManager.getAgreementMaxNextClaim(agreementId); + uint256 newMaxClaim = agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId); assertEq(newMaxClaim, 3600 ether); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 3600 ether); } @@ -51,17 +53,17 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar ); bytes16 agreementId = _offerAgreement(rca); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 3700 ether); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 3700 ether); // SP cancels - immediately non-collectable → reconcile deletes _setAgreementCanceledBySP(agreementId, rca); - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertFalse(exists); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + 
assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 0); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } function test_ReconcileAgreement_CanceledByPayer_WindowOpen() public { @@ -78,16 +80,16 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar // Payer cancels 2 hours from now, never collected uint64 acceptedAt = startTime; - uint64 canceledAt = uint64(startTime + 2 hours); - _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, canceledAt, 0); + uint64 collectableUntil = uint64(startTime + 2 hours); + _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, collectableUntil, 0); - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertTrue(exists); - // Window = canceledAt - acceptedAt = 7200s, capped by maxSecondsPerCollection = 3600s + // Window = collectableUntil - acceptedAt = 7200s, capped by maxSecondsPerCollection = 3600s // maxClaim = 1e18 * 3600 + 100e18 (never collected, so includes initial) uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), expectedMaxClaim); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), expectedMaxClaim); } function test_ReconcileAgreement_CanceledByPayer_WindowExpired() public { @@ -104,17 +106,17 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar // Payer cancels, and the collection already happened covering the full window uint64 acceptedAt = startTime; - uint64 canceledAt = uint64(startTime + 2 hours); - // lastCollectionAt == canceledAt means window is empty - _setAgreementCanceledByPayer(agreementId, rca, 
acceptedAt, canceledAt, canceledAt); + uint64 collectableUntil = uint64(startTime + 2 hours); + // lastCollectionAt == collectableUntil means window is empty + _setAgreementCanceledByPayer(agreementId, rca, acceptedAt, collectableUntil, collectableUntil); - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); - // collectionEnd = canceledAt, collectionStart = lastCollectionAt = canceledAt + // collectionEnd = collectableUntil, collectionStart = lastCollectionAt = collectableUntil // window is empty -> maxClaim = 0 → deleted assertFalse(exists); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } function test_ReconcileAgreement_SkipsNotAccepted() public { @@ -126,15 +128,15 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar ); bytes16 agreementId = _offerAgreement(rca); - uint256 originalMaxClaim = agreementManager.getAgreementMaxNextClaim(agreementId); + uint256 originalMaxClaim = agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId); // Mock returns NotAccepted (default state in mock - zero struct) // reconcile should skip recalculation and preserve the original estimate - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertTrue(exists); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), originalMaxClaim); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), originalMaxClaim); } function test_ReconcileAgreement_EmitsEvent() public { @@ -153,9 +155,9 @@ 
contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar vm.expectEmit(address(agreementManager)); emit IRecurringAgreementManagement.AgreementReconciled(agreementId, 3700 ether, 0); vm.expectEmit(address(agreementManager)); - emit IRecurringAgreementManagement.AgreementRemoved(agreementId, indexer); + emit IRecurringAgreementManagement.AgreementRemoved(agreementId); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); } function test_ReconcileAgreement_NoEmitWhenUnchanged() public { @@ -174,12 +176,12 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar // maxClaim should remain 3700e18 (never collected, maxSecondsPerCollection < window) // No event should be emitted vm.recordLogs(); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); // Check no AgreementReconciled or AgreementRemoved events were emitted Vm.Log[] memory logs = vm.getRecordedLogs(); bytes32 reconciledTopic = keccak256("AgreementReconciled(bytes16,uint256,uint256)"); - bytes32 removedTopic = keccak256("AgreementRemoved(bytes16,address)"); + bytes32 removedTopic = keccak256("AgreementRemoved(bytes16)"); for (uint256 i = 0; i < logs.length; i++) { assertTrue(logs[i].topics[0] != reconciledTopic, "Unexpected AgreementReconciled event"); assertTrue(logs[i].topics[0] != removedTopic, "Unexpected AgreementRemoved event"); @@ -190,7 +192,7 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar bytes16 fakeId = bytes16(keccak256("fake")); // Returns false (not exists) when agreement not found (idempotent) - bool exists = agreementManager.reconcileAgreement(fakeId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), fakeId); assertFalse(exists); } @@ -210,13 +212,13 @@ contract RecurringAgreementManagerReconcileTest is 
RecurringAgreementManagerShar _setAgreementCollected(agreementId, rca, uint64(block.timestamp), endsAt); vm.warp(endsAt); - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); // collectionEnd = endsAt, collectionStart = lastCollectionAt = endsAt // window empty -> maxClaim = 0 → deleted assertFalse(exists); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } function test_ReconcileAgreement_ClearsPendingUpdate() public { @@ -242,36 +244,34 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar ); _offerAgreementUpdate(rcau); - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + // max(current, pending) = max(3700, 14600) = 14600 + uint256 pendingMaxClaim = 14600 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), pendingMaxClaim); // Simulate: agreement accepted and update applied on-chain (updateNonce = 1) - recurringCollector.setAgreement( - agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: rcau.endsAt, - maxInitialTokens: rcau.maxInitialTokens, - maxOngoingTokensPerSecond: rcau.maxOngoingTokensPerSecond, - minSecondsPerCollection: rcau.minSecondsPerCollection, - maxSecondsPerCollection: rcau.maxSecondsPerCollection, - updateNonce: 1, - canceledAt: 0, - state: IRecurringCollector.AgreementState.Accepted - }) + 
IRecurringCollector.RecurringCollectionAgreement memory updatedRca = _makeRCA( + rcau.maxInitialTokens, + rcau.maxOngoingTokensPerSecond, + rcau.minSecondsPerCollection, + rcau.maxSecondsPerCollection, + rcau.endsAt + ); + updatedRca.payer = rca.payer; + updatedRca.dataService = rca.dataService; + updatedRca.serviceProvider = rca.serviceProvider; + MockRecurringCollector.AgreementStorage memory data = _buildAgreementStorage( + updatedRca, REGISTERED | ACCEPTED, uint64(block.timestamp), 0, 0 ); + data.updateNonce = 1; + recurringCollector.setAgreement(agreementId, data); - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertTrue(exists); // Pending should be cleared, maxNextClaim recalculated from new terms // newMaxClaim = 2e18 * 7200 + 200e18 = 14600e18 (never collected, maxSecondsPerCollection < window) uint256 newMaxClaim = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), newMaxClaim); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), newMaxClaim); // Required = only new maxClaim (pending cleared) assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), newMaxClaim); } @@ -299,18 +299,36 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar ); _offerAgreementUpdate(rcau); - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; + // Full update max = 14600 + uint256 pendingMaxClaim = 14600 ether; // Simulate: agreement accepted but update NOT yet applied (updateNonce = 0) - _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); - - bool exists = agreementManager.reconcileAgreement(agreementId); + // Must preserve pending terms on the collector (setAgreementAccepted would erase them) + MockRecurringCollector.AgreementStorage memory data = _buildAgreementStorage( + rca, REGISTERED | ACCEPTED, 
uint64(block.timestamp), 0, 0 + ); + data.pendingTerms = IRecurringCollector.AgreementTerms({ + deadline: 0, + endsAt: rcau.endsAt, + maxInitialTokens: rcau.maxInitialTokens, + maxOngoingTokensPerSecond: rcau.maxOngoingTokensPerSecond, + minSecondsPerCollection: rcau.minSecondsPerCollection, + maxSecondsPerCollection: rcau.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + hash: bytes32(0), + metadata: "" + }); + recurringCollector.setAgreement(agreementId, data); + + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertTrue(exists); - // maxNextClaim recalculated from original terms (same value since never collected) - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), originalMaxClaim); - // Pending still present - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + // maxNextClaim stores max(active, pending) + // max(3700, 14600) = 14600 (pending dominates, update not yet applied) + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), pendingMaxClaim); + // Sum also reflects the max + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), pendingMaxClaim); } // -- Tests merged from remove (cleanup behavior) -- @@ -328,9 +346,9 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar // Set as accepted but never collected - still claimable _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertTrue(exists); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } function test_ReconcileAgreement_DeletesExpiredOffer() public { @@ -347,10 +365,10 @@ contract 
RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar vm.warp(block.timestamp + 2 hours); // Agreement not accepted + past deadline — should be deleted - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertFalse(exists); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } @@ -365,9 +383,9 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar bytes16 agreementId = _offerAgreement(rca); // Not accepted yet, before deadline - still potentially claimable - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertTrue(exists); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } function test_ReconcileAgreement_ReturnsTrue_WhenCanceledByPayer_WindowStillOpen() public { @@ -383,13 +401,13 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar bytes16 agreementId = _offerAgreement(rca); // Payer canceled but window is still open (not yet collected) - uint64 canceledAt = uint64(startTime + 2 hours); - _setAgreementCanceledByPayer(agreementId, rca, startTime, canceledAt, 0); + uint64 collectableUntil = uint64(startTime + 2 hours); + _setAgreementCanceledByPayer(agreementId, rca, startTime, collectableUntil, 0); - // Still claimable: window = canceledAt - acceptedAt = 7200s, capped at 3600s - bool exists = agreementManager.reconcileAgreement(agreementId); + // Still claimable: window = collectableUntil - acceptedAt = 7200s, capped at 3600s + bool exists = 
agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertTrue(exists); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } function test_ReconcileAgreement_ReducesRequiredEscrow_WithMultipleAgreements() public { @@ -420,15 +438,15 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar // Cancel agreement 1 by SP and reconcile it (deletes) _setAgreementCanceledBySP(id1, rca1); - bool exists = agreementManager.reconcileAgreement(id1); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), id1); assertFalse(exists); // Only agreement 2's original maxClaim remains assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim2); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); // Agreement 2 still tracked - assertEq(agreementManager.getAgreementMaxNextClaim(id2), maxClaim2); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), id2), maxClaim2); } function test_ReconcileAgreement_Permissionless() public { @@ -447,10 +465,10 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar // Anyone can reconcile address anyone = makeAddr("anyone"); vm.prank(anyone); - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertFalse(exists); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } function test_ReconcileAgreement_ClearsPendingUpdate_WhenCanceled() public { @@ -476,18 +494,19 @@ contract RecurringAgreementManagerReconcileTest is RecurringAgreementManagerShar _offerAgreementUpdate(rcau); 
uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + // max(current, pending) = max(3700, 14600) = 14600 + uint256 pendingMaxClaim = 14600 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), pendingMaxClaim); // SP cancels - immediately removable _setAgreementCanceledBySP(agreementId, rca); - bool exists = agreementManager.reconcileAgreement(agreementId); + bool exists = agreementManager.reconcileAgreement(address(recurringCollector), agreementId); assertFalse(exists); // Both original and pending should be cleared from sumMaxNextClaim assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/issuance/test/unit/agreement-manager/register.t.sol b/packages/issuance/test/unit/agreement-manager/register.t.sol index 23e1516a1..3d558cc29 100644 --- a/packages/issuance/test/unit/agreement-manager/register.t.sol +++ b/packages/issuance/test/unit/agreement-manager/register.t.sol @@ -2,9 +2,9 @@ pragma solidity ^0.8.27; import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; -import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; @@ -25,9 +25,9 @@ contract 
RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTe // maxNextClaim = maxOngoingTokensPerSecond * maxSecondsPerCollection + maxInitialTokens // = 1e18 * 3600 + 100e18 = 3700e18 uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), expectedMaxClaim); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), expectedMaxClaim); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), expectedMaxClaim); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); } function test_Offer_FundsEscrow() public { @@ -41,10 +41,12 @@ contract RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTe uint256 expectedMaxClaim = 1 ether * 3600 + 100 ether; - // Fund with surplus so Full mode stays active (deficit < balance required) - token.mint(address(agreementManager), expectedMaxClaim + 1); + // Fund with surplus so Full mode stays active. + // spare = balance - deficit (deficit = expectedMaxClaim before deposit). 
+ // Full requires smnca * (256 + 16) / 256 = expectedMaxClaim * 272 / 256 < spare + token.mint(address(agreementManager), expectedMaxClaim + (expectedMaxClaim * 272) / 256 + 1); vm.prank(operator); - agreementManager.offerAgreement(rca, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); // Verify escrow was funded (uint256 escrowBalance, , ) = paymentsEscrow.escrowAccounts( @@ -70,7 +72,7 @@ contract RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTe // Fund with less than needed token.mint(address(agreementManager), available); vm.prank(operator); - agreementManager.offerAgreement(rca, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); // Since available < required, Full degrades to OnDemand (deposit target = 0). // No proactive deposit; JIT beforeCollection is the safety net. @@ -104,14 +106,15 @@ contract RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTe token.mint(address(agreementManager), expectedMaxClaim); + // The callback fires during offer, emitting AgreementReconciled vm.expectEmit(address(agreementManager)); - emit IRecurringAgreementManagement.AgreementOffered(expectedId, indexer, expectedMaxClaim); + emit IRecurringAgreementManagement.AgreementReconciled(expectedId, 0, expectedMaxClaim); vm.prank(operator); - agreementManager.offerAgreement(rca, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); } - function test_Offer_AuthorizesHash() public { + function test_Offer_StoresOnCollector() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -120,12 +123,13 @@ contract RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTe uint64(block.timestamp + 365 days) ); - _offerAgreement(rca); + bytes16 agreementId = _offerAgreement(rca); - // The agreement hash should be authorized for the IAgreementOwner callback - 
bytes32 agreementHash = recurringCollector.hashRCA(rca); - bytes4 result = agreementManager.approveAgreement(agreementHash); - assertEq(result, agreementManager.approveAgreement.selector); + // The offer is stored on the collector (not via hash authorization) + IRecurringCollector.AgreementData memory data = recurringCollector.getAgreementData(agreementId); + assertEq(data.dataService, rca.dataService); + assertEq(data.payer, rca.payer); + assertEq(data.serviceProvider, rca.serviceProvider); } function test_Offer_MultipleAgreements_SameIndexer() public { @@ -151,7 +155,7 @@ contract RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTe bytes16 id2 = _offerAgreement(rca2); assertTrue(id1 != id2); - assertEq(agreementManager.getProviderAgreementCount(indexer), 2); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 2); uint256 maxClaim1 = 1 ether * 3600 + 100 ether; uint256 maxClaim2 = 2 ether * 7200 + 200 ether; @@ -166,35 +170,11 @@ contract RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTe 3600, uint64(block.timestamp + 365 days) ); - rca.payer = address(0xdead); // Wrong payer - - vm.expectRevert( - abi.encodeWithSelector( - IRecurringAgreementManagement.PayerMustBeManager.selector, - address(0xdead), - address(agreementManager) - ) - ); - vm.prank(operator); - agreementManager.offerAgreement(rca, _collector()); - } - - function test_Offer_Revert_WhenAlreadyOffered() public { - IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( - 100 ether, - 1 ether, - 60, - 3600, - uint64(block.timestamp + 365 days) - ); - - bytes16 agreementId = _offerAgreement(rca); + rca.payer = address(0xdead); // Wrong payer — collector rejects because msg.sender != rca.payer - vm.expectRevert( - abi.encodeWithSelector(IRecurringAgreementManagement.AgreementAlreadyOffered.selector, agreementId) - ); + vm.expectRevert("MockRecurringCollector: unauthorized payer"); vm.prank(operator); - 
agreementManager.offerAgreement(rca, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); } function test_Offer_Revert_WhenNotOperator() public { @@ -215,7 +195,7 @@ contract RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTe ) ); vm.prank(nonOperator); - agreementManager.offerAgreement(rca, _collector()); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); } function test_Offer_Revert_WhenUnauthorizedCollector() public { @@ -233,10 +213,10 @@ contract RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTe abi.encodeWithSelector(IRecurringAgreementManagement.UnauthorizedCollector.selector, fakeCollector) ); vm.prank(operator); - agreementManager.offerAgreement(rca, IRecurringCollector(fakeCollector)); + agreementManager.offerAgreement(IRecurringCollector(fakeCollector), OFFER_TYPE_NEW, abi.encode(rca)); } - function test_Offer_Revert_WhenPaused() public { + function test_Offer_Succeeds_WhenPaused() public { IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( 100 ether, 1 ether, @@ -251,9 +231,10 @@ contract RecurringAgreementManagerOfferTest is RecurringAgreementManagerSharedTe agreementManager.pause(); vm.stopPrank(); - vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + // Role-gated functions should succeed even when paused vm.prank(operator); - agreementManager.offerAgreement(rca, _collector()); + bytes16 agreementId = agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); + assertTrue(agreementId != bytes16(0)); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/issuance/test/unit/agreement-manager/revokeAgreementUpdate.t.sol b/packages/issuance/test/unit/agreement-manager/revokeAgreementUpdate.t.sol index 2ad9d1bca..4c337fa14 100644 --- a/packages/issuance/test/unit/agreement-manager/revokeAgreementUpdate.t.sol +++ 
b/packages/issuance/test/unit/agreement-manager/revokeAgreementUpdate.t.sol @@ -2,17 +2,17 @@ pragma solidity ^0.8.27; import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { REGISTERED, ACCEPTED } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; -import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; -contract RecurringAgreementManagerRevokeAgreementUpdateTest is RecurringAgreementManagerSharedTest { +contract RecurringAgreementManagerCancelPendingUpdateTest is RecurringAgreementManagerSharedTest { /* solhint-disable graph/func-name-mixedcase */ - function test_RevokeAgreementUpdate_ClearsPendingState() public { + function test_CancelPendingUpdate_ClearsPendingState() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -21,7 +21,6 @@ contract RecurringAgreementManagerRevokeAgreementUpdateTest is RecurringAgreemen ); bytes16 agreementId = _offerAgreement(rca); - uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; // Offer a pending update IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( @@ -35,30 +34,19 @@ contract RecurringAgreementManagerRevokeAgreementUpdateTest is RecurringAgreemen ); _offerAgreementUpdate(rcau); - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); - - // Revoke the pending update - vm.prank(operator); 
- bool revoked = agreementManager.revokeAgreementUpdate(agreementId); - assertTrue(revoked); + // max(current, pending) = max(3700, 14600) = 14600 + uint256 pendingMaxClaim = 14600 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), pendingMaxClaim); - // Pending state should be fully cleared - IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); - assertEq(info.pendingUpdateMaxNextClaim, 0, "pending escrow should be zero"); - assertEq(info.pendingUpdateNonce, 0, "pending nonce should be zero"); - assertEq(info.pendingUpdateHash, bytes32(0), "pending hash should be zero"); + // Cancel pending update clears pending terms on the collector and reconciles + _cancelPendingUpdate(agreementId); - // sumMaxNextClaim should only include the base claim + // sumMaxNextClaim drops to active-only (3700) since pending was cleared + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim); - - // The update hash should no longer be authorized - bytes32 updateHash = recurringCollector.hashRCAU(rcau); - bytes4 result = agreementManager.approveAgreement(updateHash); - assertTrue(result != agreementManager.approveAgreement.selector, "hash should not be authorized"); } - function test_RevokeAgreementUpdate_EmitsEvent() public { + function test_CancelPendingUpdate_EmitsEvent() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -79,81 +67,22 @@ contract RecurringAgreementManagerRevokeAgreementUpdateTest is RecurringAgreemen ); _offerAgreementUpdate(rcau); - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; - - vm.expectEmit(address(agreementManager)); - emit IRecurringAgreementManagement.AgreementUpdateRevoked(agreementId, pendingMaxClaim, 1); - - vm.prank(operator); - agreementManager.revokeAgreementUpdate(agreementId); - } - - function 
test_RevokeAgreementUpdate_ReturnsFalse_WhenNoPending() public { - (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( - 100 ether, - 1 ether, - 3600, - uint64(block.timestamp + 365 days) - ); - - bytes16 agreementId = _offerAgreement(rca); + // Read pending terms hash from the collector + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; - // No pending update — should return false - vm.prank(operator); - bool revoked = agreementManager.revokeAgreementUpdate(agreementId); - assertFalse(revoked); - } + // Before cancel: maxNextClaim = max(active=3700, pending=14600) = 14600 + // After cancel: pending deleted, maxNextClaim = active-only = 3700 + uint256 oldMaxClaim = agreementManager.getAgreementInfo(address(recurringCollector), agreementId).maxNextClaim; + uint256 activeOnlyClaim = 1 ether * 3600 + 100 ether; - function test_RevokeAgreementUpdate_ReturnsFalse_WhenAlreadyApplied() public { - (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( - 100 ether, - 1 ether, - 3600, - uint64(block.timestamp + 365 days) - ); - - bytes16 agreementId = _offerAgreement(rca); - - // Offer update - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _makeRCAU( - agreementId, - 200 ether, - 2 ether, - 60, - 7200, - uint64(block.timestamp + 730 days), - 1 - ); - _offerAgreementUpdate(rcau); - - // Simulate: accepted with update already applied (updateNonce=1) - recurringCollector.setAgreement( - agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: rcau.endsAt, - maxInitialTokens: rcau.maxInitialTokens, - maxOngoingTokensPerSecond: rcau.maxOngoingTokensPerSecond, - minSecondsPerCollection: rcau.minSecondsPerCollection, - maxSecondsPerCollection: rcau.maxSecondsPerCollection, - updateNonce: 1, - canceledAt: 
0, - state: IRecurringCollector.AgreementState.Accepted - }) - ); + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementReconciled(agreementId, oldMaxClaim, activeOnlyClaim); - // Reconcile inside revokeAgreementUpdate detects the update was applied - // and clears it — returns false (nothing left to revoke) vm.prank(operator); - bool revoked = agreementManager.revokeAgreementUpdate(agreementId); - assertFalse(revoked); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, pendingHash, 0); } - function test_RevokeAgreementUpdate_CanOfferNewUpdateAfterRevoke() public { + function test_CancelPendingUpdate_CanOfferNewUpdateAfterCancel() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -162,6 +91,7 @@ contract RecurringAgreementManagerRevokeAgreementUpdateTest is RecurringAgreemen ); bytes16 agreementId = _offerAgreement(rca); + uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; // Offer update nonce=1 IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _makeRCAU( @@ -175,12 +105,10 @@ contract RecurringAgreementManagerRevokeAgreementUpdateTest is RecurringAgreemen ); _offerAgreementUpdate(rcau1); - // Revoke it - vm.prank(operator); - agreementManager.revokeAgreementUpdate(agreementId); + // Cancel pending update on collector, then offer a new update + _cancelPendingUpdate(agreementId); - // Offer a new update with the same nonce (1) — should succeed since the - // collector's updateNonce is still 0 and the pending was cleared + // Offer a new update with the next valid nonce (2) — collector incremented to 1 IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _makeRCAU( agreementId, 50 ether, @@ -188,26 +116,34 @@ contract RecurringAgreementManagerRevokeAgreementUpdateTest is RecurringAgreemen 60, 1800, uint64(block.timestamp + 180 days), - 1 + 2 ); _offerAgreementUpdate(rcau2); - // New pending should be 
set - uint256 newPendingMaxClaim = 0.5 ether * 1800 + 50 ether; - IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo(agreementId); - assertEq(info.pendingUpdateMaxNextClaim, newPendingMaxClaim); - assertEq(info.pendingUpdateNonce, 1); + // maxNextClaim = max(3700, 950) = 3700 (active dominates) + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo( + address(recurringCollector), + agreementId + ); + assertEq(info.maxNextClaim, originalMaxClaim); } - function test_RevokeAgreementUpdate_Revert_WhenNotOffered() public { + function test_CancelPendingUpdate_RejectsUnknown_WhenNotOffered() public { bytes16 fakeId = bytes16(keccak256("fake")); - vm.expectRevert(abi.encodeWithSelector(IRecurringAgreementManagement.AgreementNotOffered.selector, fakeId)); + // cancelAgreement is a passthrough — unknown agreement triggers AgreementRejected via callback + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementRejected( + fakeId, + address(recurringCollector), + IRecurringAgreementManagement.AgreementRejectionReason.UnknownAgreement + ); + vm.prank(operator); - agreementManager.revokeAgreementUpdate(fakeId); + agreementManager.cancelAgreement(address(recurringCollector), fakeId, bytes32(0), 0); } - function test_RevokeAgreementUpdate_Revert_WhenNotOperator() public { + function test_CancelPendingUpdate_Revert_WhenNotOperator() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -226,10 +162,10 @@ contract RecurringAgreementManagerRevokeAgreementUpdateTest is RecurringAgreemen ) ); vm.prank(nonOperator); - agreementManager.revokeAgreementUpdate(agreementId); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, bytes32(0), 0); } - function test_RevokeAgreementUpdate_Revert_WhenPaused() public { + function test_CancelPendingUpdate_Succeeds_WhenPaused() public { 
(IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -251,9 +187,9 @@ contract RecurringAgreementManagerRevokeAgreementUpdateTest is RecurringAgreemen agreementManager.pause(); vm.stopPrank(); - vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + // Role-gated functions should succeed even when paused vm.prank(operator); - agreementManager.revokeAgreementUpdate(agreementId); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, bytes32(0), 0); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/issuance/test/unit/agreement-manager/revokeOffer.t.sol b/packages/issuance/test/unit/agreement-manager/revokeOffer.t.sol index 8f69e20d0..5cae72412 100644 --- a/packages/issuance/test/unit/agreement-manager/revokeOffer.t.sol +++ b/packages/issuance/test/unit/agreement-manager/revokeOffer.t.sol @@ -2,16 +2,16 @@ pragma solidity ^0.8.27; import { IRecurringAgreementManagement } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreementManagement.sol"; +import { IRecurringAgreements } from "@graphprotocol/interfaces/contracts/issuance/agreement/IRecurringAgreements.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IAccessControl } from "@openzeppelin/contracts/access/IAccessControl.sol"; -import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; import { RecurringAgreementManagerSharedTest } from "./shared.t.sol"; -contract RecurringAgreementManagerRevokeOfferTest is RecurringAgreementManagerSharedTest { +contract RecurringAgreementManagerCancelOfferedTest is RecurringAgreementManagerSharedTest { /* solhint-disable graph/func-name-mixedcase */ - function test_RevokeOffer_ClearsAgreement() public { + function test_CancelOffered_ClearsAgreement() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 
100 ether, 1 ether, @@ -20,21 +20,20 @@ contract RecurringAgreementManagerRevokeOfferTest is RecurringAgreementManagerSh ); bytes16 agreementId = _offerAgreement(rca); - assertEq(agreementManager.getProviderAgreementCount(indexer), 1); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 1); uint256 maxClaim = 1 ether * 3600 + 100 ether; assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), maxClaim); - vm.prank(operator); - bool gone = agreementManager.revokeOffer(agreementId); + bool gone = _cancelAgreement(agreementId); assertTrue(gone); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); - assertEq(agreementManager.getAgreementMaxNextClaim(agreementId), 0); + assertEq(agreementManager.getAgreementMaxNextClaim(address(recurringCollector), agreementId), 0); } - function test_RevokeOffer_InvalidatesHash() public { + function test_CancelOffered_FullyRemovesTracking() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -44,18 +43,18 @@ contract RecurringAgreementManagerRevokeOfferTest is RecurringAgreementManagerSh bytes16 agreementId = _offerAgreement(rca); - // Hash is authorized before revoke - bytes32 rcaHash = recurringCollector.hashRCA(rca); - agreementManager.approveAgreement(rcaHash); // should not revert - - vm.prank(operator); - agreementManager.revokeOffer(agreementId); + _cancelAgreement(agreementId); - // Hash should be rejected after revoke (agreement no longer exists) - assertEq(agreementManager.approveAgreement(rcaHash), bytes4(0)); + // Agreement info should be zeroed out after cancel + IRecurringAgreements.AgreementInfo memory info = agreementManager.getAgreementInfo( + address(recurringCollector), + agreementId + ); + assertEq(info.provider, address(0)); + 
assertEq(info.maxNextClaim, 0); } - function test_RevokeOffer_ClearsPendingUpdate() public { + function test_CancelOffered_ClearsPendingUpdate() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -78,17 +77,17 @@ contract RecurringAgreementManagerRevokeOfferTest is RecurringAgreementManagerSh _offerAgreementUpdate(rcau); uint256 originalMaxClaim = 1 ether * 3600 + 100 ether; - uint256 pendingMaxClaim = 2 ether * 7200 + 200 ether; - assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), originalMaxClaim + pendingMaxClaim); + // max(current, pending) = max(3700, 14600) = 14600 + uint256 pendingMaxClaim = 14600 ether; + assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), pendingMaxClaim); - vm.prank(operator); - agreementManager.revokeOffer(agreementId); + _cancelAgreement(agreementId); // Both original and pending should be cleared assertEq(agreementManager.getSumMaxNextClaim(_collector(), indexer), 0); } - function test_RevokeOffer_EmitsEvent() public { + function test_CancelOffered_EmitsEvent() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -99,40 +98,27 @@ contract RecurringAgreementManagerRevokeOfferTest is RecurringAgreementManagerSh bytes16 agreementId = _offerAgreement(rca); vm.expectEmit(address(agreementManager)); - emit IRecurringAgreementManagement.OfferRevoked(agreementId, indexer); + emit IRecurringAgreementManagement.AgreementRemoved(agreementId); - vm.prank(operator); - agreementManager.revokeOffer(agreementId); + _cancelAgreement(agreementId); } - function test_RevokeOffer_Revert_WhenAlreadyAccepted() public { - (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( - 100 ether, - 1 ether, - 3600, - uint64(block.timestamp + 365 days) - ); - - bytes16 agreementId = _offerAgreement(rca); - - // Simulate acceptance in RC - _setAgreementAccepted(agreementId, rca, 
uint64(block.timestamp)); + function test_CancelOffered_RejectsUnknown_WhenNotOffered() public { + bytes16 fakeId = bytes16(keccak256("fake")); - vm.expectRevert( - abi.encodeWithSelector(IRecurringAgreementManagement.AgreementAlreadyAccepted.selector, agreementId) + // cancelAgreement is a passthrough — unknown agreement triggers AgreementRejected via callback + vm.expectEmit(address(agreementManager)); + emit IRecurringAgreementManagement.AgreementRejected( + fakeId, + address(recurringCollector), + IRecurringAgreementManagement.AgreementRejectionReason.UnknownAgreement ); - vm.prank(operator); - agreementManager.revokeOffer(agreementId); - } - function test_RevokeOffer_ReturnsTrue_WhenNotOffered() public { - bytes16 fakeId = bytes16(keccak256("fake")); vm.prank(operator); - bool gone = agreementManager.revokeOffer(fakeId); - assertTrue(gone); + agreementManager.cancelAgreement(address(recurringCollector), fakeId, bytes32(0), 0); } - function test_RevokeOffer_Revert_WhenNotOperator() public { + function test_CancelOffered_Revert_WhenNotOperator() public { (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -143,6 +129,7 @@ contract RecurringAgreementManagerRevokeOfferTest is RecurringAgreementManagerSh bytes16 agreementId = _offerAgreement(rca); address nonOperator = makeAddr("nonOperator"); + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; vm.expectRevert( abi.encodeWithSelector( IAccessControl.AccessControlUnauthorizedAccount.selector, @@ -151,10 +138,10 @@ contract RecurringAgreementManagerRevokeOfferTest is RecurringAgreementManagerSh ) ); vm.prank(nonOperator); - agreementManager.revokeOffer(agreementId); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, activeHash, 0); } - function test_RevokeOffer_Revert_WhenPaused() public { + function test_CancelOffered_Succeeds_WhenPaused() public { (IRecurringCollector.RecurringCollectionAgreement 
memory rca, ) = _makeRCAWithId( 100 ether, 1 ether, @@ -169,9 +156,10 @@ contract RecurringAgreementManagerRevokeOfferTest is RecurringAgreementManagerSh agreementManager.pause(); vm.stopPrank(); - vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); + // Role-gated functions should succeed even when paused + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; vm.prank(operator); - agreementManager.revokeOffer(agreementId); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, activeHash, 0); } /* solhint-enable graph/func-name-mixedcase */ diff --git a/packages/issuance/test/unit/agreement-manager/shared.t.sol b/packages/issuance/test/unit/agreement-manager/shared.t.sol index 97056e564..bd2f153d1 100644 --- a/packages/issuance/test/unit/agreement-manager/shared.t.sol +++ b/packages/issuance/test/unit/agreement-manager/shared.t.sol @@ -3,6 +3,16 @@ pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PAYER, + BY_PROVIDER, + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; @@ -13,7 +23,6 @@ import { RecurringAgreementHelper } from "../../../contracts/agreement/Recurring import { MockGraphToken } from "./mocks/MockGraphToken.sol"; import { MockPaymentsEscrow } from "./mocks/MockPaymentsEscrow.sol"; import { MockRecurringCollector } from "./mocks/MockRecurringCollector.sol"; -import { MockSubgraphService } from "./mocks/MockSubgraphService.sol"; /// @notice Shared test setup for RecurringAgreementManager tests. 
contract RecurringAgreementManagerSharedTest is Test { @@ -21,7 +30,6 @@ contract RecurringAgreementManagerSharedTest is Test { MockGraphToken internal token; MockPaymentsEscrow internal paymentsEscrow; MockRecurringCollector internal recurringCollector; - MockSubgraphService internal mockSubgraphService; RecurringAgreementManager internal agreementManager; RecurringAgreementHelper internal agreementHelper; @@ -47,8 +55,7 @@ contract RecurringAgreementManagerSharedTest is Test { token = new MockGraphToken(); paymentsEscrow = new MockPaymentsEscrow(address(token)); recurringCollector = new MockRecurringCollector(); - mockSubgraphService = new MockSubgraphService(); - dataService = address(mockSubgraphService); + dataService = makeAddr("subgraphService"); // Deploy RecurringAgreementManager behind proxy RecurringAgreementManager impl = new RecurringAgreementManager( @@ -83,7 +90,7 @@ contract RecurringAgreementManagerSharedTest is Test { vm.label(address(recurringCollector), "RecurringCollector"); vm.label(address(agreementManager), "RecurringAgreementManager"); vm.label(address(agreementHelper), "RecurringAgreementHelper"); - vm.label(address(mockSubgraphService), "SubgraphService"); + vm.label(dataService, "SubgraphService"); } // -- Helpers -- @@ -112,6 +119,8 @@ contract RecurringAgreementManagerSharedTest is Test { maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, minSecondsPerCollection: minSecondsPerCollection, maxSecondsPerCollection: maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: 1, metadata: "" }); @@ -140,7 +149,7 @@ contract RecurringAgreementManagerSharedTest is Test { token.mint(address(agreementManager), 1_000_000 ether); vm.prank(operator); - return agreementManager.offerAgreement(rca, _collector()); + return agreementManager.offerAgreement(_collector(), OFFER_TYPE_NEW, abi.encode(rca)); } /// @notice Create a standard RCAU for an existing agreement @@ -162,17 +171,93 @@ contract 
RecurringAgreementManagerSharedTest is Test { maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, minSecondsPerCollection: minSecondsPerCollection, maxSecondsPerCollection: maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, nonce: nonce, metadata: "" }); } /// @notice Offer an RCAU via the operator - function _offerAgreementUpdate( - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau - ) internal returns (bytes16) { + function _offerAgreementUpdate(IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau) internal { vm.prank(operator); - return agreementManager.offerAgreementUpdate(rcau); + agreementManager.offerAgreement(_collector(), OFFER_TYPE_UPDATE, abi.encode(rcau)); + } + + /// @notice Cancel an agreement by reading the activeTerms hash from the collector + /// @return gone True if the agreement was removed (no longer tracked) + function _cancelAgreement(bytes16 agreementId) internal returns (bool gone) { + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(operator); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, activeHash, 0); + // cancelAgreement is void; the callback handles reconciliation. + // Check if the agreement was removed by looking at the provider field. 
+ return agreementManager.getAgreementInfo(address(recurringCollector), agreementId).provider == address(0); + } + + /// @notice Cancel a pending update by reading the pendingTerms hash from the collector + /// @return gone True if the agreement was removed (no longer tracked) + function _cancelPendingUpdate(bytes16 agreementId) internal returns (bool gone) { + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(operator); + agreementManager.cancelAgreement(address(recurringCollector), agreementId, pendingHash, 0); + return agreementManager.getAgreementInfo(address(recurringCollector), agreementId).provider == address(0); + } + + /// @notice Build active terms from an RCA + function _activeTermsFromRCA( + IRecurringCollector.RecurringCollectionAgreement memory rca + ) internal pure returns (IRecurringCollector.AgreementTerms memory) { + return IRecurringCollector.AgreementTerms({ + deadline: 0, + endsAt: rca.endsAt, + maxInitialTokens: rca.maxInitialTokens, + maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, + minSecondsPerCollection: rca.minSecondsPerCollection, + maxSecondsPerCollection: rca.maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + hash: bytes32(0), + metadata: "" + }); + } + + /// @notice Build empty pending terms + function _emptyTerms() internal pure returns (IRecurringCollector.AgreementTerms memory) { + return IRecurringCollector.AgreementTerms({ + deadline: 0, + endsAt: 0, + maxInitialTokens: 0, + maxOngoingTokensPerSecond: 0, + minSecondsPerCollection: 0, + maxSecondsPerCollection: 0, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + hash: bytes32(0), + metadata: "" + }); + } + + /// @notice Build agreement data from common parameters + function _buildAgreementStorage( + IRecurringCollector.RecurringCollectionAgreement memory rca, + uint16 state, + uint64 acceptedAt, + uint64 collectableUntil, + uint64 lastCollectionAt + ) internal pure returns 
(MockRecurringCollector.AgreementStorage memory) { + return MockRecurringCollector.AgreementStorage({ + dataService: rca.dataService, + payer: rca.payer, + serviceProvider: rca.serviceProvider, + acceptedAt: acceptedAt, + lastCollectionAt: lastCollectionAt, + updateNonce: 0, + collectableUntil: collectableUntil, + state: state, + activeTerms: _activeTermsFromRCA(rca), + pendingTerms: _emptyTerms() + }); } /// @notice Set up a mock agreement in RecurringCollector as Accepted @@ -181,24 +266,7 @@ contract RecurringAgreementManagerSharedTest is Test { IRecurringCollector.RecurringCollectionAgreement memory rca, uint64 acceptedAt ) internal { - recurringCollector.setAgreement( - agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: acceptedAt, - lastCollectionAt: 0, - endsAt: rca.endsAt, - maxInitialTokens: rca.maxInitialTokens, - maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, - minSecondsPerCollection: rca.minSecondsPerCollection, - maxSecondsPerCollection: rca.maxSecondsPerCollection, - updateNonce: 0, - canceledAt: 0, - state: IRecurringCollector.AgreementState.Accepted - }) - ); + recurringCollector.setAgreement(agreementId, _buildAgreementStorage(rca, REGISTERED | ACCEPTED, acceptedAt, 0, 0)); } /// @notice Set up a mock agreement as CanceledByServiceProvider @@ -208,21 +276,13 @@ contract RecurringAgreementManagerSharedTest is Test { ) internal { recurringCollector.setAgreement( agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: uint64(block.timestamp), - lastCollectionAt: 0, - endsAt: rca.endsAt, - maxInitialTokens: rca.maxInitialTokens, - maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, - minSecondsPerCollection: rca.minSecondsPerCollection, - maxSecondsPerCollection: rca.maxSecondsPerCollection, - updateNonce: 0, - canceledAt: 
uint64(block.timestamp), - state: IRecurringCollector.AgreementState.CanceledByServiceProvider - }) + _buildAgreementStorage( + rca, + REGISTERED | ACCEPTED | NOTICE_GIVEN | SETTLED | BY_PROVIDER, + uint64(block.timestamp), + uint64(block.timestamp), + 0 + ) ); } @@ -231,26 +291,12 @@ contract RecurringAgreementManagerSharedTest is Test { bytes16 agreementId, IRecurringCollector.RecurringCollectionAgreement memory rca, uint64 acceptedAt, - uint64 canceledAt, + uint64 collectableUntil, uint64 lastCollectionAt ) internal { recurringCollector.setAgreement( agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: acceptedAt, - lastCollectionAt: lastCollectionAt, - endsAt: rca.endsAt, - maxInitialTokens: rca.maxInitialTokens, - maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, - minSecondsPerCollection: rca.minSecondsPerCollection, - maxSecondsPerCollection: rca.maxSecondsPerCollection, - updateNonce: 0, - canceledAt: canceledAt, - state: IRecurringCollector.AgreementState.CanceledByPayer - }) + _buildAgreementStorage(rca, REGISTERED | ACCEPTED | NOTICE_GIVEN | BY_PAYER, acceptedAt, collectableUntil, lastCollectionAt) ); } @@ -263,21 +309,7 @@ contract RecurringAgreementManagerSharedTest is Test { ) internal { recurringCollector.setAgreement( agreementId, - IRecurringCollector.AgreementData({ - dataService: rca.dataService, - payer: rca.payer, - serviceProvider: rca.serviceProvider, - acceptedAt: acceptedAt, - lastCollectionAt: lastCollectionAt, - endsAt: rca.endsAt, - maxInitialTokens: rca.maxInitialTokens, - maxOngoingTokensPerSecond: rca.maxOngoingTokensPerSecond, - minSecondsPerCollection: rca.minSecondsPerCollection, - maxSecondsPerCollection: rca.maxSecondsPerCollection, - updateNonce: 0, - canceledAt: 0, - state: IRecurringCollector.AgreementState.Accepted - }) + _buildAgreementStorage(rca, REGISTERED | ACCEPTED, acceptedAt, 0, lastCollectionAt) ); } } diff 
--git a/packages/issuance/test/unit/agreement-manager/updateEscrow.t.sol b/packages/issuance/test/unit/agreement-manager/updateEscrow.t.sol index 9fb9b6462..f618f9c5c 100644 --- a/packages/issuance/test/unit/agreement-manager/updateEscrow.t.sol +++ b/packages/issuance/test/unit/agreement-manager/updateEscrow.t.sol @@ -35,9 +35,9 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS // SP cancels — reconcileAgreement triggers escrow update, thawing the full balance _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); - assertEq(agreementManager.getProviderAgreementCount(indexer), 0); + assertEq(agreementManager.getPairAgreementCount(address(recurringCollector), indexer), 0); // balance should now be fully thawing IPaymentsEscrow.EscrowAccount memory account; @@ -62,18 +62,18 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS // SP cancels and reconcile (triggers thaw) _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); // Fast forward past thawing period (1 day in mock) vm.warp(block.timestamp + 1 days + 1); uint256 agreementManagerBalanceBefore = token.balanceOf(address(agreementManager)); - // reconcileCollectorProvider: withdraw + // reconcileProvider: withdraw vm.expectEmit(address(agreementManager)); emit IRecurringEscrowManagement.EscrowWithdrawn(indexer, address(recurringCollector), maxClaim); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); // Tokens should be back in RecurringAgreementManager uint256 agreementManagerBalanceAfter = token.balanceOf(address(agreementManager)); @@ -82,7 +82,7 @@ contract RecurringAgreementManagerUpdateEscrowTest is 
RecurringAgreementManagerS function test_UpdateEscrow_NoopWhenNoBalance() public { // No agreements, no balance — should succeed silently - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); } function test_UpdateEscrow_NoopWhenStillThawing() public { @@ -97,10 +97,10 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS // SP cancels and reconcile (triggers thaw) _setAgreementCanceledBySP(agreementId, rca); - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); // Subsequent call before thaw complete: no-op (thaw in progress, amount is correct) - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); // Balance should still be fully thawing IPaymentsEscrow.EscrowAccount memory account; @@ -113,25 +113,26 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS } function test_UpdateEscrow_Permissionless() public { - // Anyone can call reconcileCollectorProvider + // Anyone can call reconcileProvider address anyone = makeAddr("anyone"); vm.prank(anyone); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); } // ==================== Excess Thawing With Active Agreements ==================== function test_UpdateEscrow_ThawsExcessWithActiveAgreements() public { // Offer agreement, accept, then reconcile down — excess should be thawed + // Use 300 ether initial so excess (300) exceeds dust threshold (3600*16/256 = 225) (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( - 100 ether, + 300 ether, 1 ether, 3600, uint64(block.timestamp + 365 days) ); bytes16 agreementId = _offerAgreement(rca); - uint256 maxClaim = 1 ether * 3600 + 
100 ether; + uint256 maxClaim = 1 ether * 3600 + 300 ether; // Accept and simulate a collection (reduces maxNextClaim) _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); @@ -140,7 +141,7 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS vm.warp(collectionTime); // Reconcile — should reduce required escrow - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); uint256 newRequired = agreementManager.getSumMaxNextClaim(_collector(), indexer); assertTrue(newRequired < maxClaim, "Required should have decreased"); @@ -189,8 +190,8 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS // SP cancels agreement 1, reconcile to 0 (triggers thaw of excess) _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); // Verify excess is thawing IPaymentsEscrow.EscrowAccount memory accountBefore; @@ -246,8 +247,8 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS // SP cancels, reconcile to 0 (triggers thaw of all excess) _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); IPaymentsEscrow.EscrowAccount memory account; (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -303,8 +304,8 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS // Reconcile agreement 1 to create excess (triggers thaw) _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); - 
agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); IPaymentsEscrow.EscrowAccount memory accountBefore; (accountBefore.balance, accountBefore.tokensThawing, accountBefore.thawEndTimestamp) = paymentsEscrow @@ -327,8 +328,8 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS ) ); _setAgreementCanceledBySP(id2, rca2); - agreementManager.reconcileAgreement(id2); - agreementManager.reconcileAgreement(id2); + agreementManager.reconcileAgreement(address(recurringCollector), id2); + agreementManager.reconcileAgreement(address(recurringCollector), id2); IPaymentsEscrow.EscrowAccount memory accountAfter; (accountAfter.balance, accountAfter.tokensThawing, accountAfter.thawEndTimestamp) = paymentsEscrow @@ -345,7 +346,7 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS // Tests all (escrowBasis, accountState) combinations via a helper that: // 1. Sets escrowBasis (controls min/max) // 2. Overrides mock escrow to desired (balance, tokensThawing, thawReady) - // 3. Calls reconcileCollectorProvider + // 3. Calls reconcileProvider // 4. Asserts expected (balance, tokensThawing) // // Desired behavior (the 4 objectives): @@ -377,7 +378,7 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS ready ? block.timestamp - 1 : (0 < thawing ? 
block.timestamp + 1 days : 0) ); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory r; (r.balance, r.tokensThawing, r.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -415,7 +416,7 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS thawEndTimestamp ); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); IPaymentsEscrow.EscrowAccount memory r; (r.balance, r.tokensThawing, r.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -522,7 +523,7 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS // Reconcile indexer1's agreement (triggers thaw) _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); IPaymentsEscrow.EscrowAccount memory acct1; (acct1.balance, acct1.tokensThawing, acct1.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -540,8 +541,8 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS ); assertEq(indexer2Balance, maxClaim2); - // reconcileCollectorProvider on indexer2 should be a no-op (balance == required) - agreementManager.reconcileCollectorProvider(address(_collector()), indexer2); + // reconcileProvider on indexer2 should be a no-op (balance == required) + agreementManager.reconcileProvider(address(_collector()), indexer2); (uint256 indexer2BalanceAfter, , ) = paymentsEscrow.escrowAccounts( address(agreementManager), address(recurringCollector), @@ -571,8 +572,8 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS ); assertEq(balanceBefore, maxClaim); - // reconcileCollectorProvider should be a no-op - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + // 
reconcileProvider should be a no-op + agreementManager.reconcileProvider(address(_collector()), indexer); // Nothing changed (uint256 balanceAfter, , ) = paymentsEscrow.escrowAccounts( @@ -595,15 +596,16 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS function test_Reconcile_AutomaticallyThawsExcess() public { // Reconcile calls _updateEscrow, which should thaw excess automatically + // Use 300 ether initial so excess (300) exceeds dust threshold (3600*16/256 = 225) (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( - 100 ether, + 300 ether, 1 ether, 3600, uint64(block.timestamp + 365 days) ); bytes16 agreementId = _offerAgreement(rca); - uint256 maxClaim = 1 ether * 3600 + 100 ether; + uint256 maxClaim = 1 ether * 3600 + 300 ether; // Accept and simulate a collection _setAgreementAccepted(agreementId, rca, uint64(block.timestamp)); @@ -612,7 +614,7 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS vm.warp(collectionTime); // Reconcile — triggers _updateEscrow internally - agreementManager.reconcileAgreement(agreementId); + agreementManager.reconcileAgreement(address(recurringCollector), agreementId); // Excess should already be thawing IPaymentsEscrow.EscrowAccount memory account; @@ -658,7 +660,7 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS // Cancel and reconcile rca2 -> excess (950) thawed, rca1 remains _setAgreementCanceledBySP(id2, rca2); - agreementManager.reconcileAgreement(id2); + agreementManager.reconcileAgreement(address(recurringCollector), id2); IPaymentsEscrow.EscrowAccount memory account; (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -676,7 +678,7 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS vm.expectEmit(address(agreementManager)); emit IRecurringEscrowManagement.EscrowWithdrawn(indexer, address(recurringCollector), 
maxClaim2); - agreementManager.reconcileCollectorProvider(address(_collector()), indexer); + agreementManager.reconcileProvider(address(_collector()), indexer); // After withdraw: only rca1's required amount remains, nothing thawing (account.balance, account.tokensThawing, account.thawEndTimestamp) = paymentsEscrow.escrowAccounts( @@ -705,7 +707,7 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS // Reconcile -> full thaw _setAgreementCanceledBySP(id1, rca1); - agreementManager.reconcileAgreement(id1); + agreementManager.reconcileAgreement(address(recurringCollector), id1); // Verify: entire balance is thawing, liquid = 0 IPaymentsEscrow.EscrowAccount memory account; @@ -743,5 +745,123 @@ contract RecurringAgreementManagerUpdateEscrowTest is RecurringAgreementManagerS assertEq(account.tokensThawing, 0, "Nothing thawing after withdraw"); } + // ==================== ThawTarget edge cases (minThawFraction variants) ==================== + // + // The thawTarget calculation has two subtraction branches that need underflow guards: + // escrowed < min → account.balance - min (guarded by: min < account.balance) + // else → account.balance - max (guarded by: max < account.balance) + // + // When minThawFraction = 0 the thaw threshold (minThawAmount) is zero, so the + // `minThawAmount <= excess` gate passes even when excess = 0. Without the + // `max < account.balance` guard this would underflow. + + /// @dev Like _check but also sets minThawFraction before snapshotting. 
+ function _checkFrac( + IRecurringEscrowManagement.EscrowBasis basis, + uint8 fraction, + uint256 bal, + uint256 thawing, + bool ready, + uint256 expBal, + uint256 expThaw, + string memory label + ) internal { + uint256 snap = vm.snapshot(); + + vm.startPrank(operator); + agreementManager.setEscrowBasis(basis); + agreementManager.setMinThawFraction(fraction); + vm.stopPrank(); + + paymentsEscrow.setAccount( + address(agreementManager), + address(recurringCollector), + indexer, + bal, + thawing, + ready ? block.timestamp - 1 : (0 < thawing ? block.timestamp + 1 days : 0) + ); + + agreementManager.reconcileProvider(address(_collector()), indexer); + + IPaymentsEscrow.EscrowAccount memory r; + (r.balance, r.tokensThawing, r.thawEndTimestamp) = paymentsEscrow.escrowAccounts( + address(agreementManager), + address(recurringCollector), + indexer + ); + assertEq(r.balance, expBal, string.concat(label, ": balance")); + assertEq(r.tokensThawing, expThaw, string.concat(label, ": thawing")); + + assertTrue(vm.revertTo(snap)); + } + + function test_UpdateEscrow_ThawTargetEdgeCases() public { + // S = sumMaxNextClaim, established by offering one agreement in Full mode. + (IRecurringCollector.RecurringCollectionAgreement memory rca, ) = _makeRCAWithId( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + _offerAgreement(rca); + uint256 S = 1 ether * 3600 + 100 ether; // 3700 ether + + token.mint(address(paymentsEscrow), 10 * S); + vm.warp(100); + + IRecurringEscrowManagement.EscrowBasis O = IRecurringEscrowManagement.EscrowBasis.OnDemand; + IRecurringEscrowManagement.EscrowBasis F = IRecurringEscrowManagement.EscrowBasis.Full; + IRecurringEscrowManagement.EscrowBasis J = IRecurringEscrowManagement.EscrowBasis.JustInTime; + + // ── Key bug-fix case: balance < max, minThawFraction = 0 ──────────── + // Without the `max < account.balance` guard the thawTarget subtraction underflows. + // OnDemand: min = 0, max = S. balance = S/2, thawing = S/4. 
+ // escrowed = S/4, excess = 0, minThawAmount = 0 → thawTarget = 0 (no excess). + // Stale thaw is cancelled; balance stays unchanged. + _checkFrac(O, 0, S / 2, S / 4, false, S / 2, 0, "E1:balcancel-thaw"); + + // Same but with zero thawing — already at ideal, no-op + _checkFrac(O, 0, S / 2, 0, false, S / 2, 0, "E2:balnoop"); + + // ── balance == max, minThawFraction = 0 ───────────────────────────── + // excess = 0, thawTarget = 0 (max == balance → no excess to thaw). + // Stale thaw cancelled; escrowed rises to full balance = max. + _checkFrac(O, 0, S, S / 4, false, S, 0, "E3:bal=max,frac=0->cancel-thaw"); + + // ── balance == 0, 0 < max, minThawFraction = 0 ───────────────────── + // escrowed = 0, excess = 0, guard: max(S) < balance(0) → false → keep 0. + _checkFrac(O, 0, 0, 0, false, 0, 0, "E4:bal=0,frac=0->noop"); + + // ── max < balance, minThawFraction = 0, excess above threshold ────── + // Normal thaw case: excess = S, 0 <= S && S < 2S → true → thawTarget = balance - max = S. + _checkFrac(O, 0, 2 * S, 0, false, 2 * S, S, "E5:excess,frac=0->thaw"); + + // ── JIT mode (max = 0): 0 < balance, minThawFraction = 0 ─────────── + // excess = escrowed, 0 <= escrowed && 0 < balance → thaw everything. + _checkFrac(J, 0, S, 0, false, S, S, "E6:jit,frac=0->thaw-all"); + + // ── Full mode: balance < min, minThawFraction = 0 ────────────────── + // Tests the min-branch underflow guard: min(S) < balance(S/2) → false → thawTarget = 0. + // Then _withdrawAndRebalance deposits to reach min. + _checkFrac(F, 0, S / 2, 0, false, S, 0, "E7:full,baldeposit"); + + // ── Default minThawFraction (16): excess below thaw threshold ─────── + // balance slightly above max, but excess < minThawAmount → no thaw. + // minThawAmount = S * 16 / 256 = S/16. excess = 1 wei < S/16 → skip. 
+ _checkFrac(O, 16, S + 1, 0, false, S + 1, 0, "E8:below-threshold,frac=16->noop"); + + // ── Default minThawFraction (16): excess above thaw threshold ─────── + // excess = S, minThawAmount = S/16, S/16 <= S → thaw. + _checkFrac(O, 16, 2 * S, 0, false, 2 * S, S, "E9:above-threshold,frac=16->thaw"); + + // ── Thaw threshold must NOT block deficit adjustments ─────────────── + // Full mode: balance = 2*S, tokensThawing = 3*S/2 → escrowed = S/2 < min = S. + // thawTarget = balance - min = S (cancel half the thaw to reach min). + // excess = 0, 0 < minThawAmount = S/16 → threshold would block, + // but the escrowed < min exemption ensures we still act. + _checkFrac(F, 16, 2 * S, (3 * S) / 2, false, 2 * S, S, "E10:deficit-ignores-threshold"); + } + /* solhint-enable graph/func-name-mixedcase */ } diff --git a/packages/issuance/test/unit/common/enumerableSetUtil.t.sol b/packages/issuance/test/unit/common/enumerableSetUtil.t.sol new file mode 100644 index 000000000..668f1e797 --- /dev/null +++ b/packages/issuance/test/unit/common/enumerableSetUtil.t.sol @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { Test } from "forge-std/Test.sol"; + +import { EnumerableSetUtilHarness } from "../mocks/EnumerableSetUtilHarness.sol"; + +/// @notice Unit tests for EnumerableSetUtil pagination helpers. 
+contract EnumerableSetUtilTest is Test { + /* solhint-disable graph/func-name-mixedcase */ + + EnumerableSetUtilHarness internal harness; + + function setUp() public { + harness = new EnumerableSetUtilHarness(); + } + + // ==================== getPage (AddressSet) ==================== + + function test_GetPage_EmptySet_ReturnsEmpty() public view { + address[] memory result = harness.getPage(0, 10); + assertEq(result.length, 0); + } + + function test_GetPage_ReturnsAllElements() public { + address a1 = makeAddr("a1"); + address a2 = makeAddr("a2"); + address a3 = makeAddr("a3"); + harness.addAddress(a1); + harness.addAddress(a2); + harness.addAddress(a3); + + address[] memory result = harness.getPage(0, 10); + assertEq(result.length, 3); + assertEq(result[0], a1); + assertEq(result[1], a2); + assertEq(result[2], a3); + } + + function test_GetPage_WithOffset() public { + address a1 = makeAddr("a1"); + address a2 = makeAddr("a2"); + address a3 = makeAddr("a3"); + harness.addAddress(a1); + harness.addAddress(a2); + harness.addAddress(a3); + + address[] memory result = harness.getPage(1, 10); + assertEq(result.length, 2); + assertEq(result[0], a2); + assertEq(result[1], a3); + } + + function test_GetPage_WithCount() public { + address a1 = makeAddr("a1"); + address a2 = makeAddr("a2"); + address a3 = makeAddr("a3"); + harness.addAddress(a1); + harness.addAddress(a2); + harness.addAddress(a3); + + address[] memory result = harness.getPage(0, 2); + assertEq(result.length, 2); + assertEq(result[0], a1); + assertEq(result[1], a2); + } + + function test_GetPage_OffsetAndCount() public { + address a1 = makeAddr("a1"); + address a2 = makeAddr("a2"); + address a3 = makeAddr("a3"); + harness.addAddress(a1); + harness.addAddress(a2); + harness.addAddress(a3); + + address[] memory result = harness.getPage(1, 1); + assertEq(result.length, 1); + assertEq(result[0], a2); + } + + function test_GetPage_OffsetAtEnd_ReturnsEmpty() public { + harness.addAddress(makeAddr("a1")); + + 
address[] memory result = harness.getPage(1, 10); + assertEq(result.length, 0); + } + + function test_GetPage_OffsetPastEnd_ReturnsEmpty() public { + harness.addAddress(makeAddr("a1")); + + address[] memory result = harness.getPage(5, 10); + assertEq(result.length, 0); + } + + function test_GetPage_CountClamped() public { + address a1 = makeAddr("a1"); + harness.addAddress(a1); + + address[] memory result = harness.getPage(0, 100); + assertEq(result.length, 1); + assertEq(result[0], a1); + } + + function test_GetPage_ZeroCount_ReturnsEmpty() public { + harness.addAddress(makeAddr("a1")); + + address[] memory result = harness.getPage(0, 0); + assertEq(result.length, 0); + } + + // ==================== getPageBytes16 (Bytes32Set) ==================== + + function test_GetPageBytes16_EmptySet_ReturnsEmpty() public view { + bytes16[] memory result = harness.getPageBytes16(0, 10); + assertEq(result.length, 0); + } + + function test_GetPageBytes16_ReturnsAllElements() public { + bytes32 b1 = bytes32(bytes16(hex"00010002000300040005000600070008")); + bytes32 b2 = bytes32(bytes16(hex"000a000b000c000d000e000f00100011")); + harness.addBytes32(b1); + harness.addBytes32(b2); + + bytes16[] memory result = harness.getPageBytes16(0, 10); + assertEq(result.length, 2); + assertEq(result[0], bytes16(b1)); + assertEq(result[1], bytes16(b2)); + } + + function test_GetPageBytes16_TruncatesBytes32ToBytes16() public { + // The high 16 bytes should be kept, low 16 bytes discarded + bytes32 full = hex"0102030405060708091011121314151617181920212223242526272829303132"; + harness.addBytes32(full); + + bytes16[] memory result = harness.getPageBytes16(0, 1); + assertEq(result.length, 1); + assertEq(result[0], bytes16(full)); + } + + function test_GetPageBytes16_WithOffset() public { + bytes32 b1 = bytes32(bytes16(hex"aaaa0000000000000000000000000001")); + bytes32 b2 = bytes32(bytes16(hex"bbbb0000000000000000000000000002")); + bytes32 b3 = bytes32(bytes16(hex"cccc0000000000000000000000000003")); 
+ harness.addBytes32(b1); + harness.addBytes32(b2); + harness.addBytes32(b3); + + bytes16[] memory result = harness.getPageBytes16(1, 10); + assertEq(result.length, 2); + assertEq(result[0], bytes16(b2)); + assertEq(result[1], bytes16(b3)); + } + + function test_GetPageBytes16_WithCount() public { + bytes32 b1 = bytes32(bytes16(hex"aaaa0000000000000000000000000001")); + bytes32 b2 = bytes32(bytes16(hex"bbbb0000000000000000000000000002")); + bytes32 b3 = bytes32(bytes16(hex"cccc0000000000000000000000000003")); + harness.addBytes32(b1); + harness.addBytes32(b2); + harness.addBytes32(b3); + + bytes16[] memory result = harness.getPageBytes16(0, 2); + assertEq(result.length, 2); + assertEq(result[0], bytes16(b1)); + assertEq(result[1], bytes16(b2)); + } + + function test_GetPageBytes16_OffsetPastEnd_ReturnsEmpty() public { + harness.addBytes32(bytes32(uint256(1))); + + bytes16[] memory result = harness.getPageBytes16(5, 10); + assertEq(result.length, 0); + } + + function test_GetPageBytes16_CountClamped() public { + bytes32 b1 = bytes32(bytes16(hex"aaaa0000000000000000000000000001")); + harness.addBytes32(b1); + + bytes16[] memory result = harness.getPageBytes16(0, 100); + assertEq(result.length, 1); + assertEq(result[0], bytes16(b1)); + } + + function test_GetPageBytes16_ZeroCount_ReturnsEmpty() public { + harness.addBytes32(bytes32(uint256(1))); + + bytes16[] memory result = harness.getPageBytes16(0, 0); + assertEq(result.length, 0); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/issuance/test/unit/eligibility/eligibility.t.sol b/packages/issuance/test/unit/eligibility/eligibility.t.sol index aaa74e0c6..871c2bc87 100644 --- a/packages/issuance/test/unit/eligibility/eligibility.t.sol +++ b/packages/issuance/test/unit/eligibility/eligibility.t.sol @@ -95,7 +95,7 @@ contract RewardsEligibilityOracleEligibilityTest is RewardsEligibilityOracleShar // ==================== Edge Cases ==================== function 
test_NeverRegisteredIndexerEligible_WhenPeriodExceedsTimestamp() public { - // TRST-L-1: When eligibilityPeriod > block.timestamp, all indexers become eligible + // When eligibilityPeriod > block.timestamp, all indexers become eligible // because block.timestamp < 0 + eligibilityPeriod _enableValidation(); _renewEligibility(unauthorized); // set lastOracleUpdateTime diff --git a/packages/issuance/test/unit/mocks/EnumerableSetUtilHarness.sol b/packages/issuance/test/unit/mocks/EnumerableSetUtilHarness.sol new file mode 100644 index 000000000..d77fae866 --- /dev/null +++ b/packages/issuance/test/unit/mocks/EnumerableSetUtilHarness.sol @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { EnumerableSet } from "@openzeppelin/contracts/utils/structs/EnumerableSet.sol"; +import { EnumerableSetUtil } from "../../../contracts/common/EnumerableSetUtil.sol"; + +/// @notice Harness that exposes EnumerableSetUtil internal functions for testing. +contract EnumerableSetUtilHarness { + using EnumerableSet for EnumerableSet.AddressSet; + using EnumerableSet for EnumerableSet.Bytes32Set; + using EnumerableSetUtil for EnumerableSet.AddressSet; + using EnumerableSetUtil for EnumerableSet.Bytes32Set; + + EnumerableSet.AddressSet private _addresses; + EnumerableSet.Bytes32Set private _bytes32s; + + // -- AddressSet helpers -- + + function addAddress(address a) external { + _addresses.add(a); + } + + function addressSetLength() external view returns (uint256) { + return _addresses.length(); + } + + function getPage(uint256 offset, uint256 count) external view returns (address[] memory) { + return _addresses.getPage(offset, count); + } + + // -- Bytes32Set helpers -- + + function addBytes32(bytes32 b) external { + _bytes32s.add(b); + } + + function bytes32SetLength() external view returns (uint256) { + return _bytes32s.length(); + } + + function getPageBytes16(uint256 offset, uint256 count) external view returns (bytes16[] memory) { + return 
_bytes32s.getPageBytes16(offset, count); + } +} diff --git a/packages/subgraph-service/contracts/SubgraphService.sol b/packages/subgraph-service/contracts/SubgraphService.sol index b0b4b5944..246446905 100644 --- a/packages/subgraph-service/contracts/SubgraphService.sol +++ b/packages/subgraph-service/contracts/SubgraphService.sol @@ -6,12 +6,10 @@ import { IGraphToken } from "@graphprotocol/interfaces/contracts/contracts/token import { IGraphTallyCollector } from "@graphprotocol/interfaces/contracts/horizon/IGraphTallyCollector.sol"; import { IRewardsIssuer } from "@graphprotocol/interfaces/contracts/contracts/rewards/IRewardsIssuer.sol"; import { IDataService } from "@graphprotocol/interfaces/contracts/data-service/IDataService.sol"; -import { IDataServiceAgreements } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceAgreements.sol"; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; import { ILegacyAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/ILegacyAllocation.sol"; -import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; import { MulticallUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/MulticallUpgradeable.sol"; @@ -19,9 +17,10 @@ import { Initializable } from "@openzeppelin/contracts-upgradeable/proxy/utils/I import { DataServicePausableUpgradeable } from "@graphprotocol/horizon/contracts/data-service/extensions/DataServicePausableUpgradeable.sol"; import { DataService } from "@graphprotocol/horizon/contracts/data-service/DataService.sol"; import { 
DataServiceFees } from "@graphprotocol/horizon/contracts/data-service/extensions/DataServiceFees.sol"; +import { ReentrancyGuardTransient } from "@openzeppelin/contracts/utils/ReentrancyGuardTransient.sol"; import { Directory } from "./utilities/Directory.sol"; import { AllocationManager } from "./utilities/AllocationManager.sol"; -import { SubgraphServiceV1Storage } from "./SubgraphServiceStorage.sol"; +import { SubgraphServiceV2Storage } from "./SubgraphServiceStorage.sol"; import { TokenUtils } from "@graphprotocol/contracts/contracts/utils/TokenUtils.sol"; import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; @@ -40,6 +39,7 @@ contract SubgraphService is Initializable, OwnableUpgradeable, MulticallUpgradeable, + ReentrancyGuardTransient, DataService, DataServicePausableUpgradeable, DataServiceFees, @@ -47,7 +47,7 @@ contract SubgraphService is AllocationManager, IRewardsIssuer, ISubgraphService, - SubgraphServiceV1Storage + SubgraphServiceV2Storage { using PPMMath for uint256; using Allocation for mapping(address => IAllocation.State); @@ -80,18 +80,13 @@ contract SubgraphService is * @param disputeManager The address of the DisputeManager contract * @param graphTallyCollector The address of the GraphTallyCollector contract * @param curation The address of the Curation contract - * @param recurringCollector The address of the RecurringCollector contract */ constructor( address graphController, address disputeManager, address graphTallyCollector, - address curation, - address recurringCollector - ) - DataService(graphController) - Directory(address(this), disputeManager, graphTallyCollector, curation, recurringCollector) - { + address curation + ) DataService(graphController) Directory(address(this), disputeManager, graphTallyCollector, curation) { _disableInitializers(); } @@ -114,7 +109,7 @@ contract SubgraphService is } /** - * @notice + * @notice Register an indexer to the subgraph service * @dev Implements {IDataService.register} 
* * Requirements: @@ -210,7 +205,7 @@ contract SubgraphService is * @notice Close an allocation, indicating that the indexer has stopped indexing the subgraph deployment * @dev This is the equivalent of the `closeAllocation` function in the legacy Staking contract. * There are a few notable differences with the legacy function: - * - allocations are nowlong lived. All service payments, including indexing rewards, should be collected periodically + * - allocations are now long lived. All service payments, including indexing rewards, should be collected periodically * without the need of closing the allocation. Allocations should only be closed when indexers want to reclaim the allocated * tokens for other purposes. * - No POI is required to close an allocation. Indexers should present POIs to collect indexing rewards using {collect}. @@ -229,7 +224,7 @@ contract SubgraphService is function stopService(address indexer, bytes calldata data) external override enforceService(indexer, REGISTERED) { address allocationId = abi.decode(data, (address)); _checkAllocationOwnership(indexer, allocationId); - _onCloseAllocation(allocationId, false); + _onCloseAllocation(allocationId); _closeAllocation(allocationId, false); emit ServiceStopped(indexer, data); } @@ -275,7 +270,7 @@ contract SubgraphService is address indexer, IGraphPayments.PaymentTypes paymentType, bytes calldata data - ) external override enforceService(indexer, VALID_PROVISION | REGISTERED) returns (uint256) { + ) external override nonReentrant enforceService(indexer, VALID_PROVISION | REGISTERED) returns (uint256) { uint256 paymentCollected = 0; if (paymentType == IGraphPayments.PaymentTypes.QueryFee) { @@ -315,8 +310,7 @@ contract SubgraphService is IAllocation.State memory allocation = _allocations.get(allocationId); require(allocation.isStale(maxPOIStaleness), SubgraphServiceCannotForceCloseAllocation(allocationId)); require(!allocation.isAltruistic(), SubgraphServiceAllocationIsAltruistic(allocationId)); - 
_onCloseAllocation(allocationId, true); - _closeAllocation(allocationId, true); + _resizeAllocation(allocationId, 0, _delegationRatio); } /// @inheritdoc ISubgraphService @@ -373,100 +367,73 @@ contract SubgraphService is emit IndexingFeesCutSet(indexingFeesCut_); } - /** - * @inheritdoc ISubgraphService - * @notice Accept an indexing agreement. - * - * See {ISubgraphService.acceptIndexingAgreement}. - * - * Requirements: - * - The agreement's indexer must be registered - * - The caller must be authorized by the agreement's indexer - * - The provision must be valid according to the subgraph service rules - * - Allocation must belong to the indexer and be open - * - Agreement must be for this data service - * - Agreement's subgraph deployment must match the allocation's subgraph deployment - * - Agreement must not have been accepted before - * - Allocation must not have an agreement already - * - * @dev rca.metadata is an encoding of {IndexingAgreement.AcceptIndexingAgreementMetadata} - * - * Emits {IndexingAgreement.IndexingAgreementAccepted} event - * - * @param allocationId The id of the allocation - * @param rca The Recurring Collection Agreement - * @param signature ECDSA signature bytes, or empty for contract-approved agreements - * @return agreementId The ID of the accepted indexing agreement - */ - function acceptIndexingAgreement( - address allocationId, - IRecurringCollector.RecurringCollectionAgreement calldata rca, - bytes calldata signature - ) external enforceService(rca.serviceProvider, VALID_PROVISION | REGISTERED) returns (bytes16) { - return IndexingAgreement._getStorageManager().accept(_allocations, allocationId, rca, signature); + /// @inheritdoc ISubgraphService + function setAuthorizedCollector(address collector, bool authorized) external override onlyOwner { + require(collector != address(0), SubgraphServiceNotCollector(address(0))); + if (authorizedCollectors[collector] == authorized) return; + + authorizedCollectors[collector] = authorized; 
+ emit AuthorizedCollectorSet(collector, authorized); } - /** - * @inheritdoc ISubgraphService - * @notice Update an indexing agreement. - * - * See {IndexingAgreement.update}. - * - * Requirements: - * - The contract must not be paused - * - The indexer must be valid - * - * @param indexer The indexer address - * @param rcau The Recurring Collection Agreement Update - * @param signature ECDSA signature bytes, or empty for contract-approved updates - */ - function updateIndexingAgreement( - address indexer, - IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau, - bytes calldata signature - ) external enforceService(indexer, VALID_PROVISION | REGISTERED) { - IndexingAgreement._getStorageManager().update(indexer, rcau, signature); + /// @inheritdoc ISubgraphService + function isAuthorizedCollector(address collector) external view override returns (bool) { + return authorizedCollectors[collector]; + } + + /// @inheritdoc ISubgraphService + function setBlockClosingAllocationWithActiveAgreement(bool enabled) external override onlyOwner { + if (blockClosingAllocationWithActiveAgreement == enabled) return; + + blockClosingAllocationWithActiveAgreement = enabled; + emit BlockClosingAllocationWithActiveAgreementSet(enabled); } /** - * @inheritdoc ISubgraphService - * @notice Cancel an indexing agreement by indexer / operator. - * - * See {IndexingAgreement.cancel}. - * - * @dev Can only be canceled on behalf of a valid indexer. - * - * Requirements: - * - The contract must not be paused - * - The indexer must be valid - * - * @param indexer The indexer address - * @param agreementId The id of the agreement + * @notice Callback from a collector when a service provider accepts an agreement (initial or update). + * @dev Reverting — rejects the acceptance if validation fails. + * Only callable by an authorized collector. + * For initial acceptance: validates indexer registration and provision, decodes allocationId from extraData. 
+ * For updates: delegates directly to the library for term validation. + * @param agreementId The agreement ID + * @param payer The payer address + * @param serviceProvider The service provider address + * @param metadata The agreement metadata + * @param extraData Extra data for the agreement (e.g. encoded allocationId for initial accept) */ - function cancelIndexingAgreement( - address indexer, - bytes16 agreementId - ) external enforceService(indexer, VALID_PROVISION | REGISTERED) { - IndexingAgreement._getStorageManager().cancel(indexer, agreementId); + // solhint-disable-next-line use-natspec + function acceptAgreement( + bytes16 agreementId, + bytes32 /* versionHash */, + address payer, + address serviceProvider, + bytes calldata metadata, + bytes calldata extraData + ) external { + _requireCollectorCaller(); + + IndexingAgreement.StorageManager storage sm = IndexingAgreement._getStorageManager(); + if (sm.agreements[agreementId].collector == address(0)) { + // Initial accept — validate indexer and provision + require( + bytes(indexers[serviceProvider].url).length > 0, + SubgraphServiceIndexerNotRegistered(serviceProvider) + ); + _requireValidProvision(serviceProvider); + } + // Collector identity (initial: store, update: enforce match) handled by library + sm.onAcceptCallback(_allocations, agreementId, payer, serviceProvider, metadata, extraData, msg.sender); } /** - * @inheritdoc IDataServiceAgreements - * @notice Cancel an indexing agreement by payer / signer. - * - * See {IDataServiceAgreements.cancelIndexingAgreementByPayer}. - * - * Requirements: - * - The caller must be authorized by the payer - * - The agreement must be active - * - * Emits {IndexingAgreementCanceled} event - * - * @param agreementId The id of the agreement + * @notice Callback from a collector on agreement lifecycle events. + * @dev No-op — agreement state is checked on-demand (e.g. in {onCloseAllocation} and {collect}). 
+ * The collector already emits events for all state transitions; duplicating them here + * would add gas cost without on-chain benefit. + * Required by {IAgreementStateChangeCallback} interface. */ - function cancelIndexingAgreementByPayer(bytes16 agreementId) external whenNotPaused { - IndexingAgreement._getStorageManager().cancelByPayer(agreementId); - } + // solhint-disable-next-line use-natspec,no-empty-blocks + function afterAgreementStateChange(bytes16, bytes32, uint16) external {} /// @inheritdoc ISubgraphService function getIndexingAgreement( @@ -495,6 +462,11 @@ contract SubgraphService is ); } + /// @inheritdoc ISubgraphService + function getBlockClosingAllocationWithActiveAgreement() external view override returns (bool enabled) { + enabled = blockClosingAllocationWithActiveAgreement; + } + /// @inheritdoc IRewardsIssuer function getSubgraphAllocatedTokens(bytes32 subgraphDeploymentId) external view override returns (uint256) { return _subgraphAllocatedTokens[subgraphDeploymentId]; @@ -532,12 +504,15 @@ contract SubgraphService is /** * @notice Internal function to handle closing an allocation - * @dev This function is called when an allocation is closed, either by the indexer or by a third party + * @dev This function is called when an allocation is closed, either by the indexer or by a third party. + * Cancels any active indexing agreement on the allocation. 
* @param _allocationId The id of the allocation being closed - * @param _forceClosed Whether the allocation was force closed */ - function _onCloseAllocation(address _allocationId, bool _forceClosed) internal { - IndexingAgreement._getStorageManager().onCloseAllocation(_allocationId, _forceClosed); + function _onCloseAllocation(address _allocationId) internal { + IndexingAgreement._getStorageManager().onCloseAllocation( + _allocationId, + blockClosingAllocationWithActiveAgreement + ); } /** @@ -573,6 +548,10 @@ contract SubgraphService is return (_disputeManager().getFishermanRewardCut(), DEFAULT_MAX_VERIFIER_CUT); } + function _requireCollectorCaller() private view { + require(authorizedCollectors[msg.sender], SubgraphServiceNotCollector(msg.sender)); + } + /** * @notice Enforces service provider requirements. * @dev Always checks pause state and caller authorization. Additional checks @@ -722,7 +701,7 @@ contract SubgraphService is (address allocationId, bytes32 poi_, bytes memory poiMetadata_) = abi.decode(_data, (address, bytes32, bytes)); _checkAllocationOwnership(_indexer, allocationId); - (uint256 paymentCollected, bool allocationForceClosed) = _presentPoi( + (uint256 paymentCollected, ) = _presentPoi( allocationId, poi_, poiMetadata_, @@ -730,10 +709,6 @@ contract SubgraphService is paymentsDestination[_indexer] ); - if (allocationForceClosed) { - _onCloseAllocation(allocationId, true); - } - return paymentCollected; } @@ -746,7 +721,7 @@ contract SubgraphService is * This could lead to an out of gas error if there are too many expired claims. In that case, the indexer will need to * manually release the claims, see {IDataServiceFees-releaseStake}, before attempting to collect again. * - * @dev Uses the {RecurringCollector} to collect payment from Graph Horizon payments protocol. + * @dev Uses the agreement's collector to collect payment from Graph Horizon payments protocol. 
* Fees are distributed to service provider and delegators by {GraphPayments} * * Requirements: diff --git a/packages/subgraph-service/contracts/SubgraphServiceStorage.sol b/packages/subgraph-service/contracts/SubgraphServiceStorage.sol index 2ecb69293..79050c5ed 100644 --- a/packages/subgraph-service/contracts/SubgraphServiceStorage.sol +++ b/packages/subgraph-service/contracts/SubgraphServiceStorage.sol @@ -1,10 +1,13 @@ // SPDX-License-Identifier: GPL-3.0-or-later + +// solhint-disable one-contract-per-file + pragma solidity ^0.8.27; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; /** - * @title SubgraphServiceStorage + * @title SubgraphServiceV1Storage * @author Edge & Node * @notice This contract holds all the storage variables for the Subgraph Service contract * @custom:security-contact Please email security+contracts@thegraph.com if you find any @@ -26,3 +29,16 @@ abstract contract SubgraphServiceV1Storage is ISubgraphService { /// @notice The cut data service takes from indexing fee payments. In PPM. uint256 public indexingFeesCut; } + +/** + * @title SubgraphServiceV2Storage + * @author Edge & Node + * @notice Adds authorized collectors. + */ +abstract contract SubgraphServiceV2Storage is SubgraphServiceV1Storage { + /// @notice Authorized collectors that can call agreement callbacks + mapping(address collector => bool authorized) internal authorizedCollectors; + + /// @notice When true, closing an allocation that has an active indexing agreement will revert. 
+ bool internal blockClosingAllocationWithActiveAgreement; +} diff --git a/packages/subgraph-service/contracts/libraries/AllocationHandler.sol b/packages/subgraph-service/contracts/libraries/AllocationHandler.sol index 0519b3e3f..d7552718f 100644 --- a/packages/subgraph-service/contracts/libraries/AllocationHandler.sol +++ b/packages/subgraph-service/contracts/libraries/AllocationHandler.sol @@ -317,14 +317,14 @@ library AllocationHandler { * @param _subgraphAllocatedTokens The mapping of subgraph deployment ids to their allocated tokens * @param params The parameters for the POI presentation * @return rewardsCollected The amount of tokens collected - * @return allocationForceClosed True if the allocation was automatically closed due to over-allocation, false otherwise + * @return allocationDownsized True if the allocation was automatically resized down due to over-allocation, false otherwise */ function presentPOI( mapping(address allocationId => IAllocation.State allocation) storage _allocations, mapping(address indexer => uint256 tokens) storage allocationProvisionTracker, mapping(bytes32 subgraphDeploymentId => uint256 tokens) storage _subgraphAllocatedTokens, PresentParams calldata params - ) external returns (uint256 rewardsCollected, bool allocationForceClosed) { + ) external returns (uint256 rewardsCollected, bool allocationDownsized) { IAllocation.State memory allocation = _allocations.get(params._allocationId); require(allocation.isOpen(), AllocationHandler.AllocationHandlerAllocationClosed(params._allocationId)); _allocations.presentPOI(params._allocationId); // Always record POI presentation to prevent staleness @@ -392,7 +392,7 @@ library AllocationHandler { ); } - // Check if the indexer is over-allocated and force close the allocation if necessary + // Check if the indexer is over-allocated and resize the allocation to zero if necessary if ( _isOverAllocated( allocationProvisionTracker, @@ -401,14 +401,18 @@ library AllocationHandler { 
params._delegationRatio ) ) { - allocationForceClosed = true; - _closeAllocation( + allocationDownsized = true; + _resizeAllocation( _allocations, allocationProvisionTracker, _subgraphAllocatedTokens, + params.graphStaking, params.graphRewardsManager, params._allocationId, - true + allocation, + 0, + params._delegationRatio, + params.maxPOIStaleness ); } } @@ -491,6 +495,46 @@ library AllocationHandler { AllocationHandler.AllocationHandlerAllocationSameSize(_allocationId, _tokens) ); + _resizeAllocation( + _allocations, + allocationProvisionTracker, + _subgraphAllocatedTokens, + graphStaking, + graphRewardsManager, + _allocationId, + allocation, + _tokens, + _delegationRatio, + _maxPOIStaleness + ); + } + + /** + * @notice Internal resize logic shared by explicit resize and over-allocation downsize. + * @dev Caller must validate preconditions (allocation open, tokens changed). + * @param _allocations The allocations mapping + * @param allocationProvisionTracker The provision tracker mapping + * @param _subgraphAllocatedTokens The subgraph allocated tokens mapping + * @param graphStaking The staking contract + * @param graphRewardsManager The rewards manager contract + * @param _allocationId The allocation ID to resize + * @param allocation The current allocation state + * @param _tokens The new token amount for the allocation + * @param _delegationRatio The delegation ratio for provision tracking + * @param _maxPOIStaleness The maximum POI staleness threshold + */ + function _resizeAllocation( + mapping(address allocationId => IAllocation.State allocation) storage _allocations, + mapping(address indexer => uint256 tokens) storage allocationProvisionTracker, + mapping(bytes32 subgraphDeploymentId => uint256 tokens) storage _subgraphAllocatedTokens, + IHorizonStaking graphStaking, + IRewardsManager graphRewardsManager, + address _allocationId, + IAllocation.State memory allocation, + uint256 _tokens, + uint32 _delegationRatio, + uint256 _maxPOIStaleness + ) internal 
{ // Update provision tracker uint256 oldTokens = allocation.tokens; if (_tokens > oldTokens) { diff --git a/packages/subgraph-service/contracts/libraries/IndexingAgreement.sol b/packages/subgraph-service/contracts/libraries/IndexingAgreement.sol index d94e1401c..1282fe55b 100644 --- a/packages/subgraph-service/contracts/libraries/IndexingAgreement.sol +++ b/packages/subgraph-service/contracts/libraries/IndexingAgreement.sol @@ -2,13 +2,13 @@ pragma solidity ^0.8.27; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { SETTLED, IAgreementCollector } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; import { AllocationHandler } from "../libraries/AllocationHandler.sol"; -import { Directory } from "../utilities/Directory.sol"; import { Allocation } from "./Allocation.sol"; import { IndexingAgreementDecoder } from "./IndexingAgreementDecoder.sol"; @@ -141,20 +141,6 @@ library IndexingAgreement { bytes metadata ); - /** - * @notice Emitted when an indexing agreement is canceled - * @param indexer The address of the indexer - * @param payer The address of the payer - * @param agreementId The id of the agreement - * @param canceledOnBehalfOf The address of the entity that canceled the agreement - */ - event IndexingAgreementCanceled( - address indexed indexer, - address indexed payer, - bytes16 indexed agreementId, - address canceledOnBehalfOf - ); - /** * @notice Emitted when an indexing agreement is accepted * @param indexer The address of the 
indexer @@ -206,6 +192,14 @@ library IndexingAgreement { */ error IndexingAgreementWrongDataService(address expectedDataService, address wrongDataService); + /** + * @notice Thrown when the caller is not the collector that owns the agreement + * @param agreementId The agreement ID + * @param expectedCollector The collector that owns the agreement + * @param actualCollector The caller + */ + error IndexingAgreementCollectorMismatch(bytes16 agreementId, address expectedCollector, address actualCollector); + /** * @notice Thrown when an agreement and the allocation correspond to different deployment IDs * @param agreementDeploymentId The agreement's deployment ID @@ -231,11 +225,11 @@ library IndexingAgreement { error AllocationAlreadyHasIndexingAgreement(address allocationId); /** - * @notice Thrown when caller or proxy can not cancel an agreement - * @param owner The address of the owner of the agreement - * @param unauthorized The unauthorized caller + * @notice Emitted when an allocation is unbound from an indexing agreement + * @param agreementId The agreement ID + * @param allocationId The allocation ID that was unbound */ - error IndexingAgreementNonCancelableBy(address owner, address unauthorized); + event IndexingAgreementAllocationUnbound(bytes16 indexed agreementId, address indexed allocationId); /** * @notice Thrown when the agreement is not active @@ -256,258 +250,147 @@ library IndexingAgreement { */ error IndexingAgreementNotAuthorized(bytes16 agreementId, address unauthorizedIndexer); - /** - * @notice Thrown when indexing agreement terms are invalid - * @param tokensPerSecond The indexing agreement tokens per second - * @param maxOngoingTokensPerSecond The RCA maximum tokens per second - */ - error IndexingAgreementInvalidTerms(uint256 tokensPerSecond, uint256 maxOngoingTokensPerSecond); - /* solhint-disable function-max-lines */ /** - * @notice Accept an indexing agreement. + * @notice Handle acceptance of an agreement (initial or update). 
+ * @dev Called by SubgraphService.acceptAgreement for both initial accepts and updates. + * On initial accept (collector not yet set): validates allocation binding, deployment + * match against payer-signed metadata, stores collector and deployment ID. + * On update (collector already set): validates collector identity, optionally rebinds + * allocation, updates terms. * * Requirements: - * - Allocation must belong to the indexer and be open - * - Agreement must be for this data service - * - Agreement's subgraph deployment must match the allocation's subgraph deployment - * - Agreement must not have been accepted before - * - Allocation must not have an agreement already + * - Initial: allocation must belong to the indexer and be open, deployment must match + * metadata, agreement must not have been accepted before, allocation must not be bound + * - Update: caller must be the collector that owns the agreement, version must be V1 + * - If rebinding (extraData contains new allocationId): new allocation must be open, + * owned by indexer, on the same deployment, and not already bound * - * @dev rca.metadata is an encoding of {IndexingAgreement.AcceptIndexingAgreementMetadata}. - * If `authData` is non-empty it is treated as an ECDSA signature; if empty the payer - * must be a contract implementing {IAgreementOwner}. 
- * - * Emits {IndexingAgreementAccepted} event + * Emits {IndexingAgreementAccepted} on initial accept + * Emits {IndexingAgreementUpdated} on update * * @param self The indexing agreement storage manager * @param allocations The mapping of allocation IDs to their states - * @param allocationId The id of the allocation - * @param rca The Recurring Collection Agreement - * @param authData ECDSA signature bytes, or empty for contract-approved agreements - * @return The agreement ID assigned to the accepted indexing agreement + * @param agreementId The ID of the agreement being accepted + * @param payer The address of the payer + * @param serviceProvider The address of the service provider (indexer) + * @param metadata The agreement metadata (encoded Accept or Update metadata) + * @param extraData Encoded allocationId — required for initial, optional for update + * @param collector The collector contract address (msg.sender from the callback) */ - function accept( + function onAcceptCallback( StorageManager storage self, mapping(address allocationId => IAllocation.State allocation) storage allocations, - address allocationId, - IRecurringCollector.RecurringCollectionAgreement calldata rca, - bytes calldata authData - ) external returns (bytes16) { - IAllocation.State memory allocation = _requireValidAllocation(allocations, allocationId, rca.serviceProvider); - - require(rca.dataService == address(this), IndexingAgreementWrongDataService(address(this), rca.dataService)); - - AcceptIndexingAgreementMetadata memory metadata = IndexingAgreementDecoder.decodeRCAMetadata(rca.metadata); - - bytes16 agreementId = _directory().recurringCollector().generateAgreementId( - rca.payer, - rca.dataService, - rca.serviceProvider, - rca.deadline, - rca.nonce - ); - + bytes16 agreementId, + address payer, + address serviceProvider, + bytes calldata metadata, + bytes calldata extraData, + address collector + ) external { IIndexingAgreement.State storage agreement = 
self.agreements[agreementId]; + bool isInitial = agreement.collector == address(0); + + // ── 1. Collector identity ── + if (isInitial) { + agreement.collector = collector; + } else { + require( + agreement.collector == collector, + IndexingAgreementCollectorMismatch(agreementId, agreement.collector, collector) + ); + } - require(agreement.allocationId == address(0), IndexingAgreementAlreadyAccepted(agreementId)); - - require( - allocation.subgraphDeploymentId == metadata.subgraphDeploymentId, - IndexingAgreementDeploymentIdMismatch( - metadata.subgraphDeploymentId, - allocationId, - allocation.subgraphDeploymentId - ) - ); + // ── 2. Decode metadata (different structs, same outputs) ── + IIndexingAgreement.IndexingAgreementVersion version; + bytes memory terms; - // Ensure that an allocation can only have one active indexing agreement - require( - self.allocationToActiveAgreementId[allocationId] == bytes16(0), - AllocationAlreadyHasIndexingAgreement(allocationId) - ); - self.allocationToActiveAgreementId[allocationId] = agreementId; + if (isInitial) { + require(agreement.allocationId == address(0), IndexingAgreementAlreadyAccepted(agreementId)); - agreement.version = metadata.version; - agreement.allocationId = allocationId; + AcceptIndexingAgreementMetadata memory meta = IndexingAgreementDecoder.decodeRCAMetadata(metadata); + version = meta.version; + terms = meta.terms; - require( - metadata.version == IIndexingAgreement.IndexingAgreementVersion.V1, - IndexingAgreementInvalidVersion(metadata.version) - ); - _setTermsV1(self, agreementId, metadata.terms, rca.maxOngoingTokensPerSecond); - - emit IndexingAgreementAccepted( - rca.serviceProvider, - rca.payer, - agreementId, - allocationId, - metadata.subgraphDeploymentId, - metadata.version, - metadata.terms - ); + agreement.subgraphDeploymentId = meta.subgraphDeploymentId; + } else { + UpdateIndexingAgreementMetadata memory meta = IndexingAgreementDecoder.decodeRCAUMetadata(metadata); + version = meta.version; + 
terms = meta.terms; + } - require( - _directory().recurringCollector().accept(rca, authData) == agreementId, - "internal: agreement ID mismatch" - ); - return agreementId; + // ── 3. Allocation binding ── + _bindAllocation(self, allocations, agreement, agreementId, serviceProvider, extraData); + + // ── 4. Version + terms ── + require(version == IIndexingAgreement.IndexingAgreementVersion.V1, IndexingAgreementInvalidVersion(version)); + agreement.version = version; + _setTermsV1(self, agreementId, terms); + + // ── 5. Events ── + if (isInitial) { + emit IndexingAgreementAccepted( + serviceProvider, + payer, + agreementId, + agreement.allocationId, + agreement.subgraphDeploymentId, + version, + terms + ); + } else { + emit IndexingAgreementUpdated({ + indexer: serviceProvider, + payer: payer, + agreementId: agreementId, + allocationId: agreement.allocationId, + version: version, + versionTerms: terms + }); + } } /* solhint-enable function-max-lines */ /** - * @notice Update an indexing agreement. - * - * Requirements: - * - Agreement must be active - * - The indexer must be the service provider of the agreement - * - * @dev rcau.metadata is an encoding of {IndexingAgreement.UpdateIndexingAgreementMetadata}. - * If `authData` is non-empty it is treated as an ECDSA signature; if empty the payer - * must be a contract implementing {IAgreementOwner}. 
- * - * Emits {IndexingAgreementUpdated} event - * - * @param self The indexing agreement storage manager - * @param indexer The indexer address - * @param rcau The Recurring Collection Agreement Update - * @param authData ECDSA signature bytes, or empty for contract-approved updates - */ - function update( - StorageManager storage self, - address indexer, - IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau, - bytes calldata authData - ) external { - IIndexingAgreement.AgreementWrapper memory wrapper = _get(self, rcau.agreementId); - require(_isActive(wrapper), IndexingAgreementNotActive(rcau.agreementId)); - require( - wrapper.collectorAgreement.serviceProvider == indexer, - IndexingAgreementNotAuthorized(rcau.agreementId, indexer) - ); - - UpdateIndexingAgreementMetadata memory metadata = IndexingAgreementDecoder.decodeRCAUMetadata(rcau.metadata); - - require( - wrapper.agreement.version == IIndexingAgreement.IndexingAgreementVersion.V1, - "internal: invalid version" - ); - require( - metadata.version == IIndexingAgreement.IndexingAgreementVersion.V1, - IndexingAgreementInvalidVersion(metadata.version) - ); - _setTermsV1(self, rcau.agreementId, metadata.terms, wrapper.collectorAgreement.maxOngoingTokensPerSecond); - - emit IndexingAgreementUpdated({ - indexer: wrapper.collectorAgreement.serviceProvider, - payer: wrapper.collectorAgreement.payer, - agreementId: rcau.agreementId, - allocationId: wrapper.agreement.allocationId, - version: metadata.version, - versionTerms: metadata.terms - }); - - _directory().recurringCollector().update(rcau, authData); - } - - /** - * @notice Cancel an indexing agreement. - * - * @dev This function allows the indexer to cancel an indexing agreement. 
- * - * Requirements: - * - Agreement must be active - * - The indexer must be the service provider of the agreement - * - * Emits {IndexingAgreementCanceled} event - * - * @param self The indexing agreement storage manager - * @param indexer The indexer address - * @param agreementId The id of the agreement to cancel - */ - function cancel(StorageManager storage self, address indexer, bytes16 agreementId) external { - IIndexingAgreement.AgreementWrapper memory wrapper = _get(self, agreementId); - require(_isActive(wrapper), IndexingAgreementNotActive(agreementId)); - require( - wrapper.collectorAgreement.serviceProvider == indexer, - IndexingAgreementNonCancelableBy(wrapper.collectorAgreement.serviceProvider, indexer) - ); - _cancel( - self, - agreementId, - wrapper.agreement, - wrapper.collectorAgreement, - IRecurringCollector.CancelAgreementBy.ServiceProvider - ); - } - - /** - * @notice Cancel an allocation's indexing agreement if it exists. + * @notice Handle an allocation's indexing agreement when the allocation is closed. * - * @dev This function is to be called by the data service when an allocation is closed. + * @dev Called by the data service when an allocation is closed. + * When `_blockIfActive` is true, reverts if the agreement is not SETTLED. + * When false, clears the mapping regardless of settlement state. * - * Requirements: - * - The allocation must have an active agreement - * - Agreement must be active + * DoS note: the external call to `getAgreementVersionAt` is unguarded. If the + * collector reverts (broken upgrade, corrupt storage), allocation closure is blocked. + * Mitigations: (1) governor can disable `blockClosingAllocationWithActiveAgreement`, + * (2) indexer can self-cancel via collector to set SETTLED then close, + * (3) `getAgreementVersionAt` is a view with no pause guard, so collector pause + * does not block it. 
* - * Emits {IndexingAgreementCanceled} event + * Escape hatch: BY_PROVIDER cancel sets SETTLED immediately, so the indexer can + * always self-cancel then close. + * Clears both sides of the bidirectional mapping atomically. * * @param self The indexing agreement storage manager * @param _allocationId The allocation ID - * @param forceClosed Whether the allocation was force closed - * + * @param _blockIfActive Whether to revert if the agreement is not settled */ - function onCloseAllocation(StorageManager storage self, address _allocationId, bool forceClosed) external { + function onCloseAllocation(StorageManager storage self, address _allocationId, bool _blockIfActive) external { bytes16 agreementId = self.allocationToActiveAgreementId[_allocationId]; - if (agreementId == bytes16(0)) { - return; - } + if (agreementId == bytes16(0)) return; - IIndexingAgreement.AgreementWrapper memory wrapper = _get(self, agreementId); - if (!_isActive(wrapper)) { - return; - } + if (_blockIfActive) { + // Check SETTLED on-demand via the collector + IAgreementCollector.AgreementVersion memory version = IAgreementCollector( + self.agreements[agreementId].collector + ).getAgreementVersionAt(agreementId, 0); - _cancel( - self, - agreementId, - wrapper.agreement, - wrapper.collectorAgreement, - forceClosed - ? IRecurringCollector.CancelAgreementBy.ThirdParty - : IRecurringCollector.CancelAgreementBy.ServiceProvider - ); - } + if (version.state & SETTLED == 0) + revert ISubgraphService.SubgraphServiceAllocationHasActiveAgreement(_allocationId, agreementId); + } - /** - * @notice Cancel an indexing agreement by the payer. - * - * @dev This function allows the payer to cancel an indexing agreement. 
- * - * Requirements: - * - Agreement must be active - * - The caller must be authorized to cancel the agreement in the collector on the payer's behalf - * - * Emits {IndexingAgreementCanceled} event - * - * @param self The indexing agreement storage manager - * @param agreementId The id of the agreement to cancel - */ - function cancelByPayer(StorageManager storage self, bytes16 agreementId) external { - IIndexingAgreement.AgreementWrapper memory wrapper = _get(self, agreementId); - require(_isActive(wrapper), IndexingAgreementNotActive(agreementId)); - require( - msg.sender == wrapper.collectorAgreement.payer || - _directory().recurringCollector().isAuthorized(wrapper.collectorAgreement.payer, msg.sender), - IndexingAgreementNonCancelableBy(wrapper.collectorAgreement.payer, msg.sender) - ); - _cancel( - self, - agreementId, - wrapper.agreement, - wrapper.collectorAgreement, - IRecurringCollector.CancelAgreementBy.Payer - ); + // Clear both sides of the bidirectional mapping atomically + delete self.allocationToActiveAgreementId[_allocationId]; + self.agreements[agreementId].allocationId = address(0); + emit IndexingAgreementAllocationUnbound(agreementId, _allocationId); } /* solhint-disable function-max-lines */ @@ -515,7 +398,7 @@ library IndexingAgreement { * @notice Collect indexing fees for an agreement. * @dev Computes a requested token amount from indexing agreement terms * (`collectionSeconds * (tokensPerSecond + tokensPerEntityPerSecond * entities)`) and passes - * it to {RecurringCollector}, which caps it against the RCA payer's limits. The actual payout + * it to the collector, which caps it against the payer's limits. The actual payout * is the minimum of the two. Every POI submitted is disputable — no exception for zero POI. 
* * Requirements: @@ -547,11 +430,13 @@ library IndexingAgreement { allocation.indexer == params.indexer, IndexingAgreementNotAuthorized(params.agreementId, params.indexer) ); - // Get collection info from RecurringCollector (single source of truth for temporal logic) - (bool isCollectable, uint256 collectionSeconds, ) = _directory().recurringCollector().getCollectionInfo( - wrapper.collectorAgreement + IRecurringCollector rc = IRecurringCollector(wrapper.agreement.collector); + + // Collection info comes from the collector (single source of truth for temporal logic) + require( + _isValid(wrapper) && wrapper.collectorAgreement.isCollectable, + IndexingAgreementNotCollectable(params.agreementId) ); - require(_isValid(wrapper) && isCollectable, IndexingAgreementNotCollectable(params.agreementId)); require( wrapper.agreement.version == IIndexingAgreement.IndexingAgreementVersion.V1, @@ -560,11 +445,20 @@ library IndexingAgreement { CollectIndexingFeeDataV1 memory data = IndexingAgreementDecoder.decodeCollectIndexingFeeDataV1(params.data); - uint256 expectedTokens = _tokensToCollect(self, params.agreementId, data.entities, collectionSeconds); + uint256 expectedTokens = _tokensToCollect( + self, + params.agreementId, + data.entities, + wrapper.collectorAgreement.collectionSeconds + ); - // `tokensCollected` <= `expectedTokens` because the recurring collector will further narrow - // down the tokens allowed, based on the RCA terms. - uint256 tokensCollected = _directory().recurringCollector().collect( + // Trust boundary: the collector is owner-authorized and controls the actual token + // transfer (via PaymentsEscrow.collect). It is trusted for both the amount moved and + // the return value. A return-value sanity check would not limit a buggy collector's + // ability to move tokens, only catch a misreported return value. The downstream effect + // of an inflated return is over-locking indexer stake (tokensCollected * stakeToFeesRatio). 
+ // Mitigation is governance-level: only the contract owner can authorize collectors. + uint256 tokensCollected = rc.collect( IGraphPayments.PaymentTypes.IndexingFee, abi.encode( IRecurringCollector.CollectParams({ @@ -631,56 +525,13 @@ library IndexingAgreement { * @param _manager The indexing agreement storage manager * @param _agreementId The id of the agreement to update * @param _data The encoded terms data - * @param maxOngoingTokensPerSecond The RCA maximum tokens per second limit for validation */ - function _setTermsV1( - StorageManager storage _manager, - bytes16 _agreementId, - bytes memory _data, - uint256 maxOngoingTokensPerSecond - ) private { + function _setTermsV1(StorageManager storage _manager, bytes16 _agreementId, bytes memory _data) private { IndexingAgreementTermsV1 memory newTerms = IndexingAgreementDecoder.decodeIndexingAgreementTermsV1(_data); - _validateTermsAgainstRCA(newTerms, maxOngoingTokensPerSecond); _manager.termsV1[_agreementId].tokensPerSecond = newTerms.tokensPerSecond; _manager.termsV1[_agreementId].tokensPerEntityPerSecond = newTerms.tokensPerEntityPerSecond; } - /** - * @notice Cancel an indexing agreement. - * - * @dev This function does the actual agreement cancelation. - * - * Emits {IndexingAgreementCanceled} event - * - * @param _manager The indexing agreement storage manager - * @param _agreementId The id of the agreement to cancel - * @param _agreement The indexing agreement state - * @param _collectorAgreement The collector agreement data - * @param _cancelBy The entity that is canceling the agreement - */ - function _cancel( - StorageManager storage _manager, - bytes16 _agreementId, - IIndexingAgreement.State memory _agreement, - IRecurringCollector.AgreementData memory _collectorAgreement, - IRecurringCollector.CancelAgreementBy _cancelBy - ) private { - // Delete the allocation to active agreement link, so that the allocation - // can be assigned a new indexing agreement in the future. 
- delete _manager.allocationToActiveAgreementId[_agreement.allocationId]; - - emit IndexingAgreementCanceled( - _collectorAgreement.serviceProvider, - _collectorAgreement.payer, - _agreementId, - _cancelBy == IRecurringCollector.CancelAgreementBy.Payer - ? _collectorAgreement.payer - : _collectorAgreement.serviceProvider - ); - - _directory().recurringCollector().cancel(_agreementId, _cancelBy); - } - /** * @notice Requires that the allocation is valid and owned by the indexer. * @@ -708,15 +559,75 @@ library IndexingAgreement { return allocation; } + /** + * @notice Bind or rebind an agreement to an allocation. + * @dev If `_extraData` contains a new allocationId, validates it and updates + * the bidirectional mapping. After binding, requires the agreement has a valid, + * open allocation owned by the indexer. + * + * @param _manager The storage manager + * @param _allocations The allocation state mapping + * @param _agreement The agreement state (storage ref) + * @param _agreementId The agreement ID + * @param _serviceProvider The indexer address + * @param _extraData Encoded allocationId — required for initial, optional for update + */ + function _bindAllocation( + StorageManager storage _manager, + mapping(address => IAllocation.State) storage _allocations, + IIndexingAgreement.State storage _agreement, + bytes16 _agreementId, + address _serviceProvider, + bytes calldata _extraData + ) private { + if (0 < _extraData.length) { + address newAllocationId = abi.decode(_extraData, (address)); + address oldAllocationId = _agreement.allocationId; + + if (newAllocationId != oldAllocationId) { + IAllocation.State memory newAllocation = _requireValidAllocation( + _allocations, + newAllocationId, + _serviceProvider + ); + + require( + newAllocation.subgraphDeploymentId == _agreement.subgraphDeploymentId, + IndexingAgreementDeploymentIdMismatch( + _agreement.subgraphDeploymentId, + newAllocationId, + newAllocation.subgraphDeploymentId + ) + ); + + if (oldAllocationId != 
address(0)) { + delete _manager.allocationToActiveAgreementId[oldAllocationId]; + emit IndexingAgreementAllocationUnbound(_agreementId, oldAllocationId); + } + + require( + _manager.allocationToActiveAgreementId[newAllocationId] == bytes16(0), + AllocationAlreadyHasIndexingAgreement(newAllocationId) + ); + + _manager.allocationToActiveAgreementId[newAllocationId] = _agreementId; + _agreement.allocationId = newAllocationId; + } + } + + require(_agreement.allocationId != address(0), IndexingAgreementNotActive(_agreementId)); + _requireValidAllocation(_allocations, _agreement.allocationId, _serviceProvider); + } + /** * @notice Calculate the data service's requested token amount for a collection. * @dev This is an upper bound based on indexing agreement terms, not a guaranteed payout. - * The RecurringCollector further caps the actual payout against the RCA payer's limits. + * The collector further caps the actual payout against the payer's limits. * @param _manager The storage manager * @param _agreementId The agreement ID * @param _entities The number of entities indexed * @param _collectionSeconds Collection duration, already capped at maxSecondsPerCollection - * @return The requested token amount (may be narrowed by RecurringCollector) + * @return The requested token amount (may be narrowed by the collector) */ function _tokensToCollect( StorageManager storage _manager, @@ -728,18 +639,6 @@ library IndexingAgreement { return _collectionSeconds * (termsV1.tokensPerSecond + termsV1.tokensPerEntityPerSecond * _entities); } - /** - * @notice Checks if the agreement is active - * Requirements: - * - The indexing agreement is valid - * - The underlying collector agreement has been accepted - * @param wrapper The agreement wrapper containing the indexing agreement and collector agreement data - * @return True if the agreement is active, false otherwise - **/ - function _isActive(IIndexingAgreement.AgreementWrapper memory wrapper) private view returns (bool) { - return 
_isValid(wrapper) && wrapper.collectorAgreement.state == IRecurringCollector.AgreementState.Accepted; - } - /** * @notice Checks if the agreement is valid * Requirements: @@ -752,45 +651,20 @@ library IndexingAgreement { return wrapper.collectorAgreement.dataService == address(this) && wrapper.agreement.allocationId != address(0); } - /** - * @notice Gets the Directory - * @return The Directory contract - */ - function _directory() private view returns (Directory) { - return Directory(address(this)); - } - /** * @notice Gets the indexing agreement wrapper for a given agreement ID. * @dev This function retrieves the indexing agreement wrapper containing the agreement state and collector agreement data. * @param self The indexing agreement storage manager * @param agreementId The id of the indexing agreement - * @return The indexing agreement wrapper containing the agreement state and collector agreement data + * @return wrapper The indexing agreement wrapper containing the agreement state and collector agreement data */ function _get( StorageManager storage self, bytes16 agreementId - ) private view returns (IIndexingAgreement.AgreementWrapper memory) { - return - IIndexingAgreement.AgreementWrapper({ - agreement: self.agreements[agreementId], - collectorAgreement: _directory().recurringCollector().getAgreement(agreementId) - }); - } - - /** - * @notice Validates indexing agreement terms against RCA limits - * @param terms The indexing agreement terms to validate - * @param maxOngoingTokensPerSecond The RCA maximum tokens per second limit - */ - function _validateTermsAgainstRCA( - IndexingAgreementTermsV1 memory terms, - uint256 maxOngoingTokensPerSecond - ) private pure { - require( - // solhint-disable-next-line gas-strict-inequalities - terms.tokensPerSecond <= maxOngoingTokensPerSecond, - IndexingAgreementInvalidTerms(terms.tokensPerSecond, maxOngoingTokensPerSecond) - ); + ) private view returns (IIndexingAgreement.AgreementWrapper memory wrapper) { + 
wrapper.agreement = self.agreements[agreementId]; + if (wrapper.agreement.collector != address(0)) { + wrapper.collectorAgreement = IRecurringCollector(wrapper.agreement.collector).getAgreementData(agreementId); + } } } diff --git a/packages/subgraph-service/contracts/utilities/AllocationManager.sol b/packages/subgraph-service/contracts/utilities/AllocationManager.sol index 69d980b4d..051fa3260 100644 --- a/packages/subgraph-service/contracts/utilities/AllocationManager.sol +++ b/packages/subgraph-service/contracts/utilities/AllocationManager.sol @@ -118,7 +118,7 @@ abstract contract AllocationManager is * @param _delegationRatio The delegation ratio to consider when locking tokens * @param _paymentsDestination The address where indexing rewards should be sent * @return rewardsCollected Indexing rewards collected - * @return allocationForceClosed True if the allocation was force closed due to over-allocation + * @return allocationDownsized True if the allocation was resized down due to over-allocation */ // solhint-disable-next-line function-max-lines function _presentPoi( diff --git a/packages/subgraph-service/contracts/utilities/Directory.sol b/packages/subgraph-service/contracts/utilities/Directory.sol index 6c85af462..90df7fe09 100644 --- a/packages/subgraph-service/contracts/utilities/Directory.sol +++ b/packages/subgraph-service/contracts/utilities/Directory.sol @@ -1,14 +1,11 @@ // SPDX-License-Identifier: GPL-3.0-or-later pragma solidity ^0.8.27; -// TODO: Re-enable and fix issues when publishing a new version -// solhint-disable gas-indexed-events // forge-lint: disable-start(unwrapped-modifier-logic) import { IDisputeManager } from "@graphprotocol/interfaces/contracts/subgraph-service/IDisputeManager.sol"; import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { IGraphTallyCollector } from "@graphprotocol/interfaces/contracts/horizon/IGraphTallyCollector.sol"; -import { IRecurringCollector } 
from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { ICuration } from "@graphprotocol/interfaces/contracts/contracts/curation/ICuration.sol"; /** @@ -31,29 +28,25 @@ abstract contract Directory { /// @dev Required to collect payments via Graph Horizon payments protocol IGraphTallyCollector private immutable GRAPH_TALLY_COLLECTOR; - /// @notice The Recurring Collector contract address - /// @dev Required to collect indexing agreement payments via Graph Horizon payments protocol - IRecurringCollector private immutable RECURRING_COLLECTOR; - /// @notice The Curation contract address /// @dev Required for curation fees distribution ICuration private immutable CURATION; + /* solhint-disable gas-indexed-events */ /** * @notice Emitted when the Directory is initialized * @param subgraphService The Subgraph Service contract address * @param disputeManager The Dispute Manager contract address * @param graphTallyCollector The Graph Tally Collector contract address * @param curation The Curation contract address - * @param recurringCollector The Recurring Collector contract address */ event SubgraphServiceDirectoryInitialized( address subgraphService, address disputeManager, address graphTallyCollector, - address curation, - address recurringCollector + address curation ); + /* solhint-enable gas-indexed-events */ /** * @notice Thrown when the caller is not the Dispute Manager @@ -79,36 +72,14 @@ abstract contract Directory { * @param disputeManager The Dispute Manager contract address * @param graphTallyCollector The Graph Tally Collector contract address * @param curation The Curation contract address - * @param recurringCollector_ The Recurring Collector contract address */ - constructor( - address subgraphService, - address disputeManager, - address graphTallyCollector, - address curation, - address recurringCollector_ - ) { + constructor(address subgraphService, address disputeManager, address graphTallyCollector, address curation) { 
SUBGRAPH_SERVICE = ISubgraphService(subgraphService); DISPUTE_MANAGER = IDisputeManager(disputeManager); GRAPH_TALLY_COLLECTOR = IGraphTallyCollector(graphTallyCollector); CURATION = ICuration(curation); - RECURRING_COLLECTOR = IRecurringCollector(recurringCollector_); - emit SubgraphServiceDirectoryInitialized( - subgraphService, - disputeManager, - graphTallyCollector, - curation, - recurringCollector_ - ); - } - - /** - * @notice Returns the Recurring Collector contract address - * @return The Recurring Collector contract - */ - function recurringCollector() external view returns (IRecurringCollector) { - return RECURRING_COLLECTOR; + emit SubgraphServiceDirectoryInitialized(subgraphService, disputeManager, graphTallyCollector, curation); } /** diff --git a/packages/subgraph-service/package.json b/packages/subgraph-service/package.json index 068e81b8a..1dc7e7e87 100644 --- a/packages/subgraph-service/package.json +++ b/packages/subgraph-service/package.json @@ -32,7 +32,7 @@ "test:self": "forge test", "test:deployment": "SECURE_ACCOUNTS_DISABLE_PROVIDER=true hardhat test test/deployment/*.ts", "test:integration": "./scripts/integration", - "test:coverage": "pnpm build && pnpm test:coverage:self", + "test:coverage": "forge coverage", "test:coverage:self": "mkdir -p coverage && forge coverage --report lcov --report-file coverage/lcov.info", "prepublishOnly": "pnpm run build" }, diff --git a/packages/subgraph-service/test/unit/SubgraphBaseTest.t.sol b/packages/subgraph-service/test/unit/SubgraphBaseTest.t.sol index 31f18bbe0..befcaf351 100644 --- a/packages/subgraph-service/test/unit/SubgraphBaseTest.t.sol +++ b/packages/subgraph-service/test/unit/SubgraphBaseTest.t.sol @@ -11,6 +11,8 @@ import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPa import { GraphTallyCollector } from "@graphprotocol/horizon/contracts/payments/collectors/GraphTallyCollector.sol"; import { RecurringCollector } from 
"@graphprotocol/horizon/contracts/payments/collectors/RecurringCollector.sol"; import { PaymentsEscrow } from "@graphprotocol/horizon/contracts/payments/PaymentsEscrow.sol"; +import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; +import { ERC1967Utils } from "@openzeppelin/contracts/proxy/ERC1967/ERC1967Utils.sol"; import { UnsafeUpgrades } from "@openzeppelin/foundry-upgrades/src/Upgrades.sol"; import { Constants } from "./utils/Constants.sol"; @@ -40,6 +42,7 @@ abstract contract SubgraphBaseTest is Utils, Constants { IPaymentsEscrow escrow; GraphTallyCollector graphTallyCollector; RecurringCollector recurringCollector; + address recurringCollectorProxyAdmin; HorizonStaking private stakingBase; @@ -152,20 +155,25 @@ abstract contract SubgraphBaseTest is Utils, Constants { address(controller), REVOKE_SIGNER_THAWING_PERIOD ); - recurringCollector = new RecurringCollector( - "RecurringCollector", - "1", - address(controller), - REVOKE_SIGNER_THAWING_PERIOD - ); + { + RecurringCollector rcImpl = new RecurringCollector(address(controller)); + TransparentUpgradeableProxy rcProxy = new TransparentUpgradeableProxy( + address(rcImpl), + users.governor, + abi.encodeCall(RecurringCollector.initialize, ()) + ); + recurringCollector = RecurringCollector(address(rcProxy)); + recurringCollectorProxyAdmin = address( + uint160(uint256(vm.load(address(rcProxy), ERC1967Utils.ADMIN_SLOT))) + ); + } address subgraphServiceImplementation = address( new SubgraphService( address(controller), address(disputeManager), address(graphTallyCollector), - address(curation), - address(recurringCollector) + address(curation) ) ); address subgraphServiceProxy = UnsafeUpgrades.deployTransparentProxy( @@ -198,6 +206,7 @@ abstract contract SubgraphBaseTest is Utils, Constants { epochManager.setEpochLength(EPOCH_LENGTH); subgraphService.setMaxPOIStaleness(MAX_POI_STALENESS); subgraphService.setCurationCut(CURATION_CUT); + 
subgraphService.setAuthorizedCollector(address(recurringCollector), true); subgraphService.setPauseGuardian(users.pauseGuardian, true); } diff --git a/packages/subgraph-service/test/unit/disputeManager/disputes/indexingFee/create.t.sol b/packages/subgraph-service/test/unit/disputeManager/disputes/indexingFee/create.t.sol index 73ca400bf..c8a4b1f49 100644 --- a/packages/subgraph-service/test/unit/disputeManager/disputes/indexingFee/create.t.sol +++ b/packages/subgraph-service/test/unit/disputeManager/disputes/indexingFee/create.t.sol @@ -4,6 +4,8 @@ pragma solidity ^0.8.27; import { IDisputeManager } from "@graphprotocol/interfaces/contracts/subgraph-service/IDisputeManager.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { IPaymentsCollector } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsCollector.sol"; +import { IHorizonStakingBase } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingBase.sol"; +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { SubgraphServiceIndexingAgreementSharedTest } from "../../../subgraphService/indexing-agreement/shared.t.sol"; @@ -64,11 +66,13 @@ contract DisputeManagerIndexingFeeCreateDisputeTest is SubgraphServiceIndexingAg // The collect mock prevented the real RecurringCollector from updating lastCollectionAt. // Mock getAgreement to return lastCollectionAt > 0 so the dispute can be created. 
- IRecurringCollector.AgreementData memory agreementData = recurringCollector.getAgreement(acceptedAgreementId); + IRecurringCollector.AgreementData memory agreementData = recurringCollector.getAgreementData( + acceptedAgreementId + ); agreementData.lastCollectionAt = uint64(block.timestamp); vm.mockCall( address(recurringCollector), - abi.encodeWithSelector(recurringCollector.getAgreement.selector, acceptedAgreementId), + abi.encodeWithSelector(recurringCollector.getAgreementData.selector, acceptedAgreementId), abi.encode(agreementData) ); } @@ -144,7 +148,7 @@ contract DisputeManagerIndexingFeeCreateDisputeTest is SubgraphServiceIndexingAg ); // Read the payer from the (mocked) agreement data - IRecurringCollector.AgreementData memory agreementData = recurringCollector.getAgreement(agreementId); + IRecurringCollector.AgreementData memory agreementData = recurringCollector.getAgreementData(agreementId); resetPrank(users.fisherman); uint256 deposit = disputeManager.disputeDeposit(); @@ -176,6 +180,45 @@ contract DisputeManagerIndexingFeeCreateDisputeTest is SubgraphServiceIndexingAg assertEq(disputeId, expectedDisputeId); } + function test_IndexingFee_Create_Dispute_RevertWhen_ZeroStake( + Seed memory seed, + uint256 unboundedTokensCollected + ) public { + (bytes16 agreementId, IndexerState memory indexerState) = _setupCollectedAgreement( + seed, + unboundedTokensCollected + ); + + // Mock staking to return zero provision tokens and zero delegation + IHorizonStakingTypes.Provision memory emptyProvision; + vm.mockCall( + address(staking), + abi.encodeWithSelector( + IHorizonStakingBase.getProvision.selector, + indexerState.addr, + address(subgraphService) + ), + abi.encode(emptyProvision) + ); + IHorizonStakingTypes.DelegationPool memory emptyPool; + vm.mockCall( + address(staking), + abi.encodeWithSelector( + IHorizonStakingBase.getDelegationPool.selector, + indexerState.addr, + address(subgraphService) + ), + abi.encode(emptyPool) + ); + + 
resetPrank(users.fisherman); + token.approve(address(disputeManager), disputeManager.disputeDeposit()); + + vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerZeroTokens.selector)); + // forge-lint: disable-next-line(unsafe-typecast) + disputeManager.createIndexingFeeDisputeV1(agreementId, bytes32("disputePOI"), 200, block.number); + } + function test_IndexingFee_Create_Dispute_RevertWhen_AlreadyCreated( Seed memory seed, uint256 unboundedTokensCollected @@ -196,4 +239,43 @@ contract DisputeManagerIndexingFeeCreateDisputeTest is SubgraphServiceIndexingAg // forge-lint: disable-next-line(unsafe-typecast) disputeManager.createIndexingFeeDisputeV1(agreementId, bytes32("POI"), 100, block.number); } + + function test_IndexingFee_Accept_Dispute_RevertWhen_InvalidDisputeId() public { + // forge-lint: disable-next-line(unsafe-typecast) + bytes32 fakeDisputeId = bytes32("nonexistent"); + + resetPrank(users.arbitrator); + vm.expectRevert(abi.encodeWithSelector(IDisputeManager.DisputeManagerInvalidDispute.selector, fakeDisputeId)); + disputeManager.acceptDispute(fakeDisputeId, 1); + } + + function test_IndexingFee_Accept_Dispute_RevertWhen_NotPending( + Seed memory seed, + uint256 unboundedTokensCollected + ) public { + (bytes16 agreementId, ) = _setupCollectedAgreement(seed, unboundedTokensCollected); + + // Create and reject a dispute so it is no longer pending + resetPrank(users.fisherman); + token.approve(address(disputeManager), disputeManager.disputeDeposit()); + // forge-lint: disable-next-line(unsafe-typecast) + bytes32 disputeId = disputeManager.createIndexingFeeDisputeV1( + agreementId, + bytes32("disputePOI"), + 200, + block.number + ); + + resetPrank(users.arbitrator); + disputeManager.rejectDispute(disputeId); + + // Attempt to accept the already-rejected dispute + vm.expectRevert( + abi.encodeWithSelector( + IDisputeManager.DisputeManagerDisputeNotPending.selector, + IDisputeManager.DisputeStatus.Rejected + ) + ); + 
disputeManager.acceptDispute(disputeId, 1); + } } diff --git a/packages/subgraph-service/test/unit/libraries/IndexingAgreement.t.sol b/packages/subgraph-service/test/unit/libraries/IndexingAgreement.t.sol index a5270e436..ece63a608 100644 --- a/packages/subgraph-service/test/unit/libraries/IndexingAgreement.t.sol +++ b/packages/subgraph-service/test/unit/libraries/IndexingAgreement.t.sol @@ -3,10 +3,18 @@ pragma solidity ^0.8.27; import { Test } from "forge-std/Test.sol"; +import { + IAgreementCollector, + REGISTERED, + ACCEPTED, + SETTLED, + NOTICE_GIVEN, + BY_PROVIDER +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; +import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { IndexingAgreement } from "../../../contracts/libraries/IndexingAgreement.sol"; -import { Directory } from "../../../contracts/utilities/Directory.sol"; contract IndexingAgreementTest is Test { IndexingAgreement.StorageManager private _storageManager; @@ -19,16 +27,18 @@ contract IndexingAgreementTest is Test { function test_IndexingAgreement_Get(bytes16 agreementId) public { vm.assume(agreementId != bytes16(0)); - vm.mockCall( - address(this), - abi.encodeWithSelector(Directory.recurringCollector.selector), - abi.encode(IRecurringCollector(_mockCollector)) - ); + // Set the collector in the agreement state so _get() can resolve it + _storageManager.agreements[agreementId] = IIndexingAgreement.State({ + allocationId: address(0), + collector: _mockCollector, + version: IIndexingAgreement.IndexingAgreementVersion.V1, + subgraphDeploymentId: bytes32(0) + }); IRecurringCollector.AgreementData memory collectorAgreement; vm.mockCall( _mockCollector, - 
abi.encodeWithSelector(IRecurringCollector.getAgreement.selector, agreementId), + abi.encodeWithSelector(IRecurringCollector.getAgreementData.selector, agreementId), abi.encode(collectorAgreement) ); @@ -38,7 +48,7 @@ contract IndexingAgreementTest is Test { collectorAgreement.dataService = address(this); vm.mockCall( _mockCollector, - abi.encodeWithSelector(IRecurringCollector.getAgreement.selector, agreementId), + abi.encodeWithSelector(IRecurringCollector.getAgreementData.selector, agreementId), abi.encode(collectorAgreement) ); @@ -46,53 +56,83 @@ contract IndexingAgreementTest is Test { assertEq(wrapper.collectorAgreement.dataService, address(this)); } - function test_IndexingAgreement_OnCloseAllocation(bytes16 agreementId, address allocationId, bool stale) public { - vm.assume(agreementId != bytes16(0)); + function test_IndexingAgreement_OnCloseAllocation_NoAgreement(address allocationId) public { vm.assume(allocationId != address(0)); + // No active agreement — returns early + IndexingAgreement.onCloseAllocation(_storageManager, allocationId, true); + } - delete _storageManager; - vm.clearMockedCalls(); - - // No active agreement for allocation ID, returns early, no assertions needed - IndexingAgreement.onCloseAllocation(_storageManager, allocationId, stale); + function test_IndexingAgreement_OnCloseAllocation_RevertsWhenNotSettled( + bytes16 agreementId, + address allocationId + ) public { + vm.assume(agreementId != bytes16(0)); + vm.assume(allocationId != address(0)); - // Active agreement for allocation ID, but collector agreement is not set, returns early, no assertions needed _storageManager.allocationToActiveAgreementId[allocationId] = agreementId; + _storageManager.agreements[agreementId] = IIndexingAgreement.State({ + allocationId: allocationId, + collector: _mockCollector, + version: IIndexingAgreement.IndexingAgreementVersion.V1, + subgraphDeploymentId: bytes32(0) + }); - IRecurringCollector.AgreementData memory collectorAgreement; - - vm.mockCall( 
- address(this), - abi.encodeWithSelector(Directory.recurringCollector.selector), - abi.encode(IRecurringCollector(_mockCollector)) - ); - + // Mock collector returning REGISTERED | ACCEPTED (not SETTLED) + uint16 notSettledState = REGISTERED | ACCEPTED; + IAgreementCollector.AgreementVersion memory version = IAgreementCollector.AgreementVersion({ + agreementId: agreementId, + versionHash: bytes32(uint256(1)), + state: notSettledState + }); vm.mockCall( _mockCollector, - abi.encodeWithSelector(IRecurringCollector.getAgreement.selector, agreementId), - abi.encode(collectorAgreement) + abi.encodeWithSelector(IAgreementCollector.getAgreementVersionAt.selector, agreementId, 0), + abi.encode(version) ); - IndexingAgreement.onCloseAllocation(_storageManager, allocationId, stale); + vm.expectRevert( + abi.encodeWithSelector( + ISubgraphService.SubgraphServiceAllocationHasActiveAgreement.selector, + allocationId, + agreementId + ) + ); + IndexingAgreement.onCloseAllocation(_storageManager, allocationId, true); + } - // Active agreement for allocation ID, collector agreement is set, should cancel the agreement - collectorAgreement.dataService = address(this); - collectorAgreement.state = IRecurringCollector.AgreementState.Accepted; + function test_IndexingAgreement_OnCloseAllocation_SucceedsWhenSettled( + bytes16 agreementId, + address allocationId + ) public { + vm.assume(agreementId != bytes16(0)); + vm.assume(allocationId != address(0)); + _storageManager.allocationToActiveAgreementId[allocationId] = agreementId; _storageManager.agreements[agreementId] = IIndexingAgreement.State({ allocationId: allocationId, - version: IIndexingAgreement.IndexingAgreementVersion.V1 + collector: _mockCollector, + version: IIndexingAgreement.IndexingAgreementVersion.V1, + subgraphDeploymentId: bytes32(0) }); + // Mock collector returning SETTLED state + uint16 settledState = REGISTERED | ACCEPTED | NOTICE_GIVEN | SETTLED | BY_PROVIDER; + IAgreementCollector.AgreementVersion memory version 
= IAgreementCollector.AgreementVersion({ + agreementId: agreementId, + versionHash: bytes32(uint256(1)), + state: settledState + }); vm.mockCall( _mockCollector, - abi.encodeWithSelector(IRecurringCollector.getAgreement.selector, agreementId), - abi.encode(collectorAgreement) + abi.encodeWithSelector(IAgreementCollector.getAgreementVersionAt.selector, agreementId, 0), + abi.encode(version) ); - vm.expectCall(_mockCollector, abi.encodeWithSelector(IRecurringCollector.cancel.selector, agreementId)); + IndexingAgreement.onCloseAllocation(_storageManager, allocationId, true); - IndexingAgreement.onCloseAllocation(_storageManager, allocationId, stale); + // Both sides of mapping should be cleared + assertEq(_storageManager.allocationToActiveAgreementId[allocationId], bytes16(0)); + assertEq(_storageManager.agreements[agreementId].allocationId, address(0)); } function test_IndexingAgreement_StorageManagerLocation() public pure { diff --git a/packages/subgraph-service/test/unit/mocks/MockRewardsManager.sol b/packages/subgraph-service/test/unit/mocks/MockRewardsManager.sol index b6da3bb75..9326361fb 100644 --- a/packages/subgraph-service/test/unit/mocks/MockRewardsManager.sol +++ b/packages/subgraph-service/test/unit/mocks/MockRewardsManager.sol @@ -52,6 +52,12 @@ contract MockRewardsManager is IRewardsManager { function setDefaultReclaimAddress(address) external {} + function setRevertOnIneligible(bool) external {} + + function getRevertOnIneligible() external pure returns (bool) { + return false; + } + function reclaimRewards(bytes32, address _allocationId) external view returns (uint256) { address rewardsIssuer = msg.sender; ( diff --git a/packages/subgraph-service/test/unit/subgraphService/SubgraphService.t.sol b/packages/subgraph-service/test/unit/subgraphService/SubgraphService.t.sol index 5002900f1..f24106880 100644 --- a/packages/subgraph-service/test/unit/subgraphService/SubgraphService.t.sol +++ 
b/packages/subgraph-service/test/unit/subgraphService/SubgraphService.t.sol @@ -151,28 +151,30 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { uint256 previousSubgraphAllocatedTokens = subgraphService.getSubgraphAllocatedTokens( allocation.subgraphDeploymentId ); + uint256 oldTokens = allocation.tokens; vm.expectEmit(address(subgraphService)); - emit IAllocationManager.AllocationClosed( + emit IAllocationManager.AllocationResized( allocation.indexer, _allocationId, allocation.subgraphDeploymentId, - allocation.tokens, - true + 0, + oldTokens ); - // close stale allocation + // close stale allocation (resizes to 0 instead of closing) subgraphService.closeStaleAllocation(_allocationId); // update allocation allocation = subgraphService.getAllocation(_allocationId); - // check allocation - assertEq(allocation.closedAt, block.timestamp); + // check allocation is still open but with zero tokens + assertTrue(allocation.isOpen()); + assertEq(allocation.tokens, 0); // check subgraph deployment allocated tokens uint256 subgraphAllocatedTokens = subgraphService.getSubgraphAllocatedTokens(subgraphDeployment); - assertEq(subgraphAllocatedTokens, previousSubgraphAllocatedTokens - allocation.tokens); + assertEq(subgraphAllocatedTokens, previousSubgraphAllocatedTokens - oldTokens); } struct IndexingRewardsData { @@ -431,7 +433,9 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { // For too-young allocations (created in current epoch), the contract returns early // without updating other allocation state or emitting IndexingRewardsCollected if (currentEpoch > allocation.createdAtEpoch) { - assertEq(allocation.accRewardsPending, 0); + // Note: after resize (over-allocation), accRewardsPending is re-accumulated from + // the token delta and may be non-zero. This is expected — rewards from the resize + // delta are captured as pending for the next collection. 
uint256 accRewardsPerAllocatedToken = rewardsManager.onSubgraphAllocationUpdate( allocation.subgraphDeploymentId ); @@ -460,19 +464,9 @@ contract SubgraphServiceTest is SubgraphServiceSharedTest { collectPaymentDataBefore.delegationPoolBalance + indexingRewardsData.tokensDelegationRewards ); - // If after collecting indexing rewards the indexer is over allocated the allcation should close - uint256 tokensAvailable = staking.getTokensAvailable( - _indexer, - address(subgraphService), - subgraphService.getDelegationRatio() - ); - if (allocation.tokens <= tokensAvailable) { - // Indexer isn't over allocated so allocation should still be open - assertTrue(allocation.isOpen()); - } else { - // Indexer is over allocated so allocation should be closed - assertFalse(allocation.isOpen()); - } + // If after collecting indexing rewards the indexer is over allocated the allocation should be + // resized down (not closed), so the allocation always remains open + assertTrue(allocation.isOpen()); } function _migrateLegacyAllocation(address _indexer, address _allocationId, bytes32 _subgraphDeploymentId) internal { diff --git a/packages/subgraph-service/test/unit/subgraphService/getters.t.sol b/packages/subgraph-service/test/unit/subgraphService/getters.t.sol index 5f884cfcb..a2d89c974 100644 --- a/packages/subgraph-service/test/unit/subgraphService/getters.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/getters.t.sol @@ -23,9 +23,9 @@ contract SubgraphServiceGettersTest is SubgraphServiceTest { assertEq(result, address(curation)); } - function test_GetRecurringCollector() public view { - address result = address(subgraphService.recurringCollector()); - assertEq(result, address(recurringCollector)); + function test_IsAuthorizedCollector() public view { + assertTrue(subgraphService.isAuthorizedCollector(address(recurringCollector))); + assertFalse(subgraphService.isAuthorizedCollector(address(0xdead))); } function test_GetAllocationData(uint256 tokens) public 
useIndexer useAllocation(tokens) { diff --git a/packages/subgraph-service/test/unit/subgraphService/governance/authorizedCollector.t.sol b/packages/subgraph-service/test/unit/subgraphService/governance/authorizedCollector.t.sol new file mode 100644 index 000000000..1440810b3 --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/governance/authorizedCollector.t.sol @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { OwnableUpgradeable } from "@openzeppelin/contracts-upgradeable/access/OwnableUpgradeable.sol"; + +import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; + +import { SubgraphServiceTest } from "../SubgraphService.t.sol"; + +contract SubgraphServiceAuthorizedCollectorTest is SubgraphServiceTest { + /* solhint-disable graph/func-name-mixedcase */ + + function test_SetAuthorizedCollector() public { + address collector = makeAddr("newCollector"); + assertFalse(subgraphService.isAuthorizedCollector(collector)); + + resetPrank(users.governor); + vm.expectEmit(address(subgraphService)); + emit ISubgraphService.AuthorizedCollectorSet(collector, true); + subgraphService.setAuthorizedCollector(collector, true); + + assertTrue(subgraphService.isAuthorizedCollector(collector)); + } + + function test_SetAuthorizedCollector_Remove() public { + // RC was authorized in setup + assertTrue(subgraphService.isAuthorizedCollector(address(recurringCollector))); + + resetPrank(users.governor); + vm.expectEmit(address(subgraphService)); + emit ISubgraphService.AuthorizedCollectorSet(address(recurringCollector), false); + subgraphService.setAuthorizedCollector(address(recurringCollector), false); + + assertFalse(subgraphService.isAuthorizedCollector(address(recurringCollector))); + } + + function test_SetAuthorizedCollector_Idempotent() public { + assertTrue(subgraphService.isAuthorizedCollector(address(recurringCollector))); + + resetPrank(users.governor); + // Setting 
same value should not emit + vm.recordLogs(); + subgraphService.setAuthorizedCollector(address(recurringCollector), true); + assertEq(vm.getRecordedLogs().length, 0); + } + + function test_SetAuthorizedCollector_RevertWhen_NotOwner() public { + address collector = makeAddr("newCollector"); + + resetPrank(users.indexer); + vm.expectRevert(abi.encodeWithSelector(OwnableUpgradeable.OwnableUnauthorizedAccount.selector, users.indexer)); + subgraphService.setAuthorizedCollector(collector, true); + } + + function test_SetAuthorizedCollector_RevertWhen_ZeroAddress() public { + resetPrank(users.governor); + vm.expectRevert(abi.encodeWithSelector(ISubgraphService.SubgraphServiceNotCollector.selector, address(0))); + subgraphService.setAuthorizedCollector(address(0), true); + } +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/accept.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/accept.t.sol index 4296c8415..2ecaa6a8d 100644 --- a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/accept.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/accept.t.sol @@ -1,8 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; -import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; -import { ProvisionManager } from "@graphprotocol/horizon/contracts/data-service/utilities/ProvisionManager.sol"; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; @@ -14,84 +13,119 @@ import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-s import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; contract 
SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAgreementSharedTest { + /* + * HELPERS + */ + + /// @dev Submit an offer to RC and then accept it, expecting the accept to revert. + function _offerAndExpectRevertOnAccept( + IRecurringCollector.RecurringCollectionAgreement memory rca, + address allocationId, + address acceptCaller, + bytes memory expectedErr + ) internal { + vm.stopPrank(); + vm.prank(rca.payer); + bytes16 agreementId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.expectRevert(expectedErr); + vm.prank(acceptCaller); + recurringCollector.accept(agreementId, activeHash, abi.encode(allocationId), 0); + } + /* * TESTS */ /* solhint-disable graph/func-name-mixedcase */ - function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenPaused( - address allocationId, - address operator, - IRecurringCollector.RecurringCollectionAgreement calldata rca, - bytes calldata authData - ) public withSafeIndexerOrOperator(operator) { + function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenPaused(Seed memory seed) public { + // NOTE: SS pause does NOT block accept through RC — the acceptAgreement callback + // does not have whenNotPaused. When SS is paused, the RC accept still succeeds because + // the RC itself is not paused and the SS callback doesn't check pause state. + // This test now verifies the accept succeeds even when SS is paused. 
+ Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + IRecurringCollector.RecurringCollectionAgreement memory acceptableRca = _generateAcceptableRCA( + ctx, + indexerState.addr + ); + + // Pause SS after generating valid offer resetPrank(users.pauseGuardian); subgraphService.pause(); + vm.stopPrank(); - resetPrank(operator); - vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); - subgraphService.acceptIndexingAgreement(allocationId, rca, authData); - } - - function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenNotAuthorized( - address allocationId, - address operator, - IRecurringCollector.RecurringCollectionAgreement calldata rca, - bytes calldata authData - ) public withSafeIndexerOrOperator(operator) { - vm.assume(operator != rca.serviceProvider); - resetPrank(operator); - bytes memory expectedErr = abi.encodeWithSelector( - ProvisionManager.ProvisionManagerNotAuthorized.selector, - rca.serviceProvider, - operator - ); - vm.expectRevert(expectedErr); - subgraphService.acceptIndexingAgreement(allocationId, rca, authData); + // Offer and accept succeed even when SS is paused + vm.prank(acceptableRca.payer); + bytes16 agreementId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(acceptableRca), 0).agreementId; + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(indexerState.addr); + recurringCollector.accept(agreementId, activeHash, abi.encode(indexerState.allocationId), 0); } function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenInvalidProvision( address indexer, uint256 unboundedTokens, - address allocationId, - IRecurringCollector.RecurringCollectionAgreement memory rca, - bytes memory authData + Seed memory seed ) public withSafeIndexerOrOperator(indexer) { + // An indexer with insufficient provision is also not registered. 
+ // The acceptAgreement callback checks registration BEFORE provision, + // so the actual revert is SubgraphServiceIndexerNotRegistered. uint256 tokens = bound(unboundedTokens, 1, MINIMUM_PROVISION_TOKENS - 1); mint(indexer, tokens); resetPrank(indexer); _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + vm.stopPrank(); + // Build a valid RCA targeting this under-provisioned indexer + Context storage ctx = _newCtx(seed); + IRecurringCollector.RecurringCollectionAgreement memory rca = ctx.ctxInternal.seed.rca; rca.serviceProvider = indexer; + rca.dataService = address(subgraphService); + rca.metadata = abi.encode(_newAcceptIndexingAgreementMetadataV1(bytes32(uint256(1)))); + rca = _recurringCollectorHelper.sensibleRCA(rca); + bytes memory expectedErr = abi.encodeWithSelector( - ProvisionManager.ProvisionManagerInvalidValue.selector, - "tokens", - tokens, - MINIMUM_PROVISION_TOKENS, - MAXIMUM_PROVISION_TOKENS + ISubgraphService.SubgraphServiceIndexerNotRegistered.selector, + indexer ); + vm.prank(rca.payer); + bytes16 agreementId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; vm.expectRevert(expectedErr); - subgraphService.acceptIndexingAgreement(allocationId, rca, authData); + vm.prank(indexer); + recurringCollector.accept(agreementId, activeHash, abi.encode(address(0)), 0); } function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenIndexerNotRegistered( address indexer, uint256 unboundedTokens, - address allocationId, - IRecurringCollector.RecurringCollectionAgreement memory rca, - bytes memory authData + Seed memory seed ) public withSafeIndexerOrOperator(indexer) { uint256 tokens = bound(unboundedTokens, MINIMUM_PROVISION_TOKENS, MAX_TOKENS); mint(indexer, tokens); resetPrank(indexer); _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + vm.stopPrank(); + + // Build a valid 
RCA targeting this unregistered indexer + Context storage ctx = _newCtx(seed); + IRecurringCollector.RecurringCollectionAgreement memory rca = ctx.ctxInternal.seed.rca; rca.serviceProvider = indexer; + rca.dataService = address(subgraphService); + rca.metadata = abi.encode(_newAcceptIndexingAgreementMetadataV1(bytes32(uint256(1)))); + rca = _recurringCollectorHelper.sensibleRCA(rca); + bytes memory expectedErr = abi.encodeWithSelector( ISubgraphService.SubgraphServiceIndexerNotRegistered.selector, indexer ); + vm.prank(rca.payer); + bytes16 agreementId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; vm.expectRevert(expectedErr); - subgraphService.acceptIndexingAgreement(allocationId, rca, authData); + vm.prank(indexer); + recurringCollector.accept(agreementId, activeHash, abi.encode(address(0)), 0); } function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenNotDataService( @@ -102,47 +136,40 @@ contract SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAg Context storage ctx = _newCtx(seed); IndexerState memory indexerState = _withIndexer(ctx); - (IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, ) = _generateAcceptableSignedRCA( + IRecurringCollector.RecurringCollectionAgreement memory acceptableRca = _generateAcceptableRCA( ctx, indexerState.addr ); acceptableRca.dataService = incorrectDataService; - ( - IRecurringCollector.RecurringCollectionAgreement memory unacceptableRca, - bytes memory signature - ) = _recurringCollectorHelper.generateSignedRCA(acceptableRca, ctx.payer.signerPrivateKey); - bytes memory expectedErr = abi.encodeWithSelector( - IndexingAgreement.IndexingAgreementWrongDataService.selector, - address(subgraphService), - unacceptableRca.dataService - ); - vm.expectRevert(expectedErr); + // In the new flow, the RC accept callback calls into the wrong dataService (or no dataService), 
+ // so the revert depends on what incorrectDataService is. The offer will succeed since RC + // doesn't validate dataService beyond non-zero. The accept will call the wrong contract. + // Since incorrectDataService may not implement the callback, this will revert with various errors. + // We just verify the offer succeeds and accept reverts. + vm.prank(acceptableRca.payer); + bytes16 agreementId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(acceptableRca), 0).agreementId; + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.expectRevert(); vm.prank(indexerState.addr); - subgraphService.acceptIndexingAgreement(indexerState.allocationId, unacceptableRca, signature); + recurringCollector.accept(agreementId, activeHash, abi.encode(indexerState.allocationId), 0); } function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenInvalidMetadata(Seed memory seed) public { Context storage ctx = _newCtx(seed); IndexerState memory indexerState = _withIndexer(ctx); - (IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, ) = _generateAcceptableSignedRCA( + IRecurringCollector.RecurringCollectionAgreement memory acceptableRca = _generateAcceptableRCA( ctx, indexerState.addr ); acceptableRca.metadata = bytes("invalid"); - ( - IRecurringCollector.RecurringCollectionAgreement memory unacceptableRca, - bytes memory signature - ) = _recurringCollectorHelper.generateSignedRCA(acceptableRca, ctx.payer.signerPrivateKey); bytes memory expectedErr = abi.encodeWithSelector( IndexingAgreementDecoder.IndexingAgreementDecoderInvalidData.selector, "decodeRCAMetadata", - unacceptableRca.metadata + acceptableRca.metadata ); - vm.expectRevert(expectedErr); - vm.prank(indexerState.addr); - subgraphService.acceptIndexingAgreement(indexerState.allocationId, unacceptableRca, signature); + _offerAndExpectRevertOnAccept(acceptableRca, indexerState.allocationId, indexerState.addr, expectedErr); } function 
test_SubgraphService_AcceptIndexingAgreement_Revert_WhenInvalidAllocation( @@ -151,46 +178,42 @@ contract SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAg ) public { Context storage ctx = _newCtx(seed); IndexerState memory indexerState = _withIndexer(ctx); - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, - bytes memory signature - ) = _generateAcceptableSignedRCA(ctx, indexerState.addr); + IRecurringCollector.RecurringCollectionAgreement memory acceptableRca = _generateAcceptableRCA( + ctx, + indexerState.addr + ); bytes memory expectedErr = abi.encodeWithSelector( IAllocation.AllocationDoesNotExist.selector, invalidAllocationId ); - vm.expectRevert(expectedErr); - vm.prank(indexerState.addr); - subgraphService.acceptIndexingAgreement(invalidAllocationId, acceptableRca, signature); + _offerAndExpectRevertOnAccept(acceptableRca, invalidAllocationId, indexerState.addr, expectedErr); } function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenAllocationNotAuthorized(Seed memory seed) public { Context storage ctx = _newCtx(seed); IndexerState memory indexerStateA = _withIndexer(ctx); IndexerState memory indexerStateB = _withIndexer(ctx); - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptableRcaA, - bytes memory signatureA - ) = _generateAcceptableSignedRCA(ctx, indexerStateA.addr); + IRecurringCollector.RecurringCollectionAgreement memory acceptableRcaA = _generateAcceptableRCA( + ctx, + indexerStateA.addr + ); bytes memory expectedErr = abi.encodeWithSelector( ISubgraphService.SubgraphServiceAllocationNotAuthorized.selector, indexerStateA.addr, indexerStateB.allocationId ); - vm.expectRevert(expectedErr); - vm.prank(indexerStateA.addr); - subgraphService.acceptIndexingAgreement(indexerStateB.allocationId, acceptableRcaA, signatureA); + _offerAndExpectRevertOnAccept(acceptableRcaA, indexerStateB.allocationId, indexerStateA.addr, expectedErr); } function 
test_SubgraphService_AcceptIndexingAgreement_Revert_WhenAllocationClosed(Seed memory seed) public { Context storage ctx = _newCtx(seed); IndexerState memory indexerState = _withIndexer(ctx); - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, - bytes memory signature - ) = _generateAcceptableSignedRCA(ctx, indexerState.addr); + IRecurringCollector.RecurringCollectionAgreement memory acceptableRca = _generateAcceptableRCA( + ctx, + indexerState.addr + ); resetPrank(indexerState.addr); subgraphService.stopService(indexerState.addr, abi.encode(indexerState.allocationId)); @@ -199,8 +222,7 @@ contract SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAg AllocationHandler.AllocationHandlerAllocationClosed.selector, indexerState.allocationId ); - vm.expectRevert(expectedErr); - subgraphService.acceptIndexingAgreement(indexerState.allocationId, acceptableRca, signature); + _offerAndExpectRevertOnAccept(acceptableRca, indexerState.allocationId, indexerState.addr, expectedErr); } function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenDeploymentIdMismatch( @@ -210,15 +232,11 @@ contract SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAg Context storage ctx = _newCtx(seed); IndexerState memory indexerState = _withIndexer(ctx); vm.assume(indexerState.subgraphDeploymentId != wrongSubgraphDeploymentId); - (IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, ) = _generateAcceptableSignedRCA( + IRecurringCollector.RecurringCollectionAgreement memory acceptableRca = _generateAcceptableRCA( ctx, indexerState.addr ); acceptableRca.metadata = abi.encode(_newAcceptIndexingAgreementMetadataV1(wrongSubgraphDeploymentId)); - ( - IRecurringCollector.RecurringCollectionAgreement memory unacceptableRca, - bytes memory signature - ) = _recurringCollectorHelper.generateSignedRCA(acceptableRca, ctx.payer.signerPrivateKey); bytes memory expectedErr = abi.encodeWithSelector( 
IndexingAgreement.IndexingAgreementDeploymentIdMismatch.selector, @@ -226,9 +244,7 @@ contract SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAg indexerState.allocationId, indexerState.subgraphDeploymentId ); - vm.expectRevert(expectedErr); - vm.prank(indexerState.addr); - subgraphService.acceptIndexingAgreement(indexerState.allocationId, unacceptableRca, signature); + _offerAndExpectRevertOnAccept(acceptableRca, indexerState.allocationId, indexerState.addr, expectedErr); } function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenAgreementAlreadyAccepted(Seed memory seed) public { @@ -239,19 +255,17 @@ contract SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAg bytes16 agreementId ) = _withAcceptedIndexingAgreement(ctx, indexerState); - // Re-sign for the re-accept attempt (the original signature was consumed) - (, bytes memory signature) = _recurringCollectorHelper.generateSignedRCA( - acceptedRca, - ctx.payer.signerPrivateKey - ); - + // The agreement is already accepted on the collector, so trying to accept again + // goes to the pending-update path (state has ACCEPTED set). Since there is no pending + // update, the pending terms hash is bytes32(0) and the guard rejects with AgreementTermsEmpty. 
+ bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; bytes memory expectedErr = abi.encodeWithSelector( - IndexingAgreement.IndexingAgreementAlreadyAccepted.selector, + IRecurringCollector.AgreementTermsEmpty.selector, agreementId ); vm.expectRevert(expectedErr); - resetPrank(ctx.indexers[0].addr); - subgraphService.acceptIndexingAgreement(ctx.indexers[0].allocationId, acceptedRca, signature); + vm.prank(indexerState.addr); + recurringCollector.accept(agreementId, activeHash, abi.encode(indexerState.allocationId), 0); } function test_SubgraphService_AcceptIndexingAgreement_Revert_WhenAgreementAlreadyAllocated( @@ -269,31 +283,21 @@ contract SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAg vm.assume(acceptedRca.nonce != alternativeNonce); // Now try to accept a different agreement on the same allocation - // Create a new agreement with different nonce to ensure different agreement ID IRecurringCollector.RecurringCollectionAgreement memory newRCA = _generateAcceptableRecurringCollectionAgreement(ctx, indexerState.addr); - newRCA.nonce = alternativeNonce; // Different nonce to ensure different agreement ID + newRCA.nonce = alternativeNonce; - // Sign the new agreement - ( - IRecurringCollector.RecurringCollectionAgreement memory newSignedRca, - bytes memory newSignature - ) = _recurringCollectorHelper.generateSignedRCA(newRCA, ctx.payer.signerPrivateKey); - - // Expect the error when trying to accept a second agreement on the same allocation bytes memory expectedErr = abi.encodeWithSelector( IndexingAgreement.AllocationAlreadyHasIndexingAgreement.selector, indexerState.allocationId ); - vm.expectRevert(expectedErr); - resetPrank(indexerState.addr); - subgraphService.acceptIndexingAgreement(indexerState.allocationId, newSignedRca, newSignature); + _offerAndExpectRevertOnAccept(newRCA, indexerState.allocationId, indexerState.addr, expectedErr); } function 
test_SubgraphService_AcceptIndexingAgreement_Revert_WhenInvalidTermsData(Seed memory seed) public { Context storage ctx = _newCtx(seed); IndexerState memory indexerState = _withIndexer(ctx); - (IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, ) = _generateAcceptableSignedRCA( + IRecurringCollector.RecurringCollectionAgreement memory acceptableRca = _generateAcceptableRCA( ctx, indexerState.addr ); @@ -303,28 +307,22 @@ contract SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAg notAcceptableRCA.metadata = abi.encode( _newAcceptIndexingAgreementMetadataV1Terms(indexerState.subgraphDeploymentId, invalidTermsData) ); - ( - IRecurringCollector.RecurringCollectionAgreement memory notAcceptableRcaSigned, - bytes memory signature - ) = _recurringCollectorHelper.generateSignedRCA(notAcceptableRCA, ctx.payer.signerPrivateKey); bytes memory expectedErr = abi.encodeWithSelector( IndexingAgreementDecoder.IndexingAgreementDecoderInvalidData.selector, "decodeIndexingAgreementTermsV1", invalidTermsData ); - vm.expectRevert(expectedErr); - resetPrank(indexerState.addr); - subgraphService.acceptIndexingAgreement(indexerState.allocationId, notAcceptableRcaSigned, signature); + _offerAndExpectRevertOnAccept(notAcceptableRCA, indexerState.allocationId, indexerState.addr, expectedErr); } function test_SubgraphService_AcceptIndexingAgreement(Seed memory seed) public { Context storage ctx = _newCtx(seed); IndexerState memory indexerState = _withIndexer(ctx); - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptableRca, - bytes memory signature - ) = _generateAcceptableSignedRCA(ctx, indexerState.addr); + IRecurringCollector.RecurringCollectionAgreement memory acceptableRca = _generateAcceptableRCA( + ctx, + indexerState.addr + ); IndexingAgreement.AcceptIndexingAgreementMetadata memory metadata = abi.decode( acceptableRca.metadata, (IndexingAgreement.AcceptIndexingAgreementMetadata) @@ -338,6 +336,13 @@ contract 
SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAg acceptableRca.nonce ); + // Step 1: Submit offer to RC + vm.prank(acceptableRca.payer); + bytes16 agreementId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(acceptableRca), 0).agreementId; + assertEq(agreementId, expectedAgreementId); + + // Step 2: Accept via RC (serviceProvider calls directly) + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; vm.expectEmit(address(subgraphService)); emit IndexingAgreement.IndexingAgreementAccepted( acceptableRca.serviceProvider, @@ -349,8 +354,8 @@ contract SubgraphServiceIndexingAgreementAcceptTest is SubgraphServiceIndexingAg metadata.terms ); - resetPrank(indexerState.addr); - subgraphService.acceptIndexingAgreement(indexerState.allocationId, acceptableRca, signature); + vm.prank(indexerState.addr); + recurringCollector.accept(agreementId, activeHash, abi.encode(indexerState.allocationId), 0); } /* solhint-enable graph/func-name-mixedcase */ } diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/base.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/base.t.sol index e01d157c0..699ed9196 100644 --- a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/base.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/base.t.sol @@ -1,7 +1,6 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; -import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; @@ -40,15 +39,6 @@ contract SubgraphServiceIndexingAgreementBaseTest is SubgraphServiceIndexingAgre _assertEqualAgreement(acceptedRca, agreement); } - function 
test_SubgraphService_Revert_WhenUnsafeAddress_WhenProxyAdmin(address indexer, bytes16 agreementId) public { - address operator = _transparentUpgradeableProxyAdmin(); - assertFalse(_isSafeSubgraphServiceCaller(operator)); - - vm.expectRevert(TransparentUpgradeableProxy.ProxyDeniedAdminAccess.selector); - resetPrank(address(operator)); - subgraphService.cancelIndexingAgreement(indexer, agreementId); - } - function test_SubgraphService_Revert_WhenUnsafeAddress_WhenGraphProxyAdmin(uint256 unboundedTokens) public { address indexer = GRAPH_PROXY_ADMIN_ADDRESS; assertFalse(_isSafeSubgraphServiceCaller(indexer)); diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/callbackGas.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/callbackGas.t.sol new file mode 100644 index 000000000..04780b413 --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/callbackGas.t.sol @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { OFFER_TYPE_NEW, OFFER_TYPE_UPDATE } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; + +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; + +import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; + +/// @notice Gas measurement for SubgraphService.acceptAgreement callback. +/// RecurringCollector forwards at most MAX_CALLBACK_GAS (1.5M) to acceptAgreement +/// during auto-update. If the callback exceeds this budget, auto-update silently +/// fails and the agreement transitions to SETTLED. 
+/// +/// These tests call acceptAgreement directly (pranking as the collector) to isolate +/// the data-service callback gas from the collector overhead. +contract SubgraphServiceCallbackGasTest is SubgraphServiceIndexingAgreementSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + /// @notice Must match MAX_CALLBACK_GAS in RecurringCollector. + uint256 internal constant MAX_CALLBACK_GAS = 1_500_000; + + /// @notice Assert callbacks use less than half the budget. + /// Leaves margin for cold storage and EVM repricing. + uint256 internal constant GAS_THRESHOLD = MAX_CALLBACK_GAS / 2; // 750_000 + + /// @notice Initial accept (onAcceptCallback): heaviest path with allocation binding, + /// storage writes for agreement state, and allocationToActiveAgreementId mapping. + function test_AcceptAgreement_GasWithinBudget_InitialAccept(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _generateAcceptableRCA(ctx, indexerState.addr); + + // Payer submits offer to get a valid agreement in the collector + vm.prank(rca.payer); + bytes16 agreementId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + bytes32 versionHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + + // Call acceptAgreement directly on SS, pranking as the collector, + // to isolate the data-service callback gas. + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + subgraphService.acceptAgreement( + agreementId, + versionHash, + rca.payer, + indexerState.addr, + rca.metadata, + abi.encode(indexerState.allocationId) + ); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_THRESHOLD, "acceptAgreement (initial) exceeds half of callback gas budget"); + } + + /// @notice Update accept (onAcceptCallback update path): validates terms and updates storage. 
+ /// Lighter than initial accept but still exercises storage writes. + function test_AcceptAgreement_GasWithinBudget_UpdateAccept(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + + // Create and accept initial agreement through the normal flow + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexerState); + + // Submit an update offer + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _generateAcceptableRCAU(ctx, rca); + vm.prank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + + // Build update metadata matching what the collector would pass + IndexingAgreement.UpdateIndexingAgreementMetadata memory updateMeta = IndexingAgreement + .UpdateIndexingAgreementMetadata({ + version: IIndexingAgreement.IndexingAgreementVersion.V1, + terms: abi.encode( + IndexingAgreement.IndexingAgreementTermsV1({ tokensPerSecond: 0, tokensPerEntityPerSecond: 0 }) + ) + }); + + // Call acceptAgreement directly on SS for the update path + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + subgraphService.acceptAgreement( + agreementId, + pendingHash, + rca.payer, + indexerState.addr, + abi.encode(updateMeta), + bytes("") + ); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_THRESHOLD, "acceptAgreement (update) exceeds half of callback gas budget"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/cancel.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/cancel.t.sol index a0d4ed2d1..0c4eddbd2 100644 --- a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/cancel.t.sol +++ 
b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/cancel.t.sol @@ -2,11 +2,6 @@ pragma solidity ^0.8.27; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; -import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; -import { ProvisionManager } from "@graphprotocol/horizon/contracts/data-service/utilities/ProvisionManager.sol"; - -import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; -import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; @@ -16,217 +11,26 @@ contract SubgraphServiceIndexingAgreementCancelTest is SubgraphServiceIndexingAg */ /* solhint-disable graph/func-name-mixedcase */ - function test_SubgraphService_CancelIndexingAgreementByPayer_Revert_WhenPaused( - address rando, - bytes16 agreementId - ) public withSafeIndexerOrOperator(rando) { - resetPrank(users.pauseGuardian); - subgraphService.pause(); - - vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); - resetPrank(rando); - subgraphService.cancelIndexingAgreementByPayer(agreementId); - } - - function test_SubgraphService_CancelIndexingAgreementByPayer_Revert_WhenNotAuthorized( - Seed memory seed, - address rando - ) public withSafeIndexerOrOperator(rando) { - Context storage ctx = _newCtx(seed); - vm.assume(rando != seed.rca.payer); - vm.assume(rando != ctx.payer.signer); - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - bytes16 agreementId - ) = _withAcceptedIndexingAgreement(ctx, _withIndexer(ctx)); - - bytes memory expectedErr = abi.encodeWithSelector( - IndexingAgreement.IndexingAgreementNonCancelableBy.selector, - acceptedRca.payer, - rando - ); - vm.expectRevert(expectedErr); - resetPrank(rando); - subgraphService.cancelIndexingAgreementByPayer(agreementId); - } - - 
function test_SubgraphService_CancelIndexingAgreementByPayer_Revert_WhenNotAccepted( - Seed memory seed, - bytes16 agreementId - ) public { - Context storage ctx = _newCtx(seed); - IndexerState memory indexerState = _withIndexer(ctx); - - resetPrank(indexerState.addr); - bytes memory expectedErr = abi.encodeWithSelector( - IndexingAgreement.IndexingAgreementNotActive.selector, - agreementId - ); - vm.expectRevert(expectedErr); - subgraphService.cancelIndexingAgreementByPayer(agreementId); - } - - function test_SubgraphService_CancelIndexingAgreementByPayer_Revert_WhenCanceled( - Seed memory seed, - bool cancelSource - ) public { - Context storage ctx = _newCtx(seed); - IndexerState memory indexerState = _withIndexer(ctx); - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, - bytes16 acceptedAgreementId - ) = _withAcceptedIndexingAgreement(ctx, indexerState); - IRecurringCollector.CancelAgreementBy by = cancelSource - ? IRecurringCollector.CancelAgreementBy.ServiceProvider - : IRecurringCollector.CancelAgreementBy.Payer; - _cancelAgreement(ctx, acceptedAgreementId, indexerState.addr, acceptedRca.payer, by); - - resetPrank(indexerState.addr); - bytes memory expectedErr = abi.encodeWithSelector( - IndexingAgreement.IndexingAgreementNotActive.selector, - acceptedAgreementId - ); - vm.expectRevert(expectedErr); - subgraphService.cancelIndexingAgreementByPayer(acceptedAgreementId); - } - function test_SubgraphService_CancelIndexingAgreementByPayer(Seed memory seed) public { + function test_SubgraphService_CancelByPayer_OK(Seed memory seed) public { Context storage ctx = _newCtx(seed); ( IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 acceptedAgreementId ) = _withAcceptedIndexingAgreement(ctx, _withIndexer(ctx)); - _cancelAgreement( - ctx, - acceptedAgreementId, - acceptedRca.serviceProvider, - acceptedRca.payer, - IRecurringCollector.CancelAgreementBy.Payer - ); + _cancelAgreement(ctx, acceptedAgreementId, 
acceptedRca.serviceProvider, acceptedRca.payer, false); } - function test_SubgraphService_CancelIndexingAgreement_Revert_WhenPaused( - address operator, - address indexer, - bytes16 agreementId - ) public withSafeIndexerOrOperator(operator) { - resetPrank(users.pauseGuardian); - subgraphService.pause(); - - vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); - resetPrank(operator); - subgraphService.cancelIndexingAgreement(indexer, agreementId); - } - - function test_SubgraphService_CancelIndexingAgreement_Revert_WhenNotAuthorized( - address operator, - address indexer, - bytes16 agreementId - ) public withSafeIndexerOrOperator(operator) { - vm.assume(operator != indexer); - resetPrank(operator); - bytes memory expectedErr = abi.encodeWithSelector( - ProvisionManager.ProvisionManagerNotAuthorized.selector, - indexer, - operator - ); - vm.expectRevert(expectedErr); - subgraphService.cancelIndexingAgreement(indexer, agreementId); - } - - function test_SubgraphService_CancelIndexingAgreement_Revert_WhenInvalidProvision( - address indexer, - bytes16 agreementId, - uint256 unboundedTokens - ) public withSafeIndexerOrOperator(indexer) { - uint256 tokens = bound(unboundedTokens, 1, MINIMUM_PROVISION_TOKENS - 1); - mint(indexer, tokens); - resetPrank(indexer); - _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); - - bytes memory expectedErr = abi.encodeWithSelector( - ProvisionManager.ProvisionManagerInvalidValue.selector, - "tokens", - tokens, - MINIMUM_PROVISION_TOKENS, - MAXIMUM_PROVISION_TOKENS - ); - vm.expectRevert(expectedErr); - subgraphService.cancelIndexingAgreement(indexer, agreementId); - } - - function test_SubgraphService_CancelIndexingAgreement_Revert_WhenIndexerNotRegistered( - address indexer, - bytes16 agreementId, - uint256 unboundedTokens - ) public withSafeIndexerOrOperator(indexer) { - uint256 tokens = bound(unboundedTokens, MINIMUM_PROVISION_TOKENS, MAX_TOKENS); - mint(indexer, tokens); - resetPrank(indexer); - 
_createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); - bytes memory expectedErr = abi.encodeWithSelector( - ISubgraphService.SubgraphServiceIndexerNotRegistered.selector, - indexer - ); - vm.expectRevert(expectedErr); - subgraphService.cancelIndexingAgreement(indexer, agreementId); - } - - function test_SubgraphService_CancelIndexingAgreement_Revert_WhenNotAccepted( - Seed memory seed, - bytes16 agreementId - ) public { - Context storage ctx = _newCtx(seed); - IndexerState memory indexerState = _withIndexer(ctx); - - resetPrank(indexerState.addr); - bytes memory expectedErr = abi.encodeWithSelector( - IndexingAgreement.IndexingAgreementNotActive.selector, - agreementId - ); - vm.expectRevert(expectedErr); - subgraphService.cancelIndexingAgreement(indexerState.addr, agreementId); - } - - function test_SubgraphService_CancelIndexingAgreement_Revert_WhenCanceled( - Seed memory seed, - bool cancelSource - ) public { - Context storage ctx = _newCtx(seed); - IndexerState memory indexerState = _withIndexer(ctx); - ( - IRecurringCollector.RecurringCollectionAgreement memory acceptedRca2, - bytes16 acceptedAgreementId - ) = _withAcceptedIndexingAgreement(ctx, indexerState); - IRecurringCollector.CancelAgreementBy by = cancelSource - ? 
IRecurringCollector.CancelAgreementBy.ServiceProvider - : IRecurringCollector.CancelAgreementBy.Payer; - _cancelAgreement(ctx, acceptedAgreementId, acceptedRca2.serviceProvider, acceptedRca2.payer, by); - - resetPrank(indexerState.addr); - bytes memory expectedErr = abi.encodeWithSelector( - IndexingAgreement.IndexingAgreementNotActive.selector, - acceptedAgreementId - ); - vm.expectRevert(expectedErr); - subgraphService.cancelIndexingAgreement(indexerState.addr, acceptedAgreementId); - } - - function test_SubgraphService_CancelIndexingAgreement_OK(Seed memory seed) public { + function test_SubgraphService_CancelByProvider_OK(Seed memory seed) public { Context storage ctx = _newCtx(seed); ( IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, bytes16 acceptedAgreementId ) = _withAcceptedIndexingAgreement(ctx, _withIndexer(ctx)); - _cancelAgreement( - ctx, - acceptedAgreementId, - acceptedRca.serviceProvider, - acceptedRca.payer, - IRecurringCollector.CancelAgreementBy.ServiceProvider - ); + _cancelAgreement(ctx, acceptedAgreementId, acceptedRca.serviceProvider, acceptedRca.payer, true); } + /* solhint-enable graph/func-name-mixedcase */ } diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/collect.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/collect.t.sol index 5818a1d63..02b2b4d5b 100644 --- a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/collect.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/collect.t.sol @@ -9,6 +9,7 @@ import { ProvisionManager } from "@graphprotocol/horizon/contracts/data-service/ import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; +import { Allocation } from "../../../../contracts/libraries/Allocation.sol"; import { AllocationHandler } 
from "../../../../contracts/libraries/AllocationHandler.sol"; import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; import { IndexingAgreementDecoder } from "../../../../contracts/libraries/IndexingAgreementDecoder.sol"; @@ -16,6 +17,8 @@ import { IndexingAgreementDecoder } from "../../../../contracts/libraries/Indexi import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; contract SubgraphServiceIndexingAgreementCollectTest is SubgraphServiceIndexingAgreementSharedTest { + using Allocation for IAllocation.State; + /* * TESTS */ @@ -262,17 +265,24 @@ contract SubgraphServiceIndexingAgreementCollectTest is SubgraphServiceIndexingA ) public { Context storage ctx = _newCtx(seed); IndexerState memory indexerState = _withIndexer(ctx); - (, bytes16 acceptedAgreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 acceptedAgreementId + ) = _withAcceptedIndexingAgreement(ctx, indexerState); + + // BY_PROVIDER cancel sets SETTLED immediately — required before closing the allocation + bytes32 activeHash = recurringCollector.getAgreementVersionAt(acceptedAgreementId, 0).versionHash; + resetPrank(rca.serviceProvider); + recurringCollector.cancel(acceptedAgreementId, activeHash, 0); resetPrank(indexerState.addr); subgraphService.stopService(indexerState.addr, abi.encode(indexerState.allocationId)); uint256 currentEpochBlock = epochManager.currentEpochBlock(); - bytes memory expectedErr = abi.encodeWithSelector( - AllocationHandler.AllocationHandlerAllocationClosed.selector, - indexerState.allocationId - ); + // After SETTLED close, agreement.allocationId is cleared to address(0). + // Collecting fails because address(0) doesn't exist in the allocations mapping. 
+ bytes memory expectedErr = abi.encodeWithSelector(IAllocation.AllocationDoesNotExist.selector, address(0)); vm.expectRevert(expectedErr); subgraphService.collect( indexerState.addr, @@ -281,7 +291,7 @@ contract SubgraphServiceIndexingAgreementCollectTest is SubgraphServiceIndexingA ); } - function test_SubgraphService_CollectIndexingFees_Reverts_WhenCloseStaleAllocation( + function test_SubgraphService_CollectIndexingFees_AllocationOpenAfterStaleDownsize( Seed memory seed, uint256 entities, bytes32 poi @@ -294,11 +304,39 @@ contract SubgraphServiceIndexingAgreementCollectTest is SubgraphServiceIndexingA resetPrank(indexerState.addr); subgraphService.closeStaleAllocation(indexerState.allocationId); + // Allocation should still be open (resized to 0, not closed) + IAllocation.State memory allocation = subgraphService.getAllocation(indexerState.allocationId); + assertTrue(allocation.isOpen()); + assertEq(allocation.tokens, 0); + } + + function test_SubgraphService_CollectIndexingFees_Revert_WhenNotCollectable( + Seed memory seed, + uint256 entities, + bytes32 poi + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (, bytes16 acceptedAgreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + + resetPrank(indexerState.addr); uint256 currentEpochBlock = epochManager.currentEpochBlock(); + // Mock getAgreementData to return not collectable + IRecurringCollector.AgreementData memory notCollectableData = recurringCollector.getAgreementData( + acceptedAgreementId + ); + notCollectableData.isCollectable = false; + notCollectableData.collectionSeconds = 0; + vm.mockCall( + address(recurringCollector), + abi.encodeWithSelector(IRecurringCollector.getAgreementData.selector, acceptedAgreementId), + abi.encode(notCollectableData) + ); + bytes memory expectedErr = abi.encodeWithSelector( - AllocationHandler.AllocationHandlerAllocationClosed.selector, - indexerState.allocationId + 
IndexingAgreement.IndexingAgreementNotCollectable.selector, + acceptedAgreementId ); vm.expectRevert(expectedErr); subgraphService.collect( diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/integration.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/integration.t.sol index d6f69414f..ce1da92d4 100644 --- a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/integration.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/integration.t.sol @@ -1,17 +1,27 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + OFFER_TYPE_NEW +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; +import { IAllocation } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IAllocation.sol"; +import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { PPMMath } from "@graphprotocol/horizon/contracts/libraries/PPMMath.sol"; import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; +import { Allocation } from "../../../../contracts/libraries/Allocation.sol"; import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; contract SubgraphServiceIndexingAgreementIntegrationTest is SubgraphServiceIndexingAgreementSharedTest { using PPMMath for uint256; + using Allocation for IAllocation.State; struct TestState { uint256 escrowBalance; @@ -76,16 +86,14 @@ contract SubgraphServiceIndexingAgreementIntegrationTest is SubgraphServiceIndex 
IRecurringCollector.RecurringCollectionAgreement memory rca = _recurringCollectorHelper.sensibleRCA( ctx.ctxInternal.seed.rca ); + // Payer must differ from indexer, otherwise cancel resolves as BY_PROVIDER (forfeit → settled) + vm.assume(rca.payer != indexerState.addr); bytes16 acceptedAgreementId = _sharedSetup(ctx, rca, indexerState, expectedTokens); - // Cancel the indexing agreement by the payer - resetPrank(ctx.payer.signer); - subgraphService.cancelIndexingAgreementByPayer(acceptedAgreementId); - + // Collect the funded tokens first + resetPrank(indexerState.addr); TestState memory beforeCollect = _getState(rca.payer, indexerState.addr); - // Collect - resetPrank(indexerState.addr); uint256 tokensCollected = subgraphService.collect( indexerState.addr, IGraphPayments.PaymentTypes.IndexingFee, @@ -100,9 +108,20 @@ contract SubgraphServiceIndexingAgreementIntegrationTest is SubgraphServiceIndex TestState memory afterCollect = _getState(rca.payer, indexerState.addr); _sharedAssert(beforeCollect, afterCollect, expectedTokens, tokensCollected); + + // Cancel the indexing agreement by the payer (directly on collector). + // Payer cancel enforces minSecondsPayerCancellationNotice — agreement enters + // NOTICE_GIVEN | BY_PAYER state with collectableUntil in the future. 
+ resetPrank(rca.payer); + bytes32 activeHash = recurringCollector.getAgreementVersionAt(acceptedAgreementId, 0).versionHash; + recurringCollector.cancel(acceptedAgreementId, activeHash, 0); + + // Verify agreement is in NOTICE_GIVEN state + IRecurringCollector.AgreementData memory agreement = recurringCollector.getAgreementData(acceptedAgreementId); + assertTrue(agreement.state & NOTICE_GIVEN != 0, "should be in NOTICE_GIVEN state after payer cancel"); } - function test_SubgraphService_CollectIndexingRewards_CancelsAgreementWhenOverAllocated_Integration( + function test_SubgraphService_CollectIndexingRewards_ResizesAllocationWhenOverAllocated_Integration( Seed memory seed ) public { // Setup context and indexer with active agreement @@ -123,17 +142,44 @@ contract SubgraphServiceIndexingAgreementIntegrationTest is SubgraphServiceIndex // Advance past allocation creation epoch so POI is not considered "too young" vm.roll(block.number + EPOCH_LENGTH); - // Collect indexing rewards - this should trigger allocation closure and agreement cancellation + // Collect indexing rewards - this should trigger allocation downsize (not closure) bytes memory collectData = abi.encode(indexerState.allocationId, keccak256("poi"), bytes("metadata")); resetPrank(indexerState.addr); + subgraphService.collect(indexerState.addr, IGraphPayments.PaymentTypes.IndexingRewards, collectData); - // Verify the indexing agreement was properly cancelled + // Verify the allocation is still open but resized to zero + IAllocation.State memory allocation = subgraphService.getAllocation(indexerState.allocationId); + assertTrue(allocation.isOpen()); + assertEq(allocation.tokens, 0); + + // Verify the indexing agreement was NOT cancelled — it stays active IIndexingAgreement.AgreementWrapper memory agreement = subgraphService.getIndexingAgreement(agreementId); - assertEq( - uint8(agreement.collectorAgreement.state), - uint8(IRecurringCollector.AgreementState.CanceledByServiceProvider) + 
assertEq(agreement.collectorAgreement.state, REGISTERED | ACCEPTED); + } + + function test_SubgraphService_StopService_RevertsWhenGuardEnabledAndActiveAgreement_Integration( + Seed memory seed + ) public { + // Setup context and indexer with active agreement + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (, bytes16 agreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + + // Enable the close allocation guard + resetPrank(users.governor); + subgraphService.setBlockClosingAllocationWithActiveAgreement(true); + + // Attempt to close the allocation — should revert because of active agreement + resetPrank(indexerState.addr); + vm.expectRevert( + abi.encodeWithSelector( + ISubgraphService.SubgraphServiceAllocationHasActiveAgreement.selector, + indexerState.allocationId, + agreementId + ) ); + subgraphService.stopService(indexerState.addr, abi.encode(indexerState.allocationId)); } /* solhint-enable graph/func-name-mixedcase */ @@ -144,6 +190,10 @@ contract SubgraphServiceIndexingAgreementIntegrationTest is SubgraphServiceIndex IndexerState memory _indexerState, ExpectedTokens memory _expectedTokens ) internal returns (bytes16) { + // Exclude payer addresses that collide with protocol contracts to prevent + // token routing issues (e.g., receiverDestination == escrow) + vm.assume(!_isProtocolContract(_rca.payer)); + vm.assume(!_isTestUser(_rca.payer)); _addTokensToProvision(_indexerState, _expectedTokens.expectedTokensLocked); IndexingAgreement.IndexingAgreementTermsV1 memory terms = IndexingAgreement.IndexingAgreementTermsV1({ @@ -160,23 +210,22 @@ contract SubgraphServiceIndexingAgreementIntegrationTest is SubgraphServiceIndex _rca.dataService = address(subgraphService); // data service is the subgraph service _rca.metadata = _encodeAcceptIndexingAgreementMetadataV1(_indexerState.subgraphDeploymentId, terms); - _setupPayerWithEscrow( - _rca.payer, - _ctx.payer.signerPrivateKey, - _indexerState.addr, - 
_expectedTokens.expectedTotalTokensCollected - ); + _setupPayerWithEscrow(_rca.payer, _indexerState.addr, _expectedTokens.expectedTotalTokensCollected); resetPrank(_indexerState.addr); // Set the payments destination to the indexer address subgraphService.setPaymentsDestination(_indexerState.addr); + vm.stopPrank(); + + // Accept the Indexing Agreement via RC offer->accept flow + // Step 1: Submit offer to RC + vm.prank(_rca.payer); + bytes16 agreementId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(_rca), 0).agreementId; - // Accept the Indexing Agreement - ( - IRecurringCollector.RecurringCollectionAgreement memory signedRca, - bytes memory signature - ) = _recurringCollectorHelper.generateSignedRCA(_rca, _ctx.payer.signerPrivateKey); - bytes16 agreementId = subgraphService.acceptIndexingAgreement(_indexerState.allocationId, signedRca, signature); + // Step 2: Service provider accepts via RC, which callbacks to SS + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(_indexerState.addr); + recurringCollector.accept(agreementId, activeHash, abi.encode(_indexerState.allocationId), 0); // Skip ahead to collection point skip(_expectedTokens.expectedTotalTokensCollected / terms.tokensPerSecond); @@ -245,14 +294,7 @@ contract SubgraphServiceIndexingAgreementIntegrationTest is SubgraphServiceIndex vm.stopPrank(); } - function _setupPayerWithEscrow( - address _payer, - uint256 _signerPrivateKey, - address _indexer, - uint256 _escrowTokens - ) private { - _recurringCollectorHelper.authorizeSignerWithChecks(_payer, _signerPrivateKey); - + function _setupPayerWithEscrow(address _payer, address _indexer, uint256 _escrowTokens) private { deal({ token: address(token), to: _payer, give: _escrowTokens }); vm.startPrank(_payer); _escrow(_escrowTokens, _indexer); diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/lifecycle.t.sol 
b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/lifecycle.t.sol new file mode 100644 index 000000000..8f0bbf2fb --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/lifecycle.t.sol @@ -0,0 +1,594 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE, + REGISTERED, + ACCEPTED, + SETTLED +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { Vm } from "forge-std/Vm.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; + +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; +import { AllocationHandler } from "../../../../contracts/libraries/AllocationHandler.sol"; + +import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; + +/// @title Allocation-Agreement Lifecycle Tests +/// @notice Tests for the redesigned allocation-agreement lifecycle: +/// - SETTLED-gated allocation close (decision 2) +/// - Bidirectional mapping integrity (decision 1) +/// - Allocation rebinding via extraData (decision 3) +/// - Revival gating (decisions 3/5) +contract AllocationAgreementLifecycleTest is SubgraphServiceIndexingAgreementSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // ══════════════════════════════════════════════════════════════════════ + // Helpers + // ══════════════════════════════════════════════════════════════════════ + + /// @dev Offer and accept a first update, then return an RCAU for a second update + /// with the same metadata. This ensures activeTerms.metadata is in Update format + /// so a subsequent same-metadata RCAU triggers the skip path. 
+ function _withFirstUpdateThenSameMetadataRCAU( + Context storage _ctx, + IRecurringCollector.RecurringCollectionAgreement memory _rca, + bytes16 _agreementId, + address _indexer + ) internal returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2) { + // First update: sets activeTerms.metadata to UpdateMetadata format + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _generateAcceptableRCAU(_ctx, _rca); + + resetPrank(_rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau1), 0); + + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(_agreementId, 1).versionHash; + resetPrank(_indexer); + recurringCollector.accept(_agreementId, pendingHash, bytes(""), 0); + + // Second update: same metadata as first → should trigger skip + rcau2 = _generateAcceptableRCAU(_ctx, _rca); + rcau2.nonce = rcau1.nonce + 1; + rcau2.metadata = rcau1.metadata; // same metadata as now-active terms + } + + // ══════════════════════════════════════════════════════════════════════ + // SETTLED-gated allocation close (decision 2) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice close-before-settled → rejected when guard enabled + function test_CloseAllocation_RevertsWhenNotSettled(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + (, bytes16 agreementId) = _withAcceptedIndexingAgreement(ctx, indexer); + + resetPrank(users.governor); + subgraphService.setBlockClosingAllocationWithActiveAgreement(true); + + resetPrank(indexer.addr); + vm.expectRevert( + abi.encodeWithSelector( + ISubgraphService.SubgraphServiceAllocationHasActiveAgreement.selector, + indexer.allocationId, + agreementId + ) + ); + subgraphService.stopService(indexer.addr, abi.encode(indexer.allocationId)); + } + + /// @notice close-after-settled → allowed, both mappings cleared + function test_CloseAllocation_SucceedsWhenSettled(Seed memory seed) 
public {
+        Context storage ctx = _newCtx(seed);
+        IndexerState memory indexer = _withIndexer(ctx);
+        (
+            IRecurringCollector.RecurringCollectionAgreement memory rca,
+            bytes16 agreementId
+        ) = _withAcceptedIndexingAgreement(ctx, indexer);
+
+        _cancelAgreement(ctx, agreementId, indexer.addr, rca.payer, true);
+
+        resetPrank(indexer.addr);
+        subgraphService.stopService(indexer.addr, abi.encode(indexer.allocationId));
+
+        IIndexingAgreement.AgreementWrapper memory wrapper = subgraphService.getIndexingAgreement(agreementId);
+        assertEq(wrapper.agreement.allocationId, address(0), "agreement.allocationId should be cleared");
+    }
+
+    /// @notice close-before-settled with guard → SubgraphServiceAllocationHasActiveAgreement. NOTE(review): body is identical to test_CloseAllocation_RevertsWhenNotSettled above — one of the two duplicates should be removed or differentiated.
+    function test_CloseAllocation_GuardRejectsActiveAgreement(Seed memory seed) public {
+        Context storage ctx = _newCtx(seed);
+        IndexerState memory indexer = _withIndexer(ctx);
+        (, bytes16 agreementId) = _withAcceptedIndexingAgreement(ctx, indexer);
+
+        resetPrank(users.governor);
+        subgraphService.setBlockClosingAllocationWithActiveAgreement(true);
+
+        resetPrank(indexer.addr);
+        vm.expectRevert(
+            abi.encodeWithSelector(
+                ISubgraphService.SubgraphServiceAllocationHasActiveAgreement.selector,
+                indexer.allocationId,
+                agreementId
+            )
+        );
+        subgraphService.stopService(indexer.addr, abi.encode(indexer.allocationId));
+    }
+
+    // ══════════════════════════════════════════════════════════════════════
+    // Rebinding via extraData (decision 3)
+    // ══════════════════════════════════════════════════════════════════════
+
+    /// @notice rebind to same allocation → no-op, accepted
+    function test_Rebind_ToSameAllocation_NoOp(Seed memory seed) public {
+        Context storage ctx = _newCtx(seed);
+        IndexerState memory indexer = _withIndexer(ctx);
+        (
+            IRecurringCollector.RecurringCollectionAgreement memory rca,
+            bytes16 agreementId
+        ) = _withAcceptedIndexingAgreement(ctx, indexer);
+
+        IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau =
_generateAcceptableRCAU(ctx, rca); + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, pendingHash, abi.encode(indexer.allocationId), 0); + + IIndexingAgreement.AgreementWrapper memory wrapper = subgraphService.getIndexingAgreement(agreementId); + assertEq(wrapper.agreement.allocationId, indexer.allocationId, "allocationId should be unchanged"); + } + + /// @notice second agreement attempting to bind to an already-bound allocation → rejected + function test_SecondAgreement_BindToAlreadyBoundAllocation_Rejected(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + _withAcceptedIndexingAgreement(ctx, indexer); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _generateAcceptableRCA(ctx, indexer.addr); + // Shift deadline to produce a different agreementId (deadline is part of the ID hash) + rca2.deadline = rca2.deadline + 1; + + resetPrank(rca2.payer); + bytes16 agreementId2 = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca2), 0).agreementId; + + bytes32 versionHash = recurringCollector.getAgreementVersionAt(agreementId2, 0).versionHash; + vm.expectRevert( + abi.encodeWithSelector( + IndexingAgreement.AllocationAlreadyHasIndexingAgreement.selector, + indexer.allocationId + ) + ); + resetPrank(indexer.addr); + recurringCollector.accept(agreementId2, versionHash, abi.encode(indexer.allocationId), 0); + } + + // ══════════════════════════════════════════════════════════════════════ + // Revival and SETTLED transitions (decisions 3/5) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice BY_PROVIDER cancel (SETTLED) + same-terms update → acceptAgreement called (revival) + function test_Revival_AfterByProviderCancel_AcceptAgreementCalled(Seed 
memory seed) public {
+        Context storage ctx = _newCtx(seed);
+        IndexerState memory indexer = _withIndexer(ctx);
+        (
+            IRecurringCollector.RecurringCollectionAgreement memory rca,
+            bytes16 agreementId
+        ) = _withAcceptedIndexingAgreement(ctx, indexer);
+
+        _cancelAgreement(ctx, agreementId, indexer.addr, rca.payer, true);
+
+        // NOTE(review): only ONE update is offered here (no same-metadata second update);
+        // the SETTLED state alone is what forces acceptAgreement below, per the assertion messages.
+        IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _generateAcceptableRCAU(ctx, rca);
+
+        resetPrank(rca.payer);
+        recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0);
+
+        // Accept — agreement is SETTLED so acceptAgreement MUST be called regardless of metadata
+        bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash;
+
+        vm.recordLogs();
+        resetPrank(indexer.addr);
+        recurringCollector.accept(agreementId, pendingHash, bytes(""), 0);
+
+        // Verify acceptAgreement was called by checking for IndexingAgreementUpdated event
+        Vm.Log[] memory logs = vm.getRecordedLogs();
+        bytes32 updatedSig = IndexingAgreement.IndexingAgreementUpdated.selector;
+        bool found;
+        for (uint256 i; i < logs.length; i++) {
+            if (logs[i].topics[0] == updatedSig) {
+                found = true;
+                break;
+            }
+        }
+        assertTrue(found, "IndexingAgreementUpdated should be emitted on SETTLED revival");
+
+        // Agreement should be active again
+        IRecurringCollector.AgreementData memory data = recurringCollector.getAgreementData(agreementId);
+        assertEq(data.state & SETTLED, 0, "SETTLED should be cleared after revival");
+        assertEq(data.state & ACCEPTED, ACCEPTED, "ACCEPTED should be set");
+    }
+
+    /// @notice revival-after-SETTLED with allocation still open → allowed without new allocationId
+    function test_Revival_AllocationStillOpen_NoRebindNeeded(Seed memory seed) public {
+        Context storage ctx = _newCtx(seed);
+        IndexerState memory indexer = _withIndexer(ctx);
+        (
+
IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexer); + + // Cancel BY_PROVIDER → SETTLED, but allocation mapping NOT cleared (decision 1) + _cancelAgreement(ctx, agreementId, indexer.addr, rca.payer, true); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _generateAcceptableRCAU(ctx, rca); + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + // Accept without extraData — allocation is still open and bound + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); + + IIndexingAgreement.AgreementWrapper memory wrapper = subgraphService.getIndexingAgreement(agreementId); + assertEq(wrapper.agreement.allocationId, indexer.allocationId, "should still be bound to original allocation"); + } + + // ══════════════════════════════════════════════════════════════════════ + // acceptAgreement skip on no-op update (decision 5 — collector-side) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice update with metadata unchanged, no extraData, not SETTLED → acceptAgreement not called + function test_NoOpUpdate_SkipsAcceptAgreement(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexer); + + // First update to set activeTerms.metadata in Update format, then second with same metadata + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _withFirstUpdateThenSameMetadataRCAU( + ctx, + rca, + agreementId, + indexer.addr + ); + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau2), 0); + + bytes32 pendingHash = 
recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + + vm.recordLogs(); + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); + + // IndexingAgreementUpdated should NOT be present (acceptAgreement was skipped) + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 updatedSig = IndexingAgreement.IndexingAgreementUpdated.selector; + bool found; + for (uint256 i; i < logs.length; i++) { + if (logs[i].topics[0] == updatedSig) { + found = true; + break; + } + } + assertFalse(found, "IndexingAgreementUpdated should NOT be emitted when acceptAgreement is skipped"); + } + + /// @notice update with metadata unchanged but extraData present → acceptAgreement called + function test_Update_WithExtraData_ForcesAcceptAgreement(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexer); + + // First update to set activeTerms.metadata in Update format, then same metadata + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _withFirstUpdateThenSameMetadataRCAU( + ctx, + rca, + agreementId, + indexer.addr + ); + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau2), 0); + + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + + vm.recordLogs(); + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, pendingHash, abi.encode(indexer.allocationId), 0); + + // IndexingAgreementUpdated SHOULD be present (extraData forces callback) + Vm.Log[] memory logs = vm.getRecordedLogs(); + bytes32 updatedSig = IndexingAgreement.IndexingAgreementUpdated.selector; + bool found; + for (uint256 i; i < logs.length; i++) { + if (logs[i].topics[0] == updatedSig) { + found = true; + break; + } + } + assertTrue(found, "IndexingAgreementUpdated 
should be emitted when extraData is present"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Rebind to different allocation (decision 3) + // ══════════════════════════════════════════════════════════════════════ + + /// @dev Create a second allocation for the same indexer by adding tokens to their provision. + /// Derives a deterministic, collision-free allocation key using makeAddrAndKey. + function _withSecondAllocation(IndexerState memory _indexer) internal returns (address newAllocationId) { + (address allocId, uint256 allocationKey) = makeAddrAndKey( + string.concat("secondAllocation-", vm.toString(_indexer.addr)) + ); + newAllocationId = allocId; + + uint256 tokens = MINIMUM_PROVISION_TOKENS; + mint(_indexer.addr, tokens); + + resetPrank(_indexer.addr); + token.approve(address(staking), tokens); + staking.stakeTo(_indexer.addr, tokens); + staking.addToProvision(_indexer.addr, address(subgraphService), tokens); + + bytes memory data = _createSubgraphAllocationData( + _indexer.addr, + _indexer.subgraphDeploymentId, + allocationKey, + tokens + ); + subgraphService.startService(_indexer.addr, data); + } + + /// @notice rebind to new allocation while old allocation still open → allowed, old mapping cleared + function test_Rebind_ToNewAllocation_WhileOldOpen(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexer); + + address newAllocationId = _withSecondAllocation(indexer); + + // Offer update, accept with new allocation in extraData + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _generateAcceptableRCAU(ctx, rca); + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + 
resetPrank(indexer.addr); + recurringCollector.accept(agreementId, pendingHash, abi.encode(newAllocationId), 0); + + // Verify agreement is now bound to new allocation + IIndexingAgreement.AgreementWrapper memory wrapper = subgraphService.getIndexingAgreement(agreementId); + assertEq(wrapper.agreement.allocationId, newAllocationId, "should be bound to new allocation"); + } + + /// @notice rebind to new allocation after close → allowed, old mapping cleared, new mapping set + function test_Rebind_ToNewAllocation_AfterClose(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexer); + + // Cancel BY_PROVIDER → SETTLED + _cancelAgreement(ctx, agreementId, indexer.addr, rca.payer, true); + + // Close old allocation (SETTLED, so allowed) + resetPrank(indexer.addr); + subgraphService.stopService(indexer.addr, abi.encode(indexer.allocationId)); + + // Create second allocation + address newAllocationId = _withSecondAllocation(indexer); + + // Offer update to revive, accept with new allocation + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _generateAcceptableRCAU(ctx, rca); + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, pendingHash, abi.encode(newAllocationId), 0); + + // Verify agreement is now bound to new allocation + IIndexingAgreement.AgreementWrapper memory wrapper = subgraphService.getIndexingAgreement(agreementId); + assertEq(wrapper.agreement.allocationId, newAllocationId, "should be bound to new allocation"); + } + + /// @notice rebind to closed allocation → rejected (new allocation must be open) + function 
test_Rebind_ToClosedAllocation_Rejected(Seed memory seed) public {
+        Context storage ctx = _newCtx(seed);
+        IndexerState memory indexer = _withIndexer(ctx);
+        (
+            IRecurringCollector.RecurringCollectionAgreement memory rca,
+            bytes16 agreementId
+        ) = _withAcceptedIndexingAgreement(ctx, indexer);
+
+        // Create second allocation, then close it
+        address closedAllocationId = _withSecondAllocation(indexer);
+
+        // NOTE(review): no cancel happens here (stale comment removed) — the second
+        // allocation was never bound to any agreement, so the indexer closes it directly.
+        resetPrank(indexer.addr);
+        subgraphService.stopService(indexer.addr, abi.encode(closedAllocationId));
+
+        // Now try to rebind the agreement to the closed allocation
+        IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _generateAcceptableRCAU(ctx, rca);
+        resetPrank(rca.payer);
+        recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0);
+
+        bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash;
+        vm.expectRevert(
+            abi.encodeWithSelector(AllocationHandler.AllocationHandlerAllocationClosed.selector, closedAllocationId)
+        );
+        resetPrank(indexer.addr);
+        recurringCollector.accept(agreementId, pendingHash, abi.encode(closedAllocationId), 0);
+    }
+
+    /// @notice BY_PROVIDER cancel (SETTLED) + update with new allocation → service resumes
+    function test_Revival_WithNewAllocation_AfterByProviderCancel(Seed memory seed) public {
+        Context storage ctx = _newCtx(seed);
+        IndexerState memory indexer = _withIndexer(ctx);
+        (
+            IRecurringCollector.RecurringCollectionAgreement memory rca,
+            bytes16 agreementId
+        ) = _withAcceptedIndexingAgreement(ctx, indexer);
+
+        // Cancel BY_PROVIDER → SETTLED
+        _cancelAgreement(ctx, agreementId, indexer.addr, rca.payer, true);
+
+        // Create second allocation
+        address newAllocationId = _withSecondAllocation(indexer);
+
+        // Offer update with new allocation to revive service on different allocation
+
IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _generateAcceptableRCAU(ctx, rca); + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, pendingHash, abi.encode(newAllocationId), 0); + + // Verify agreement is revived and bound to new allocation + IRecurringCollector.AgreementData memory data = recurringCollector.getAgreementData(agreementId); + assertEq(data.state & SETTLED, 0, "SETTLED should be cleared"); + assertEq(data.state & ACCEPTED, ACCEPTED, "ACCEPTED should be set"); + + IIndexingAgreement.AgreementWrapper memory wrapper = subgraphService.getIndexingAgreement(agreementId); + assertEq(wrapper.agreement.allocationId, newAllocationId, "should be bound to new allocation"); + } + + // ══════════════════════════════════════════════════════════════════════ + // Cross-deployment rebinding after allocation close (audit #1) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice After allocation close clears allocationId to address(0), rebinding to + /// a different subgraph deployment must still be rejected. 
+ function test_Rebind_AfterClose_ToDifferentDeployment_Rejected(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexer); + + // Cancel BY_PROVIDER → SETTLED + _cancelAgreement(ctx, agreementId, indexer.addr, rca.payer, true); + + // Close old allocation (SETTLED, so allowed) — clears agreement.allocationId to address(0) + resetPrank(indexer.addr); + subgraphService.stopService(indexer.addr, abi.encode(indexer.allocationId)); + + IIndexingAgreement.AgreementWrapper memory wrapperAfterClose = subgraphService.getIndexingAgreement( + agreementId + ); + assertEq(wrapperAfterClose.agreement.allocationId, address(0), "allocationId should be cleared after close"); + + // Create a second allocation on a DIFFERENT subgraph deployment + bytes32 differentDeploymentId = keccak256(abi.encode(indexer.subgraphDeploymentId, "different")); + (address diffAllocId, uint256 diffAllocKey) = makeAddrAndKey( + string.concat("diffDeployAlloc-", vm.toString(indexer.addr)) + ); + + uint256 tokens = MINIMUM_PROVISION_TOKENS; + mint(indexer.addr, tokens); + resetPrank(indexer.addr); + token.approve(address(staking), tokens); + staking.stakeTo(indexer.addr, tokens); + staking.addToProvision(indexer.addr, address(subgraphService), tokens); + + bytes memory allocData = _createSubgraphAllocationData( + indexer.addr, + differentDeploymentId, + diffAllocKey, + tokens + ); + subgraphService.startService(indexer.addr, allocData); + + // Attempt to rebind to the allocation on a different deployment — must revert + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _generateAcceptableRCAU(ctx, rca); + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + 
vm.expectRevert( + abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementDeploymentIdMismatch.selector, + indexer.subgraphDeploymentId, + diffAllocId, + differentDeploymentId + ) + ); + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, pendingHash, abi.encode(diffAllocId), 0); + } + + /// @notice Verify stored subgraphDeploymentId is accessible after accept + function test_Accept_StoresSubgraphDeploymentId(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + (, bytes16 agreementId) = _withAcceptedIndexingAgreement(ctx, indexer); + + IIndexingAgreement.AgreementWrapper memory wrapper = subgraphService.getIndexingAgreement(agreementId); + assertEq( + wrapper.agreement.subgraphDeploymentId, + indexer.subgraphDeploymentId, + "subgraphDeploymentId should be stored on State at initial accept" + ); + } + + // ══════════════════════════════════════════════════════════════════════ + // Metadata-based skip / force (decision 5) + // ══════════════════════════════════════════════════════════════════════ + + /// @notice update with different metadata, no extraData, not SETTLED → acceptAgreement called + function test_Update_MetadataChanged_CallsAcceptAgreement(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexer); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _generateAcceptableRCAU(ctx, rca); + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + + IndexingAgreement.UpdateIndexingAgreementMetadata memory meta = abi.decode( + rcau.metadata, + (IndexingAgreement.UpdateIndexingAgreementMetadata) + ); + 
vm.expectEmit(address(subgraphService)); + emit IndexingAgreement.IndexingAgreementUpdated( + indexer.addr, + rca.payer, + agreementId, + indexer.allocationId, + meta.version, + meta.terms + ); + + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/lifecycleEdgeCases.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/lifecycleEdgeCases.t.sol new file mode 100644 index 000000000..f5a2e89a6 --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/lifecycleEdgeCases.t.sol @@ -0,0 +1,606 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE, + REGISTERED, + ACCEPTED, + SETTLED, + NOTICE_GIVEN, + BY_PAYER, + BY_PROVIDER, + UPDATE +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; + +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; + +import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; + +/// @title Lifecycle Edge Case Tests +/// @notice Tests for edge cases identified during audit review: +/// - Agreement expiration without collection (audit gap 5) +/// - Collection during payer cancellation notice period (audit gap 7) +/// - Sequential update stacking -- replacing pending updates (audit gap 8) +/// - Multi-cycle revival chain with full state verification (audit gap 9) +/// - Callback revert on accept rolls back cleanly (audit gap 14) +/// - Notice 
period shorter than minSecondsPerCollection (audit gap 15) +contract LifecycleEdgeCasesTest is SubgraphServiceIndexingAgreementSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // ══════════════════════════════════════════════════════════════════════ + // Helpers + // ══════════════════════════════════════════════════════════════════════ + + /// @dev Fund payer's escrow for the (recurringCollector, indexer) pair + function _setupPayerWithEscrow(address _payer, address _indexer, uint256 _tokens) internal { + deal({ token: address(token), to: _payer, give: _tokens }); + vm.startPrank(_payer); + token.approve(address(escrow), _tokens); + escrow.deposit(address(recurringCollector), _indexer, _tokens); + vm.stopPrank(); + } + + /// @dev Add provision capacity so fee locking succeeds during collection + function _addTokensToProvision(IndexerState memory _indexer, uint256 _tokens) internal { + deal({ token: address(token), to: _indexer.addr, give: _tokens }); + vm.startPrank(_indexer.addr); + _addToProvision(_indexer.addr, _tokens); + vm.stopPrank(); + } + + /// @dev Create an accepted agreement with controlled timing parameters. + /// Calls sensibleRCA first (to get valid bounded fields), then overrides + /// timing params. Optionally funds escrow. 
+ function _withControlledAgreement( + Context storage _ctx, + IndexerState memory _indexer, + uint64 _endsAt, + uint32 _minSecondsPerCollection, + uint32 _maxSecondsPerCollection, + uint32 _minSecondsPayerCancellationNotice, + uint256 _maxOngoingTokensPerSecond, + uint256 _escrowAmount + ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) { + IndexingAgreement.IndexingAgreementTermsV1 memory terms = IndexingAgreement.IndexingAgreementTermsV1({ + tokensPerSecond: 1, + tokensPerEntityPerSecond: 0 + }); + + // Get sensible defaults from fuzz seed, then override what we need + rca = _recurringCollectorHelper.sensibleRCA(_ctx.ctxInternal.seed.rca); + rca.serviceProvider = _indexer.addr; + rca.dataService = address(subgraphService); + rca.metadata = _encodeAcceptIndexingAgreementMetadataV1(_indexer.subgraphDeploymentId, terms); + rca.deadline = uint64(block.timestamp + 1 hours); + rca.endsAt = _endsAt; + rca.minSecondsPerCollection = _minSecondsPerCollection; + rca.maxSecondsPerCollection = _maxSecondsPerCollection; + rca.minSecondsPayerCancellationNotice = _minSecondsPayerCancellationNotice; + rca.maxOngoingTokensPerSecond = _maxOngoingTokensPerSecond; + rca.maxInitialTokens = 0; + + // Exclude addresses that would conflict with protocol contracts, proxy admins, or test users. + // Full _isSafeSubgraphServiceCaller check is needed because the payer interacts with + // proxied contracts (token.approve, escrow.deposit) and would trigger ProxyDeniedAdminAccess. 
+ vm.assume(_isSafeSubgraphServiceCaller(rca.payer)); + vm.assume(!_isTestUser(rca.payer)); + vm.assume(rca.payer != _indexer.addr); + + if (_escrowAmount > 0) { + _setupPayerWithEscrow(rca.payer, _indexer.addr, _escrowAmount); + } + + // Offer + vm.prank(rca.payer); + agreementId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + // Accept + bytes32 versionHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(_indexer.addr); + recurringCollector.accept(agreementId, versionHash, abi.encode(_indexer.allocationId), 0); + } + + /// @dev Collect indexing fees with a simple 1-entity POI + function _collectIndexingFees(address _indexer, bytes16 _agreementId) internal returns (uint256) { + return + subgraphService.collect( + _indexer, + IGraphPayments.PaymentTypes.IndexingFee, + _encodeCollectDataV1(_agreementId, 1, keccak256("poi"), epochManager.currentEpochBlock(), bytes("")) + ); + } + + // ══════════════════════════════════════════════════════════════════════ + // 5. Agreement expiration without collection + // ══════════════════════════════════════════════════════════════════════ + + /// @notice Agreement accepted but never collected -- after endsAt, state is still + /// ACCEPTED (not auto-settled) and a final collection is possible. 
+ function test_ExpirationWithoutCollection_StillCollectable(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + + uint64 endsAt = uint64(block.timestamp + 1 hours); + (, bytes16 agreementId) = _withControlledAgreement( + ctx, + indexer, + endsAt, + 60, // minSecondsPerCollection + 3600, // maxSecondsPerCollection + 0, // no cancellation notice + 1 ether, // maxOngoingTokensPerSecond + 0 // no escrow (state check only) + ); + + // Warp past endsAt + vm.warp(endsAt + 1); + + IRecurringCollector.AgreementData memory data = recurringCollector.getAgreementData(agreementId); + assertEq(data.state & ACCEPTED, ACCEPTED, "should still be ACCEPTED after expiry"); + assertEq(data.state & SETTLED, 0, "should NOT be SETTLED without collection trigger"); + assertTrue(data.isCollectable, "should be collectable for final collection after expiry"); + assertEq(data.collectableUntil, endsAt, "collectableUntil should equal endsAt"); + } + + /// @notice Agreement expires → collection after expiry settles it + function test_ExpirationWithoutCollection_SettlesOnCollect(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + + uint64 endsAt = uint64(block.timestamp + 1 hours); + // 1 token/sec * 3600 sec = 3600 tokens max + uint256 maxTokens = 3600; + _addTokensToProvision(indexer, maxTokens * STAKE_TO_FEES_RATIO); + + (, bytes16 agreementId) = _withControlledAgreement( + ctx, + indexer, + endsAt, + 1, // minSecondsPerCollection = 1 + 3600, // maxSecondsPerCollection + 0, // no cancellation notice + 1, // 1 token/sec + maxTokens // escrow + ); + + resetPrank(indexer.addr); + subgraphService.setPaymentsDestination(indexer.addr); + + // Warp past endsAt + vm.warp(endsAt + 100); + + // Collect after expiry -- should succeed (minSecondsPerCollection waived past collectableUntil) + resetPrank(indexer.addr); + uint256 tokensCollected = 
_collectIndexingFees(indexer.addr, agreementId); + assertTrue(tokensCollected > 0, "should collect tokens after expiry"); + + // Agreement should now be SETTLED (maxNextClaim = 0 after consuming full window) + IRecurringCollector.AgreementData memory data = recurringCollector.getAgreementData(agreementId); + assertEq(data.state & SETTLED, SETTLED, "should be SETTLED after final collection past endsAt"); + } + + // ══════════════════════════════════════════════════════════════════════ + // 7. Collection during notice period + // ══════════════════════════════════════════════════════════════════════ + + /// @notice After payer cancel, provider can still collect during the notice window + function test_CollectDuringNoticePeriod_Succeeds(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + + uint256 maxTokens = 100_000; + _addTokensToProvision(indexer, maxTokens * STAKE_TO_FEES_RATIO); + + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _withControlledAgreement( + ctx, + indexer, + uint64(block.timestamp + 365 days), // long-running agreement + 60, // minSecondsPerCollection = 60s + 3600, // maxSecondsPerCollection = 1h + 7200, // minSecondsPayerCancellationNotice = 2h + 1, // 1 token/sec + maxTokens + ); + + resetPrank(indexer.addr); + subgraphService.setPaymentsDestination(indexer.addr); + + // First collection + skip(100); + resetPrank(indexer.addr); + uint256 firstCollect = _collectIndexingFees(indexer.addr, agreementId); + assertTrue(firstCollect > 0, "first collection should succeed"); + + // Payer cancels → NOTICE_GIVEN with 2h notice + resetPrank(rca.payer); + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + recurringCollector.cancel(agreementId, activeHash, 0); + + IRecurringCollector.AgreementData memory afterCancel = recurringCollector.getAgreementData(agreementId); + assertEq(afterCancel.state & NOTICE_GIVEN, 
NOTICE_GIVEN, "should be NOTICE_GIVEN"); + assertEq(afterCancel.state & BY_PAYER, BY_PAYER, "should be BY_PAYER"); + assertEq(afterCancel.state & SETTLED, 0, "should NOT be SETTLED during notice"); + + // Provider collects during notice period + skip(100); // satisfy minSecondsPerCollection (100 > 60) + resetPrank(indexer.addr); + uint256 secondCollect = _collectIndexingFees(indexer.addr, agreementId); + assertTrue(secondCollect > 0, "collection during notice period should succeed"); + + // Verify tokens from second collection are bounded by collectableUntil, not endsAt + assertTrue(secondCollect < 3600, "tokens should be bounded (not a full maxSecondsPerCollection window)"); + } + + // ══════════════════════════════════════════════════════════════════════ + // 8. Sequential update stacking + // ══════════════════════════════════════════════════════════════════════ + + /// @notice Offering update nonce=2 before nonce=1 is accepted replaces pending terms + function test_UpdateStacking_SecondReplacesFirst(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexer); + + // Capture original collectableUntil (== activeTerms.endsAt after accept) + IRecurringCollector.AgreementData memory original = recurringCollector.getAgreementData(agreementId); + uint64 originalCollectableUntil = original.collectableUntil; + + // Offer first update (nonce=1) + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _generateAcceptableRCAU(ctx, rca); + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau1), 0); + + IRecurringCollector.AgreementData memory afterFirst = recurringCollector.getAgreementData(agreementId); + assertEq(afterFirst.updateNonce, 1, "updateNonce should be 1 after first offer"); + // Active terms unchanged: collectableUntil 
still reflects original + assertEq(afterFirst.collectableUntil, originalCollectableUntil, "active terms should be unchanged"); + // Pending version exists + assertEq(recurringCollector.getAgreementVersionCount(agreementId), 2, "should have active + pending version"); + + // Offer second update (nonce=2) with different endsAt + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _generateAcceptableRCAU(ctx, rca); + rcau2.nonce = 2; + rcau2.endsAt = rcau1.endsAt + 1 days; + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau2), 0); + + IRecurringCollector.AgreementData memory afterSecond = recurringCollector.getAgreementData(agreementId); + assertEq(afterSecond.updateNonce, 2, "updateNonce should be 2 after second offer"); + assertEq(afterSecond.collectableUntil, originalCollectableUntil, "active terms still unchanged"); + + // Accept second update -- first is effectively discarded + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); + + IRecurringCollector.AgreementData memory afterAccept = recurringCollector.getAgreementData(agreementId); + assertEq(afterAccept.collectableUntil, rcau2.endsAt, "collectableUntil should reflect second update's endsAt"); + assertEq(recurringCollector.getAgreementVersionCount(agreementId), 1, "only active version after accept"); + assertEq(afterAccept.state & ACCEPTED, ACCEPTED, "should be ACCEPTED"); + assertEq(afterAccept.state & SETTLED, 0, "should not be SETTLED"); + } + + /// @notice Accept first update, then offer and accept second -- clean sequential cycle + function test_UpdateStacking_SequentialAcceptances(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = 
_withAcceptedIndexingAgreement(ctx, indexer); + + // --- First update cycle --- + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _generateAcceptableRCAU(ctx, rca); + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau1), 0); + + bytes32 hash1 = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, hash1, bytes(""), 0); + + IRecurringCollector.AgreementData memory afterFirst = recurringCollector.getAgreementData(agreementId); + assertEq(afterFirst.collectableUntil, rcau1.endsAt, "collectableUntil should reflect first update"); + assertEq(recurringCollector.getAgreementVersionCount(agreementId), 1, "only active version after first accept"); + assertEq(afterFirst.state, REGISTERED | ACCEPTED | UPDATE, "state includes UPDATE after accepting update"); + + // --- Second update cycle --- + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _generateAcceptableRCAU(ctx, rca); + rcau2.nonce = 2; + rcau2.endsAt = rcau1.endsAt + 1 days; + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau2), 0); + + bytes32 hash2 = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, hash2, bytes(""), 0); + + IRecurringCollector.AgreementData memory afterSecond = recurringCollector.getAgreementData(agreementId); + assertEq(afterSecond.collectableUntil, rcau2.endsAt, "collectableUntil should reflect second update"); + assertEq( + recurringCollector.getAgreementVersionCount(agreementId), + 1, + "only active version after second accept" + ); + assertEq(afterSecond.state, REGISTERED | ACCEPTED | UPDATE, "state includes UPDATE after sequential updates"); + assertEq(afterSecond.updateNonce, 2, "updateNonce should be 2"); + } + + // ══════════════════════════════════════════════════════════════════════ + // 9. 
Revival chain -- multi-cycle cancel/revive + // ══════════════════════════════════════════════════════════════════════ + + /// @notice Cancel → revive → cancel → revive, verifying full state word at each step + function test_RevivalChain_DoubleCycleFullStateVerification(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + ( + IRecurringCollector.RecurringCollectionAgreement memory rca, + bytes16 agreementId + ) = _withAcceptedIndexingAgreement(ctx, indexer); + + uint64 originalAcceptedAt; + uint64 rcau1EndsAt; + + // ── Step 1: Verify initial ACCEPTED state ── + { + IRecurringCollector.AgreementData memory s1 = recurringCollector.getAgreementData(agreementId); + assertEq(s1.state, REGISTERED | ACCEPTED, "step1: should be REGISTERED|ACCEPTED"); + originalAcceptedAt = s1.acceptedAt; + } + + // ── Step 2: Cancel by provider → immediate SETTLED ── + _cancelAgreement(ctx, agreementId, indexer.addr, rca.payer, true); + + { + IRecurringCollector.AgreementData memory s2 = recurringCollector.getAgreementData(agreementId); + // Provider cancel does NOT immediately set SETTLED — it sets collectableUntil = now + // so the provider can still collect for work done since lastCollectionAt. SETTLED is + // set later when the final collection drains the remaining window. 
+ assertEq( + s2.state, + REGISTERED | ACCEPTED | NOTICE_GIVEN | BY_PROVIDER, + "step2: NOTICE_GIVEN|BY_PROVIDER (not yet SETTLED)" + ); + + // SS-level: allocation should still be bound (provider cancel doesn't clear mappings) + IIndexingAgreement.AgreementWrapper memory w2 = subgraphService.getIndexingAgreement(agreementId); + assertEq(w2.agreement.allocationId, indexer.allocationId, "step2: allocation still bound"); + } + + // ── Step 3: Offer update (nonce=1) and accept → first revival ── + { + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau1 = _generateAcceptableRCAU(ctx, rca); + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau1), 0); + + bytes32 hash1 = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, hash1, bytes(""), 0); + + IRecurringCollector.AgreementData memory s3 = recurringCollector.getAgreementData(agreementId); + assertEq(s3.state, REGISTERED | ACCEPTED | UPDATE, "step3: revived -- REGISTERED|ACCEPTED|UPDATE"); + assertEq(s3.state & SETTLED, 0, "step3: SETTLED cleared"); + assertEq(s3.state & NOTICE_GIVEN, 0, "step3: NOTICE_GIVEN cleared"); + assertEq(s3.state & BY_PROVIDER, 0, "step3: BY_PROVIDER cleared"); + assertTrue(s3.acceptedAt >= originalAcceptedAt, "step3: acceptedAt refreshed on revival"); + rcau1EndsAt = rcau1.endsAt; + assertEq(s3.collectableUntil, rcau1EndsAt, "step3: collectableUntil from first update"); + + IIndexingAgreement.AgreementWrapper memory w3 = subgraphService.getIndexingAgreement(agreementId); + assertEq(w3.agreement.allocationId, indexer.allocationId, "step3: allocation still bound"); + } + + // ── Step 4: Cancel by provider again → SETTLED ── + _cancelAgreement(ctx, agreementId, indexer.addr, rca.payer, true); + + { + IRecurringCollector.AgreementData memory s4 = recurringCollector.getAgreementData(agreementId); + // UPDATE persists from the step 3 revival (accepted an update) 
+ assertEq( + s4.state, + REGISTERED | ACCEPTED | NOTICE_GIVEN | BY_PROVIDER | UPDATE, + "step4: NOTICE_GIVEN|BY_PROVIDER|UPDATE (not yet SETTLED)" + ); + } + + // ── Step 5: Second revival ── + { + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau2 = _generateAcceptableRCAU(ctx, rca); + rcau2.nonce = 2; + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau2), 0); + + bytes32 hash2 = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, hash2, bytes(""), 0); + + IRecurringCollector.AgreementData memory s5 = recurringCollector.getAgreementData(agreementId); + assertEq(s5.state, REGISTERED | ACCEPTED | UPDATE, "step5: second revival -- REGISTERED|ACCEPTED|UPDATE"); + assertEq( + s5.state & (SETTLED | NOTICE_GIVEN | BY_PROVIDER | BY_PAYER), + 0, + "step5: all cancel flags cleared after second revival" + ); + assertEq(s5.collectableUntil, rcau2.endsAt, "step5: collectableUntil from second update"); + assertEq(s5.updateNonce, 2, "step5: updateNonce should be 2"); + + IIndexingAgreement.AgreementWrapper memory w5 = subgraphService.getIndexingAgreement(agreementId); + assertEq(w5.agreement.allocationId, indexer.allocationId, "step5: allocation bound after double revival"); + } + } + + /// @notice After revival, collection actually works (not just state looks right) + function test_RevivalChain_CollectionWorksAfterRevival(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + + uint256 maxTokens = 100_000; + _addTokensToProvision(indexer, maxTokens * STAKE_TO_FEES_RATIO); + + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _withControlledAgreement( + ctx, + indexer, + uint64(block.timestamp + 365 days), + 1, // minSecondsPerCollection + 3600, // maxSecondsPerCollection + 0, // no notice + 1, // 1 token/sec + maxTokens + ); + + 
resetPrank(indexer.addr); + subgraphService.setPaymentsDestination(indexer.addr); + + // Collect once before cancel + skip(100); + resetPrank(indexer.addr); + uint256 preRevivalCollect = _collectIndexingFees(indexer.addr, agreementId); + assertTrue(preRevivalCollect > 0, "pre-revival collection should succeed"); + + // Cancel by provider → collectableUntil = now + _cancelAgreement(ctx, agreementId, indexer.addr, rca.payer, true); + + // Revive via controlled update with known timing and token rate. + // Using _generateAcceptableRCAU would re-bound minSecondsPerCollection to >=600s + // and maxOngoingTokensPerSecond to potentially huge values, making escrow unpredictable. + IndexingAgreement.IndexingAgreementTermsV1 memory updateTerms = IndexingAgreement.IndexingAgreementTermsV1({ + tokensPerSecond: 1, + tokensPerEntityPerSecond: 0 + }); + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau; + rcau.agreementId = recurringCollector.generateAgreementId( + rca.payer, + rca.dataService, + rca.serviceProvider, + rca.deadline, + rca.nonce + ); + rcau.deadline = uint64(block.timestamp + 1 hours); + rcau.endsAt = uint64(block.timestamp + 365 days); + rcau.maxInitialTokens = 0; + rcau.maxOngoingTokensPerSecond = 1; + rcau.minSecondsPerCollection = 1; + rcau.maxSecondsPerCollection = 3600; + rcau.conditions = 0; + rcau.minSecondsPayerCancellationNotice = 0; + rcau.nonce = 1; + rcau.metadata = _encodeUpdateIndexingAgreementMetadataV1( + _newUpdateIndexingAgreementMetadataV1(updateTerms.tokensPerSecond, updateTerms.tokensPerEntityPerSecond) + ); + + resetPrank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + + bytes32 hash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + resetPrank(indexer.addr); + recurringCollector.accept(agreementId, hash, bytes(""), 0); + + // Collection after revival should produce tokens + skip(100); + resetPrank(indexer.addr); + uint256 postRevivalCollect = 
_collectIndexingFees(indexer.addr, agreementId); + assertTrue(postRevivalCollect > 0, "post-revival collection should succeed"); + } + + // ══════════════════════════════════════════════════════════════════════ + // 14. Callback revert on accept + // ══════════════════════════════════════════════════════════════════════ + + /// @notice When the data service callback reverts during accept, the collector state + /// stays in REGISTERED (the whole tx rolls back). + function test_CallbackRevertOnAccept_CollectorStateUnchanged(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + + // Offer an agreement + IRecurringCollector.RecurringCollectionAgreement memory rca = _generateAcceptableRCA(ctx, indexer.addr); + + resetPrank(rca.payer); + bytes16 agreementId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + + // Close the allocation so the SS acceptAgreement callback will revert + resetPrank(indexer.addr); + subgraphService.stopService(indexer.addr, abi.encode(indexer.allocationId)); + + // Try to accept -- callback reverts, entire tx reverts + bytes32 versionHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + resetPrank(indexer.addr); + vm.expectRevert(); + recurringCollector.accept(agreementId, versionHash, abi.encode(indexer.allocationId), 0); + + // Collector state should remain REGISTERED (not ACCEPTED) + IRecurringCollector.AgreementData memory data = recurringCollector.getAgreementData(agreementId); + assertEq(data.state, REGISTERED, "should remain REGISTERED after failed accept"); + } + + // ══════════════════════════════════════════════════════════════════════ + // 15. 
Notice period shorter than minSecondsPerCollection + // ══════════════════════════════════════════════════════════════════════ + + /// @notice When cancellation notice (60s) < minSecondsPerCollection (3600s), + /// the provider cannot collect during the notice window but CAN collect + /// after collectableUntil because minSecondsPerCollection is waived. + function test_NoticeShorterThanMinCollection_FinalCollectAfterExpiry(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexer = _withIndexer(ctx); + + uint256 maxTokens = 100_000; + _addTokensToProvision(indexer, maxTokens * STAKE_TO_FEES_RATIO); + + (IRecurringCollector.RecurringCollectionAgreement memory rca, bytes16 agreementId) = _withControlledAgreement( + ctx, + indexer, + uint64(block.timestamp + 365 days), + 3600, // minSecondsPerCollection = 1 hour (large) + 7200, // maxSecondsPerCollection = 2 hours + 60, // minSecondsPayerCancellationNotice = 1 minute (tiny) + 1, // 1 token/sec + maxTokens + ); + + resetPrank(indexer.addr); + subgraphService.setPaymentsDestination(indexer.addr); + + // First collection -- satisfies minSecondsPerCollection + skip(3600); + resetPrank(indexer.addr); + _collectIndexingFees(indexer.addr, agreementId); + + // Payer cancels -- collectableUntil = now + 60 + uint256 cancelTimestamp = block.timestamp; + resetPrank(rca.payer); + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + recurringCollector.cancel(agreementId, activeHash, 0); + + IRecurringCollector.AgreementData memory afterCancel = recurringCollector.getAgreementData(agreementId); + assertEq(afterCancel.state & NOTICE_GIVEN, NOTICE_GIVEN, "should be NOTICE_GIVEN"); + uint64 expectedCollectableUntil = uint64(cancelTimestamp + 60); + assertEq(afterCancel.collectableUntil, expectedCollectableUntil, "collectableUntil = now + notice"); + + // After collectableUntil: minSecondsPerCollection is waived + vm.warp(expectedCollectableUntil + 1); + + 
IRecurringCollector.AgreementData memory afterExpiry = recurringCollector.getAgreementData(agreementId); + assertTrue(afterExpiry.isCollectable, "should be collectable after collectableUntil (minSec waived)"); + + // Final collection succeeds + resetPrank(indexer.addr); + uint256 finalCollect = _collectIndexingFees(indexer.addr, agreementId); + assertTrue(finalCollect > 0, "final collection after notice expiry should succeed"); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/multiCollector.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/multiCollector.t.sol new file mode 100644 index 000000000..82e1428f3 --- /dev/null +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/multiCollector.t.sol @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.27; + +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; +import { IIndexingAgreement } from "@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; + +import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; + +import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; + +contract SubgraphServiceMultiCollectorTest is SubgraphServiceIndexingAgreementSharedTest { + /* solhint-disable graph/func-name-mixedcase */ + + // ==================== Unauthorized caller ==================== + + function test_AcceptAgreement_RevertWhen_UnauthorizedCollector(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + + address unauthorized = makeAddr("unauthorizedCollector"); + assertFalse(subgraphService.isAuthorizedCollector(unauthorized)); + + 
vm.prank(unauthorized); + vm.expectRevert(abi.encodeWithSelector(ISubgraphService.SubgraphServiceNotCollector.selector, unauthorized)); + subgraphService.acceptAgreement( + bytes16(uint128(1)), + bytes32(0), + indexerState.addr, + indexerState.addr, + bytes(""), + abi.encode(indexerState.allocationId) + ); + } + + // ==================== Cross-collector identity enforcement ==================== + + function test_AcceptAgreement_Update_RevertWhen_WrongCollector(Seed memory seed) public { + // Setup: create an agreement via the real RC + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (, bytes16 agreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + + // Authorize a second collector + address collectorB = makeAddr("collectorB"); + resetPrank(users.governor); + subgraphService.setAuthorizedCollector(collectorB, true); + + // collectorB is authorized but NOT the collector that owns this agreement + // The library enforces collector identity: initial stores, update requires match + resetPrank(collectorB); + vm.expectRevert( + abi.encodeWithSelector( + IndexingAgreement.IndexingAgreementCollectorMismatch.selector, + agreementId, + address(recurringCollector), + collectorB + ) + ); + subgraphService.acceptAgreement(agreementId, bytes32(0), address(0), indexerState.addr, bytes(""), bytes("")); + } + + // ==================== Collector stored correctly ==================== + + function test_AcceptAgreement_StoresCollector(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (, bytes16 agreementId) = _withAcceptedIndexingAgreement(ctx, indexerState); + + // Verify the stored collector by reading back the agreement wrapper — + // if the collector weren't stored, getIndexingAgreement would fail to + // fetch collectorAgreement data from the RC. 
+ IIndexingAgreement.AgreementWrapper memory wrapper = subgraphService.getIndexingAgreement(agreementId); + assertEq(wrapper.agreement.collector, address(recurringCollector)); + } + + // ==================== Deauthorization ==================== + + function test_DeauthorizedCollector_CannotAcceptNew(Seed memory seed) public { + // Deauthorize the RC + resetPrank(users.governor); + subgraphService.setAuthorizedCollector(address(recurringCollector), false); + + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + + // Build an RCA and offer it + IRecurringCollector.RecurringCollectionAgreement memory rca = _generateAcceptableRCA(ctx, indexerState.addr); + + vm.prank(rca.payer); + bytes16 agreementId = recurringCollector.offer(0, abi.encode(rca), 0).agreementId; + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + + // Accept should revert because RC is no longer authorized + vm.expectRevert( + abi.encodeWithSelector(ISubgraphService.SubgraphServiceNotCollector.selector, address(recurringCollector)) + ); + vm.prank(indexerState.addr); + recurringCollector.accept(agreementId, activeHash, abi.encode(indexerState.allocationId), 0); + } +} diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/shared.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/shared.t.sol index 32e7ff1e7..e9da8c4fd 100644 --- a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/shared.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/shared.t.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; import { IIndexingAgreement } from 
"@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; import { Strings } from "@openzeppelin/contracts/utils/Strings.sol"; @@ -27,8 +28,7 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun } struct PayerState { - address signer; - uint256 signerPrivateKey; + bool initialized; } struct ContextInternal { @@ -55,7 +55,7 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun } struct PayerSeed { - uint256 unboundedSignerPrivateKey; + bool placeholder; } Context internal _context; @@ -74,7 +74,7 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun function setUp() public override { super.setUp(); - _recurringCollectorHelper = new RecurringCollectorHelper(recurringCollector); + _recurringCollectorHelper = new RecurringCollectorHelper(recurringCollector, recurringCollectorProxyAdmin); } /* @@ -102,18 +102,15 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun bytes16 _agreementId, address _indexer, address _payer, - IRecurringCollector.CancelAgreementBy _by + bool byIndexer ) internal { - bool byIndexer = _by == IRecurringCollector.CancelAgreementBy.ServiceProvider; - vm.expectEmit(address(subgraphService)); - emit IndexingAgreement.IndexingAgreementCanceled(_indexer, _payer, _agreementId, byIndexer ? 
_indexer : _payer); - + bytes32 termsHash = recurringCollector.getAgreementVersionAt(_agreementId, 0).versionHash; if (byIndexer) { _subgraphServiceSafePrank(_indexer); - subgraphService.cancelIndexingAgreement(_indexer, _agreementId); + recurringCollector.cancel(_agreementId, termsHash, 0); } else { - _subgraphServiceSafePrank(_ctx.payer.signer); - subgraphService.cancelIndexingAgreementByPayer(_agreementId); + _subgraphServiceSafePrank(_payer); + recurringCollector.cancel(_agreementId, termsHash, 0); } } @@ -185,12 +182,6 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun rca = _recurringCollectorHelper.sensibleRCA(rca); - ( - IRecurringCollector.RecurringCollectionAgreement memory signedRca, - bytes memory signature - ) = _recurringCollectorHelper.generateSignedRCA(rca, _ctx.payer.signerPrivateKey); - _recurringCollectorHelper.authorizeSignerWithChecks(rca.payer, _ctx.payer.signerPrivateKey); - // Generate deterministic agreement ID for event expectation agreementId = recurringCollector.generateAgreementId( rca.payer, @@ -200,6 +191,13 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun rca.nonce ); + // Step 1: Payer submits offer to the collector + vm.prank(rca.payer); + bytes16 offeredId = recurringCollector.offer(OFFER_TYPE_NEW, abi.encode(rca), 0).agreementId; + assertEq(offeredId, agreementId); + + // Step 2: Service provider accepts via RC, which callbacks to SS + bytes32 versionHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; vm.expectEmit(address(subgraphService)); emit IndexingAgreement.IndexingAgreementAccepted( rca.serviceProvider, @@ -210,16 +208,10 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun metadata.version, metadata.terms ); - _subgraphServiceSafePrank(_indexerState.addr); - bytes16 actualAgreementId = subgraphService.acceptIndexingAgreement( - _indexerState.allocationId, - signedRca, - signature - ); + 
vm.prank(_indexerState.addr); + recurringCollector.accept(agreementId, versionHash, abi.encode(_indexerState.allocationId), 0); - // Verify the agreement ID matches expectation - assertEq(actualAgreementId, agreementId); - return (signedRca, agreementId); + return (rca, agreementId); } function _newCtx(Seed memory _seed) internal returns (Context storage) { @@ -234,24 +226,16 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun ctx.ctxInternal.indexers.push(_seed.indexer0); ctx.ctxInternal.indexers.push(_seed.indexer1); - // Setup payer - ctx.payer.signerPrivateKey = boundKey(ctx.ctxInternal.seed.payer.unboundedSignerPrivateKey); - ctx.payer.signer = vm.addr(ctx.payer.signerPrivateKey); + ctx.payer.initialized = true; return ctx; } - function _generateAcceptableSignedRCA( + function _generateAcceptableRCA( Context storage _ctx, address _indexerAddress - ) internal returns (IRecurringCollector.RecurringCollectionAgreement memory, bytes memory) { - IRecurringCollector.RecurringCollectionAgreement memory rca = _generateAcceptableRecurringCollectionAgreement( - _ctx, - _indexerAddress - ); - _recurringCollectorHelper.authorizeSignerWithChecks(rca.payer, _ctx.payer.signerPrivateKey); - - return _recurringCollectorHelper.generateSignedRCA(rca, _ctx.payer.signerPrivateKey); + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory) { + return _generateAcceptableRecurringCollectionAgreement(_ctx, _indexerAddress); } function _generateAcceptableRecurringCollectionAgreement( @@ -270,15 +254,15 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun return _recurringCollectorHelper.sensibleRCA(rca); } - function _generateAcceptableSignedRCAU( + function _generateAcceptableRCAU( Context storage _ctx, IRecurringCollector.RecurringCollectionAgreement memory _rca - ) internal view returns (IRecurringCollector.RecurringCollectionAgreementUpdate memory, bytes memory) { + ) internal view returns 
(IRecurringCollector.RecurringCollectionAgreementUpdate memory) { IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = _generateAcceptableRecurringCollectionAgreementUpdate(_ctx, _rca); // Set correct nonce for first update (should be 1) rcau.nonce = 1; - return _recurringCollectorHelper.generateSignedRCAU(rcau, _ctx.payer.signerPrivateKey); + return rcau; } function _generateAcceptableRecurringCollectionAgreementUpdate( @@ -294,13 +278,17 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun _rca.deadline, _rca.nonce ); + // Apply sensible bounds to RCAU fields first, so maxOngoingTokensPerSecond is known + rcau = _recurringCollectorHelper.sensibleRCAU(rcau, _rca.payer); + // Bound metadata terms against the RCAU's maxOngoingTokensPerSecond (not the RCA's) + // since the contract validates against the update's maxOngoingTokensPerSecond rcau.metadata = _encodeUpdateIndexingAgreementMetadataV1( _newUpdateIndexingAgreementMetadataV1( - bound(_ctx.ctxInternal.seed.termsV1.tokensPerSecond, 0, _rca.maxOngoingTokensPerSecond), + bound(_ctx.ctxInternal.seed.termsV1.tokensPerSecond, 0, rcau.maxOngoingTokensPerSecond), _ctx.ctxInternal.seed.termsV1.tokensPerEntityPerSecond ) ); - return _recurringCollectorHelper.sensibleRCAU(rcau); + return rcau; } function _requireIndexer(Context storage _ctx, address _indexer) internal view returns (IndexerState memory) { @@ -335,11 +323,22 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun _addr == users.pauseGuardian; } + function _isProtocolContract(address _addr) internal view returns (bool) { + return + _addr == address(escrow) || + _addr == address(graphPayments) || + _addr == address(staking) || + _addr == address(subgraphService) || + _addr == address(recurringCollector) || + _addr == address(token); + } + function _isSafeSubgraphServiceCaller(address _candidate) internal view returns (bool) { return _candidate != address(0) && _candidate != 
address(_transparentUpgradeableProxyAdmin()) && - _candidate != address(proxyAdmin); + _candidate != address(proxyAdmin) && + !_isProtocolContract(_candidate); } function _transparentUpgradeableProxyAdmin() internal view returns (address) { @@ -448,10 +447,5 @@ contract SubgraphServiceIndexingAgreementSharedTest is SubgraphServiceTest, Boun assertEq(_expected.dataService, _actual.collectorAgreement.dataService); assertEq(_expected.payer, _actual.collectorAgreement.payer); assertEq(_expected.serviceProvider, _actual.collectorAgreement.serviceProvider); - assertEq(_expected.endsAt, _actual.collectorAgreement.endsAt); - assertEq(_expected.maxInitialTokens, _actual.collectorAgreement.maxInitialTokens); - assertEq(_expected.maxOngoingTokensPerSecond, _actual.collectorAgreement.maxOngoingTokensPerSecond); - assertEq(_expected.minSecondsPerCollection, _actual.collectorAgreement.minSecondsPerCollection); - assertEq(_expected.maxSecondsPerCollection, _actual.collectorAgreement.maxSecondsPerCollection); } } diff --git a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/update.t.sol b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/update.t.sol index b77d91644..912af98a6 100644 --- a/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/update.t.sol +++ b/packages/subgraph-service/test/unit/subgraphService/indexing-agreement/update.t.sol @@ -1,108 +1,203 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.27; +import { OFFER_TYPE_UPDATE } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; -import { PausableUpgradeable } from "@openzeppelin/contracts-upgradeable/utils/PausableUpgradeable.sol"; -import { ProvisionManager } from "@graphprotocol/horizon/contracts/data-service/utilities/ProvisionManager.sol"; +import { IIndexingAgreement } from 
"@graphprotocol/interfaces/contracts/subgraph-service/internal/IIndexingAgreement.sol"; -import { ISubgraphService } from "@graphprotocol/interfaces/contracts/subgraph-service/ISubgraphService.sol"; import { IndexingAgreement } from "../../../../contracts/libraries/IndexingAgreement.sol"; import { IndexingAgreementDecoder } from "../../../../contracts/libraries/IndexingAgreementDecoder.sol"; import { SubgraphServiceIndexingAgreementSharedTest } from "./shared.t.sol"; contract SubgraphServiceIndexingAgreementUpgradeTest is SubgraphServiceIndexingAgreementSharedTest { + /* + * HELPERS + */ + + /// @dev Submit an update offer to RC and then accept it, expecting the accept to revert. + function _offerUpdateAndExpectRevertOnAccept( + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, + address payer, + address acceptCaller, + bytes memory expectedErr + ) internal { + vm.stopPrank(); + vm.prank(payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(rcau.agreementId, 1).versionHash; + vm.expectRevert(expectedErr); + vm.prank(acceptCaller); + recurringCollector.accept(rcau.agreementId, pendingHash, bytes(""), 0); + } + + /// @dev Submit an update offer to RC and then accept with extraData, expecting the accept to revert. 
+ function _offerUpdateAndExpectRevertOnAcceptWithExtraData( + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, + address payer, + address acceptCaller, + bytes memory extraData, + bytes memory expectedErr + ) internal { + vm.stopPrank(); + vm.prank(payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(rcau), 0); + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(rcau.agreementId, 1).versionHash; + vm.expectRevert(expectedErr); + vm.prank(acceptCaller); + recurringCollector.accept(rcau.agreementId, pendingHash, extraData, 0); + } + /* * TESTS */ /* solhint-disable graph/func-name-mixedcase */ - function test_SubgraphService_UpdateIndexingAgreementIndexingAgreement_Revert_WhenPaused( - address operator, - IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau, - bytes calldata authData - ) public withSafeIndexerOrOperator(operator) { - resetPrank(users.pauseGuardian); - subgraphService.pause(); + function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenRebindToDifferentDeployment( + Seed memory seed + ) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, ) = _withAcceptedIndexingAgreement( + ctx, + indexerState + ); + IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau = _generateAcceptableRCAU( + ctx, + acceptedRca + ); - resetPrank(operator); - vm.expectRevert(PausableUpgradeable.EnforcedPause.selector); - subgraphService.updateIndexingAgreement(operator, rcau, authData); - } + // Create a second allocation for the same indexer on a DIFFERENT subgraph deployment. + // Mint extra tokens so the indexer can afford a second allocation. 
+ bytes32 differentDeploymentId = keccak256(abi.encode(indexerState.subgraphDeploymentId, "different")); + (uint256 secondAllocKey, address secondAllocId) = boundKeyAndAddr( + uint256(keccak256(abi.encode(seed.indexer0.unboundedAllocationPrivateKey, "second"))) + ); + vm.assume(secondAllocId != indexerState.allocationId); - function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenNotAuthorized( - address indexer, - address notAuthorized, - IRecurringCollector.RecurringCollectionAgreementUpdate calldata rcau, - bytes calldata authData - ) public withSafeIndexerOrOperator(notAuthorized) { - vm.assume(notAuthorized != indexer); - resetPrank(notAuthorized); + uint256 extraTokens = MINIMUM_PROVISION_TOKENS; + mint(indexerState.addr, extraTokens); + address originalPrank = _subgraphServiceSafePrank(indexerState.addr); + _addToProvision(indexerState.addr, extraTokens); + + bytes memory allocData = _createSubgraphAllocationData( + indexerState.addr, + differentDeploymentId, + secondAllocKey, + extraTokens + ); + _startService(indexerState.addr, allocData); + _stopOrResetPrank(originalPrank); + + // Attempt to rebind agreement to the allocation on a different deployment bytes memory expectedErr = abi.encodeWithSelector( - ProvisionManager.ProvisionManagerNotAuthorized.selector, - indexer, - notAuthorized + IndexingAgreement.IndexingAgreementDeploymentIdMismatch.selector, + indexerState.subgraphDeploymentId, + secondAllocId, + differentDeploymentId + ); + _offerUpdateAndExpectRevertOnAcceptWithExtraData( + acceptableRcau, + acceptedRca.payer, + indexerState.addr, + abi.encode(secondAllocId), + expectedErr ); - vm.expectRevert(expectedErr); - subgraphService.updateIndexingAgreement(indexer, rcau, authData); } - function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenInvalidProvision( - address indexer, - uint256 unboundedTokens, - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, - bytes memory authData - ) public 
withSafeIndexerOrOperator(indexer) { - uint256 tokens = bound(unboundedTokens, 1, MINIMUM_PROVISION_TOKENS - 1); - mint(indexer, tokens); - resetPrank(indexer); - _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + function test_SubgraphService_UpdateIndexingAgreement_OK_WhenRebindToSameDeployment(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, ) = _withAcceptedIndexingAgreement( + ctx, + indexerState + ); + IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau = _generateAcceptableRCAU( + ctx, + acceptedRca + ); - bytes memory expectedErr = abi.encodeWithSelector( - ProvisionManager.ProvisionManagerInvalidValue.selector, - "tokens", - tokens, - MINIMUM_PROVISION_TOKENS, - MAXIMUM_PROVISION_TOKENS + // Create a second allocation for the same indexer on the SAME subgraph deployment. + // Mint extra tokens so the indexer can afford a second allocation. 
+ (uint256 secondAllocKey, address secondAllocId) = boundKeyAndAddr( + uint256(keccak256(abi.encode(seed.indexer0.unboundedAllocationPrivateKey, "second"))) ); - vm.expectRevert(expectedErr); - subgraphService.updateIndexingAgreement(indexer, rcau, authData); - } + vm.assume(secondAllocId != indexerState.allocationId); - function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenIndexerNotRegistered( - address indexer, - uint256 unboundedTokens, - IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau, - bytes memory authData - ) public withSafeIndexerOrOperator(indexer) { - uint256 tokens = bound(unboundedTokens, MINIMUM_PROVISION_TOKENS, MAX_TOKENS); - mint(indexer, tokens); - resetPrank(indexer); - _createProvision(indexer, tokens, FISHERMAN_REWARD_PERCENTAGE, DISPUTE_PERIOD); + uint256 extraTokens = MINIMUM_PROVISION_TOKENS; + mint(indexerState.addr, extraTokens); + address originalPrank2 = _subgraphServiceSafePrank(indexerState.addr); + _addToProvision(indexerState.addr, extraTokens); - bytes memory expectedErr = abi.encodeWithSelector( - ISubgraphService.SubgraphServiceIndexerNotRegistered.selector, - indexer + bytes memory allocData = _createSubgraphAllocationData( + indexerState.addr, + indexerState.subgraphDeploymentId, + secondAllocKey, + extraTokens ); - vm.expectRevert(expectedErr); - subgraphService.updateIndexingAgreement(indexer, rcau, authData); + _startService(indexerState.addr, allocData); + _stopOrResetPrank(originalPrank2); + + // Rebind to allocation on same deployment should succeed + vm.prank(acceptedRca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(acceptableRcau), 0); + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(acceptableRcau.agreementId, 1).versionHash; + vm.prank(indexerState.addr); + recurringCollector.accept(acceptableRcau.agreementId, pendingHash, abi.encode(secondAllocId), 0); + + // Verify the agreement is now bound to the new allocation + IIndexingAgreement.AgreementWrapper 
memory wrapper = subgraphService.getIndexingAgreement( + acceptableRcau.agreementId + ); + assertEq(wrapper.agreement.allocationId, secondAllocId); } - function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenNotAccepted(Seed memory seed) public { + function test_SubgraphService_UpdateIndexingAgreementIndexingAgreement_Revert_WhenPaused(Seed memory seed) public { + // NOTE: SS pause does NOT block accept through RC — the acceptAgreement callback + // does not have whenNotPaused. When SS is paused, the RC accept still succeeds because + // the RC itself is not paused and the SS callback doesn't check pause state. + // This test now verifies the accept succeeds even when SS is paused. Context storage ctx = _newCtx(seed); IndexerState memory indexerState = _withIndexer(ctx); - ( - IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau, - bytes memory authData - ) = _generateAcceptableSignedRCAU(ctx, _generateAcceptableRecurringCollectionAgreement(ctx, indexerState.addr)); + (IRecurringCollector.RecurringCollectionAgreement memory acceptedRca, ) = _withAcceptedIndexingAgreement( + ctx, + indexerState + ); + IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau = _generateAcceptableRCAU( + ctx, + acceptedRca + ); - bytes memory expectedErr = abi.encodeWithSelector( - IndexingAgreement.IndexingAgreementNotActive.selector, - acceptableRcau.agreementId + // Pause SS after setting up the agreement + resetPrank(users.pauseGuardian); + subgraphService.pause(); + vm.stopPrank(); + + // offerUpdate and accept succeed even when SS is paused + vm.prank(acceptedRca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(acceptableRcau), 0); + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(acceptableRcau.agreementId, 1).versionHash; + vm.prank(indexerState.addr); + recurringCollector.accept(acceptableRcau.agreementId, pendingHash, bytes(""), 0); + } + + function 
test_SubgraphService_UpdateIndexingAgreement_Revert_WhenNotAccepted(Seed memory seed) public { + Context storage ctx = _newCtx(seed); + IndexerState memory indexerState = _withIndexer(ctx); + IRecurringCollector.RecurringCollectionAgreement memory rca = _generateAcceptableRecurringCollectionAgreement( + ctx, + indexerState.addr ); - vm.expectRevert(expectedErr); - resetPrank(indexerState.addr); - subgraphService.updateIndexingAgreement(indexerState.addr, acceptableRcau, authData); + IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau = _generateAcceptableRCAU( + ctx, + rca + ); + + // The agreement was never accepted on RC, so offerUpdate will fail at the RC level + // because the agreement is in state None (not Accepted) + vm.expectRevert(); + vm.prank(rca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(acceptableRcau), 0); } function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenNotAuthorizedForAgreement( @@ -115,19 +210,24 @@ contract SubgraphServiceIndexingAgreementUpgradeTest is SubgraphServiceIndexingA ctx, indexerStateA ); - ( - IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau, - bytes memory authData - ) = _generateAcceptableSignedRCAU(ctx, acceptedRca); + IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau = _generateAcceptableRCAU( + ctx, + acceptedRca + ); + // offerUpdate succeeds, but accept by wrong indexer reverts at RC level + // (RC checks msg.sender == serviceProvider) + vm.prank(acceptedRca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(acceptableRcau), 0); + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(acceptableRcau.agreementId, 1).versionHash; bytes memory expectedErr = abi.encodeWithSelector( - IndexingAgreement.IndexingAgreementNotAuthorized.selector, - acceptableRcau.agreementId, - indexerStateB.addr + IRecurringCollector.UnauthorizedServiceProvider.selector, + indexerStateB.addr, + indexerStateA.addr 
); vm.expectRevert(expectedErr); - resetPrank(indexerStateB.addr); - subgraphService.updateIndexingAgreement(indexerStateB.addr, acceptableRcau, authData); + vm.prank(indexerStateB.addr); + recurringCollector.accept(acceptableRcau.agreementId, pendingHash, bytes(""), 0); } function test_SubgraphService_UpdateIndexingAgreement_Revert_WhenInvalidMetadata(Seed memory seed) public { @@ -138,23 +238,17 @@ contract SubgraphServiceIndexingAgreementUpgradeTest is SubgraphServiceIndexingA indexerState ); IRecurringCollector.RecurringCollectionAgreementUpdate - memory acceptableUpdate = _generateAcceptableRecurringCollectionAgreementUpdate(ctx, acceptedRca); - acceptableUpdate.metadata = bytes("invalid"); + memory unacceptableRcau = _generateAcceptableRecurringCollectionAgreementUpdate(ctx, acceptedRca); + unacceptableRcau.metadata = bytes("invalid"); // Set correct nonce for first update (should be 1) - acceptableUpdate.nonce = 1; - ( - IRecurringCollector.RecurringCollectionAgreementUpdate memory unacceptableRcau, - bytes memory authData - ) = _recurringCollectorHelper.generateSignedRCAU(acceptableUpdate, ctx.payer.signerPrivateKey); + unacceptableRcau.nonce = 1; bytes memory expectedErr = abi.encodeWithSelector( IndexingAgreementDecoder.IndexingAgreementDecoderInvalidData.selector, "decodeRCAUMetadata", unacceptableRcau.metadata ); - vm.expectRevert(expectedErr); - resetPrank(indexerState.addr); - subgraphService.updateIndexingAgreement(indexerState.addr, unacceptableRcau, authData); + _offerUpdateAndExpectRevertOnAccept(unacceptableRcau, acceptedRca.payer, indexerState.addr, expectedErr); } function test_SubgraphService_UpdateIndexingAgreement_OK(Seed memory seed) public { @@ -164,16 +258,22 @@ contract SubgraphServiceIndexingAgreementUpgradeTest is SubgraphServiceIndexingA ctx, indexerState ); - ( - IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau, - bytes memory authData - ) = _generateAcceptableSignedRCAU(ctx, acceptedRca); + 
IRecurringCollector.RecurringCollectionAgreementUpdate memory acceptableRcau = _generateAcceptableRCAU( + ctx, + acceptedRca + ); IndexingAgreement.UpdateIndexingAgreementMetadata memory metadata = abi.decode( acceptableRcau.metadata, (IndexingAgreement.UpdateIndexingAgreementMetadata) ); + // Step 1: Payer submits update offer to RC + vm.prank(acceptedRca.payer); + recurringCollector.offer(OFFER_TYPE_UPDATE, abi.encode(acceptableRcau), 0); + + // Step 2: Accept update via RC (serviceProvider calls directly) + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(acceptableRcau.agreementId, 1).versionHash; vm.expectEmit(address(subgraphService)); emit IndexingAgreement.IndexingAgreementUpdated( acceptedRca.serviceProvider, @@ -184,8 +284,12 @@ contract SubgraphServiceIndexingAgreementUpgradeTest is SubgraphServiceIndexingA metadata.terms ); - resetPrank(indexerState.addr); - subgraphService.updateIndexingAgreement(indexerState.addr, acceptableRcau, authData); + vm.prank(indexerState.addr); + recurringCollector.accept(acceptableRcau.agreementId, pendingHash, bytes(""), 0); } + + // Note: a test for agreement.version being set in the update path is not viable + // because V1 is enum value 0 (same as uninitialized storage). The fix is still + // applied for correctness — it becomes load-bearing when V2 is added. 
/* solhint-enable graph/func-name-mixedcase */ } diff --git a/packages/testing/foundry.toml b/packages/testing/foundry.toml new file mode 100644 index 000000000..2b44a2bc6 --- /dev/null +++ b/packages/testing/foundry.toml @@ -0,0 +1,24 @@ +[profile.default] +src = 'test' +out = 'forge-artifacts' +test = 'test' +libs = ["node_modules"] +cache_path = 'cache_forge' +remappings = [ + "@openzeppelin/=node_modules/@openzeppelin/", + "@graphprotocol/=node_modules/@graphprotocol/", + "forge-std/=node_modules/forge-std/src/", + # Real contract sources via workspace symlinks + "horizon/=node_modules/@graphprotocol/horizon/contracts/", + "horizon-mocks/=node_modules/@graphprotocol/horizon/contracts/mocks/", + "issuance/=node_modules/@graphprotocol/issuance/contracts/", +] +optimizer = true +optimizer_runs = 100 +via_ir = true +solc_version = '0.8.34' +evm_version = 'cancun' + +[lint] +exclude_lints = ["mixed-case-function", "mixed-case-variable"] +ignore = ["node_modules/**", "**/node_modules/**"] diff --git a/packages/testing/package.json b/packages/testing/package.json new file mode 100644 index 000000000..93444e04a --- /dev/null +++ b/packages/testing/package.json @@ -0,0 +1,22 @@ +{ + "name": "@graphprotocol/testing", + "version": "0.0.0", + "private": true, + "description": "Cross-package integration tests for Graph Protocol contracts", + "license": "GPL-2.0-or-later", + "scripts": { + "build": "pnpm build:dep", + "build:dep": "pnpm --filter '@graphprotocol/testing^...' 
run build:self", + "test": "forge test", + "test:gas": "forge test --match-contract Gas -vv" + }, + "devDependencies": { + "@graphprotocol/contracts": "workspace:^", + "@graphprotocol/horizon": "workspace:^", + "@graphprotocol/interfaces": "workspace:^", + "@graphprotocol/issuance": "workspace:^", + "@openzeppelin/contracts": "^5.4.0", + "@openzeppelin/contracts-upgradeable": "^5.4.0", + "forge-std": "catalog:" + } +} diff --git a/packages/testing/test/gas/CallbackGas.t.sol b/packages/testing/test/gas/CallbackGas.t.sol new file mode 100644 index 000000000..22d55749a --- /dev/null +++ b/packages/testing/test/gas/CallbackGas.t.sol @@ -0,0 +1,374 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +pragma solidity ^0.8.27; + +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PROVIDER, + OFFER_TYPE_NEW +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; + +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; + +import { RealStackHarness } from "../harness/RealStackHarness.t.sol"; + +/// @notice Gas measurement for RAM callbacks against real contracts. +/// RecurringCollector forwards at most MAX_CALLBACK_GAS (1.5M) to each callback. +/// These tests verify the real contract stack stays within that budget. +/// +/// Real contracts on callback path: PaymentsEscrow, IssuanceAllocator, RecurringCollector. +/// Stubs (not on callback path): Controller, HorizonStaking, GraphToken (bare ERC20). 
+/// +/// Test matrix: +/// - beforeCollection: early return, JIT deposit, cold-storage first access +/// - afterCollection: reconcile, withdraw+deposit (heaviest escrow path), deletion cascade +/// - afterAgreementStateChange: first-seen discovery, existing reconcile, deletion +contract CallbackGasTest is RealStackHarness { + /* solhint-disable graph/func-name-mixedcase */ + + /// @notice Must match MAX_CALLBACK_GAS in RecurringCollector. + uint256 internal constant MAX_CALLBACK_GAS = 1_500_000; + + /// @notice Assert callbacks use less than half the budget. + /// Leaves margin for cold storage and EVM repricing. + uint256 internal constant GAS_THRESHOLD = MAX_CALLBACK_GAS / 2; // 750_000 + + // ==================== beforeCollection ==================== + + /// @notice Worst-case beforeCollection: escrow short, triggers distributeIssuance + JIT deposit. + function test_BeforeCollection_GasWithinBudget_JitDeposit() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + + IPaymentsEscrow.EscrowAccount memory account = ram.getEscrowAccount( + IRecurringCollector(address(recurringCollector)), + indexer + ); + + // Advance block so distributeIssuance actually runs (not deduped) + vm.roll(block.number + 1); + + uint256 tokensToCollect = account.balance + 500 ether; + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + ram.beforeCollection(agreementId, tokensToCollect); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_THRESHOLD, "beforeCollection (JIT) exceeds half of callback gas budget"); + } + + /// @notice beforeCollection early-return path: escrow sufficient. 
+ function test_BeforeCollection_GasWithinBudget_EscrowSufficient() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + ram.beforeCollection(agreementId, 1 ether); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_THRESHOLD, "beforeCollection (sufficient) exceeds half of callback gas budget"); + } + + /// @notice beforeCollection on an untracked agreement: exercises _getAgreementProvider discovery + /// (getAgreement from collector, role checks, set registration) before the JIT deposit. + /// This is the heaviest beforeCollection path: cold storage + discovery + JIT. + function test_BeforeCollection_GasWithinBudget_ColdDiscoveryJit() public { + // Create an agreement directly in the collector so RAM has never seen it. + // Normally offer() triggers afterAgreementStateChange which discovers the agreement, + // but we bypass RAM to test the discovery path inside beforeCollection. + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + // Offer and accept without RAM tracking: offer via RAM (triggers discovery), + // then create a second agreement for a different provider that RAM hasn't seen. + address indexer2 = makeAddr("indexer2"); + _setUpProvider(indexer2); + IRecurringCollector.RecurringCollectionAgreement memory rca2 = rca; + rca2.serviceProvider = indexer2; + rca2.nonce = 2; + + // Offer via RAM — this triggers afterAgreementStateChange discovery for rca2 + bytes16 agreementId2 = _offerAgreement(rca2); + + // Now call beforeCollection on this newly-discovered agreement with escrow shortfall. 
+ // The agreement is tracked (discovered during offer), but the provider's escrow slot + // is cold in the PaymentsEscrow (never deposited to before). + vm.roll(block.number + 1); + + IPaymentsEscrow.EscrowAccount memory account = ram.getEscrowAccount( + IRecurringCollector(address(recurringCollector)), + indexer2 + ); + uint256 tokensToCollect = account.balance + 500 ether; + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + ram.beforeCollection(agreementId2, tokensToCollect); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_THRESHOLD, "beforeCollection (cold provider JIT) exceeds half of callback gas budget"); + } + + // ==================== afterCollection ==================== + + /// @notice Worst-case afterCollection: reconcile against real RecurringCollector + escrow update. + /// Exercises real RecurringCollector.getAgreementData() / getMaxNextClaim() and real + /// PaymentsEscrow.adjustThaw() / deposit(). + function test_AfterCollection_GasWithinBudget_FullReconcile() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAndAccept(rca); + + // Advance time past minSecondsPerCollection, then simulate post-collection + vm.warp(block.timestamp + 1 hours); + vm.roll(block.number + 1); + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + ram.afterCollection(agreementId, 500 ether); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_THRESHOLD, "afterCollection (full reconcile) exceeds half of callback gas budget"); + } + + /// @notice afterCollection exercising the heaviest escrow mutation path: + /// 1. A prior thaw has matured → withdraw (real token transfer) + /// 2. After withdrawal, escrow below min → deposit (approve + real token transfer) + /// This hits both the withdraw and deposit branches inside _withdrawAndRebalance. 
+ function test_AfterCollection_GasWithinBudget_WithdrawAndDeposit() public { + // Create two agreements for the same provider to build up escrow + IRecurringCollector.RecurringCollectionAgreement memory rca1 = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId1 = _offerAndAccept(rca1); + + IRecurringCollector.RecurringCollectionAgreement memory rca2 = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + rca2.nonce = 2; + bytes16 agreementId2 = _offerAndAccept(rca2); + + // Cancel one agreement by SP → maxNextClaim drops to 0 for that agreement, + // sumMaxNextClaim halves, escrow is now above max → triggers thaw of excess. + bytes32 activeHash2 = recurringCollector.getAgreementVersionAt(agreementId2, 0).versionHash; + vm.prank(indexer); + recurringCollector.cancel(agreementId2, activeHash2, 0); + + // The afterAgreementStateChange callback from cancel triggers reconciliation, + // which removes the agreement and thaws the excess escrow. + // Now advance past the thawing period (1 day) so the thaw matures. + vm.warp(block.timestamp + 2 days); + vm.roll(block.number + 1); + + // afterCollection on the remaining agreement: _reconcileProviderEscrow sees + // matured thaw → withdraw, then escrow may be below min → deposit. + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + ram.afterCollection(agreementId1, 0); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_THRESHOLD, "afterCollection (withdraw + deposit) exceeds half of callback gas budget"); + } + + /// @notice afterCollection when SP cancels → maxNextClaim goes to 0, agreement is deleted, + /// _reconcileProvider runs cascade removal (provider set remove, potentially collector set remove). 
+ function test_AfterCollection_GasWithinBudget_DeletionCascade() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAndAccept(rca); + + // SP cancels → state becomes NOTICE_GIVEN | SETTLED, maxNextClaim → 0 + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(indexer); + recurringCollector.cancel(agreementId, activeHash, 0); + + vm.roll(block.number + 1); + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + ram.afterCollection(agreementId, 0); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_THRESHOLD, "afterCollection (deletion cascade) exceeds half of callback gas budget"); + } + + // ==================== afterAgreementStateChange ==================== + + /// @notice afterAgreementStateChange on a first-seen agreement: exercises the full + /// _getAgreementProvider discovery path (getAgreement from collector, role validation, + /// EnumerableSet insertions) followed by _reconcileAndUpdateEscrow + _reconcileProviderEscrow. + /// This is tested by measuring the callback during an accept (the offer callback already + /// discovered the agreement, so accept is the reconcile path). For the pure discovery path, + /// we need an agreement that RAM hasn't seen yet. + function test_AfterAgreementStateChange_GasWithinBudget_FirstSeenDiscovery() public { + // Create agreement directly in RecurringCollector, bypassing RAM. + // Then call afterAgreementStateChange — RAM discovers it for the first time. + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + + // We need to offer without going through RAM so the callback doesn't fire. + // Use a second indexer so we get a clean discovery. 
+ address indexer2 = makeAddr("indexer2"); + _setUpProvider(indexer2); + rca.serviceProvider = indexer2; + rca.nonce = 2; + + // Offer through RAM — this triggers afterAgreementStateChange (discovery happens here). + // We measure the SECOND call (accept) which is a reconcile on an already-discovered agreement. + bytes16 agreementId = _offerAgreement(rca); + + // Accept the agreement (SP signs) + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(indexer2); + recurringCollector.accept(agreementId, activeHash, bytes(""), 0); + + // Now measure afterAgreementStateChange as a reconcile of the accepted agreement + vm.roll(block.number + 1); + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + ram.afterAgreementStateChange(agreementId, activeHash, REGISTERED | ACCEPTED); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt( + gasUsed, + GAS_THRESHOLD, + "afterAgreementStateChange (reconcile after accept) exceeds half of callback gas budget" + ); + } + + /// @notice First-seen discovery during offer: the collector's _notifyStateChange fires + /// afterAgreementStateChange on RAM for an agreement it has never seen before. + /// This is the heaviest callback path due to cold EnumerableSet insertions + escrow deposit. + /// Measured indirectly via total offerAgreement gas (includes the callback). + /// + /// Trace analysis shows this path uses ~490k gas (33% of 1.5M budget), dominated by + /// cold-storage writes for EnumerableSet (collectorSet, providerSet, agreements) + deposit. + /// The measured gas below is the total offer transaction including RAM-side logic and the + /// collector-side offer; the callback is the dominant component (~490k of ~560k total). 
+ function test_AfterAgreementStateChange_GasWithinBudget_FirstSeenDiscoveryViaOffer() public { + // Use a fresh provider so all storage slots are cold + address indexer2 = makeAddr("indexer2"); + _setUpProvider(indexer2); + + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.serviceProvider = indexer2; + rca.nonce = 2; + + token.mint(address(ram), 1_000_000 ether); + + // Measure the full offer. The collector fires _notifyStateChange → RAM.afterAgreementStateChange + // which discovers the agreement (cold getAgreement, cold set insertions, escrow deposit). + uint256 gasBefore = gasleft(); + vm.prank(operator); + ram.offerAgreement(IRecurringCollector(address(recurringCollector)), OFFER_TYPE_NEW, abi.encode(rca)); + uint256 gasUsed = gasBefore - gasleft(); + + // The callback (afterAgreementStateChange) is capped at MAX_CALLBACK_GAS by the collector. + // The total offer must stay well under the block gas limit, but more importantly, + // the callback portion (visible in trace as ~490k) must stay under MAX_CALLBACK_GAS. + // We assert the total is under the callback budget as a conservative check — + // if the total fits, the callback portion certainly fits. + assertLt( + gasUsed, + MAX_CALLBACK_GAS, + "offerAgreement (including first-seen discovery callback) exceeds callback gas budget" + ); + } + + /// @notice afterAgreementStateChange on a canceled agreement — deletion path. + /// maxNextClaim → 0, agreement removed, _reconcileProvider cascade. 
+ function test_AfterAgreementStateChange_GasWithinBudget_Deletion() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAndAccept(rca); + + // SP cancels + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(indexer); + recurringCollector.cancel(agreementId, activeHash, 0); + + vm.roll(block.number + 1); + + uint256 gasBefore = gasleft(); + vm.prank(address(recurringCollector)); + ram.afterAgreementStateChange( + agreementId, + activeHash, + REGISTERED | ACCEPTED | NOTICE_GIVEN | SETTLED | BY_PROVIDER + ); + uint256 gasUsed = gasBefore - gasleft(); + + assertLt(gasUsed, GAS_THRESHOLD, "afterAgreementStateChange (deletion) exceeds half of callback gas budget"); + } + + // ==================== Helpers ==================== + + function _setUpProvider(address provider) internal { + staking.setProvision( + provider, + dataService, + IHorizonStakingTypes.Provision({ + tokens: 1000 ether, + tokensThawing: 0, + sharesThawing: 0, + maxVerifierCut: 100000, + thawingPeriod: 604800, + createdAt: uint64(block.timestamp), + maxVerifierCutPending: 100000, + thawingPeriodPending: 604800, + lastParametersStagedAt: 0, + thawingNonce: 0 + }) + ); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/testing/test/harness/RealStackHarness.t.sol b/packages/testing/test/harness/RealStackHarness.t.sol new file mode 100644 index 000000000..e04d0d9d8 --- /dev/null +++ b/packages/testing/test/harness/RealStackHarness.t.sol @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +pragma solidity ^0.8.27; + +import { Test } from "forge-std/Test.sol"; + +// Real contracts +import { PaymentsEscrow } from "horizon/payments/PaymentsEscrow.sol"; +import { RecurringCollector } from "horizon/payments/collectors/RecurringCollector.sol"; +import { IssuanceAllocator } from 
"issuance/allocate/IssuanceAllocator.sol"; +import { RecurringAgreementManager } from "issuance/agreement/RecurringAgreementManager.sol"; +import { IIssuanceTarget } from "@graphprotocol/interfaces/contracts/issuance/allocate/IIssuanceTarget.sol"; + +// Use the issuance IGraphToken for RAM/allocator (IERC20 + mint) +import { IGraphToken as IssuanceIGraphToken } from "issuance/common/IGraphToken.sol"; + +// Interfaces +import { IPaymentsEscrow } from "@graphprotocol/interfaces/contracts/horizon/IPaymentsEscrow.sol"; +import { OFFER_TYPE_NEW } from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; +import { TransparentUpgradeableProxy } from "@openzeppelin/contracts/proxy/transparent/TransparentUpgradeableProxy.sol"; + +// Stubs for infra not on callback path +import { ControllerStub } from "../mocks/ControllerStub.sol"; +import { HorizonStakingStub } from "../mocks/HorizonStakingStub.sol"; +import { GraphTokenMock } from "../mocks/GraphTokenMock.sol"; +import { MockDataService } from "../mocks/MockDataService.sol"; + +/// @notice Deploys the real contract stack that participates in RAM callback gas: +/// - PaymentsEscrow (real) — RAM calls deposit/adjustThaw/withdraw/escrowAccounts +/// - RecurringCollector (real) — RAM calls getAgreement/getMaxNextClaim in afterCollection +/// - IssuanceAllocator (real, behind proxy) — RAM calls distributeIssuance +/// - RecurringAgreementManager (real, behind proxy) — the contract under test +/// +/// Only infrastructure not on the callback path is stubbed: +/// - Controller (paused() check, contract registry) +/// - HorizonStaking (provision check in RecurringCollector.collect, not in RAM callbacks) +/// - GraphToken (bare ERC20 — ~2-5k cheaper per op than proxied real token) +abstract 
contract RealStackHarness is Test { + // -- Real contracts -- + PaymentsEscrow internal paymentsEscrow; + RecurringCollector internal recurringCollector; + IssuanceAllocator internal issuanceAllocator; + RecurringAgreementManager internal ram; + + // -- Stubs -- + ControllerStub internal controller; + HorizonStakingStub internal staking; + GraphTokenMock internal token; + + // -- Accounts -- + address internal governor; + address internal operator; + address internal indexer; + MockDataService internal dataServiceMock; + address internal dataService; + + // -- Role constants -- + bytes32 internal constant GOVERNOR_ROLE = keccak256("GOVERNOR_ROLE"); + bytes32 internal constant OPERATOR_ROLE = keccak256("OPERATOR_ROLE"); + bytes32 internal constant DATA_SERVICE_ROLE = keccak256("DATA_SERVICE_ROLE"); + bytes32 internal constant COLLECTOR_ROLE = keccak256("COLLECTOR_ROLE"); + bytes32 internal constant AGREEMENT_MANAGER_ROLE = keccak256("AGREEMENT_MANAGER_ROLE"); + + function setUp() public virtual { + governor = makeAddr("governor"); + operator = makeAddr("operator"); + indexer = makeAddr("indexer"); + dataServiceMock = new MockDataService(); + dataService = address(dataServiceMock); + + // 1. Deploy stubs + token = new GraphTokenMock(); + controller = new ControllerStub(); + staking = new HorizonStakingStub(); + + // 2. Register in controller (GraphDirectory reads these immutably at construction) + controller.register("GraphToken", address(token)); + controller.register("Staking", address(staking)); + + // 3. Deploy real PaymentsEscrow behind proxy + PaymentsEscrow escrowImpl = new PaymentsEscrow(address(controller), 1 days); + TransparentUpgradeableProxy escrowProxy = new TransparentUpgradeableProxy( + address(escrowImpl), + address(this), + abi.encodeCall(PaymentsEscrow.initialize, ()) + ); + paymentsEscrow = PaymentsEscrow(address(escrowProxy)); + controller.register("PaymentsEscrow", address(paymentsEscrow)); + + // 4. 
Deploy real RecurringCollector behind proxy + { + RecurringCollector rcImpl = new RecurringCollector(address(controller)); + TransparentUpgradeableProxy rcProxy = new TransparentUpgradeableProxy( + address(rcImpl), + address(this), + abi.encodeCall(RecurringCollector.initialize, ()) + ); + recurringCollector = RecurringCollector(address(rcProxy)); + } + + // 5. Deploy real IssuanceAllocator behind proxy + IssuanceAllocator allocatorImpl = new IssuanceAllocator(IssuanceIGraphToken(address(token))); + TransparentUpgradeableProxy allocatorProxy = new TransparentUpgradeableProxy( + address(allocatorImpl), + address(this), + abi.encodeCall(IssuanceAllocator.initialize, (governor)) + ); + issuanceAllocator = IssuanceAllocator(address(allocatorProxy)); + + // 6. Deploy real RecurringAgreementManager behind proxy + RecurringAgreementManager ramImpl = new RecurringAgreementManager( + IssuanceIGraphToken(address(token)), + IPaymentsEscrow(address(paymentsEscrow)) + ); + TransparentUpgradeableProxy ramProxy = new TransparentUpgradeableProxy( + address(ramImpl), + address(this), + abi.encodeCall(RecurringAgreementManager.initialize, (governor)) + ); + ram = RecurringAgreementManager(address(ramProxy)); + + // 7. Wire up roles + vm.startPrank(governor); + ram.grantRole(OPERATOR_ROLE, operator); + ram.grantRole(DATA_SERVICE_ROLE, dataService); + ram.grantRole(COLLECTOR_ROLE, address(recurringCollector)); + ram.setIssuanceAllocator(address(issuanceAllocator)); + // Configure allocator: set total issuance rate, then allocate to RAM + issuanceAllocator.setIssuancePerBlock(1 ether); + issuanceAllocator.setTargetAllocation(IIssuanceTarget(address(ram)), 1 ether); + vm.stopPrank(); + + vm.prank(operator); + ram.grantRole(AGREEMENT_MANAGER_ROLE, operator); + + // 8. 
Set up staking provision so RecurringCollector allows collections + staking.setProvision( + indexer, + dataService, + IHorizonStakingTypes.Provision({ + tokens: 1000 ether, + tokensThawing: 0, + sharesThawing: 0, + maxVerifierCut: 100000, + thawingPeriod: 604800, + createdAt: uint64(block.timestamp), + maxVerifierCutPending: 100000, + thawingPeriodPending: 604800, + lastParametersStagedAt: 0, + thawingNonce: 0 + }) + ); + + // Labels + vm.label(address(token), "GraphToken"); + vm.label(address(paymentsEscrow), "PaymentsEscrow"); + vm.label(address(recurringCollector), "RecurringCollector"); + vm.label(address(issuanceAllocator), "IssuanceAllocator"); + vm.label(address(ram), "RecurringAgreementManager"); + } + + // -- Helpers -- + + /// @notice Create an RCA with RAM as payer + function _makeRCA( + uint256 maxInitialTokens, + uint256 maxOngoingTokensPerSecond, + uint32 maxSecondsPerCollection, + uint64 endsAt + ) internal view returns (IRecurringCollector.RecurringCollectionAgreement memory) { + return + IRecurringCollector.RecurringCollectionAgreement({ + deadline: uint64(block.timestamp + 1 hours), + endsAt: endsAt, + payer: address(ram), + dataService: dataService, + serviceProvider: indexer, + maxInitialTokens: maxInitialTokens, + maxOngoingTokensPerSecond: maxOngoingTokensPerSecond, + minSecondsPerCollection: 60, + maxSecondsPerCollection: maxSecondsPerCollection, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + } + + /// @notice Offer an agreement, funding the RAM first + function _offerAgreement(IRecurringCollector.RecurringCollectionAgreement memory rca) internal returns (bytes16) { + token.mint(address(ram), 1_000_000 ether); + vm.prank(operator); + return ram.offerAgreement(IRecurringCollector(address(recurringCollector)), OFFER_TYPE_NEW, abi.encode(rca)); + } + + /// @notice Offer and accept an agreement, returning the agreement ID + function _offerAndAccept(IRecurringCollector.RecurringCollectionAgreement 
memory rca) internal returns (bytes16) { + bytes16 agreementId = _offerAgreement(rca); + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + vm.prank(rca.serviceProvider); + recurringCollector.accept(agreementId, activeHash, bytes(""), 0); + return agreementId; + } +} diff --git a/packages/testing/test/integration/AgreementLifecycle.t.sol b/packages/testing/test/integration/AgreementLifecycle.t.sol new file mode 100644 index 000000000..3f0a43f58 --- /dev/null +++ b/packages/testing/test/integration/AgreementLifecycle.t.sol @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +pragma solidity ^0.8.27; + +import { + REGISTERED, + ACCEPTED, + NOTICE_GIVEN, + SETTLED, + BY_PAYER, + BY_PROVIDER, + OFFER_TYPE_NEW, + OFFER_TYPE_UPDATE +} from "@graphprotocol/interfaces/contracts/horizon/IAgreementCollector.sol"; +import { IRecurringCollector } from "@graphprotocol/interfaces/contracts/horizon/IRecurringCollector.sol"; +import { IGraphPayments } from "@graphprotocol/interfaces/contracts/horizon/IGraphPayments.sol"; + +import { RecurringCollector } from "horizon/payments/collectors/RecurringCollector.sol"; + +import { RealStackHarness } from "../harness/RealStackHarness.t.sol"; + +/// @notice Integration tests using the real contract stack (RAM + RecurringCollector + PaymentsEscrow). +/// Validates cross-contract flows that unit tests with mocks cannot cover. 
+contract AgreementLifecycleTest is RealStackHarness { + /* solhint-disable graph/func-name-mixedcase */ + + // ==================== Offer + Accept ==================== + + function test_OfferAndAccept() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAndAccept(rca); + + IRecurringCollector.AgreementData memory agreement = recurringCollector.getAgreementData(agreementId); + assertEq(agreement.state, REGISTERED | ACCEPTED); + assertEq(agreement.payer, address(ram)); + assertEq(agreement.serviceProvider, indexer); + assertEq(agreement.dataService, dataService); + + // Verify active terms via the offer data + (, bytes memory offerData) = recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreement memory activeOffer = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreement) + ); + assertEq(activeOffer.endsAt, rca.endsAt); + assertEq(activeOffer.maxInitialTokens, rca.maxInitialTokens); + assertEq(activeOffer.maxOngoingTokensPerSecond, rca.maxOngoingTokensPerSecond); + } + + // ==================== Payer validation ==================== + + function test_Offer_Revert_WhenPayerNotRAM() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.payer = address(0xdead); + + // RAM forwards to collector, collector checks msg.sender == rca.payer. + // msg.sender is RAM, but rca.payer is 0xdead — collector reverts. 
+ token.mint(address(ram), 1_000_000 ether); + vm.expectRevert(); + vm.prank(operator); + ram.offerAgreement(IRecurringCollector(address(recurringCollector)), OFFER_TYPE_NEW, abi.encode(rca)); + } + + // ==================== Offer + Accept + Update ==================== + + function test_OfferUpdateAndAccept() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAndAccept(rca); + + // Offer an update with doubled rate + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + vm.prank(operator); + ram.offerAgreement(IRecurringCollector(address(recurringCollector)), OFFER_TYPE_UPDATE, abi.encode(rcau)); + + // Accept the update + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(indexer); + recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); + + // Verify updated terms are now active + (, bytes memory activeOfferData) = recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreementUpdate memory activeUpdate = abi.decode( + activeOfferData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(activeUpdate.maxOngoingTokensPerSecond, 2 ether); + assertEq(activeUpdate.maxInitialTokens, 200 ether); + assertEq(recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash, bytes32(0)); // cleared + } + + // ==================== Deadline enforcement ==================== + + function 
test_Accept_Revert_WhenDeadlineElapsed() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + + // Warp past deadline + vm.warp(block.timestamp + 2 hours); + + vm.expectRevert(); + vm.prank(indexer); + recurringCollector.accept(agreementId, activeHash, bytes(""), 0); + } + + function test_AcceptUpdate_Revert_WhenDeadlineElapsed() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAndAccept(rca); + + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + vm.prank(operator); + ram.offerAgreement(IRecurringCollector(address(recurringCollector)), OFFER_TYPE_UPDATE, abi.encode(rcau)); + + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + + // Warp past update deadline + vm.warp(block.timestamp + 2 hours); + + vm.expectRevert(); + vm.prank(indexer); + recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); + } + + // ==================== Conditions: eligibility check ==================== + + function test_Conditions_StoredAndReadBack() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.conditions = 1; // 
CONDITION_ELIGIBILITY_CHECK + + bytes16 agreementId = _offerAndAccept(rca); + + (, bytes memory offerData) = recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreement memory activeOffer = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreement) + ); + assertEq(activeOffer.conditions, 1); + } + + function test_Conditions_PreservedThroughUpdate() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.conditions = 1; // CONDITION_ELIGIBILITY_CHECK + bytes16 agreementId = _offerAndAccept(rca); + + // Update with different conditions + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 3600, + conditions: 0, // Remove eligibility check in update + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + vm.prank(operator); + ram.offerAgreement(IRecurringCollector(address(recurringCollector)), OFFER_TYPE_UPDATE, abi.encode(rcau)); + + // Pending terms have conditions = 0 + { + (, bytes memory activeOfferData) = recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreement memory activeOffer = abi.decode( + activeOfferData, + (IRecurringCollector.RecurringCollectionAgreement) + ); + assertEq(activeOffer.conditions, 1); // still 1 on active + } + { + (, bytes memory pendingOfferData) = recurringCollector.getAgreementOfferAt(agreementId, 1); + assertEq( + abi.decode(pendingOfferData, (IRecurringCollector.RecurringCollectionAgreementUpdate)).conditions, + 0 + ); // 0 on pending + } + + // Accept update — conditions 
change + bytes32 pendingHash = recurringCollector.getAgreementVersionAt(agreementId, 1).versionHash; + vm.prank(indexer); + recurringCollector.accept(agreementId, pendingHash, bytes(""), 0); + + (, bytes memory updatedOfferData) = recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreementUpdate memory updatedOffer = abi.decode( + updatedOfferData, + (IRecurringCollector.RecurringCollectionAgreementUpdate) + ); + assertEq(updatedOffer.conditions, 0); // now 0 + } + + function test_Conditions_NoEligibilityCheckWhenFlagNotSet() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + rca.conditions = 0; // No eligibility check + + bytes16 agreementId = _offerAndAccept(rca); + + (, bytes memory offerData) = recurringCollector.getAgreementOfferAt(agreementId, 0); + IRecurringCollector.RecurringCollectionAgreement memory activeOffer = abi.decode( + offerData, + (IRecurringCollector.RecurringCollectionAgreement) + ); + assertEq(activeOffer.conditions, 0); + } + + // ==================== Cancel ==================== + + function test_CancelByPayer() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAndAccept(rca); + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + + vm.prank(address(ram)); + recurringCollector.cancel(agreementId, activeHash, 0); + + IRecurringCollector.AgreementData memory agreement = recurringCollector.getAgreementData(agreementId); + assertEq(agreement.state, REGISTERED | ACCEPTED | NOTICE_GIVEN | BY_PAYER); + } + + function test_CancelByServiceProvider() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId 
= _offerAndAccept(rca); + bytes32 activeHash = recurringCollector.getAgreementVersionAt(agreementId, 0).versionHash; + + vm.prank(indexer); + recurringCollector.cancel(agreementId, activeHash, 0); + + IRecurringCollector.AgreementData memory agreement = recurringCollector.getAgreementData(agreementId); + assertEq(agreement.state, REGISTERED | ACCEPTED | NOTICE_GIVEN | BY_PROVIDER); + } + + // ==================== Nonce sequencing ==================== + + function test_UpdateNonce_MustIncrement() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAndAccept(rca); + + // First update with nonce 1 — should succeed + IRecurringCollector.RecurringCollectionAgreementUpdate memory rcau = IRecurringCollector + .RecurringCollectionAgreementUpdate({ + agreementId: agreementId, + deadline: uint64(block.timestamp + 1 hours), + endsAt: uint64(block.timestamp + 365 days), + maxInitialTokens: 200 ether, + maxOngoingTokensPerSecond: 2 ether, + minSecondsPerCollection: 60, + maxSecondsPerCollection: 3600, + conditions: 0, + minSecondsPayerCancellationNotice: 0, + nonce: 1, + metadata: "" + }); + + vm.prank(operator); + ram.offerAgreement(IRecurringCollector(address(recurringCollector)), OFFER_TYPE_UPDATE, abi.encode(rcau)); + + // Second update with nonce 1 again — should revert (expects 2) + rcau.maxOngoingTokensPerSecond = 3 ether; + rcau.nonce = 1; + + vm.expectRevert(); + vm.prank(operator); + ram.offerAgreement(IRecurringCollector(address(recurringCollector)), OFFER_TYPE_UPDATE, abi.encode(rcau)); + } + + // ==================== Hash verification ==================== + + function test_Accept_Revert_WhenHashMismatch() public { + IRecurringCollector.RecurringCollectionAgreement memory rca = _makeRCA( + 100 ether, + 1 ether, + 3600, + uint64(block.timestamp + 365 days) + ); + bytes16 agreementId = _offerAgreement(rca); + + vm.expectRevert(); + 
vm.prank(indexer); + recurringCollector.accept(agreementId, bytes32(uint256(1)), bytes(""), 0); + } + + /* solhint-enable graph/func-name-mixedcase */ +} diff --git a/packages/testing/test/mocks/ControllerStub.sol b/packages/testing/test/mocks/ControllerStub.sol new file mode 100644 index 000000000..6ece3ae1b --- /dev/null +++ b/packages/testing/test/mocks/ControllerStub.sol @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +pragma solidity ^0.8.27; + +import { IController } from "@graphprotocol/interfaces/contracts/contracts/governance/IController.sol"; + +/// @notice Minimal Controller stub for GraphDirectory consumers. +/// Returns registered addresses; unregistered names return a dummy nonzero address +/// so GraphDirectory constructors don't revert on zero-address checks. +contract ControllerStub is IController { + mapping(bytes32 => address) private _registry; + address private immutable _dummy; + + constructor() { + _dummy = address(uint160(uint256(keccak256("ControllerStub.dummy")))); + } + + function register(string memory name, address addr) external { + _registry[keccak256(abi.encodePacked(name))] = addr; + } + + function getContractProxy(bytes32 id) external view override returns (address) { + address a = _registry[id]; + return a != address(0) ? 
a : _dummy; + } + + // -- Stubs -- + function getGovernor() external pure override returns (address) { + return address(1); + } + function paused() external pure override returns (bool) { + return false; + } + function partialPaused() external pure override returns (bool) { + return false; + } + function setContractProxy(bytes32, address) external override {} + function unsetContractProxy(bytes32) external override {} + function updateController(bytes32, address) external override {} + function setPartialPaused(bool) external override {} + function setPaused(bool) external override {} + function setPauseGuardian(address) external override {} +} diff --git a/packages/testing/test/mocks/GraphTokenMock.sol b/packages/testing/test/mocks/GraphTokenMock.sol new file mode 100644 index 000000000..95f9e7424 --- /dev/null +++ b/packages/testing/test/mocks/GraphTokenMock.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +pragma solidity ^0.8.27; + +import { ERC20 } from "@openzeppelin/contracts/token/ERC20/ERC20.sol"; + +/// @notice Mintable ERC20 standing in for the real GraphToken. +/// The real GraphToken is an ERC20 behind a proxy; this mock uses bare ERC20 +/// which is slightly cheaper per call. The gas delta is small (~2-5k per call). +contract GraphTokenMock is ERC20 { + constructor() ERC20("Graph Token", "GRT") {} + + function mint(address to, uint256 amount) external { + _mint(to, amount); + } + + /// @dev Matches the GraphToken burn interface (self-burn). 
+ function burnFrom(address from, uint256 amount) external { + _burn(from, amount); + } +} diff --git a/packages/testing/test/mocks/HorizonStakingStub.sol b/packages/testing/test/mocks/HorizonStakingStub.sol new file mode 100644 index 000000000..d43cea22f --- /dev/null +++ b/packages/testing/test/mocks/HorizonStakingStub.sol @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +pragma solidity ^0.8.27; + +import { IHorizonStakingTypes } from "@graphprotocol/interfaces/contracts/horizon/internal/IHorizonStakingTypes.sol"; + +/// @notice Minimal staking stub — only provides getProviderTokensAvailable +/// (needed by RecurringCollector to gate collection). +contract HorizonStakingStub { + mapping(address => mapping(address => IHorizonStakingTypes.Provision)) public provisions; + + function setProvision( + address serviceProvider, + address verifier, + IHorizonStakingTypes.Provision memory provision + ) external { + provisions[serviceProvider][verifier] = provision; + } + + function getProvision( + address serviceProvider, + address verifier + ) external view returns (IHorizonStakingTypes.Provision memory) { + return provisions[serviceProvider][verifier]; + } + + function getProviderTokensAvailable(address serviceProvider, address verifier) external view returns (uint256) { + IHorizonStakingTypes.Provision memory p = provisions[serviceProvider][verifier]; + return p.tokens - p.tokensThawing; + } + + function isAuthorized(address, address, address) external pure returns (bool) { + return true; + } +} diff --git a/packages/testing/test/mocks/MockDataService.sol b/packages/testing/test/mocks/MockDataService.sol new file mode 100644 index 000000000..aeedfb1f6 --- /dev/null +++ b/packages/testing/test/mocks/MockDataService.sol @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pragma solidity ^0.8.27; + +import { IDataServiceAgreements } from "@graphprotocol/interfaces/contracts/data-service/IDataServiceAgreements.sol"; + +/// @dev Mock data service 
that accepts all agreements/updates without validation. +contract MockDataService is IDataServiceAgreements { + function acceptAgreement(bytes16, bytes32, address, address, bytes calldata, bytes calldata) external pure {} + function afterAgreementStateChange(bytes16, bytes32, uint16) external pure {} +} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b4ae1a0f8..c555202ac 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1303,6 +1303,30 @@ importers: specifier: 'catalog:' version: 5.9.3 + packages/testing: + devDependencies: + '@graphprotocol/contracts': + specifier: workspace:^ + version: link:../contracts + '@graphprotocol/horizon': + specifier: workspace:^ + version: link:../horizon + '@graphprotocol/interfaces': + specifier: workspace:^ + version: link:../interfaces + '@graphprotocol/issuance': + specifier: workspace:^ + version: link:../issuance + '@openzeppelin/contracts': + specifier: ^5.4.0 + version: 5.4.0 + '@openzeppelin/contracts-upgradeable': + specifier: ^5.4.0 + version: 5.4.0(@openzeppelin/contracts@5.4.0) + forge-std: + specifier: 'catalog:' + version: https://github.com/foundry-rs/forge-std/tarball/v1.14.0 + packages/token-distribution: dependencies: ajv: