From c2152944f68d47fa8946af13b5bb14cb12950a00 Mon Sep 17 00:00:00 2001 From: Victor Fusco <1221933+vfusco@users.noreply.github.com> Date: Fri, 6 Mar 2026 13:44:41 -0300 Subject: [PATCH 1/5] fix(service): handle SIGTERM for graceful shutdown alongside SIGINT --- pkg/service/service.go | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/pkg/service/service.go b/pkg/service/service.go index 4d889d779..362bbf0b1 100644 --- a/pkg/service/service.go +++ b/pkg/service/service.go @@ -69,6 +69,8 @@ import ( "github.com/lmittmann/tint" ) +const telemetryShutdownTimeout = 5 * time.Second + var ( ErrInvalid = fmt.Errorf("Invalid Argument") // invalid argument ) @@ -117,7 +119,7 @@ type Service struct { Context context.Context Cancel context.CancelFunc Sighup chan os.Signal // SIGHUP to reload - Sigint chan os.Signal // SIGINT to exit gracefully + SigShutdown chan os.Signal // SIGINT/SIGTERM to exit gracefully ServeMux *http.ServeMux Telemetry *http.Server TelemetryFunc func() error @@ -173,9 +175,9 @@ func Create(ctx context.Context, c *CreateInfo, s *Service) error { s.Sighup = make(chan os.Signal, 1) signal.Notify(s.Sighup, syscall.SIGHUP) } - if s.Sigint == nil { - s.Sigint = make(chan os.Signal, 1) - signal.Notify(s.Sigint, syscall.SIGINT) + if s.SigShutdown == nil { + s.SigShutdown = make(chan os.Signal, 1) + signal.Notify(s.SigShutdown, syscall.SIGINT, syscall.SIGTERM) } } @@ -245,7 +247,17 @@ func (s *Service) Stop(force bool) []error { start := time.Now() errs := s.Impl.Stop(force) if s.Telemetry != nil { - s.Telemetry.Shutdown(s.Context) + shutdownCtx, cancel := context.WithTimeout(context.Background(), telemetryShutdownTimeout) + defer cancel() + if err := s.Telemetry.Shutdown(shutdownCtx); err != nil { + errs = append(errs, err) + } + } + if s.SigShutdown != nil { + signal.Stop(s.SigShutdown) + } + if s.Sighup != nil { + signal.Stop(s.Sighup) } elapsed := time.Since(start) @@ -261,7 +273,7 @@ func (s *Service) 
Stop(force bool) []error { "force", force, "duration", elapsed) } - return nil + return errs } func (s *Service) Serve() error { @@ -270,7 +282,7 @@ func (s *Service) Serve() error { // Check for context cancellation before the first tick. select { case <-s.Context.Done(): - s.Stop(true) + s.Stop(true) // Stop logs errors internally. return nil default: } @@ -280,10 +292,12 @@ func (s *Service) Serve() error { select { case <-s.Sighup: s.Reload() - case <-s.Sigint: - s.Stop(false) + case <-s.SigShutdown: + s.Stop(false) // Graceful shutdown; errors are logged by Stop. + return nil case <-s.Context.Done(): - s.Stop(true) + s.Stop(true) // Stop logs errors internally. + return nil case <-s.Ticker.C: s.Tick() } From b511691c0daf3d81dd3f20427b43ed22a28cb35e Mon Sep 17 00:00:00 2001 From: Victor Fusco <1221933+vfusco@users.noreply.github.com> Date: Sun, 8 Mar 2026 22:28:28 -0300 Subject: [PATCH 2/5] fix(machine): handle graceful shutdown when SIGINT kills emulator child processes --- internal/advancer/service.go | 35 ++++++++++++++++------------------- pkg/machine/implementation.go | 32 +++++++++++++++++++++++++++++++- 2 files changed, 47 insertions(+), 20 deletions(-) diff --git a/internal/advancer/service.go b/internal/advancer/service.go index 168703a83..29521610f 100644 --- a/internal/advancer/service.go +++ b/internal/advancer/service.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "net/http" + "sync" "time" "github.com/cartesi/rollups-node/internal/config" @@ -31,6 +32,7 @@ type Service struct { inspector *inspect.Inspector HTTPServer *http.Server HTTPServerFunc func() error + stopOnce sync.Once } // CreateInfo contains the configuration for creating an advancer service @@ -102,27 +104,22 @@ func (s *Service) Tick() []error { } func (s *Service) Stop(b bool) []error { var errs []error - - // Shut down the inspect HTTP server gracefully. - // Use a dedicated timeout context because s.Context may already be cancelled - // when Stop is called from the context.Done path. 
- if s.HTTPServer != nil { - s.Logger.Info("Shutting down inspect HTTP server") - shutdownCtx, cancel := context.WithTimeout(context.Background(), httpShutdownTimeout) - defer cancel() - if err := s.HTTPServer.Shutdown(shutdownCtx); err != nil { - errs = append(errs, fmt.Errorf("failed to shutdown inspect HTTP server: %w", err)) + s.stopOnce.Do(func() { + if s.HTTPServer != nil { + s.Logger.Info("Shutting down inspect HTTP server") + shutdownCtx, cancel := context.WithTimeout(context.Background(), httpShutdownTimeout) + defer cancel() + if err := s.HTTPServer.Shutdown(shutdownCtx); err != nil { + errs = append(errs, fmt.Errorf("failed to shutdown inspect HTTP server: %w", err)) + } } - } - - // Close all machine instances to avoid orphaned emulator processes - if s.machineManager != nil { - s.Logger.Info("Closing machine manager") - if err := s.machineManager.Close(); err != nil { - errs = append(errs, fmt.Errorf("failed to close machine manager: %w", err)) + if s.machineManager != nil { + s.Logger.Info("Closing machine manager") + if err := s.machineManager.Close(); err != nil { + errs = append(errs, fmt.Errorf("failed to close machine manager: %w", err)) + } } - } - + }) return errs } func (s *Service) Serve() error { diff --git a/pkg/machine/implementation.go b/pkg/machine/implementation.go index 19e867060..f9718fe2f 100644 --- a/pkg/machine/implementation.go +++ b/pkg/machine/implementation.go @@ -8,6 +8,8 @@ import ( "errors" "fmt" "log/slog" + "os" + "syscall" "time" "github.com/cartesi/rollups-node/internal/model" @@ -95,7 +97,7 @@ func (m *machineImpl) Fork(ctx context.Context) (Machine, error) { } // Forks the server. 
- newServer, address, _, err := m.backend.ForkServer(m.params.FastDeadline) + newServer, address, pid, err := m.backend.ForkServer(m.params.FastDeadline) if err != nil { err = fmt.Errorf("could not fork the machine: %w", err) return nil, errors.Join(ErrMachineInternal, err) @@ -105,6 +107,7 @@ func (m *machineImpl) Fork(ctx context.Context) (Machine, error) { newMachine := &machineImpl{ backend: newServer, address: address, + pid: pid, params: m.params, logger: m.logger, } @@ -236,6 +239,16 @@ func (m *machineImpl) Close() error { err := m.backend.ShutdownServer(m.params.FastDeadline) if err != nil { + // ShutdownServer can fail because SIGINT was delivered to the + // entire process group: the child is already shutting down (closing + // sockets) so our RPC gets "connection reset" or "end of stream". + // Wait briefly for the child to finish exiting before reporting it + // as orphaned. + if m.pid != 0 && waitForExit(m.pid, 500*time.Millisecond) { //nolint: mnd + m.backend.Delete() + m.backend = nil + return nil + } err = fmt.Errorf("could not shut down the server: %w", err) err = errors.Join(errors.Join(ErrMachineInternal, err), fmt.Errorf("%w at address %s", ErrOrphanServer, m.address)) @@ -245,6 +258,23 @@ func (m *machineImpl) Close() error { return err } +// waitForExit polls until the process with the given PID has exited or the +// timeout elapses. Returns true if the process exited within the timeout. 
+func waitForExit(pid uint32, timeout time.Duration) bool { + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + proc, err := os.FindProcess(int(pid)) + if err != nil { + return true + } + if proc.Signal(syscall.Signal(0)) != nil { + return true + } + time.Sleep(50 * time.Millisecond) //nolint: mnd + } + return false +} + // Address returns the address of the machine server func (m *machineImpl) Address() string { return m.address From 325b309a2756ae27ac56e484e67968440fd84315 Mon Sep 17 00:00:00 2001 From: Victor Fusco <1221933+vfusco@users.noreply.github.com> Date: Fri, 6 Mar 2026 13:41:04 -0300 Subject: [PATCH 3/5] refactor(jsonrpc): extract API types and add JSON model serialization Restructure the JSON-RPC layer into focused packages: - Extract decode, params, and response types from internal/jsonrpc/ into internal/jsonrpc/api/ with generics (ListResponse[T], SingleResponse[T]) to eliminate duplicated anonymous structs - Add MarshalJSON/UnmarshalJSON to model types (Epoch, Input, Output, Report, Tournament, Commitment) with hex-encoded uint64 fields and roundtrip tests - Add --json flag to CLI send, execute, and validate commands, backed by new shared output types in internal/cli/ - Introduce DecodedData flat union type for type-safe output decoding across server and client boundaries --- .../root/deploy/application.go | 12 +- .../root/deploy/authority.go | 6 +- cmd/cartesi-rollups-cli/root/deploy/deploy.go | 4 +- .../root/execute/execute.go | 15 +- .../root/read/commitments/commitments.go | 5 +- .../root/read/epochs/epochs.go | 5 +- .../root/read/inputs/inputs.go | 5 +- .../root/read/matchadvances/matchadvances.go | 5 +- .../root/read/matches/matches.go | 5 +- .../root/read/outputs/outputs.go | 5 +- .../root/read/reports/reports.go | 5 +- .../root/read/service/jsonrpc.go | 38 +- .../root/read/service/repository.go | 64 +-- .../root/read/service/types.go | 36 +- .../root/read/tournaments/tournaments.go | 5 +- 
cmd/cartesi-rollups-cli/root/send/send.go | 18 +- .../root/validate/validate.go | 20 +- internal/advancer/advancer_test.go | 4 +- internal/cli/types.go | 25 ++ internal/jsonrpc/api/decode.go | 252 ++++++++++++ internal/jsonrpc/api/params.go | 165 ++++++++ internal/jsonrpc/api/response.go | 22 ++ internal/jsonrpc/jsonrpc.go | 367 ++++-------------- internal/jsonrpc/jsonrpc_test.go | 7 +- internal/jsonrpc/types.go | 362 +---------------- internal/manager/instance.go | 8 +- internal/model/models.go | 148 ++++++- internal/model/models_json_test.go | 275 +++++++++++++ 28 files changed, 1118 insertions(+), 770 deletions(-) create mode 100644 internal/cli/types.go create mode 100644 internal/jsonrpc/api/decode.go create mode 100644 internal/jsonrpc/api/params.go create mode 100644 internal/jsonrpc/api/response.go create mode 100644 internal/model/models_json_test.go diff --git a/cmd/cartesi-rollups-cli/root/deploy/application.go b/cmd/cartesi-rollups-cli/root/deploy/application.go index c40f1a81e..6934b81f7 100644 --- a/cmd/cartesi-rollups-cli/root/deploy/application.go +++ b/cmd/cartesi-rollups-cli/root/deploy/application.go @@ -217,13 +217,13 @@ func runDeployApplication(cmd *cobra.Command, args []string) { } // deploy - if verboseParam || !asJsonParam { + if verboseParam || !asJSONParam { fmt.Fprint(os.Stderr, "deploying...") } _, result, err := deployment.Deploy(ctx, client, txOpts) cobra.CheckErr(err) - if verboseParam || !asJsonParam { + if verboseParam || !asJSONParam { fmt.Fprint(os.Stderr, "success\n") fmt.Fprint(os.Stderr, result) } @@ -263,7 +263,7 @@ func runDeployApplication(cmd *cobra.Command, args []string) { } if applicationRegisterParam { - if verboseParam || !asJsonParam { + if verboseParam || !asJSONParam { fmt.Fprint(os.Stderr, "registering...") } dsn, err := config.GetDatabaseConnection() @@ -281,11 +281,11 @@ func runDeployApplication(cmd *cobra.Command, args []string) { if err != nil { cobra.CheckErr(fmt.Errorf("failed to register application: %w", 
err)) } - if verboseParam || !asJsonParam { + if verboseParam || !asJSONParam { fmt.Fprint(os.Stderr, "success\n") } - if verboseParam || !asJsonParam { + if verboseParam || !asJSONParam { if applicationName != "" || verboseParam { fmt.Fprintln(os.Stderr, "\tapplication name: ", applicationName) } @@ -300,7 +300,7 @@ func runDeployApplication(cmd *cobra.Command, args []string) { fmt.Fprint(os.Stderr, "registering...skipped\n") } - if asJsonParam { + if asJSONParam { report, err := json.MarshalIndent(&application, "", " ") cobra.CheckErr(err) fmt.Println(string(report)) diff --git a/cmd/cartesi-rollups-cli/root/deploy/authority.go b/cmd/cartesi-rollups-cli/root/deploy/authority.go index 03e875032..2dfe618f3 100644 --- a/cmd/cartesi-rollups-cli/root/deploy/authority.go +++ b/cmd/cartesi-rollups-cli/root/deploy/authority.go @@ -98,20 +98,20 @@ func runDeployAuthority(cmd *cobra.Command, args []string) { } // deploy - if verboseParam || !asJsonParam { + if verboseParam || !asJSONParam { fmt.Fprintf(os.Stderr, "deploying authority...") } deployment.Address, err = deployment.Deploy(ctx, client, txOpts) cobra.CheckErr(err) // report - if verboseParam || !asJsonParam { + if verboseParam || !asJSONParam { fmt.Fprintf(os.Stderr, "success\n") fmt.Fprintln(os.Stderr, "\tconsensus address: ", deployment.Address) fmt.Fprintln(os.Stderr, "\tepoch length: ", deployment.EpochLength) } - if asJsonParam { + if asJSONParam { report, err := json.MarshalIndent(&deployment, "", " ") cobra.CheckErr(err) // deployed, but fail to print diff --git a/cmd/cartesi-rollups-cli/root/deploy/deploy.go b/cmd/cartesi-rollups-cli/root/deploy/deploy.go index 259f13060..1ca3aaafa 100644 --- a/cmd/cartesi-rollups-cli/root/deploy/deploy.go +++ b/cmd/cartesi-rollups-cli/root/deploy/deploy.go @@ -13,7 +13,7 @@ import ( var ( epochLengthParam uint64 saltParam string - asJsonParam bool + asJSONParam bool verboseParam bool ) @@ -30,7 +30,7 @@ func init() { Cmd.PersistentFlags().StringVar(&saltParam, "salt", 
"0000000000000000000000000000000000000000000000000000000000000000", "Salt value for contract deployment") Cmd.PersistentFlags().MarkHidden("salt") - Cmd.PersistentFlags().BoolVarP(&asJsonParam, "json", "", false, + Cmd.PersistentFlags().BoolVarP(&asJSONParam, "json", "", false, "Print results as JSON") Cmd.PersistentFlags().MarkHidden("json") Cmd.PersistentFlags().BoolVarP(&verboseParam, "verbose", "", false, diff --git a/cmd/cartesi-rollups-cli/root/execute/execute.go b/cmd/cartesi-rollups-cli/root/execute/execute.go index d06f54de3..7a8e74ba8 100644 --- a/cmd/cartesi-rollups-cli/root/execute/execute.go +++ b/cmd/cartesi-rollups-cli/root/execute/execute.go @@ -4,6 +4,7 @@ package execute import ( + "encoding/json" "fmt" "os" "strings" @@ -11,6 +12,7 @@ import ( "github.com/ethereum/go-ethereum/ethclient" "github.com/spf13/cobra" + "github.com/cartesi/rollups-node/internal/cli" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/config/auth" "github.com/cartesi/rollups-node/internal/repository/factory" @@ -40,10 +42,12 @@ cartesi-rollups-cli execute echo-dapp 5 --yes` var ( skipConfirmation bool + asJSONParam bool ) func init() { Cmd.Flags().BoolVarP(&skipConfirmation, "yes", "y", false, "Skip confirmation prompt") + Cmd.Flags().BoolVar(&asJSONParam, "json", false, "Print result as JSON") origHelpFunc := Cmd.HelpFunc() Cmd.SetHelpFunc(func(command *cobra.Command, strings []string) { @@ -127,5 +131,14 @@ func run(cmd *cobra.Command, args []string) { ) cobra.CheckErr(err) - fmt.Printf("Voucher executed tx-hash: %v\n", txHash) + if asJSONParam { + result := cli.ExecuteResult{ + TransactionHash: txHash.Hex(), + } + jsonBytes, err := json.MarshalIndent(&result, "", " ") + cobra.CheckErr(err) + fmt.Println(string(jsonBytes)) + } else { + fmt.Printf("Voucher executed tx-hash: %v\n", txHash) + } } diff --git a/cmd/cartesi-rollups-cli/root/read/commitments/commitments.go b/cmd/cartesi-rollups-cli/root/read/commitments/commitments.go 
index e357d7c97..5f6a31939 100644 --- a/cmd/cartesi-rollups-cli/root/read/commitments/commitments.go +++ b/cmd/cartesi-rollups-cli/root/read/commitments/commitments.go @@ -12,6 +12,7 @@ import ( "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read/service" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/spf13/cobra" ) @@ -101,7 +102,7 @@ func run(cmd *cobra.Command, args []string) { var result json.RawMessage if len(args) >= 4 { - var params jsonrpc.GetCommitmentParams + var params api.GetCommitmentParams params.Application = args[0] params.EpochIndex, err = config.AsHexString(args[1]) cobra.CheckErr(err) @@ -110,7 +111,7 @@ func run(cmd *cobra.Command, args []string) { result, err = readServ.GetCommitment(ctx, params) } else { - var params jsonrpc.ListCommitmentsParams + var params api.ListCommitmentsParams params.Application = args[0] // Add epoch index filter if provided diff --git a/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go b/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go index 4b9f298a9..ec966f48f 100644 --- a/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go +++ b/cmd/cartesi-rollups-cli/root/read/epochs/epochs.go @@ -12,6 +12,7 @@ import ( "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read/service" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/spf13/cobra" ) @@ -92,14 +93,14 @@ func run(cmd *cobra.Command, args []string) { var result json.RawMessage if len(args) >= 2 { - var params jsonrpc.GetEpochParams + var params api.GetEpochParams params.Application = args[0] params.EpochIndex, err = config.AsHexString(args[1]) cobra.CheckErr(err) result, err = readServ.GetEpoch(ctx, params) } else { - var params jsonrpc.ListEpochsParams + var params api.ListEpochsParams 
params.Application = args[0] // Add status filter if provided diff --git a/cmd/cartesi-rollups-cli/root/read/inputs/inputs.go b/cmd/cartesi-rollups-cli/root/read/inputs/inputs.go index 622c41484..de692b258 100644 --- a/cmd/cartesi-rollups-cli/root/read/inputs/inputs.go +++ b/cmd/cartesi-rollups-cli/root/read/inputs/inputs.go @@ -12,6 +12,7 @@ import ( "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read/service" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/spf13/cobra" ) @@ -95,14 +96,14 @@ func run(cmd *cobra.Command, args []string) { var result json.RawMessage if len(args) >= 2 { - var params jsonrpc.GetInputParams + var params api.GetInputParams params.Application = args[0] params.InputIndex, err = config.AsHexString(args[1]) cobra.CheckErr(err) result, err = readServ.GetInput(ctx, params) } else { - var params jsonrpc.ListInputsParams + var params api.ListInputsParams params.Application = args[0] // Add epoch index filter if provided diff --git a/cmd/cartesi-rollups-cli/root/read/matchadvances/matchadvances.go b/cmd/cartesi-rollups-cli/root/read/matchadvances/matchadvances.go index 29c02769a..a0e3847e7 100644 --- a/cmd/cartesi-rollups-cli/root/read/matchadvances/matchadvances.go +++ b/cmd/cartesi-rollups-cli/root/read/matchadvances/matchadvances.go @@ -12,6 +12,7 @@ import ( "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read/service" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/spf13/cobra" ) @@ -89,7 +90,7 @@ func run(cmd *cobra.Command, args []string) { var result json.RawMessage if len(args) >= 5 { - var params jsonrpc.GetMatchAdvancedParams + var params api.GetMatchAdvancedParams params.Application = args[0] params.EpochIndex, err = config.AsHexString(args[1]) cobra.CheckErr(err) @@ 
-99,7 +100,7 @@ func run(cmd *cobra.Command, args []string) { result, err = readServ.GetMatchAdvanced(ctx, params) } else { - var params jsonrpc.ListMatchAdvancesParams + var params api.ListMatchAdvancesParams params.Application = args[0] params.EpochIndex, err = config.AsHexString(args[1]) cobra.CheckErr(err) diff --git a/cmd/cartesi-rollups-cli/root/read/matches/matches.go b/cmd/cartesi-rollups-cli/root/read/matches/matches.go index 3f809dc3c..da27db00a 100644 --- a/cmd/cartesi-rollups-cli/root/read/matches/matches.go +++ b/cmd/cartesi-rollups-cli/root/read/matches/matches.go @@ -12,6 +12,7 @@ import ( "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read/service" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/spf13/cobra" ) @@ -101,7 +102,7 @@ func run(cmd *cobra.Command, args []string) { var result json.RawMessage if len(args) >= 4 { - var params jsonrpc.GetMatchParams + var params api.GetMatchParams params.Application = args[0] params.EpochIndex, err = config.AsHexString(args[1]) cobra.CheckErr(err) @@ -110,7 +111,7 @@ func run(cmd *cobra.Command, args []string) { result, err = readServ.GetMatch(ctx, params) } else { - var params jsonrpc.ListMatchesParams + var params api.ListMatchesParams params.Application = args[0] // Add epoch index filter if provided diff --git a/cmd/cartesi-rollups-cli/root/read/outputs/outputs.go b/cmd/cartesi-rollups-cli/root/read/outputs/outputs.go index 07e37b15c..d584689ea 100644 --- a/cmd/cartesi-rollups-cli/root/read/outputs/outputs.go +++ b/cmd/cartesi-rollups-cli/root/read/outputs/outputs.go @@ -12,6 +12,7 @@ import ( "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read/service" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/spf13/cobra" ) @@ -101,14 +102,14 @@ 
func run(cmd *cobra.Command, args []string) { var result json.RawMessage if len(args) >= 2 { - var params jsonrpc.GetOutputParams + var params api.GetOutputParams params.Application = args[0] params.OutputIndex, err = config.AsHexString(args[1]) cobra.CheckErr(err) result, err = readServ.GetOutput(ctx, params) } else { - var params jsonrpc.ListOutputsParams + var params api.ListOutputsParams params.Application = args[0] // Add epoch index filter if provided diff --git a/cmd/cartesi-rollups-cli/root/read/reports/reports.go b/cmd/cartesi-rollups-cli/root/read/reports/reports.go index 2b3db3503..13e86f3bd 100644 --- a/cmd/cartesi-rollups-cli/root/read/reports/reports.go +++ b/cmd/cartesi-rollups-cli/root/read/reports/reports.go @@ -12,6 +12,7 @@ import ( "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read/service" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/spf13/cobra" ) @@ -95,14 +96,14 @@ func run(cmd *cobra.Command, args []string) { var result json.RawMessage if len(args) >= 2 { - var params jsonrpc.GetReportParams + var params api.GetReportParams params.Application = args[0] params.ReportIndex, err = config.AsHexString(args[1]) cobra.CheckErr(err) result, err = readServ.GetReport(ctx, params) } else { - var params jsonrpc.ListReportsParams + var params api.ListReportsParams params.Application = args[0] // Add epoch index filter if provided diff --git a/cmd/cartesi-rollups-cli/root/read/service/jsonrpc.go b/cmd/cartesi-rollups-cli/root/read/service/jsonrpc.go index f442e4c7e..73f3354f8 100644 --- a/cmd/cartesi-rollups-cli/root/read/service/jsonrpc.go +++ b/cmd/cartesi-rollups-cli/root/read/service/jsonrpc.go @@ -10,7 +10,7 @@ import ( "net/url" "github.com/cartesi/rollups-node/internal/config" - "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" 
"github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/pkg/jsonrpc/client" ) @@ -19,7 +19,7 @@ type JsonrpcReadService struct { Client *client.Client } -func (s *JsonrpcReadService) GetApplication(ctx context.Context, params jsonrpc.GetApplicationParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) GetApplication(ctx context.Context, params api.GetApplicationParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -29,7 +29,7 @@ func (s *JsonrpcReadService) GetApplication(ctx context.Context, params jsonrpc. return resp, err } -func (s *JsonrpcReadService) GetEpoch(ctx context.Context, params jsonrpc.GetEpochParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) GetEpoch(ctx context.Context, params api.GetEpochParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -42,7 +42,7 @@ func (s *JsonrpcReadService) GetEpoch(ctx context.Context, params jsonrpc.GetEpo return resp, err } -func (s *JsonrpcReadService) ListEpochs(ctx context.Context, params jsonrpc.ListEpochsParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) ListEpochs(ctx context.Context, params api.ListEpochsParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -59,7 +59,7 @@ func (s *JsonrpcReadService) ListEpochs(ctx context.Context, params jsonrpc.List return resp, err } -func (s *JsonrpcReadService) GetInput(ctx context.Context, params jsonrpc.GetInputParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) GetInput(ctx context.Context, params api.GetInputParams) (json.RawMessage, error) { if _, err := 
config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -72,7 +72,7 @@ func (s *JsonrpcReadService) GetInput(ctx context.Context, params jsonrpc.GetInp return resp, err } -func (s *JsonrpcReadService) ListInputs(ctx context.Context, params jsonrpc.ListInputsParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) ListInputs(ctx context.Context, params api.ListInputsParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -94,7 +94,7 @@ func (s *JsonrpcReadService) ListInputs(ctx context.Context, params jsonrpc.List return resp, err } -func (s *JsonrpcReadService) GetOutput(ctx context.Context, params jsonrpc.GetOutputParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) GetOutput(ctx context.Context, params api.GetOutputParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -107,7 +107,7 @@ func (s *JsonrpcReadService) GetOutput(ctx context.Context, params jsonrpc.GetOu return resp, err } -func (s *JsonrpcReadService) ListOutputs(ctx context.Context, params jsonrpc.ListOutputsParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) ListOutputs(ctx context.Context, params api.ListOutputsParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -125,7 +125,7 @@ func (s *JsonrpcReadService) ListOutputs(ctx context.Context, params jsonrpc.Lis } // Add output type filter if provided if params.OutputType != nil { - if _, err := jsonrpc.ParseOutputType(*params.OutputType); err != nil { + if _, err := api.ParseOutputType(*params.OutputType); err != nil { return nil, fmt.Errorf("invalid 
output type: %w", err) } } @@ -141,7 +141,7 @@ func (s *JsonrpcReadService) ListOutputs(ctx context.Context, params jsonrpc.Lis return resp, err } -func (s *JsonrpcReadService) GetReport(ctx context.Context, params jsonrpc.GetReportParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) GetReport(ctx context.Context, params api.GetReportParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -154,7 +154,7 @@ func (s *JsonrpcReadService) GetReport(ctx context.Context, params jsonrpc.GetRe return resp, err } -func (s *JsonrpcReadService) ListReports(ctx context.Context, params jsonrpc.ListReportsParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) ListReports(ctx context.Context, params api.ListReportsParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -176,7 +176,7 @@ func (s *JsonrpcReadService) ListReports(ctx context.Context, params jsonrpc.Lis return resp, err } -func (s *JsonrpcReadService) GetTournament(ctx context.Context, params jsonrpc.GetTournamentParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) GetTournament(ctx context.Context, params api.GetTournamentParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -189,7 +189,7 @@ func (s *JsonrpcReadService) GetTournament(ctx context.Context, params jsonrpc.G return resp, err } -func (s *JsonrpcReadService) ListTournaments(ctx context.Context, params jsonrpc.ListTournamentsParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) ListTournaments(ctx context.Context, params api.ListTournamentsParams) (json.RawMessage, error) { if _, err := 
config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -223,7 +223,7 @@ func (s *JsonrpcReadService) ListTournaments(ctx context.Context, params jsonrpc return resp, err } -func (s *JsonrpcReadService) GetCommitment(ctx context.Context, params jsonrpc.GetCommitmentParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) GetCommitment(ctx context.Context, params api.GetCommitmentParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -242,7 +242,7 @@ func (s *JsonrpcReadService) GetCommitment(ctx context.Context, params jsonrpc.G return resp, err } -func (s *JsonrpcReadService) ListCommitments(ctx context.Context, params jsonrpc.ListCommitmentsParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) ListCommitments(ctx context.Context, params api.ListCommitmentsParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -264,7 +264,7 @@ func (s *JsonrpcReadService) ListCommitments(ctx context.Context, params jsonrpc return resp, err } -func (s *JsonrpcReadService) GetMatch(ctx context.Context, params jsonrpc.GetMatchParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) GetMatch(ctx context.Context, params api.GetMatchParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -283,7 +283,7 @@ func (s *JsonrpcReadService) GetMatch(ctx context.Context, params jsonrpc.GetMat return resp, err } -func (s *JsonrpcReadService) ListMatches(ctx context.Context, params jsonrpc.ListMatchesParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) ListMatches(ctx context.Context, params 
api.ListMatchesParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -305,7 +305,7 @@ func (s *JsonrpcReadService) ListMatches(ctx context.Context, params jsonrpc.Lis return resp, err } -func (s *JsonrpcReadService) GetMatchAdvanced(ctx context.Context, params jsonrpc.GetMatchAdvancedParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) GetMatchAdvanced(ctx context.Context, params api.GetMatchAdvancedParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } @@ -327,7 +327,7 @@ func (s *JsonrpcReadService) GetMatchAdvanced(ctx context.Context, params jsonrp return resp, err } -func (s *JsonrpcReadService) ListMatchAdvances(ctx context.Context, params jsonrpc.ListMatchAdvancesParams) (json.RawMessage, error) { +func (s *JsonrpcReadService) ListMatchAdvances(ctx context.Context, params api.ListMatchAdvancesParams) (json.RawMessage, error) { if _, err := config.ToApplicationNameOrAddressFromString(params.Application); err != nil { return nil, fmt.Errorf("invalid application: %w", err) } diff --git a/cmd/cartesi-rollups-cli/root/read/service/repository.go b/cmd/cartesi-rollups-cli/root/read/service/repository.go index cad7a5232..19e5c78be 100644 --- a/cmd/cartesi-rollups-cli/root/read/service/repository.go +++ b/cmd/cartesi-rollups-cli/root/read/service/repository.go @@ -9,7 +9,7 @@ import ( "fmt" "github.com/cartesi/rollups-node/internal/config" - "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" "github.com/cartesi/rollups-node/internal/repository/factory" @@ -25,7 +25,7 @@ type RepositoryReadService struct { OutputAbi *abi.ABI } -func (s 
*RepositoryReadService) GetApplication(ctx context.Context, params jsonrpc.GetApplicationParams) (json.RawMessage, error) { +func (s *RepositoryReadService) GetApplication(ctx context.Context, params api.GetApplicationParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -49,7 +49,7 @@ func (s *RepositoryReadService) GetApplication(ctx context.Context, params jsonr return json.RawMessage(result), err } -func (s *RepositoryReadService) GetEpoch(ctx context.Context, params jsonrpc.GetEpochParams) (json.RawMessage, error) { +func (s *RepositoryReadService) GetEpoch(ctx context.Context, params api.GetEpochParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -77,7 +77,7 @@ func (s *RepositoryReadService) GetEpoch(ctx context.Context, params jsonrpc.Get return json.RawMessage(result), err } -func (s *RepositoryReadService) ListEpochs(ctx context.Context, params jsonrpc.ListEpochsParams) (json.RawMessage, error) { +func (s *RepositoryReadService) ListEpochs(ctx context.Context, params api.ListEpochsParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -125,7 +125,7 @@ func (s *RepositoryReadService) ListEpochs(ctx context.Context, params jsonrpc.L return json.RawMessage(result), err } -func (s *RepositoryReadService) GetInput(ctx context.Context, params jsonrpc.GetInputParams) (json.RawMessage, error) { +func (s *RepositoryReadService) GetInput(ctx context.Context, params api.GetInputParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -143,9 +143,9 @@ func (s *RepositoryReadService) GetInput(ctx context.Context, params jsonrpc.Get if data == nil { return 
nil, ErrNotFound } - dataVal, err := jsonrpc.DecodeInput(data, s.InputAbi) + dataVal, err := api.DecodeInput(data, s.InputAbi) if err != nil { - dataVal = &jsonrpc.DecodedInput{Input: data} + dataVal = &api.DecodedInput{Input: data} } response := map[string]any{ @@ -157,7 +157,7 @@ func (s *RepositoryReadService) GetInput(ctx context.Context, params jsonrpc.Get return json.RawMessage(result), err } -func (s *RepositoryReadService) ListInputs(ctx context.Context, params jsonrpc.ListInputsParams) (json.RawMessage, error) { +func (s *RepositoryReadService) ListInputs(ctx context.Context, params api.ListInputsParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -198,11 +198,11 @@ func (s *RepositoryReadService) ListInputs(ctx context.Context, params jsonrpc.L } data = make([]*model.Input, 0) } - dataVal := make([]*jsonrpc.DecodedInput, 0, len(data)) + dataVal := make([]*api.DecodedInput, 0, len(data)) for _, item := range data { - decoded, err := jsonrpc.DecodeInput(item, s.InputAbi) + decoded, err := api.DecodeInput(item, s.InputAbi) if err != nil { - decoded = &jsonrpc.DecodedInput{Input: item} + decoded = &api.DecodedInput{Input: item} } dataVal = append(dataVal, decoded) } @@ -221,7 +221,7 @@ func (s *RepositoryReadService) ListInputs(ctx context.Context, params jsonrpc.L return json.RawMessage(result), err } -func (s *RepositoryReadService) GetOutput(ctx context.Context, params jsonrpc.GetOutputParams) (json.RawMessage, error) { +func (s *RepositoryReadService) GetOutput(ctx context.Context, params api.GetOutputParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -239,9 +239,12 @@ func (s *RepositoryReadService) GetOutput(ctx context.Context, params jsonrpc.Ge if data == nil { return nil, ErrNotFound } - dataVal, err := jsonrpc.DecodeOutput(data, 
s.OutputAbi) + dataVal, err := api.DecodeOutput(data, s.OutputAbi) if err != nil { - dataVal = &jsonrpc.DecodedOutput{Output: data, DecodedData: err.Error()} + dataVal = &api.DecodedOutput{ + Output: data, + DecodedData: &api.DecodedData{Type: "error", RawData: err.Error()}, + } } response := map[string]any{ @@ -253,7 +256,7 @@ func (s *RepositoryReadService) GetOutput(ctx context.Context, params jsonrpc.Ge return json.RawMessage(result), err } -func (s *RepositoryReadService) ListOutputs(ctx context.Context, params jsonrpc.ListOutputsParams) (json.RawMessage, error) { +func (s *RepositoryReadService) ListOutputs(ctx context.Context, params api.ListOutputsParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -279,7 +282,7 @@ func (s *RepositoryReadService) ListOutputs(ctx context.Context, params jsonrpc. } // Add output type filter if provided if params.OutputType != nil { - outputTypeVal, err := jsonrpc.ParseOutputType(*params.OutputType) + outputTypeVal, err := api.ParseOutputType(*params.OutputType) if err != nil { return nil, fmt.Errorf("invalid output type: %w", err) } @@ -310,11 +313,14 @@ func (s *RepositoryReadService) ListOutputs(ctx context.Context, params jsonrpc. } data = make([]*model.Output, 0) } - dataVal := make([]*jsonrpc.DecodedOutput, 0, len(data)) + dataVal := make([]*api.DecodedOutput, 0, len(data)) for _, item := range data { - decoded, err := jsonrpc.DecodeOutput(item, s.OutputAbi) + decoded, err := api.DecodeOutput(item, s.OutputAbi) if err != nil { - decoded = &jsonrpc.DecodedOutput{Output: item, DecodedData: err.Error()} + decoded = &api.DecodedOutput{ + Output: item, + DecodedData: &api.DecodedData{Type: "error", RawData: err.Error()}, + } } dataVal = append(dataVal, decoded) } @@ -333,7 +339,7 @@ func (s *RepositoryReadService) ListOutputs(ctx context.Context, params jsonrpc. 
return json.RawMessage(result), err } -func (s *RepositoryReadService) GetReport(ctx context.Context, params jsonrpc.GetReportParams) (json.RawMessage, error) { +func (s *RepositoryReadService) GetReport(ctx context.Context, params api.GetReportParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -361,7 +367,7 @@ func (s *RepositoryReadService) GetReport(ctx context.Context, params jsonrpc.Ge return json.RawMessage(result), err } -func (s *RepositoryReadService) ListReports(ctx context.Context, params jsonrpc.ListReportsParams) (json.RawMessage, error) { +func (s *RepositoryReadService) ListReports(ctx context.Context, params api.ListReportsParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -417,7 +423,7 @@ func (s *RepositoryReadService) ListReports(ctx context.Context, params jsonrpc. 
return json.RawMessage(result), err } -func (s *RepositoryReadService) GetTournament(ctx context.Context, params jsonrpc.GetTournamentParams) (json.RawMessage, error) { +func (s *RepositoryReadService) GetTournament(ctx context.Context, params api.GetTournamentParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -444,7 +450,7 @@ func (s *RepositoryReadService) GetTournament(ctx context.Context, params jsonrp return json.RawMessage(result), err } -func (s *RepositoryReadService) ListTournaments(ctx context.Context, params jsonrpc.ListTournamentsParams) (json.RawMessage, error) { +func (s *RepositoryReadService) ListTournaments(ctx context.Context, params api.ListTournamentsParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -516,7 +522,7 @@ func (s *RepositoryReadService) ListTournaments(ctx context.Context, params json return json.RawMessage(result), err } -func (s *RepositoryReadService) GetCommitment(ctx context.Context, params jsonrpc.GetCommitmentParams) (json.RawMessage, error) { +func (s *RepositoryReadService) GetCommitment(ctx context.Context, params api.GetCommitmentParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -550,7 +556,7 @@ func (s *RepositoryReadService) GetCommitment(ctx context.Context, params jsonrp return json.RawMessage(result), err } -func (s *RepositoryReadService) ListCommitments(ctx context.Context, params jsonrpc.ListCommitmentsParams) (json.RawMessage, error) { +func (s *RepositoryReadService) ListCommitments(ctx context.Context, params api.ListCommitmentsParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -605,7 
+611,7 @@ func (s *RepositoryReadService) ListCommitments(ctx context.Context, params json return json.RawMessage(result), err } -func (s *RepositoryReadService) GetMatch(ctx context.Context, params jsonrpc.GetMatchParams) (json.RawMessage, error) { +func (s *RepositoryReadService) GetMatch(ctx context.Context, params api.GetMatchParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -639,7 +645,7 @@ func (s *RepositoryReadService) GetMatch(ctx context.Context, params jsonrpc.Get return json.RawMessage(result), err } -func (s *RepositoryReadService) ListMatches(ctx context.Context, params jsonrpc.ListMatchesParams) (json.RawMessage, error) { +func (s *RepositoryReadService) ListMatches(ctx context.Context, params api.ListMatchesParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -694,7 +700,7 @@ func (s *RepositoryReadService) ListMatches(ctx context.Context, params jsonrpc. 
return json.RawMessage(result), err } -func (s *RepositoryReadService) GetMatchAdvanced(ctx context.Context, params jsonrpc.GetMatchAdvancedParams) (json.RawMessage, error) { +func (s *RepositoryReadService) GetMatchAdvanced(ctx context.Context, params api.GetMatchAdvancedParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { @@ -731,7 +737,7 @@ func (s *RepositoryReadService) GetMatchAdvanced(ctx context.Context, params jso return json.RawMessage(result), err } -func (s *RepositoryReadService) ListMatchAdvances(ctx context.Context, params jsonrpc.ListMatchAdvancesParams) (json.RawMessage, error) { +func (s *RepositoryReadService) ListMatchAdvances(ctx context.Context, params api.ListMatchAdvancesParams) (json.RawMessage, error) { repo := s.Repository application, err := config.ToApplicationNameOrAddressFromString(params.Application) if err != nil { diff --git a/cmd/cartesi-rollups-cli/root/read/service/types.go b/cmd/cartesi-rollups-cli/root/read/service/types.go index f6081274b..11d32b5ed 100644 --- a/cmd/cartesi-rollups-cli/root/read/service/types.go +++ b/cmd/cartesi-rollups-cli/root/read/service/types.go @@ -9,7 +9,7 @@ import ( "errors" "github.com/cartesi/rollups-node/internal/config" - "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" ) var ( @@ -18,23 +18,23 @@ var ( ) type ReadService interface { - GetApplication(ctx context.Context, params jsonrpc.GetApplicationParams) (json.RawMessage, error) - GetEpoch(ctx context.Context, params jsonrpc.GetEpochParams) (json.RawMessage, error) - ListEpochs(ctx context.Context, params jsonrpc.ListEpochsParams) (json.RawMessage, error) - GetInput(ctx context.Context, params jsonrpc.GetInputParams) (json.RawMessage, error) - ListInputs(ctx context.Context, params jsonrpc.ListInputsParams) (json.RawMessage, error) - GetOutput(ctx context.Context, params 
jsonrpc.GetOutputParams) (json.RawMessage, error) - ListOutputs(ctx context.Context, params jsonrpc.ListOutputsParams) (json.RawMessage, error) - GetReport(ctx context.Context, params jsonrpc.GetReportParams) (json.RawMessage, error) - ListReports(ctx context.Context, params jsonrpc.ListReportsParams) (json.RawMessage, error) - GetTournament(ctx context.Context, params jsonrpc.GetTournamentParams) (json.RawMessage, error) - ListTournaments(ctx context.Context, params jsonrpc.ListTournamentsParams) (json.RawMessage, error) - GetCommitment(ctx context.Context, params jsonrpc.GetCommitmentParams) (json.RawMessage, error) - ListCommitments(ctx context.Context, params jsonrpc.ListCommitmentsParams) (json.RawMessage, error) - GetMatch(ctx context.Context, params jsonrpc.GetMatchParams) (json.RawMessage, error) - ListMatches(ctx context.Context, params jsonrpc.ListMatchesParams) (json.RawMessage, error) - GetMatchAdvanced(ctx context.Context, params jsonrpc.GetMatchAdvancedParams) (json.RawMessage, error) - ListMatchAdvances(ctx context.Context, params jsonrpc.ListMatchAdvancesParams) (json.RawMessage, error) + GetApplication(ctx context.Context, params api.GetApplicationParams) (json.RawMessage, error) + GetEpoch(ctx context.Context, params api.GetEpochParams) (json.RawMessage, error) + ListEpochs(ctx context.Context, params api.ListEpochsParams) (json.RawMessage, error) + GetInput(ctx context.Context, params api.GetInputParams) (json.RawMessage, error) + ListInputs(ctx context.Context, params api.ListInputsParams) (json.RawMessage, error) + GetOutput(ctx context.Context, params api.GetOutputParams) (json.RawMessage, error) + ListOutputs(ctx context.Context, params api.ListOutputsParams) (json.RawMessage, error) + GetReport(ctx context.Context, params api.GetReportParams) (json.RawMessage, error) + ListReports(ctx context.Context, params api.ListReportsParams) (json.RawMessage, error) + GetTournament(ctx context.Context, params api.GetTournamentParams) (json.RawMessage, 
error) + ListTournaments(ctx context.Context, params api.ListTournamentsParams) (json.RawMessage, error) + GetCommitment(ctx context.Context, params api.GetCommitmentParams) (json.RawMessage, error) + ListCommitments(ctx context.Context, params api.ListCommitmentsParams) (json.RawMessage, error) + GetMatch(ctx context.Context, params api.GetMatchParams) (json.RawMessage, error) + ListMatches(ctx context.Context, params api.ListMatchesParams) (json.RawMessage, error) + GetMatchAdvanced(ctx context.Context, params api.GetMatchAdvancedParams) (json.RawMessage, error) + ListMatchAdvances(ctx context.Context, params api.ListMatchAdvancesParams) (json.RawMessage, error) Close() } diff --git a/cmd/cartesi-rollups-cli/root/read/tournaments/tournaments.go b/cmd/cartesi-rollups-cli/root/read/tournaments/tournaments.go index ac268955b..e140298dc 100644 --- a/cmd/cartesi-rollups-cli/root/read/tournaments/tournaments.go +++ b/cmd/cartesi-rollups-cli/root/read/tournaments/tournaments.go @@ -12,6 +12,7 @@ import ( "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read/service" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/jsonrpc" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/spf13/cobra" ) @@ -101,13 +102,13 @@ func run(cmd *cobra.Command, args []string) { var result json.RawMessage if len(args) >= 2 { - var params jsonrpc.GetTournamentParams + var params api.GetTournamentParams params.Application = args[0] params.Address = args[1] result, err = readServ.GetTournament(ctx, params) } else { - var params jsonrpc.ListTournamentsParams + var params api.ListTournamentsParams params.Application = args[0] // Add epoch index filter if provided diff --git a/cmd/cartesi-rollups-cli/root/send/send.go b/cmd/cartesi-rollups-cli/root/send/send.go index 707382477..696bbcf30 100644 --- a/cmd/cartesi-rollups-cli/root/send/send.go +++ b/cmd/cartesi-rollups-cli/root/send/send.go @@ -4,11 +4,13 @@ package send import ( 
+ "encoding/json" "fmt" "io" "os" "strings" + "github.com/cartesi/rollups-node/internal/cli" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/config/auth" "github.com/cartesi/rollups-node/internal/repository/factory" @@ -47,11 +49,13 @@ cartesi-rollups-cli send echo-dapp "hi" --yes` var ( isHex bool skipConfirmation bool + asJSONParam bool ) func init() { Cmd.Flags().BoolVarP(&isHex, "hex", "x", false, "Force interpretation of payload as hex.") Cmd.Flags().BoolVarP(&skipConfirmation, "yes", "y", false, "Skip confirmation prompt") + Cmd.Flags().BoolVar(&asJSONParam, "json", false, "Print result as JSON") origHelpFunc := Cmd.HelpFunc() Cmd.SetHelpFunc(func(command *cobra.Command, strings []string) { @@ -172,5 +176,17 @@ func run(cmd *cobra.Command, args []string) { inputIndex, blockNumber, err := ethutil.AddInput(ctx, client, txOpts, iboxAddr, app.IApplicationAddress, payload) cobra.CheckErr(err) - fmt.Printf("Input sent to app at %s. Index: %d BlockNumber: %d\n", app.IApplicationAddress, inputIndex, blockNumber) + if asJSONParam { + result := cli.SendResult{ + ApplicationAddress: app.IApplicationAddress.Hex(), + InputIndex: fmt.Sprintf("0x%x", inputIndex), + BlockNumber: fmt.Sprintf("0x%x", blockNumber), + } + jsonBytes, err := json.MarshalIndent(&result, "", " ") + cobra.CheckErr(err) + fmt.Println(string(jsonBytes)) + } else { + fmt.Printf("Input sent to app at %s. 
Index: %d BlockNumber: %d\n", + app.IApplicationAddress, inputIndex, blockNumber) + } } diff --git a/cmd/cartesi-rollups-cli/root/validate/validate.go b/cmd/cartesi-rollups-cli/root/validate/validate.go index d214ff84c..3a0152734 100644 --- a/cmd/cartesi-rollups-cli/root/validate/validate.go +++ b/cmd/cartesi-rollups-cli/root/validate/validate.go @@ -4,9 +4,11 @@ package validate import ( + "encoding/json" "fmt" "os" + "github.com/cartesi/rollups-node/internal/cli" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/repository/factory" "github.com/cartesi/rollups-node/pkg/ethutil" @@ -33,7 +35,11 @@ cartesi-rollups-cli validate echo-dapp 5 # Validates output with index 3 using application address: cartesi-rollups-cli validate 0x1234567890123456789012345678901234567890 3` +var asJSONParam bool + func init() { + Cmd.Flags().BoolVar(&asJSONParam, "json", false, "Print result as JSON") + origHelpFunc := Cmd.HelpFunc() Cmd.SetHelpFunc(func(command *cobra.Command, strings []string) { command.Flags().Lookup("verbose").Hidden = false @@ -81,7 +87,10 @@ func run(cmd *cobra.Command, args []string) { client, err := ethclient.DialContext(ctx, ethEndpoint.String()) cobra.CheckErr(err) - fmt.Printf("Validating output app: %v (%v) output_index: %v\n", app.Name, app.IApplicationAddress, outputIndex) + if !asJSONParam { + fmt.Printf("Validating output app: %v (%v) output_index: %v\n", + app.Name, app.IApplicationAddress, outputIndex) + } err = ethutil.ValidateOutput( ctx, client, @@ -92,5 +101,12 @@ func run(cmd *cobra.Command, args []string) { ) cobra.CheckErr(err) - fmt.Println("Output validated!") + if asJSONParam { + result := cli.ValidateResult{Valid: true} + jsonBytes, err := json.MarshalIndent(&result, "", " ") + cobra.CheckErr(err) + fmt.Println(string(jsonBytes)) + } else { + fmt.Println("Output validated!") + } } diff --git a/internal/advancer/advancer_test.go b/internal/advancer/advancer_test.go index eb390e705..5ef6f60df 100644 
--- a/internal/advancer/advancer_test.go +++ b/internal/advancer/advancer_test.go @@ -1144,8 +1144,8 @@ func (mock *MockMachineManager) Close() error { // MockMachineInstance is a test implementation of manager.MachineInstance type MockMachineInstance struct { - application *Application - machineImpl *MockMachineImpl + application *Application + machineImpl *MockMachineImpl createSnapshotError error } diff --git a/internal/cli/types.go b/internal/cli/types.go new file mode 100644 index 000000000..f832af2ad --- /dev/null +++ b/internal/cli/types.go @@ -0,0 +1,25 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +// Package cli defines the JSON output types used by the CLI commands. +// Types here are CLI-specific (SendResult, ExecuteResult, ValidateResult). +// Response envelope types (ListResponse, SingleResponse, Pagination) live +// in internal/jsonrpc/api. +package cli + +// SendResult is the JSON output of the "send" CLI command. +type SendResult struct { + ApplicationAddress string `json:"application_address"` + InputIndex string `json:"input_index"` + BlockNumber string `json:"block_number"` +} + +// ExecuteResult is the JSON output of the "execute" CLI command. +type ExecuteResult struct { + TransactionHash string `json:"transaction_hash"` +} + +// ValidateResult is the JSON output of the "validate" CLI command. 
+type ValidateResult struct { + Valid bool `json:"valid"` +} diff --git a/internal/jsonrpc/api/decode.go b/internal/jsonrpc/api/decode.go new file mode 100644 index 000000000..6efd1b4de --- /dev/null +++ b/internal/jsonrpc/api/decode.go @@ -0,0 +1,252 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package api + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + "strings" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/common" +) + +// ParseOutputType parses a hex-encoded 4-byte output type selector. +func ParseOutputType(s string) ([]byte, error) { + if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { + s = s[2:] + } + if len(s) != 8 { //nolint: mnd + return []byte{}, fmt.Errorf("invalid output type: expected exactly 4 bytes") + } + b, err := hex.DecodeString(s) + if err != nil { + return []byte{}, err + } + return b, nil +} + +// EvmAdvance represents decoded EvmAdvance input data. +type EvmAdvance struct { + ChainId string `json:"chain_id"` + AppContract string `json:"application_contract"` + MsgSender string `json:"sender"` + BlockNumber string `json:"block_number"` + BlockTimestamp string `json:"block_timestamp"` + PrevRandao string `json:"prev_randao"` + Index string `json:"index"` + Payload string `json:"payload"` +} + +// DecodedInput extends model.Input with ABI-decoded data. +type DecodedInput struct { + *model.Input + DecodedData *EvmAdvance `json:"decoded_data"` +} + +// DecodeInput ABI-decodes a raw input into a DecodedInput. 
+func DecodeInput(input *model.Input, parsedAbi *abi.ABI) (*DecodedInput, error) { + decoded := make(map[string]any) + if len(input.RawData) < 4 { + return &DecodedInput{Input: input}, fmt.Errorf("error: input needs at least 4 bytes") + } + + method, ok := parsedAbi.Methods["EvmAdvance"] + if !ok { + return &DecodedInput{Input: input}, fmt.Errorf("EvmAdvance method not found in ABI") + } + + err := method.Inputs.UnpackIntoMap(decoded, input.RawData[4:]) + if err != nil { + return &DecodedInput{Input: input}, err + } + + chainId, ok1 := decoded["chainId"].(*big.Int) + appContract, ok2 := decoded["appContract"].(common.Address) + msgSender, ok3 := decoded["msgSender"].(common.Address) + blockNumber, ok4 := decoded["blockNumber"].(*big.Int) + blockTimestamp, ok5 := decoded["blockTimestamp"].(*big.Int) + prevRandao, ok6 := decoded["prevRandao"].(*big.Int) + index, ok7 := decoded["index"].(*big.Int) + payload, ok8 := decoded["payload"].([]byte) + if !ok1 || !ok2 || !ok3 || !ok4 || !ok5 || !ok6 || !ok7 || !ok8 { + return &DecodedInput{Input: input}, fmt.Errorf("unable to decode EvmAdvance parameters") + } + + evmAdvance := EvmAdvance{ + ChainId: fmt.Sprintf("0x%x", chainId), + AppContract: appContract.Hex(), + MsgSender: msgSender.Hex(), + BlockNumber: fmt.Sprintf("0x%x", blockNumber), + BlockTimestamp: fmt.Sprintf("0x%x", blockTimestamp), + PrevRandao: fmt.Sprintf("0x%x", prevRandao), + Index: fmt.Sprintf("0x%x", index), + Payload: "0x" + hex.EncodeToString(payload), + } + + return &DecodedInput{ + Input: input, + DecodedData: &evmAdvance, + }, nil +} + +// MarshalJSON produces a flat JSON object merging Input fields with decoded_data. 
+func (d *DecodedInput) MarshalJSON() ([]byte, error) { + inputJSON, err := d.Input.MarshalJSON() + if err != nil { + return nil, err + } + if d.DecodedData == nil { + return inputJSON, nil + } + return mergeJSONField(inputJSON, "decoded_data", d.DecodedData) +} + +// DecodedData is the single canonical type for decoded output data, used by +// both the server (JSON-RPC API) and client (CLI) sides. It captures the +// superset of fields across all output types (Notice, Voucher, +// DelegateCallVoucher). Use the Type field to discriminate: +// +// - "Notice": Payload is set. +// - "Voucher": Destination, Value, and Payload are set. +// - "DelegateCallVoucher": Destination and Payload are set. +// - Any other value: RawData contains the hex-encoded raw output. +// +// Use the constructor functions (NewNoticeData, NewVoucherData, +// NewDelegateCallVoucherData, NewUnknownData) to create instances with the +// correct fields populated. +type DecodedData struct { + Type string `json:"type"` + Payload string `json:"payload,omitempty"` + Destination string `json:"destination,omitempty"` + Value string `json:"value,omitempty"` + RawData string `json:"raw_data,omitempty"` +} + +// NewNoticeData creates a DecodedData for a Notice output. +func NewNoticeData(payload string) *DecodedData { + return &DecodedData{Type: "Notice", Payload: payload} +} + +// NewVoucherData creates a DecodedData for a Voucher output. +func NewVoucherData(destination, value, payload string) *DecodedData { + return &DecodedData{ + Type: "Voucher", + Destination: destination, + Value: value, + Payload: payload, + } +} + +// NewDelegateCallVoucherData creates a DecodedData for a DelegateCallVoucher output. +func NewDelegateCallVoucherData(destination, payload string) *DecodedData { + return &DecodedData{ + Type: "DelegateCallVoucher", + Destination: destination, + Payload: payload, + } +} + +// NewUnknownData creates a DecodedData for an unrecognized output type. 
+func NewUnknownData(typeName, rawData string) *DecodedData { + return &DecodedData{Type: typeName, RawData: rawData} +} + +// DecodedOutput extends model.Output with ABI-decoded data. +type DecodedOutput struct { + *model.Output + DecodedData *DecodedData `json:"decoded_data"` +} + +// DecodeOutput ABI-decodes a raw output into a DecodedOutput. +func DecodeOutput(output *model.Output, parsedAbi *abi.ABI) (*DecodedOutput, error) { + decodedOutput := &DecodedOutput{Output: output} + if len(output.RawData) < 4 { + return decodedOutput, fmt.Errorf("raw data too short") + } + method, err := parsedAbi.MethodById(output.RawData[:4]) + if err != nil { + return decodedOutput, err + } + decoded := make(map[string]any) + if err := method.Inputs.UnpackIntoMap(decoded, output.RawData[4:]); err != nil { + return decodedOutput, fmt.Errorf("failed to unpack %s: %w", method.Name, err) + } + var result *DecodedData + switch method.Name { + case "Notice": + payload, ok := decoded["payload"].([]byte) + if !ok { + return decodedOutput, fmt.Errorf("unable to decode Notice payload") + } + result = NewNoticeData("0x" + hex.EncodeToString(payload)) + case "Voucher": + dest, ok1 := decoded["destination"].(common.Address) + value, ok2 := decoded["value"].(*big.Int) + payload, ok3 := decoded["payload"].([]byte) + if !ok1 || !ok2 || !ok3 { + return decodedOutput, fmt.Errorf("unable to decode Voucher parameters") + } + result = NewVoucherData( + dest.Hex(), + fmt.Sprintf("0x%x", value), + "0x"+hex.EncodeToString(payload), + ) + case "DelegateCallVoucher": + dest, ok1 := decoded["destination"].(common.Address) + payload, ok2 := decoded["payload"].([]byte) + if !ok1 || !ok2 { + return decodedOutput, fmt.Errorf("unable to decode DelegateCallVoucher parameters") + } + result = NewDelegateCallVoucherData( + dest.Hex(), + "0x"+hex.EncodeToString(payload), + ) + default: + result = NewUnknownData(method.Name, "0x"+hex.EncodeToString(output.RawData)) + } + decodedOutput.DecodedData = result + return 
decodedOutput, nil +} + +// MarshalJSON produces a flat JSON object merging Output fields with decoded_data. +func (d *DecodedOutput) MarshalJSON() ([]byte, error) { + outputJSON, err := d.Output.MarshalJSON() + if err != nil { + return nil, err + } + if d.DecodedData == nil { + return outputJSON, nil + } + return mergeJSONField(outputJSON, "decoded_data", d.DecodedData) +} + +// mergeJSONField appends a key-value pair to an existing JSON object. +// The base must be a valid JSON object (ending with '}'). The value is +// marshaled and appended as a new field. +func mergeJSONField(base []byte, key string, value any) ([]byte, error) { + if len(base) == 0 || base[len(base)-1] != '}' { + return nil, fmt.Errorf("mergeJSONField: base is not a valid JSON object") + } + fieldJSON, err := json.Marshal(value) + if err != nil { + return nil, err + } + trimmed := bytes.TrimSuffix(base, []byte("}")) + var buf bytes.Buffer + buf.Write(trimmed) + if len(trimmed) > 0 && trimmed[len(trimmed)-1] != '{' { + buf.WriteByte(',') + } + buf.WriteByte('"') + buf.WriteString(key) + buf.WriteString(`":`) + buf.Write(fieldJSON) + buf.WriteByte('}') + return buf.Bytes(), nil +} diff --git a/internal/jsonrpc/api/params.go b/internal/jsonrpc/api/params.go new file mode 100644 index 000000000..d3b743cae --- /dev/null +++ b/internal/jsonrpc/api/params.go @@ -0,0 +1,165 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package api + +// ListApplicationsParams aligns with the OpenRPC specification +type ListApplicationsParams struct { + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetApplicationParams aligns with the OpenRPC specification +type GetApplicationParams struct { + Application string `json:"application"` +} + +// ListEpochsParams aligns with the OpenRPC specification +type ListEpochsParams struct { + Application string `json:"application"` + Status 
*string `json:"status,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetEpochParams aligns with the OpenRPC specification +type GetEpochParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` +} + +// GetLastAcceptedEpochIndexParams with the OpenRPC specification +type GetLastAcceptedEpochIndexParams struct { + Application string `json:"application"` +} + +// ListInputsParams aligns with the OpenRPC specification +type ListInputsParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + Sender *string `json:"sender,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetInputParams aligns with the OpenRPC specification +type GetInputParams struct { + Application string `json:"application"` + InputIndex string `json:"input_index"` +} + +// GetProcessedInputCountParams aligns with the OpenRPC specification +type GetProcessedInputCountParams struct { + Application string `json:"application"` +} + +// ListOutputsParams aligns with the OpenRPC specification +type ListOutputsParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + InputIndex *string `json:"input_index,omitempty"` + OutputType *string `json:"output_type,omitempty"` + VoucherAddress *string `json:"voucher_address,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetOutputParams aligns with the OpenRPC specification +type GetOutputParams struct { + Application string `json:"application"` + OutputIndex string `json:"output_index"` +} + +// ListReportsParams aligns with the OpenRPC specification +type ListReportsParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + 
InputIndex *string `json:"input_index,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetReportParams aligns with the OpenRPC specification +type GetReportParams struct { + Application string `json:"application"` + ReportIndex string `json:"report_index"` +} + +// ListTournamentsParams aligns with the OpenRPC specification +type ListTournamentsParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + Level *string `json:"level,omitempty"` + ParentTournamentAddress *string `json:"parent_tournament_address,omitempty"` + ParentMatchIDHash *string `json:"parent_match_id_hash,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetTournamentParams aligns with the OpenRPC specification +type GetTournamentParams struct { + Application string `json:"application"` + Address string `json:"address"` +} + +// ListCommitmentsParams aligns with the OpenRPC specification +type ListCommitmentsParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + TournamentAddress *string `json:"tournament_address,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetCommitmentParams aligns with the OpenRPC specification +type GetCommitmentParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` + TournamentAddress string `json:"tournament_address"` + Commitment string `json:"commitment"` +} + +// ListMatchesParams aligns with the OpenRPC specification +type ListMatchesParams struct { + Application string `json:"application"` + EpochIndex *string `json:"epoch_index,omitempty"` + TournamentAddress *string `json:"tournament_address,omitempty"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + 
Descending bool `json:"descending,omitempty"` +} + +// GetMatchParams aligns with the OpenRPC specification +type GetMatchParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` + TournamentAddress string `json:"tournament_address"` + IDHash string `json:"id_hash"` +} + +// ListMatchAdvancesParams aligns with the OpenRPC specification +type ListMatchAdvancesParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` + TournamentAddress string `json:"tournament_address"` + IDHash string `json:"id_hash"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` + Descending bool `json:"descending,omitempty"` +} + +// GetMatchAdvancedParams aligns with the OpenRPC specification +type GetMatchAdvancedParams struct { + Application string `json:"application"` + EpochIndex string `json:"epoch_index"` + TournamentAddress string `json:"tournament_address"` + IDHash string `json:"id_hash"` + Parent string `json:"parent"` +} diff --git a/internal/jsonrpc/api/response.go b/internal/jsonrpc/api/response.go new file mode 100644 index 000000000..8f974a8ad --- /dev/null +++ b/internal/jsonrpc/api/response.go @@ -0,0 +1,22 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package api + +// Pagination represents common pagination structure used in responses. +type Pagination struct { + TotalCount uint64 `json:"total_count"` + Limit uint64 `json:"limit"` + Offset uint64 `json:"offset"` +} + +// ListResponse is the generic envelope for list endpoints. +type ListResponse[T any] struct { + Data []T `json:"data"` + Pagination Pagination `json:"pagination"` +} + +// SingleResponse is the generic envelope for get endpoints. 
+type SingleResponse[T any] struct { + Data T `json:"data"` +} diff --git a/internal/jsonrpc/jsonrpc.go b/internal/jsonrpc/jsonrpc.go index 26752d53e..c766e90a8 100644 --- a/internal/jsonrpc/jsonrpc.go +++ b/internal/jsonrpc/jsonrpc.go @@ -5,16 +5,15 @@ package jsonrpc import ( "embed" - "encoding/hex" "encoding/json" "errors" "fmt" "io" "net/http" - "strings" "github.com/cartesi/rollups-node/internal/config" "github.com/cartesi/rollups-node/internal/evmreader" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" "github.com/cartesi/rollups-node/internal/version" @@ -119,7 +118,7 @@ func handleDiscover(s *Service, w http.ResponseWriter, _ *http.Request, req RPCR } func handleListApplications(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params ListApplicationsParams + var params api.ListApplicationsParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -147,32 +146,18 @@ func handleListApplications(s *Service, w http.ResponseWriter, r *http.Request, apps = []*model.Application{} } - // Create result with proper pagination format per spec - result := struct { - Data []*model.Application `json:"data"` - Pagination struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - } `json:"pagination"` - }{ + writeRPCResult(w, req.ID, api.ListResponse[*model.Application]{ Data: apps, - Pagination: struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - }{ + Pagination: api.Pagination{ TotalCount: total, Limit: params.Limit, Offset: params.Offset, }, - } - - writeRPCResult(w, req.ID, result) + }) } func handleGetApplication(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - 
var params GetApplicationParams + var params api.GetApplicationParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -196,18 +181,11 @@ func handleGetApplication(s *Service, w http.ResponseWriter, r *http.Request, re return } - // Return in the format specified in the OpenRPC spec - result := struct { - Data *model.Application `json:"data"` - }{ - Data: app, - } - - writeRPCResult(w, req.ID, result) + writeRPCResult(w, req.ID, api.SingleResponse[*model.Application]{Data: app}) } func handleListEpochs(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params ListEpochsParams + var params api.ListEpochsParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -256,32 +234,18 @@ func handleListEpochs(s *Service, w http.ResponseWriter, r *http.Request, req RP epochs = []*model.Epoch{} } - // Format response according to spec - result := struct { - Data []*model.Epoch `json:"data"` - Pagination struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - } `json:"pagination"` - }{ + writeRPCResult(w, req.ID, api.ListResponse[*model.Epoch]{ Data: epochs, - Pagination: struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - }{ + Pagination: api.Pagination{ TotalCount: total, Limit: params.Limit, Offset: params.Offset, }, - } - - writeRPCResult(w, req.ID, result) + }) } func handleGetEpoch(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params GetEpochParams + var params api.GetEpochParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, 
"Invalid parameters", nil) @@ -311,18 +275,11 @@ func handleGetEpoch(s *Service, w http.ResponseWriter, r *http.Request, req RPCR return } - // Format response according to spec - result := struct { - Data *model.Epoch `json:"data"` - }{ - Data: epoch, - } - - writeRPCResult(w, req.ID, result) + writeRPCResult(w, req.ID, api.SingleResponse[*model.Epoch]{Data: epoch}) } func handleGetLastAcceptedEpochIndex(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params GetLastAcceptedEpochIndexParams + var params api.GetLastAcceptedEpochIndexParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -346,18 +303,11 @@ func handleGetLastAcceptedEpochIndex(s *Service, w http.ResponseWriter, r *http. return } - // Format response according to spec - result := struct { - Data string `json:"data"` - }{ - Data: fmt.Sprintf("0x%x", index), - } - - writeRPCResult(w, req.ID, result) + writeRPCResult(w, req.ID, api.SingleResponse[string]{Data: fmt.Sprintf("0x%x", index)}) } func handleListInputs(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params ListInputsParams + var params api.ListInputsParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -413,41 +363,27 @@ func handleListInputs(s *Service, w http.ResponseWriter, r *http.Request, req RP return } - resultInputs := make([]*DecodedInput, 0, len(inputs)) + resultInputs := make([]*api.DecodedInput, 0, len(inputs)) for _, in := range inputs { - decoded, err := DecodeInput(in, s.inputABI) + decoded, err := api.DecodeInput(in, s.inputABI) if err != nil { s.Logger.Error("Unable to decode Input", "app", params.Application, "index", in.Index, "err", err) } resultInputs = append(resultInputs, decoded) } - // Format 
response according to spec - result := struct { - Data []*DecodedInput `json:"data"` - Pagination struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - } `json:"pagination"` - }{ + writeRPCResult(w, req.ID, api.ListResponse[*api.DecodedInput]{ Data: resultInputs, - Pagination: struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - }{ + Pagination: api.Pagination{ TotalCount: total, Limit: params.Limit, Offset: params.Offset, }, - } - - writeRPCResult(w, req.ID, result) + }) } func handleGetInput(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params GetInputParams + var params api.GetInputParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -477,23 +413,16 @@ func handleGetInput(s *Service, w http.ResponseWriter, r *http.Request, req RPCR return } - decoded, err := DecodeInput(input, s.inputABI) + decoded, err := api.DecodeInput(input, s.inputABI) if err != nil { s.Logger.Error("Unable to decode Input", "app", params.Application, "index", input.Index, "err", err) } - // Format response according to spec - response := struct { - Data *DecodedInput `json:"data"` - }{ - Data: decoded, - } - - writeRPCResult(w, req.ID, response) + writeRPCResult(w, req.ID, api.SingleResponse[*api.DecodedInput]{Data: decoded}) } func handleGetProcessedInputCount(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params GetApplicationParams + var params api.GetApplicationParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -517,33 +446,11 @@ func handleGetProcessedInputCount(s *Service, w http.ResponseWriter, r *http.Req return } - // Return 
processed input count as specified in the spec - result := struct { - ProcessedInputs string `json:"data"` - }{ - ProcessedInputs: fmt.Sprintf("0x%x", processedInputs), - } - - writeRPCResult(w, req.ID, result) -} - -func ParseOutputType(s string) ([]byte, error) { - if strings.HasPrefix(s, "0x") || strings.HasPrefix(s, "0X") { - s = s[2:] - } - if len(s) != 8 { //nolint: mnd - return []byte{}, fmt.Errorf("invalid output type: expected exactly 4 bytes") - } - // Decode the hex string into bytes. - b, err := hex.DecodeString(s) - if err != nil { - return []byte{}, err - } - return b, nil + writeRPCResult(w, req.ID, api.SingleResponse[string]{Data: fmt.Sprintf("0x%x", processedInputs)}) } func handleListOutputs(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params ListOutputsParams + var params api.ListOutputsParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -587,7 +494,7 @@ func handleListOutputs(s *Service, w http.ResponseWriter, r *http.Request, req R // Add output type filter if provided if params.OutputType != nil { - outputType, err := ParseOutputType(*params.OutputType) + outputType, err := api.ParseOutputType(*params.OutputType) if err != nil { writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, fmt.Sprintf("Invalid output type: %v", err), nil) return @@ -615,9 +522,9 @@ func handleListOutputs(s *Service, w http.ResponseWriter, r *http.Request, req R return } - resultOutputs := make([]*DecodedOutput, 0, len(outputs)) + resultOutputs := make([]*api.DecodedOutput, 0, len(outputs)) for _, out := range outputs { - decoded, err := DecodeOutput(out, s.outputABI) + decoded, err := api.DecodeOutput(out, s.outputABI) if err != nil { s.Logger.Error("Unable to decode Output", "app", params.Application, "index", out.Index, "err", err) } @@ -628,32 +535,18 @@ func handleListOutputs(s *Service, w 
http.ResponseWriter, r *http.Request, req R return } - // Format response according to spec - result := struct { - Data []*DecodedOutput `json:"data"` - Pagination struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - } `json:"pagination"` - }{ + writeRPCResult(w, req.ID, api.ListResponse[*api.DecodedOutput]{ Data: resultOutputs, - Pagination: struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - }{ + Pagination: api.Pagination{ TotalCount: total, Limit: params.Limit, Offset: params.Offset, }, - } - - writeRPCResult(w, req.ID, result) + }) } func handleGetOutput(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params GetOutputParams + var params api.GetOutputParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -683,23 +576,16 @@ func handleGetOutput(s *Service, w http.ResponseWriter, r *http.Request, req RPC return } - decoded, err := DecodeOutput(output, s.outputABI) + decoded, err := api.DecodeOutput(output, s.outputABI) if err != nil { s.Logger.Error("Unable to decode Output", "app", params.Application, "index", output.Index, "err", err) } - // Format response according to spec - response := struct { - Data *DecodedOutput `json:"data"` - }{ - Data: decoded, - } - - writeRPCResult(w, req.ID, response) + writeRPCResult(w, req.ID, api.SingleResponse[*api.DecodedOutput]{Data: decoded}) } func handleListReports(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params ListReportsParams + var params api.ListReportsParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -758,32 +644,18 @@ func handleListReports(s *Service, w 
http.ResponseWriter, r *http.Request, req R reports = []*model.Report{} } - // Format response according to spec - result := struct { - Data []*model.Report `json:"data"` - Pagination struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - } `json:"pagination"` - }{ + writeRPCResult(w, req.ID, api.ListResponse[*model.Report]{ Data: reports, - Pagination: struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - }{ + Pagination: api.Pagination{ TotalCount: total, Limit: params.Limit, Offset: params.Offset, }, - } - - writeRPCResult(w, req.ID, result) + }) } func handleGetReport(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params GetReportParams + var params api.GetReportParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -813,18 +685,11 @@ func handleGetReport(s *Service, w http.ResponseWriter, r *http.Request, req RPC return } - // Format response according to spec - response := struct { - Data *model.Report `json:"data"` - }{ - Data: report, - } - - writeRPCResult(w, req.ID, response) + writeRPCResult(w, req.ID, api.SingleResponse[*model.Report]{Data: report}) } func handleListTournaments(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params ListTournamentsParams + var params api.ListTournamentsParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -898,32 +763,18 @@ func handleListTournaments(s *Service, w http.ResponseWriter, r *http.Request, r tournaments = []*model.Tournament{} } - // Format response according to spec - result := struct { - Data []*model.Tournament `json:"data"` - Pagination struct { - TotalCount 
uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - } `json:"pagination"` - }{ + writeRPCResult(w, req.ID, api.ListResponse[*model.Tournament]{ Data: tournaments, - Pagination: struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - }{ + Pagination: api.Pagination{ TotalCount: total, Limit: params.Limit, Offset: params.Offset, }, - } - - writeRPCResult(w, req.ID, result) + }) } func handleGetTournament(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params GetTournamentParams + var params api.GetTournamentParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -953,18 +804,11 @@ func handleGetTournament(s *Service, w http.ResponseWriter, r *http.Request, req return } - // Format response according to spec - response := struct { - Data *model.Tournament `json:"data"` - }{ - Data: tournament, - } - - writeRPCResult(w, req.ID, response) + writeRPCResult(w, req.ID, api.SingleResponse[*model.Tournament]{Data: tournament}) } func handleListCommitments(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params ListCommitmentsParams + var params api.ListCommitmentsParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -1021,32 +865,18 @@ func handleListCommitments(s *Service, w http.ResponseWriter, r *http.Request, r commitments = []*model.Commitment{} } - // Format response according to spec - result := struct { - Data []*model.Commitment `json:"data"` - Pagination struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - } `json:"pagination"` - }{ + writeRPCResult(w, req.ID, 
api.ListResponse[*model.Commitment]{ Data: commitments, - Pagination: struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - }{ + Pagination: api.Pagination{ TotalCount: total, Limit: params.Limit, Offset: params.Offset, }, - } - - writeRPCResult(w, req.ID, result) + }) } func handleGetCommitment(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params GetCommitmentParams + var params api.GetCommitmentParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -1090,18 +920,11 @@ func handleGetCommitment(s *Service, w http.ResponseWriter, r *http.Request, req return } - // Format response according to spec - response := struct { - Data *model.Commitment `json:"data"` - }{ - Data: commitment, - } - - writeRPCResult(w, req.ID, response) + writeRPCResult(w, req.ID, api.SingleResponse[*model.Commitment]{Data: commitment}) } func handleListMatches(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params ListMatchesParams + var params api.ListMatchesParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -1158,32 +981,18 @@ func handleListMatches(s *Service, w http.ResponseWriter, r *http.Request, req R matches = []*model.Match{} } - // Format response according to spec - result := struct { - Data []*model.Match `json:"data"` - Pagination struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - } `json:"pagination"` - }{ + writeRPCResult(w, req.ID, api.ListResponse[*model.Match]{ Data: matches, - Pagination: struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - }{ + Pagination: 
api.Pagination{ TotalCount: total, Limit: params.Limit, Offset: params.Offset, }, - } - - writeRPCResult(w, req.ID, result) + }) } func handleGetMatch(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params GetMatchParams + var params api.GetMatchParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -1223,18 +1032,11 @@ func handleGetMatch(s *Service, w http.ResponseWriter, r *http.Request, req RPCR return } - // Format response according to spec - response := struct { - Data *model.Match `json:"data"` - }{ - Data: match, - } - - writeRPCResult(w, req.ID, response) + writeRPCResult(w, req.ID, api.SingleResponse[*model.Match]{Data: match}) } func handleListMatchAdvances(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { - var params ListMatchAdvancesParams + var params api.ListMatchAdvancesParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -1291,32 +1093,18 @@ func handleListMatchAdvances(s *Service, w http.ResponseWriter, r *http.Request, matchAdvances = []*model.MatchAdvanced{} } - // Format response according to spec - result := struct { - Data []*model.MatchAdvanced `json:"data"` - Pagination struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - } `json:"pagination"` - }{ + writeRPCResult(w, req.ID, api.ListResponse[*model.MatchAdvanced]{ Data: matchAdvances, - Pagination: struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - }{ + Pagination: api.Pagination{ TotalCount: total, Limit: params.Limit, Offset: params.Offset, }, - } - - writeRPCResult(w, req.ID, result) + }) } func handleGetMatchAdvanced(s *Service, w 
http.ResponseWriter, r *http.Request, req RPCRequest) { - var params GetMatchAdvancedParams + var params api.GetMatchAdvancedParams if err := UnmarshalParams(req.Params, ¶ms); err != nil { s.Logger.Debug("Invalid parameters", "err", err) writeRPCError(w, req.ID, JSONRPC_INVALID_PARAMS, "Invalid parameters", nil) @@ -1362,14 +1150,7 @@ func handleGetMatchAdvanced(s *Service, w http.ResponseWriter, r *http.Request, return } - // Format response according to spec - response := struct { - Data *model.MatchAdvanced `json:"data"` - }{ - Data: matchAdvanced, - } - - writeRPCResult(w, req.ID, response) + writeRPCResult(w, req.ID, api.SingleResponse[*model.MatchAdvanced]{Data: matchAdvanced}) } func handleGetChainID(s *Service, w http.ResponseWriter, r *http.Request, req RPCRequest) { @@ -1384,23 +1165,11 @@ func handleGetChainID(s *Service, w http.ResponseWriter, r *http.Request, req RP return } - result := struct { - Data string `json:"data"` - }{ - Data: fmt.Sprintf("0x%x", config.Value.ChainID), - } - - writeRPCResult(w, req.ID, result) + writeRPCResult(w, req.ID, api.SingleResponse[string]{Data: fmt.Sprintf("0x%x", config.Value.ChainID)}) } func handleGetNodeVersion(_ *Service, w http.ResponseWriter, _ *http.Request, req RPCRequest) { - result := struct { - Data string `json:"data"` - }{ - Data: version.BuildVersion, - } - - writeRPCResult(w, req.ID, result) + writeRPCResult(w, req.ID, api.SingleResponse[string]{Data: version.BuildVersion}) } func (s *Service) applicationAbsentOrError( diff --git a/internal/jsonrpc/jsonrpc_test.go b/internal/jsonrpc/jsonrpc_test.go index a06d52d92..ff77c9add 100644 --- a/internal/jsonrpc/jsonrpc_test.go +++ b/internal/jsonrpc/jsonrpc_test.go @@ -26,6 +26,7 @@ import ( "testing" "github.com/cartesi/rollups-node/internal/evmreader" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" "github.com/cartesi/rollups-node/internal/model" "github.com/cartesi/rollups-node/internal/repository" 
"github.com/cartesi/rollups-node/internal/repository/repotest" @@ -585,7 +586,7 @@ func TestMethod(t *testing.T) { Index hex64 `json:"index"` RawData string `json:"raw_data"` // hex encoded - Voucher *Voucher `json:"decoded_data,omitempty"` + DecodedData *api.DecodedData `json:"decoded_data,omitempty"` // ... (ignore the rest of fields for test } @@ -594,7 +595,7 @@ func TestMethod(t *testing.T) { assert.Nil(t, json.Unmarshal(body, &resp)) assert.Equal(t, inr, uint64(resp.Result.Data.InputIndex)) assert.Equal(t, onr, uint64(resp.Result.Data.Index)) - assert.Equal(t, "0xdeadbeef", resp.Result.Data.Voucher.Value) + assert.Equal(t, "0xdeadbeef", resp.Result.Data.DecodedData.Value) }) }) @@ -1201,7 +1202,7 @@ func TestMethod(t *testing.T) { Index hex64 `json:"index"` RawData string `json:"raw_data"` // hex encoded - Voucher *Voucher `json:"decoded_data,omitempty"` + DecodedData *api.DecodedData `json:"decoded_data,omitempty"` // ... (ignore the rest of fields for test } diff --git a/internal/jsonrpc/types.go b/internal/jsonrpc/types.go index bc0a1da02..46607995e 100644 --- a/internal/jsonrpc/types.go +++ b/internal/jsonrpc/types.go @@ -5,18 +5,13 @@ package jsonrpc import ( "bytes" - "encoding/hex" "encoding/json" "fmt" - "math/big" "net/http" "reflect" "regexp" "github.com/cartesi/rollups-node/internal/config" - "github.com/cartesi/rollups-node/internal/model" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/common" ) // ----------------------------------------------------------------------------- @@ -120,7 +115,7 @@ func UnmarshalParams(data json.RawMessage, target any) error { } // ----------------------------------------------------------------------------- -// Parameter and Result types (API) +// Validation helpers (server-only) // ----------------------------------------------------------------------------- var hexAddressRegex = regexp.MustCompile(`^0x[0-9a-fA-F]{40}$`) @@ -136,358 +131,3 @@ func 
validateNameOrAddress(nameOrAddress string) error { } return fmt.Errorf("invalid application name") } - -// Pagination represents common pagination structure used in responses -type Pagination struct { - TotalCount uint64 `json:"total_count"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` -} - -// ListApplicationsParams aligns with the OpenRPC specification -type ListApplicationsParams struct { - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - Descending bool `json:"descending,omitempty"` -} - -// GetApplicationParams aligns with the OpenRPC specification -type GetApplicationParams struct { - Application string `json:"application"` -} - -// ListEpochsParams aligns with the OpenRPC specification -type ListEpochsParams struct { - Application string `json:"application"` - Status *string `json:"status,omitempty"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - Descending bool `json:"descending,omitempty"` -} - -// GetEpochParams aligns with the OpenRPC specification -type GetEpochParams struct { - Application string `json:"application"` - EpochIndex string `json:"epoch_index"` -} - -// GetLastAcceptedEpochIndexParams with the OpenRPC specification -type GetLastAcceptedEpochIndexParams struct { - Application string `json:"application"` -} - -// ListInputsParams aligns with the OpenRPC specification -type ListInputsParams struct { - Application string `json:"application"` - EpochIndex *string `json:"epoch_index,omitempty"` - Sender *string `json:"sender,omitempty"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - Descending bool `json:"descending,omitempty"` -} - -// GetInputParams aligns with the OpenRPC specification -type GetInputParams struct { - Application string `json:"application"` - InputIndex string `json:"input_index"` -} - -// GetProcessedInputCountParams aligns with the OpenRPC specification -type GetProcessedInputCountParams struct { - Application string `json:"application"` -} - -// 
ListOutputsParams aligns with the OpenRPC specification -type ListOutputsParams struct { - Application string `json:"application"` - EpochIndex *string `json:"epoch_index,omitempty"` - InputIndex *string `json:"input_index,omitempty"` - OutputType *string `json:"output_type,omitempty"` - VoucherAddress *string `json:"voucher_address,omitempty"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - Descending bool `json:"descending,omitempty"` -} - -// GetOutputParams aligns with the OpenRPC specification -type GetOutputParams struct { - Application string `json:"application"` - OutputIndex string `json:"output_index"` -} - -// ListReportsParams aligns with the OpenRPC specification -type ListReportsParams struct { - Application string `json:"application"` - EpochIndex *string `json:"epoch_index,omitempty"` - InputIndex *string `json:"input_index,omitempty"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - Descending bool `json:"descending,omitempty"` -} - -// GetReportParams aligns with the OpenRPC specification -type GetReportParams struct { - Application string `json:"application"` - ReportIndex string `json:"report_index"` -} - -// ListTournamentsParams aligns with the OpenRPC specification -type ListTournamentsParams struct { - Application string `json:"application"` - EpochIndex *string `json:"epoch_index,omitempty"` - Level *string `json:"level,omitempty"` - ParentTournamentAddress *string `json:"parent_tournament_address,omitempty"` - ParentMatchIDHash *string `json:"parent_match_id_hash,omitempty"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - Descending bool `json:"descending,omitempty"` -} - -// GetTournamentParams aligns with the OpenRPC specification -type GetTournamentParams struct { - Application string `json:"application"` - Address string `json:"address"` -} - -// ListCommitmentsParams aligns with the OpenRPC specification -type ListCommitmentsParams struct { - Application string `json:"application"` - 
EpochIndex *string `json:"epoch_index,omitempty"` - TournamentAddress *string `json:"tournament_address,omitempty"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - Descending bool `json:"descending,omitempty"` -} - -// GetCommitmentParams aligns with the OpenRPC specification -type GetCommitmentParams struct { - Application string `json:"application"` - EpochIndex string `json:"epoch_index"` - TournamentAddress string `json:"tournament_address"` - Commitment string `json:"commitment"` -} - -// ListMatchesParams aligns with the OpenRPC specification -type ListMatchesParams struct { - Application string `json:"application"` - EpochIndex *string `json:"epoch_index,omitempty"` - TournamentAddress *string `json:"tournament_address,omitempty"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - Descending bool `json:"descending,omitempty"` -} - -// GetMatchParams aligns with the OpenRPC specification -type GetMatchParams struct { - Application string `json:"application"` - EpochIndex string `json:"epoch_index"` - TournamentAddress string `json:"tournament_address"` - IDHash string `json:"id_hash"` -} - -// ListMatchAdvancesParams aligns with the OpenRPC specification -type ListMatchAdvancesParams struct { - Application string `json:"application"` - EpochIndex string `json:"epoch_index,omitempty"` - TournamentAddress string `json:"tournament_address,omitempty"` - IDHash string `json:"id_hash"` - Limit uint64 `json:"limit"` - Offset uint64 `json:"offset"` - Descending bool `json:"descending,omitempty"` -} - -// GetMatchAdvancedParams aligns with the OpenRPC specification -type GetMatchAdvancedParams struct { - Application string `json:"application"` - EpochIndex string `json:"epoch_index"` - TournamentAddress string `json:"tournament_address"` - IDHash string `json:"id_hash"` - Parent string `json:"parent"` -} - -// ----------------------------------------------------------------------------- -// ABI Decoding helpers (provided code) -// 
----------------------------------------------------------------------------- - -type EvmAdvance struct { - ChainId string `json:"chain_id"` - AppContract string `json:"application_contract"` - MsgSender string `json:"sender"` - BlockNumber string `json:"block_number"` - BlockTimestamp string `json:"block_timestamp"` - PrevRandao string `json:"prev_randao"` - Index string `json:"index"` - Payload string `json:"payload"` -} - -type DecodedInput struct { - *model.Input - DecodedData *EvmAdvance `json:"decoded_data"` -} - -func DecodeInput(input *model.Input, parsedAbi *abi.ABI) (*DecodedInput, error) { - decoded := make(map[string]any) - if len(input.RawData) < 4 { - return nil, fmt.Errorf("error: input needs at least 4 bytes") - } - - err := parsedAbi.Methods["EvmAdvance"].Inputs.UnpackIntoMap(decoded, input.RawData[4:]) - if err != nil { - return &DecodedInput{Input: input}, err - } - - evmAdvance := EvmAdvance{ - ChainId: fmt.Sprintf("0x%x", decoded["chainId"].(*big.Int)), - AppContract: decoded["appContract"].(common.Address).Hex(), - MsgSender: decoded["msgSender"].(common.Address).Hex(), - BlockNumber: fmt.Sprintf("0x%x", decoded["blockNumber"].(*big.Int)), - BlockTimestamp: fmt.Sprintf("0x%x", decoded["blockTimestamp"].(*big.Int)), - PrevRandao: fmt.Sprintf("0x%x", decoded["prevRandao"].(*big.Int)), - Index: fmt.Sprintf("0x%x", decoded["index"].(*big.Int)), - Payload: "0x" + hex.EncodeToString(decoded["payload"].([]byte)), - } - - return &DecodedInput{ - Input: input, - DecodedData: &evmAdvance, - }, nil -} - -func (d *DecodedInput) MarshalJSON() ([]byte, error) { - // Marshal the underlying Input using its custom MarshalJSON. - inputJSON, err := d.Input.MarshalJSON() - if err != nil { - return nil, err - } - // Ensure inputJSON is a valid JSON object. 
- if len(inputJSON) == 0 || inputJSON[len(inputJSON)-1] != '}' { - return nil, fmt.Errorf("unexpected format from Input.MarshalJSON") - } - - if d.DecodedData == nil { - return inputJSON, nil - } - // Marshal the DecodedData field. - decodedDataJSON, err := json.Marshal(d.DecodedData) - if err != nil { - return nil, err - } - // Use a bytes.Buffer to build the final JSON. - var buf bytes.Buffer - buf.Write(bytes.TrimSuffix(inputJSON, []byte("}"))) - buf.WriteString(`,"decoded_data":`) - buf.Write(decodedDataJSON) - buf.WriteByte('}') - - return buf.Bytes(), nil -} - -type Notice struct { - Type string `json:"type"` - Payload string `json:"payload"` -} - -type Voucher struct { - Type string `json:"type"` - Destination string `json:"destination"` - Value string `json:"value"` - Payload string `json:"payload"` -} - -type DelegateCallVoucher struct { - Type string `json:"type"` - Destination string `json:"destination"` - Payload string `json:"payload"` -} - -type DecodedOutput struct { - *model.Output - DecodedData any `json:"decoded_data"` -} - -func DecodeOutput(output *model.Output, parsedAbi *abi.ABI) (*DecodedOutput, error) { - decodedOutput := &DecodedOutput{Output: output} - if len(output.RawData) < 4 { - return decodedOutput, fmt.Errorf("raw data too short") - } - method, err := parsedAbi.MethodById(output.RawData[:4]) - if err != nil { - return decodedOutput, err - } - decoded := make(map[string]any) - if err := method.Inputs.UnpackIntoMap(decoded, output.RawData[4:]); err != nil { - return decodedOutput, fmt.Errorf("failed to unpack %s: %w", method.Name, err) - } - var result any - switch method.Name { - case "Notice": - payload, ok := decoded["payload"].([]byte) - if !ok { - return decodedOutput, fmt.Errorf("unable to decode Notice payload") - } - result = Notice{ - Type: "Notice", - Payload: "0x" + hex.EncodeToString(payload), - } - case "Voucher": - dest, ok1 := decoded["destination"].(common.Address) - value, ok2 := decoded["value"].(*big.Int) - payload, 
ok3 := decoded["payload"].([]byte) - if !ok1 || !ok2 || !ok3 { - return decodedOutput, fmt.Errorf("unable to decode Voucher parameters") - } - result = Voucher{ - Type: "Voucher", - Destination: dest.Hex(), - Value: fmt.Sprintf("0x%x", value), - Payload: "0x" + hex.EncodeToString(payload), - } - case "DelegateCallVoucher": - dest, ok1 := decoded["destination"].(common.Address) - payload, ok2 := decoded["payload"].([]byte) - if !ok1 || !ok2 { - return decodedOutput, fmt.Errorf("unable to decode DelegateCallVoucher parameters") - } - result = DelegateCallVoucher{ - Type: "DelegateCallVoucher", - Destination: dest.Hex(), - Payload: "0x" + hex.EncodeToString(payload), - } - default: - result = map[string]any{ - "type": method.Name, - "rawData": "0x" + hex.EncodeToString(output.RawData), - } - } - decodedOutput.DecodedData = result - return decodedOutput, nil -} - -func (d *DecodedOutput) MarshalJSON() ([]byte, error) { - // Marshal the underlying Output using its custom MarshalJSON. - outputJSON, err := d.Output.MarshalJSON() - if err != nil { - return nil, err - } - // Ensure outputJSON is a valid JSON object. - if len(outputJSON) == 0 || outputJSON[len(outputJSON)-1] != '}' { - return nil, fmt.Errorf("unexpected format from Output.MarshalJSON") - } - - if d.DecodedData == nil { - return outputJSON, nil - } - // Marshal the DecodedData field. - decodedDataJSON, err := json.Marshal(d.DecodedData) - if err != nil { - return nil, err - } - // Use a bytes.Buffer to build the final JSON. - var buf bytes.Buffer - buf.Write(bytes.TrimSuffix(outputJSON, []byte("}"))) - buf.WriteString(`,"decoded_data":`) - buf.Write(decodedDataJSON) - buf.WriteByte('}') - - return buf.Bytes(), nil -} diff --git a/internal/manager/instance.go b/internal/manager/instance.go index 447dbb50a..bf1c602ee 100644 --- a/internal/manager/instance.go +++ b/internal/manager/instance.go @@ -36,12 +36,12 @@ var ( // Concurrency protocol: // - runtime: Protected by PMutex. 
Written under HLock, read under LLock. // - processedInputs: atomic.Uint64. Written under HLock (together with runtime swap, -// so writers see a consistent pair). Read lock-free via Load() — -// this is safe because only one advance runs at a time (advanceMutex) -// and the atomic store is visible to all goroutines immediately. +// so writers see a consistent pair). Read lock-free via Load() — +// this is safe because only one advance runs at a time (advanceMutex) +// and the atomic store is visible to all goroutines immediately. // - advanceMutex: Serializes all Advance calls. Only one input is processed at a time. // - mutex (PMutex): HLock for advance/snapshot/hash/proof (may destroy runtime on error). -// LLock for inspect (read-only fork). HLock starves LLock by design. +// LLock for inspect (read-only fork). HLock starves LLock by design. // - inspectSemaphore: Bounds concurrent inspect operations. type MachineInstanceImpl struct { application *Application diff --git a/internal/model/models.go b/internal/model/models.go index d746a0a08..52408628e 100644 --- a/internal/model/models.go +++ b/internal/model/models.go @@ -778,22 +778,22 @@ func (i *Input) UnmarshalJSON(in []byte) error { i.EpochIndex, err = ParseHexUint64(aux.EpochIndex) if err != nil { - return fmt.Errorf("error on EpochIndex: %v", err) + return fmt.Errorf("error on EpochIndex: %w", err) } i.Index, err = ParseHexUint64(aux.Index) if err != nil { - return fmt.Errorf("error on Index: %v", err) + return fmt.Errorf("error on Index: %w", err) } i.BlockNumber, err = ParseHexUint64(aux.BlockNumber) if err != nil { - return fmt.Errorf("error on BlockNumber: %v", err) + return fmt.Errorf("error on BlockNumber: %w", err) } i.RawData, err = hexutil.Decode(aux.RawData) if err != nil { - return fmt.Errorf("error on RawData: %v", err) + return fmt.Errorf("error on RawData: %w", err) } return nil @@ -903,6 +903,41 @@ func (i *Output) MarshalJSON() ([]byte, error) { return json.Marshal(aux) } +func (o *Output) 
UnmarshalJSON(data []byte) error { + type Alias Output + aux := &struct { + EpochIndex string `json:"epoch_index"` + InputIndex string `json:"input_index"` + Index string `json:"index"` + RawData string `json:"raw_data"` + *Alias + }{Alias: (*Alias)(o)} + + if err := json.Unmarshal(data, aux); err != nil { + return err + } + *o = Output(*aux.Alias) + + var err error + o.EpochIndex, err = ParseHexUint64(aux.EpochIndex) + if err != nil { + return fmt.Errorf("error on EpochIndex: %w", err) + } + o.InputIndex, err = ParseHexUint64(aux.InputIndex) + if err != nil { + return fmt.Errorf("error on InputIndex: %w", err) + } + o.Index, err = ParseHexUint64(aux.Index) + if err != nil { + return fmt.Errorf("error on Index: %w", err) + } + o.RawData, err = hexutil.Decode(aux.RawData) + if err != nil { + return fmt.Errorf("error on RawData: %w", err) + } + return nil +} + type Report struct { InputEpochApplicationID int64 `sql:"primary_key" json:"-"` EpochIndex uint64 `json:"epoch_index"` @@ -933,6 +968,41 @@ func (r *Report) MarshalJSON() ([]byte, error) { return json.Marshal(aux) } +func (r *Report) UnmarshalJSON(data []byte) error { + type Alias Report + aux := &struct { + EpochIndex string `json:"epoch_index"` + InputIndex string `json:"input_index"` + Index string `json:"index"` + RawData string `json:"raw_data"` + *Alias + }{Alias: (*Alias)(r)} + + if err := json.Unmarshal(data, aux); err != nil { + return err + } + *r = Report(*aux.Alias) + + var err error + r.EpochIndex, err = ParseHexUint64(aux.EpochIndex) + if err != nil { + return fmt.Errorf("error on EpochIndex: %w", err) + } + r.InputIndex, err = ParseHexUint64(aux.InputIndex) + if err != nil { + return fmt.Errorf("error on InputIndex: %w", err) + } + r.Index, err = ParseHexUint64(aux.Index) + if err != nil { + return fmt.Errorf("error on Index: %w", err) + } + r.RawData, err = hexutil.Decode(aux.RawData) + if err != nil { + return fmt.Errorf("error on RawData: %w", err) + } + return nil +} + type NodeConfig[T any] 
struct { Key string Value T @@ -1079,6 +1149,51 @@ func (t *Tournament) MarshalJSON() ([]byte, error) { return json.Marshal(aux) } +func (t *Tournament) UnmarshalJSON(data []byte) error { + type Alias Tournament + aux := &struct { + EpochIndex string `json:"epoch_index"` + MaxLevel string `json:"max_level"` + Level string `json:"level"` + Log2Step string `json:"log2step"` + Height string `json:"height"` + FinishedAtBlock string `json:"finished_at_block"` + *Alias + }{Alias: (*Alias)(t)} + + if err := json.Unmarshal(data, aux); err != nil { + return err + } + *t = Tournament(*aux.Alias) + + var err error + t.EpochIndex, err = ParseHexUint64(aux.EpochIndex) + if err != nil { + return fmt.Errorf("error on EpochIndex: %w", err) + } + t.MaxLevel, err = ParseHexUint64(aux.MaxLevel) + if err != nil { + return fmt.Errorf("error on MaxLevel: %w", err) + } + t.Level, err = ParseHexUint64(aux.Level) + if err != nil { + return fmt.Errorf("error on Level: %w", err) + } + t.Log2Step, err = ParseHexUint64(aux.Log2Step) + if err != nil { + return fmt.Errorf("error on Log2Step: %w", err) + } + t.Height, err = ParseHexUint64(aux.Height) + if err != nil { + return fmt.Errorf("error on Height: %w", err) + } + t.FinishedAtBlock, err = ParseHexUint64(aux.FinishedAtBlock) + if err != nil { + return fmt.Errorf("error on FinishedAtBlock: %w", err) + } + return nil +} + type Commitment struct { ApplicationID int64 `sql:"primary_key" json:"-"` EpochIndex uint64 `sql:"primary_key" json:"epoch_index"` @@ -1108,6 +1223,31 @@ func (c *Commitment) MarshalJSON() ([]byte, error) { return json.Marshal(aux) } +func (c *Commitment) UnmarshalJSON(data []byte) error { + type Alias Commitment + aux := &struct { + EpochIndex string `json:"epoch_index"` + BlockNumber string `json:"block_number"` + *Alias + }{Alias: (*Alias)(c)} + + if err := json.Unmarshal(data, aux); err != nil { + return err + } + *c = Commitment(*aux.Alias) + + var err error + c.EpochIndex, err = ParseHexUint64(aux.EpochIndex) + if err 
!= nil { + return fmt.Errorf("error on EpochIndex: %w", err) + } + c.BlockNumber, err = ParseHexUint64(aux.BlockNumber) + if err != nil { + return fmt.Errorf("error on BlockNumber: %w", err) + } + return nil +} + type Match struct { ApplicationID int64 `sql:"primary_key" json:"-"` EpochIndex uint64 `sql:"primary_key" json:"epoch_index"` diff --git a/internal/model/models_json_test.go b/internal/model/models_json_test.go new file mode 100644 index 000000000..7b32bce04 --- /dev/null +++ b/internal/model/models_json_test.go @@ -0,0 +1,275 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package model + +import ( + "encoding/json" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestEpochJSONRoundtrip(t *testing.T) { + root := common.HexToHash("0xabcd") + original := Epoch{ + ApplicationID: 1, + Index: 42, + FirstBlock: 100, + LastBlock: 200, + InputIndexLowerBound: 0, + InputIndexUpperBound: 10, + VirtualIndex: 5, + Status: EpochStatus_ClaimAccepted, + OutputsMerkleRoot: &root, + CreatedAt: time.Now().Truncate(time.Microsecond).UTC(), + UpdatedAt: time.Now().Truncate(time.Microsecond).UTC(), + } + + data, err := json.Marshal(&original) + require.NoError(t, err) + + var decoded Epoch + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + // ApplicationID is json:"-" (DB-only FK), intentionally lost in JSON roundtrip. 
+ require.Zero(t, decoded.ApplicationID) + require.Equal(t, original.Index, decoded.Index) + require.Equal(t, original.FirstBlock, decoded.FirstBlock) + require.Equal(t, original.LastBlock, decoded.LastBlock) + require.Equal(t, original.InputIndexLowerBound, decoded.InputIndexLowerBound) + require.Equal(t, original.InputIndexUpperBound, decoded.InputIndexUpperBound) + require.Equal(t, original.VirtualIndex, decoded.VirtualIndex) + require.Equal(t, original.Status, decoded.Status) + require.Equal(t, original.OutputsMerkleRoot, decoded.OutputsMerkleRoot) +} + +func TestInputJSONRoundtrip(t *testing.T) { + machineHash := common.HexToHash("0x1234") + original := Input{ + EpochApplicationID: 1, + EpochIndex: 3, + Index: 7, + BlockNumber: 12345, + RawData: []byte{0xde, 0xad, 0xbe, 0xef}, + Status: InputCompletionStatus_Accepted, + MachineHash: &machineHash, + CreatedAt: time.Now().Truncate(time.Microsecond).UTC(), + UpdatedAt: time.Now().Truncate(time.Microsecond).UTC(), + } + + data, err := json.Marshal(&original) + require.NoError(t, err) + + var decoded Input + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + // EpochApplicationID is json:"-" (DB-only FK), intentionally lost in JSON roundtrip. 
+ require.Zero(t, decoded.EpochApplicationID) + require.Equal(t, original.EpochIndex, decoded.EpochIndex) + require.Equal(t, original.Index, decoded.Index) + require.Equal(t, original.BlockNumber, decoded.BlockNumber) + require.Equal(t, original.RawData, decoded.RawData) + require.Equal(t, original.Status, decoded.Status) + require.Equal(t, original.MachineHash, decoded.MachineHash) +} + +func TestOutputJSONRoundtrip(t *testing.T) { + hash := common.HexToHash("0xaaaa") + txHash := common.HexToHash("0xbbbb") + original := Output{ + InputEpochApplicationID: 1, + EpochIndex: 2, + InputIndex: 5, + Index: 10, + RawData: []byte{0xca, 0xfe}, + Hash: &hash, + OutputHashesSiblings: []common.Hash{common.HexToHash("0x1111")}, + ExecutionTransactionHash: &txHash, + CreatedAt: time.Now().Truncate(time.Microsecond).UTC(), + UpdatedAt: time.Now().Truncate(time.Microsecond).UTC(), + } + + data, err := json.Marshal(&original) + require.NoError(t, err) + + var decoded Output + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + // InputEpochApplicationID is json:"-" (DB-only FK), intentionally lost in JSON roundtrip. 
+ require.Zero(t, decoded.InputEpochApplicationID) + require.Equal(t, original.EpochIndex, decoded.EpochIndex) + require.Equal(t, original.InputIndex, decoded.InputIndex) + require.Equal(t, original.Index, decoded.Index) + require.Equal(t, original.RawData, decoded.RawData) + require.Equal(t, original.Hash, decoded.Hash) + require.Equal(t, original.OutputHashesSiblings, decoded.OutputHashesSiblings) + require.Equal(t, original.ExecutionTransactionHash, decoded.ExecutionTransactionHash) +} + +func TestReportJSONRoundtrip(t *testing.T) { + original := Report{ + InputEpochApplicationID: 1, + EpochIndex: 4, + InputIndex: 8, + Index: 0, + RawData: []byte{0x01, 0x02, 0x03}, + CreatedAt: time.Now().Truncate(time.Microsecond).UTC(), + UpdatedAt: time.Now().Truncate(time.Microsecond).UTC(), + } + + data, err := json.Marshal(&original) + require.NoError(t, err) + + var decoded Report + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + // InputEpochApplicationID is json:"-" (DB-only FK), intentionally lost in JSON roundtrip. 
+ require.Zero(t, decoded.InputEpochApplicationID) + require.Equal(t, original.EpochIndex, decoded.EpochIndex) + require.Equal(t, original.InputIndex, decoded.InputIndex) + require.Equal(t, original.Index, decoded.Index) + require.Equal(t, original.RawData, decoded.RawData) +} + +func TestTournamentJSONRoundtrip(t *testing.T) { + parentAddr := common.HexToAddress("0x1234567890123456789012345678901234567890") + parentMatch := common.HexToHash("0xeeee") + winner := common.HexToHash("0xdddd") + finalState := common.HexToHash("0xcccc") + original := Tournament{ + ApplicationID: 1, + EpochIndex: 3, + Address: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + ParentTournamentAddress: &parentAddr, + ParentMatchIDHash: &parentMatch, + MaxLevel: 4, + Level: 2, + Log2Step: 16, + Height: 8, + WinnerCommitment: &winner, + FinalStateHash: &finalState, + FinishedAtBlock: 9999, + CreatedAt: time.Now().Truncate(time.Microsecond).UTC(), + UpdatedAt: time.Now().Truncate(time.Microsecond).UTC(), + } + + data, err := json.Marshal(&original) + require.NoError(t, err) + + var decoded Tournament + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + // ApplicationID is json:"-" (DB-only FK), intentionally lost in JSON roundtrip. 
+ require.Zero(t, decoded.ApplicationID) + require.Equal(t, original.EpochIndex, decoded.EpochIndex) + require.Equal(t, original.Address, decoded.Address) + require.Equal(t, original.ParentTournamentAddress, decoded.ParentTournamentAddress) + require.Equal(t, original.ParentMatchIDHash, decoded.ParentMatchIDHash) + require.Equal(t, original.MaxLevel, decoded.MaxLevel) + require.Equal(t, original.Level, decoded.Level) + require.Equal(t, original.Log2Step, decoded.Log2Step) + require.Equal(t, original.Height, decoded.Height) + require.Equal(t, original.WinnerCommitment, decoded.WinnerCommitment) + require.Equal(t, original.FinalStateHash, decoded.FinalStateHash) + require.Equal(t, original.FinishedAtBlock, decoded.FinishedAtBlock) +} + +func TestCommitmentJSONRoundtrip(t *testing.T) { + original := Commitment{ + ApplicationID: 1, + EpochIndex: 7, + TournamentAddress: common.HexToAddress("0xabcdefabcdefabcdefabcdefabcdefabcdefabcd"), + Commitment: common.HexToHash("0x5555"), + FinalStateHash: common.HexToHash("0x6666"), + SubmitterAddress: common.HexToAddress("0x1111111111111111111111111111111111111111"), + BlockNumber: 54321, + TxHash: common.HexToHash("0x7777"), + CreatedAt: time.Now().Truncate(time.Microsecond).UTC(), + UpdatedAt: time.Now().Truncate(time.Microsecond).UTC(), + } + + data, err := json.Marshal(&original) + require.NoError(t, err) + + var decoded Commitment + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + // ApplicationID is json:"-" (DB-only FK), intentionally lost in JSON roundtrip. 
+ require.Zero(t, decoded.ApplicationID) + require.Equal(t, original.EpochIndex, decoded.EpochIndex) + require.Equal(t, original.TournamentAddress, decoded.TournamentAddress) + require.Equal(t, original.Commitment, decoded.Commitment) + require.Equal(t, original.FinalStateHash, decoded.FinalStateHash) + require.Equal(t, original.SubmitterAddress, decoded.SubmitterAddress) + require.Equal(t, original.BlockNumber, decoded.BlockNumber) + require.Equal(t, original.TxHash, decoded.TxHash) +} + +func TestOutputJSONRoundtripZeroValues(t *testing.T) { + original := Output{ + EpochIndex: 0, + InputIndex: 0, + Index: 0, + RawData: []byte{}, + } + + data, err := json.Marshal(&original) + require.NoError(t, err) + + var decoded Output + err = json.Unmarshal(data, &decoded) + require.NoError(t, err) + + require.Equal(t, original.EpochIndex, decoded.EpochIndex) + require.Equal(t, original.InputIndex, decoded.InputIndex) + require.Equal(t, original.Index, decoded.Index) +} + +func TestOutputUnmarshalJSONInvalidHex(t *testing.T) { + tests := []struct { + name string + json string + wantErr string + }{ + { + name: "invalid EpochIndex", + json: `{"epoch_index":"bad","input_index":"0x0","index":"0x0","raw_data":"0x"}`, + wantErr: "EpochIndex", + }, + { + name: "invalid InputIndex", + json: `{"epoch_index":"0x0","input_index":"bad","index":"0x0","raw_data":"0x"}`, + wantErr: "InputIndex", + }, + { + name: "invalid RawData", + json: `{"epoch_index":"0x0","input_index":"0x0","index":"0x0","raw_data":"not-hex"}`, + wantErr: "RawData", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var output Output + err := json.Unmarshal([]byte(tt.json), &output) + require.Error(t, err) + require.ErrorContains(t, err, tt.wantErr) + }) + } +} + +func TestReportUnmarshalJSONInvalidHex(t *testing.T) { + invalidJSON := `{"epoch_index":"0x0","input_index":"bad","index":"0x0","raw_data":"0x"}` + var report Report + err := json.Unmarshal([]byte(invalidJSON), &report) + 
require.Error(t, err) + require.ErrorContains(t, err, "InputIndex") +} From 07a9d75c8a2af9fbafdc325e4d717e8e108af0b7 Mon Sep 17 00:00:00 2001 From: Victor Fusco <1221933+vfusco@users.noreply.github.com> Date: Fri, 6 Mar 2026 13:46:57 -0300 Subject: [PATCH 4/5] test(ci): add integration test framework with lifecycle, multi-app, and PRT suites --- Dockerfile | 23 +- Makefile | 67 ++- internal/jsonrpc/api/decode.go | 24 + scripts/run-integration-tests.sh | 55 +++ test/compose/compose.integration.yaml | 133 ++++++ test/integration/anvil_helpers_test.go | 291 +++++++++++++ test/integration/cli_helpers_test.go | 270 ++++++++++++ test/integration/echo_authority_test.go | 46 ++ test/integration/echo_prt_test.go | 63 +++ test/integration/lifecycle_test.go | 411 ++++++++++++++++++ test/integration/main_test.go | 41 ++ test/integration/multi_app_test.go | 201 +++++++++ test/integration/polling_helpers_test.go | 161 +++++++ test/integration/reject_exception_prt_test.go | 83 ++++ test/integration/reject_exception_test.go | 57 +++ 15 files changed, 1917 insertions(+), 9 deletions(-) create mode 100755 scripts/run-integration-tests.sh create mode 100644 test/compose/compose.integration.yaml create mode 100644 test/integration/anvil_helpers_test.go create mode 100644 test/integration/cli_helpers_test.go create mode 100644 test/integration/echo_authority_test.go create mode 100644 test/integration/echo_prt_test.go create mode 100644 test/integration/lifecycle_test.go create mode 100644 test/integration/main_test.go create mode 100644 test/integration/multi_app_test.go create mode 100644 test/integration/polling_helpers_test.go create mode 100644 test/integration/reject_exception_prt_test.go create mode 100644 test/integration/reject_exception_test.go diff --git a/Dockerfile b/Dockerfile index b32146429..09ce6401b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -100,6 +100,27 @@ WORKDIR ${GO_BUILD_PATH}/rollups-node RUN make build-go +# 
============================================================================= +# STAGE: tester +# +# This stage extends go-builder with tools and directories needed for testing +# and linting. It is NOT part of the production image build chain. +# ============================================================================= + +FROM go-builder AS tester + +# Install golangci-lint for linting inside Docker. +ARG GOLANGCI_LINT_VERSION=1.64.5 +RUN go install github.com/golangci/golangci-lint/cmd/golangci-lint@v${GOLANGCI_LINT_VERSION} +ENV PATH="${GOPATH}/bin:${PATH}" +ENV GOLANGCI_LINT_CACHE=${GOCACHE}/golangci-lint + +# Create /dapps directory owned by cartesi for Docker named volume pre-population. +# When a named volume is first mounted here, Docker copies this ownership. +USER root +RUN mkdir -p /dapps && chown cartesi:cartesi /dapps +USER cartesi + # ============================================================================= # STAGE: debian-packager # @@ -158,7 +179,7 @@ USER cartesi WORKDIR ${NODE_RUNTIME_DIR} HEALTHCHECK --interval=1s --timeout=1s --retries=5 \ - CMD curl -G -f -H 'Content-Type: application/json' http://127.0.0.1:10000/healthz + CMD curl -G -f -H 'Content-Type: application/json' http://127.0.0.1:10000/readyz # Set the Go supervisor as the command. CMD [ "cartesi-rollups-node" ] diff --git a/Makefile b/Makefile index aeb168818..8f0c7b633 100644 --- a/Makefile +++ b/Makefile @@ -247,7 +247,7 @@ unit-test: $(COVER_DEPS) ## Execute go unit tests integration-test: ## Execute e2e tests @echo "Running end-to-end tests" - @go test -count=1 ./test --tags=endtoendtests + @go test -count=1 -timeout 55m $(GO_BUILD_PARAMS) $(GO_TEST_FLAGS) -tags=endtoendtests ./test/integration/... 
echo-dapp: applications/echo-dapp ## Echo dapp @@ -270,6 +270,20 @@ applications/exception-dapp: ## Create exception-dapp test application @mkdir -p applications @cartesi-machine --ram-length=128Mi --store=applications/exception-dapp --final-hash -- "rollup accept && echo '{\"payload\": \"0x7468697320697320612064756d6d7920657863657074696f6e2074657874\"}' | rollup exception" +reject-loop-dapp: applications/reject-loop-dapp ## Reject loop dapp + +exception-loop-dapp: applications/exception-loop-dapp ## Exception loop dapp + +applications/reject-loop-dapp: ## Create reject-loop-dapp test application + @echo "Creating reject-loop-dapp test application" + @mkdir -p applications + @cartesi-machine --ram-length=128Mi --store=applications/reject-loop-dapp --final-hash -- ioctl-echo-loop --vouchers=1 --notices=1 --reports=1 --reject=1 --verbose=1 + +applications/exception-loop-dapp: ## Create exception-loop-dapp test application + @echo "Creating exception-loop-dapp test application" + @mkdir -p applications + @cartesi-machine --ram-length=128Mi --store=applications/exception-loop-dapp --final-hash -- ioctl-echo-loop --vouchers=1 --notices=1 --reports=1 --exception=1 --verbose=1 + deploy-echo-dapp: applications/echo-dapp ## Deploy echo-dapp test application @echo "Deploying echo-dapp test application" @./cartesi-rollups-cli deploy application echo-dapp applications/echo-dapp/ @@ -312,6 +326,10 @@ fmt: ## Run go fmt @echo "Running go fmt" @go fmt ./... +fmt-check: ## Check go formatting (non-destructive) + @echo "Checking go formatting" + @test -z "$$(gofmt -l .)" || (echo "Unformatted files:" && gofmt -l . && exit 1) + vet: ## Run go vet @echo "Running go vet" @go vet ./... @@ -338,7 +356,7 @@ image: ## Build the docker images using bake @docker build $(DOCKER_PLATFORM) -t cartesi/rollups-node:$(IMAGE_TAG) . tester-image: ## Build the docker images using bake - @docker build $(DOCKER_PLATFORM) --target=go-builder -t cartesi/rollups-node:tester . 
+ @docker build $(DOCKER_PLATFORM) --target=tester -t cartesi/rollups-node:tester . debian-packager: ## Build debian packager image @echo "Building debian packager image $(DEB_PACKAGER_IMG) $(BUILD_PLATFORM)" @@ -391,15 +409,48 @@ shutdown-compose: ## Remove the containers and volumes from previous compose run unit-test-with-compose: $(CARTESI_TEST_MACHINE_IMAGES) ## Run unit tests using docker compose with auto-shutdown @trap 'docker compose -f test/compose/compose.test.yaml down -v || true' EXIT && \ - docker compose -f test/compose/compose.test.yaml run --remove-orphans unit-test + docker compose -f test/compose/compose.test.yaml run --rm --remove-orphans unit-test -#integration-test-with-compose: $(CARTESI_TEST_MACHINE_IMAGES) ## Run integration tests using docker compose with auto-shutdown -# @trap 'docker compose -f test/compose/compose.test.yaml down -v || true' EXIT && \ -# docker compose -f test/compose/compose.test.yaml run integration-test +lint-with-docker: ## Run linting inside Docker (no host Go needed) + @docker run --rm cartesi/rollups-node:tester sh -c 'make lint && make vet && make fmt-check' + +integration-test-with-compose: $(CARTESI_TEST_MACHINE_IMAGES) ## Run integration tests using docker compose with auto-shutdown + @trap 'docker compose -f test/compose/compose.integration.yaml logs --no-color > integration-logs.txt 2>&1 || true; docker compose -f test/compose/compose.integration.yaml down -v || true' EXIT && \ + docker compose -f test/compose/compose.integration.yaml run --rm --remove-orphans integration-test test-with-compose: ## Run all tests using docker compose with auto-shutdown @$(MAKE) unit-test-with-compose -# @$(MAKE) integration-test-with-compose + @$(MAKE) integration-test-with-compose + +integration-test-local: build echo-dapp reject-loop-dapp exception-loop-dapp ## Run integration tests locally (requires: make start && eval $$(make env)) + @cartesi-rollups-cli db init + @echo "Starting node in background..." 
+ @env CARTESI_ADVANCER_POLLING_INTERVAL=1 \ + CARTESI_VALIDATOR_POLLING_INTERVAL=1 \ + CARTESI_CLAIMER_POLLING_INTERVAL=1 \ + CARTESI_PRT_POLLING_INTERVAL=1 \ + cartesi-rollups-node & NODE_PID=$$!; \ + trap 'echo "Stopping node (pid $$NODE_PID)..."; kill $$NODE_PID 2>/dev/null; wait $$NODE_PID 2>/dev/null' EXIT; \ + echo "Waiting for node to become healthy..."; \ + attempts=0; \ + until curl -sf http://localhost:10000/readyz >/dev/null 2>&1; do \ + attempts=$$((attempts + 1)); \ + if [ $$attempts -ge 60 ]; then \ + echo "ERROR: Node failed to become healthy after 120 seconds"; \ + exit 1; \ + fi; \ + sleep 2; \ + done; \ + echo "Node is healthy. Running integration tests..."; \ + export CARTESI_TEST_DAPP_PATH=$(CURDIR)/applications/echo-dapp; \ + export CARTESI_TEST_REJECT_DAPP_PATH=$(CURDIR)/applications/reject-loop-dapp; \ + export CARTESI_TEST_EXCEPTION_DAPP_PATH=$(CURDIR)/applications/exception-loop-dapp; \ + $(MAKE) integration-test + +ci-test: ## Run the full CI test pipeline locally (lint + unit + integration) +# @$(MAKE) lint-with-docker + @$(MAKE) unit-test-with-compose + @$(MAKE) integration-test-with-compose clean-test-compose-resources: ## Clean up compose resources after some unexpected test failure @echo "Cleaning up Docker Compose resources..." 
@@ -436,4 +487,4 @@ build-debian-package: install sed 's|ARG_VERSION|$(ROLLUPS_NODE_VERSION)|g;s|ARG_ARCH|$(DEB_ARCH)|g' control.template > $(DESTDIR)/DEBIAN/control dpkg-deb -Zxz --root-owner-group --build $(DESTDIR) $(DEB_FILENAME) -.PHONY: build build-go clean clean-go test unit-test-go e2e-test lint fmt vet escape md-lint devnet image run-with-compose shutdown-compose help docs coverage-report $(GO_ARTIFACTS) +.PHONY: build build-go clean clean-go test unit-test-go e2e-test lint fmt fmt-check vet escape md-lint devnet image run-with-compose shutdown-compose help docs coverage-report lint-with-docker integration-test-with-compose integration-test-local ci-test $(GO_ARTIFACTS) diff --git a/internal/jsonrpc/api/decode.go b/internal/jsonrpc/api/decode.go index 6efd1b4de..d1eacb8c2 100644 --- a/internal/jsonrpc/api/decode.go +++ b/internal/jsonrpc/api/decode.go @@ -214,6 +214,30 @@ func DecodeOutput(output *model.Output, parsedAbi *abi.ABI) (*DecodedOutput, err return decodedOutput, nil } +// UnmarshalJSON deserializes a DecodedOutput: it delegates base fields to +// model.Output.UnmarshalJSON, then extracts and parses the decoded_data field. +func (d *DecodedOutput) UnmarshalJSON(data []byte) error { + if d.Output == nil { + d.Output = new(model.Output) + } + if err := d.Output.UnmarshalJSON(data); err != nil { + return err + } + var raw struct { + DecodedData json.RawMessage `json:"decoded_data"` + } + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if len(raw.DecodedData) > 0 && string(raw.DecodedData) != "null" { + d.DecodedData = new(DecodedData) + if err := json.Unmarshal(raw.DecodedData, d.DecodedData); err != nil { + return err + } + } + return nil +} + // MarshalJSON produces a flat JSON object merging Output fields with decoded_data. 
func (d *DecodedOutput) MarshalJSON() ([]byte, error) { outputJSON, err := d.Output.MarshalJSON() diff --git a/scripts/run-integration-tests.sh b/scripts/run-integration-tests.sh new file mode 100755 index 000000000..6831ab922 --- /dev/null +++ b/scripts/run-integration-tests.sh @@ -0,0 +1,55 @@ +#!/bin/bash +# (c) Cartesi and individual authors (see AUTHORS) +# SPDX-License-Identifier: Apache-2.0 (see LICENSE) +# +# Entrypoint for the integration-test container. +# Waits for the node to become healthy, starts a background health monitor, +# then runs the Go integration test suite. +# +# Usage: run-integration-tests.sh + +set -eu + +NODE_URL="${1:-http://node:10000}" + +echo "Waiting for node to become healthy..." +attempts=0 +until curl -sf "${NODE_URL}/readyz"; do + attempts=$((attempts + 1)) + if [ "$attempts" -ge 60 ]; then + echo "ERROR: Node failed to become healthy after 120 seconds" + exit 1 + fi + sleep 2 +done +echo "Node is healthy. Running integration tests..." + +# Monitor node health in background; kill the entire process group if +# the node crashes. Uses a failure counter (3 consecutive misses) to +# tolerate transient blips such as GC pauses. +(fail_count=0; while sleep 5; do + if ! curl -sf "${NODE_URL}/readyz" >/dev/null 2>&1; then + fail_count=$((fail_count + 1)) + echo "WARNING: Node health check failed ($fail_count/3)" + if [ "$fail_count" -ge 3 ]; then + echo "ERROR: Node unhealthy after 3 consecutive checks, aborting tests." + kill 0 2>/dev/null + exit 1 + fi + else + fail_count=0 + fi +done) & +HEALTH_PID=$! +trap "kill $HEALTH_PID 2>/dev/null" EXIT + +export PATH="/opt/go/bin:/build/cartesi/go/rollups-node:$PATH" + +# Smoke-check: verify the CLI binary is on PATH before running the suite. +which cartesi-rollups-cli || { echo "ERROR: cartesi-rollups-cli not found on PATH"; exit 1; } + +# Timeout must be less than the CI job timeout-minutes (60) to produce +# a useful go test panic instead of an abrupt CI kill. 
+go test -count=1 -v -timeout 55m \ + -ldflags "-r /opt/cartesi/lib" \ + -tags=endtoendtests ./test/integration/... diff --git a/test/compose/compose.integration.yaml b/test/compose/compose.integration.yaml new file mode 100644 index 000000000..2a457f8e1 --- /dev/null +++ b/test/compose/compose.integration.yaml @@ -0,0 +1,133 @@ +x-env: &env + CARTESI_LOG_LEVEL: info + CARTESI_BLOCKCHAIN_HTTP_ENDPOINT: http://ethereum_provider:8545 + CARTESI_BLOCKCHAIN_WS_ENDPOINT: ws://ethereum_provider:8545 + CARTESI_BLOCKCHAIN_ID: 31337 + CARTESI_CONTRACTS_INPUT_BOX_ADDRESS: 0x1b51e2992A2755Ba4D6F7094032DF91991a0Cfac + CARTESI_CONTRACTS_AUTHORITY_FACTORY_ADDRESS: 0x5E96408CFE423b01dADeD3bc867E6013135990cc + CARTESI_CONTRACTS_APPLICATION_FACTORY_ADDRESS: 0x26E758238CB6eC5aB70ce0dd52aF2d7b82e1972E + CARTESI_CONTRACTS_SELF_HOSTED_APPLICATION_FACTORY_ADDRESS: 0x010D3CbB4223F5bCc7b7B03cEE59f3aAea8eDb8A + CARTESI_CONTRACTS_DAVE_APP_FACTORY_ADDRESS: 0xfC2DBC639b5FB9AfE66A8696eC14EaD9FbFBC404 + CARTESI_DATABASE_CONNECTION: postgres://postgres:password@database:5432/rollupsdb?sslmode=disable + CARTESI_AUTH_MNEMONIC: "test test test test test test test test test test test junk" + +services: + ethereum_provider: + image: cartesi/rollups-node-devnet:devel + networks: + - devnet + healthcheck: + test: > + curl -sf -X POST http://127.0.0.1:8545 + -H 'Content-Type: application/json' + -d '{"jsonrpc":"2.0","method":"net_listening","params":[],"id":1}' + interval: 3s + timeout: 3s + retries: 10 + + database: + image: postgres:17-alpine + shm_size: 128mb + networks: + - devnet + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres -d rollupsdb || exit 1"] + interval: 3s + timeout: 3s + retries: 5 + environment: + POSTGRES_PASSWORD: password + POSTGRES_DB: rollupsdb + + migration: + image: cartesi/rollups-node:devel + command: cartesi-rollups-cli db init + depends_on: + database: + condition: service_healthy + networks: + - devnet + restart: "no" + environment: + <<: *env + + dapp-builder: + 
image: cartesi/rollups-node:tester + entrypoint: ["sh", "-c"] + command: | + ' + set -e + echo "Building echo-dapp machine snapshot..." + make echo-dapp + echo "Building reject-loop-dapp machine snapshot..." + make reject-loop-dapp + echo "Building exception-loop-dapp machine snapshot..." + make exception-loop-dapp + echo "Copying to shared volume..." + cp -r applications/echo-dapp /dapps/echo-dapp + cp -r applications/reject-loop-dapp /dapps/reject-loop-dapp + cp -r applications/exception-loop-dapp /dapps/exception-loop-dapp + echo "DApp images built successfully." + ' + volumes: + - dapp_images:/dapps + - ../downloads:/usr/share/cartesi-machine/images + restart: "no" + + node: + image: cartesi/rollups-node:devel + init: true + command: cartesi-rollups-node + depends_on: + migration: + condition: service_completed_successfully + ethereum_provider: + condition: service_healthy + dapp-builder: + condition: service_completed_successfully + volumes: + - dapp_images:/var/lib/cartesi-rollups-node/dapps + networks: + - devnet + healthcheck: + test: ["CMD", "curl", "-G", "-f", "http://127.0.0.1:10000/readyz"] + interval: 5s + timeout: 5s + retries: 30 + start_period: 10s + environment: + <<: *env + CARTESI_BLOCKCHAIN_DEFAULT_BLOCK: latest + CARTESI_ADVANCER_POLLING_INTERVAL: 1 + CARTESI_VALIDATOR_POLLING_INTERVAL: 1 + CARTESI_CLAIMER_POLLING_INTERVAL: 1 + CARTESI_PRT_POLLING_INTERVAL: 1 + + integration-test: + image: cartesi/rollups-node:tester + profiles: [integration-test] + entrypoint: ["bash"] + command: ["/scripts/run-integration-tests.sh", "http://node:10000"] + depends_on: + node: + condition: service_healthy + dapp-builder: + condition: service_completed_successfully + volumes: + - dapp_images:/var/lib/cartesi-rollups-node/dapps:ro + - ../downloads:/usr/share/cartesi-machine/images + - ../../scripts/run-integration-tests.sh:/scripts/run-integration-tests.sh:ro + networks: + - devnet + restart: "no" + environment: + <<: *env + CARTESI_TEST_DAPP_PATH: 
/var/lib/cartesi-rollups-node/dapps/echo-dapp + CARTESI_TEST_REJECT_DAPP_PATH: /var/lib/cartesi-rollups-node/dapps/reject-loop-dapp + CARTESI_TEST_EXCEPTION_DAPP_PATH: /var/lib/cartesi-rollups-node/dapps/exception-loop-dapp + +volumes: + dapp_images: + +networks: + devnet: diff --git a/test/integration/anvil_helpers_test.go b/test/integration/anvil_helpers_test.go new file mode 100644 index 000000000..66c9670fc --- /dev/null +++ b/test/integration/anvil_helpers_test.go @@ -0,0 +1,291 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//go:build endtoendtests + +package integration + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "testing" + "time" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/cartesi/rollups-node/pkg/contracts/itournament" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/stretchr/testify/require" +) + +// maxBlocksToMine is the sanity cap for mineForTournamentTimeout to prevent +// hanging if the tournament contract reports an unreasonably large allowance. +const maxBlocksToMine = 10_000 + +// Anvil devnet RPC helpers. + +var anvilHTTPClient = &http.Client{Timeout: 30 * time.Second} + +// anvilRPCCall calls an Anvil JSON-RPC method and returns the raw result. 
+func anvilRPCCall(ctx context.Context, method string, params ...any) (json.RawMessage, error) { + endpoint := envOrDefault( + "CARTESI_BLOCKCHAIN_HTTP_ENDPOINT", "http://localhost:8545") + + rpcReq := struct { + JSONRPC string `json:"jsonrpc"` + Method string `json:"method"` + Params []any `json:"params"` + ID int `json:"id"` + }{JSONRPC: "2.0", Method: method, Params: params, ID: 1} + body, err := json.Marshal(rpcReq) + if err != nil { + return nil, fmt.Errorf("marshal rpc request: %w", err) + } + + req, err := http.NewRequestWithContext( + ctx, "POST", endpoint, bytes.NewReader(body)) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/json") + + resp, err := anvilHTTPClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s: HTTP %d", method, resp.StatusCode) + } + + // Parse JSON-RPC response and check for error. + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s: read response: %w", method, err) + } + var rpcResp struct { + Result json.RawMessage `json:"result"` + Error *struct { + Code int `json:"code"` + Message string `json:"message"` + } `json:"error"` + } + if err := json.Unmarshal(respBody, &rpcResp); err != nil { + return nil, fmt.Errorf("%s: parse response: %w", method, err) + } + if rpcResp.Error != nil { + return nil, fmt.Errorf("%s: JSON-RPC error %d: %s", + method, rpcResp.Error.Code, rpcResp.Error.Message) + } + return rpcResp.Result, nil +} + +// anvilRPC calls an Anvil JSON-RPC method, discarding the result. +func anvilRPC(ctx context.Context, method string, params ...any) error { + _, err := anvilRPCCall(ctx, method, params...) + return err +} + +// anvilSetBalance sets the ETH balance for an address on the Anvil devnet. 
+func anvilSetBalance(ctx context.Context, address string, weiHex string) error { + return anvilRPC(ctx, "anvil_setBalance", address, weiHex) +} + +// anvilMine mines the specified number of blocks on the Anvil devnet. +func anvilMine(ctx context.Context, numBlocks int) error { + return anvilRPC(ctx, "anvil_mine", fmt.Sprintf("0x%x", numBlocks)) +} + +// mineForTournamentTimeout queries the tournament contract to determine +// the timeout block (startInstant + allowance) and mines enough blocks +// to reach it. Returns the number of blocks mined. +func mineForTournamentTimeout( + ctx context.Context, + client *ethclient.Client, + tournamentAddr common.Address, +) (int, error) { + tournament, err := itournament.NewITournament(tournamentAddr, client) + if err != nil { + return 0, fmt.Errorf("bind tournament: %w", err) + } + + args, err := tournament.TournamentArguments(&bind.CallOpts{Context: ctx}) + if err != nil { + return 0, fmt.Errorf("tournament arguments: %w", err) + } + + currentBlock, err := client.BlockNumber(ctx) + if err != nil { + return 0, fmt.Errorf("block number: %w", err) + } + + finishBlock := args.StartInstant + args.Allowance + if finishBlock < args.StartInstant { // uint64 overflow + return 0, fmt.Errorf("tournament timeout overflows: start=%d allowance=%d", + args.StartInstant, args.Allowance) + } + if currentBlock >= finishBlock { + return 0, nil // Already past the timeout. + } + + gap := finishBlock - currentBlock + 1 + if gap > maxBlocksToMine { + return 0, fmt.Errorf( + "tournament needs %d blocks but cap is %d", gap, maxBlocksToMine) + } + blocksNeeded := int(gap) + if err := anvilMine(ctx, blocksNeeded); err != nil { + return 0, err + } + return blocksNeeded, nil +} + +// PRT tournament helpers. + +// waitForTournamentAndCommitment polls until a root tournament and a commitment +// exist for the given epoch index. Returns the tournament. 
+func waitForTournamentAndCommitment( + ctx context.Context, + t testing.TB, + require *require.Assertions, + appName string, + epochIndex uint64, +) *model.Tournament { + t.Helper() + tctx, cancel := context.WithTimeout(ctx, claimAcceptedTimeout) + defer cancel() + + var tournament *model.Tournament + var lastErr error + err := pollUntil(tctx, 5*time.Second, func() (bool, error) { + resp, err := readTournaments(tctx, appName) + if err != nil { + if isCLIExitError(err) { + lastErr = err + t.Logf(" epoch %d: poll tournaments: %v (retrying)", epochIndex, err) + return false, nil + } + return false, fmt.Errorf("poll tournaments: %w", err) + } + tournament = findRootTournament(resp.Data, epochIndex) + return tournament != nil, nil + }) + if err != nil && lastErr != nil { + err = fmt.Errorf("%w (last poll error: %v)", err, lastErr) + } + require.NoError(err, "wait for epoch %d tournament", epochIndex) + t.Logf(" epoch %d: root tournament created at %s", epochIndex, tournament.Address) + + lastErr = nil + err = pollUntil(tctx, 5*time.Second, func() (bool, error) { + resp, err := readCommitments(tctx, appName) + if err != nil { + if isCLIExitError(err) { + lastErr = err + t.Logf(" epoch %d: poll commitments: %v (retrying)", epochIndex, err) + return false, nil + } + return false, fmt.Errorf("poll commitments: %w", err) + } + return findCommitmentForEpoch(resp.Data, epochIndex) != nil, nil + }) + if err != nil && lastErr != nil { + err = fmt.Errorf("%w (last poll error: %v)", err, lastErr) + } + require.NoError(err, "wait for epoch %d commitment", epochIndex) + t.Logf(" epoch %d: commitment joined to tournament", epochIndex) + + return tournament +} + +// waitForTournamentWinner polls until the root tournament for the given epoch +// has a winner commitment. 
+func waitForTournamentWinner( + ctx context.Context, + t testing.TB, + require *require.Assertions, + appName string, + epochIndex uint64, +) { + t.Helper() + tctx, cancel := context.WithTimeout(ctx, 2*time.Minute) + defer cancel() + + var lastErr error + err := pollUntil(tctx, 5*time.Second, func() (bool, error) { + resp, err := readTournaments(tctx, appName) + if err != nil { + if isCLIExitError(err) { + lastErr = err + t.Logf(" epoch %d: poll winner: %v (retrying)", epochIndex, err) + return false, nil + } + return false, fmt.Errorf("poll tournament winner: %w", err) + } + tournament := findRootTournament(resp.Data, epochIndex) + return tournament != nil && tournament.WinnerCommitment != nil, nil + }) + if err != nil && lastErr != nil { + err = fmt.Errorf("%w (last poll error: %v)", err, lastErr) + } + require.NoError(err, "wait for epoch %d tournament winner", epochIndex) +} + +// settleTournament runs the full tournament cycle for a given epoch: +// wait for tournament+commitment, mine past the timeout, wait for winner. 
+func settleTournament( + ctx context.Context, + t testing.TB, + require *require.Assertions, + client *ethclient.Client, + appName string, + epochIndex uint64, +) { + t.Helper() + defer timed(t, fmt.Sprintf("settle tournament epoch %d", epochIndex))() + + t.Logf("Waiting for PRT to create a root tournament and join with a commitment for epoch %d...", + epochIndex) + tournament := waitForTournamentAndCommitment(ctx, t, require, appName, epochIndex) + + block, err := client.BlockNumber(ctx) + require.NoError(err, "get block number") + t.Logf("Mining blocks to advance past the epoch %d tournament timeout (current block=%d)...", + epochIndex, block) + blocksMined, err := mineForTournamentTimeout(ctx, client, tournament.Address) + require.NoError(err, "mine for epoch %d tournament timeout", epochIndex) + t.Logf(" mined %d blocks to reach timeout", blocksMined) + + t.Logf("Waiting for the PRT service to settle epoch %d (uncontested single-commitment win)...", + epochIndex) + waitForTournamentWinner(ctx, t, require, appName, epochIndex) + t.Logf(" epoch %d tournament settled — winner declared", epochIndex) +} + +// findRootTournament returns the root tournament for the given epoch index, +// or nil if not found. Root tournaments have no parent. +func findRootTournament(tournaments []model.Tournament, epochIndex uint64) *model.Tournament { + for i, t := range tournaments { + if t.EpochIndex == epochIndex && t.ParentTournamentAddress == nil { + return &tournaments[i] + } + } + return nil +} + +// findCommitmentForEpoch returns the first commitment matching the epoch index, +// or nil if not found. 
+func findCommitmentForEpoch(commitments []model.Commitment, epochIndex uint64) *model.Commitment { + for i, c := range commitments { + if c.EpochIndex == epochIndex { + return &commitments[i] + } + } + return nil +} diff --git a/test/integration/cli_helpers_test.go b/test/integration/cli_helpers_test.go new file mode 100644 index 000000000..f60859fc2 --- /dev/null +++ b/test/integration/cli_helpers_test.go @@ -0,0 +1,270 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//go:build endtoendtests + +package integration + +import ( + "context" + "crypto/rand" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "strconv" + "time" + + "github.com/cartesi/rollups-node/internal/cli" + "github.com/cartesi/rollups-node/internal/jsonrpc/api" + "github.com/cartesi/rollups-node/internal/model" + + "github.com/ethereum/go-ethereum/common/hexutil" +) + +const cliBinary = "cartesi-rollups-cli" + +// cliCommandTimeout is the maximum time a single CLI command may run before +// being killed. This prevents a hanging command from consuming the entire +// suite timeout. +const cliCommandTimeout = 60 * time.Second + +// uniqueSalt generates a random 32-byte hex salt for CREATE2 deployments, +// ensuring tests are idempotent when re-run against the same blockchain state. +func uniqueSalt() string { + var b [32]byte + if _, err := rand.Read(b[:]); err != nil { + panic(fmt.Sprintf("generate salt: %v", err)) + } + return hex.EncodeToString(b[:]) +} + +// uniqueAppName generates a unique application name by appending a random +// 8-char hex suffix. This avoids DB unique-constraint violations when tests +// are re-run against the same postgres without a schema reset. 
+func uniqueAppName(prefix string) string { + var b [4]byte + if _, err := rand.Read(b[:]); err != nil { + panic(fmt.Sprintf("generate app name suffix: %v", err)) + } + return fmt.Sprintf("%s-%s", prefix, hex.EncodeToString(b[:])) +} + +// CLI execution helpers. + +func envOrDefault(key, fallback string) string { + if v, ok := os.LookupEnv(key); ok && v != "" { + return v + } + return fallback +} + +// cliError wraps a CLI execution failure, preserving the exit code for +// error discrimination in poll helpers. +type cliError struct { + Args []string + ExitCode int + Stderr string + Err error +} + +func (e *cliError) Error() string { + if e.Stderr != "" { + return fmt.Sprintf("cli %v failed (exit %d): %s", e.Args, e.ExitCode, e.Stderr) + } + return fmt.Sprintf("cli %v failed: %v", e.Args, e.Err) +} + +func (e *cliError) Unwrap() error { return e.Err } + +// isCLIExitError returns true if the error is a CLI command that exited +// with a non-zero code (as opposed to a context cancellation, JSON parse +// failure, or other structural error). Poll helpers use this to distinguish +// "not found yet" from genuine failures. +func isCLIExitError(err error) bool { + var ce *cliError + return errors.As(err, &ce) +} + +// runCLI executes the CLI binary with the given arguments and returns stdout. +// Each command is given an independent timeout (cliCommandTimeout) to prevent +// a single hanging call from consuming the entire suite timeout. +func runCLI(ctx context.Context, args ...string) (string, error) { + cmdCtx, cancel := context.WithTimeout(ctx, cliCommandTimeout) + defer cancel() + cmd := exec.CommandContext(cmdCtx, cliBinary, args...) 
+ out, err := cmd.Output() + if err != nil { + var exitErr *exec.ExitError + if errors.As(err, &exitErr) { + return "", &cliError{ + Args: args, + ExitCode: exitErr.ExitCode(), + Stderr: string(exitErr.Stderr), + Err: err, + } + } + return "", fmt.Errorf("cli %v failed: %w", args, err) + } + return string(out), nil +} + +// deployApplication deploys a self-hosted application using the CLI and returns +// the application address from the JSON output. +// Extra CLI flags (e.g., "--salt", value, "--prt") can be appended via extraArgs. +func deployApplication(ctx context.Context, appName, dappPath string, extraArgs ...string) (string, error) { + args := []string{"deploy", "application", appName, dappPath, "--json"} + args = append(args, extraArgs...) + out, err := runCLI(ctx, args...) + if err != nil { + return "", fmt.Errorf("deploy: %w", err) + } + + var app struct { + IApplicationAddress string `json:"iapplication_address"` + IConsensusAddress string `json:"iconsensus_address"` + } + if err := json.Unmarshal([]byte(out), &app); err != nil { + return "", fmt.Errorf("parse deploy output: %w", err) + } + return app.IApplicationAddress, nil +} + +// sendInput sends a payload to the application and returns (inputIndex, blockNumber). 
+func sendInput(ctx context.Context, appName string, payload string) (uint64, uint64, error) { + out, err := runCLI(ctx, "send", appName, payload, "--yes", "--json") + if err != nil { + return 0, 0, fmt.Errorf("send: %w", err) + } + var result cli.SendResult + if err := json.Unmarshal([]byte(out), &result); err != nil { + return 0, 0, fmt.Errorf("parse send output: %w", err) + } + inputIndex, err := hexutil.DecodeUint64(result.InputIndex) + if err != nil { + return 0, 0, fmt.Errorf("parse input_index %q: %w", result.InputIndex, err) + } + blockNumber, err := hexutil.DecodeUint64(result.BlockNumber) + if err != nil { + return 0, 0, fmt.Errorf("parse block_number %q: %w", result.BlockNumber, err) + } + return inputIndex, blockNumber, nil +} + +// readOutputs lists all outputs for the application. +func readOutputs(ctx context.Context, appName string) (*api.ListResponse[api.DecodedOutput], error) { + out, err := runCLI(ctx, "read", "outputs", appName) + if err != nil { + return nil, err + } + var resp api.ListResponse[api.DecodedOutput] + if err := json.Unmarshal([]byte(out), &resp); err != nil { + return nil, fmt.Errorf("parse outputs: %w", err) + } + return &resp, nil +} + +// readOutput reads a single output by index. +func readOutput(ctx context.Context, appName string, index uint64) (*api.DecodedOutput, error) { + out, err := runCLI(ctx, "read", "outputs", appName, strconv.FormatUint(index, 10)) + if err != nil { + return nil, err + } + var resp api.SingleResponse[api.DecodedOutput] + if err := json.Unmarshal([]byte(out), &resp); err != nil { + return nil, fmt.Errorf("parse output: %w", err) + } + return &resp.Data, nil +} + +// readReports lists all reports for the application. 
+func readReports(ctx context.Context, appName string) (*api.ListResponse[model.Report], error) { + out, err := runCLI(ctx, "read", "reports", appName) + if err != nil { + return nil, err + } + var resp api.ListResponse[model.Report] + if err := json.Unmarshal([]byte(out), &resp); err != nil { + return nil, fmt.Errorf("parse reports: %w", err) + } + return &resp, nil +} + +// readEpoch reads a single epoch by index. +func readEpoch(ctx context.Context, appName string, epochIndex uint64) (*model.Epoch, error) { + out, err := runCLI(ctx, "read", "epochs", appName, strconv.FormatUint(epochIndex, 10)) + if err != nil { + return nil, err + } + var resp api.SingleResponse[model.Epoch] + if err := json.Unmarshal([]byte(out), &resp); err != nil { + return nil, fmt.Errorf("parse epoch: %w", err) + } + return &resp.Data, nil +} + +// readInput reads a single input by index. +func readInput(ctx context.Context, appName string, inputIndex uint64) (*model.Input, error) { + out, err := runCLI(ctx, "read", "inputs", appName, strconv.FormatUint(inputIndex, 10)) + if err != nil { + return nil, err + } + var resp api.SingleResponse[model.Input] + if err := json.Unmarshal([]byte(out), &resp); err != nil { + return nil, fmt.Errorf("parse input: %w", err) + } + return &resp.Data, nil +} + +// executeOutput executes a voucher on L1 via the CLI. +func executeOutput(ctx context.Context, appName string, index uint64) (string, error) { + out, err := runCLI(ctx, "execute", appName, strconv.FormatUint(index, 10), "--yes", "--json") + if err != nil { + return "", fmt.Errorf("execute: %w", err) + } + var result cli.ExecuteResult + if err := json.Unmarshal([]byte(out), &result); err != nil { + return "", fmt.Errorf("parse execute output: %w", err) + } + return result.TransactionHash, nil +} + +// validateOutput validates a notice on L1 via the CLI. 
+func validateOutput(ctx context.Context, appName string, index uint64) error { + _, err := runCLI(ctx, "validate", appName, strconv.FormatUint(index, 10)) + return err +} + +// readTournaments lists all tournaments for the application. +func readTournaments( + ctx context.Context, + appName string, +) (*api.ListResponse[model.Tournament], error) { + out, err := runCLI(ctx, "read", "tournaments", appName) + if err != nil { + return nil, err + } + var resp api.ListResponse[model.Tournament] + if err := json.Unmarshal([]byte(out), &resp); err != nil { + return nil, fmt.Errorf("parse tournaments: %w", err) + } + return &resp, nil +} + +// readCommitments lists all commitments for the application. +func readCommitments( + ctx context.Context, + appName string, +) (*api.ListResponse[model.Commitment], error) { + out, err := runCLI(ctx, "read", "commitments", appName) + if err != nil { + return nil, err + } + var resp api.ListResponse[model.Commitment] + if err := json.Unmarshal([]byte(out), &resp); err != nil { + return nil, fmt.Errorf("parse commitments: %w", err) + } + return &resp, nil +} diff --git a/test/integration/echo_authority_test.go b/test/integration/echo_authority_test.go new file mode 100644 index 000000000..8d516fba0 --- /dev/null +++ b/test/integration/echo_authority_test.go @@ -0,0 +1,46 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//go:build endtoendtests + +package integration + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/suite" +) + +type EchoAuthoritySuite struct { + suite.Suite + ctx context.Context + cancel context.CancelFunc +} + +func TestEchoAuthority(t *testing.T) { + suite.Run(t, new(EchoAuthoritySuite)) +} + +func (s *EchoAuthoritySuite) SetupSuite() { + s.ctx, s.cancel = context.WithTimeout(context.Background(), 10*time.Minute) +} + +func (s *EchoAuthoritySuite) TearDownSuite() { + s.cancel() +} + +// TestEchoAuthorityLifecycle tests the full 
L1->Machine->L1 pipeline: +// deploy, send input, verify outputs, wait for claim, execute voucher, validate notice. +func (s *EchoAuthoritySuite) TestEchoAuthorityLifecycle() { + dappPath := envOrDefault("CARTESI_TEST_DAPP_PATH", "applications/echo-dapp") + + runEchoLifecycleTest(s.ctx, s.T(), s.Require(), echoLifecycleConfig{ + AppName: uniqueAppName("echo-authority"), + DappPath: dappPath, + Payload: "hello cartesi", + }) + + s.T().Log("=== Authority lifecycle complete: L1 → Machine → Proofs → L1 execution verified ===") +} diff --git a/test/integration/echo_prt_test.go b/test/integration/echo_prt_test.go new file mode 100644 index 000000000..93811a75d --- /dev/null +++ b/test/integration/echo_prt_test.go @@ -0,0 +1,63 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//go:build endtoendtests + +package integration + +import ( + "context" + "testing" + "time" + + "github.com/ethereum/go-ethereum/ethclient" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type EchoPrtSuite struct { + suite.Suite + ctx context.Context + cancel context.CancelFunc + + ethClient *ethclient.Client +} + +func TestEchoPrt(t *testing.T) { + suite.Run(t, new(EchoPrtSuite)) +} + +func (s *EchoPrtSuite) SetupSuite() { + s.ctx, s.cancel = context.WithTimeout(context.Background(), 10*time.Minute) + + endpoint := envOrDefault("CARTESI_BLOCKCHAIN_HTTP_ENDPOINT", "http://localhost:8545") + client, err := ethclient.Dial(endpoint) + s.Require().NoError(err, "dial ethclient") + s.ethClient = client +} + +func (s *EchoPrtSuite) TearDownSuite() { + s.cancel() + s.ethClient.Close() +} + +// TestEchoPrtLifecycle tests the PRT (Dave consensus) path: +// deploy with --prt, send input, verify outputs/reports, then complete +// both epoch 0 (empty, sealed at deploy) and epoch 1 (with input) tournaments. 
+func (s *EchoPrtSuite) TestEchoPrtLifecycle() { + dappPath := envOrDefault("CARTESI_TEST_DAPP_PATH", "applications/echo-dapp") + ethClient := s.ethClient + + runEchoLifecycleTest(s.ctx, s.T(), s.Require(), echoLifecycleConfig{ + AppName: uniqueAppName("echo-prt"), + DappPath: dappPath, + Payload: "prt-hello", + ExtraDeployArgs: []string{"--prt"}, + PreClaimHook: func(ctx context.Context, t testing.TB, require *require.Assertions, appName string) { + settleTournament(ctx, t, require, ethClient, appName, 0) + settleTournament(ctx, t, require, ethClient, appName, 1) + }, + }) + + s.T().Log("=== PRT lifecycle complete: L1 -> Machine -> Tournament -> L1 execution verified ===") +} diff --git a/test/integration/lifecycle_test.go b/test/integration/lifecycle_test.go new file mode 100644 index 000000000..4815b6555 --- /dev/null +++ b/test/integration/lifecycle_test.go @@ -0,0 +1,411 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//go:build endtoendtests + +// IMPORTANT: These integration tests share a single Anvil blockchain instance. +// PRT tests call anvilMine() which globally advances the block number, affecting +// Authority epoch boundaries. Tests MUST NOT run in parallel (no t.Parallel()). +// The go test runner executes tests within a package sequentially by default, +// which is required for correctness here. + +package integration + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/cartesi/rollups-node/internal/jsonrpc/api" + "github.com/cartesi/rollups-node/internal/model" + "github.com/stretchr/testify/require" +) + +// Timeouts for polling operations. +const ( + inputProcessingTimeout = 3 * time.Minute + claimAcceptedTimeout = 5 * time.Minute +) + +// oneEtherWei is 1 ETH in wei (10^18), hex-encoded for Anvil RPC calls. +// Used to fund application contracts so vouchers can transfer ETH during tests. 
+const oneEtherWei = "0xde0b6b3a7640000" + +// Expected output/report counts per echo-dapp input. +// The ioctl-echo-loop dapp produces 3 outputs (voucher + delegatecall voucher + notice) +// and 1 report per accepted input. These constants keep assertions in sync with dapp behavior. +const ( + echoOutputsPerInput = 3 // Voucher + DelegateCallVoucher + Notice + echoReportsPerInput = 1 + rejectOutputsPerAcceptedInput = 2 // Voucher + Notice (no DelegateCallVoucher) + rejectReportsPerAcceptedInput = 1 +) + +// echoLifecycleConfig configures a shared echo lifecycle test that covers: +// deploy, send input, verify outputs/reports, wait for claim, verify proofs, +// execute voucher, and validate notice. +type echoLifecycleConfig struct { + // AppName is the unique application name for deployment. + AppName string + // DappPath is the filesystem path to the dapp snapshot. + DappPath string + // Payload is the input payload to send. + Payload string + // ExtraDeployArgs are additional CLI flags for deploy (e.g., "--prt"). + ExtraDeployArgs []string + // PreClaimHook, if non-nil, is called after outputs are verified + // but before the claim/execution phase. For PRT consensus, this settles + // tournaments; for other consensus types, it can perform any required + // pre-claim actions. + // It receives the application name. + PreClaimHook func(ctx context.Context, t testing.TB, require *require.Assertions, appName string) +} + +// runEchoLifecycleTest runs the full L1->Machine->L1 pipeline for an echo-dapp. +// It works for both Authority and PRT consensus, controlled by the config. 
+func runEchoLifecycleTest(ctx context.Context, t testing.TB, require *require.Assertions, cfg echoLifecycleConfig) { + // --- Setup: deploy and fund --- + + t.Logf("--- Setup: deploying echo-dapp %s ---", cfg.AppName) + t.Logf(" dapp=%s path=%s", cfg.AppName, cfg.DappPath) + defer timed(t, "full echo lifecycle")() + + deployArgs := append([]string{"--salt", uniqueSalt()}, cfg.ExtraDeployArgs...) + + func() { + defer timed(t, "deploy echo-dapp")() + appAddr, err := deployApplication(ctx, cfg.AppName, cfg.DappPath, deployArgs...) + require.NoError(err, "deploy echo-dapp") + t.Logf(" application deployed at %s", appAddr) + + err = anvilSetBalance(ctx, appAddr, oneEtherWei) + require.NoError(err, "fund application contract") + t.Log(" funded application contract with 1 ETH for voucher execution") + }() + + // --- L1 -> Machine: send input and wait for processing --- + + t.Logf("Sending input payload=%q to the echo-dapp on L1", cfg.Payload) + inputIndex, blockNum, err := sendInput(ctx, cfg.AppName, cfg.Payload) + require.NoError(err, "send input") + require.Equal(uint64(0), inputIndex) + t.Logf(" input accepted on-chain: index=%d block=%d", inputIndex, blockNum) + + func() { + defer timed(t, "wait for input processing")() + t.Log("Waiting for the advancer to pick up the input and run it through the Cartesi Machine...") + processCtx, processCancel := context.WithTimeout(ctx, inputProcessingTimeout) + defer processCancel() + + input, err := waitForInputProcessed(processCtx, t, cfg.AppName, inputIndex) + require.NoError(err, "wait for input processing") + require.Equal(model.InputCompletionStatus_Accepted, input.Status) + t.Log(" machine processed the input and returned ACCEPTED") + }() + + // --- Verify off-chain results: outputs and reports --- + + t.Log("Checking that the echo-dapp produced the expected outputs (voucher, delegate-call voucher, notice)...") + outputsResp, err := readOutputs(ctx, cfg.AppName) + require.NoError(err, "read outputs") + 
require.Equal(uint64(echoOutputsPerInput), outputsResp.Pagination.TotalCount, + "expected %d outputs (voucher + delegatecall voucher + notice)", echoOutputsPerInput) + require.Len(outputsResp.Data, echoOutputsPerInput) + + var voucherIdx, noticeIdx uint64 + voucherFound, delegateVoucherFound, noticeFound := false, false, false + for _, out := range outputsResp.Data { + if out.DecodedData == nil { + continue + } + switch out.DecodedData.Type { + case "Voucher": + voucherIdx = out.Index + voucherFound = true + case "DelegateCallVoucher": + delegateVoucherFound = true + case "Notice": + noticeIdx = out.Index + noticeFound = true + } + } + require.True(voucherFound, "voucher output not found") + require.True(delegateVoucherFound, "delegate call voucher output not found") + require.True(noticeFound, "notice output not found") + + reportsResp, err := readReports(ctx, cfg.AppName) + require.NoError(err, "read reports") + require.Equal(uint64(echoReportsPerInput), reportsResp.Pagination.TotalCount, + "expected %d report(s)", echoReportsPerInput) + t.Log(" all outputs and reports verified") + + // --- Optional pre-claim hook (e.g. PRT tournament settlement) --- + + if cfg.PreClaimHook != nil { + cfg.PreClaimHook(ctx, t, require, cfg.AppName) + } + + // --- Consensus + L1 execution (shared phase) --- + + epochIndex := outputsResp.Data[0].EpochIndex + + verifyClaimAndExecute(ctx, t, require, verifyAndExecuteConfig{ + AppName: cfg.AppName, + EpochIndex: epochIndex, + EpochOutputs: outputsResp.Data, + VoucherIdx: voucherIdx, + NoticeIdx: noticeIdx, + CheckReExecution: true, + }) +} + +// rejectExceptionLifecycleConfig configures a shared reject/exception lifecycle +// test that covers: deploy, send 3 inputs (where input #1 is rejected/exception), +// verify outputs/reports, wait for claim, verify proofs, execute voucher, and +// validate notice. +type rejectExceptionLifecycleConfig struct { + // AppName is the unique application name for deployment. 
+ AppName string + // DappPath is the filesystem path to the dapp snapshot. + DappPath string + // TestName is used for log messages (e.g., "reject", "exception"). + TestName string + // FailStatus is the expected status for the failed input (e.g., REJECTED, EXCEPTION). + FailStatus model.InputCompletionStatus + // ExtraDeployArgs are additional CLI flags for deploy (e.g., "--prt"). + ExtraDeployArgs []string + // PreClaimHook, if non-nil, is called after outputs are verified + // but before the claim/execution phase. For PRT consensus, this settles + // tournaments; for other consensus types, it can perform any required + // pre-claim actions. + // It receives the application name. + PreClaimHook func(ctx context.Context, t testing.TB, require *require.Assertions, appName string) + // EpochIndex, if non-nil, overrides the epoch used for claim/execution. + // If nil, the first output's epoch index is used. + EpochIndex *uint64 +} + +// runRejectExceptionLifecycleTest runs the reject/exception pipeline for a dapp +// that rejects or throws on input index 1. Works for both Authority and PRT +// consensus, controlled by the config. +func runRejectExceptionLifecycleTest( + ctx context.Context, + t testing.TB, + require *require.Assertions, + cfg rejectExceptionLifecycleConfig, +) { + // --- Setup: deploy and fund the dapp --- + + t.Logf("--- Setup: deploying %s-loop-dapp (will %s input #1) ---", cfg.TestName, cfg.FailStatus) + t.Logf(" dapp=%s path=%s", cfg.AppName, cfg.DappPath) + defer timed(t, fmt.Sprintf("full %s lifecycle", cfg.TestName))() + + deployArgs := append([]string{"--salt", uniqueSalt()}, cfg.ExtraDeployArgs...) + + func() { + defer timed(t, fmt.Sprintf("deploy %s-loop-dapp", cfg.TestName))() + appAddr, err := deployApplication(ctx, cfg.AppName, cfg.DappPath, deployArgs...) 
+ require.NoError(err, "deploy %s-loop-dapp", cfg.TestName) + t.Logf(" application deployed at %s", appAddr) + + err = anvilSetBalance(ctx, appAddr, oneEtherWei) + require.NoError(err, "fund application contract") + t.Log(" funded application contract with 1 ETH for voucher execution") + }() + + // --- L1 -> Machine: send 3 inputs where input #1 will be rejected/exception --- + + t.Logf("Sending 3 inputs — the dapp will %s input #1 while accepting #0 and #2...", cfg.FailStatus) + const numInputs = 3 + for i := range numInputs { + payload := fmt.Sprintf("%s-payload-%d", cfg.TestName, i) + idx, _, err := sendInput(ctx, cfg.AppName, payload) + require.NoError(err, "send input %d", i) + require.Equal(uint64(i), idx, "input index mismatch") + t.Logf(" input %d sent (payload=%q)", i, payload) + } + + func() { + defer timed(t, "wait for input processing (3 inputs)")() + t.Log("Waiting for the advancer to process all 3 inputs through the Cartesi Machine...") + processCtx, processCancel := context.WithTimeout(ctx, inputProcessingTimeout) + defer processCancel() + + expectedStatuses := map[uint64]model.InputCompletionStatus{ + 0: model.InputCompletionStatus_Accepted, + 1: cfg.FailStatus, + 2: model.InputCompletionStatus_Accepted, + } + + for i := range uint64(numInputs) { + input, err := waitForInputProcessed(processCtx, t, cfg.AppName, i) + require.NoError(err, "wait for input %d processing", i) + require.Equal(expectedStatuses[i], input.Status, + "input %d: expected status %s, got %s", i, expectedStatuses[i], input.Status) + t.Logf(" input %d: %s", i, input.Status) + } + }() + + // --- Verify off-chain results: only accepted inputs produce outputs --- + + t.Logf("Checking outputs — only accepted inputs should produce them (no outputs from %s input #1)...", + cfg.FailStatus) + outputsResp, err := readOutputs(ctx, cfg.AppName) + require.NoError(err, "read outputs") + numAccepted := uint64(2) + require.Equal(numAccepted*rejectOutputsPerAcceptedInput, 
outputsResp.Pagination.TotalCount, + "expected %d outputs (%d per accepted input x %d accepted inputs)", + numAccepted*rejectOutputsPerAcceptedInput, rejectOutputsPerAcceptedInput, numAccepted) + + for _, out := range outputsResp.Data { + require.NotEqual(uint64(1), out.InputIndex, + "output %d should not belong to %s input (index 1)", out.Index, cfg.FailStatus) + } + t.Logf(" %d outputs found, none from the failed input — correct", + numAccepted*rejectOutputsPerAcceptedInput) + + t.Log("Checking reports — same rule: only accepted inputs produce reports...") + reportsResp, err := readReports(ctx, cfg.AppName) + require.NoError(err, "read reports") + require.Equal(numAccepted*rejectReportsPerAcceptedInput, reportsResp.Pagination.TotalCount, + "expected %d reports (%d per accepted input x %d accepted inputs)", + numAccepted*rejectReportsPerAcceptedInput, rejectReportsPerAcceptedInput, numAccepted) + t.Logf(" %d reports found — correct", numAccepted*rejectReportsPerAcceptedInput) + + // --- Optional pre-claim hook (e.g. PRT tournament settlement) --- + + if cfg.PreClaimHook != nil { + cfg.PreClaimHook(ctx, t, require, cfg.AppName) + } + + // --- Consensus + L1 execution (shared phase) --- + + var epochIndex uint64 + if cfg.EpochIndex != nil { + epochIndex = *cfg.EpochIndex + } else { + epochIndex = outputsResp.Data[0].EpochIndex + } + + // Collect outputs belonging to the claimed epoch and find a voucher + notice among them. 
+ var epochOutputs []api.DecodedOutput + var voucherIdx, noticeIdx uint64 + voucherFound, noticeFound := false, false + for _, out := range outputsResp.Data { + if out.EpochIndex != epochIndex { + continue + } + epochOutputs = append(epochOutputs, out) + if out.DecodedData == nil { + continue + } + switch out.DecodedData.Type { + case "Voucher": + if !voucherFound { + voucherIdx = out.Index + voucherFound = true + } + case "Notice": + if !noticeFound { + noticeIdx = out.Index + noticeFound = true + } + } + } + require.True(voucherFound, "voucher output not found in epoch %d", epochIndex) + require.True(noticeFound, "notice output not found in epoch %d", epochIndex) + + verifyClaimAndExecute(ctx, t, require, verifyAndExecuteConfig{ + AppName: cfg.AppName, + EpochIndex: epochIndex, + EpochOutputs: epochOutputs, + VoucherIdx: voucherIdx, + NoticeIdx: noticeIdx, + }) + + t.Logf("=== %s test complete: %s handling + L1 execution verified ===", cfg.TestName, cfg.FailStatus) +} + +// verifyAndExecuteConfig describes the post-settlement verification phase: +// wait for claim, check proofs, execute voucher, validate notice. +type verifyAndExecuteConfig struct { + AppName string + EpochIndex uint64 + EpochOutputs []api.DecodedOutput + VoucherIdx uint64 + NoticeIdx uint64 + // CheckReExecution, if true, verifies that re-executing the voucher + // reverts (vouchers are single-use). + CheckReExecution bool +} + +// verifyClaimAndExecute waits for the epoch claim to be accepted, verifies +// Merkle proofs, executes a voucher, and validates a notice on L1. This is the +// shared tail of both echo and reject/exception lifecycle tests. 
+func verifyClaimAndExecute( + ctx context.Context, + t testing.TB, + require *require.Assertions, + cfg verifyAndExecuteConfig, +) { + // --- Consensus: wait for claim acceptance --- + + func() { + defer timed(t, "wait for claim acceptance")() + t.Logf("Waiting for the claim to be accepted for epoch %d...", cfg.EpochIndex) + claimCtx, claimCancel := context.WithTimeout(ctx, claimAcceptedTimeout) + defer claimCancel() + + epoch, err := waitForEpochStatus( + claimCtx, t, cfg.AppName, cfg.EpochIndex, model.EpochStatus_ClaimAccepted) + require.NoError(err, "wait for claim accepted") + require.NotNil(epoch.OutputsMerkleRoot, "epoch claim should be set") + t.Logf(" epoch %d claim accepted (hash=%s)", cfg.EpochIndex, *epoch.OutputsMerkleRoot) + }() + + // --- Verify Merkle proofs --- + + t.Logf("Verifying that Merkle proofs were generated for epoch %d outputs...", cfg.EpochIndex) + for _, out := range cfg.EpochOutputs { + refreshed, err := readOutput(ctx, cfg.AppName, out.Index) + require.NoError(err, "read output %d", out.Index) + require.NotEmpty(refreshed.OutputHashesSiblings, + "output %d should have proof siblings", out.Index) + } + t.Log(" all outputs have valid Merkle proof siblings") + + // --- L1 execution: execute the voucher and validate the notice on-chain --- + + func() { + defer timed(t, "L1 voucher execution + notice validation")() + + t.Logf("Executing voucher (output %d) on L1 — this calls the destination contract...", cfg.VoucherIdx) + txHash, err := executeOutput(ctx, cfg.AppName, cfg.VoucherIdx) + require.NoError(err, "execute voucher") + require.NotEmpty(txHash) + t.Logf(" voucher executed successfully (tx=%s)", txHash) + + t.Log("Waiting for the EVM reader to detect the execution event and record it in the DB...") + execCtx, execCancel := context.WithTimeout(ctx, inputProcessingTimeout) + defer execCancel() + + err = waitForExecutionRecorded(execCtx, t, cfg.AppName, cfg.VoucherIdx) + require.NoError(err, "wait for execution tx hash in DB") + t.Log(" 
execution event recorded in DB")
+
+		t.Logf("Validating notice (output %d) on L1 — proving it was emitted by the machine...", cfg.NoticeIdx)
+		err = validateOutput(ctx, cfg.AppName, cfg.NoticeIdx)
+		require.NoError(err, "validate notice should succeed (no revert)")
+		t.Log(" notice validated on-chain (proof accepted by the contract)")
+	}()
+
+	// --- Optional safety check: vouchers are single-use ---
+
+	if cfg.CheckReExecution {
+		t.Log("Attempting to execute the same voucher again (should revert — vouchers are single-use)...")
+		_, err := executeOutput(ctx, cfg.AppName, cfg.VoucherIdx)
+		require.Error(err, "second voucher execution should revert")
+		t.Log(" correctly reverted — voucher cannot be replayed")
+	}
+}
diff --git a/test/integration/main_test.go b/test/integration/main_test.go
new file mode 100644
index 000000000..1e82da4da
--- /dev/null
+++ b/test/integration/main_test.go
@@ -0,0 +1,41 @@
+// (c) Cartesi and individual authors (see AUTHORS)
+// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
+
+//go:build endtoendtests
+
+package integration
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"testing"
+)
+
+// TestMain enforces that integration tests run sequentially.
+// These tests share a single Anvil blockchain instance and PRT tests call
+// anvilMine() which globally advances the block number, affecting Authority
+// epoch boundaries. Running tests in parallel would cause subtle timing races.
+func TestMain(m *testing.M) {
+	flag.Parse()
+	if testing.Short() {
+		fmt.Fprintln(os.Stderr, "skipping integration tests in short mode")
+		os.Exit(0)
+	}
+	// If -test.parallel is set above 1, warn and force it down to 1. The test
+	// binary's flags were already parsed by flag.Parse() above, so we can
+	// inspect and override the flag value directly. Go's testing package uses
+	// -test.parallel to cap how many tests run in parallel; its default is
+	// GOMAXPROCS, which may be >1.
+ p := flag.Lookup("test.parallel") + if p != nil && p.Value.String() != "1" { + fmt.Fprintln(os.Stderr, + "WARNING: integration tests must not run in parallel "+ + "(-test.parallel should be 1). Forcing -test.parallel=1.") + if err := p.Value.Set("1"); err != nil { + fmt.Fprintf(os.Stderr, "failed to set -test.parallel=1: %v\n", err) + os.Exit(1) + } + } + os.Exit(m.Run()) +} diff --git a/test/integration/multi_app_test.go b/test/integration/multi_app_test.go new file mode 100644 index 000000000..f305f809d --- /dev/null +++ b/test/integration/multi_app_test.go @@ -0,0 +1,201 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//go:build endtoendtests + +package integration + +import ( + "context" + "testing" + "time" + + "github.com/cartesi/rollups-node/internal/jsonrpc/api" + "github.com/cartesi/rollups-node/internal/model" + "github.com/stretchr/testify/suite" +) + +type MultiAppSuite struct { + suite.Suite + ctx context.Context + cancel context.CancelFunc + + app1Name string + app2Name string + app1Addr string + app2Addr string +} + +func TestMultiApp(t *testing.T) { + suite.Run(t, new(MultiAppSuite)) +} + +func (s *MultiAppSuite) SetupSuite() { + s.ctx, s.cancel = context.WithTimeout(context.Background(), 10*time.Minute) + s.app1Name = uniqueAppName("multi-app-1") + s.app2Name = uniqueAppName("multi-app-2") + + dappPath := envOrDefault("CARTESI_TEST_DAPP_PATH", "applications/echo-dapp") + + // Deploy two independent echo-dapp instances with unique random salts + // to avoid CREATE2 address collisions (same factory + same template hash) + // and to ensure tests are idempotent across re-runs. 
+ s.T().Log("--- Setup: deploying two independent echo-dapps to test isolation ---") + s.T().Logf(" deploying app-1: name=%s", s.app1Name) + addr1, err := deployApplication(s.ctx, s.app1Name, dappPath, "--salt", uniqueSalt()) + s.Require().NoError(err, "deploy app-1") + s.app1Addr = addr1 + s.T().Logf(" app-1 deployed at %s", addr1) + + s.T().Logf(" deploying app-2: name=%s", s.app2Name) + addr2, err := deployApplication(s.ctx, s.app2Name, dappPath, "--salt", uniqueSalt()) + s.Require().NoError(err, "deploy app-2") + s.app2Addr = addr2 + s.T().Logf(" app-2 deployed at %s", addr2) + + // Fund both application contracts so they can execute vouchers. + err = anvilSetBalance(s.ctx, addr1, oneEtherWei) + s.Require().NoError(err, "fund app-1 contract") + err = anvilSetBalance(s.ctx, addr2, oneEtherWei) + s.Require().NoError(err, "fund app-2 contract") + s.T().Log(" both apps funded with 1 ETH for voucher execution") +} + +func (s *MultiAppSuite) TearDownSuite() { + s.cancel() +} + +// TestMultiAppIsolation verifies that two applications running on the same node +// process inputs independently and produce isolated outputs, reports, and claims. 
+func (s *MultiAppSuite) TestMultiAppIsolation() { + require := s.Require() + payload1 := "input-for-app-1" + payload2 := "input-for-app-2" + defer timed(s.T(), "full multi-app isolation test")() + + // --- L1 -> Machine: send one input to each app and verify independent processing --- + + s.T().Log("Sending one input to each app — they should process independently with separate input indices...") + idx1, _, err := sendInput(s.ctx, s.app1Name, payload1) + require.NoError(err, "send input to app-1") + require.Equal(uint64(0), idx1) + s.T().Logf(" app-1: input sent (index=%d)", idx1) + + idx2, _, err := sendInput(s.ctx, s.app2Name, payload2) + require.NoError(err, "send input to app-2") + require.Equal(uint64(0), idx2, "app-2 should start at input index 0 independently") + s.T().Logf(" app-2: input sent (index=%d) — independent counter, also starts at 0", idx2) + + func() { + defer timed(s.T(), "wait for both apps to process inputs")() + s.T().Log("Waiting for both apps to process their inputs through separate Cartesi Machine instances...") + processCtx, processCancel := context.WithTimeout(s.ctx, inputProcessingTimeout) + defer processCancel() + + input1, err := waitForInputProcessed(processCtx, s.T(), s.app1Name, idx1) + require.NoError(err, "wait for app-1 input processing") + require.Equal(model.InputCompletionStatus_Accepted, input1.Status) + s.T().Log(" app-1: input ACCEPTED") + + input2, err := waitForInputProcessed(processCtx, s.T(), s.app2Name, idx2) + require.NoError(err, "wait for app-2 input processing") + require.Equal(model.InputCompletionStatus_Accepted, input2.Status) + s.T().Log(" app-2: input ACCEPTED") + }() + + // --- Verify output and report isolation between apps --- + + s.T().Log("Checking output isolation — each app should have exactly 3 outputs from its own input...") + outputs1, err := readOutputs(s.ctx, s.app1Name) + require.NoError(err, "read app-1 outputs") + require.Equal(uint64(echoOutputsPerInput), outputs1.Pagination.TotalCount, + 
"app-1 should have %d outputs", echoOutputsPerInput) + + outputs2, err := readOutputs(s.ctx, s.app2Name) + require.NoError(err, "read app-2 outputs") + require.Equal(uint64(echoOutputsPerInput), outputs2.Pagination.TotalCount, + "app-2 should have %d outputs", echoOutputsPerInput) + s.T().Logf(" both apps have %d outputs each — isolated", echoOutputsPerInput) + + s.T().Log("Checking report isolation — each app should have exactly 1 report...") + reports1, err := readReports(s.ctx, s.app1Name) + require.NoError(err, "read app-1 reports") + require.Equal(uint64(echoReportsPerInput), reports1.Pagination.TotalCount, + "app-1 should have %d report(s)", echoReportsPerInput) + + reports2, err := readReports(s.ctx, s.app2Name) + require.NoError(err, "read app-2 reports") + require.Equal(uint64(echoReportsPerInput), reports2.Pagination.TotalCount, + "app-2 should have %d report(s)", echoReportsPerInput) + s.T().Logf(" both apps have %d report(s) each — isolated", echoReportsPerInput) + + // --- Cross-app isolation: sending to app-1 must not affect app-2 --- + + s.T().Log("Sending a second input to app-1 only — app-2 output count must remain unchanged...") + idx1b, _, err := sendInput(s.ctx, s.app1Name, "second-input") + require.NoError(err, "send second input to app-1") + require.Equal(uint64(1), idx1b, "app-1 second input should be index 1") + + func() { + defer timed(s.T(), "wait for app-1 second input")() + processCtx2, processCancel2 := context.WithTimeout(s.ctx, inputProcessingTimeout) + defer processCancel2() + + input1b, err := waitForInputProcessed(processCtx2, s.T(), s.app1Name, idx1b) + require.NoError(err, "wait for app-1 second input processing") + require.Equal(model.InputCompletionStatus_Accepted, input1b.Status) + }() + + outputs1after, err := readOutputs(s.ctx, s.app1Name) + require.NoError(err, "read app-1 outputs after second input") + require.Equal(uint64(2*echoOutputsPerInput), outputs1after.Pagination.TotalCount, + "app-1 should have %d outputs after 2 
inputs", 2*echoOutputsPerInput) + + outputs2after, err := readOutputs(s.ctx, s.app2Name) + require.NoError(err, "read app-2 outputs after app-1 second input") + require.Equal(uint64(echoOutputsPerInput), outputs2after.Pagination.TotalCount, + "app-2 should still have %d outputs", echoOutputsPerInput) + s.T().Logf(" app-1 grew to %d outputs, app-2 still has %d — no cross-contamination", + 2*echoOutputsPerInput, echoOutputsPerInput) + + // --- Consensus + L1 execution for both apps independently --- + + s.T().Log("Verifying claims and executing outputs on both apps independently...") + for _, app := range []struct { + name string + outputs *api.ListResponse[api.DecodedOutput] + }{ + {s.app1Name, outputs1}, + {s.app2Name, outputs2}, + } { + epochIndex := app.outputs.Data[0].EpochIndex + + var voucherIdx, noticeIdx uint64 + voucherFound, noticeFound := false, false + for _, out := range app.outputs.Data { + if out.DecodedData == nil { + continue + } + switch out.DecodedData.Type { + case "Voucher": + voucherIdx = out.Index + voucherFound = true + case "Notice": + noticeIdx = out.Index + noticeFound = true + } + } + require.True(voucherFound, "%s: voucher output not found", app.name) + require.True(noticeFound, "%s: notice output not found", app.name) + + verifyClaimAndExecute(s.ctx, s.T(), require, verifyAndExecuteConfig{ + AppName: app.name, + EpochIndex: epochIndex, + EpochOutputs: app.outputs.Data, + VoucherIdx: voucherIdx, + NoticeIdx: noticeIdx, + }) + } + + s.T().Log("=== Multi-app isolation complete: independent processing, claims, and L1 execution verified ===") +} diff --git a/test/integration/polling_helpers_test.go b/test/integration/polling_helpers_test.go new file mode 100644 index 000000000..655badc5b --- /dev/null +++ b/test/integration/polling_helpers_test.go @@ -0,0 +1,161 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//go:build endtoendtests + +package integration + +import ( + 
"context" + "fmt" + "testing" + "time" + + "github.com/cartesi/rollups-node/internal/model" +) + +// pollUntil polls fn at the given interval until it returns true or the context +// is cancelled. Returns an error if the context times out. +func pollUntil(ctx context.Context, interval time.Duration, fn func() (bool, error)) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return fmt.Errorf("poll timed out: %w", ctx.Err()) + default: + } + + done, err := fn() + if err != nil { + return err + } + if done { + return nil + } + + select { + case <-ctx.Done(): + return fmt.Errorf("poll timed out: %w", ctx.Err()) + case <-ticker.C: + } + } +} + +// waitForInputProcessed polls until the given input has been processed +// (status != NONE) using the CLI. +// +// Error discrimination: CLI exit errors (the input doesn't exist yet) are +// retried. Structural errors (JSON parse failure, context cancellation) fail +// immediately. +func waitForInputProcessed( + ctx context.Context, + t testing.TB, + appName string, + inputIndex uint64, +) (*model.Input, error) { + var lastErr error + var result *model.Input + err := pollUntil(ctx, 2*time.Second, func() (bool, error) { + input, err := readInput(ctx, appName, inputIndex) + if err != nil { + if isCLIExitError(err) { + lastErr = err + t.Logf("poll input %d: %v (retrying)", inputIndex, err) + return false, nil // Input may not exist yet; keep polling. + } + return false, fmt.Errorf("poll input %d: %w", inputIndex, err) + } + if input.Status != model.InputCompletionStatus_None { + result = input + return true, nil + } + return false, nil + }) + if err != nil && lastErr != nil { + return nil, fmt.Errorf("%w (last poll error: %v)", err, lastErr) + } + return result, err +} + +// waitForEpochStatus polls until the epoch at the given index reaches the +// desired status using the CLI. +// +// Error discrimination: CLI exit errors (the epoch doesn't exist yet) are +// retried. 
Structural errors fail immediately. +func waitForEpochStatus( + ctx context.Context, + t testing.TB, + appName string, + epochIndex uint64, + status model.EpochStatus, +) (*model.Epoch, error) { + var lastErr error + var result *model.Epoch + err := pollUntil(ctx, 3*time.Second, func() (bool, error) { + epoch, err := readEpoch(ctx, appName, epochIndex) + if err != nil { + if isCLIExitError(err) { + lastErr = err + t.Logf("poll epoch %d: %v (retrying)", epochIndex, err) + return false, nil // Epoch may not exist yet; keep polling. + } + return false, fmt.Errorf("poll epoch %d: %w", epochIndex, err) + } + if epoch.Status == status { + result = epoch + return true, nil + } + return false, nil + }) + if err != nil && lastErr != nil { + return nil, fmt.Errorf("%w (last poll error: %v)", err, lastErr) + } + return result, err +} + +// waitForExecutionRecorded polls until the output's execution transaction hash +// is recorded in the database. +// +// Error discrimination: CLI exit errors are retried. Structural errors +// (JSON parse failure, context cancellation) fail immediately. +func waitForExecutionRecorded( + ctx context.Context, + t testing.TB, + appName string, + outputIdx uint64, +) error { + var lastErr error + err := pollUntil(ctx, 3*time.Second, func() (bool, error) { + out, err := readOutput(ctx, appName, outputIdx) + if err != nil { + if isCLIExitError(err) { + lastErr = err + t.Logf(" poll execution tx hash for output %d: %v (retrying)", outputIdx, err) + return false, nil + } + return false, fmt.Errorf("poll execution tx hash: %w", err) + } + return out.ExecutionTransactionHash != nil, nil + }) + if err != nil && lastErr != nil { + return fmt.Errorf("%w (last poll error: %v)", err, lastErr) + } + return err +} + +// readOutput is used above and returns *api.DecodedOutput, which embeds +// *model.Output. The ExecutionTransactionHash field is *common.Hash — a nil +// check correctly detects whether the execution event has been recorded. 
+ +// timed logs the duration of a test phase. Usage: +// +// defer timed(t, "deploy echo-dapp")() +func timed(t testing.TB, phase string) func() { + t.Helper() + start := time.Now() + return func() { + t.Logf(" [timing] %s took %s", phase, time.Since(start).Round(time.Millisecond)) + } +} diff --git a/test/integration/reject_exception_prt_test.go b/test/integration/reject_exception_prt_test.go new file mode 100644 index 000000000..7ab80529b --- /dev/null +++ b/test/integration/reject_exception_prt_test.go @@ -0,0 +1,83 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +//go:build endtoendtests + +package integration + +import ( + "context" + "testing" + "time" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type RejectExceptionPrtSuite struct { + suite.Suite + ctx context.Context + cancel context.CancelFunc + + ethClient *ethclient.Client +} + +func TestRejectExceptionPrt(t *testing.T) { + suite.Run(t, new(RejectExceptionPrtSuite)) +} + +func (s *RejectExceptionPrtSuite) SetupSuite() { + s.ctx, s.cancel = context.WithTimeout(context.Background(), 10*time.Minute) + + endpoint := envOrDefault("CARTESI_BLOCKCHAIN_HTTP_ENDPOINT", "http://localhost:8545") + client, err := ethclient.Dial(endpoint) + s.Require().NoError(err, "dial ethclient") + s.ethClient = client +} + +func (s *RejectExceptionPrtSuite) TearDownSuite() { + s.cancel() + s.ethClient.Close() +} + +// TestRejectInputPrt deploys a reject-loop-dapp with PRT consensus, +// sends 3 inputs, and verifies that input 1 is REJECTED while inputs 0 and 2 +// are ACCEPTED. Then settles tournaments and executes outputs on L1. 
+func (s *RejectExceptionPrtSuite) TestRejectInputPrt() { + ethClient := s.ethClient + prtEpoch := uint64(1) + runRejectExceptionLifecycleTest(s.ctx, s.T(), s.Require(), rejectExceptionLifecycleConfig{ + AppName: uniqueAppName("reject-prt-loop"), + DappPath: envOrDefault("CARTESI_TEST_REJECT_DAPP_PATH", "applications/reject-loop-dapp"), + TestName: "reject", + FailStatus: model.InputCompletionStatus_Rejected, + ExtraDeployArgs: []string{"--prt"}, + EpochIndex: &prtEpoch, + PreClaimHook: func(ctx context.Context, t testing.TB, require *require.Assertions, appName string) { + settleTournament(ctx, t, require, ethClient, appName, 0) + settleTournament(ctx, t, require, ethClient, appName, 1) + }, + }) +} + +// TestExceptionInputPrt deploys an exception-loop-dapp with PRT consensus, +// sends 3 inputs, and verifies that input 1 is EXCEPTION while inputs 0 and 2 +// are ACCEPTED. Then settles tournaments and executes outputs on L1. +func (s *RejectExceptionPrtSuite) TestExceptionInputPrt() { + ethClient := s.ethClient + prtEpoch := uint64(1) + runRejectExceptionLifecycleTest(s.ctx, s.T(), s.Require(), rejectExceptionLifecycleConfig{ + AppName: uniqueAppName("exception-prt-loop"), + DappPath: envOrDefault("CARTESI_TEST_EXCEPTION_DAPP_PATH", "applications/exception-loop-dapp"), + TestName: "exception", + FailStatus: model.InputCompletionStatus_Exception, + ExtraDeployArgs: []string{"--prt"}, + EpochIndex: &prtEpoch, + PreClaimHook: func(ctx context.Context, t testing.TB, require *require.Assertions, appName string) { + settleTournament(ctx, t, require, ethClient, appName, 0) + settleTournament(ctx, t, require, ethClient, appName, 1) + }, + }) +} diff --git a/test/integration/reject_exception_test.go b/test/integration/reject_exception_test.go new file mode 100644 index 000000000..908187c1e --- /dev/null +++ b/test/integration/reject_exception_test.go @@ -0,0 +1,57 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) 
+ +//go:build endtoendtests + +package integration + +import ( + "context" + "testing" + "time" + + "github.com/cartesi/rollups-node/internal/model" + "github.com/stretchr/testify/suite" +) + +type RejectExceptionSuite struct { + suite.Suite + ctx context.Context + cancel context.CancelFunc +} + +func TestRejectException(t *testing.T) { + suite.Run(t, new(RejectExceptionSuite)) +} + +func (s *RejectExceptionSuite) SetupSuite() { + s.ctx, s.cancel = context.WithTimeout(context.Background(), 10*time.Minute) +} + +func (s *RejectExceptionSuite) TearDownSuite() { + s.cancel() +} + +// TestRejectInput deploys a reject-loop-dapp (ioctl-echo-loop --reject=1), +// sends 3 inputs, and verifies that input 1 is REJECTED while inputs 0 and 2 +// are ACCEPTED with correct outputs and reports. +func (s *RejectExceptionSuite) TestRejectInput() { + runRejectExceptionLifecycleTest(s.ctx, s.T(), s.Require(), rejectExceptionLifecycleConfig{ + AppName: uniqueAppName("reject-loop"), + DappPath: envOrDefault("CARTESI_TEST_REJECT_DAPP_PATH", "applications/reject-loop-dapp"), + TestName: "reject", + FailStatus: model.InputCompletionStatus_Rejected, + }) +} + +// TestExceptionInput deploys an exception-loop-dapp (ioctl-echo-loop --exception=1), +// sends 3 inputs, and verifies that input 1 is EXCEPTION while inputs 0 and 2 +// are ACCEPTED with correct outputs and reports. 
+func (s *RejectExceptionSuite) TestExceptionInput() { + runRejectExceptionLifecycleTest(s.ctx, s.T(), s.Require(), rejectExceptionLifecycleConfig{ + AppName: uniqueAppName("exception-loop"), + DappPath: envOrDefault("CARTESI_TEST_EXCEPTION_DAPP_PATH", "applications/exception-loop-dapp"), + TestName: "exception", + FailStatus: model.InputCompletionStatus_Exception, + }) +} From 97f306af51979d543e9597719ef34c523a5017bd Mon Sep 17 00:00:00 2001 From: Victor Fusco <1221933+vfusco@users.noreply.github.com> Date: Fri, 6 Mar 2026 13:47:58 -0300 Subject: [PATCH 5/5] build(ci): add lint job, cache test deps, and share docker images between jobs --- .github/workflows/build.yml | 182 +++++++++++++++++++------- .github/workflows/clean-up-images.yml | 22 +++- 2 files changed, 154 insertions(+), 50 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0057d12b6..8132b7ead 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -7,9 +7,9 @@ concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true -permissions: - id-token: write - contents: read +env: + GHCR_REGISTRY: ghcr.io + CI_TAG: ci-${{ github.sha }} jobs: basic-checks: @@ -41,6 +41,10 @@ jobs: build: runs-on: ubuntu-24.04 + permissions: + id-token: write + contents: read + packages: write steps: - name: Checkout source code uses: actions/checkout@v4 @@ -50,6 +54,13 @@ jobs: - name: Setup variables run: echo ROLLUPS_NODE_VERSION=`make version` >> $GITHUB_ENV + - name: Login to GHCR + uses: docker/login-action@v3 + with: + registry: ${{ env.GHCR_REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Set up Depot CLI uses: depot/setup-action@v1 @@ -59,9 +70,8 @@ jobs: file: Dockerfile context: . 
platforms: linux/amd64 - tags: ${{ github.repository_owner }}/rollups-node:devel-amd64 - push: false - load: true + tags: ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:devel-amd64-${{ env.CI_TAG }} + push: true project: ${{ vars.DEPOT_PROJECT }} token: ${{ secrets.DEPOT_TOKEN }} @@ -71,9 +81,20 @@ jobs: file: Dockerfile context: . platforms: linux/arm64 - tags: ${{ github.repository_owner }}/rollups-node:devel-arm64 - push: false - load: true + tags: ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:devel-arm64-${{ env.CI_TAG }} + push: true + project: ${{ vars.DEPOT_PROJECT }} + token: ${{ secrets.DEPOT_TOKEN }} + + - name: Build tester image + uses: depot/build-push-action@v1 + with: + file: Dockerfile + context: . + target: tester + platforms: linux/amd64 + tags: ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:tester-${{ env.CI_TAG }} + push: true project: ${{ vars.DEPOT_PROJECT }} token: ${{ secrets.DEPOT_TOKEN }} @@ -83,9 +104,8 @@ jobs: file: test/devnet/Dockerfile context: . platforms: linux/amd64 - tags: ${{ github.repository_owner }}/rollups-node-devnet:devel - push: false - load: true + tags: ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node-devnet:${{ env.CI_TAG }} + push: true project: ${{ vars.DEPOT_PROJECT }} token: ${{ secrets.DEPOT_TOKEN }} @@ -96,7 +116,7 @@ jobs: context: . target: debian-packager platforms: linux/amd64 - tags: ${{ github.repository_owner }}/rollups-node:debian-packager-amd64 + tags: ${{ github.repository_owner }}/rollups-node:debian-packager-amd64 push: false load: true project: ${{ vars.DEPOT_PROJECT }} @@ -112,7 +132,7 @@ jobs: context: . 
target: debian-packager platforms: linux/arm64 - tags: ${{ github.repository_owner }}/rollups-node:debian-packager-arm64 + tags: ${{ github.repository_owner }}/rollups-node:debian-packager-arm64 push: false load: true project: ${{ vars.DEPOT_PROJECT }} @@ -121,7 +141,7 @@ jobs: - name: Export deb package artifact (arm64) run: make copy-debian-package BUILD_PLATFORM=linux/arm64 DEB_ARCH=arm64 DEB_PACKAGER_IMG=${{ github.repository_owner }}/rollups-node:debian-packager-arm64 - - name: Upload artifacts + - name: Upload deb artifacts uses: actions/upload-artifact@v4 with: name: artifacts @@ -129,60 +149,128 @@ jobs: cartesi-rollups-node-v${{ env.ROLLUPS_NODE_VERSION }}_amd64.deb cartesi-rollups-node-v${{ env.ROLLUPS_NODE_VERSION }}_arm64.deb - unit-test: + lint: + # disabled for now + if: false runs-on: ubuntu-24.04 - needs: - - build + needs: [build] + permissions: + contents: read + packages: read steps: - name: Checkout source code uses: actions/checkout@v4 - - name: Login to GitHub Container Registry + - name: Login to GHCR uses: docker/login-action@v3 with: - registry: ghcr.io + registry: ${{ env.GHCR_REGISTRY }} username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Set up Depot CLI - uses: depot/setup-action@v1 + - name: Pull and tag tester image + run: | + docker pull ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:tester-${{ env.CI_TAG }} + docker tag ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:tester-${{ env.CI_TAG }} \ + cartesi/rollups-node:tester - - name: Build rollups-node:tester image - uses: depot/build-push-action@v1 + - name: Run linting + run: make lint-with-docker + + unit-test: + runs-on: ubuntu-24.04 + needs: [build] + permissions: + contents: read + packages: read + steps: + - name: Checkout source code + uses: actions/checkout@v4 + + - name: Login to GHCR + uses: docker/login-action@v3 with: - file: Dockerfile - context: . 
- target: go-builder - platforms: linux/amd64 - tags: ${{ github.repository_owner }}/rollups-node:tester - push: false - load: true - project: ${{ vars.DEPOT_PROJECT }} - token: ${{ secrets.DEPOT_TOKEN }} + registry: ${{ env.GHCR_REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - - name: Build devnet image - uses: depot/build-push-action@v1 + - name: Pull and tag CI images + run: | + docker pull ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:tester-${{ env.CI_TAG }} + docker pull ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:devel-amd64-${{ env.CI_TAG }} + docker pull ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node-devnet:${{ env.CI_TAG }} + docker tag ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:tester-${{ env.CI_TAG }} \ + cartesi/rollups-node:tester + docker tag ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:devel-amd64-${{ env.CI_TAG }} \ + cartesi/rollups-node:devel + docker tag ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node-devnet:${{ env.CI_TAG }} \ + cartesi/rollups-node-devnet:devel + + - name: Cache test machine images + uses: actions/cache@v4 with: - file: test/devnet/Dockerfile - context: . 
- platforms: linux/amd64 - tags: ${{ github.repository_owner }}/rollups-node-devnet:devel - push: false - load: true - project: ${{ vars.DEPOT_PROJECT }} - token: ${{ secrets.DEPOT_TOKEN }} + path: test/downloads + key: test-deps-${{ hashFiles('test/dependencies.sha256') }} - name: Download test dependencies - run: | - make download-test-dependencies + run: make download-test-dependencies - name: Run unit tests + run: make unit-test-with-compose + + integration-test: + runs-on: ubuntu-24.04 + needs: [build] + timeout-minutes: 60 + permissions: + contents: read + packages: read + steps: + - name: Checkout source code + uses: actions/checkout@v4 + + - name: Login to GHCR + uses: docker/login-action@v3 + with: + registry: ${{ env.GHCR_REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Pull and tag CI images run: | - make test-with-compose + docker pull ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:tester-${{ env.CI_TAG }} + docker pull ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:devel-amd64-${{ env.CI_TAG }} + docker pull ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node-devnet:${{ env.CI_TAG }} + docker tag ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:tester-${{ env.CI_TAG }} \ + cartesi/rollups-node:tester + docker tag ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node:devel-amd64-${{ env.CI_TAG }} \ + cartesi/rollups-node:devel + docker tag ${{ env.GHCR_REGISTRY }}/${{ github.repository_owner }}/rollups-node-devnet:${{ env.CI_TAG }} \ + cartesi/rollups-node-devnet:devel + + - name: Cache test machine images + uses: actions/cache@v4 + with: + path: test/downloads + key: test-deps-${{ hashFiles('test/dependencies.sha256') }} + + - name: Download test dependencies + run: make download-test-dependencies + + - name: Run integration tests + run: make integration-test-with-compose + + - name: Upload integration test logs 
+ if: always() + uses: actions/upload-artifact@v4 + with: + name: integration-test-logs + path: integration-logs.txt + retention-days: 3 publish_artifacts: name: Publish artifacts - needs: [basic-checks, build, unit-test] + needs: [basic-checks, build, unit-test, integration-test] runs-on: ubuntu-24.04 if: ${{ startsWith(github.ref, 'refs/tags/v') }} permissions: diff --git a/.github/workflows/clean-up-images.yml b/.github/workflows/clean-up-images.yml index acccfeb41..2ca11d67b 100644 --- a/.github/workflows/clean-up-images.yml +++ b/.github/workflows/clean-up-images.yml @@ -9,7 +9,7 @@ on: jobs: cleanup: - name: Cleanup ghcr.io/cartesi/${{ matrix.image }}:pr-${{ github.event.number }} image + name: Cleanup ghcr.io/cartesi/${{ matrix.image }} CI images runs-on: ubuntu-latest permissions: packages: write @@ -17,9 +17,11 @@ jobs: matrix: image: - rollups-node - - rollups-node-ci + - rollups-node-devnet steps: - - uses: vlaurin/action-ghcr-prune@v0.6.0 + # Remove PR-scoped tags immediately. + - name: Prune PR tags + uses: vlaurin/action-ghcr-prune@v0.6.0 with: organization: cartesi container: ${{ matrix.image }} @@ -28,3 +30,17 @@ jobs: keep-last: 0 prune-tags-regexes: | ^pr-${{ github.event.number }}$ + + # Prune stale CI images older than 7 days to avoid deleting + # images needed by concurrently running workflows. + - name: Prune stale CI tags + uses: vlaurin/action-ghcr-prune@v0.6.0 + with: + organization: cartesi + container: ${{ matrix.image }} + token: ${{ secrets.GITHUB_TOKEN }} + prune-untagged: false + keep-last: 0 + older-than: 7 days + prune-tags-regexes: | + ^ci-