From b89de7671bb1813b6078ae14a8516d55feaec421 Mon Sep 17 00:00:00 2001 From: stainless-bot Date: Tue, 29 Oct 2024 20:34:08 +0000 Subject: [PATCH] chore: configure new SDK language --- .devcontainer/Dockerfile | 23 + .devcontainer/devcontainer.json | 20 + .github/workflows/ci.yml | 44 ++ .gitignore | 4 + .stats.yml | 2 + Brewfile | 1 + CONTRIBUTING.md | 66 +++ LICENSE | 201 +++++++ README.md | 342 +++++++++++- SECURITY.md | 27 + account.go | 182 +++++++ account_test.go | 60 +++ aliases.go | 9 + api.md | 55 ++ block.go | 525 +++++++++++++++++++ block_test.go | 148 ++++++ client.go | 109 ++++ client_test.go | 251 +++++++++ examples/.keep | 4 + field.go | 50 ++ go.mod | 11 + go.sum | 12 + internal/apierror/apierror.go | 53 ++ internal/apiform/encoder.go | 381 ++++++++++++++ internal/apiform/form.go | 5 + internal/apiform/form_test.go | 440 ++++++++++++++++ internal/apiform/tag.go | 48 ++ internal/apijson/decoder.go | 670 ++++++++++++++++++++++++ internal/apijson/encoder.go | 391 ++++++++++++++ internal/apijson/field.go | 41 ++ internal/apijson/field_test.go | 66 +++ internal/apijson/json_test.go | 616 ++++++++++++++++++++++ internal/apijson/port.go | 107 ++++ internal/apijson/port_test.go | 178 +++++++ internal/apijson/registry.go | 31 ++ internal/apijson/tag.go | 47 ++ internal/apiquery/encoder.go | 341 ++++++++++++ internal/apiquery/query.go | 50 ++ internal/apiquery/query_test.go | 335 ++++++++++++ internal/apiquery/tag.go | 41 ++ internal/param/field.go | 29 + internal/requestconfig/requestconfig.go | 496 ++++++++++++++++++ internal/testutil/testutil.go | 27 + internal/version.go | 5 + lib/.keep | 4 + option/requestoption.go | 229 ++++++++ program.go | 164 ++++++ program_test.go | 56 ++ scripts/bootstrap | 16 + scripts/format | 8 + scripts/lint | 8 + scripts/mock | 41 ++ scripts/test | 56 ++ transaction.go | 411 +++++++++++++++ transaction_test.go | 299 +++++++++++ usage_test.go | 33 ++ 56 files changed, 7868 insertions(+), 1 deletion(-) create mode 100644 
.devcontainer/Dockerfile create mode 100644 .devcontainer/devcontainer.json create mode 100644 .github/workflows/ci.yml create mode 100644 .gitignore create mode 100644 .stats.yml create mode 100644 Brewfile create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 SECURITY.md create mode 100644 account.go create mode 100644 account_test.go create mode 100644 aliases.go create mode 100644 api.md create mode 100644 block.go create mode 100644 block_test.go create mode 100644 client.go create mode 100644 client_test.go create mode 100644 examples/.keep create mode 100644 field.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 internal/apierror/apierror.go create mode 100644 internal/apiform/encoder.go create mode 100644 internal/apiform/form.go create mode 100644 internal/apiform/form_test.go create mode 100644 internal/apiform/tag.go create mode 100644 internal/apijson/decoder.go create mode 100644 internal/apijson/encoder.go create mode 100644 internal/apijson/field.go create mode 100644 internal/apijson/field_test.go create mode 100644 internal/apijson/json_test.go create mode 100644 internal/apijson/port.go create mode 100644 internal/apijson/port_test.go create mode 100644 internal/apijson/registry.go create mode 100644 internal/apijson/tag.go create mode 100644 internal/apiquery/encoder.go create mode 100644 internal/apiquery/query.go create mode 100644 internal/apiquery/query_test.go create mode 100644 internal/apiquery/tag.go create mode 100644 internal/param/field.go create mode 100644 internal/requestconfig/requestconfig.go create mode 100644 internal/testutil/testutil.go create mode 100644 internal/version.go create mode 100644 lib/.keep create mode 100644 option/requestoption.go create mode 100644 program.go create mode 100644 program_test.go create mode 100755 scripts/bootstrap create mode 100755 scripts/format create mode 100755 scripts/lint create mode 100755 scripts/mock create mode 100755 scripts/test 
create mode 100644 transaction.go create mode 100644 transaction_test.go create mode 100644 usage_test.go diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000..1aa883d --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,23 @@ +# syntax=docker/dockerfile:1 +FROM debian:bookworm-slim + +RUN apt-get update && apt-get install -y \ + libxkbcommon0 \ + ca-certificates \ + git \ + golang \ + unzip \ + libc++1 \ + vim \ + && apt-get clean autoclean + +# Ensure UTF-8 encoding +ENV LANG=C.UTF-8 +ENV LC_ALL=C.UTF-8 + +ENV GOPATH=/go +ENV PATH=$GOPATH/bin:$PATH + +WORKDIR /workspace + +COPY . /workspace diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..d55fc4d --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,20 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/debian +{ + "name": "Debian", + "build": { + "dockerfile": "Dockerfile" + } + + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
+ // "remoteUser": "root" +} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..901e734 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,44 @@ +name: CI +on: + push: + branches: + - main + pull_request: + branches: + - main + - next + +jobs: + lint: + name: lint + runs-on: ubuntu-latest + + + steps: + - uses: actions/checkout@v4 + + - name: Setup go + uses: actions/setup-go@v5 + with: + go-version-file: ./go.mod + + - name: Run lints + run: ./scripts/lint + test: + name: test + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Setup go + uses: actions/setup-go@v5 + with: + go-version-file: ./go.mod + + - name: Bootstrap + run: ./scripts/bootstrap + + - name: Run tests + run: ./scripts/test + diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c6d0501 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +.prism.log +codegen.log +Brewfile.lock.json +.idea/ diff --git a/.stats.yml b/.stats.yml new file mode 100644 index 0000000..8fad8eb --- /dev/null +++ b/.stats.yml @@ -0,0 +1,2 @@ +configured_endpoints: 12 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/maestro%2Fmaestro-arch-rpc-a04ce538c803bb8fd64c905a15cf7a02063faa72addd34cb7f31086f434fa666.yml diff --git a/Brewfile b/Brewfile new file mode 100644 index 0000000..577e34a --- /dev/null +++ b/Brewfile @@ -0,0 +1 @@ +brew "go" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..c20842b --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,66 @@ +## Setting up the environment + +To set up the repository, run: + +```sh +$ ./scripts/bootstrap +$ ./scripts/build +``` + +This will install all the required dependencies and build the SDK. + +You can also [install go 1.18+ manually](https://go.dev/doc/install). + +## Modifying/Adding code + +Most of the SDK is generated code. 
Modifications to code will be persisted between generations, but may +result in merge conflicts between manual patches and changes from the generator. The generator will never +modify the contents of the `lib/` and `examples/` directories. + +## Adding and running examples + +All files in the `examples/` directory are not modified by the generator and can be freely edited or added to. + +```go +# add an example to examples/<your-example>/main.go + +package main + +func main() { + // ... +} +``` + +```sh +$ go run ./examples/<your-example> +``` + +## Using the repository from source + +To use a local version of this library from source in another project, edit the `go.mod` with a replace +directive. This can be done through the CLI with the following: + +```sh +$ go mod edit -replace github.com/stainless-sdks/maestro-arch-rpc-go=/path/to/maestro-arch-rpc-go +``` + +## Running tests + +Most tests require you to [set up a mock server](https://github.com/stoplightio/prism) against the OpenAPI spec to run the tests. + +```sh +# you will need npm installed +$ npx prism mock path/to/your/openapi.yml +``` + +```sh +$ ./scripts/test +``` + +## Formatting + +This library uses the standard gofmt code formatter: + +```sh +$ ./scripts/format +``` diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..18123ca --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2024 Maestro Arch Rpc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md index dab8853..17e00e6 100644 --- a/README.md +++ b/README.md @@ -1 +1,341 @@ -# maestro-arch-rpc-go \ No newline at end of file +# Maestro Arch Rpc Go API Library + +Go Reference + +The Maestro Arch Rpc Go library provides convenient access to [the Maestro Arch Rpc REST +API](https://docs.maestro-arch-rpc.com) from applications written in Go. The full API of this library can be found in [api.md](api.md). + +It is generated with [Stainless](https://www.stainlessapi.com/). + +## Installation + +```go +import ( + "github.com/stainless-sdks/maestro-arch-rpc-go" // imported as maestroarchrpc +) +``` + +Or to pin the version: + +```sh +go get -u 'github.com/stainless-sdks/maestro-arch-rpc-go@v0.0.1-alpha.0' +``` + +## Requirements + +This library requires Go 1.18+. + +## Usage + +The full API of this library can be found in [api.md](api.md). 
+ +```go +package main + +import ( + "context" + "fmt" + + "github.com/stainless-sdks/maestro-arch-rpc-go" +) + +func main() { + client := maestroarchrpc.NewClient() + response, err := client.Accounts.Address(context.TODO(), maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err != nil { + panic(err.Error()) + } + fmt.Printf("%+v\n", response.Data) +} + +``` + +### Request fields + +All request parameters are wrapped in a generic `Field` type, +which we use to distinguish zero values from null or omitted fields. + +This prevents accidentally sending a zero value if you forget a required parameter, +and enables explicitly sending `null`, `false`, `''`, or `0` on optional parameters. +Any field not specified is not sent. + +To construct fields with values, use the helpers `String()`, `Int()`, `Float()`, or most commonly, the generic `F[T]()`. +To send a null, use `Null[T]()`, and to send a nonconforming value, use `Raw[T](any)`. For example: + +```go +params := FooParams{ + Name: maestroarchrpc.F("hello"), + + // Explicitly send `"description": null` + Description: maestroarchrpc.Null[string](), + + Point: maestroarchrpc.F(maestroarchrpc.Point{ + X: maestroarchrpc.Int(0), + Y: maestroarchrpc.Int(1), + + // In cases where the API specifies a given type, + // but you want to send something else, use `Raw`: + Z: maestroarchrpc.Raw[int64](0.01), // sends a float + }), +} +``` + +### Response objects + +All fields in response structs are value types (not pointers or wrappers). + +If a given field is `null`, not present, or invalid, the corresponding field +will simply be its zero value. 
+ + All response structs also include a special `JSON` field, containing more detailed +information about each property, which you can use like so: + +```go +if res.Name == "" { + // true if `"name"` is either not present or explicitly null + res.JSON.Name.IsNull() + + // true if the `"name"` key was not present in the response JSON at all + res.JSON.Name.IsMissing() + + // When the API returns data that cannot be coerced to the expected type: + if res.JSON.Name.IsInvalid() { + raw := res.JSON.Name.Raw() + + legacyName := struct{ + First string `json:"first"` + Last string `json:"last"` + }{} + json.Unmarshal([]byte(raw), &legacyName) + name = legacyName.First + " " + legacyName.Last + } +} +``` + +These `.JSON` structs also include an `Extras` map containing +any properties in the json response that were not specified +in the struct. This can be useful for API features not yet +present in the SDK. + +```go +body := res.JSON.ExtraFields["my_unexpected_field"].Raw() +``` + +### RequestOptions + +This library uses the functional options pattern. Functions defined in the +`option` package return a `RequestOption`, which is a closure that mutates a +`RequestConfig`. These options can be supplied to the client or at individual +requests. For example: + +```go +client := maestroarchrpc.NewClient( + // Adds a header to every request made by the client + option.WithHeader("X-Some-Header", "custom_header_info"), +) + +client.Accounts.Address(context.TODO(), ..., + // Override the header + option.WithHeader("X-Some-Header", "some_other_custom_header_info"), + // Add an undocumented field to the request body, using sjson syntax + option.WithJSONSet("some.json.path", map[string]string{"my": "object"}), +) +``` + +See the [full list of request options](https://pkg.go.dev/github.com/stainless-sdks/maestro-arch-rpc-go/option). + +### Pagination + +This library provides some conveniences for working with paginated list endpoints. 
+ +You can use `.ListAutoPaging()` methods to iterate through items across all pages: + +Or you can use simple `.List()` methods to fetch a single page and receive a standard response object +with additional helper methods like `.GetNextPage()`, e.g.: + +### Errors + +When the API returns a non-success status code, we return an error with type +`*maestroarchrpc.Error`. This contains the `StatusCode`, `*http.Request`, and +`*http.Response` values of the request, as well as the JSON of the error body +(much like other response objects in the SDK). + +To handle errors, we recommend that you use the `errors.As` pattern: + +```go +_, err := client.Accounts.Address(context.TODO(), maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, +}) +if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + println(string(apierr.DumpRequest(true))) // Prints the serialized HTTP request + println(string(apierr.DumpResponse(true))) // Prints the serialized HTTP response + } + panic(err.Error()) // GET "/account/address": 400 Bad Request { ... } +} +``` + +When other errors occur, they are returned unwrapped; for example, +if HTTP transport fails, you might receive `*url.Error` wrapping `*net.OpError`. + +### Timeouts + +Requests do not time out by default; use context to configure a timeout for a request lifecycle. + +Note that if a request is [retried](#retries), the context timeout does not start over. +To set a per-retry timeout, use `option.WithRequestTimeout()`. + +```go +// This sets the timeout for the request, including all the retries. 
+ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) +defer cancel() +client.Accounts.Address( + ctx, + maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }, + // This sets the per-retry timeout + option.WithRequestTimeout(20*time.Second), +) +``` + +### File uploads + +Request parameters that correspond to file uploads in multipart requests are typed as +`param.Field[io.Reader]`. The contents of the `io.Reader` will by default be sent as a multipart form +part with the file name of "anonymous_file" and content-type of "application/octet-stream". + +The file name and content-type can be customized by implementing `Name() string` or `ContentType() +string` on the run-time type of `io.Reader`. Note that `os.File` implements `Name() string`, so a +file returned by `os.Open` will be sent with the file name on disk. + +We also provide a helper `maestroarchrpc.FileParam(reader io.Reader, filename string, contentType string)` +which can be used to wrap any `io.Reader` with the appropriate file name and content type. + +### Retries + +Certain errors will be automatically retried 2 times by default, with a short exponential backoff. +We retry by default all connection errors, 408 Request Timeout, 409 Conflict, 429 Rate Limit, +and >=500 Internal errors. + +You can use the `WithMaxRetries` option to configure or disable this: + +```go +// Configure the default for all requests: +client := maestroarchrpc.NewClient( + option.WithMaxRetries(0), // default is 2 +) + +// Override per-request: +client.Accounts.Address( + context.TODO(), + maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }, + option.WithMaxRetries(5), +) +``` + +### Making custom/undocumented requests + +This library is typed for convenient access to the documented API. If you need to access undocumented +endpoints, params, or response properties, the library can still be used. 
+ +#### Undocumented endpoints + +To make requests to undocumented endpoints, you can use `client.Get`, `client.Post`, and other HTTP verbs. +`RequestOptions` on the client, such as retries, will be respected when making these requests. + +```go +var ( + // params can be an io.Reader, a []byte, an encoding/json serializable object, + // or a "…Params" struct defined in this library. + params map[string]interface{} + + // result can be an []byte, *http.Response, a encoding/json deserializable object, + // or a model defined in this library. + result *http.Response +) +err := client.Post(context.Background(), "/unspecified", params, &result) +if err != nil { + … +} +``` + +#### Undocumented request params + +To make requests using undocumented parameters, you may use either the `option.WithQuerySet()` +or the `option.WithJSONSet()` methods. + +```go +params := FooNewParams{ + ID: maestroarchrpc.F("id_xxxx"), + Data: maestroarchrpc.F(FooNewParamsData{ + FirstName: maestroarchrpc.F("John"), + }), +} +client.Foo.New(context.Background(), params, option.WithJSONSet("data.last_name", "Doe")) +``` + +#### Undocumented response properties + +To access undocumented response properties, you may either access the raw JSON of the response as a string +with `result.JSON.RawJSON()`, or get the raw JSON of a particular field on the result with +`result.JSON.Foo.Raw()`. + +Any fields that are not present on the response struct will be saved and can be accessed by `result.JSON.ExtraFields()` which returns the extra fields as a `map[string]Field`. + +### Middleware + +We provide `option.WithMiddleware` which applies the given +middleware to requests. 
+ +```go +func Logger(req *http.Request, next option.MiddlewareNext) (res *http.Response, err error) { + // Before the request + start := time.Now() + LogReq(req) + + // Forward the request to the next handler + res, err = next(req) + + // Handle stuff after the request + end := time.Now() + LogRes(res, err, start - end) + + return res, err +} + +client := maestroarchrpc.NewClient( + option.WithMiddleware(Logger), +) +``` + +When multiple middlewares are provided as variadic arguments, the middlewares +are applied left to right. If `option.WithMiddleware` is given +multiple times, for example first in the client then the method, the +middleware in the client will run first and the middleware given in the method +will run next. + +You may also replace the default `http.Client` with +`option.WithHTTPClient(client)`. Only one http client is +accepted (this overwrites any previous client) and receives requests after any +middleware has been applied. + +## Semantic versioning + +This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions: + +1. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_. +2. Changes that we do not expect to impact the vast majority of users in practice. + +We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience. + +We are keen for your feedback; please open an [issue](https://www.github.com/stainless-sdks/maestro-arch-rpc-go/issues) with questions, bugs, or suggestions. + +## Contributing + +See [the contributing documentation](./CONTRIBUTING.md). 
diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..2512f8b --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,27 @@ +# Security Policy + +## Reporting Security Issues + +This SDK is generated by [Stainless Software Inc](http://stainlessapi.com). Stainless takes security seriously, and encourages you to report any security vulnerability promptly so that appropriate action can be taken. + +To report a security issue, please contact the Stainless team at security@stainlessapi.com. + +## Responsible Disclosure + +We appreciate the efforts of security researchers and individuals who help us maintain the security of +SDKs we generate. If you believe you have found a security vulnerability, please adhere to responsible +disclosure practices by allowing us a reasonable amount of time to investigate and address the issue +before making any information public. + +## Reporting Non-SDK Related Security Issues + +If you encounter security issues that are not directly related to SDKs but pertain to the services +or products provided by Maestro Arch Rpc please follow the respective company's security reporting guidelines. + +### Maestro Arch Rpc Terms and Policies + +Please contact support@gomaestro.org for any questions or concerns regarding security of our services. + +--- + +Thank you for helping us keep the SDKs and systems they interact with secure. diff --git a/account.go b/account.go new file mode 100644 index 0000000..19d46a3 --- /dev/null +++ b/account.go @@ -0,0 +1,182 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package maestroarchrpc + +import ( + "context" + "net/http" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/apijson" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/requestconfig" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +// AccountService contains methods and other services that help with interacting +// with the maestro-arch-rpc API. 
+// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewAccountService] method instead. +type AccountService struct { + Options []option.RequestOption +} + +// NewAccountService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewAccountService(opts ...option.RequestOption) (r *AccountService) { + r = &AccountService{} + r.Options = opts + return +} + +// Account address by pubkey +func (r *AccountService) Address(ctx context.Context, body AccountAddressParams, opts ...option.RequestOption) (res *AccountAddressResponse, err error) { + opts = append(r.Options[:], opts...) + path := "account/address" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Account info by pubkey +func (r *AccountService) Info(ctx context.Context, body AccountInfoParams, opts ...option.RequestOption) (res *AccountInfoResponse, err error) { + opts = append(r.Options[:], opts...) + path := "account/info" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) 
+ return +} + +type AccountAddressResponse struct { + Data string `json:"data"` + LastUpdated AccountAddressResponseLastUpdated `json:"last_updated"` + JSON accountAddressResponseJSON `json:"-"` +} + +// accountAddressResponseJSON contains the JSON metadata for the struct +// [AccountAddressResponse] +type accountAddressResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AccountAddressResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r accountAddressResponseJSON) RawJSON() string { + return r.raw +} + +type AccountAddressResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON accountAddressResponseLastUpdatedJSON `json:"-"` +} + +// accountAddressResponseLastUpdatedJSON contains the JSON metadata for the struct +// [AccountAddressResponseLastUpdated] +type accountAddressResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AccountAddressResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r accountAddressResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type AccountInfoResponse struct { + Data AccountInfoResponseData `json:"data"` + LastUpdated AccountInfoResponseLastUpdated `json:"last_updated"` + JSON accountInfoResponseJSON `json:"-"` +} + +// accountInfoResponseJSON contains the JSON metadata for the struct +// [AccountInfoResponse] +type accountInfoResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AccountInfoResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r accountInfoResponseJSON) RawJSON() string { + return r.raw +} + +type AccountInfoResponseData 
struct { + Data []int64 `json:"data"` + IsExecutable bool `json:"is_executable"` + Owner []int64 `json:"owner"` + Utxo string `json:"utxo"` + JSON accountInfoResponseDataJSON `json:"-"` +} + +// accountInfoResponseDataJSON contains the JSON metadata for the struct +// [AccountInfoResponseData] +type accountInfoResponseDataJSON struct { + Data apijson.Field + IsExecutable apijson.Field + Owner apijson.Field + Utxo apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AccountInfoResponseData) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r accountInfoResponseDataJSON) RawJSON() string { + return r.raw +} + +type AccountInfoResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON accountInfoResponseLastUpdatedJSON `json:"-"` +} + +// accountInfoResponseLastUpdatedJSON contains the JSON metadata for the struct +// [AccountInfoResponseLastUpdated] +type accountInfoResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *AccountInfoResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r accountInfoResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type AccountAddressParams struct { + Body []int64 `json:"body,required"` +} + +func (r AccountAddressParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r.Body) +} + +type AccountInfoParams struct { + Body []int64 `json:"body,required"` +} + +func (r AccountInfoParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r.Body) +} diff --git a/account_test.go b/account_test.go new file mode 100644 index 0000000..e43e965 --- /dev/null +++ b/account_test.go @@ -0,0 +1,60 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package maestroarchrpc_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/stainless-sdks/maestro-arch-rpc-go" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/testutil" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +func TestAccountAddress(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Accounts.Address(context.TODO(), maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestAccountInfo(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Accounts.Info(context.TODO(), maestroarchrpc.AccountInfoParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/aliases.go b/aliases.go new file mode 100644 index 0000000..2f7f2e2 --- /dev/null +++ b/aliases.go @@ -0,0 +1,9 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package maestroarchrpc + +import ( + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/apierror" +) + +type Error = apierror.Error diff --git a/api.md b/api.md new file mode 100644 index 0000000..d95cd7b --- /dev/null +++ b/api.md @@ -0,0 +1,55 @@ +# Accounts + +Response Types: + +- maestroarchrpc.AccountAddressResponse +- maestroarchrpc.AccountInfoResponse + +Methods: + +- client.Accounts.Address(ctx context.Context, body maestroarchrpc.AccountAddressParams) (maestroarchrpc.AccountAddressResponse, error) +- client.Accounts.Info(ctx context.Context, body maestroarchrpc.AccountInfoParams) (maestroarchrpc.AccountInfoResponse, error) + +# Blocks + +Response Types: + +- maestroarchrpc.BlockGetResponse +- maestroarchrpc.BlockCountResponse +- maestroarchrpc.BlockLatestResponse +- maestroarchrpc.BlockLatestHashResponse +- maestroarchrpc.BlockRangeResponse +- maestroarchrpc.BlockRecentResponse + +Methods: + +- client.Blocks.Get(ctx context.Context, heightOrHash string) (maestroarchrpc.BlockGetResponse, error) +- client.Blocks.Count(ctx context.Context) (maestroarchrpc.BlockCountResponse, error) +- client.Blocks.Latest(ctx context.Context) (maestroarchrpc.BlockLatestResponse, error) +- client.Blocks.LatestHash(ctx context.Context) (maestroarchrpc.BlockLatestHashResponse, error) +- client.Blocks.Range(ctx context.Context, startHeight string, endHeight string, query maestroarchrpc.BlockRangeParams) (maestroarchrpc.BlockRangeResponse, error) +- client.Blocks.Recent(ctx context.Context, count int64) (maestroarchrpc.BlockRecentResponse, error) + +# Programs + +Response Types: + +- maestroarchrpc.ProgramAccountsResponse + +Methods: + +- client.Programs.Accounts(ctx context.Context, body maestroarchrpc.ProgramAccountsParams) (maestroarchrpc.ProgramAccountsResponse, error) + +# Transactions + +Response Types: + +- maestroarchrpc.TransactionGetResponse +- maestroarchrpc.TransactionBatchSendResponse +- maestroarchrpc.TransactionSendResponse + +Methods: + +- 
client.Transactions.Get(ctx context.Context, txHash string) (maestroarchrpc.TransactionGetResponse, error) +- client.Transactions.BatchSend(ctx context.Context, body maestroarchrpc.TransactionBatchSendParams) (maestroarchrpc.TransactionBatchSendResponse, error) +- client.Transactions.Send(ctx context.Context, body maestroarchrpc.TransactionSendParams) (maestroarchrpc.TransactionSendResponse, error) diff --git a/block.go b/block.go new file mode 100644 index 0000000..dcf27eb --- /dev/null +++ b/block.go @@ -0,0 +1,525 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package maestroarchrpc + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/apijson" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/apiquery" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/param" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/requestconfig" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +// BlockService contains methods and other services that help with interacting with +// the maestro-arch-rpc API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewBlockService] method instead. +type BlockService struct { + Options []option.RequestOption +} + +// NewBlockService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewBlockService(opts ...option.RequestOption) (r *BlockService) { + r = &BlockService{} + r.Options = opts + return +} + +// Block info by height or hash +func (r *BlockService) Get(ctx context.Context, heightOrHash string, opts ...option.RequestOption) (res *BlockGetResponse, err error) { + opts = append(r.Options[:], opts...) 
+ if heightOrHash == "" { + err = errors.New("missing required height_or_hash parameter") + return + } + path := fmt.Sprintf("block/%s", heightOrHash) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Block count +func (r *BlockService) Count(ctx context.Context, opts ...option.RequestOption) (res *BlockCountResponse, err error) { + opts = append(r.Options[:], opts...) + path := "block/count" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Latest block info +func (r *BlockService) Latest(ctx context.Context, opts ...option.RequestOption) (res *BlockLatestResponse, err error) { + opts = append(r.Options[:], opts...) + path := "block/latest" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Latest block hash +func (r *BlockService) LatestHash(ctx context.Context, opts ...option.RequestOption) (res *BlockLatestHashResponse, err error) { + opts = append(r.Options[:], opts...) + path := "block/latest/hash" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Block range info by start and end height +func (r *BlockService) Range(ctx context.Context, startHeight string, endHeight string, query BlockRangeParams, opts ...option.RequestOption) (res *BlockRangeResponse, err error) { + opts = append(r.Options[:], opts...) + if startHeight == "" { + err = errors.New("missing required start_height parameter") + return + } + if endHeight == "" { + err = errors.New("missing required end_height parameter") + return + } + path := fmt.Sprintf("block/range/%s/%s", startHeight, endHeight) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, query, &res, opts...) 
+ return +} + +// Recent block info by count +func (r *BlockService) Recent(ctx context.Context, count int64, opts ...option.RequestOption) (res *BlockRecentResponse, err error) { + opts = append(r.Options[:], opts...) + path := fmt.Sprintf("block/recent/%v", count) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +type BlockGetResponse struct { + Data BlockGetResponseData `json:"data"` + LastUpdated BlockGetResponseLastUpdated `json:"last_updated"` + JSON blockGetResponseJSON `json:"-"` +} + +// blockGetResponseJSON contains the JSON metadata for the struct +// [BlockGetResponse] +type blockGetResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockGetResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockGetResponseJSON) RawJSON() string { + return r.raw +} + +type BlockGetResponseData struct { + BitcoinBlockHeight int64 `json:"bitcoin_block_height"` + Hash string `json:"hash"` + MerkleRoot string `json:"merkle_root"` + PreviousBlockHash string `json:"previous_block_hash"` + Timestamp int64 `json:"timestamp"` + TransactionCount int64 `json:"transaction_count"` + Transactions []string `json:"transactions"` + JSON blockGetResponseDataJSON `json:"-"` +} + +// blockGetResponseDataJSON contains the JSON metadata for the struct +// [BlockGetResponseData] +type blockGetResponseDataJSON struct { + BitcoinBlockHeight apijson.Field + Hash apijson.Field + MerkleRoot apijson.Field + PreviousBlockHash apijson.Field + Timestamp apijson.Field + TransactionCount apijson.Field + Transactions apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockGetResponseData) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockGetResponseDataJSON) RawJSON() string { + return r.raw +} + +type BlockGetResponseLastUpdated struct 
{ + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON blockGetResponseLastUpdatedJSON `json:"-"` +} + +// blockGetResponseLastUpdatedJSON contains the JSON metadata for the struct +// [BlockGetResponseLastUpdated] +type blockGetResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockGetResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockGetResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type BlockCountResponse struct { + Data int64 `json:"data"` + LastUpdated BlockCountResponseLastUpdated `json:"last_updated"` + JSON blockCountResponseJSON `json:"-"` +} + +// blockCountResponseJSON contains the JSON metadata for the struct +// [BlockCountResponse] +type blockCountResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockCountResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockCountResponseJSON) RawJSON() string { + return r.raw +} + +type BlockCountResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON blockCountResponseLastUpdatedJSON `json:"-"` +} + +// blockCountResponseLastUpdatedJSON contains the JSON metadata for the struct +// [BlockCountResponseLastUpdated] +type blockCountResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockCountResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockCountResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type BlockLatestResponse struct { + Data BlockLatestResponseData `json:"data"` + LastUpdated 
BlockLatestResponseLastUpdated `json:"last_updated"` + JSON blockLatestResponseJSON `json:"-"` +} + +// blockLatestResponseJSON contains the JSON metadata for the struct +// [BlockLatestResponse] +type blockLatestResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockLatestResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockLatestResponseJSON) RawJSON() string { + return r.raw +} + +type BlockLatestResponseData struct { + BitcoinBlockHeight int64 `json:"bitcoin_block_height"` + Hash string `json:"hash"` + Height int64 `json:"height"` + MerkleRoot string `json:"merkle_root"` + PreviousBlockHash string `json:"previous_block_hash"` + Timestamp int64 `json:"timestamp"` + TransactionCount int64 `json:"transaction_count"` + Transactions []string `json:"transactions"` + JSON blockLatestResponseDataJSON `json:"-"` +} + +// blockLatestResponseDataJSON contains the JSON metadata for the struct +// [BlockLatestResponseData] +type blockLatestResponseDataJSON struct { + BitcoinBlockHeight apijson.Field + Hash apijson.Field + Height apijson.Field + MerkleRoot apijson.Field + PreviousBlockHash apijson.Field + Timestamp apijson.Field + TransactionCount apijson.Field + Transactions apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockLatestResponseData) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockLatestResponseDataJSON) RawJSON() string { + return r.raw +} + +type BlockLatestResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON blockLatestResponseLastUpdatedJSON `json:"-"` +} + +// blockLatestResponseLastUpdatedJSON contains the JSON metadata for the struct +// [BlockLatestResponseLastUpdated] +type blockLatestResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight 
apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockLatestResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockLatestResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type BlockLatestHashResponse struct { + Data string `json:"data"` + LastUpdated BlockLatestHashResponseLastUpdated `json:"last_updated"` + JSON blockLatestHashResponseJSON `json:"-"` +} + +// blockLatestHashResponseJSON contains the JSON metadata for the struct +// [BlockLatestHashResponse] +type blockLatestHashResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockLatestHashResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockLatestHashResponseJSON) RawJSON() string { + return r.raw +} + +type BlockLatestHashResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON blockLatestHashResponseLastUpdatedJSON `json:"-"` +} + +// blockLatestHashResponseLastUpdatedJSON contains the JSON metadata for the struct +// [BlockLatestHashResponseLastUpdated] +type blockLatestHashResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockLatestHashResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockLatestHashResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type BlockRangeResponse struct { + Data []BlockRangeResponseData `json:"data"` + LastUpdated BlockRangeResponseLastUpdated `json:"last_updated"` + JSON blockRangeResponseJSON `json:"-"` +} + +// blockRangeResponseJSON contains the JSON metadata for the struct +// [BlockRangeResponse] +type blockRangeResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field 
+ raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockRangeResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockRangeResponseJSON) RawJSON() string { + return r.raw +} + +type BlockRangeResponseData struct { + BitcoinBlockHeight int64 `json:"bitcoin_block_height"` + Hash string `json:"hash"` + Height int64 `json:"height"` + MerkleRoot string `json:"merkle_root"` + PreviousBlockHash string `json:"previous_block_hash"` + Timestamp int64 `json:"timestamp"` + TransactionCount int64 `json:"transaction_count"` + Transactions []string `json:"transactions"` + JSON blockRangeResponseDataJSON `json:"-"` +} + +// blockRangeResponseDataJSON contains the JSON metadata for the struct +// [BlockRangeResponseData] +type blockRangeResponseDataJSON struct { + BitcoinBlockHeight apijson.Field + Hash apijson.Field + Height apijson.Field + MerkleRoot apijson.Field + PreviousBlockHash apijson.Field + Timestamp apijson.Field + TransactionCount apijson.Field + Transactions apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockRangeResponseData) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockRangeResponseDataJSON) RawJSON() string { + return r.raw +} + +type BlockRangeResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON blockRangeResponseLastUpdatedJSON `json:"-"` +} + +// blockRangeResponseLastUpdatedJSON contains the JSON metadata for the struct +// [BlockRangeResponseLastUpdated] +type blockRangeResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockRangeResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockRangeResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type 
BlockRecentResponse struct { + Data []BlockRecentResponseData `json:"data"` + LastUpdated BlockRecentResponseLastUpdated `json:"last_updated"` + JSON blockRecentResponseJSON `json:"-"` +} + +// blockRecentResponseJSON contains the JSON metadata for the struct +// [BlockRecentResponse] +type blockRecentResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockRecentResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockRecentResponseJSON) RawJSON() string { + return r.raw +} + +type BlockRecentResponseData struct { + BitcoinBlockHeight int64 `json:"bitcoin_block_height"` + Hash string `json:"hash"` + Height int64 `json:"height"` + MerkleRoot string `json:"merkle_root"` + PreviousBlockHash string `json:"previous_block_hash"` + Timestamp int64 `json:"timestamp"` + TransactionCount int64 `json:"transaction_count"` + Transactions []string `json:"transactions"` + JSON blockRecentResponseDataJSON `json:"-"` +} + +// blockRecentResponseDataJSON contains the JSON metadata for the struct +// [BlockRecentResponseData] +type blockRecentResponseDataJSON struct { + BitcoinBlockHeight apijson.Field + Hash apijson.Field + Height apijson.Field + MerkleRoot apijson.Field + PreviousBlockHash apijson.Field + Timestamp apijson.Field + TransactionCount apijson.Field + Transactions apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockRecentResponseData) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockRecentResponseDataJSON) RawJSON() string { + return r.raw +} + +type BlockRecentResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON blockRecentResponseLastUpdatedJSON `json:"-"` +} + +// blockRecentResponseLastUpdatedJSON contains the JSON metadata for the struct +// [BlockRecentResponseLastUpdated] +type 
blockRecentResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *BlockRecentResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r blockRecentResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type BlockRangeParams struct { + // Number of blocks. + Count param.Field[int64] `query:"count"` + // Page number. + Page param.Field[int64] `query:"page"` +} + +// URLQuery serializes [BlockRangeParams]'s query parameters as `url.Values`. +func (r BlockRangeParams) URLQuery() (v url.Values) { + return apiquery.MarshalWithSettings(r, apiquery.QuerySettings{ + ArrayFormat: apiquery.ArrayQueryFormatComma, + NestedFormat: apiquery.NestedQueryFormatBrackets, + }) +} diff --git a/block_test.go b/block_test.go new file mode 100644 index 0000000..03bdb89 --- /dev/null +++ b/block_test.go @@ -0,0 +1,148 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package maestroarchrpc_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/stainless-sdks/maestro-arch-rpc-go" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/testutil" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +func TestBlockGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Blocks.Get(context.TODO(), "height_or_hash") + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBlockCount(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Blocks.Count(context.TODO()) + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBlockLatest(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Blocks.Latest(context.TODO()) + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBlockLatestHash(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL 
= envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Blocks.LatestHash(context.TODO()) + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBlockRangeWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Blocks.Range( + context.TODO(), + "start_height", + "end_height", + maestroarchrpc.BlockRangeParams{ + Count: maestroarchrpc.F(int64(0)), + Page: maestroarchrpc.F(int64(0)), + }, + ) + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestBlockRecent(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Blocks.Recent(context.TODO(), int64(1)) + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/client.go b/client.go new file mode 100644 index 0000000..7acfdf1 --- /dev/null +++ b/client.go @@ -0,0 +1,109 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package maestroarchrpc + +import ( + "context" + "net/http" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/requestconfig" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +// Client creates a struct with services and top level methods that help with +// interacting with the maestro-arch-rpc API. You should not instantiate this +// client directly, and instead use the [NewClient] method instead. +type Client struct { + Options []option.RequestOption + Accounts *AccountService + Blocks *BlockService + Programs *ProgramService + Transactions *TransactionService +} + +// NewClient generates a new client with the default option read from the +// environment (). The option passed in as arguments are applied after these +// default arguments, and all option will be passed down to the services and +// requests that this client makes. +func NewClient(opts ...option.RequestOption) (r *Client) { + defaults := []option.RequestOption{option.WithEnvironmentProduction()} + opts = append(defaults, opts...) + + r = &Client{Options: opts} + + r.Accounts = NewAccountService(opts...) + r.Blocks = NewBlockService(opts...) + r.Programs = NewProgramService(opts...) + r.Transactions = NewTransactionService(opts...) + + return +} + +// Execute makes a request with the given context, method, URL, request params, +// response, and request options. This is useful for hitting undocumented endpoints +// while retaining the base URL, auth, retries, and other options from the client. +// +// If a byte slice or an [io.Reader] is supplied to params, it will be used as-is +// for the request body. +// +// The params is by default serialized into the body using [encoding/json]. If your +// type implements a MarshalJSON function, it will be used instead to serialize the +// request. If a URLQuery method is implemented, the returned [url.Values] will be +// used as query strings to the url. 
+// +// If your params struct uses [param.Field], you must provide either [MarshalJSON], +// [URLQuery], and/or [MarshalForm] functions. It is undefined behavior to use a +// struct uses [param.Field] without specifying how it is serialized. +// +// Any "…Params" object defined in this library can be used as the request +// argument. Note that 'path' arguments will not be forwarded into the url. +// +// The response body will be deserialized into the res variable, depending on its +// type: +// +// - A pointer to a [*http.Response] is populated by the raw response. +// - A pointer to a byte array will be populated with the contents of the request +// body. +// - A pointer to any other type uses this library's default JSON decoding, which +// respects UnmarshalJSON if it is defined on the type. +// - A nil value will not read the response body. +// +// For even greater flexibility, see [option.WithResponseInto] and +// [option.WithResponseBodyInto]. +func (r *Client) Execute(ctx context.Context, method string, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + opts = append(r.Options, opts...) + return requestconfig.ExecuteNewRequest(ctx, method, path, params, res, opts...) +} + +// Get makes a GET request with the given URL, params, and optionally deserializes +// to a response. See [Execute] documentation on the params and response. +func (r *Client) Get(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodGet, path, params, res, opts...) +} + +// Post makes a POST request with the given URL, params, and optionally +// deserializes to a response. See [Execute] documentation on the params and +// response. +func (r *Client) Post(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodPost, path, params, res, opts...) 
+} + +// Put makes a PUT request with the given URL, params, and optionally deserializes +// to a response. See [Execute] documentation on the params and response. +func (r *Client) Put(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodPut, path, params, res, opts...) +} + +// Patch makes a PATCH request with the given URL, params, and optionally +// deserializes to a response. See [Execute] documentation on the params and +// response. +func (r *Client) Patch(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodPatch, path, params, res, opts...) +} + +// Delete makes a DELETE request with the given URL, params, and optionally +// deserializes to a response. See [Execute] documentation on the params and +// response. +func (r *Client) Delete(ctx context.Context, path string, params interface{}, res interface{}, opts ...option.RequestOption) error { + return r.Execute(ctx, http.MethodDelete, path, params, res, opts...) +} diff --git a/client_test.go b/client_test.go new file mode 100644 index 0000000..b8ea525 --- /dev/null +++ b/client_test.go @@ -0,0 +1,251 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package maestroarchrpc_test + +import ( + "context" + "fmt" + "net/http" + "reflect" + "testing" + "time" + + "github.com/stainless-sdks/maestro-arch-rpc-go" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +type closureTransport struct { + fn func(req *http.Request) (*http.Response, error) +} + +func (t *closureTransport) RoundTrip(req *http.Request) (*http.Response, error) { + return t.fn(req) +} + +func TestUserAgentHeader(t *testing.T) { + var userAgent string + client := maestroarchrpc.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + userAgent = req.Header.Get("User-Agent") + return &http.Response{ + StatusCode: http.StatusOK, + }, nil + }, + }, + }), + ) + client.Accounts.Address(context.Background(), maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if userAgent != fmt.Sprintf("MaestroArchRpc/Go %s", internal.PackageVersion) { + t.Errorf("Expected User-Agent to be correct, but got: %#v", userAgent) + } +} + +func TestRetryAfter(t *testing.T) { + retryCountHeaders := make([]string, 0) + client := maestroarchrpc.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + retryCountHeaders = append(retryCountHeaders, req.Header.Get("X-Stainless-Retry-Count")) + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{ + http.CanonicalHeaderKey("Retry-After"): []string{"0.1"}, + }, + }, nil + }, + }, + }), + ) + res, err := client.Accounts.Address(context.Background(), maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err == nil || res != nil { + t.Error("Expected there to be a cancel error and for the response to be nil") + } + + attempts := len(retryCountHeaders) + if attempts != 3 { + t.Errorf("Expected %d 
attempts, got %d", 3, attempts) + } + + expectedRetryCountHeaders := []string{"0", "1", "2"} + if !reflect.DeepEqual(retryCountHeaders, expectedRetryCountHeaders) { + t.Errorf("Expected %v retry count headers, got %v", expectedRetryCountHeaders, retryCountHeaders) + } +} + +func TestDeleteRetryCountHeader(t *testing.T) { + retryCountHeaders := make([]string, 0) + client := maestroarchrpc.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + retryCountHeaders = append(retryCountHeaders, req.Header.Get("X-Stainless-Retry-Count")) + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{ + http.CanonicalHeaderKey("Retry-After"): []string{"0.1"}, + }, + }, nil + }, + }, + }), + option.WithHeaderDel("X-Stainless-Retry-Count"), + ) + res, err := client.Accounts.Address(context.Background(), maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err == nil || res != nil { + t.Error("Expected there to be a cancel error and for the response to be nil") + } + + expectedRetryCountHeaders := []string{"", "", ""} + if !reflect.DeepEqual(retryCountHeaders, expectedRetryCountHeaders) { + t.Errorf("Expected %v retry count headers, got %v", expectedRetryCountHeaders, retryCountHeaders) + } +} + +func TestOverwriteRetryCountHeader(t *testing.T) { + retryCountHeaders := make([]string, 0) + client := maestroarchrpc.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + retryCountHeaders = append(retryCountHeaders, req.Header.Get("X-Stainless-Retry-Count")) + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{ + http.CanonicalHeaderKey("Retry-After"): []string{"0.1"}, + }, + }, nil + }, + }, + }), + option.WithHeader("X-Stainless-Retry-Count", "42"), + ) + res, err := client.Accounts.Address(context.Background(), 
maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err == nil || res != nil { + t.Error("Expected there to be a cancel error and for the response to be nil") + } + + expectedRetryCountHeaders := []string{"42", "42", "42"} + if !reflect.DeepEqual(retryCountHeaders, expectedRetryCountHeaders) { + t.Errorf("Expected %v retry count headers, got %v", expectedRetryCountHeaders, retryCountHeaders) + } +} + +func TestRetryAfterMs(t *testing.T) { + attempts := 0 + client := maestroarchrpc.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + attempts++ + return &http.Response{ + StatusCode: http.StatusTooManyRequests, + Header: http.Header{ + http.CanonicalHeaderKey("Retry-After-Ms"): []string{"100"}, + }, + }, nil + }, + }, + }), + ) + res, err := client.Accounts.Address(context.Background(), maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err == nil || res != nil { + t.Error("Expected there to be a cancel error and for the response to be nil") + } + if want := 3; attempts != want { + t.Errorf("Expected %d attempts, got %d", want, attempts) + } +} + +func TestContextCancel(t *testing.T) { + client := maestroarchrpc.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + <-req.Context().Done() + return nil, req.Context().Err() + }, + }, + }), + ) + cancelCtx, cancel := context.WithCancel(context.Background()) + cancel() + res, err := client.Accounts.Address(cancelCtx, maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err == nil || res != nil { + t.Error("Expected there to be a cancel error and for the response to be nil") + } +} + +func TestContextCancelDelay(t *testing.T) { + client := maestroarchrpc.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: 
&closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + <-req.Context().Done() + return nil, req.Context().Err() + }, + }, + }), + ) + cancelCtx, cancel := context.WithTimeout(context.Background(), 2*time.Millisecond) + defer cancel() + res, err := client.Accounts.Address(cancelCtx, maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err == nil || res != nil { + t.Error("expected there to be a cancel error and for the response to be nil") + } +} + +func TestContextDeadline(t *testing.T) { + testTimeout := time.After(3 * time.Second) + testDone := make(chan struct{}) + + deadline := time.Now().Add(100 * time.Millisecond) + deadlineCtx, cancel := context.WithDeadline(context.Background(), deadline) + defer cancel() + + go func() { + client := maestroarchrpc.NewClient( + option.WithHTTPClient(&http.Client{ + Transport: &closureTransport{ + fn: func(req *http.Request) (*http.Response, error) { + <-req.Context().Done() + return nil, req.Context().Err() + }, + }, + }), + ) + res, err := client.Accounts.Address(deadlineCtx, maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err == nil || res != nil { + t.Error("expected there to be a deadline error and for the response to be nil") + } + close(testDone) + }() + + select { + case <-testTimeout: + t.Fatal("client didn't finish in time") + case <-testDone: + if diff := time.Since(deadline); diff < -30*time.Millisecond || 30*time.Millisecond < diff { + t.Fatalf("client did not return within 30ms of context deadline, got %s", diff) + } + } +} diff --git a/examples/.keep b/examples/.keep new file mode 100644 index 0000000..d8c73e9 --- /dev/null +++ b/examples/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store example files demonstrating usage of this SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. 
\ No newline at end of file diff --git a/field.go b/field.go new file mode 100644 index 0000000..113d833 --- /dev/null +++ b/field.go @@ -0,0 +1,50 @@ +package maestroarchrpc + +import ( + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/param" + "io" +) + +// F is a param field helper used to initialize a [param.Field] generic struct. +// This helps specify null, zero values, and overrides, as well as normal values. +// You can read more about this in our [README]. +// +// [README]: https://pkg.go.dev/github.com/stainless-sdks/maestro-arch-rpc-go#readme-request-fields +func F[T any](value T) param.Field[T] { return param.Field[T]{Value: value, Present: true} } + +// Null is a param field helper which explicitly sends null to the API. +func Null[T any]() param.Field[T] { return param.Field[T]{Null: true, Present: true} } + +// Raw is a param field helper for specifying values for fields when the +// type you are looking to send is different from the type that is specified in +// the SDK. For example, if the type of the field is an integer, but you want +// to send a float, you could do that by setting the corresponding field with +// Raw[int](0.5). +func Raw[T any](value any) param.Field[T] { return param.Field[T]{Raw: value, Present: true} } + +// Int is a param field helper which helps specify integers. This is +// particularly helpful when specifying integer constants for fields. +func Int(value int64) param.Field[int64] { return F(value) } + +// String is a param field helper which helps specify strings. +func String(value string) param.Field[string] { return F(value) } + +// Float is a param field helper which helps specify floats. +func Float(value float64) param.Field[float64] { return F(value) } + +// Bool is a param field helper which helps specify bools. +func Bool(value bool) param.Field[bool] { return F(value) } + +// FileParam is a param field helper which helps files with a mime content-type. 
+func FileParam(reader io.Reader, filename string, contentType string) param.Field[io.Reader] { + return F[io.Reader](&file{reader, filename, contentType}) +} + +type file struct { + io.Reader + name string + contentType string +} + +func (f *file) Name() string { return f.name } +func (f *file) ContentType() string { return f.contentType } diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..62d83b2 --- /dev/null +++ b/go.mod @@ -0,0 +1,11 @@ +module github.com/stainless-sdks/maestro-arch-rpc-go + +go 1.21 + +require ( + github.com/google/uuid v1.3.0 // indirect + github.com/tidwall/gjson v1.14.4 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..569e555 --- /dev/null +++ b/go.sum @@ -0,0 +1,12 @@ +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= diff --git a/internal/apierror/apierror.go b/internal/apierror/apierror.go new file mode 
100644 index 0000000..72eb8c8 --- /dev/null +++ b/internal/apierror/apierror.go @@ -0,0 +1,53 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package apierror + +import ( + "fmt" + "net/http" + "net/http/httputil" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/apijson" +) + +// Error represents an error that originates from the API, i.e. when a request is +// made and the API returns a response with a HTTP status code. Other errors are +// not wrapped by this SDK. +type Error struct { + JSON errorJSON `json:"-"` + StatusCode int + Request *http.Request + Response *http.Response +} + +// errorJSON contains the JSON metadata for the struct [Error] +type errorJSON struct { + raw string + ExtraFields map[string]apijson.Field +} + +func (r *Error) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r errorJSON) RawJSON() string { + return r.raw +} + +func (r *Error) Error() string { + // Attempt to re-populate the response body + return fmt.Sprintf("%s \"%s\": %d %s %s", r.Request.Method, r.Request.URL, r.Response.StatusCode, http.StatusText(r.Response.StatusCode), r.JSON.RawJSON()) +} + +func (r *Error) DumpRequest(body bool) []byte { + if r.Request.GetBody != nil { + r.Request.Body, _ = r.Request.GetBody() + } + out, _ := httputil.DumpRequestOut(r.Request, body) + return out +} + +func (r *Error) DumpResponse(body bool) []byte { + out, _ := httputil.DumpResponse(r.Response, body) + return out +} diff --git a/internal/apiform/encoder.go b/internal/apiform/encoder.go new file mode 100644 index 0000000..96b0846 --- /dev/null +++ b/internal/apiform/encoder.go @@ -0,0 +1,381 @@ +package apiform + +import ( + "fmt" + "io" + "mime/multipart" + "net/textproto" + "path" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/param" +) + +var encoders sync.Map // map[encoderEntry]encoderFunc + +func Marshal(value 
interface{}, writer *multipart.Writer) error { + e := &encoder{dateFormat: time.RFC3339} + return e.marshal(value, writer) +} + +func MarshalRoot(value interface{}, writer *multipart.Writer) error { + e := &encoder{root: true, dateFormat: time.RFC3339} + return e.marshal(value, writer) +} + +type encoder struct { + dateFormat string + root bool +} + +type encoderFunc func(key string, value reflect.Value, writer *multipart.Writer) error + +type encoderField struct { + tag parsedStructTag + fn encoderFunc + idx []int +} + +type encoderEntry struct { + reflect.Type + dateFormat string + root bool +} + +func (e *encoder) marshal(value interface{}, writer *multipart.Writer) error { + val := reflect.ValueOf(value) + if !val.IsValid() { + return nil + } + typ := val.Type() + enc := e.typeEncoder(typ) + return enc("", val, writer) +} + +func (e *encoder) typeEncoder(t reflect.Type) encoderFunc { + entry := encoderEntry{ + Type: t, + dateFormat: e.dateFormat, + root: e.root, + } + + if fi, ok := encoders.Load(entry); ok { + return fi.(encoderFunc) + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + var ( + wg sync.WaitGroup + f encoderFunc + ) + wg.Add(1) + fi, loaded := encoders.LoadOrStore(entry, encoderFunc(func(key string, v reflect.Value, writer *multipart.Writer) error { + wg.Wait() + return f(key, v, writer) + })) + if loaded { + return fi.(encoderFunc) + } + + // Compute the real encoder and replace the indirect func with it. 
+ f = e.newTypeEncoder(t) + wg.Done() + encoders.Store(entry, f) + return f +} + +func (e *encoder) newTypeEncoder(t reflect.Type) encoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return e.newTimeTypeEncoder() + } + if t.ConvertibleTo(reflect.TypeOf((*io.Reader)(nil)).Elem()) { + return e.newReaderTypeEncoder() + } + e.root = false + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + + innerEncoder := e.typeEncoder(inner) + return func(key string, v reflect.Value, writer *multipart.Writer) error { + if !v.IsValid() || v.IsNil() { + return nil + } + return innerEncoder(key, v.Elem(), writer) + } + case reflect.Struct: + return e.newStructTypeEncoder(t) + case reflect.Slice, reflect.Array: + return e.newArrayTypeEncoder(t) + case reflect.Map: + return e.newMapEncoder(t) + case reflect.Interface: + return e.newInterfaceEncoder() + default: + return e.newPrimitiveTypeEncoder(t) + } +} + +func (e *encoder) newPrimitiveTypeEncoder(t reflect.Type) encoderFunc { + switch t.Kind() { + // Note that we could use `gjson` to encode these types but it would complicate our + // code more and this current code shouldn't cause any issues + case reflect.String: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, v.String()) + } + case reflect.Bool: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + if v.Bool() { + return writer.WriteField(key, "true") + } + return writer.WriteField(key, "false") + } + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, strconv.FormatInt(v.Int(), 10)) + } + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, strconv.FormatUint(v.Uint(), 10)) + } + case reflect.Float32: + return func(key string, v 
reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, strconv.FormatFloat(v.Float(), 'f', -1, 32)) + } + case reflect.Float64: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) + } + default: + return func(key string, v reflect.Value, writer *multipart.Writer) error { + return fmt.Errorf("unknown type received at primitive encoder: %s", t.String()) + } + } +} + +func (e *encoder) newArrayTypeEncoder(t reflect.Type) encoderFunc { + itemEncoder := e.typeEncoder(t.Elem()) + + return func(key string, v reflect.Value, writer *multipart.Writer) error { + if key != "" { + key = key + "." + } + for i := 0; i < v.Len(); i++ { + err := itemEncoder(key+strconv.Itoa(i), v.Index(i), writer) + if err != nil { + return err + } + } + return nil + } +} + +func (e *encoder) newStructTypeEncoder(t reflect.Type) encoderFunc { + if t.Implements(reflect.TypeOf((*param.FieldLike)(nil)).Elem()) { + return e.newFieldTypeEncoder(t) + } + + encoderFields := []encoderField{} + extraEncoder := (*encoderField)(nil) + + // This helper allows us to recursively collect field encoders into a flat + // array. The parameter `index` keeps track of the access patterns necessary + // to get to some field. + var collectEncoderFields func(r reflect.Type, index []int) + collectEncoderFields = func(r reflect.Type, index []int) { + for i := 0; i < r.NumField(); i++ { + idx := append(index, i) + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the field and get their encoders as well. + if field.Anonymous { + collectEncoderFields(field.Type, idx) + continue + } + // If json tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. 
+ ptag, ok := parseFormStructTag(field) + if !ok { + continue + } + // We only want to support unexported field if they're tagged with + // `extras` because that field shouldn't be part of the public API. We + // also want to only keep the top level extras + if ptag.extras && len(index) == 0 { + extraEncoder = &encoderField{ptag, e.typeEncoder(field.Type.Elem()), idx} + continue + } + if ptag.name == "-" { + continue + } + + dateFormat, ok := parseFormatStructTag(field) + oldFormat := e.dateFormat + if ok { + switch dateFormat { + case "date-time": + e.dateFormat = time.RFC3339 + case "date": + e.dateFormat = "2006-01-02" + } + } + encoderFields = append(encoderFields, encoderField{ptag, e.typeEncoder(field.Type), idx}) + e.dateFormat = oldFormat + } + } + collectEncoderFields(t, []int{}) + + // Ensure deterministic output by sorting by lexicographic order + sort.Slice(encoderFields, func(i, j int) bool { + return encoderFields[i].tag.name < encoderFields[j].tag.name + }) + + return func(key string, value reflect.Value, writer *multipart.Writer) error { + if key != "" { + key = key + "." 
+ } + + for _, ef := range encoderFields { + field := value.FieldByIndex(ef.idx) + err := ef.fn(key+ef.tag.name, field, writer) + if err != nil { + return err + } + } + + if extraEncoder != nil { + err := e.encodeMapEntries(key, value.FieldByIndex(extraEncoder.idx), writer) + if err != nil { + return err + } + } + + return nil + } +} + +func (e *encoder) newFieldTypeEncoder(t reflect.Type) encoderFunc { + f, _ := t.FieldByName("Value") + enc := e.typeEncoder(f.Type) + + return func(key string, value reflect.Value, writer *multipart.Writer) error { + present := value.FieldByName("Present") + if !present.Bool() { + return nil + } + null := value.FieldByName("Null") + if null.Bool() { + return nil + } + raw := value.FieldByName("Raw") + if !raw.IsNil() { + return e.typeEncoder(raw.Type())(key, raw, writer) + } + return enc(key, value.FieldByName("Value"), writer) + } +} + +func (e *encoder) newTimeTypeEncoder() encoderFunc { + format := e.dateFormat + return func(key string, value reflect.Value, writer *multipart.Writer) error { + return writer.WriteField(key, value.Convert(reflect.TypeOf(time.Time{})).Interface().(time.Time).Format(format)) + } +} + +func (e encoder) newInterfaceEncoder() encoderFunc { + return func(key string, value reflect.Value, writer *multipart.Writer) error { + value = value.Elem() + if !value.IsValid() { + return nil + } + return e.typeEncoder(value.Type())(key, value, writer) + } +} + +var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") + +func escapeQuotes(s string) string { + return quoteEscaper.Replace(s) +} + +func (e *encoder) newReaderTypeEncoder() encoderFunc { + return func(key string, value reflect.Value, writer *multipart.Writer) error { + reader := value.Convert(reflect.TypeOf((*io.Reader)(nil)).Elem()).Interface().(io.Reader) + filename := "anonymous_file" + contentType := "application/octet-stream" + if named, ok := reader.(interface{ Name() string }); ok { + filename = path.Base(named.Name()) + } + if typed, ok := 
reader.(interface{ ContentType() string }); ok { + contentType = path.Base(typed.ContentType()) + } + + // Below is taken almost 1-for-1 from [multipart.CreateFormFile] + h := make(textproto.MIMEHeader) + h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="%s"; filename="%s"`, escapeQuotes(key), escapeQuotes(filename))) + h.Set("Content-Type", contentType) + filewriter, err := writer.CreatePart(h) + if err != nil { + return err + } + _, err = io.Copy(filewriter, reader) + return err + } +} + +// encodeMapEntries writes every entry of the given map value as a multipart form field on writer, +// prefixing each field name with key (dot-separated); entries are sorted by key for deterministic output. +func (e *encoder) encodeMapEntries(key string, v reflect.Value, writer *multipart.Writer) error { + type mapPair struct { + key string + value reflect.Value + } + + if key != "" { + key = key + "." + } + + pairs := []mapPair{} + + iter := v.MapRange() + for iter.Next() { + if iter.Key().Type().Kind() == reflect.String { + pairs = append(pairs, mapPair{key: iter.Key().String(), value: iter.Value()}) + } else { + return fmt.Errorf("cannot encode a map with a non string key") + } + } + + // Ensure deterministic output + sort.Slice(pairs, func(i, j int) bool { + return pairs[i].key < pairs[j].key + }) + + elementEncoder := e.typeEncoder(v.Type().Elem()) + for _, p := range pairs { + err := elementEncoder(key+string(p.key), p.value, writer) + if err != nil { + return err + } + } + + return nil +} + +func (e *encoder) newMapEncoder(t reflect.Type) encoderFunc { + return func(key string, value reflect.Value, writer *multipart.Writer) error { + return e.encodeMapEntries(key, value, writer) + } +} diff --git a/internal/apiform/form.go b/internal/apiform/form.go new file mode 100644 index 0000000..5445116 --- /dev/null +++ b/internal/apiform/form.go @@ -0,0 +1,5 @@ +package apiform + +type Marshaler interface { + MarshalMultipart() ([]byte, string, error) +} diff --git a/internal/apiform/form_test.go 
b/internal/apiform/form_test.go new file mode 100644 index 0000000..39d1460 --- /dev/null +++ b/internal/apiform/form_test.go @@ -0,0 +1,440 @@ +package apiform + +import ( + "bytes" + "mime/multipart" + "strings" + "testing" + "time" +) + +func P[T any](v T) *T { return &v } + +type Primitives struct { + A bool `form:"a"` + B int `form:"b"` + C uint `form:"c"` + D float64 `form:"d"` + E float32 `form:"e"` + F []int `form:"f"` +} + +type PrimitivePointers struct { + A *bool `form:"a"` + B *int `form:"b"` + C *uint `form:"c"` + D *float64 `form:"d"` + E *float32 `form:"e"` + F *[]int `form:"f"` +} + +type Slices struct { + Slice []Primitives `form:"slices"` +} + +type DateTime struct { + Date time.Time `form:"date" format:"date"` + DateTime time.Time `form:"date-time" format:"date-time"` +} + +type AdditionalProperties struct { + A bool `form:"a"` + Extras map[string]interface{} `form:"-,extras"` +} + +type TypedAdditionalProperties struct { + A bool `form:"a"` + Extras map[string]int `form:"-,extras"` +} + +type EmbeddedStructs struct { + AdditionalProperties + A *int `form:"number2"` + Extras map[string]interface{} `form:"-,extras"` +} + +type Recursive struct { + Name string `form:"name"` + Child *Recursive `form:"child"` +} + +type UnknownStruct struct { + Unknown interface{} `form:"unknown"` +} + +type UnionStruct struct { + Union Union `form:"union" format:"date"` +} + +type Union interface { + union() +} + +type UnionInteger int64 + +func (UnionInteger) union() {} + +type UnionStructA struct { + Type string `form:"type"` + A string `form:"a"` + B string `form:"b"` +} + +func (UnionStructA) union() {} + +type UnionStructB struct { + Type string `form:"type"` + A string `form:"a"` +} + +func (UnionStructB) union() {} + +type UnionTime time.Time + +func (UnionTime) union() {} + +type ReaderStruct struct { +} + +var tests = map[string]struct { + buf string + val interface{} +}{ + "map_string": { + `--xxx +Content-Disposition: form-data; name="foo" + +bar +--xxx-- 
+`, + map[string]string{"foo": "bar"}, + }, + + "map_interface": { + `--xxx +Content-Disposition: form-data; name="a" + +1 +--xxx +Content-Disposition: form-data; name="b" + +str +--xxx +Content-Disposition: form-data; name="c" + +false +--xxx-- +`, + map[string]interface{}{"a": float64(1), "b": "str", "c": false}, + }, + + "primitive_struct": { + `--xxx +Content-Disposition: form-data; name="a" + +false +--xxx +Content-Disposition: form-data; name="b" + +237628372683 +--xxx +Content-Disposition: form-data; name="c" + +654 +--xxx +Content-Disposition: form-data; name="d" + +9999.43 +--xxx +Content-Disposition: form-data; name="e" + +43.76 +--xxx +Content-Disposition: form-data; name="f.0" + +1 +--xxx +Content-Disposition: form-data; name="f.1" + +2 +--xxx +Content-Disposition: form-data; name="f.2" + +3 +--xxx +Content-Disposition: form-data; name="f.3" + +4 +--xxx-- +`, + Primitives{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + + "slices": { + `--xxx +Content-Disposition: form-data; name="slices.0.a" + +false +--xxx +Content-Disposition: form-data; name="slices.0.b" + +237628372683 +--xxx +Content-Disposition: form-data; name="slices.0.c" + +654 +--xxx +Content-Disposition: form-data; name="slices.0.d" + +9999.43 +--xxx +Content-Disposition: form-data; name="slices.0.e" + +43.76 +--xxx +Content-Disposition: form-data; name="slices.0.f.0" + +1 +--xxx +Content-Disposition: form-data; name="slices.0.f.1" + +2 +--xxx +Content-Disposition: form-data; name="slices.0.f.2" + +3 +--xxx +Content-Disposition: form-data; name="slices.0.f.3" + +4 +--xxx-- +`, + Slices{ + Slice: []Primitives{{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}}, + }, + }, + + "primitive_pointer_struct": { + `--xxx +Content-Disposition: form-data; name="a" + +false +--xxx +Content-Disposition: form-data; name="b" + +237628372683 +--xxx +Content-Disposition: form-data; name="c" + +654 +--xxx +Content-Disposition: 
form-data; name="d" + +9999.43 +--xxx +Content-Disposition: form-data; name="e" + +43.76 +--xxx +Content-Disposition: form-data; name="f.0" + +1 +--xxx +Content-Disposition: form-data; name="f.1" + +2 +--xxx +Content-Disposition: form-data; name="f.2" + +3 +--xxx +Content-Disposition: form-data; name="f.3" + +4 +--xxx +Content-Disposition: form-data; name="f.4" + +5 +--xxx-- +`, + PrimitivePointers{ + A: P(false), + B: P(237628372683), + C: P(uint(654)), + D: P(9999.43), + E: P(float32(43.76)), + F: &[]int{1, 2, 3, 4, 5}, + }, + }, + + "datetime_struct": { + `--xxx +Content-Disposition: form-data; name="date" + +2006-01-02 +--xxx +Content-Disposition: form-data; name="date-time" + +2006-01-02T15:04:05Z +--xxx-- +`, + DateTime{ + Date: time.Date(2006, time.January, 2, 0, 0, 0, 0, time.UTC), + DateTime: time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC), + }, + }, + + "additional_properties": { + `--xxx +Content-Disposition: form-data; name="a" + +true +--xxx +Content-Disposition: form-data; name="bar" + +value +--xxx +Content-Disposition: form-data; name="foo" + +true +--xxx-- +`, + AdditionalProperties{ + A: true, + Extras: map[string]interface{}{ + "bar": "value", + "foo": true, + }, + }, + }, + + "recursive_struct": { + `--xxx +Content-Disposition: form-data; name="child.name" + +Alex +--xxx +Content-Disposition: form-data; name="name" + +Robert +--xxx-- +`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + }, + + "unknown_struct_number": { + `--xxx +Content-Disposition: form-data; name="unknown" + +12 +--xxx-- +`, + UnknownStruct{ + Unknown: 12., + }, + }, + + "unknown_struct_map": { + `--xxx +Content-Disposition: form-data; name="unknown.foo" + +bar +--xxx-- +`, + UnknownStruct{ + Unknown: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + + "union_integer": { + `--xxx +Content-Disposition: form-data; name="union" + +12 +--xxx-- +`, + UnionStruct{ + Union: UnionInteger(12), + }, + }, + + "union_struct_discriminated_a": { + `--xxx 
+Content-Disposition: form-data; name="union.a" + +foo +--xxx +Content-Disposition: form-data; name="union.b" + +bar +--xxx +Content-Disposition: form-data; name="union.type" + +typeA +--xxx-- +`, + + UnionStruct{ + Union: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }, + }, + }, + + "union_struct_discriminated_b": { + `--xxx +Content-Disposition: form-data; name="union.a" + +foo +--xxx +Content-Disposition: form-data; name="union.type" + +typeB +--xxx-- +`, + UnionStruct{ + Union: UnionStructB{ + Type: "typeB", + A: "foo", + }, + }, + }, + + "union_struct_time": { + `--xxx +Content-Disposition: form-data; name="union" + +2010-05-23 +--xxx-- +`, + UnionStruct{ + Union: UnionTime(time.Date(2010, 05, 23, 0, 0, 0, 0, time.UTC)), + }, + }, +} + +func TestEncode(t *testing.T) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { + buf := bytes.NewBuffer(nil) + writer := multipart.NewWriter(buf) + writer.SetBoundary("xxx") + err := Marshal(test.val, writer) + if err != nil { + t.Errorf("serialization of %v failed with error %v", test.val, err) + } + err = writer.Close() + if err != nil { + t.Errorf("serialization of %v failed with error %v", test.val, err) + } + raw := buf.Bytes() + if string(raw) != strings.ReplaceAll(test.buf, "\n", "\r\n") { + t.Errorf("expected %+#v to serialize to '%s' but got '%s'", test.val, test.buf, string(raw)) + } + }) + } +} diff --git a/internal/apiform/tag.go b/internal/apiform/tag.go new file mode 100644 index 0000000..b22e054 --- /dev/null +++ b/internal/apiform/tag.go @@ -0,0 +1,48 @@ +package apiform + +import ( + "reflect" + "strings" +) + +const jsonStructTag = "json" +const formStructTag = "form" +const formatStructTag = "format" + +type parsedStructTag struct { + name string + required bool + extras bool + metadata bool +} + +func parseFormStructTag(field reflect.StructField) (tag parsedStructTag, ok bool) { + raw, ok := field.Tag.Lookup(formStructTag) + if !ok { + raw, ok = 
field.Tag.Lookup(jsonStructTag) + } + if !ok { + return + } + parts := strings.Split(raw, ",") + if len(parts) == 0 { + return tag, false + } + tag.name = parts[0] + for _, part := range parts[1:] { + switch part { + case "required": + tag.required = true + case "extras": + tag.extras = true + case "metadata": + tag.metadata = true + } + } + return +} + +func parseFormatStructTag(field reflect.StructField) (format string, ok bool) { + format, ok = field.Tag.Lookup(formatStructTag) + return +} diff --git a/internal/apijson/decoder.go b/internal/apijson/decoder.go new file mode 100644 index 0000000..68b7ed6 --- /dev/null +++ b/internal/apijson/decoder.go @@ -0,0 +1,670 @@ +package apijson + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "strconv" + "sync" + "time" + "unsafe" + + "github.com/tidwall/gjson" +) + +// decoders is a synchronized map with roughly the following type: +// map[reflect.Type]decoderFunc +var decoders sync.Map + +// Unmarshal is similar to [encoding/json.Unmarshal] and parses the JSON-encoded +// data and stores it in the given pointer. +func Unmarshal(raw []byte, to any) error { + d := &decoderBuilder{dateFormat: time.RFC3339} + return d.unmarshal(raw, to) +} + +// UnmarshalRoot is like Unmarshal, but doesn't try to call MarshalJSON on the +// root element. Useful if a struct's UnmarshalJSON is overrode to use the +// behavior of this encoder versus the standard library. +func UnmarshalRoot(raw []byte, to any) error { + d := &decoderBuilder{dateFormat: time.RFC3339, root: true} + return d.unmarshal(raw, to) +} + +// decoderBuilder contains the 'compile-time' state of the decoder. +type decoderBuilder struct { + // Whether or not this is the first element and called by [UnmarshalRoot], see + // the documentation there to see why this is necessary. + root bool + // The dateFormat (a format string for [time.Format]) which is chosen by the + // last struct tag that was seen. 
+ dateFormat string +} + +// decoderState contains the 'run-time' state of the decoder. +type decoderState struct { + strict bool + exactness exactness +} + +// Exactness refers to how close to the type the result was if deserialization +// was successful. This is useful in deserializing unions, where you want to try +// each entry, first with strict, then with looser validation, without actually +// having to do a lot of redundant work by marshalling twice (or maybe even more +// times). +type exactness int8 + +const ( + // Some values had to be fudged a bit, for example by converting a string to an + // int, or an enum with extra values. + loose exactness = iota + // There are some extra arguments, but otherwise it matches the union. + extras + // Exactly right. + exact +) + +type decoderFunc func(node gjson.Result, value reflect.Value, state *decoderState) error + +type decoderField struct { + tag parsedStructTag + fn decoderFunc + idx []int + goname string +} + +type decoderEntry struct { + reflect.Type + dateFormat string + root bool +} + +func (d *decoderBuilder) unmarshal(raw []byte, to any) error { + value := reflect.ValueOf(to).Elem() + result := gjson.ParseBytes(raw) + if !value.IsValid() { + return fmt.Errorf("apijson: cannot marshal into invalid value") + } + return d.typeDecoder(value.Type())(result, value, &decoderState{strict: false, exactness: exact}) +} + +func (d *decoderBuilder) typeDecoder(t reflect.Type) decoderFunc { + entry := decoderEntry{ + Type: t, + dateFormat: d.dateFormat, + root: d.root, + } + + if fi, ok := decoders.Load(entry); ok { + return fi.(decoderFunc) + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types.
+ var ( + wg sync.WaitGroup + f decoderFunc + ) + wg.Add(1) + fi, loaded := decoders.LoadOrStore(entry, decoderFunc(func(node gjson.Result, v reflect.Value, state *decoderState) error { + wg.Wait() + return f(node, v, state) + })) + if loaded { + return fi.(decoderFunc) + } + + // Compute the real decoder and replace the indirect func with it. + f = d.newTypeDecoder(t) + wg.Done() + decoders.Store(entry, f) + return f +} + +func indirectUnmarshalerDecoder(n gjson.Result, v reflect.Value, state *decoderState) error { + return v.Addr().Interface().(json.Unmarshaler).UnmarshalJSON([]byte(n.Raw)) +} + +func unmarshalerDecoder(n gjson.Result, v reflect.Value, state *decoderState) error { + if v.Kind() == reflect.Pointer && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + return v.Interface().(json.Unmarshaler).UnmarshalJSON([]byte(n.Raw)) +} + +func (d *decoderBuilder) newTypeDecoder(t reflect.Type) decoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return d.newTimeTypeDecoder(t) + } + if !d.root && t.Implements(reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()) { + return unmarshalerDecoder + } + if !d.root && reflect.PointerTo(t).Implements(reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()) { + if _, ok := unionVariants[t]; !ok { + return indirectUnmarshalerDecoder + } + } + d.root = false + + if _, ok := unionRegistry[t]; ok { + return d.newUnionDecoder(t) + } + + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + innerDecoder := d.typeDecoder(inner) + + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + if !v.IsValid() { + return fmt.Errorf("apijson: unexpected invalid reflection value %+#v", v) + } + + newValue := reflect.New(inner).Elem() + err := innerDecoder(n, newValue, state) + if err != nil { + return err + } + + v.Set(newValue.Addr()) + return nil + } + case reflect.Struct: + return d.newStructTypeDecoder(t) + case reflect.Array: + fallthrough + case reflect.Slice: + return d.newArrayTypeDecoder(t) + 
case reflect.Map: + return d.newMapDecoder(t) + case reflect.Interface: + return func(node gjson.Result, value reflect.Value, state *decoderState) error { + if !value.IsValid() { + return fmt.Errorf("apijson: unexpected invalid value %+#v", value) + } + if node.Value() != nil && value.CanSet() { + value.Set(reflect.ValueOf(node.Value())) + } + return nil + } + default: + return d.newPrimitiveTypeDecoder(t) + } +} + +// newUnionDecoder returns a decoderFunc that deserializes into a union using an +// algorithm roughly similar to Pydantic's [smart algorithm]. +// +// Conceptually this is equivalent to choosing the best schema based on how 'exact' +// the deserialization is for each of the schemas. +// +// If there is a tie in the level of exactness, then the tie is broken +// left-to-right. +// +// [smart algorithm]: https://docs.pydantic.dev/latest/concepts/unions/#smart-mode +func (d *decoderBuilder) newUnionDecoder(t reflect.Type) decoderFunc { + unionEntry, ok := unionRegistry[t] + if !ok { + panic("apijson: couldn't find union of type " + t.String() + " in union registry") + } + decoders := []decoderFunc{} + for _, variant := range unionEntry.variants { + decoder := d.typeDecoder(variant.Type) + decoders = append(decoders, decoder) + } + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + // If there is a discriminator match, circumvent the exactness logic entirely + for idx, variant := range unionEntry.variants { + decoder := decoders[idx] + if variant.TypeFilter != n.Type { + continue + } + + if len(unionEntry.discriminatorKey) != 0 { + discriminatorValue := n.Get(unionEntry.discriminatorKey).Value() + if discriminatorValue == variant.DiscriminatorValue { + inner := reflect.New(variant.Type).Elem() + err := decoder(n, inner, state) + v.Set(inner) + return err + } + } + } + + // Set bestExactness to worse than loose + bestExactness := loose - 1 + for idx, variant := range unionEntry.variants { + decoder := decoders[idx] + if 
variant.TypeFilter != n.Type { + continue + } + sub := decoderState{strict: state.strict, exactness: exact} + inner := reflect.New(variant.Type).Elem() + err := decoder(n, inner, &sub) + if err != nil { + continue + } + if sub.exactness == exact { + v.Set(inner) + return nil + } + if sub.exactness > bestExactness { + v.Set(inner) + bestExactness = sub.exactness + } + } + + if bestExactness < loose { + return errors.New("apijson: was not able to coerce type as union") + } + + if guardStrict(state, bestExactness != exact) { + return errors.New("apijson: was not able to coerce type as union strictly") + } + + return nil + } +} + +func (d *decoderBuilder) newMapDecoder(t reflect.Type) decoderFunc { + keyType := t.Key() + itemType := t.Elem() + itemDecoder := d.typeDecoder(itemType) + + return func(node gjson.Result, value reflect.Value, state *decoderState) (err error) { + mapValue := reflect.MakeMapWithSize(t, len(node.Map())) + + node.ForEach(func(key, value gjson.Result) bool { + // It's fine for us to just use `ValueOf` here because the key types will + // always be primitive types so we don't need to decode it using the standard pattern + keyValue := reflect.ValueOf(key.Value()) + if !keyValue.IsValid() { + if err == nil { + err = fmt.Errorf("apijson: received invalid key type %v", keyValue.String()) + } + return false + } + if keyValue.Type() != keyType { + if err == nil { + err = fmt.Errorf("apijson: expected key type %v but got %v", keyType, keyValue.Type()) + } + return false + } + + itemValue := reflect.New(itemType).Elem() + itemerr := itemDecoder(value, itemValue, state) + if itemerr != nil { + if err == nil { + err = itemerr + } + return false + } + + mapValue.SetMapIndex(keyValue, itemValue) + return true + }) + + if err != nil { + return err + } + value.Set(mapValue) + return nil + } +} + +func (d *decoderBuilder) newArrayTypeDecoder(t reflect.Type) decoderFunc { + itemDecoder := d.typeDecoder(t.Elem()) + + return func(node gjson.Result, value 
reflect.Value, state *decoderState) (err error) { + if !node.IsArray() { + return fmt.Errorf("apijson: could not deserialize to an array") + } + + arrayNode := node.Array() + + arrayValue := reflect.MakeSlice(reflect.SliceOf(t.Elem()), len(arrayNode), len(arrayNode)) + for i, itemNode := range arrayNode { + err = itemDecoder(itemNode, arrayValue.Index(i), state) + if err != nil { + return err + } + } + + value.Set(arrayValue) + return nil + } +} + +func (d *decoderBuilder) newStructTypeDecoder(t reflect.Type) decoderFunc { + // map of json field name to struct field decoders + decoderFields := map[string]decoderField{} + anonymousDecoders := []decoderField{} + extraDecoder := (*decoderField)(nil) + inlineDecoder := (*decoderField)(nil) + + for i := 0; i < t.NumField(); i++ { + idx := []int{i} + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the fields and get their encoders as well. + if field.Anonymous { + anonymousDecoders = append(anonymousDecoders, decoderField{ + fn: d.typeDecoder(field.Type), + idx: idx[:], + }) + continue + } + // If json tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + // We only want to support unexported fields if they're tagged with + // `extras` because that field shouldn't be part of the public API. 
+ if ptag.extras { + extraDecoder = &decoderField{ptag, d.typeDecoder(field.Type.Elem()), idx, field.Name} + continue + } + if ptag.inline { + inlineDecoder = &decoderField{ptag, d.typeDecoder(field.Type), idx, field.Name} + continue + } + if ptag.metadata { + continue + } + + oldFormat := d.dateFormat + dateFormat, ok := parseFormatStructTag(field) + if ok { + switch dateFormat { + case "date-time": + d.dateFormat = time.RFC3339 + case "date": + d.dateFormat = "2006-01-02" + } + } + decoderFields[ptag.name] = decoderField{ptag, d.typeDecoder(field.Type), idx, field.Name} + d.dateFormat = oldFormat + } + + return func(node gjson.Result, value reflect.Value, state *decoderState) (err error) { + if field := value.FieldByName("JSON"); field.IsValid() { + if raw := field.FieldByName("raw"); raw.IsValid() { + setUnexportedField(raw, node.Raw) + } + } + + for _, decoder := range anonymousDecoders { + // ignore errors + decoder.fn(node, value.FieldByIndex(decoder.idx), state) + } + + if inlineDecoder != nil { + var meta Field + dest := value.FieldByIndex(inlineDecoder.idx) + isValid := false + if dest.IsValid() && node.Type != gjson.Null { + err = inlineDecoder.fn(node, dest, state) + if err == nil { + isValid = true + } + } + + if node.Type == gjson.Null { + meta = Field{ + raw: node.Raw, + status: null, + } + } else if !isValid { + meta = Field{ + raw: node.Raw, + status: invalid, + } + } else if isValid { + meta = Field{ + raw: node.Raw, + status: valid, + } + } + if metadata := getSubField(value, inlineDecoder.idx, inlineDecoder.goname); metadata.IsValid() { + metadata.Set(reflect.ValueOf(meta)) + } + return err + } + + typedExtraType := reflect.Type(nil) + typedExtraFields := reflect.Value{} + if extraDecoder != nil { + typedExtraType = value.FieldByIndex(extraDecoder.idx).Type() + typedExtraFields = reflect.MakeMap(typedExtraType) + } + untypedExtraFields := map[string]Field{} + + for fieldName, itemNode := range node.Map() { + df, explicit := 
decoderFields[fieldName] + var ( + dest reflect.Value + fn decoderFunc + meta Field + ) + if explicit { + fn = df.fn + dest = value.FieldByIndex(df.idx) + } + if !explicit && extraDecoder != nil { + dest = reflect.New(typedExtraType.Elem()).Elem() + fn = extraDecoder.fn + } + + isValid := false + if dest.IsValid() && itemNode.Type != gjson.Null { + err = fn(itemNode, dest, state) + if err == nil { + isValid = true + } + } + + if itemNode.Type == gjson.Null { + meta = Field{ + raw: itemNode.Raw, + status: null, + } + } else if !isValid { + meta = Field{ + raw: itemNode.Raw, + status: invalid, + } + } else if isValid { + meta = Field{ + raw: itemNode.Raw, + status: valid, + } + } + + if explicit { + if metadata := getSubField(value, df.idx, df.goname); metadata.IsValid() { + metadata.Set(reflect.ValueOf(meta)) + } + } + if !explicit { + untypedExtraFields[fieldName] = meta + } + if !explicit && extraDecoder != nil { + typedExtraFields.SetMapIndex(reflect.ValueOf(fieldName), dest) + } + } + + if extraDecoder != nil && typedExtraFields.Len() > 0 { + value.FieldByIndex(extraDecoder.idx).Set(typedExtraFields) + } + + // Set exactness to 'extras' if there are untyped, extra fields. + if len(untypedExtraFields) > 0 && state.exactness > extras { + state.exactness = extras + } + + if metadata := getSubField(value, []int{-1}, "ExtraFields"); metadata.IsValid() && len(untypedExtraFields) > 0 { + metadata.Set(reflect.ValueOf(untypedExtraFields)) + } + return nil + } +} + +func (d *decoderBuilder) newPrimitiveTypeDecoder(t reflect.Type) decoderFunc { + switch t.Kind() { + case reflect.String: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetString(n.String()) + if guardStrict(state, n.Type != gjson.String) { + return fmt.Errorf("apijson: failed to parse string strictly") + } + // Everything that is not an object can be loosely stringified. 
+ if n.Type == gjson.JSON { + return fmt.Errorf("apijson: failed to parse string") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed string enum validation") + } + return nil + } + case reflect.Bool: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetBool(n.Bool()) + if guardStrict(state, n.Type != gjson.True && n.Type != gjson.False) { + return fmt.Errorf("apijson: failed to parse bool strictly") + } + // Numbers and strings that are either 'true' or 'false' can be loosely + // deserialized as bool. + if n.Type == gjson.String && (n.Raw != "true" && n.Raw != "false") || n.Type == gjson.JSON { + return fmt.Errorf("apijson: failed to parse bool") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed bool enum validation") + } + return nil + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetInt(n.Int()) + if guardStrict(state, n.Type != gjson.Number || n.Num != float64(int(n.Num))) { + return fmt.Errorf("apijson: failed to parse int strictly") + } + // Numbers, booleans, and strings that maybe look like numbers can be + // loosely deserialized as numbers. + if n.Type == gjson.JSON || (n.Type == gjson.String && !canParseAsNumber(n.Str)) { + return fmt.Errorf("apijson: failed to parse int") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed int enum validation") + } + return nil + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetUint(n.Uint()) + if guardStrict(state, n.Type != gjson.Number || n.Num != float64(int(n.Num)) || n.Num < 0) { + return fmt.Errorf("apijson: failed to parse uint strictly") + } + // Numbers, booleans, and strings that maybe look like numbers can be + // loosely deserialized as uint. 
+ if n.Type == gjson.JSON || (n.Type == gjson.String && !canParseAsNumber(n.Str)) { + return fmt.Errorf("apijson: failed to parse uint") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed uint enum validation") + } + return nil + } + case reflect.Float32, reflect.Float64: + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + v.SetFloat(n.Float()) + if guardStrict(state, n.Type != gjson.Number) { + return fmt.Errorf("apijson: failed to parse float strictly") + } + // Numbers, booleans, and strings that maybe look like numbers can be + // loosely deserialized as floats. + if n.Type == gjson.JSON || (n.Type == gjson.String && !canParseAsNumber(n.Str)) { + return fmt.Errorf("apijson: failed to parse float") + } + if guardUnknown(state, v) { + return fmt.Errorf("apijson: failed float enum validation") + } + return nil + } + default: + return func(node gjson.Result, v reflect.Value, state *decoderState) error { + return fmt.Errorf("unknown type received at primitive decoder: %s", t.String()) + } + } +} + +func (d *decoderBuilder) newTimeTypeDecoder(t reflect.Type) decoderFunc { + format := d.dateFormat + return func(n gjson.Result, v reflect.Value, state *decoderState) error { + parsed, err := time.Parse(format, n.Str) + if err == nil { + v.Set(reflect.ValueOf(parsed).Convert(t)) + return nil + } + + if guardStrict(state, true) { + return err + } + + layouts := []string{ + "2006-01-02", + "2006-01-02T15:04:05Z07:00", + "2006-01-02T15:04:05Z0700", + "2006-01-02T15:04:05", + "2006-01-02 15:04:05Z07:00", + "2006-01-02 15:04:05Z0700", + "2006-01-02 15:04:05", + } + + for _, layout := range layouts { + parsed, err := time.Parse(layout, n.Str) + if err == nil { + v.Set(reflect.ValueOf(parsed).Convert(t)) + return nil + } + } + + return fmt.Errorf("unable to leniently parse date-time string: %s", n.Str) + } +} + +func setUnexportedField(field reflect.Value, value interface{}) { + reflect.NewAt(field.Type(), 
unsafe.Pointer(field.UnsafeAddr())).Elem().Set(reflect.ValueOf(value)) +} + +func guardStrict(state *decoderState, cond bool) bool { + if !cond { + return false + } + + if state.strict { + return true + } + + state.exactness = loose + return false +} + +func canParseAsNumber(str string) bool { + _, err := strconv.ParseFloat(str, 64) + return err == nil +} + +func guardUnknown(state *decoderState, v reflect.Value) bool { + if have, ok := v.Interface().(interface{ IsKnown() bool }); guardStrict(state, ok && !have.IsKnown()) { + return true + } + return false +} diff --git a/internal/apijson/encoder.go b/internal/apijson/encoder.go new file mode 100644 index 0000000..f33da4a --- /dev/null +++ b/internal/apijson/encoder.go @@ -0,0 +1,391 @@ +package apijson + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "github.com/tidwall/sjson" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/param" +) + +var encoders sync.Map // map[encoderEntry]encoderFunc + +func Marshal(value interface{}) ([]byte, error) { + e := &encoder{dateFormat: time.RFC3339} + return e.marshal(value) +} + +func MarshalRoot(value interface{}) ([]byte, error) { + e := &encoder{root: true, dateFormat: time.RFC3339} + return e.marshal(value) +} + +type encoder struct { + dateFormat string + root bool +} + +type encoderFunc func(value reflect.Value) ([]byte, error) + +type encoderField struct { + tag parsedStructTag + fn encoderFunc + idx []int +} + +type encoderEntry struct { + reflect.Type + dateFormat string + root bool +} + +func (e *encoder) marshal(value interface{}) ([]byte, error) { + val := reflect.ValueOf(value) + if !val.IsValid() { + return nil, nil + } + typ := val.Type() + enc := e.typeEncoder(typ) + return enc(val) +} + +func (e *encoder) typeEncoder(t reflect.Type) encoderFunc { + entry := encoderEntry{ + Type: t, + dateFormat: e.dateFormat, + root: e.root, + } + + if fi, ok := encoders.Load(entry); ok { + return fi.(encoderFunc) 
+ } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + var ( + wg sync.WaitGroup + f encoderFunc + ) + wg.Add(1) + fi, loaded := encoders.LoadOrStore(entry, encoderFunc(func(v reflect.Value) ([]byte, error) { + wg.Wait() + return f(v) + })) + if loaded { + return fi.(encoderFunc) + } + + // Compute the real encoder and replace the indirect func with it. + f = e.newTypeEncoder(t) + wg.Done() + encoders.Store(entry, f) + return f +} + +func marshalerEncoder(v reflect.Value) ([]byte, error) { + return v.Interface().(json.Marshaler).MarshalJSON() +} + +func indirectMarshalerEncoder(v reflect.Value) ([]byte, error) { + return v.Addr().Interface().(json.Marshaler).MarshalJSON() +} + +func (e *encoder) newTypeEncoder(t reflect.Type) encoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return e.newTimeTypeEncoder() + } + if !e.root && t.Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) { + return marshalerEncoder + } + if !e.root && reflect.PointerTo(t).Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) { + return indirectMarshalerEncoder + } + e.root = false + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + + innerEncoder := e.typeEncoder(inner) + return func(v reflect.Value) ([]byte, error) { + if !v.IsValid() || v.IsNil() { + return nil, nil + } + return innerEncoder(v.Elem()) + } + case reflect.Struct: + return e.newStructTypeEncoder(t) + case reflect.Array: + fallthrough + case reflect.Slice: + return e.newArrayTypeEncoder(t) + case reflect.Map: + return e.newMapEncoder(t) + case reflect.Interface: + return e.newInterfaceEncoder() + default: + return e.newPrimitiveTypeEncoder(t) + } +} + +func (e *encoder) newPrimitiveTypeEncoder(t reflect.Type) encoderFunc { + switch t.Kind() { + // Note that we could use `gjson` to encode these types but it 
would complicate our + // code more and this current code shouldn't cause any issues + case reflect.String: + return func(v reflect.Value) ([]byte, error) { + return []byte(fmt.Sprintf("%q", v.String())), nil + } + case reflect.Bool: + return func(v reflect.Value) ([]byte, error) { + if v.Bool() { + return []byte("true"), nil + } + return []byte("false"), nil + } + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatInt(v.Int(), 10)), nil + } + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatUint(v.Uint(), 10)), nil + } + case reflect.Float32: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatFloat(v.Float(), 'f', -1, 32)), nil + } + case reflect.Float64: + return func(v reflect.Value) ([]byte, error) { + return []byte(strconv.FormatFloat(v.Float(), 'f', -1, 64)), nil + } + default: + return func(v reflect.Value) ([]byte, error) { + return nil, fmt.Errorf("unknown type received at primitive encoder: %s", t.String()) + } + } +} + +func (e *encoder) newArrayTypeEncoder(t reflect.Type) encoderFunc { + itemEncoder := e.typeEncoder(t.Elem()) + + return func(value reflect.Value) ([]byte, error) { + json := []byte("[]") + for i := 0; i < value.Len(); i++ { + var value, err = itemEncoder(value.Index(i)) + if err != nil { + return nil, err + } + if value == nil { + // Assume that empty items should be inserted as `null` so that the output array + // will be the same length as the input array + value = []byte("null") + } + + json, err = sjson.SetRawBytes(json, "-1", value) + if err != nil { + return nil, err + } + } + + return json, nil + } +} + +func (e *encoder) newStructTypeEncoder(t reflect.Type) encoderFunc { + if t.Implements(reflect.TypeOf((*param.FieldLike)(nil)).Elem()) { + return e.newFieldTypeEncoder(t) + } + + encoderFields := []encoderField{} + 
extraEncoder := (*encoderField)(nil) + + // This helper allows us to recursively collect field encoders into a flat + // array. The parameter `index` keeps track of the access patterns necessary + // to get to some field. + var collectEncoderFields func(r reflect.Type, index []int) + collectEncoderFields = func(r reflect.Type, index []int) { + for i := 0; i < r.NumField(); i++ { + idx := append(index, i) + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the field and get their encoders as well. + if field.Anonymous { + collectEncoderFields(field.Type, idx) + continue + } + // If json tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + // We only want to support unexported field if they're tagged with + // `extras` because that field shouldn't be part of the public API. We + // also want to only keep the top level extras + if ptag.extras && len(index) == 0 { + extraEncoder = &encoderField{ptag, e.typeEncoder(field.Type.Elem()), idx} + continue + } + if ptag.name == "-" { + continue + } + + dateFormat, ok := parseFormatStructTag(field) + oldFormat := e.dateFormat + if ok { + switch dateFormat { + case "date-time": + e.dateFormat = time.RFC3339 + case "date": + e.dateFormat = "2006-01-02" + } + } + encoderFields = append(encoderFields, encoderField{ptag, e.typeEncoder(field.Type), idx}) + e.dateFormat = oldFormat + } + } + collectEncoderFields(t, []int{}) + + // Ensure deterministic output by sorting by lexicographic order + sort.Slice(encoderFields, func(i, j int) bool { + return encoderFields[i].tag.name < encoderFields[j].tag.name + }) + + return func(value reflect.Value) (json []byte, err error) { + json = []byte("{}") + + for _, ef := range encoderFields { + field := value.FieldByIndex(ef.idx) + encoded, err := ef.fn(field) + if err != nil { + return 
nil, err + } + if encoded == nil { + continue + } + json, err = sjson.SetRawBytes(json, ef.tag.name, encoded) + if err != nil { + return nil, err + } + } + + if extraEncoder != nil { + json, err = e.encodeMapEntries(json, value.FieldByIndex(extraEncoder.idx)) + if err != nil { + return nil, err + } + } + return + } +} + +func (e *encoder) newFieldTypeEncoder(t reflect.Type) encoderFunc { + f, _ := t.FieldByName("Value") + enc := e.typeEncoder(f.Type) + + return func(value reflect.Value) (json []byte, err error) { + present := value.FieldByName("Present") + if !present.Bool() { + return nil, nil + } + null := value.FieldByName("Null") + if null.Bool() { + return []byte("null"), nil + } + raw := value.FieldByName("Raw") + if !raw.IsNil() { + return e.typeEncoder(raw.Type())(raw) + } + return enc(value.FieldByName("Value")) + } +} + +func (e *encoder) newTimeTypeEncoder() encoderFunc { + format := e.dateFormat + return func(value reflect.Value) (json []byte, err error) { + return []byte(`"` + value.Convert(reflect.TypeOf(time.Time{})).Interface().(time.Time).Format(format) + `"`), nil + } +} + +func (e encoder) newInterfaceEncoder() encoderFunc { + return func(value reflect.Value) ([]byte, error) { + value = value.Elem() + if !value.IsValid() { + return nil, nil + } + return e.typeEncoder(value.Type())(value) + } +} + +// Given a []byte of json (may either be an empty object or an object that already contains entries) +// encode all of the entries in the map to the json byte array. 
+func (e *encoder) encodeMapEntries(json []byte, v reflect.Value) ([]byte, error) { + type mapPair struct { + key []byte + value reflect.Value + } + + pairs := []mapPair{} + keyEncoder := e.typeEncoder(v.Type().Key()) + + iter := v.MapRange() + for iter.Next() { + var encodedKey []byte + if iter.Key().Type().Kind() == reflect.String { + encodedKey = []byte(iter.Key().String()) + } else { + var err error + encodedKey, err = keyEncoder(iter.Key()) + if err != nil { + return nil, err + } + } + pairs = append(pairs, mapPair{key: encodedKey, value: iter.Value()}) + } + + // Ensure deterministic output + sort.Slice(pairs, func(i, j int) bool { + return bytes.Compare(pairs[i].key, pairs[j].key) < 0 + }) + + elementEncoder := e.typeEncoder(v.Type().Elem()) + for _, p := range pairs { + encodedValue, err := elementEncoder(p.value) + if err != nil { + return nil, err + } + if len(encodedValue) == 0 { + continue + } + json, err = sjson.SetRawBytes(json, string(p.key), encodedValue) + if err != nil { + return nil, err + } + } + + return json, nil +} + +func (e *encoder) newMapEncoder(t reflect.Type) encoderFunc { + return func(value reflect.Value) ([]byte, error) { + json := []byte("{}") + var err error + json, err = e.encodeMapEntries(json, value) + if err != nil { + return nil, err + } + return json, nil + } +} diff --git a/internal/apijson/field.go b/internal/apijson/field.go new file mode 100644 index 0000000..3ef207c --- /dev/null +++ b/internal/apijson/field.go @@ -0,0 +1,41 @@ +package apijson + +import "reflect" + +type status uint8 + +const ( + missing status = iota + null + invalid + valid +) + +type Field struct { + raw string + status status +} + +// Returns true if the field is explicitly `null` _or_ if it is not present at all (ie, missing). +// To check if the field's key is present in the JSON with an explicit null value, +// you must check `f.IsNull() && !f.IsMissing()`. 
+func (j Field) IsNull() bool { return j.status <= null } +func (j Field) IsMissing() bool { return j.status == missing } +func (j Field) IsInvalid() bool { return j.status == invalid } +func (j Field) Raw() string { return j.raw } + +func getSubField(root reflect.Value, index []int, name string) reflect.Value { + strct := root.FieldByIndex(index[:len(index)-1]) + if !strct.IsValid() { + panic("couldn't find encapsulating struct for field " + name) + } + meta := strct.FieldByName("JSON") + if !meta.IsValid() { + return reflect.Value{} + } + field := meta.FieldByName(name) + if !field.IsValid() { + return reflect.Value{} + } + return field +} diff --git a/internal/apijson/field_test.go b/internal/apijson/field_test.go new file mode 100644 index 0000000..a0f1c7d --- /dev/null +++ b/internal/apijson/field_test.go @@ -0,0 +1,66 @@ +package apijson + +import ( + "testing" + "time" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/param" +) + +type Struct struct { + A string `json:"a"` + B int64 `json:"b"` +} + +type FieldStruct struct { + A param.Field[string] `json:"a"` + B param.Field[int64] `json:"b"` + C param.Field[Struct] `json:"c"` + D param.Field[time.Time] `json:"d" format:"date"` + E param.Field[time.Time] `json:"e" format:"date-time"` + F param.Field[int64] `json:"f"` +} + +func TestFieldMarshal(t *testing.T) { + tests := map[string]struct { + value interface{} + expected string + }{ + "null_string": {param.Field[string]{Present: true, Null: true}, "null"}, + "null_int": {param.Field[int]{Present: true, Null: true}, "null"}, + "null_int64": {param.Field[int64]{Present: true, Null: true}, "null"}, + "null_struct": {param.Field[Struct]{Present: true, Null: true}, "null"}, + + "string": {param.Field[string]{Present: true, Value: "string"}, `"string"`}, + "int": {param.Field[int]{Present: true, Value: 123}, "123"}, + "int64": {param.Field[int64]{Present: true, Value: int64(123456789123456789)}, "123456789123456789"}, + "struct": 
{param.Field[Struct]{Present: true, Value: Struct{A: "yo", B: 123}}, `{"a":"yo","b":123}`}, + + "string_raw": {param.Field[int]{Present: true, Raw: "string"}, `"string"`}, + "int_raw": {param.Field[int]{Present: true, Raw: 123}, "123"}, + "int64_raw": {param.Field[int]{Present: true, Raw: int64(123456789123456789)}, "123456789123456789"}, + "struct_raw": {param.Field[int]{Present: true, Raw: Struct{A: "yo", B: 123}}, `{"a":"yo","b":123}`}, + + "param_struct": { + FieldStruct{ + A: param.Field[string]{Present: true, Value: "hello"}, + B: param.Field[int64]{Present: true, Value: int64(12)}, + D: param.Field[time.Time]{Present: true, Value: time.Date(2023, time.March, 18, 14, 47, 38, 0, time.UTC)}, + E: param.Field[time.Time]{Present: true, Value: time.Date(2023, time.March, 18, 14, 47, 38, 0, time.UTC)}, + }, + `{"a":"hello","b":12,"d":"2023-03-18","e":"2023-03-18T14:47:38Z"}`, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + b, err := Marshal(test.value) + if err != nil { + t.Fatalf("didn't expect error %v", err) + } + if string(b) != test.expected { + t.Fatalf("expected %s, received %s", test.expected, string(b)) + } + }) + } +} diff --git a/internal/apijson/json_test.go b/internal/apijson/json_test.go new file mode 100644 index 0000000..85cd2b5 --- /dev/null +++ b/internal/apijson/json_test.go @@ -0,0 +1,616 @@ +package apijson + +import ( + "reflect" + "strings" + "testing" + "time" + + "github.com/tidwall/gjson" +) + +func P[T any](v T) *T { return &v } + +type Primitives struct { + A bool `json:"a"` + B int `json:"b"` + C uint `json:"c"` + D float64 `json:"d"` + E float32 `json:"e"` + F []int `json:"f"` +} + +type PrimitivePointers struct { + A *bool `json:"a"` + B *int `json:"b"` + C *uint `json:"c"` + D *float64 `json:"d"` + E *float32 `json:"e"` + F *[]int `json:"f"` +} + +type Slices struct { + Slice []Primitives `json:"slices"` +} + +type DateTime struct { + Date time.Time `json:"date" format:"date"` + DateTime time.Time 
`json:"date-time" format:"date-time"` +} + +type AdditionalProperties struct { + A bool `json:"a"` + ExtraFields map[string]interface{} `json:"-,extras"` +} + +type TypedAdditionalProperties struct { + A bool `json:"a"` + ExtraFields map[string]int `json:"-,extras"` +} + +type EmbeddedStruct struct { + A bool `json:"a"` + B string `json:"b"` + + JSON EmbeddedStructJSON +} + +type EmbeddedStructJSON struct { + A Field + B Field + ExtraFields map[string]Field + raw string +} + +type EmbeddedStructs struct { + EmbeddedStruct + A *int `json:"a"` + ExtraFields map[string]interface{} `json:"-,extras"` + + JSON EmbeddedStructsJSON +} + +type EmbeddedStructsJSON struct { + A Field + ExtraFields map[string]Field + raw string +} + +type Recursive struct { + Name string `json:"name"` + Child *Recursive `json:"child"` +} + +type JSONFieldStruct struct { + A bool `json:"a"` + B int64 `json:"b"` + C string `json:"c"` + D string `json:"d"` + ExtraFields map[string]int64 `json:"-,extras"` + JSON JSONFieldStructJSON `json:"-,metadata"` +} + +type JSONFieldStructJSON struct { + A Field + B Field + C Field + D Field + ExtraFields map[string]Field + raw string +} + +type UnknownStruct struct { + Unknown interface{} `json:"unknown"` +} + +type UnionStruct struct { + Union Union `json:"union" format:"date"` +} + +type Union interface { + union() +} + +type Inline struct { + InlineField Primitives `json:"-,inline"` + JSON InlineJSON `json:"-,metadata"` +} + +type InlineArray struct { + InlineField []string `json:"-,inline"` + JSON InlineJSON `json:"-,metadata"` +} + +type InlineJSON struct { + InlineField Field + raw string +} + +type UnionInteger int64 + +func (UnionInteger) union() {} + +type UnionStructA struct { + Type string `json:"type"` + A string `json:"a"` + B string `json:"b"` +} + +func (UnionStructA) union() {} + +type UnionStructB struct { + Type string `json:"type"` + A string `json:"a"` +} + +func (UnionStructB) union() {} + +type UnionTime time.Time + +func (UnionTime) 
union() {} + +func init() { + RegisterUnion(reflect.TypeOf((*Union)(nil)).Elem(), "type", + UnionVariant{ + TypeFilter: gjson.String, + Type: reflect.TypeOf(UnionTime{}), + }, + UnionVariant{ + TypeFilter: gjson.Number, + Type: reflect.TypeOf(UnionInteger(0)), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + DiscriminatorValue: "typeA", + Type: reflect.TypeOf(UnionStructA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + DiscriminatorValue: "typeB", + Type: reflect.TypeOf(UnionStructB{}), + }, + ) +} + +type ComplexUnionStruct struct { + Union ComplexUnion `json:"union"` +} + +type ComplexUnion interface { + complexUnion() +} + +type ComplexUnionA struct { + Boo string `json:"boo"` + Foo bool `json:"foo"` +} + +func (ComplexUnionA) complexUnion() {} + +type ComplexUnionB struct { + Boo bool `json:"boo"` + Foo string `json:"foo"` +} + +func (ComplexUnionB) complexUnion() {} + +type ComplexUnionC struct { + Boo int64 `json:"boo"` +} + +func (ComplexUnionC) complexUnion() {} + +type ComplexUnionTypeA struct { + Baz int64 `json:"baz"` + Type TypeA `json:"type"` +} + +func (ComplexUnionTypeA) complexUnion() {} + +type TypeA string + +func (t TypeA) IsKnown() bool { + return t == "a" +} + +type ComplexUnionTypeB struct { + Baz int64 `json:"baz"` + Type TypeB `json:"type"` +} + +type TypeB string + +func (t TypeB) IsKnown() bool { + return t == "b" +} + +type UnmarshalStruct struct { + Foo string `json:"foo"` + prop bool `json:"-"` +} + +func (r *UnmarshalStruct) UnmarshalJSON(json []byte) error { + r.prop = true + return UnmarshalRoot(json, r) +} + +func (ComplexUnionTypeB) complexUnion() {} + +func init() { + RegisterUnion(reflect.TypeOf((*ComplexUnion)(nil)).Elem(), "", + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionB{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionC{}), + }, + UnionVariant{ + TypeFilter: 
gjson.JSON, + Type: reflect.TypeOf(ComplexUnionTypeA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(ComplexUnionTypeB{}), + }, + ) +} + +type MarshallingUnionStruct struct { + Union MarshallingUnion +} + +func (r *MarshallingUnionStruct) UnmarshalJSON(data []byte) (err error) { + *r = MarshallingUnionStruct{} + err = UnmarshalRoot(data, &r.Union) + return +} + +func (r MarshallingUnionStruct) MarshalJSON() (data []byte, err error) { + return MarshalRoot(r.Union) +} + +type MarshallingUnion interface { + marshallingUnion() +} + +type MarshallingUnionA struct { + Boo string `json:"boo"` +} + +func (MarshallingUnionA) marshallingUnion() {} + +func (r *MarshallingUnionA) UnmarshalJSON(data []byte) (err error) { + return UnmarshalRoot(data, r) +} + +type MarshallingUnionB struct { + Foo string `json:"foo"` +} + +func (MarshallingUnionB) marshallingUnion() {} + +func (r *MarshallingUnionB) UnmarshalJSON(data []byte) (err error) { + return UnmarshalRoot(data, r) +} + +func init() { + RegisterUnion( + reflect.TypeOf((*MarshallingUnion)(nil)).Elem(), + "", + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(MarshallingUnionA{}), + }, + UnionVariant{ + TypeFilter: gjson.JSON, + Type: reflect.TypeOf(MarshallingUnionB{}), + }, + ) +} + +var tests = map[string]struct { + buf string + val interface{} +}{ + "true": {"true", true}, + "false": {"false", false}, + "int": {"1", 1}, + "int_bigger": {"12324", 12324}, + "int_string_coerce": {`"65"`, 65}, + "int_boolean_coerce": {"true", 1}, + "int64": {"1", int64(1)}, + "int64_huge": {"123456789123456789", int64(123456789123456789)}, + "uint": {"1", uint(1)}, + "uint_bigger": {"12324", uint(12324)}, + "uint_coerce": {`"65"`, uint(65)}, + "float_1.54": {"1.54", float32(1.54)}, + "float_1.89": {"1.89", float64(1.89)}, + "string": {`"str"`, "str"}, + "string_int_coerce": {`12`, "12"}, + "array_string": {`["foo","bar"]`, []string{"foo", "bar"}}, + "array_int": {`[1,2]`, []int{1, 2}}, + 
"array_int_coerce": {`["1",2]`, []int{1, 2}}, + + "ptr_true": {"true", P(true)}, + "ptr_false": {"false", P(false)}, + "ptr_int": {"1", P(1)}, + "ptr_int_bigger": {"12324", P(12324)}, + "ptr_int_string_coerce": {`"65"`, P(65)}, + "ptr_int_boolean_coerce": {"true", P(1)}, + "ptr_int64": {"1", P(int64(1))}, + "ptr_int64_huge": {"123456789123456789", P(int64(123456789123456789))}, + "ptr_uint": {"1", P(uint(1))}, + "ptr_uint_bigger": {"12324", P(uint(12324))}, + "ptr_uint_coerce": {`"65"`, P(uint(65))}, + "ptr_float_1.54": {"1.54", P(float32(1.54))}, + "ptr_float_1.89": {"1.89", P(float64(1.89))}, + + "date_time": {`"2007-03-01T13:00:00Z"`, time.Date(2007, time.March, 1, 13, 0, 0, 0, time.UTC)}, + "date_time_nano_coerce": {`"2007-03-01T13:03:05.123456789Z"`, time.Date(2007, time.March, 1, 13, 3, 5, 123456789, time.UTC)}, + + "date_time_missing_t_coerce": {`"2007-03-01 13:03:05Z"`, time.Date(2007, time.March, 1, 13, 3, 5, 0, time.UTC)}, + "date_time_missing_timezone_coerce": {`"2007-03-01T13:03:05"`, time.Date(2007, time.March, 1, 13, 3, 5, 0, time.UTC)}, + // note: using -1200 to minimize probability of conflicting with the local timezone of the test runner + // see https://en.wikipedia.org/wiki/UTC%E2%88%9212:00 + "date_time_missing_timezone_colon_coerce": {`"2007-03-01T13:03:05-1200"`, time.Date(2007, time.March, 1, 13, 3, 5, 0, time.FixedZone("", -12*60*60))}, + "date_time_nano_missing_t_coerce": {`"2007-03-01 13:03:05.123456789Z"`, time.Date(2007, time.March, 1, 13, 3, 5, 123456789, time.UTC)}, + + "map_string": {`{"foo":"bar"}`, map[string]string{"foo": "bar"}}, + "map_interface": {`{"a":1,"b":"str","c":false}`, map[string]interface{}{"a": float64(1), "b": "str", "c": false}}, + + "primitive_struct": { + `{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4]}`, + Primitives{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + + "slices": { + 
`{"slices":[{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4]}]}`, + Slices{ + Slice: []Primitives{{A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}}, + }, + }, + + "primitive_pointer_struct": { + `{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4,5]}`, + PrimitivePointers{ + A: P(false), + B: P(237628372683), + C: P(uint(654)), + D: P(9999.43), + E: P(float32(43.76)), + F: &[]int{1, 2, 3, 4, 5}, + }, + }, + + "datetime_struct": { + `{"date":"2006-01-02","date-time":"2006-01-02T15:04:05Z"}`, + DateTime{ + Date: time.Date(2006, time.January, 2, 0, 0, 0, 0, time.UTC), + DateTime: time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC), + }, + }, + + "additional_properties": { + `{"a":true,"bar":"value","foo":true}`, + AdditionalProperties{ + A: true, + ExtraFields: map[string]interface{}{ + "bar": "value", + "foo": true, + }, + }, + }, + + "embedded_struct": { + `{"a":1,"b":"bar"}`, + EmbeddedStructs{ + EmbeddedStruct: EmbeddedStruct{ + A: true, + B: "bar", + JSON: EmbeddedStructJSON{ + A: Field{raw: `1`, status: valid}, + B: Field{raw: `"bar"`, status: valid}, + raw: `{"a":1,"b":"bar"}`, + }, + }, + A: P(1), + ExtraFields: map[string]interface{}{"b": "bar"}, + JSON: EmbeddedStructsJSON{ + A: Field{raw: `1`, status: valid}, + ExtraFields: map[string]Field{ + "b": {raw: `"bar"`, status: valid}, + }, + raw: `{"a":1,"b":"bar"}`, + }, + }, + }, + + "recursive_struct": { + `{"child":{"name":"Alex"},"name":"Robert"}`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + }, + + "metadata_coerce": { + `{"a":"12","b":"12","c":null,"extra_typed":12,"extra_untyped":{"foo":"bar"}}`, + JSONFieldStruct{ + A: false, + B: 12, + C: "", + JSON: JSONFieldStructJSON{ + raw: `{"a":"12","b":"12","c":null,"extra_typed":12,"extra_untyped":{"foo":"bar"}}`, + A: Field{raw: `"12"`, status: invalid}, + B: Field{raw: `"12"`, status: valid}, + C: Field{raw: "null", status: null}, + D: Field{raw: "", status: 
missing}, + ExtraFields: map[string]Field{ + "extra_typed": { + raw: "12", + status: valid, + }, + "extra_untyped": { + raw: `{"foo":"bar"}`, + status: invalid, + }, + }, + }, + ExtraFields: map[string]int64{ + "extra_typed": 12, + "extra_untyped": 0, + }, + }, + }, + + "unknown_struct_number": { + `{"unknown":12}`, + UnknownStruct{ + Unknown: 12., + }, + }, + + "unknown_struct_map": { + `{"unknown":{"foo":"bar"}}`, + UnknownStruct{ + Unknown: map[string]interface{}{ + "foo": "bar", + }, + }, + }, + + "union_integer": { + `{"union":12}`, + UnionStruct{ + Union: UnionInteger(12), + }, + }, + + "union_struct_discriminated_a": { + `{"union":{"a":"foo","b":"bar","type":"typeA"}}`, + UnionStruct{ + Union: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }, + }, + }, + + "union_struct_discriminated_b": { + `{"union":{"a":"foo","type":"typeB"}}`, + UnionStruct{ + Union: UnionStructB{ + Type: "typeB", + A: "foo", + }, + }, + }, + + "union_struct_time": { + `{"union":"2010-05-23"}`, + UnionStruct{ + Union: UnionTime(time.Date(2010, 05, 23, 0, 0, 0, 0, time.UTC)), + }, + }, + + "complex_union_a": { + `{"union":{"boo":"12","foo":true}}`, + ComplexUnionStruct{Union: ComplexUnionA{Boo: "12", Foo: true}}, + }, + + "complex_union_b": { + `{"union":{"boo":true,"foo":"12"}}`, + ComplexUnionStruct{Union: ComplexUnionB{Boo: true, Foo: "12"}}, + }, + + "complex_union_c": { + `{"union":{"boo":12}}`, + ComplexUnionStruct{Union: ComplexUnionC{Boo: 12}}, + }, + + "complex_union_type_a": { + `{"union":{"baz":12,"type":"a"}}`, + ComplexUnionStruct{Union: ComplexUnionTypeA{Baz: 12, Type: TypeA("a")}}, + }, + + "complex_union_type_b": { + `{"union":{"baz":12,"type":"b"}}`, + ComplexUnionStruct{Union: ComplexUnionTypeB{Baz: 12, Type: TypeB("b")}}, + }, + + "marshalling_union_a": { + `{"boo":"hello"}`, + MarshallingUnionStruct{Union: MarshallingUnionA{Boo: "hello"}}, + }, + "marshalling_union_b": { + `{"foo":"hi"}`, + MarshallingUnionStruct{Union: MarshallingUnionB{Foo: "hi"}}, + }, + + 
"unmarshal": { + `{"foo":"hello"}`, + &UnmarshalStruct{Foo: "hello", prop: true}, + }, + + "array_of_unmarshal": { + `[{"foo":"hello"}]`, + []UnmarshalStruct{{Foo: "hello", prop: true}}, + }, + + "inline_coerce": { + `{"a":false,"b":237628372683,"c":654,"d":9999.43,"e":43.76,"f":[1,2,3,4]}`, + Inline{ + InlineField: Primitives{A: false, B: 237628372683, C: 0x28e, D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + JSON: InlineJSON{ + InlineField: Field{raw: "{\"a\":false,\"b\":237628372683,\"c\":654,\"d\":9999.43,\"e\":43.76,\"f\":[1,2,3,4]}", status: 3}, + raw: "{\"a\":false,\"b\":237628372683,\"c\":654,\"d\":9999.43,\"e\":43.76,\"f\":[1,2,3,4]}", + }, + }, + }, + + "inline_array_coerce": { + `["Hello","foo","bar"]`, + InlineArray{ + InlineField: []string{"Hello", "foo", "bar"}, + JSON: InlineJSON{ + InlineField: Field{raw: `["Hello","foo","bar"]`, status: 3}, + raw: `["Hello","foo","bar"]`, + }, + }, + }, +} + +func TestDecode(t *testing.T) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { + result := reflect.New(reflect.TypeOf(test.val)) + if err := Unmarshal([]byte(test.buf), result.Interface()); err != nil { + t.Fatalf("deserialization of %v failed with error %v", result, err) + } + if !reflect.DeepEqual(result.Elem().Interface(), test.val) { + t.Fatalf("expected '%s' to deserialize to \n%#v\nbut got\n%#v", test.buf, test.val, result.Elem().Interface()) + } + }) + } +} + +func TestEncode(t *testing.T) { + for name, test := range tests { + if strings.HasSuffix(name, "_coerce") { + continue + } + t.Run(name, func(t *testing.T) { + raw, err := Marshal(test.val) + if err != nil { + t.Fatalf("serialization of %v failed with error %v", test.val, err) + } + if string(raw) != test.buf { + t.Fatalf("expected %+#v to serialize to %s but got %s", test.val, test.buf, string(raw)) + } + }) + } +} diff --git a/internal/apijson/port.go b/internal/apijson/port.go new file mode 100644 index 0000000..80b323b --- /dev/null +++ b/internal/apijson/port.go @@ -0,0 
+1,107 @@ +package apijson + +import ( + "fmt" + "reflect" +) + +// Port copies over values from one struct to another struct. +func Port(from any, to any) error { + toVal := reflect.ValueOf(to) + fromVal := reflect.ValueOf(from) + + if toVal.Kind() != reflect.Ptr || toVal.IsNil() { + return fmt.Errorf("destination must be a non-nil pointer") + } + + for toVal.Kind() == reflect.Ptr { + toVal = toVal.Elem() + } + toType := toVal.Type() + + for fromVal.Kind() == reflect.Ptr { + fromVal = fromVal.Elem() + } + fromType := fromVal.Type() + + if toType.Kind() != reflect.Struct { + return fmt.Errorf("destination must be a non-nil pointer to a struct (%v %v)", toType, toType.Kind()) + } + + values := map[string]reflect.Value{} + fields := map[string]reflect.Value{} + + fromJSON := fromVal.FieldByName("JSON") + toJSON := toVal.FieldByName("JSON") + + // First, iterate through the from fields and load all the "normal" fields in the struct to the map of + // string to reflect.Value, as well as their raw .JSON.Foo counterpart. + for i := 0; i < fromType.NumField(); i++ { + field := fromType.Field(i) + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + if ptag.name == "-" { + continue + } + values[ptag.name] = fromVal.Field(i) + if fromJSON.IsValid() { + fields[ptag.name] = fromJSON.FieldByName(field.Name) + } + } + + // Use the values from the previous step to populate the 'to' struct. 
+ for i := 0; i < toType.NumField(); i++ { + field := toType.Field(i) + ptag, ok := parseJSONStructTag(field) + if !ok { + continue + } + if ptag.name == "-" { + continue + } + if value, ok := values[ptag.name]; ok { + delete(values, ptag.name) + if field.Type.Kind() == reflect.Interface { + toVal.Field(i).Set(value) + } else { + switch value.Kind() { + case reflect.String: + toVal.Field(i).SetString(value.String()) + case reflect.Bool: + toVal.Field(i).SetBool(value.Bool()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + toVal.Field(i).SetInt(value.Int()) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + toVal.Field(i).SetUint(value.Uint()) + case reflect.Float32, reflect.Float64: + toVal.Field(i).SetFloat(value.Float()) + default: + toVal.Field(i).Set(value) + } + } + } + + if fromJSONField, ok := fields[ptag.name]; ok { + if toJSONField := toJSON.FieldByName(field.Name); toJSONField.IsValid() { + toJSONField.Set(fromJSONField) + } + } + } + + // Finally, copy over the .JSON.raw and .JSON.ExtraFields + if toJSON.IsValid() { + if raw := toJSON.FieldByName("raw"); raw.IsValid() { + setUnexportedField(raw, fromJSON.Interface().(interface{ RawJSON() string }).RawJSON()) + } + + if toExtraFields := toJSON.FieldByName("ExtraFields"); toExtraFields.IsValid() { + if fromExtraFields := fromJSON.FieldByName("ExtraFields"); fromExtraFields.IsValid() { + setUnexportedField(toExtraFields, fromExtraFields.Interface()) + } + } + } + + return nil +} diff --git a/internal/apijson/port_test.go b/internal/apijson/port_test.go new file mode 100644 index 0000000..f9b6e3f --- /dev/null +++ b/internal/apijson/port_test.go @@ -0,0 +1,178 @@ +package apijson + +import ( + "reflect" + "testing" +) + +type Metadata struct { + CreatedAt string `json:"created_at"` +} + +// Card is the "combined" type of CardVisa and CardMastercard +type Card struct { + Processor CardProcessor `json:"processor"` + Data any `json:"data"` + 
IsFoo bool `json:"is_foo"` + IsBar bool `json:"is_bar"` + Metadata Metadata `json:"metadata"` + Value interface{} `json:"value"` + + JSON cardJSON +} + +type cardJSON struct { + Processor Field + Data Field + IsFoo Field + IsBar Field + Metadata Field + Value Field + ExtraFields map[string]Field + raw string +} + +func (r cardJSON) RawJSON() string { return r.raw } + +type CardProcessor string + +// CardVisa +type CardVisa struct { + Processor CardVisaProcessor `json:"processor"` + Data CardVisaData `json:"data"` + IsFoo bool `json:"is_foo"` + Metadata Metadata `json:"metadata"` + Value string `json:"value"` + + JSON cardVisaJSON +} + +type cardVisaJSON struct { + Processor Field + Data Field + IsFoo Field + Metadata Field + Value Field + ExtraFields map[string]Field + raw string +} + +func (r cardVisaJSON) RawJSON() string { return r.raw } + +type CardVisaProcessor string + +type CardVisaData struct { + Foo string `json:"foo"` +} + +// CardMastercard +type CardMastercard struct { + Processor CardMastercardProcessor `json:"processor"` + Data CardMastercardData `json:"data"` + IsBar bool `json:"is_bar"` + Metadata Metadata `json:"metadata"` + Value bool `json:"value"` + + JSON cardMastercardJSON +} + +type cardMastercardJSON struct { + Processor Field + Data Field + IsBar Field + Metadata Field + Value Field + ExtraFields map[string]Field + raw string +} + +func (r cardMastercardJSON) RawJSON() string { return r.raw } + +type CardMastercardProcessor string + +type CardMastercardData struct { + Bar int64 `json:"bar"` +} + +var portTests = map[string]struct { + from any + to any +}{ + "visa to card": { + CardVisa{ + Processor: "visa", + IsFoo: true, + Data: CardVisaData{ + Foo: "foo", + }, + Metadata: Metadata{ + CreatedAt: "Mar 29 2024", + }, + Value: "value", + JSON: cardVisaJSON{ + raw: `{"processor":"visa","is_foo":true,"data":{"foo":"foo"}}`, + Processor: Field{raw: `"visa"`, status: valid}, + IsFoo: Field{raw: `true`, status: valid}, + Data: Field{raw: 
`{"foo":"foo"}`, status: valid}, + Value: Field{raw: `"value"`, status: valid}, + ExtraFields: map[string]Field{"extra": {raw: `"yo"`, status: valid}}, + }, + }, + Card{ + Processor: "visa", + IsFoo: true, + IsBar: false, + Data: CardVisaData{ + Foo: "foo", + }, + Metadata: Metadata{ + CreatedAt: "Mar 29 2024", + }, + Value: "value", + JSON: cardJSON{ + raw: `{"processor":"visa","is_foo":true,"data":{"foo":"foo"}}`, + Processor: Field{raw: `"visa"`, status: valid}, + IsFoo: Field{raw: `true`, status: valid}, + Data: Field{raw: `{"foo":"foo"}`, status: valid}, + Value: Field{raw: `"value"`, status: valid}, + ExtraFields: map[string]Field{"extra": {raw: `"yo"`, status: valid}}, + }, + }, + }, + "mastercard to card": { + CardMastercard{ + Processor: "mastercard", + IsBar: true, + Data: CardMastercardData{ + Bar: 13, + }, + Value: false, + }, + Card{ + Processor: "mastercard", + IsFoo: false, + IsBar: true, + Data: CardMastercardData{ + Bar: 13, + }, + Value: false, + }, + }, +} + +func TestPort(t *testing.T) { + for name, test := range portTests { + t.Run(name, func(t *testing.T) { + toVal := reflect.New(reflect.TypeOf(test.to)) + + err := Port(test.from, toVal.Interface()) + if err != nil { + t.Fatalf("port of %v failed with error %v", test.from, err) + } + + if !reflect.DeepEqual(toVal.Elem().Interface(), test.to) { + t.Fatalf("expected:\n%+#v\n\nto port to:\n%+#v\n\nbut got:\n%+#v", test.from, test.to, toVal.Elem().Interface()) + } + }) + } +} diff --git a/internal/apijson/registry.go b/internal/apijson/registry.go new file mode 100644 index 0000000..2ea00ae --- /dev/null +++ b/internal/apijson/registry.go @@ -0,0 +1,31 @@ +package apijson + +import ( + "reflect" + + "github.com/tidwall/gjson" +) + +type UnionVariant struct { + TypeFilter gjson.Type + DiscriminatorValue interface{} + Type reflect.Type +} + +var unionRegistry = map[reflect.Type]unionEntry{} +var unionVariants = map[reflect.Type]interface{}{} + +type unionEntry struct { + discriminatorKey string + 
variants []UnionVariant +} + +func RegisterUnion(typ reflect.Type, discriminator string, variants ...UnionVariant) { + unionRegistry[typ] = unionEntry{ + discriminatorKey: discriminator, + variants: variants, + } + for _, variant := range variants { + unionVariants[variant.Type] = typ + } +} diff --git a/internal/apijson/tag.go b/internal/apijson/tag.go new file mode 100644 index 0000000..812fb3c --- /dev/null +++ b/internal/apijson/tag.go @@ -0,0 +1,47 @@ +package apijson + +import ( + "reflect" + "strings" +) + +const jsonStructTag = "json" +const formatStructTag = "format" + +type parsedStructTag struct { + name string + required bool + extras bool + metadata bool + inline bool +} + +func parseJSONStructTag(field reflect.StructField) (tag parsedStructTag, ok bool) { + raw, ok := field.Tag.Lookup(jsonStructTag) + if !ok { + return + } + parts := strings.Split(raw, ",") + if len(parts) == 0 { + return tag, false + } + tag.name = parts[0] + for _, part := range parts[1:] { + switch part { + case "required": + tag.required = true + case "extras": + tag.extras = true + case "metadata": + tag.metadata = true + case "inline": + tag.inline = true + } + } + return +} + +func parseFormatStructTag(field reflect.StructField) (format string, ok bool) { + format, ok = field.Tag.Lookup(formatStructTag) + return +} diff --git a/internal/apiquery/encoder.go b/internal/apiquery/encoder.go new file mode 100644 index 0000000..e3287bd --- /dev/null +++ b/internal/apiquery/encoder.go @@ -0,0 +1,341 @@ +package apiquery + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "time" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/param" +) + +var encoders sync.Map // map[reflect.Type]encoderFunc + +type encoder struct { + dateFormat string + root bool + settings QuerySettings +} + +type encoderFunc func(key string, value reflect.Value) []Pair + +type encoderField struct { + tag parsedStructTag + fn encoderFunc + idx []int +} + +type encoderEntry 
struct { + reflect.Type + dateFormat string + root bool + settings QuerySettings +} + +type Pair struct { + key string + value string +} + +func (e *encoder) typeEncoder(t reflect.Type) encoderFunc { + entry := encoderEntry{ + Type: t, + dateFormat: e.dateFormat, + root: e.root, + settings: e.settings, + } + + if fi, ok := encoders.Load(entry); ok { + return fi.(encoderFunc) + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. + var ( + wg sync.WaitGroup + f encoderFunc + ) + wg.Add(1) + fi, loaded := encoders.LoadOrStore(entry, encoderFunc(func(key string, v reflect.Value) []Pair { + wg.Wait() + return f(key, v) + })) + if loaded { + return fi.(encoderFunc) + } + + // Compute the real encoder and replace the indirect func with it. + f = e.newTypeEncoder(t) + wg.Done() + encoders.Store(entry, f) + return f +} + +func marshalerEncoder(key string, value reflect.Value) []Pair { + s, _ := value.Interface().(json.Marshaler).MarshalJSON() + return []Pair{{key, string(s)}} +} + +func (e *encoder) newTypeEncoder(t reflect.Type) encoderFunc { + if t.ConvertibleTo(reflect.TypeOf(time.Time{})) { + return e.newTimeTypeEncoder(t) + } + if !e.root && t.Implements(reflect.TypeOf((*json.Marshaler)(nil)).Elem()) { + return marshalerEncoder + } + e.root = false + switch t.Kind() { + case reflect.Pointer: + encoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) (pairs []Pair) { + if !value.IsValid() || value.IsNil() { + return + } + pairs = encoder(key, value.Elem()) + return + } + case reflect.Struct: + return e.newStructTypeEncoder(t) + case reflect.Array: + fallthrough + case reflect.Slice: + return e.newArrayTypeEncoder(t) + case reflect.Map: + return e.newMapEncoder(t) + case reflect.Interface: + return e.newInterfaceEncoder() + default: + return 
e.newPrimitiveTypeEncoder(t) + } +} + +func (e *encoder) newStructTypeEncoder(t reflect.Type) encoderFunc { + if t.Implements(reflect.TypeOf((*param.FieldLike)(nil)).Elem()) { + return e.newFieldTypeEncoder(t) + } + + encoderFields := []encoderField{} + + // This helper allows us to recursively collect field encoders into a flat + // array. The parameter `index` keeps track of the access patterns necessary + // to get to some field. + var collectEncoderFields func(r reflect.Type, index []int) + collectEncoderFields = func(r reflect.Type, index []int) { + for i := 0; i < r.NumField(); i++ { + idx := append(index, i) + field := t.FieldByIndex(idx) + if !field.IsExported() { + continue + } + // If this is an embedded struct, traverse one level deeper to extract + // the field and get their encoders as well. + if field.Anonymous { + collectEncoderFields(field.Type, idx) + continue + } + // If query tag is not present, then we skip, which is intentionally + // different behavior from the stdlib. + ptag, ok := parseQueryStructTag(field) + if !ok { + continue + } + + if ptag.name == "-" && !ptag.inline { + continue + } + + dateFormat, ok := parseFormatStructTag(field) + oldFormat := e.dateFormat + if ok { + switch dateFormat { + case "date-time": + e.dateFormat = time.RFC3339 + case "date": + e.dateFormat = "2006-01-02" + } + } + encoderFields = append(encoderFields, encoderField{ptag, e.typeEncoder(field.Type), idx}) + e.dateFormat = oldFormat + } + } + collectEncoderFields(t, []int{}) + + return func(key string, value reflect.Value) (pairs []Pair) { + for _, ef := range encoderFields { + var subkey string = e.renderKeyPath(key, ef.tag.name) + if ef.tag.inline { + subkey = key + } + + field := value.FieldByIndex(ef.idx) + pairs = append(pairs, ef.fn(subkey, field)...) 
+ } + return + } +} + +func (e *encoder) newMapEncoder(t reflect.Type) encoderFunc { + keyEncoder := e.typeEncoder(t.Key()) + elementEncoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) (pairs []Pair) { + iter := value.MapRange() + for iter.Next() { + encodedKey := keyEncoder("", iter.Key()) + if len(encodedKey) != 1 { + panic("Unexpected number of parts for encoded map key. Are you using a non-primitive for this map?") + } + subkey := encodedKey[0].value + keyPath := e.renderKeyPath(key, subkey) + pairs = append(pairs, elementEncoder(keyPath, iter.Value())...) + } + return + } +} + +func (e *encoder) renderKeyPath(key string, subkey string) string { + if len(key) == 0 { + return subkey + } + if e.settings.NestedFormat == NestedQueryFormatDots { + return fmt.Sprintf("%s.%s", key, subkey) + } + return fmt.Sprintf("%s[%s]", key, subkey) +} + +func (e *encoder) newArrayTypeEncoder(t reflect.Type) encoderFunc { + switch e.settings.ArrayFormat { + case ArrayQueryFormatComma: + innerEncoder := e.typeEncoder(t.Elem()) + return func(key string, v reflect.Value) []Pair { + elements := []string{} + for i := 0; i < v.Len(); i++ { + for _, pair := range innerEncoder("", v.Index(i)) { + elements = append(elements, pair.value) + } + } + if len(elements) == 0 { + return []Pair{} + } + return []Pair{{key, strings.Join(elements, ",")}} + } + case ArrayQueryFormatRepeat: + innerEncoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) (pairs []Pair) { + for i := 0; i < value.Len(); i++ { + pairs = append(pairs, innerEncoder(key, value.Index(i))...) + } + return pairs + } + case ArrayQueryFormatIndices: + panic("The array indices format is not supported yet") + case ArrayQueryFormatBrackets: + innerEncoder := e.typeEncoder(t.Elem()) + return func(key string, value reflect.Value) []Pair { + pairs := []Pair{} + for i := 0; i < value.Len(); i++ { + pairs = append(pairs, innerEncoder(key+"[]", value.Index(i))...) 
+ } + return pairs + } + default: + panic(fmt.Sprintf("Unknown ArrayFormat value: %d", e.settings.ArrayFormat)) + } +} + +func (e *encoder) newPrimitiveTypeEncoder(t reflect.Type) encoderFunc { + switch t.Kind() { + case reflect.Pointer: + inner := t.Elem() + + innerEncoder := e.newPrimitiveTypeEncoder(inner) + return func(key string, v reflect.Value) []Pair { + if !v.IsValid() || v.IsNil() { + return nil + } + return innerEncoder(key, v.Elem()) + } + case reflect.String: + return func(key string, v reflect.Value) []Pair { + return []Pair{{key, v.String()}} + } + case reflect.Bool: + return func(key string, v reflect.Value) []Pair { + if v.Bool() { + return []Pair{{key, "true"}} + } + return []Pair{{key, "false"}} + } + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64: + return func(key string, v reflect.Value) []Pair { + return []Pair{{key, strconv.FormatInt(v.Int(), 10)}} + } + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return func(key string, v reflect.Value) []Pair { + return []Pair{{key, strconv.FormatUint(v.Uint(), 10)}} + } + case reflect.Float32, reflect.Float64: + return func(key string, v reflect.Value) []Pair { + return []Pair{{key, strconv.FormatFloat(v.Float(), 'f', -1, 64)}} + } + case reflect.Complex64, reflect.Complex128: + bitSize := 64 + if t.Kind() == reflect.Complex128 { + bitSize = 128 + } + return func(key string, v reflect.Value) []Pair { + return []Pair{{key, strconv.FormatComplex(v.Complex(), 'f', -1, bitSize)}} + } + default: + return func(key string, v reflect.Value) []Pair { + return nil + } + } +} + +func (e *encoder) newFieldTypeEncoder(t reflect.Type) encoderFunc { + f, _ := t.FieldByName("Value") + enc := e.typeEncoder(f.Type) + + return func(key string, value reflect.Value) []Pair { + present := value.FieldByName("Present") + if !present.Bool() { + return nil + } + null := value.FieldByName("Null") + if null.Bool() { + // TODO: Error? 
+ return nil + } + raw := value.FieldByName("Raw") + if !raw.IsNil() { + return e.typeEncoder(raw.Type())(key, raw) + } + return enc(key, value.FieldByName("Value")) + } +} + +func (e *encoder) newTimeTypeEncoder(t reflect.Type) encoderFunc { + format := e.dateFormat + return func(key string, value reflect.Value) []Pair { + return []Pair{{ + key, + value.Convert(reflect.TypeOf(time.Time{})).Interface().(time.Time).Format(format), + }} + } +} + +func (e encoder) newInterfaceEncoder() encoderFunc { + return func(key string, value reflect.Value) []Pair { + value = value.Elem() + if !value.IsValid() { + return nil + } + return e.typeEncoder(value.Type())(key, value) + } + +} diff --git a/internal/apiquery/query.go b/internal/apiquery/query.go new file mode 100644 index 0000000..6f90e99 --- /dev/null +++ b/internal/apiquery/query.go @@ -0,0 +1,50 @@ +package apiquery + +import ( + "net/url" + "reflect" + "time" +) + +func MarshalWithSettings(value interface{}, settings QuerySettings) url.Values { + e := encoder{time.RFC3339, true, settings} + kv := url.Values{} + val := reflect.ValueOf(value) + if !val.IsValid() { + return nil + } + typ := val.Type() + for _, pair := range e.typeEncoder(typ)("", val) { + kv.Add(pair.key, pair.value) + } + return kv +} + +func Marshal(value interface{}) url.Values { + return MarshalWithSettings(value, QuerySettings{}) +} + +type Queryer interface { + URLQuery() url.Values +} + +type QuerySettings struct { + NestedFormat NestedQueryFormat + ArrayFormat ArrayQueryFormat +} + +type NestedQueryFormat int + +const ( + NestedQueryFormatBrackets NestedQueryFormat = iota + NestedQueryFormatDots +) + +type ArrayQueryFormat int + +const ( + ArrayQueryFormatComma ArrayQueryFormat = iota + ArrayQueryFormatRepeat + ArrayQueryFormatIndices + ArrayQueryFormatBrackets +) diff --git a/internal/apiquery/query_test.go b/internal/apiquery/query_test.go new file mode 100644 index 0000000..1e740d6 --- /dev/null +++ b/internal/apiquery/query_test.go @@ -0,0 
+1,335 @@ +package apiquery + +import ( + "net/url" + "testing" + "time" +) + +func P[T any](v T) *T { return &v } + +type Primitives struct { + A bool `query:"a"` + B int `query:"b"` + C uint `query:"c"` + D float64 `query:"d"` + E float32 `query:"e"` + F []int `query:"f"` +} + +type PrimitivePointers struct { + A *bool `query:"a"` + B *int `query:"b"` + C *uint `query:"c"` + D *float64 `query:"d"` + E *float32 `query:"e"` + F *[]int `query:"f"` +} + +type Slices struct { + Slice []Primitives `query:"slices"` + Mixed []interface{} `query:"mixed"` +} + +type DateTime struct { + Date time.Time `query:"date" format:"date"` + DateTime time.Time `query:"date-time" format:"date-time"` +} + +type AdditionalProperties struct { + A bool `query:"a"` + Extras map[string]interface{} `query:"-,inline"` +} + +type Recursive struct { + Name string `query:"name"` + Child *Recursive `query:"child"` +} + +type UnknownStruct struct { + Unknown interface{} `query:"unknown"` +} + +type UnionStruct struct { + Union Union `query:"union" format:"date"` +} + +type Union interface { + union() +} + +type UnionInteger int64 + +func (UnionInteger) union() {} + +type UnionString string + +func (UnionString) union() {} + +type UnionStructA struct { + Type string `query:"type"` + A string `query:"a"` + B string `query:"b"` +} + +func (UnionStructA) union() {} + +type UnionStructB struct { + Type string `query:"type"` + A string `query:"a"` +} + +func (UnionStructB) union() {} + +type UnionTime time.Time + +func (UnionTime) union() {} + +type DeeplyNested struct { + A DeeplyNested1 `query:"a"` +} + +type DeeplyNested1 struct { + B DeeplyNested2 `query:"b"` +} + +type DeeplyNested2 struct { + C DeeplyNested3 `query:"c"` +} + +type DeeplyNested3 struct { + D *string `query:"d"` +} + +var tests = map[string]struct { + enc string + val interface{} + settings QuerySettings +}{ + "primitives": { + "a=false&b=237628372683&c=654&d=9999.43&e=43.7599983215332&f=1,2,3,4", + Primitives{A: false, B: 
237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + QuerySettings{}, + }, + + "slices_brackets": { + `mixed[]=1&mixed[]=2.3&mixed[]=hello&slices[][a]=false&slices[][a]=false&slices[][b]=237628372683&slices[][b]=237628372683&slices[][c]=654&slices[][c]=654&slices[][d]=9999.43&slices[][d]=9999.43&slices[][e]=43.7599983215332&slices[][e]=43.7599983215332&slices[][f][]=1&slices[][f][]=2&slices[][f][]=3&slices[][f][]=4&slices[][f][]=1&slices[][f][]=2&slices[][f][]=3&slices[][f][]=4`, + Slices{ + Slice: []Primitives{ + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + Mixed: []interface{}{1, 2.3, "hello"}, + }, + QuerySettings{ArrayFormat: ArrayQueryFormatBrackets}, + }, + + "slices_comma": { + `mixed=1,2.3,hello`, + Slices{ + Mixed: []interface{}{1, 2.3, "hello"}, + }, + QuerySettings{ArrayFormat: ArrayQueryFormatComma}, + }, + + "slices_repeat": { + `mixed=1&mixed=2.3&mixed=hello&slices[a]=false&slices[a]=false&slices[b]=237628372683&slices[b]=237628372683&slices[c]=654&slices[c]=654&slices[d]=9999.43&slices[d]=9999.43&slices[e]=43.7599983215332&slices[e]=43.7599983215332&slices[f]=1&slices[f]=2&slices[f]=3&slices[f]=4&slices[f]=1&slices[f]=2&slices[f]=3&slices[f]=4`, + Slices{ + Slice: []Primitives{ + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + {A: false, B: 237628372683, C: uint(654), D: 9999.43, E: 43.76, F: []int{1, 2, 3, 4}}, + }, + Mixed: []interface{}{1, 2.3, "hello"}, + }, + QuerySettings{ArrayFormat: ArrayQueryFormatRepeat}, + }, + + "primitive_pointer_struct": { + "a=false&b=237628372683&c=654&d=9999.43&e=43.7599983215332&f=1,2,3,4,5", + PrimitivePointers{ + A: P(false), + B: P(237628372683), + C: P(uint(654)), + D: P(9999.43), + E: P(float32(43.76)), + F: &[]int{1, 2, 3, 4, 5}, + }, + QuerySettings{}, + }, + + "datetime_struct": { + 
`date=2006-01-02&date-time=2006-01-02T15:04:05Z`, + DateTime{ + Date: time.Date(2006, time.January, 2, 0, 0, 0, 0, time.UTC), + DateTime: time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC), + }, + QuerySettings{}, + }, + + "additional_properties": { + `a=true&bar=value&foo=true`, + AdditionalProperties{ + A: true, + Extras: map[string]interface{}{ + "bar": "value", + "foo": true, + }, + }, + QuerySettings{}, + }, + + "recursive_struct_brackets": { + `child[name]=Alex&name=Robert`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + QuerySettings{NestedFormat: NestedQueryFormatBrackets}, + }, + + "recursive_struct_dots": { + `child.name=Alex&name=Robert`, + Recursive{Name: "Robert", Child: &Recursive{Name: "Alex"}}, + QuerySettings{NestedFormat: NestedQueryFormatDots}, + }, + + "unknown_struct_number": { + `unknown=12`, + UnknownStruct{ + Unknown: 12., + }, + QuerySettings{}, + }, + + "unknown_struct_map_brackets": { + `unknown[foo]=bar`, + UnknownStruct{ + Unknown: map[string]interface{}{ + "foo": "bar", + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatBrackets}, + }, + + "unknown_struct_map_dots": { + `unknown.foo=bar`, + UnknownStruct{ + Unknown: map[string]interface{}{ + "foo": "bar", + }, + }, + QuerySettings{NestedFormat: NestedQueryFormatDots}, + }, + + "union_string": { + `union=hello`, + UnionStruct{ + Union: UnionString("hello"), + }, + QuerySettings{}, + }, + + "union_integer": { + `union=12`, + UnionStruct{ + Union: UnionInteger(12), + }, + QuerySettings{}, + }, + + "union_struct_discriminated_a": { + `union[a]=foo&union[b]=bar&union[type]=typeA`, + UnionStruct{ + Union: UnionStructA{ + Type: "typeA", + A: "foo", + B: "bar", + }, + }, + QuerySettings{}, + }, + + "union_struct_discriminated_b": { + `union[a]=foo&union[type]=typeB`, + UnionStruct{ + Union: UnionStructB{ + Type: "typeB", + A: "foo", + }, + }, + QuerySettings{}, + }, + + "union_struct_time": { + `union=2010-05-23`, + UnionStruct{ + Union: UnionTime(time.Date(2010, 
// Struct-tag names understood by the apiquery encoder.
const queryStructTag = "query"
const formatStructTag = "format"

// parsedStructTag holds the decoded contents of a `query:"..."` struct tag.
type parsedStructTag struct {
	name      string // first comma-separated element of the tag
	omitempty bool   // the "omitempty" option was present
	inline    bool   // the "inline" option was present
}

// parseQueryStructTag decodes the `query` struct tag on field. ok reports
// whether the field carries a `query` tag at all; when it does not, the
// zero-valued tag is returned.
func parseQueryStructTag(field reflect.StructField) (tag parsedStructTag, ok bool) {
	raw, ok := field.Tag.Lookup(queryStructTag)
	if !ok {
		return
	}
	// strings.Split always returns at least one element, so parts[0] is
	// safe to read; the previous `len(parts) == 0` guard was unreachable
	// and has been removed.
	parts := strings.Split(raw, ",")
	tag.name = parts[0]
	for _, part := range parts[1:] {
		switch part {
		case "omitempty":
			tag.omitempty = true
		case "inline":
			tag.inline = true
		}
	}
	return
}

// parseFormatStructTag reports the value of the `format` struct tag on
// field (e.g. "date" or "date-time") and whether it was present.
func parseFormatStructTag(field reflect.StructField) (format string, ok bool) {
	format, ok = field.Tag.Lookup(formatStructTag)
	return
}
// getNormalizedOS maps runtime.GOOS onto the normalized operating-system
// name reported in the X-Stainless-OS telemetry header. Unrecognized
// platforms are reported as "Other:<goos>".
func getNormalizedOS() string {
	switch runtime.GOOS {
	case "ios":
		return "iOS"
	case "android":
		return "Android"
	case "darwin":
		return "MacOS"
	case "windows":
		// Fix: this case previously matched "window", which is never a
		// runtime.GOOS value, so Windows was reported as "Other:windows".
		return "Windows"
	case "freebsd":
		return "FreeBSD"
	case "openbsd":
		return "OpenBSD"
	case "linux":
		return "Linux"
	default:
		return fmt.Sprintf("Other:%s", runtime.GOOS)
	}
}
bytes.NewBuffer(content) + hasSerializationFunc = true + } + if body, ok := body.(apiform.Marshaler); ok { + var ( + content []byte + err error + ) + content, contentType, err = body.MarshalMultipart() + if err != nil { + return nil, err + } + reader = bytes.NewBuffer(content) + hasSerializationFunc = true + } + if body, ok := body.(apiquery.Queryer); ok { + hasSerializationFunc = true + params := body.URLQuery().Encode() + if params != "" { + u = u + "?" + params + } + } + if body, ok := body.([]byte); ok { + reader = bytes.NewBuffer(body) + hasSerializationFunc = true + } + if body, ok := body.(io.Reader); ok { + reader = body + hasSerializationFunc = true + } + + // Fallback to json serialization if none of the serialization functions that we expect + // to see is present. + if body != nil && !hasSerializationFunc { + content, err := json.Marshal(body) + if err != nil { + return nil, err + } + reader = bytes.NewBuffer(content) + } + + req, err := http.NewRequestWithContext(ctx, method, u, nil) + if err != nil { + return nil, err + } + if reader != nil { + req.Header.Set("Content-Type", contentType) + } + + req.Header.Set("Accept", "application/json") + req.Header.Set("X-Stainless-Retry-Count", "0") + for k, v := range getDefaultHeaders() { + req.Header.Add(k, v) + } + + for k, v := range getPlatformProperties() { + req.Header.Add(k, v) + } + cfg := RequestConfig{ + MaxRetries: 2, + Context: ctx, + Request: req, + HTTPClient: http.DefaultClient, + Body: reader, + } + cfg.ResponseBodyInto = dst + err = cfg.Apply(opts...) + if err != nil { + return nil, err + } + return &cfg, nil +} + +// RequestConfig represents all the state related to one request. +// +// Editing the variables inside RequestConfig directly is unstable api. Prefer +// composing func(\*RequestConfig) error instead if possible. 
+type RequestConfig struct { + MaxRetries int + RequestTimeout time.Duration + Context context.Context + Request *http.Request + BaseURL *url.URL + HTTPClient *http.Client + Middlewares []middleware + // If ResponseBodyInto not nil, then we will attempt to deserialize into + // ResponseBodyInto. If Destination is a []byte, then it will return the body as + // is. + ResponseBodyInto interface{} + // ResponseInto copies the \*http.Response of the corresponding request into the + // given address + ResponseInto **http.Response + Body io.Reader +} + +// middleware is exactly the same type as the Middleware type found in the [option] package, +// but it is redeclared here for circular dependency issues. +type middleware = func(*http.Request, middlewareNext) (*http.Response, error) + +// middlewareNext is exactly the same type as the MiddlewareNext type found in the [option] package, +// but it is redeclared here for circular dependency issues. +type middlewareNext = func(*http.Request) (*http.Response, error) + +func applyMiddleware(middleware middleware, next middlewareNext) middlewareNext { + return func(req *http.Request) (res *http.Response, err error) { + return middleware(req, next) + } +} + +func shouldRetry(req *http.Request, res *http.Response) bool { + // If there is no way to recover the Body, then we shouldn't retry. + if req.Body != nil && req.GetBody == nil { + return false + } + + // If there is no response, that indicates that there is a connection error + // so we retry the request. + if res == nil { + return true + } + + // If the header explictly wants a retry behavior, respect that over the + // http status code. 
// parseRetryAfterHeader inspects resp for a server-provided retry delay.
// The millisecond-precision "Retry-After-Ms" header is preferred; the
// standard "Retry-After" header is consulted next and may hold either a
// number of seconds or an HTTP-date saying when to try again. The boolean
// reports whether a usable value was found.
func parseRetryAfterHeader(resp *http.Response) (time.Duration, bool) {
	if resp == nil {
		return 0, false
	}

	// Millisecond-precision header takes priority over the standard one.
	if v := resp.Header.Get("Retry-After-Ms"); v != "" {
		if ms, err := strconv.ParseFloat(v, 64); err == nil {
			return time.Duration(ms * float64(time.Millisecond)), true
		}
	}

	if v := resp.Header.Get("Retry-After"); v != "" {
		// A plain number is a count of seconds.
		if secs, err := strconv.ParseFloat(v, 64); err == nil {
			return time.Duration(secs * float64(time.Second)), true
		}
		// Otherwise it may be an HTTP-date indicating when to retry.
		if at, err := time.Parse(time.RFC1123, v); err == nil {
			return time.Until(at), true
		}
	}

	return 0, false
}
+ + if retryAfterDelay, ok := parseRetryAfterHeader(res); ok && 0 <= retryAfterDelay && retryAfterDelay < time.Minute { + return retryAfterDelay + } + + maxDelay := 8 * time.Second + delay := time.Duration(0.5 * float64(time.Second) * math.Pow(2, float64(retryCount))) + if delay > maxDelay { + delay = maxDelay + } + + jitter := rand.Int63n(int64(delay / 4)) + delay -= time.Duration(jitter) + return delay +} + +func (cfg *RequestConfig) Execute() (err error) { + cfg.Request.URL, err = cfg.BaseURL.Parse(strings.TrimLeft(cfg.Request.URL.String(), "/")) + if err != nil { + return err + } + + if cfg.Body != nil && cfg.Request.Body == nil { + switch body := cfg.Body.(type) { + case *bytes.Buffer: + b := body.Bytes() + cfg.Request.ContentLength = int64(body.Len()) + cfg.Request.GetBody = func() (io.ReadCloser, error) { return io.NopCloser(bytes.NewReader(b)), nil } + cfg.Request.Body, _ = cfg.Request.GetBody() + case *bytes.Reader: + cfg.Request.ContentLength = int64(body.Len()) + cfg.Request.GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, 0) + return io.NopCloser(body), err + } + cfg.Request.Body, _ = cfg.Request.GetBody() + default: + if rc, ok := body.(io.ReadCloser); ok { + cfg.Request.Body = rc + } else { + cfg.Request.Body = io.NopCloser(body) + } + } + } + + handler := cfg.HTTPClient.Do + for i := len(cfg.Middlewares) - 1; i >= 0; i -= 1 { + handler = applyMiddleware(cfg.Middlewares[i], handler) + } + + // Don't send the current retry count in the headers if the caller modified the header defaults. 
+ shouldSendRetryCount := cfg.Request.Header.Get("X-Stainless-Retry-Count") == "0" + + var res *http.Response + for retryCount := 0; retryCount <= cfg.MaxRetries; retryCount += 1 { + ctx := cfg.Request.Context() + if cfg.RequestTimeout != time.Duration(0) { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cfg.RequestTimeout) + defer cancel() + } + + req := cfg.Request.Clone(ctx) + if shouldSendRetryCount { + req.Header.Set("X-Stainless-Retry-Count", strconv.Itoa(retryCount)) + } + + res, err = handler(req) + if ctx != nil && ctx.Err() != nil { + return ctx.Err() + } + if !shouldRetry(cfg.Request, res) || retryCount >= cfg.MaxRetries { + break + } + + // Prepare next request and wait for the retry delay + if cfg.Request.GetBody != nil { + cfg.Request.Body, err = cfg.Request.GetBody() + if err != nil { + return err + } + } + + // Can't actually refresh the body, so we don't attempt to retry here + if cfg.Request.GetBody == nil && cfg.Request.Body != nil { + break + } + + time.Sleep(retryDelay(res, retryCount)) + } + + // Save *http.Response if it is requested to, even if there was an error making the request. This is + // useful in cases where you might want to debug by inspecting the response. Note that if err != nil, + // the response should be generally be empty, but there are edge cases. + if cfg.ResponseInto != nil { + *cfg.ResponseInto = res + } + if responseBodyInto, ok := cfg.ResponseBodyInto.(**http.Response); ok { + *responseBodyInto = res + } + + // If there was a connection error in the final request or any other transport error, + // return that early without trying to coerce into an APIError. + if err != nil { + return err + } + + if res.StatusCode >= 400 { + contents, err := io.ReadAll(res.Body) + res.Body.Close() + if err != nil { + return err + } + + // If there is an APIError, re-populate the response body so that debugging + // utilities can conveniently dump the response without issue. 
+ res.Body = io.NopCloser(bytes.NewBuffer(contents)) + + // Load the contents into the error format if it is provided. + aerr := apierror.Error{Request: cfg.Request, Response: res, StatusCode: res.StatusCode} + err = aerr.UnmarshalJSON(contents) + if err != nil { + return err + } + return &aerr + } + + if cfg.ResponseBodyInto == nil { + return nil + } + if _, ok := cfg.ResponseBodyInto.(**http.Response); ok { + return nil + } + + contents, err := io.ReadAll(res.Body) + if err != nil { + return fmt.Errorf("error reading response body: %w", err) + } + + // If we are not json, return plaintext + contentType := res.Header.Get("content-type") + isJSON := strings.Contains(contentType, "application/json") || strings.Contains(contentType, "application/vnd.api+json") + if !isJSON { + switch dst := cfg.ResponseBodyInto.(type) { + case *string: + *dst = string(contents) + case **string: + tmp := string(contents) + *dst = &tmp + case *[]byte: + *dst = contents + default: + return fmt.Errorf("expected destination type of 'string' or '[]byte' for responses with content-type that is not 'application/json'") + } + return nil + } + + // If the response happens to be a byte array, deserialize the body as-is. + switch dst := cfg.ResponseBodyInto.(type) { + case *[]byte: + *dst = contents + } + + err = json.NewDecoder(bytes.NewReader(contents)).Decode(cfg.ResponseBodyInto) + if err != nil { + return fmt.Errorf("error parsing response json: %w", err) + } + + return nil +} + +func ExecuteNewRequest(ctx context.Context, method string, u string, body interface{}, dst interface{}, opts ...func(*RequestConfig) error) error { + cfg, err := NewRequestConfig(ctx, method, u, body, dst, opts...) 
// CheckTestServer reports whether tests that need the mock Prism server
// should run. When the server at url is unreachable, the SKIP_MOCK_TESTS
// environment variable decides the outcome: "true" skips the test, "false"
// fails it, unset lets the test proceed (and fail later on the connection
// error itself).
func CheckTestServer(t *testing.T, url string) bool {
	res, err := http.Get(url)
	if err != nil {
		const SKIP_MOCK_TESTS = "SKIP_MOCK_TESTS"
		if str, ok := os.LookupEnv(SKIP_MOCK_TESTS); ok {
			skip, parseErr := strconv.ParseBool(str)
			if parseErr != nil {
				t.Fatalf("strconv.ParseBool(os.LookupEnv(%s)) failed: %s", SKIP_MOCK_TESTS, parseErr)
			}
			if skip {
				t.Skip("The test will not run without a mock Prism server running against your OpenAPI spec")
				return false
			}
			t.Errorf("The test will not run without a mock Prism server running against your OpenAPI spec. You can set the environment variable %s to true to skip running any tests that require the mock server", SKIP_MOCK_TESTS)
			return false
		}
		// No opinion from the environment: proceed as before.
		return true
	}
	// Fix: the probe's response body was previously leaked; close it so the
	// keep-alive connection can be reused or released.
	res.Body.Close()
	return true
}
+ +package internal + +const PackageVersion = "0.0.1-alpha.0" diff --git a/lib/.keep b/lib/.keep new file mode 100644 index 0000000..5e2c99f --- /dev/null +++ b/lib/.keep @@ -0,0 +1,4 @@ +File generated from our OpenAPI spec by Stainless. + +This directory can be used to store custom files to expand the SDK. +It is ignored by Stainless code generation and its content (other than this keep file) won't be touched. \ No newline at end of file diff --git a/option/requestoption.go b/option/requestoption.go new file mode 100644 index 0000000..b6a659c --- /dev/null +++ b/option/requestoption.go @@ -0,0 +1,229 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package option + +import ( + "bytes" + "fmt" + "io" + "log" + "net/http" + "net/url" + "time" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/requestconfig" + "github.com/tidwall/sjson" +) + +// RequestOption is an option for the requests made by the maestro-arch-rpc API Client +// which can be supplied to clients, services, and methods. You can read more about this functional +// options pattern in our [README]. +// +// [README]: https://pkg.go.dev/github.com/stainless-sdks/maestro-arch-rpc-go#readme-requestoptions +type RequestOption = func(*requestconfig.RequestConfig) error + +// WithBaseURL returns a RequestOption that sets the BaseURL for the client. +func WithBaseURL(base string) RequestOption { + u, err := url.Parse(base) + if err != nil { + log.Fatalf("failed to parse BaseURL: %s\n", err) + } + return func(r *requestconfig.RequestConfig) error { + r.BaseURL = u + return nil + } +} + +// WithHTTPClient returns a RequestOption that changes the underlying [http.Client] used to make this +// request, which by default is [http.DefaultClient]. 
+func WithHTTPClient(client *http.Client) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.HTTPClient = client + return nil + } +} + +// MiddlewareNext is a function which is called by a middleware to pass an HTTP request +// to the next stage in the middleware chain. +type MiddlewareNext = func(*http.Request) (*http.Response, error) + +// Middleware is a function which intercepts HTTP requests, processing or modifying +// them, and then passing the request to the next middleware or handler +// in the chain by calling the provided MiddlewareNext function. +type Middleware = func(*http.Request, MiddlewareNext) (*http.Response, error) + +// WithMiddleware returns a RequestOption that applies the given middleware +// to the requests made. Each middleware will execute in the order they were given. +func WithMiddleware(middlewares ...Middleware) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.Middlewares = append(r.Middlewares, middlewares...) + return nil + } +} + +// WithMaxRetries returns a RequestOption that sets the maximum number of retries that the client +// attempts to make. When given 0, the client only makes one request. By +// default, the client retries two times. +// +// WithMaxRetries panics when retries is negative. +func WithMaxRetries(retries int) RequestOption { + if retries < 0 { + panic("option: cannot have fewer than 0 retries") + } + return func(r *requestconfig.RequestConfig) error { + r.MaxRetries = retries + return nil + } +} + +// WithHeader returns a RequestOption that sets the header value to the associated key. It overwrites +// any value if there was one already present. +func WithHeader(key, value string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.Request.Header.Set(key, value) + return nil + } +} + +// WithHeaderAdd returns a RequestOption that adds the header value to the associated key. It appends +// onto any existing values. 
+func WithHeaderAdd(key, value string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.Request.Header.Add(key, value) + return nil + } +} + +// WithHeaderDel returns a RequestOption that deletes the header value(s) associated with the given key. +func WithHeaderDel(key string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.Request.Header.Del(key) + return nil + } +} + +// WithQuery returns a RequestOption that sets the query value to the associated key. It overwrites +// any value if there was one already present. +func WithQuery(key, value string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + query := r.Request.URL.Query() + query.Set(key, value) + r.Request.URL.RawQuery = query.Encode() + return nil + } +} + +// WithQueryAdd returns a RequestOption that adds the query value to the associated key. It appends +// onto any existing values. +func WithQueryAdd(key, value string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + query := r.Request.URL.Query() + query.Add(key, value) + r.Request.URL.RawQuery = query.Encode() + return nil + } +} + +// WithQueryDel returns a RequestOption that deletes the query value(s) associated with the key. +func WithQueryDel(key string) RequestOption { + return func(r *requestconfig.RequestConfig) error { + query := r.Request.URL.Query() + query.Del(key) + r.Request.URL.RawQuery = query.Encode() + return nil + } +} + +// WithJSONSet returns a RequestOption that sets the body's JSON value associated with the key. +// The key accepts a string as defined by the [sjson format]. 
+// +// [sjson format]: https://github.com/tidwall/sjson +func WithJSONSet(key string, value interface{}) RequestOption { + return func(r *requestconfig.RequestConfig) (err error) { + if buffer, ok := r.Body.(*bytes.Buffer); ok { + b := buffer.Bytes() + b, err = sjson.SetBytes(b, key, value) + if err != nil { + return err + } + r.Body = bytes.NewBuffer(b) + return nil + } + + return fmt.Errorf("cannot use WithJSONSet on a body that is not serialized as *bytes.Buffer") + } +} + +// WithJSONDel returns a RequestOption that deletes the body's JSON value associated with the key. +// The key accepts a string as defined by the [sjson format]. +// +// [sjson format]: https://github.com/tidwall/sjson +func WithJSONDel(key string) RequestOption { + return func(r *requestconfig.RequestConfig) (err error) { + if buffer, ok := r.Body.(*bytes.Buffer); ok { + b := buffer.Bytes() + b, err = sjson.DeleteBytes(b, key) + if err != nil { + return err + } + r.Body = bytes.NewBuffer(b) + return nil + } + + return fmt.Errorf("cannot use WithJSONDel on a body that is not serialized as *bytes.Buffer") + } +} + +// WithResponseBodyInto returns a RequestOption that overwrites the deserialization target with +// the given destination. If provided, we don't deserialize into the default struct. +func WithResponseBodyInto(dst any) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.ResponseBodyInto = dst + return nil + } +} + +// WithResponseInto returns a RequestOption that copies the [*http.Response] into the given address. +func WithResponseInto(dst **http.Response) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.ResponseInto = dst + return nil + } +} + +// WithRequestBody returns a RequestOption that provides a custom serialized body with the given +// content type. +// +// body accepts an io.Reader or raw []bytes. 
+func WithRequestBody(contentType string, body any) RequestOption { + return func(r *requestconfig.RequestConfig) error { + if reader, ok := body.(io.Reader); ok { + r.Body = reader + return r.Apply(WithHeader("Content-Type", contentType)) + } + + if b, ok := body.([]byte); ok { + r.Body = bytes.NewBuffer(b) + return r.Apply(WithHeader("Content-Type", contentType)) + } + + return fmt.Errorf("body must be a byte slice or implement io.Reader") + } +} + +// WithRequestTimeout returns a RequestOption that sets the timeout for +// each request attempt. This should be smaller than the timeout defined in +// the context, which spans all retries. +func WithRequestTimeout(dur time.Duration) RequestOption { + return func(r *requestconfig.RequestConfig) error { + r.RequestTimeout = dur + return nil + } +} + +// WithEnvironmentProduction returns a RequestOption that sets the current +// environment to be the "production" environment. An environment specifies which base URL +// to use by default. +func WithEnvironmentProduction() RequestOption { + return WithBaseURL("https://arch-testnet.gomaestro-api.org/v0/rpc/") +} diff --git a/program.go b/program.go new file mode 100644 index 0000000..af244d0 --- /dev/null +++ b/program.go @@ -0,0 +1,164 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package maestroarchrpc + +import ( + "context" + "net/http" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/apijson" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/param" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/requestconfig" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +// ProgramService contains methods and other services that help with interacting +// with the maestro-arch-rpc API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. 
You should not instantiate this service directly, and instead use +// the [NewProgramService] method. +type ProgramService struct { + Options []option.RequestOption +} + +// NewProgramService generates a new service that applies the given options to each +// request. These options are applied after the parent client's options (if there +// is one), and before any request-specific options. +func NewProgramService(opts ...option.RequestOption) (r *ProgramService) { + r = &ProgramService{} + r.Options = opts + return +} + +// Program accounts +func (r *ProgramService) Accounts(ctx context.Context, body ProgramAccountsParams, opts ...option.RequestOption) (res *ProgramAccountsResponse, err error) { + opts = append(r.Options[:], opts...) + path := "program/accounts" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +type ProgramAccountsResponse struct { + Data []ProgramAccountsResponseData `json:"data"` + LastUpdated ProgramAccountsResponseLastUpdated `json:"last_updated"` + JSON programAccountsResponseJSON `json:"-"` +} + +// programAccountsResponseJSON contains the JSON metadata for the struct +// [ProgramAccountsResponse] +type programAccountsResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ProgramAccountsResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r programAccountsResponseJSON) RawJSON() string { + return r.raw +} + +type ProgramAccountsResponseData struct { + Account ProgramAccountsResponseDataAccount `json:"account"` + Pubkey []int64 `json:"pubkey"` + JSON programAccountsResponseDataJSON `json:"-"` +} + +// programAccountsResponseDataJSON contains the JSON metadata for the struct +// [ProgramAccountsResponseData] +type programAccountsResponseDataJSON struct { + Account apijson.Field + Pubkey apijson.Field + raw string + ExtraFields map[string]apijson.Field +} 
+ +func (r *ProgramAccountsResponseData) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r programAccountsResponseDataJSON) RawJSON() string { + return r.raw +} + +type ProgramAccountsResponseDataAccount struct { + Data []int64 `json:"data"` + IsExecutable bool `json:"is_executable"` + Owner []int64 `json:"owner"` + Utxo string `json:"utxo"` + JSON programAccountsResponseDataAccountJSON `json:"-"` +} + +// programAccountsResponseDataAccountJSON contains the JSON metadata for the struct +// [ProgramAccountsResponseDataAccount] +type programAccountsResponseDataAccountJSON struct { + Data apijson.Field + IsExecutable apijson.Field + Owner apijson.Field + Utxo apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ProgramAccountsResponseDataAccount) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r programAccountsResponseDataAccountJSON) RawJSON() string { + return r.raw +} + +type ProgramAccountsResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON programAccountsResponseLastUpdatedJSON `json:"-"` +} + +// programAccountsResponseLastUpdatedJSON contains the JSON metadata for the struct +// [ProgramAccountsResponseLastUpdated] +type programAccountsResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *ProgramAccountsResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r programAccountsResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type ProgramAccountsParams struct { + Filters param.Field[[]ProgramAccountsParamsFilter] `json:"filters"` + ProgramID param.Field[[]int64] `json:"program_id"` +} + +func (r ProgramAccountsParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type 
ProgramAccountsParamsFilter struct { + DataSize param.Field[int64] `json:"dataSize"` + Memcmp param.Field[ProgramAccountsParamsFiltersMemcmp] `json:"memcmp"` +} + +func (r ProgramAccountsParamsFilter) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type ProgramAccountsParamsFiltersMemcmp struct { + // hex-encoded bytes + Bytes param.Field[string] `json:"bytes"` + Offset param.Field[int64] `json:"offset"` +} + +func (r ProgramAccountsParamsFiltersMemcmp) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} diff --git a/program_test.go b/program_test.go new file mode 100644 index 0000000..1b3c2ed --- /dev/null +++ b/program_test.go @@ -0,0 +1,56 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package maestroarchrpc_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/stainless-sdks/maestro-arch-rpc-go" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/testutil" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +func TestProgramAccountsWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Programs.Accounts(context.TODO(), maestroarchrpc.ProgramAccountsParams{ + Filters: maestroarchrpc.F([]maestroarchrpc.ProgramAccountsParamsFilter{{ + DataSize: maestroarchrpc.F(int64(0)), + Memcmp: maestroarchrpc.F(maestroarchrpc.ProgramAccountsParamsFiltersMemcmp{ + Bytes: maestroarchrpc.F("bytes"), + Offset: maestroarchrpc.F(int64(0)), + }), + }, { + DataSize: maestroarchrpc.F(int64(0)), + Memcmp: maestroarchrpc.F(maestroarchrpc.ProgramAccountsParamsFiltersMemcmp{ + Bytes: maestroarchrpc.F("bytes"), + Offset: maestroarchrpc.F(int64(0)), + }), + }, { + DataSize: maestroarchrpc.F(int64(0)), + Memcmp: 
maestroarchrpc.F(maestroarchrpc.ProgramAccountsParamsFiltersMemcmp{ + Bytes: maestroarchrpc.F("bytes"), + Offset: maestroarchrpc.F(int64(0)), + }), + }}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }) + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/scripts/bootstrap b/scripts/bootstrap new file mode 100755 index 0000000..ed03e52 --- /dev/null +++ b/scripts/bootstrap @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +if [ -f "Brewfile" ] && [ "$(uname -s)" = "Darwin" ]; then + brew bundle check >/dev/null 2>&1 || { + echo "==> Installing Homebrew dependencies…" + brew bundle + } +fi + +echo "==> Installing Go dependencies…" + +go mod tidy diff --git a/scripts/format b/scripts/format new file mode 100755 index 0000000..db2a3fa --- /dev/null +++ b/scripts/format @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Running gofmt -s -w" +gofmt -s -w . diff --git a/scripts/lint b/scripts/lint new file mode 100755 index 0000000..fa7ba1f --- /dev/null +++ b/scripts/lint @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +echo "==> Running Go build" +go build ./... diff --git a/scripts/mock b/scripts/mock new file mode 100755 index 0000000..d2814ae --- /dev/null +++ b/scripts/mock @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." 
+ +if [[ -n "$1" && "$1" != '--'* ]]; then + URL="$1" + shift +else + URL="$(grep 'openapi_spec_url' .stats.yml | cut -d' ' -f2)" +fi + +# Check if the URL is empty +if [ -z "$URL" ]; then + echo "Error: No OpenAPI spec path/url provided or found in .stats.yml" + exit 1 +fi + +echo "==> Starting mock server with URL ${URL}" + +# Run prism mock on the given spec +if [ "$1" == "--daemon" ]; then + npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" &> .prism.log & + + # Wait for server to come online + echo -n "Waiting for server" + while ! grep -q "✖ fatal\|Prism is listening" ".prism.log" ; do + echo -n "." + sleep 0.1 + done + + if grep -q "✖ fatal" ".prism.log"; then + cat .prism.log + exit 1 + fi + + echo +else + npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock "$URL" +fi diff --git a/scripts/test b/scripts/test new file mode 100755 index 0000000..efebcea --- /dev/null +++ b/scripts/test @@ -0,0 +1,56 @@ +#!/usr/bin/env bash + +set -e + +cd "$(dirname "$0")/.." + +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[0;33m' +NC='\033[0m' # No Color + +function prism_is_running() { + curl --silent "http://localhost:4010" >/dev/null 2>&1 +} + +kill_server_on_port() { + pids=$(lsof -t -i tcp:"$1" || echo "") + if [ "$pids" != "" ]; then + kill "$pids" + echo "Stopped $pids." + fi +} + +function is_overriding_api_base_url() { + [ -n "$TEST_API_BASE_URL" ] +} + +if ! is_overriding_api_base_url && ! prism_is_running ; then + # When we exit this script, make sure to kill the background mock server process + trap 'kill_server_on_port 4010' EXIT + + # Start the dev server + ./scripts/mock --daemon +fi + +if is_overriding_api_base_url ; then + echo -e "${GREEN}✔ Running tests against ${TEST_API_BASE_URL}${NC}" + echo +elif ! prism_is_running ; then + echo -e "${RED}ERROR:${NC} The test suite will not run without a mock Prism server" + echo -e "running against your OpenAPI spec." 
+ echo + echo -e "To run the server, pass in the path or url of your OpenAPI" + echo -e "spec to the prism command:" + echo + echo -e " \$ ${YELLOW}npm exec --package=@stainless-api/prism-cli@5.8.5 -- prism mock path/to/your.openapi.yml${NC}" + echo + + exit 1 +else + echo -e "${GREEN}✔ Mock prism server is running with your OpenAPI spec${NC}" + echo +fi + +echo "==> Running tests" +go test ./... "$@" diff --git a/transaction.go b/transaction.go new file mode 100644 index 0000000..8c9bdcf --- /dev/null +++ b/transaction.go @@ -0,0 +1,411 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package maestroarchrpc + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/apijson" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/param" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/requestconfig" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +// TransactionService contains methods and other services that help with +// interacting with the maestro-arch-rpc API. +// +// Note, unlike clients, this service does not read variables from the environment +// automatically. You should not instantiate this service directly, and instead use +// the [NewTransactionService] method. +type TransactionService struct { + Options []option.RequestOption +} + +// NewTransactionService generates a new service that applies the given options to +// each request. These options are applied after the parent client's options (if +// there is one), and before any request-specific options. +func NewTransactionService(opts ...option.RequestOption) (r *TransactionService) { + r = &TransactionService{} + r.Options = opts + return +} + +// Processed transaction info by hash +func (r *TransactionService) Get(ctx context.Context, txHash string, opts ...option.RequestOption) (res *TransactionGetResponse, err error) { + opts = append(r.Options[:], opts...) 
+ if txHash == "" { + err = errors.New("missing required tx_hash parameter") + return + } + path := fmt.Sprintf("transaction/%s", txHash) + err = requestconfig.ExecuteNewRequest(ctx, http.MethodGet, path, nil, &res, opts...) + return +} + +// Send multiple transactions to the network +func (r *TransactionService) BatchSend(ctx context.Context, body TransactionBatchSendParams, opts ...option.RequestOption) (res *TransactionBatchSendResponse, err error) { + opts = append(r.Options[:], opts...) + path := "transaction/send/batch" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +// Send transaction to the network +func (r *TransactionService) Send(ctx context.Context, body TransactionSendParams, opts ...option.RequestOption) (res *TransactionSendResponse, err error) { + opts = append(r.Options[:], opts...) + path := "transaction/send" + err = requestconfig.ExecuteNewRequest(ctx, http.MethodPost, path, body, &res, opts...) + return +} + +type TransactionGetResponse struct { + Data TransactionGetResponseData `json:"data"` + LastUpdated TransactionGetResponseLastUpdated `json:"last_updated"` + JSON transactionGetResponseJSON `json:"-"` +} + +// transactionGetResponseJSON contains the JSON metadata for the struct +// [TransactionGetResponse] +type transactionGetResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionGetResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionGetResponseJSON) RawJSON() string { + return r.raw +} + +type TransactionGetResponseData struct { + BitcoinTxids []string `json:"bitcoin_txids"` + RuntimeTransaction TransactionGetResponseDataRuntimeTransaction `json:"runtime_transaction"` + Status string `json:"status"` + JSON transactionGetResponseDataJSON `json:"-"` +} + +// transactionGetResponseDataJSON contains the JSON metadata for the struct 
+// [TransactionGetResponseData] +type transactionGetResponseDataJSON struct { + BitcoinTxids apijson.Field + RuntimeTransaction apijson.Field + Status apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionGetResponseData) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionGetResponseDataJSON) RawJSON() string { + return r.raw +} + +type TransactionGetResponseDataRuntimeTransaction struct { + Message TransactionGetResponseDataRuntimeTransactionMessage `json:"message"` + Signatures [][]int64 `json:"signatures"` + Version int64 `json:"version"` + JSON transactionGetResponseDataRuntimeTransactionJSON `json:"-"` +} + +// transactionGetResponseDataRuntimeTransactionJSON contains the JSON metadata for +// the struct [TransactionGetResponseDataRuntimeTransaction] +type transactionGetResponseDataRuntimeTransactionJSON struct { + Message apijson.Field + Signatures apijson.Field + Version apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionGetResponseDataRuntimeTransaction) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionGetResponseDataRuntimeTransactionJSON) RawJSON() string { + return r.raw +} + +type TransactionGetResponseDataRuntimeTransactionMessage struct { + Instructions []TransactionGetResponseDataRuntimeTransactionMessageInstruction `json:"instructions"` + Signers [][]int64 `json:"signers"` + JSON transactionGetResponseDataRuntimeTransactionMessageJSON `json:"-"` +} + +// transactionGetResponseDataRuntimeTransactionMessageJSON contains the JSON +// metadata for the struct [TransactionGetResponseDataRuntimeTransactionMessage] +type transactionGetResponseDataRuntimeTransactionMessageJSON struct { + Instructions apijson.Field + Signers apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionGetResponseDataRuntimeTransactionMessage) 
UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionGetResponseDataRuntimeTransactionMessageJSON) RawJSON() string { + return r.raw +} + +type TransactionGetResponseDataRuntimeTransactionMessageInstruction struct { + Accounts []TransactionGetResponseDataRuntimeTransactionMessageInstructionsAccount `json:"accounts"` + Data []int64 `json:"data"` + ProgramID []int64 `json:"program_id"` + JSON transactionGetResponseDataRuntimeTransactionMessageInstructionJSON `json:"-"` +} + +// transactionGetResponseDataRuntimeTransactionMessageInstructionJSON contains the +// JSON metadata for the struct +// [TransactionGetResponseDataRuntimeTransactionMessageInstruction] +type transactionGetResponseDataRuntimeTransactionMessageInstructionJSON struct { + Accounts apijson.Field + Data apijson.Field + ProgramID apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionGetResponseDataRuntimeTransactionMessageInstruction) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionGetResponseDataRuntimeTransactionMessageInstructionJSON) RawJSON() string { + return r.raw +} + +type TransactionGetResponseDataRuntimeTransactionMessageInstructionsAccount struct { + IsSigner bool `json:"is_signer"` + IsWritable bool `json:"is_writable"` + Pubkey []int64 `json:"pubkey"` + JSON transactionGetResponseDataRuntimeTransactionMessageInstructionsAccountJSON `json:"-"` +} + +// transactionGetResponseDataRuntimeTransactionMessageInstructionsAccountJSON +// contains the JSON metadata for the struct +// [TransactionGetResponseDataRuntimeTransactionMessageInstructionsAccount] +type transactionGetResponseDataRuntimeTransactionMessageInstructionsAccountJSON struct { + IsSigner apijson.Field + IsWritable apijson.Field + Pubkey apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionGetResponseDataRuntimeTransactionMessageInstructionsAccount) 
UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionGetResponseDataRuntimeTransactionMessageInstructionsAccountJSON) RawJSON() string { + return r.raw +} + +type TransactionGetResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON transactionGetResponseLastUpdatedJSON `json:"-"` +} + +// transactionGetResponseLastUpdatedJSON contains the JSON metadata for the struct +// [TransactionGetResponseLastUpdated] +type transactionGetResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionGetResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionGetResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type TransactionBatchSendResponse struct { + Data []string `json:"data"` + LastUpdated TransactionBatchSendResponseLastUpdated `json:"last_updated"` + JSON transactionBatchSendResponseJSON `json:"-"` +} + +// transactionBatchSendResponseJSON contains the JSON metadata for the struct +// [TransactionBatchSendResponse] +type transactionBatchSendResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionBatchSendResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionBatchSendResponseJSON) RawJSON() string { + return r.raw +} + +type TransactionBatchSendResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON transactionBatchSendResponseLastUpdatedJSON `json:"-"` +} + +// transactionBatchSendResponseLastUpdatedJSON contains the JSON metadata for the +// struct [TransactionBatchSendResponseLastUpdated] +type transactionBatchSendResponseLastUpdatedJSON struct { + BlockHash 
apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionBatchSendResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionBatchSendResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type TransactionSendResponse struct { + Data string `json:"data"` + LastUpdated TransactionSendResponseLastUpdated `json:"last_updated"` + JSON transactionSendResponseJSON `json:"-"` +} + +// transactionSendResponseJSON contains the JSON metadata for the struct +// [TransactionSendResponse] +type transactionSendResponseJSON struct { + Data apijson.Field + LastUpdated apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionSendResponse) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionSendResponseJSON) RawJSON() string { + return r.raw +} + +type TransactionSendResponseLastUpdated struct { + BlockHash string `json:"block_hash"` + BlockHeight int64 `json:"block_height"` + JSON transactionSendResponseLastUpdatedJSON `json:"-"` +} + +// transactionSendResponseLastUpdatedJSON contains the JSON metadata for the struct +// [TransactionSendResponseLastUpdated] +type transactionSendResponseLastUpdatedJSON struct { + BlockHash apijson.Field + BlockHeight apijson.Field + raw string + ExtraFields map[string]apijson.Field +} + +func (r *TransactionSendResponseLastUpdated) UnmarshalJSON(data []byte) (err error) { + return apijson.UnmarshalRoot(data, r) +} + +func (r transactionSendResponseLastUpdatedJSON) RawJSON() string { + return r.raw +} + +type TransactionBatchSendParams struct { + Body []TransactionBatchSendParamsBody `json:"body,required"` +} + +func (r TransactionBatchSendParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r.Body) +} + +type TransactionBatchSendParamsBody struct { + Message 
param.Field[TransactionBatchSendParamsBodyMessage] `json:"message"` + Signatures param.Field[[][]int64] `json:"signatures"` + Version param.Field[int64] `json:"version"` +} + +func (r TransactionBatchSendParamsBody) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type TransactionBatchSendParamsBodyMessage struct { + Instructions param.Field[[]TransactionBatchSendParamsBodyMessageInstruction] `json:"instructions"` + Signers param.Field[[][]int64] `json:"signers"` +} + +func (r TransactionBatchSendParamsBodyMessage) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type TransactionBatchSendParamsBodyMessageInstruction struct { + Accounts param.Field[[]TransactionBatchSendParamsBodyMessageInstructionsAccount] `json:"accounts"` + Data param.Field[[]int64] `json:"data"` + ProgramID param.Field[[]int64] `json:"program_id"` +} + +func (r TransactionBatchSendParamsBodyMessageInstruction) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type TransactionBatchSendParamsBodyMessageInstructionsAccount struct { + IsSigner param.Field[bool] `json:"is_signer"` + IsWritable param.Field[bool] `json:"is_writable"` + Pubkey param.Field[[]int64] `json:"pubkey"` +} + +func (r TransactionBatchSendParamsBodyMessageInstructionsAccount) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type TransactionSendParams struct { + Message param.Field[TransactionSendParamsMessage] `json:"message"` + Signatures param.Field[[][]int64] `json:"signatures"` + Version param.Field[int64] `json:"version"` +} + +func (r TransactionSendParams) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type TransactionSendParamsMessage struct { + Instructions param.Field[[]TransactionSendParamsMessageInstruction] `json:"instructions"` + Signers param.Field[[][]int64] `json:"signers"` +} + +func (r TransactionSendParamsMessage) MarshalJSON() (data []byte, err error) { + 
return apijson.MarshalRoot(r) +} + +type TransactionSendParamsMessageInstruction struct { + Accounts param.Field[[]TransactionSendParamsMessageInstructionsAccount] `json:"accounts"` + Data param.Field[[]int64] `json:"data"` + ProgramID param.Field[[]int64] `json:"program_id"` +} + +func (r TransactionSendParamsMessageInstruction) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} + +type TransactionSendParamsMessageInstructionsAccount struct { + IsSigner param.Field[bool] `json:"is_signer"` + IsWritable param.Field[bool] `json:"is_writable"` + Pubkey param.Field[[]int64] `json:"pubkey"` +} + +func (r TransactionSendParamsMessageInstructionsAccount) MarshalJSON() (data []byte, err error) { + return apijson.MarshalRoot(r) +} diff --git a/transaction_test.go b/transaction_test.go new file mode 100644 index 0000000..1079df6 --- /dev/null +++ b/transaction_test.go @@ -0,0 +1,299 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +package maestroarchrpc_test + +import ( + "context" + "errors" + "os" + "testing" + + "github.com/stainless-sdks/maestro-arch-rpc-go" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/testutil" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +func TestTransactionGet(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Transactions.Get(context.TODO(), "tx_hash") + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestTransactionBatchSend(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Transactions.BatchSend(context.TODO(), maestroarchrpc.TransactionBatchSendParams{ + Body: []maestroarchrpc.TransactionBatchSendParamsBody{{ + Message: maestroarchrpc.F(maestroarchrpc.TransactionBatchSendParamsBodyMessage{ + Instructions: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstruction{{ + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + 
Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Signers: maestroarchrpc.F([][]int64{{int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}}), + }), + Signatures: maestroarchrpc.F([][]int64{{int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}}), + Version: maestroarchrpc.F(int64(0)), + }, { + Message: maestroarchrpc.F(maestroarchrpc.TransactionBatchSendParamsBodyMessage{ + Instructions: 
maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstruction{{ + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), 
int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Signers: maestroarchrpc.F([][]int64{{int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}}), + }), + Signatures: maestroarchrpc.F([][]int64{{int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}}), + Version: maestroarchrpc.F(int64(0)), + }, { + Message: maestroarchrpc.F(maestroarchrpc.TransactionBatchSendParamsBodyMessage{ + Instructions: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstruction{{ + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: 
maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionBatchSendParamsBodyMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Signers: maestroarchrpc.F([][]int64{{int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}}), + }), + Signatures: maestroarchrpc.F([][]int64{{int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}}), + Version: maestroarchrpc.F(int64(0)), + }}, + }) + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} + +func TestTransactionSendWithOptionalParams(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + _, err := client.Transactions.Send(context.TODO(), maestroarchrpc.TransactionSendParams{ + Message: maestroarchrpc.F(maestroarchrpc.TransactionSendParamsMessage{ + Instructions: maestroarchrpc.F([]maestroarchrpc.TransactionSendParamsMessageInstruction{{ + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionSendParamsMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: 
maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionSendParamsMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + Accounts: maestroarchrpc.F([]maestroarchrpc.TransactionSendParamsMessageInstructionsAccount{{ + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }, { + IsSigner: maestroarchrpc.F(true), + IsWritable: maestroarchrpc.F(true), + Pubkey: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Data: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + ProgramID: maestroarchrpc.F([]int64{int64(0), int64(0), int64(0)}), + }}), + Signers: maestroarchrpc.F([][]int64{{int64(0), int64(0), int64(0)}, {int64(0), int64(0), 
int64(0)}, {int64(0), int64(0), int64(0)}}), + }), + Signatures: maestroarchrpc.F([][]int64{{int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}, {int64(0), int64(0), int64(0)}}), + Version: maestroarchrpc.F(int64(0)), + }) + if err != nil { + var apierr *maestroarchrpc.Error + if errors.As(err, &apierr) { + t.Log(string(apierr.DumpRequest(true))) + } + t.Fatalf("err should be nil: %s", err.Error()) + } +} diff --git a/usage_test.go b/usage_test.go new file mode 100644 index 0000000..f423dfd --- /dev/null +++ b/usage_test.go @@ -0,0 +1,33 @@ +// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +package maestroarchrpc_test + +import ( + "context" + "os" + "testing" + + "github.com/stainless-sdks/maestro-arch-rpc-go" + "github.com/stainless-sdks/maestro-arch-rpc-go/internal/testutil" + "github.com/stainless-sdks/maestro-arch-rpc-go/option" +) + +func TestUsage(t *testing.T) { + baseURL := "http://localhost:4010" + if envURL, ok := os.LookupEnv("TEST_API_BASE_URL"); ok { + baseURL = envURL + } + if !testutil.CheckTestServer(t, baseURL) { + return + } + client := maestroarchrpc.NewClient( + option.WithBaseURL(baseURL), + ) + response, err := client.Accounts.Address(context.TODO(), maestroarchrpc.AccountAddressParams{ + Body: []int64{int64(0), int64(0), int64(0)}, + }) + if err != nil { + t.Error(err) + } + t.Logf("%+v\n", response.Data) +}