From c677e3082492d83acec7c723fbfba399da2688c0 Mon Sep 17 00:00:00 2001 From: "fern-api[bot]" <115122769+fern-api[bot]@users.noreply.github.com> Date: Wed, 7 Feb 2024 15:20:46 -0600 Subject: [PATCH] :herb: Fern Regeneration -- February 7, 2024 (#64) * SDK regeneration * Add e2e tests * Fix test * .fernignore tests * Revert "Fix test" This reverts commit cd29b517551593b45b477871aa9c371a9c4a75e4. * Patch --------- Co-authored-by: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Co-authored-by: Billy Trend --- .fernignore | 4 +- .github/workflows/e2e.yml | 25 ++ client/client.go | 291 +++++++++---- client/client_test.go | 18 +- client/options.go | 50 +-- connectors.go | 36 +- connectors/client.go | 173 +++++--- core/client_option.go | 53 --- core/core.go | 59 ++- core/core_test.go | 71 +++- core/query.go | 219 ++++++++++ core/query_test.go | 134 ++++++ core/request_option.go | 115 +++++ core/retrier.go | 166 ++++++++ core/stream.go | 26 +- datasets.go | 34 +- datasets/client.go | 204 +++++---- embed_jobs.go | 10 +- embedjobs/client.go | 106 +++-- environments.go | 4 +- go.mod | 5 +- go.sum | 2 + option/request_option.go | 57 +++ tests/sdk_test.go | 348 +++++++++++++++ types.go | 873 +++++++++++++++++++++++--------------- 25 files changed, 2354 insertions(+), 729 deletions(-) create mode 100644 .github/workflows/e2e.yml delete mode 100644 core/client_option.go create mode 100644 core/query.go create mode 100644 core/query_test.go create mode 100644 core/request_option.go create mode 100644 core/retrier.go create mode 100644 option/request_option.go create mode 100644 tests/sdk_test.go diff --git a/.fernignore b/.fernignore index a1ebaab..9f170d2 100644 --- a/.fernignore +++ b/.fernignore @@ -1,4 +1,6 @@ # Specify files that shouldn't be modified by Fern README.md banner.png -LICENSE \ No newline at end of file +LICENSE +.github/workflows/e2e.yml +tests/** \ No newline at end of file diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml new file 
mode 100644 index 0000000..51477be --- /dev/null +++ b/.github/workflows/e2e.yml @@ -0,0 +1,25 @@ +name: CI +on: + pull_request: {} +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Setup Go 1.x.x + uses: actions/setup-go@v4 + with: + go-version: 1.x.x + - name: Install testing dependencies here so we dont have to edit the go.mod file + run: | + go get . + go get golang.org/x/tools/go/pointer@v0.1.0-deprecated + go get golang.org/x/sys@v0.8.0 + go get golang.org/x/tools@v0.9.2-0.20230531220058-a260315e300a + - name: Build + run: go build -v ./... + - name: Test with the Go CLI + run: go test -v ./... + env: + COHERE_API_KEY: ${{ secrets.COHERE_API_KEY }} + diff --git a/client/client.go b/client/client.go index d2541b3..9ff0f5b 100644 --- a/client/client.go +++ b/client/client.go @@ -12,6 +12,7 @@ import ( core "github.com/cohere-ai/cohere-go/v2/core" datasets "github.com/cohere-ai/cohere-go/v2/datasets" embedjobs "github.com/cohere-ai/cohere-go/v2/embedjobs" + option "github.com/cohere-ai/cohere-go/v2/option" io "io" http "net/http" ) @@ -21,44 +22,59 @@ type Client struct { caller *core.Caller header http.Header + EmbedJobs *embedjobs.Client Datasets *datasets.Client Connectors *connectors.Client - EmbedJobs *embedjobs.Client } -func NewClient(opts ...core.ClientOption) *Client { - options := core.NewClientOptions() - for _, opt := range opts { - opt(options) - } +func NewClient(opts ...option.RequestOption) *Client { + options := core.NewRequestOptions(opts...) 
return &Client{ - baseURL: options.BaseURL, - caller: core.NewCaller(options.HTTPClient), + baseURL: options.BaseURL, + caller: core.NewCaller( + &core.CallerParams{ + Client: options.HTTPClient, + MaxAttempts: options.MaxAttempts, + }, + ), header: options.ToHeader(), + EmbedJobs: embedjobs.NewClient(opts...), Datasets: datasets.NewClient(opts...), Connectors: connectors.NewClient(opts...), - EmbedJobs: embedjobs.NewClient(opts...), } } // The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. // // The endpoint features additional parameters such as [connectors](https://docs.cohere.com/docs/connectors) and `documents` that enable conversations enriched by external knowledge. We call this ["Retrieval Augmented Generation"](https://docs.cohere.com/docs/retrieval-augmented-generation-rag), or "RAG". For a full breakdown of the Chat API endpoint, document and connector modes, and streaming (with code samples), see [this guide](https://docs.cohere.com/docs/cochat-beta). -func (c *Client) ChatStream(ctx context.Context, request *v2.ChatStreamRequest) (*core.Stream[v2.StreamedChatResponse], error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) ChatStream( + ctx context.Context, + request *v2.ChatStreamRequest, + opts ...option.RequestOption, +) (*core.Stream[v2.StreamedChatResponse], error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "chat" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/chat" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) streamer := core.NewStreamer[v2.StreamedChatResponse](c.caller) return streamer.Stream( ctx, &core.StreamParams{ - URL: endpointURL, - Method: http.MethodPost, - Headers: c.header, - Request: request, + URL: endpointURL, + Method: http.MethodPost, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, + Request: request, }, ) } @@ -66,22 +82,35 @@ func (c *Client) ChatStream(ctx context.Context, request *v2.ChatStreamRequest) // The `chat` endpoint allows users to have conversations with a Large Language Model (LLM) from Cohere. Users can send messages as part of a persisted conversation using the `conversation_id` parameter, or they can pass in their own conversation history using the `chat_history` parameter. // // The endpoint features additional parameters such as [connectors](https://docs.cohere.com/docs/connectors) and `documents` that enable conversations enriched by external knowledge. We call this ["Retrieval Augmented Generation"](https://docs.cohere.com/docs/retrieval-augmented-generation-rag), or "RAG". For a full breakdown of the Chat API endpoint, document and connector modes, and streaming (with code samples), see [this guide](https://docs.cohere.com/docs/cochat-beta). -func (c *Client) Chat(ctx context.Context, request *v2.ChatRequest) (*v2.NonStreamedChatResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Chat( + ctx context.Context, + request *v2.ChatRequest, + opts ...option.RequestOption, +) (*v2.NonStreamedChatResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "chat" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/chat" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) var response *v2.NonStreamedChatResponse if err := c.caller.Call( ctx, &core.CallParams{ - URL: endpointURL, - Method: http.MethodPost, - Headers: c.header, - Request: request, - Response: &response, + URL: endpointURL, + Method: http.MethodPost, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, + Request: request, + Response: &response, }, ); err != nil { return nil, err @@ -90,12 +119,23 @@ func (c *Client) Chat(ctx context.Context, request *v2.ChatRequest) (*v2.NonStre } // This endpoint generates realistic text conditioned on a given input. -func (c *Client) GenerateStream(ctx context.Context, request *v2.GenerateStreamRequest) (*core.Stream[v2.GenerateStreamedResponse], error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) GenerateStream( + ctx context.Context, + request *v2.GenerateStreamRequest, + opts ...option.RequestOption, +) (*core.Stream[v2.GenerateStreamedResponse], error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "generate" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/generate" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -129,7 +169,9 @@ func (c *Client) GenerateStream(ctx context.Context, request *v2.GenerateStreamR &core.StreamParams{ URL: endpointURL, Method: http.MethodPost, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Request: request, ErrorDecoder: errorDecoder, }, @@ -137,12 +179,23 @@ func (c *Client) GenerateStream(ctx context.Context, request *v2.GenerateStreamR } // This endpoint generates realistic text conditioned on a given input. -func (c *Client) Generate(ctx context.Context, request *v2.GenerateRequest) (*v2.Generation, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Generate( + ctx context.Context, + request *v2.GenerateRequest, + opts ...option.RequestOption, +) (*v2.Generation, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "generate" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/generate" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -176,7 +229,9 @@ func (c *Client) Generate(ctx context.Context, request *v2.GenerateRequest) (*v2 &core.CallParams{ URL: endpointURL, Method: http.MethodPost, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Request: request, Response: &response, ErrorDecoder: errorDecoder, @@ -192,12 +247,23 @@ func (c *Client) Generate(ctx context.Context, request *v2.GenerateRequest) (*v2 // Embeddings can be used to create text classifiers as well as empower semantic search. To learn more about embeddings, see the embedding page. // // If you want to learn more how to use the embedding model, have a look at the [Semantic Search Guide](/docs/semantic-search). -func (c *Client) Embed(ctx context.Context, request *v2.EmbedRequest) (*v2.EmbedResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Embed( + ctx context.Context, + request *v2.EmbedRequest, + opts ...option.RequestOption, +) (*v2.EmbedResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "embed" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/embed" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -231,7 +297,9 @@ func (c *Client) Embed(ctx context.Context, request *v2.EmbedRequest) (*v2.Embed &core.CallParams{ URL: endpointURL, Method: http.MethodPost, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Request: request, Response: &response, ErrorDecoder: errorDecoder, @@ -243,22 +311,35 @@ func (c *Client) Embed(ctx context.Context, request *v2.EmbedRequest) (*v2.Embed } // This endpoint takes in a query and a list of texts and produces an ordered array with each text assigned a relevance score. -func (c *Client) Rerank(ctx context.Context, request *v2.RerankRequest) (*v2.RerankResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Rerank( + ctx context.Context, + request *v2.RerankRequest, + opts ...option.RequestOption, +) (*v2.RerankResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "rerank" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/rerank" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) var response *v2.RerankResponse if err := c.caller.Call( ctx, &core.CallParams{ - URL: endpointURL, - Method: http.MethodPost, - Headers: c.header, - Request: request, - Response: &response, + URL: endpointURL, + Method: http.MethodPost, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, + Request: request, + Response: &response, }, ); err != nil { return nil, err @@ -268,12 +349,23 @@ func (c *Client) Rerank(ctx context.Context, request *v2.RerankRequest) (*v2.Rer // This endpoint makes a prediction about which label fits the specified text inputs best. To make a prediction, Classify uses the provided `examples` of text + label pairs as a reference. // Note: [Fine-tuned models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly. -func (c *Client) Classify(ctx context.Context, request *v2.ClassifyRequest) (*v2.ClassifyResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Classify( + ctx context.Context, + request *v2.ClassifyRequest, + opts ...option.RequestOption, +) (*v2.ClassifyResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "classify" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/classify" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -307,7 +399,9 @@ func (c *Client) Classify(ctx context.Context, request *v2.ClassifyRequest) (*v2 &core.CallParams{ URL: endpointURL, Method: http.MethodPost, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Request: request, Response: &response, ErrorDecoder: errorDecoder, @@ -318,47 +412,36 @@ func (c *Client) Classify(ctx context.Context, request *v2.ClassifyRequest) (*v2 return response, nil } -// This endpoint identifies which language each of the provided texts is written in. -func (c *Client) DetectLanguage(ctx context.Context, request *v2.DetectLanguageRequest) (*v2.DetectLanguageResponse, error) { - baseURL := "https://api.cohere.ai/v1" +// This endpoint generates a summary in English for a given text. +func (c *Client) Summarize( + ctx context.Context, + request *v2.SummarizeRequest, + opts ...option.RequestOption, +) (*v2.SummarizeResponse, error) { + options := core.NewRequestOptions(opts...) + + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "detect-language" - - var response *v2.DetectLanguageResponse - if err := c.caller.Call( - ctx, - &core.CallParams{ - URL: endpointURL, - Method: http.MethodPost, - Headers: c.header, - Request: request, - Response: &response, - }, - ); err != nil { - return nil, err + if options.BaseURL != "" { + baseURL = options.BaseURL } - return response, nil -} + endpointURL := baseURL + "/" + "v1/summarize" -// This endpoint generates a summary in English for a given text. 
-func (c *Client) Summarize(ctx context.Context, request *v2.SummarizeRequest) (*v2.SummarizeResponse, error) { - baseURL := "https://api.cohere.ai/v1" - if c.baseURL != "" { - baseURL = c.baseURL - } - endpointURL := baseURL + "/" + "summarize" + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) var response *v2.SummarizeResponse if err := c.caller.Call( ctx, &core.CallParams{ - URL: endpointURL, - Method: http.MethodPost, - Headers: c.header, - Request: request, - Response: &response, + URL: endpointURL, + Method: http.MethodPost, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, + Request: request, + Response: &response, }, ); err != nil { return nil, err @@ -367,12 +450,23 @@ func (c *Client) Summarize(ctx context.Context, request *v2.SummarizeRequest) (* } // This endpoint splits input text into smaller units called tokens using byte-pair encoding (BPE). To learn more about tokenization and byte pair encoding, see the tokens page. -func (c *Client) Tokenize(ctx context.Context, request *v2.TokenizeRequest) (*v2.TokenizeResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Tokenize( + ctx context.Context, + request *v2.TokenizeRequest, + opts ...option.RequestOption, +) (*v2.TokenizeResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "tokenize" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/tokenize" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -406,7 +500,9 @@ func (c *Client) Tokenize(ctx context.Context, request *v2.TokenizeRequest) (*v2 &core.CallParams{ URL: endpointURL, Method: http.MethodPost, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Request: request, Response: &response, ErrorDecoder: errorDecoder, @@ -418,22 +514,35 @@ func (c *Client) Tokenize(ctx context.Context, request *v2.TokenizeRequest) (*v2 } // This endpoint takes tokens using byte-pair encoding and returns their text representation. To learn more about tokenization and byte pair encoding, see the tokens page. -func (c *Client) Detokenize(ctx context.Context, request *v2.DetokenizeRequest) (*v2.DetokenizeResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Detokenize( + ctx context.Context, + request *v2.DetokenizeRequest, + opts ...option.RequestOption, +) (*v2.DetokenizeResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "detokenize" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/detokenize" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) var response *v2.DetokenizeResponse if err := c.caller.Call( ctx, &core.CallParams{ - URL: endpointURL, - Method: http.MethodPost, - Headers: c.header, - Request: request, - Response: &response, + URL: endpointURL, + Method: http.MethodPost, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, + Request: request, + Response: &response, }, ); err != nil { return nil, err diff --git a/client/client_test.go b/client/client_test.go index a0fa36e..98760b4 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -1,11 +1,13 @@ +// This file was auto-generated by Fern from our API Definition. + package client import ( - "net/http" - "testing" - "time" - - "github.com/stretchr/testify/assert" + option "github.com/cohere-ai/cohere-go/v2/option" + assert "github.com/stretchr/testify/assert" + http "net/http" + testing "testing" + time "time" ) func TestNewClient(t *testing.T) { @@ -16,7 +18,7 @@ func TestNewClient(t *testing.T) { t.Run("base url", func(t *testing.T) { c := NewClient( - WithBaseURL("test.co"), + option.WithBaseURL("test.co"), ) assert.Equal(t, "test.co", c.baseURL) }) @@ -26,7 +28,7 @@ func TestNewClient(t *testing.T) { Timeout: 5 * time.Second, } c := NewClient( - WithHTTPClient(httpClient), + option.WithHTTPClient(httpClient), ) assert.Empty(t, c.baseURL) }) @@ -35,7 +37,7 @@ func TestNewClient(t *testing.T) { header := make(http.Header) header.Set("X-API-Tenancy", "test") c := NewClient( - WithHTTPHeader(header), + option.WithHTTPHeader(header), ) assert.Empty(t, c.baseURL) assert.Equal(t, "test", c.header.Get("X-API-Tenancy")) diff --git a/client/options.go b/client/options.go index aa652dd..906b612 100644 --- 
a/client/options.go +++ b/client/options.go @@ -4,45 +4,39 @@ package client import ( core "github.com/cohere-ai/cohere-go/v2/core" + option "github.com/cohere-ai/cohere-go/v2/option" http "net/http" ) -// WithBaseURL sets the client's base URL, overriding the -// default environment, if any. -func WithBaseURL(baseURL string) core.ClientOption { - return func(opts *core.ClientOptions) { - opts.BaseURL = baseURL - } +// WithBaseURL sets the base URL, overriding the default +// environment, if any. +func WithBaseURL(baseURL string) *core.BaseURLOption { + return option.WithBaseURL(baseURL) } -// WithHTTPClient uses the given HTTPClient to issue all HTTP requests. -func WithHTTPClient(httpClient core.HTTPClient) core.ClientOption { - return func(opts *core.ClientOptions) { - opts.HTTPClient = httpClient - } +// WithHTTPClient uses the given HTTPClient to issue the request. +func WithHTTPClient(httpClient core.HTTPClient) *core.HTTPClientOption { + return option.WithHTTPClient(httpClient) } -// WithHTTPHeader adds the given http.Header to all requests -// issued by the client. -func WithHTTPHeader(httpHeader http.Header) core.ClientOption { - return func(opts *core.ClientOptions) { - // Clone the headers so they can't be modified after the option call. - opts.HTTPHeader = httpHeader.Clone() - } +// WithHTTPHeader adds the given http.Header to the request. +func WithHTTPHeader(httpHeader http.Header) *core.HTTPHeaderOption { + return option.WithHTTPHeader(httpHeader) } -// WithToken sets the 'Authorization: Bearer ' header on every request. -func WithToken(token string) core.ClientOption { - return func(opts *core.ClientOptions) { - opts.Token = token - } +// WithMaxAttempts configures the maximum number of retry attempts. +func WithMaxAttempts(attempts uint) *core.MaxAttemptsOption { + return option.WithMaxAttempts(attempts) } -// WithClientName sets the clientName header on every request. +// WithToken sets the 'Authorization: Bearer ' request header. 
+func WithToken(token string) *core.TokenOption { + return option.WithToken(token) +} + +// WithClientName sets the clientName request header. // // The name of the project that is making the request. -func WithClientName(clientName *string) core.ClientOption { - return func(opts *core.ClientOptions) { - opts.ClientName = clientName - } +func WithClientName(clientName *string) *core.ClientNameOption { + return option.WithClientName(clientName) } diff --git a/connectors.go b/connectors.go index fe12998..1e98e69 100644 --- a/connectors.go +++ b/connectors.go @@ -4,46 +4,46 @@ package api type CreateConnectorRequest struct { // A human-readable name for the connector. - Name string `json:"name"` + Name string `json:"name" url:"name"` // A description of the connector. - Description *string `json:"description,omitempty"` + Description *string `json:"description,omitempty" url:"description,omitempty"` // The URL of the connector that will be used to search for documents. - Url string `json:"url"` + Url string `json:"url" url:"url"` // A list of fields to exclude from the prompt (fields remain in the document). - Excludes []string `json:"excludes,omitempty"` + Excludes []string `json:"excludes,omitempty" url:"excludes,omitempty"` // The OAuth 2.0 configuration for the connector. Cannot be specified if service_auth is specified. - Oauth *CreateConnectorOAuth `json:"oauth,omitempty"` + Oauth *CreateConnectorOAuth `json:"oauth,omitempty" url:"oauth,omitempty"` // Whether the connector is active or not. - Active *bool `json:"active,omitempty"` + Active *bool `json:"active,omitempty" url:"active,omitempty"` // Whether a chat request should continue or not if the request to this connector fails. - ContinueOnFailure *bool `json:"continue_on_failure,omitempty"` + ContinueOnFailure *bool `json:"continue_on_failure,omitempty" url:"continue_on_failure,omitempty"` // The service to service authentication configuration for the connector. Cannot be specified if oauth is specified. 
- ServiceAuth *CreateConnectorServiceAuth `json:"service_auth,omitempty"` + ServiceAuth *CreateConnectorServiceAuth `json:"service_auth,omitempty" url:"service_auth,omitempty"` } type ConnectorsListRequest struct { // Maximum number of connectors to return [0, 100]. - Limit *float64 `json:"-"` + Limit *float64 `json:"-" url:"limit,omitempty"` // Number of connectors to skip before returning results [0, inf]. - Offset *float64 `json:"-"` + Offset *float64 `json:"-" url:"offset,omitempty"` } type ConnectorsOAuthAuthorizeRequest struct { // The URL to redirect to after the connector has been authorized. - AfterTokenRedirect *string `json:"-"` + AfterTokenRedirect *string `json:"-" url:"after_token_redirect,omitempty"` } type UpdateConnectorRequest struct { // A human-readable name for the connector. - Name *string `json:"name,omitempty"` + Name *string `json:"name,omitempty" url:"name,omitempty"` // The URL of the connector that will be used to search for documents. - Url *string `json:"url,omitempty"` + Url *string `json:"url,omitempty" url:"url,omitempty"` // A list of fields to exclude from the prompt (fields remain in the document). - Excludes []string `json:"excludes,omitempty"` + Excludes []string `json:"excludes,omitempty" url:"excludes,omitempty"` // The OAuth 2.0 configuration for the connector. Cannot be specified if service_auth is specified. - Oauth *CreateConnectorOAuth `json:"oauth,omitempty"` - Active *bool `json:"active,omitempty"` - ContinueOnFailure *bool `json:"continue_on_failure,omitempty"` + Oauth *CreateConnectorOAuth `json:"oauth,omitempty" url:"oauth,omitempty"` + Active *bool `json:"active,omitempty" url:"active,omitempty"` + ContinueOnFailure *bool `json:"continue_on_failure,omitempty" url:"continue_on_failure,omitempty"` // The service to service authentication configuration for the connector. Cannot be specified if oauth is specified. 
- ServiceAuth *CreateConnectorServiceAuth `json:"service_auth,omitempty"` + ServiceAuth *CreateConnectorServiceAuth `json:"service_auth,omitempty" url:"service_auth,omitempty"` } diff --git a/connectors/client.go b/connectors/client.go index 6cd4249..724402d 100644 --- a/connectors/client.go +++ b/connectors/client.go @@ -10,9 +10,9 @@ import ( fmt "fmt" v2 "github.com/cohere-ai/cohere-go/v2" core "github.com/cohere-ai/cohere-go/v2/core" + option "github.com/cohere-ai/cohere-go/v2/option" io "io" http "net/http" - url "net/url" ) type Client struct { @@ -21,37 +21,47 @@ type Client struct { header http.Header } -func NewClient(opts ...core.ClientOption) *Client { - options := core.NewClientOptions() - for _, opt := range opts { - opt(options) - } +func NewClient(opts ...option.RequestOption) *Client { + options := core.NewRequestOptions(opts...) return &Client{ baseURL: options.BaseURL, - caller: core.NewCaller(options.HTTPClient), - header: options.ToHeader(), + caller: core.NewCaller( + &core.CallerParams{ + Client: options.HTTPClient, + MaxAttempts: options.MaxAttempts, + }, + ), + header: options.ToHeader(), } } // Returns a list of connectors ordered by descending creation date (newer first). See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information. -func (c *Client) List(ctx context.Context, request *v2.ConnectorsListRequest) (*v2.ListConnectorsResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) List( + ctx context.Context, + request *v2.ConnectorsListRequest, + opts ...option.RequestOption, +) (*v2.ListConnectorsResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "connectors" - - queryParams := make(url.Values) - if request.Limit != nil { - queryParams.Add("limit", fmt.Sprintf("%v", *request.Limit)) + if options.BaseURL != "" { + baseURL = options.BaseURL } - if request.Offset != nil { - queryParams.Add("offset", fmt.Sprintf("%v", *request.Offset)) + endpointURL := baseURL + "/" + "v1/connectors" + + queryParams, err := core.QueryValues(request) + if err != nil { + return nil, err } if len(queryParams) > 0 { endpointURL += "?" + queryParams.Encode() } + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) + errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) if err != nil { @@ -84,7 +94,9 @@ func (c *Client) List(ctx context.Context, request *v2.ConnectorsListRequest) (* &core.CallParams{ URL: endpointURL, Method: http.MethodGet, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Response: &response, ErrorDecoder: errorDecoder, }, @@ -95,12 +107,23 @@ func (c *Client) List(ctx context.Context, request *v2.ConnectorsListRequest) (* } // Creates a new connector. The connector is tested during registration and will cancel registration when the test is unsuccessful. See ['Creating and Deploying a Connector'](https://docs.cohere.com/docs/creating-and-deploying-a-connector) for more information. -func (c *Client) Create(ctx context.Context, request *v2.CreateConnectorRequest) (*v2.CreateConnectorResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Create( + ctx context.Context, + request *v2.CreateConnectorRequest, + opts ...option.RequestOption, +) (*v2.CreateConnectorResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "connectors" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/connectors" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -141,7 +164,9 @@ func (c *Client) Create(ctx context.Context, request *v2.CreateConnectorRequest) &core.CallParams{ URL: endpointURL, Method: http.MethodPost, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Request: request, Response: &response, ErrorDecoder: errorDecoder, @@ -153,14 +178,24 @@ func (c *Client) Create(ctx context.Context, request *v2.CreateConnectorRequest) } // Retrieve a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information. -// -// The ID of the connector to retrieve. -func (c *Client) Get(ctx context.Context, id string) (*v2.GetConnectorResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Get( + ctx context.Context, + // The ID of the connector to retrieve. + id string, + opts ...option.RequestOption, +) (*v2.GetConnectorResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"connectors/%v", id) + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/connectors/%v", id) + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -201,7 +236,9 @@ func (c *Client) Get(ctx context.Context, id string) (*v2.GetConnectorResponse, &core.CallParams{ URL: endpointURL, Method: http.MethodGet, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Response: &response, ErrorDecoder: errorDecoder, }, @@ -212,14 +249,24 @@ func (c *Client) Get(ctx context.Context, id string) (*v2.GetConnectorResponse, } // Delete a connector by ID. See ['Connectors'](https://docs.cohere.com/docs/connectors) for more information. -// -// The ID of the connector to delete. -func (c *Client) Delete(ctx context.Context, id string) (v2.DeleteConnectorResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Delete( + ctx context.Context, + // The ID of the connector to delete. + id string, + opts ...option.RequestOption, +) (v2.DeleteConnectorResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"connectors/%v", id) + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/connectors/%v", id) + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -267,7 +314,9 @@ func (c *Client) Delete(ctx context.Context, id string) (v2.DeleteConnectorRespo &core.CallParams{ URL: endpointURL, Method: http.MethodDelete, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Response: &response, ErrorDecoder: errorDecoder, }, @@ -278,14 +327,25 @@ func (c *Client) Delete(ctx context.Context, id string) (v2.DeleteConnectorRespo } // Update a connector by ID. Omitted fields will not be updated. See ['Managing your Connector'](https://docs.cohere.com/docs/managing-your-connector) for more information. -// -// The ID of the connector to update. -func (c *Client) Update(ctx context.Context, id string, request *v2.UpdateConnectorRequest) (*v2.UpdateConnectorResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Update( + ctx context.Context, + // The ID of the connector to update. + id string, + request *v2.UpdateConnectorRequest, + opts ...option.RequestOption, +) (*v2.UpdateConnectorResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"connectors/%v", id) + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/connectors/%v", id) + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -333,7 +393,9 @@ func (c *Client) Update(ctx context.Context, id string, request *v2.UpdateConnec &core.CallParams{ URL: endpointURL, Method: http.MethodPatch, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Request: request, Response: &response, ErrorDecoder: errorDecoder, @@ -345,23 +407,34 @@ func (c *Client) Update(ctx context.Context, id string, request *v2.UpdateConnec } // Authorize the connector with the given ID for the connector oauth app. See ['Connector Authentication'](https://docs.cohere.com/docs/connector-authentication) for more information. -// -// The ID of the connector to authorize. -func (c *Client) OAuthAuthorize(ctx context.Context, id string, request *v2.ConnectorsOAuthAuthorizeRequest) (*v2.OAuthAuthorizeResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) OAuthAuthorize( + ctx context.Context, + // The ID of the connector to authorize. + id string, + request *v2.ConnectorsOAuthAuthorizeRequest, + opts ...option.RequestOption, +) (*v2.OAuthAuthorizeResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"connectors/%v/oauth/authorize", id) + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/connectors/%v/oauth/authorize", id) - queryParams := make(url.Values) - if request.AfterTokenRedirect != nil { - queryParams.Add("after_token_redirect", fmt.Sprintf("%v", *request.AfterTokenRedirect)) + queryParams, err := core.QueryValues(request) + if err != nil { + return nil, err } if len(queryParams) > 0 { endpointURL += "?" + queryParams.Encode() } + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) + errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) if err != nil { @@ -401,7 +474,9 @@ func (c *Client) OAuthAuthorize(ctx context.Context, id string, request *v2.Conn &core.CallParams{ URL: endpointURL, Method: http.MethodPost, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Response: &response, ErrorDecoder: errorDecoder, }, diff --git a/core/client_option.go b/core/client_option.go deleted file mode 100644 index 6a25bed..0000000 --- a/core/client_option.go +++ /dev/null @@ -1,53 +0,0 @@ -// This file was auto-generated by Fern from our API Definition. - -package core - -import ( - fmt "fmt" - http "net/http" -) - -// ClientOption adapts the behavior of the generated client. -type ClientOption func(*ClientOptions) - -// ClientOptions defines all of the possible client options. -// This type is primarily used by the generated code and is -// not meant to be used directly; use ClientOption instead. -type ClientOptions struct { - BaseURL string - HTTPClient HTTPClient - HTTPHeader http.Header - Token string - ClientName *string -} - -// NewClientOptions returns a new *ClientOptions value. 
-// This function is primarily used by the generated code and is -// not meant to be used directly; use ClientOption instead. -func NewClientOptions() *ClientOptions { - return &ClientOptions{ - HTTPClient: http.DefaultClient, - HTTPHeader: make(http.Header), - } -} - -// ToHeader maps the configured client options into a http.Header issued -// on every request. -func (c *ClientOptions) ToHeader() http.Header { - header := c.cloneHeader() - if c.Token != "" { - header.Set("Authorization", "Bearer "+c.Token) - } - if c.ClientName != nil { - header.Set("X-Client-Name", fmt.Sprintf("%v", *c.ClientName)) - } - return header -} - -func (c *ClientOptions) cloneHeader() http.Header { - headers := c.HTTPHeader.Clone() - headers.Set("X-Fern-Language", "Go") - headers.Set("X-Fern-SDK-Name", "github.com/cohere-ai/cohere-go/v2") - headers.Set("X-Fern-SDK-Version", "v2.5.1") - return headers -} diff --git a/core/core.go b/core/core.go index 10e757f..5277d13 100644 --- a/core/core.go +++ b/core/core.go @@ -22,6 +22,21 @@ type HTTPClient interface { Do(*http.Request) (*http.Response, error) } +// MergeHeaders merges the given headers together, where the right +// takes precedence over the left. +func MergeHeaders(left, right http.Header) http.Header { + for key, values := range right { + if len(values) > 1 { + left[key] = values + continue + } + if value := right.Get(key); value != "" { + left.Set(key, value) + } + } + return left +} + // WriteMultipartJSON writes the given value as a JSON part. // This is used to serialize non-primitive multipart properties // (i.e. lists, objects, etc). @@ -78,13 +93,29 @@ type ErrorDecoder func(statusCode int, body io.Reader) error // Caller calls APIs and deserializes their response, if any. type Caller struct { - client HTTPClient + client HTTPClient + retrier *Retrier +} + +// CallerParams represents the parameters used to constrcut a new *Caller. 
+type CallerParams struct { + Client HTTPClient + MaxAttempts uint } -// NewCaller returns a new *Caller backed by the given HTTP client. -func NewCaller(client HTTPClient) *Caller { +// NewCaller returns a new *Caller backed by the given parameters. +func NewCaller(params *CallerParams) *Caller { + var httpClient HTTPClient = http.DefaultClient + if params.Client != nil { + httpClient = params.Client + } + var retryOptions []RetryOption + if params.MaxAttempts > 0 { + retryOptions = append(retryOptions, WithMaxAttempts(params.MaxAttempts)) + } return &Caller{ - client: client, + client: httpClient, + retrier: NewRetrier(retryOptions...), } } @@ -92,7 +123,9 @@ func NewCaller(client HTTPClient) *Caller { type CallParams struct { URL string Method string + MaxAttempts uint Headers http.Header + Client HTTPClient Request interface{} Response interface{} ResponseIsOptional bool @@ -111,7 +144,23 @@ func (c *Caller) Call(ctx context.Context, params *CallParams) error { return err } - resp, err := c.client.Do(req) + client := c.client + if params.Client != nil { + // Use the HTTP client scoped to the request. 
+ client = params.Client + } + + var retryOptions []RetryOption + if params.MaxAttempts > 0 { + retryOptions = append(retryOptions, WithMaxAttempts(params.MaxAttempts)) + } + + resp, err := c.retrier.Run( + client.Do, + req, + params.ErrorDecoder, + retryOptions..., + ) if err != nil { return err } diff --git a/core/core_test.go b/core/core_test.go index 70a59ed..f476f9e 100644 --- a/core/core_test.go +++ b/core/core_test.go @@ -113,7 +113,11 @@ func TestCall(t *testing.T) { server = newTestServer(t, test) client = server.Client() ) - caller := NewCaller(client) + caller := NewCaller( + &CallerParams{ + Client: client, + }, + ) var response *Response err := caller.Call( context.Background(), @@ -137,6 +141,68 @@ func TestCall(t *testing.T) { } } +func TestMergeHeaders(t *testing.T) { + t.Run("both empty", func(t *testing.T) { + merged := MergeHeaders(make(http.Header), make(http.Header)) + assert.Empty(t, merged) + }) + + t.Run("empty left", func(t *testing.T) { + left := make(http.Header) + + right := make(http.Header) + right.Set("X-API-Version", "0.0.1") + + merged := MergeHeaders(left, right) + assert.Equal(t, "0.0.1", merged.Get("X-API-Version")) + }) + + t.Run("empty right", func(t *testing.T) { + left := make(http.Header) + left.Set("X-API-Version", "0.0.1") + + right := make(http.Header) + + merged := MergeHeaders(left, right) + assert.Equal(t, "0.0.1", merged.Get("X-API-Version")) + }) + + t.Run("single value override", func(t *testing.T) { + left := make(http.Header) + left.Set("X-API-Version", "0.0.0") + + right := make(http.Header) + right.Set("X-API-Version", "0.0.1") + + merged := MergeHeaders(left, right) + assert.Equal(t, []string{"0.0.1"}, merged.Values("X-API-Version")) + }) + + t.Run("multiple value override", func(t *testing.T) { + left := make(http.Header) + left.Set("X-API-Versions", "0.0.0") + + right := make(http.Header) + right.Add("X-API-Versions", "0.0.1") + right.Add("X-API-Versions", "0.0.2") + + merged := MergeHeaders(left, right) + 
assert.Equal(t, []string{"0.0.1", "0.0.2"}, merged.Values("X-API-Versions")) + }) + + t.Run("disjoint merge", func(t *testing.T) { + left := make(http.Header) + left.Set("X-API-Tenancy", "test") + + right := make(http.Header) + right.Set("X-API-Version", "0.0.1") + + merged := MergeHeaders(left, right) + assert.Equal(t, []string{"test"}, merged.Values("X-API-Tenancy")) + assert.Equal(t, []string{"0.0.1"}, merged.Values("X-API-Version")) + }) +} + // newTestServer returns a new *httptest.Server configured with the // given test parameters. func newTestServer(t *testing.T, tc *TestCase) *httptest.Server { @@ -206,8 +272,7 @@ func newTestErrorDecoder(t *testing.T) func(int, io.Reader) error { apiError = NewAPIError(statusCode, errors.New(string(raw))) decoder = json.NewDecoder(bytes.NewReader(raw)) ) - switch statusCode { - case 404: + if statusCode == http.StatusNotFound { value := new(NotFoundError) value.APIError = apiError require.NoError(t, decoder.Decode(value)) diff --git a/core/query.go b/core/query.go new file mode 100644 index 0000000..479cbb2 --- /dev/null +++ b/core/query.go @@ -0,0 +1,219 @@ +package core + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "strings" + "time" + + "github.com/google/uuid" +) + +var ( + bytesType = reflect.TypeOf([]byte{}) + queryEncoderType = reflect.TypeOf(new(QueryEncoder)).Elem() + timeType = reflect.TypeOf(time.Time{}) + uuidType = reflect.TypeOf(uuid.UUID{}) +) + +// QueryEncoder is an interface implemented by any type that wishes to encode +// itself into URL values in a non-standard way. +type QueryEncoder interface { + EncodeQueryValues(key string, v *url.Values) error +} + +// QueryValues encodes url.Values from request objects. +// +// Note: This type is inspired by Google's query encoding library, but +// supports far less customization and is tailored to fit this SDK's use case. 
+// +// Ref: https://github.com/google/go-querystring +func QueryValues(v interface{}) (url.Values, error) { + values := make(url.Values) + val := reflect.ValueOf(v) + for val.Kind() == reflect.Ptr { + if val.IsNil() { + return values, nil + } + val = val.Elem() + } + + if v == nil { + return values, nil + } + + if val.Kind() != reflect.Struct { + return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind()) + } + + err := reflectValue(values, val, "") + return values, err +} + +// reflectValue populates the values parameter from the struct fields in val. +// Embedded structs are followed recursively (using the rules defined in the +// Values function documentation) breadth-first. +func reflectValue(values url.Values, val reflect.Value, scope string) error { + typ := val.Type() + for i := 0; i < typ.NumField(); i++ { + sf := typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { + // Skip unexported fields. + continue + } + + sv := val.Field(i) + tag := sf.Tag.Get("url") + if tag == "" || tag == "-" { + continue + } + + name, opts := parseTag(tag) + if name == "" { + name = sf.Name + } + + if scope != "" { + name = scope + "[" + name + "]" + } + + if opts.Contains("omitempty") && isEmptyValue(sv) { + continue + } + + if sv.Type().Implements(queryEncoderType) { + // If sv is a nil pointer and the custom encoder is defined on a non-pointer + // method receiver, set sv to the zero value of the underlying type + if !reflect.Indirect(sv).IsValid() && sv.Type().Elem().Implements(queryEncoderType) { + sv = reflect.New(sv.Type().Elem()) + } + + m := sv.Interface().(QueryEncoder) + if err := m.EncodeQueryValues(name, &values); err != nil { + return err + } + continue + } + + // Recursively dereference pointers, but stop at nil pointers. 
+ for sv.Kind() == reflect.Ptr { + if sv.IsNil() { + break + } + sv = sv.Elem() + } + + if sv.Type() == uuidType || sv.Type() == bytesType || sv.Type() == timeType { + values.Add(name, valueString(sv, opts, sf)) + continue + } + + if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array { + if sv.Len() == 0 { + // Skip if slice or array is empty. + continue + } + for i := 0; i < sv.Len(); i++ { + values.Add(name, valueString(sv.Index(i), opts, sf)) + } + continue + } + + if sv.Kind() == reflect.Struct { + if err := reflectValue(values, sv, name); err != nil { + return err + } + continue + } + + values.Add(name, valueString(sv, opts, sf)) + } + + return nil +} + +// valueString returns the string representation of a value. +func valueString(v reflect.Value, opts tagOptions, sf reflect.StructField) string { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + return "" + } + v = v.Elem() + } + + if v.Type() == timeType { + t := v.Interface().(time.Time) + if format := sf.Tag.Get("format"); format == "date" { + return t.Format("2006-01-02") + } + return t.Format(time.RFC3339) + } + + if v.Type() == uuidType { + u := v.Interface().(uuid.UUID) + return u.String() + } + + if v.Type() == bytesType { + b := v.Interface().([]byte) + return base64.StdEncoding.EncodeToString(b) + } + + return fmt.Sprint(v.Interface()) +} + +// isEmptyValue checks if a value should be considered empty for the purposes +// of omitting fields with the "omitempty" option. 
+func isEmptyValue(v reflect.Value) bool { + type zeroable interface { + IsZero() bool + } + + if !v.IsNil() { + if z, ok := v.Interface().(zeroable); ok { + return z.IsZero() + } + } + + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Invalid, reflect.Complex64, reflect.Complex128, reflect.Chan, reflect.Func, reflect.Struct, reflect.UnsafePointer: + return false + } + + return false +} + +// tagOptions is the string following a comma in a struct field's "url" tag, or +// the empty string. It does not include the leading comma. +type tagOptions []string + +// parseTag splits a struct field's url tag into its name and comma-separated +// options. +func parseTag(tag string) (string, tagOptions) { + s := strings.Split(tag, ",") + return s[0], s[1:] +} + +// Contains checks whether the tagOptions contains the specified option. 
+func (o tagOptions) Contains(option string) bool { + for _, s := range o { + if s == option { + return true + } + } + return false +} diff --git a/core/query_test.go b/core/query_test.go new file mode 100644 index 0000000..130720f --- /dev/null +++ b/core/query_test.go @@ -0,0 +1,134 @@ +package core + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestQueryValues(t *testing.T) { + t.Run("empty optional", func(t *testing.T) { + type nested struct { + Value *string `json:"value,omitempty" url:"value,omitempty"` + } + type example struct { + Nested *nested `json:"nested,omitempty" url:"nested,omitempty"` + } + + values, err := QueryValues(&example{}) + require.NoError(t, err) + assert.Empty(t, values) + }) + + t.Run("empty required", func(t *testing.T) { + type nested struct { + Value *string `json:"value,omitempty" url:"value,omitempty"` + } + type example struct { + Required string `json:"required" url:"required"` + Nested *nested `json:"nested,omitempty" url:"nested,omitempty"` + } + + values, err := QueryValues(&example{}) + require.NoError(t, err) + assert.Equal(t, "required=", values.Encode()) + }) + + t.Run("allow multiple", func(t *testing.T) { + type example struct { + Values []string `json:"values" url:"values"` + } + + values, err := QueryValues( + &example{ + Values: []string{"foo", "bar", "baz"}, + }, + ) + require.NoError(t, err) + assert.Equal(t, "values=foo&values=bar&values=baz", values.Encode()) + }) + + t.Run("nested object", func(t *testing.T) { + type nested struct { + Value *string `json:"value,omitempty" url:"value,omitempty"` + } + type example struct { + Required string `json:"required" url:"required"` + Nested *nested `json:"nested,omitempty" url:"nested,omitempty"` + } + + nestedValue := "nestedValue" + values, err := QueryValues( + &example{ + Required: "requiredValue", + Nested: &nested{ + Value: &nestedValue, + }, + }, + ) + require.NoError(t, err) + 
assert.Equal(t, "nested%5Bvalue%5D=nestedValue&required=requiredValue", values.Encode()) + }) + + t.Run("url unspecified", func(t *testing.T) { + type example struct { + Required string `json:"required" url:"required"` + NotFound string `json:"notFound"` + } + + values, err := QueryValues( + &example{ + Required: "requiredValue", + NotFound: "notFound", + }, + ) + require.NoError(t, err) + assert.Equal(t, "required=requiredValue", values.Encode()) + }) + + t.Run("url ignored", func(t *testing.T) { + type example struct { + Required string `json:"required" url:"required"` + NotFound string `json:"notFound" url:"-"` + } + + values, err := QueryValues( + &example{ + Required: "requiredValue", + NotFound: "notFound", + }, + ) + require.NoError(t, err) + assert.Equal(t, "required=requiredValue", values.Encode()) + }) + + t.Run("datetime", func(t *testing.T) { + type example struct { + DateTime time.Time `json:"dateTime" url:"dateTime"` + } + + values, err := QueryValues( + &example{ + DateTime: time.Date(1994, 3, 16, 12, 34, 56, 0, time.UTC), + }, + ) + require.NoError(t, err) + assert.Equal(t, "dateTime=1994-03-16T12%3A34%3A56Z", values.Encode()) + }) + + t.Run("date", func(t *testing.T) { + type example struct { + DateTime time.Time `json:"dateTime" url:"dateTime" format:"date"` + } + + values, err := QueryValues( + &example{ + DateTime: time.Date(1994, 3, 16, 12, 34, 56, 0, time.UTC), + }, + ) + require.NoError(t, err) + assert.Equal(t, "dateTime=1994-03-16", values.Encode()) + }) +} diff --git a/core/request_option.go b/core/request_option.go new file mode 100644 index 0000000..55389a4 --- /dev/null +++ b/core/request_option.go @@ -0,0 +1,115 @@ +// This file was auto-generated by Fern from our API Definition. + +package core + +import ( + fmt "fmt" + http "net/http" +) + +// RequestOption adapts the behavior of the client or an individual request. 
+type RequestOption interface { + applyRequestOptions(*RequestOptions) +} + +// RequestOptions defines all of the possible request options. +// +// This type is primarily used by the generated code and is not meant +// to be used directly; use the option package instead. +type RequestOptions struct { + BaseURL string + HTTPClient HTTPClient + HTTPHeader http.Header + MaxAttempts uint + Token string + ClientName *string +} + +// NewRequestOptions returns a new *RequestOptions value. +// +// This function is primarily used by the generated code and is not meant +// to be used directly; use RequestOption instead. +func NewRequestOptions(opts ...RequestOption) *RequestOptions { + options := &RequestOptions{ + HTTPHeader: make(http.Header), + } + for _, opt := range opts { + opt.applyRequestOptions(options) + } + return options +} + +// ToHeader maps the configured request options into a http.Header used +// for the request(s). +func (r *RequestOptions) ToHeader() http.Header { + header := r.cloneHeader() + if r.Token != "" { + header.Set("Authorization", "Bearer "+r.Token) + } + if r.ClientName != nil { + header.Set("X-Client-Name", fmt.Sprintf("%v", *r.ClientName)) + } + return header +} + +func (r *RequestOptions) cloneHeader() http.Header { + headers := r.HTTPHeader.Clone() + headers.Set("X-Fern-Language", "Go") + headers.Set("X-Fern-SDK-Name", "github.com/cohere-ai/cohere-go/v2") + headers.Set("X-Fern-SDK-Version", "v2.5.2") + return headers +} + +// BaseURLOption implements the RequestOption interface. +type BaseURLOption struct { + BaseURL string +} + +func (b *BaseURLOption) applyRequestOptions(opts *RequestOptions) { + opts.BaseURL = b.BaseURL +} + +// HTTPClientOption implements the RequestOption interface. +type HTTPClientOption struct { + HTTPClient HTTPClient +} + +func (h *HTTPClientOption) applyRequestOptions(opts *RequestOptions) { + opts.HTTPClient = h.HTTPClient +} + +// HTTPHeaderOption implements the RequestOption interface. 
+type HTTPHeaderOption struct { + HTTPHeader http.Header +} + +func (h *HTTPHeaderOption) applyRequestOptions(opts *RequestOptions) { + opts.HTTPHeader = h.HTTPHeader +} + +// MaxAttemptsOption implements the RequestOption interface. +type MaxAttemptsOption struct { + MaxAttempts uint +} + +func (m *MaxAttemptsOption) applyRequestOptions(opts *RequestOptions) { + opts.MaxAttempts = m.MaxAttempts +} + +// TokenOption implements the RequestOption interface. +type TokenOption struct { + Token string +} + +func (t *TokenOption) applyRequestOptions(opts *RequestOptions) { + opts.Token = t.Token +} + +// ClientNameOption implements the RequestOption interface. +type ClientNameOption struct { + ClientName *string +} + +func (c *ClientNameOption) applyRequestOptions(opts *RequestOptions) { + opts.ClientName = c.ClientName +} diff --git a/core/retrier.go b/core/retrier.go new file mode 100644 index 0000000..ea24916 --- /dev/null +++ b/core/retrier.go @@ -0,0 +1,166 @@ +package core + +import ( + "crypto/rand" + "math/big" + "net/http" + "time" +) + +const ( + defaultRetryAttempts = 2 + minRetryDelay = 500 * time.Millisecond + maxRetryDelay = 5000 * time.Millisecond +) + +// RetryOption adapts the behavior the *Retrier. +type RetryOption func(*retryOptions) + +// RetryFunc is a retriable HTTP function call (i.e. *http.Client.Do). +type RetryFunc func(*http.Request) (*http.Response, error) + +// WithMaxAttempts configures the maximum number of attempts +// of the *Retrier. +func WithMaxAttempts(attempts uint) RetryOption { + return func(opts *retryOptions) { + opts.attempts = attempts + } +} + +// Retrier retries failed requests a configurable number of times with an +// exponential back-off between each retry. +type Retrier struct { + attempts uint +} + +// NewRetrier constructs a new *Retrier with the given options, if any. 
+func NewRetrier(opts ...RetryOption) *Retrier { + options := new(retryOptions) + for _, opt := range opts { + opt(options) + } + attempts := uint(defaultRetryAttempts) + if options.attempts > 0 { + attempts = options.attempts + } + return &Retrier{ + attempts: attempts, + } +} + +// Run issues the request and, upon failure, retries the request if possible. +// +// The request will be retried as long as the request is deemed retriable and the +// number of retry attempts has not grown larger than the configured retry limit. +func (r *Retrier) Run( + fn RetryFunc, + request *http.Request, + errorDecoder ErrorDecoder, + opts ...RetryOption, +) (*http.Response, error) { + options := new(retryOptions) + for _, opt := range opts { + opt(options) + } + maxRetryAttempts := r.attempts + if options.attempts > 0 { + maxRetryAttempts = options.attempts + } + var ( + retryAttempt uint + previousError error + ) + return r.run( + fn, + request, + errorDecoder, + maxRetryAttempts, + retryAttempt, + previousError, + ) +} + +func (r *Retrier) run( + fn RetryFunc, + request *http.Request, + errorDecoder ErrorDecoder, + maxRetryAttempts uint, + retryAttempt uint, + previousError error, +) (*http.Response, error) { + if retryAttempt >= maxRetryAttempts { + return nil, previousError + } + + // If the call has been cancelled, don't issue the request. + if err := request.Context().Err(); err != nil { + return nil, err + } + + response, err := fn(request) + if err != nil { + return nil, err + } + + if r.shouldRetry(response) { + defer response.Body.Close() + + delay, err := r.retryDelay(retryAttempt) + if err != nil { + return nil, err + } + + time.Sleep(delay) + + return r.run( + fn, + request, + errorDecoder, + maxRetryAttempts, + retryAttempt+1, + decodeError(response, errorDecoder), + ) + } + + return response, nil +} + +// shouldRetry returns true if the request should be retried based on the given +// response status code. 
+func (r *Retrier) shouldRetry(response *http.Response) bool { + return response.StatusCode == http.StatusTooManyRequests || + response.StatusCode == http.StatusRequestTimeout || + response.StatusCode == http.StatusConflict || + response.StatusCode >= http.StatusInternalServerError +} + +// retryDelay calculates the delay time in milliseconds based on the retry attempt. +func (r *Retrier) retryDelay(retryAttempt uint) (time.Duration, error) { + // Apply exponential backoff. + delay := minRetryDelay + minRetryDelay*time.Duration(retryAttempt*retryAttempt) + + // Do not allow the number to exceed maxRetryDelay. + if delay > maxRetryDelay { + delay = maxRetryDelay + } + + // Apply some itter by randomizing the value in the range of 75%-100%. + max := big.NewInt(int64(delay / 4)) + jitter, err := rand.Int(rand.Reader, max) + if err != nil { + return 0, err + } + + delay -= time.Duration(jitter.Int64()) + + // Never sleep less than the base sleep seconds. + if delay < minRetryDelay { + delay = minRetryDelay + } + + return delay, nil +} + +type retryOptions struct { + attempts uint +} diff --git a/core/stream.go b/core/stream.go index 33c22b0..5d97e93 100644 --- a/core/stream.go +++ b/core/stream.go @@ -13,13 +13,15 @@ const defaultStreamDelimiter = '\n' // Streamer calls APIs and streams responses using a *Stream. type Streamer[T any] struct { - client HTTPClient + client HTTPClient + retrier *Retrier } // NewStreamer returns a new *Streamer backed by the given caller's HTTP client. 
func NewStreamer[T any](caller *Caller) *Streamer[T] { return &Streamer[T]{ - client: caller.client, + client: caller.client, + retrier: caller.retrier, } } @@ -28,7 +30,9 @@ type StreamParams struct { URL string Method string Delimiter string + MaxAttempts uint Headers http.Header + Client HTTPClient Request interface{} ErrorDecoder ErrorDecoder } @@ -45,7 +49,23 @@ func (s *Streamer[T]) Stream(ctx context.Context, params *StreamParams) (*Stream return nil, err } - resp, err := s.client.Do(req) + client := s.client + if params.Client != nil { + // Use the HTTP client scoped to the request. + client = params.Client + } + + var retryOptions []RetryOption + if params.MaxAttempts > 0 { + retryOptions = append(retryOptions, WithMaxAttempts(params.MaxAttempts)) + } + + resp, err := s.retrier.Run( + client.Do, + req, + params.ErrorDecoder, + retryOptions..., + ) if err != nil { return nil, err } diff --git a/datasets.go b/datasets.go index 284829d..f881330 100644 --- a/datasets.go +++ b/datasets.go @@ -11,39 +11,39 @@ import ( type DatasetsCreateRequest struct { // The name of the uploaded dataset. - Name *string `json:"-"` + Name *string `json:"-" url:"name,omitempty"` // The dataset type, which is used to validate the data. - Type *DatasetType `json:"-"` + Type *DatasetType `json:"-" url:"type,omitempty"` // Indicates if the original file should be stored. - KeepOriginalFile *bool `json:"-"` + KeepOriginalFile *bool `json:"-" url:"keep_original_file,omitempty"` // Indicates whether rows with malformed input should be dropped (instead of failing the validation check). Dropped rows will be returned in the warnings field. - SkipMalformedInput *bool `json:"-"` + SkipMalformedInput *bool `json:"-" url:"skip_malformed_input,omitempty"` // List of names of fields that will be persisted in the Dataset. By default the Dataset will retain only the required fields indicated in the [schema for the corresponding Dataset type](https://docs.cohere.com/docs/datasets#dataset-types). 
For example, datasets of type `embed-input` will drop all fields other than the required `text` field. If any of the fields in `keep_fields` are missing from the uploaded file, Dataset validation will fail. - KeepFields []*string `json:"-"` + KeepFields []*string `json:"-" url:"keep_fields,omitempty"` // List of names of fields that will be persisted in the Dataset. By default the Dataset will retain only the required fields indicated in the [schema for the corresponding Dataset type](https://docs.cohere.com/docs/datasets#dataset-types). For example, Datasets of type `embed-input` will drop all fields other than the required `text` field. If any of the fields in `optional_fields` are missing from the uploaded file, Dataset validation will pass. - OptionalFields []*string `json:"-"` + OptionalFields []*string `json:"-" url:"optional_fields,omitempty"` // Raw .txt uploads will be split into entries using the text_separator value. - TextSeparator *string `json:"-"` + TextSeparator *string `json:"-" url:"text_separator,omitempty"` // The delimiter used for .csv uploads. 
- CsvDelimiter *string `json:"-"` + CsvDelimiter *string `json:"-" url:"csv_delimiter,omitempty"` } type DatasetsListRequest struct { // optional filter by dataset type - DatasetType *string `json:"-"` + DatasetType *string `json:"-" url:"datasetType,omitempty"` // optional filter before a date - Before *time.Time `json:"-"` + Before *time.Time `json:"-" url:"before,omitempty"` // optional filter after a date - After *time.Time `json:"-"` + After *time.Time `json:"-" url:"after,omitempty"` // optional limit to number of results - Limit *string `json:"-"` + Limit *string `json:"-" url:"limit,omitempty"` // optional offset to start of results - Offset *string `json:"-"` + Offset *string `json:"-" url:"offset,omitempty"` } type DatasetsCreateResponse struct { // The dataset ID - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty" url:"id,omitempty"` _rawJSON json.RawMessage } @@ -72,7 +72,7 @@ func (d *DatasetsCreateResponse) String() string { } type DatasetsGetResponse struct { - Dataset *Dataset `json:"dataset,omitempty"` + Dataset *Dataset `json:"dataset,omitempty" url:"dataset,omitempty"` _rawJSON json.RawMessage } @@ -102,7 +102,7 @@ func (d *DatasetsGetResponse) String() string { type DatasetsGetUsageResponse struct { // The total number of bytes used by the organization. 
- OrganizationUsage *string `json:"organization_usage,omitempty"` + OrganizationUsage *string `json:"organization_usage,omitempty" url:"organization_usage,omitempty"` _rawJSON json.RawMessage } @@ -131,7 +131,7 @@ func (d *DatasetsGetUsageResponse) String() string { } type DatasetsListResponse struct { - Datasets []*Dataset `json:"datasets,omitempty"` + Datasets []*Dataset `json:"datasets,omitempty" url:"datasets,omitempty"` _rawJSON json.RawMessage } diff --git a/datasets/client.go b/datasets/client.go index 9aedae0..9d26e51 100644 --- a/datasets/client.go +++ b/datasets/client.go @@ -8,11 +8,10 @@ import ( fmt "fmt" v2 "github.com/cohere-ai/cohere-go/v2" core "github.com/cohere-ai/cohere-go/v2/core" + option "github.com/cohere-ai/cohere-go/v2/option" io "io" multipart "mime/multipart" http "net/http" - url "net/url" - time "time" ) type Client struct { @@ -21,54 +20,57 @@ type Client struct { header http.Header } -func NewClient(opts ...core.ClientOption) *Client { - options := core.NewClientOptions() - for _, opt := range opts { - opt(options) - } +func NewClient(opts ...option.RequestOption) *Client { + options := core.NewRequestOptions(opts...) return &Client{ baseURL: options.BaseURL, - caller: core.NewCaller(options.HTTPClient), - header: options.ToHeader(), + caller: core.NewCaller( + &core.CallerParams{ + Client: options.HTTPClient, + MaxAttempts: options.MaxAttempts, + }, + ), + header: options.ToHeader(), } } // List datasets that have been created. -func (c *Client) List(ctx context.Context, request *v2.DatasetsListRequest) (*v2.DatasetsListResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) List( + ctx context.Context, + request *v2.DatasetsListRequest, + opts ...option.RequestOption, +) (*v2.DatasetsListResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "datasets" - - queryParams := make(url.Values) - if request.DatasetType != nil { - queryParams.Add("datasetType", fmt.Sprintf("%v", *request.DatasetType)) - } - if request.Before != nil { - queryParams.Add("before", fmt.Sprintf("%v", request.Before.Format(time.RFC3339))) + if options.BaseURL != "" { + baseURL = options.BaseURL } - if request.After != nil { - queryParams.Add("after", fmt.Sprintf("%v", request.After.Format(time.RFC3339))) - } - if request.Limit != nil { - queryParams.Add("limit", fmt.Sprintf("%v", *request.Limit)) - } - if request.Offset != nil { - queryParams.Add("offset", fmt.Sprintf("%v", *request.Offset)) + endpointURL := baseURL + "/" + "v1/datasets" + + queryParams, err := core.QueryValues(request) + if err != nil { + return nil, err } if len(queryParams) > 0 { endpointURL += "?" + queryParams.Encode() } + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) + var response *v2.DatasetsListResponse if err := c.caller.Call( ctx, &core.CallParams{ - URL: endpointURL, - Method: http.MethodGet, - Headers: c.header, - Response: &response, + URL: endpointURL, + Method: http.MethodGet, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, + Response: &response, }, ); err != nil { return nil, err @@ -77,42 +79,34 @@ func (c *Client) List(ctx context.Context, request *v2.DatasetsListRequest) (*v2 } // Create a dataset by uploading a file. See ['Dataset Creation'](https://docs.cohere.com/docs/datasets#dataset-creation) for more information. 
-func (c *Client) Create(ctx context.Context, data io.Reader, evalData io.Reader, request *v2.DatasetsCreateRequest) (*v2.DatasetsCreateResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Create( + ctx context.Context, + data io.Reader, + evalData io.Reader, + request *v2.DatasetsCreateRequest, + opts ...option.RequestOption, +) (*v2.DatasetsCreateResponse, error) { + options := core.NewRequestOptions(opts...) + + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "datasets" - - queryParams := make(url.Values) - if request.Name != nil { - queryParams.Add("name", fmt.Sprintf("%v", *request.Name)) - } - if request.Type != nil { - queryParams.Add("type", fmt.Sprintf("%v", *request.Type)) - } - if request.KeepOriginalFile != nil { - queryParams.Add("keep_original_file", fmt.Sprintf("%v", *request.KeepOriginalFile)) - } - if request.SkipMalformedInput != nil { - queryParams.Add("skip_malformed_input", fmt.Sprintf("%v", *request.SkipMalformedInput)) - } - for _, value := range request.KeepFields { - queryParams.Add("keep_fields", fmt.Sprintf("%v", *value)) + if options.BaseURL != "" { + baseURL = options.BaseURL } - for _, value := range request.OptionalFields { - queryParams.Add("optional_fields", fmt.Sprintf("%v", *value)) - } - if request.TextSeparator != nil { - queryParams.Add("text_separator", fmt.Sprintf("%v", *request.TextSeparator)) - } - if request.CsvDelimiter != nil { - queryParams.Add("csv_delimiter", fmt.Sprintf("%v", *request.CsvDelimiter)) + endpointURL := baseURL + "/" + "v1/datasets" + + queryParams, err := core.QueryValues(request) + if err != nil { + return nil, err } if len(queryParams) > 0 { endpointURL += "?" 
+ queryParams.Encode() } + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) + var response *v2.DatasetsCreateResponse requestBuffer := bytes.NewBuffer(nil) writer := multipart.NewWriter(requestBuffer) @@ -141,16 +135,18 @@ func (c *Client) Create(ctx context.Context, data io.Reader, evalData io.Reader, if err := writer.Close(); err != nil { return nil, err } - c.header.Set("Content-Type", writer.FormDataContentType()) + headers.Set("Content-Type", writer.FormDataContentType()) if err := c.caller.Call( ctx, &core.CallParams{ - URL: endpointURL, - Method: http.MethodPost, - Headers: c.header, - Request: requestBuffer, - Response: &response, + URL: endpointURL, + Method: http.MethodPost, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, + Request: requestBuffer, + Response: &response, }, ); err != nil { return nil, err @@ -159,21 +155,33 @@ func (c *Client) Create(ctx context.Context, data io.Reader, evalData io.Reader, } // View the dataset storage usage for your Organization. Each Organization can have up to 10GB of storage across all their users. -func (c *Client) GetUsage(ctx context.Context) (*v2.DatasetsGetUsageResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) GetUsage( + ctx context.Context, + opts ...option.RequestOption, +) (*v2.DatasetsGetUsageResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "datasets/usage" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/datasets/usage" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) var response *v2.DatasetsGetUsageResponse if err := c.caller.Call( ctx, &core.CallParams{ - URL: endpointURL, - Method: http.MethodGet, - Headers: c.header, - Response: &response, + URL: endpointURL, + Method: http.MethodGet, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, + Response: &response, }, ); err != nil { return nil, err @@ -182,21 +190,34 @@ func (c *Client) GetUsage(ctx context.Context) (*v2.DatasetsGetUsageResponse, er } // Retrieve a dataset by ID. See ['Datasets'](https://docs.cohere.com/docs/datasets) for more information. -func (c *Client) Get(ctx context.Context, id string) (*v2.DatasetsGetResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Get( + ctx context.Context, + id string, + opts ...option.RequestOption, +) (*v2.DatasetsGetResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"datasets/%v", id) + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/datasets/%v", id) + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) var response *v2.DatasetsGetResponse if err := c.caller.Call( ctx, &core.CallParams{ - URL: endpointURL, - Method: http.MethodGet, - Headers: c.header, - Response: &response, + URL: endpointURL, + Method: http.MethodGet, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, + Response: &response, }, ); err != nil { return nil, err @@ -205,21 +226,34 @@ func (c *Client) Get(ctx context.Context, id string) (*v2.DatasetsGetResponse, e } // Delete a dataset by ID. Datasets are automatically deleted after 30 days, but they can also be deleted manually. -func (c *Client) Delete(ctx context.Context, id string) (map[string]interface{}, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Delete( + ctx context.Context, + id string, + opts ...option.RequestOption, +) (map[string]interface{}, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"datasets/%v", id) + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/datasets/%v", id) + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) var response map[string]interface{} if err := c.caller.Call( ctx, &core.CallParams{ - URL: endpointURL, - Method: http.MethodDelete, - Headers: c.header, - Response: &response, + URL: endpointURL, + Method: http.MethodDelete, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, + Response: &response, }, ); err != nil { return nil, err diff --git a/embed_jobs.go b/embed_jobs.go index 05504fd..ace88b4 100644 --- a/embed_jobs.go +++ b/embed_jobs.go @@ -15,16 +15,16 @@ type CreateEmbedJobRequest struct { // - `embed-multilingual-v3.0` : 1024 // - `embed-english-light-v3.0` : 384 // - `embed-multilingual-light-v3.0` : 384 - Model string `json:"model"` + Model string `json:"model" url:"model"` // ID of a [Dataset](https://docs.cohere.com/docs/datasets). The Dataset must be of type `embed-input` and must have a validation status `Validated` - DatasetId string `json:"dataset_id"` - InputType EmbedInputType `json:"input_type,omitempty"` + DatasetId string `json:"dataset_id" url:"dataset_id"` + InputType EmbedInputType `json:"input_type,omitempty" url:"input_type,omitempty"` // The name of the embed job. - Name *string `json:"name,omitempty"` + Name *string `json:"name,omitempty" url:"name,omitempty"` // One of `START|END` to specify how the API will handle inputs longer than the maximum token length. // // Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. 
- Truncate *CreateEmbedJobRequestTruncate `json:"truncate,omitempty"` + Truncate *CreateEmbedJobRequestTruncate `json:"truncate,omitempty" url:"truncate,omitempty"` } // One of `START|END` to specify how the API will handle inputs longer than the maximum token length. diff --git a/embedjobs/client.go b/embedjobs/client.go index 0ddd31b..be9f992 100644 --- a/embedjobs/client.go +++ b/embedjobs/client.go @@ -10,6 +10,7 @@ import ( fmt "fmt" v2 "github.com/cohere-ai/cohere-go/v2" core "github.com/cohere-ai/cohere-go/v2/core" + option "github.com/cohere-ai/cohere-go/v2/option" io "io" http "net/http" ) @@ -20,25 +21,37 @@ type Client struct { header http.Header } -func NewClient(opts ...core.ClientOption) *Client { - options := core.NewClientOptions() - for _, opt := range opts { - opt(options) - } +func NewClient(opts ...option.RequestOption) *Client { + options := core.NewRequestOptions(opts...) return &Client{ baseURL: options.BaseURL, - caller: core.NewCaller(options.HTTPClient), - header: options.ToHeader(), + caller: core.NewCaller( + &core.CallerParams{ + Client: options.HTTPClient, + MaxAttempts: options.MaxAttempts, + }, + ), + header: options.ToHeader(), } } // The list embed job endpoint allows users to view all embed jobs history for that specific user. -func (c *Client) List(ctx context.Context) (*v2.ListEmbedJobResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) List( + ctx context.Context, + opts ...option.RequestOption, +) (*v2.ListEmbedJobResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "embed-jobs" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/embed-jobs" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -72,7 +85,9 @@ func (c *Client) List(ctx context.Context) (*v2.ListEmbedJobResponse, error) { &core.CallParams{ URL: endpointURL, Method: http.MethodGet, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Response: &response, ErrorDecoder: errorDecoder, }, @@ -83,12 +98,23 @@ func (c *Client) List(ctx context.Context) (*v2.ListEmbedJobResponse, error) { } // This API launches an async Embed job for a [Dataset](https://docs.cohere.com/docs/datasets) of type `embed-input`. The result of a completed embed job is new Dataset of type `embed-output`, which contains the original text entries and the corresponding embeddings. -func (c *Client) Create(ctx context.Context, request *v2.CreateEmbedJobRequest) (*v2.CreateEmbedJobResponse, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Create( + ctx context.Context, + request *v2.CreateEmbedJobRequest, + opts ...option.RequestOption, +) (*v2.CreateEmbedJobResponse, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := baseURL + "/" + "embed-jobs" + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := baseURL + "/" + "v1/embed-jobs" + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -122,7 +148,9 @@ func (c *Client) Create(ctx context.Context, request *v2.CreateEmbedJobRequest) &core.CallParams{ URL: endpointURL, Method: http.MethodPost, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Request: request, Response: &response, ErrorDecoder: errorDecoder, @@ -134,14 +162,24 @@ func (c *Client) Create(ctx context.Context, request *v2.CreateEmbedJobRequest) } // This API retrieves the details about an embed job started by the same user. -// -// The ID of the embed job to retrieve. -func (c *Client) Get(ctx context.Context, id string) (*v2.EmbedJob, error) { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Get( + ctx context.Context, + // The ID of the embed job to retrieve. + id string, + opts ...option.RequestOption, +) (*v2.EmbedJob, error) { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"embed-jobs/%v", id) + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/embed-jobs/%v", id) + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -182,7 +220,9 @@ func (c *Client) Get(ctx context.Context, id string) (*v2.EmbedJob, error) { &core.CallParams{ URL: endpointURL, Method: http.MethodGet, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, Response: &response, ErrorDecoder: errorDecoder, }, @@ -193,14 +233,24 @@ func (c *Client) Get(ctx context.Context, id string) (*v2.EmbedJob, error) { } // This API allows users to cancel an active embed job. Once invoked, the embedding process will be terminated, and users will be charged for the embeddings processed up to the cancellation point. It's important to note that partial results will not be available to users after cancellation. -// -// The ID of the embed job to cancel. -func (c *Client) Cancel(ctx context.Context, id string) error { - baseURL := "https://api.cohere.ai/v1" +func (c *Client) Cancel( + ctx context.Context, + // The ID of the embed job to cancel. + id string, + opts ...option.RequestOption, +) error { + options := core.NewRequestOptions(opts...) 
+ + baseURL := "https://api.cohere.ai" if c.baseURL != "" { baseURL = c.baseURL } - endpointURL := fmt.Sprintf(baseURL+"/"+"embed-jobs/%v/cancel", id) + if options.BaseURL != "" { + baseURL = options.BaseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"v1/embed-jobs/%v/cancel", id) + + headers := core.MergeHeaders(c.header.Clone(), options.ToHeader()) errorDecoder := func(statusCode int, body io.Reader) error { raw, err := io.ReadAll(body) @@ -240,7 +290,9 @@ func (c *Client) Cancel(ctx context.Context, id string) error { &core.CallParams{ URL: endpointURL, Method: http.MethodPost, - Headers: c.header, + MaxAttempts: options.MaxAttempts, + Headers: headers, + Client: options.HTTPClient, ErrorDecoder: errorDecoder, }, ); err != nil { diff --git a/environments.go b/environments.go index c2ad074..1e42718 100644 --- a/environments.go +++ b/environments.go @@ -4,10 +4,10 @@ package api // Environments defines all of the API environments. // These values can be used with the WithBaseURL -// ClientOption to override the client's default environment, +// RequestOption to override the client's default environment, // if any. 
var Environments = struct { Production string }{ - Production: "https://api.cohere.ai/v1", + Production: "https://api.cohere.ai", } diff --git a/go.mod b/go.mod index 7437136..ac8fdef 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,10 @@ module github.com/cohere-ai/cohere-go/v2 go 1.18 -require github.com/stretchr/testify v1.7.0 +require ( + github.com/google/uuid v1.4.0 + github.com/stretchr/testify v1.7.0 +) require ( github.com/davecgh/go-spew v1.1.0 // indirect diff --git a/go.sum b/go.sum index fc3dd9e..b3766d4 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= diff --git a/option/request_option.go b/option/request_option.go new file mode 100644 index 0000000..db015d9 --- /dev/null +++ b/option/request_option.go @@ -0,0 +1,57 @@ +// This file was auto-generated by Fern from our API Definition. + +package option + +import ( + core "github.com/cohere-ai/cohere-go/v2/core" + http "net/http" +) + +// RequestOption adapts the behavior of an individual request. +type RequestOption = core.RequestOption + +// WithBaseURL sets the base URL, overriding the default +// environment, if any. +func WithBaseURL(baseURL string) *core.BaseURLOption { + return &core.BaseURLOption{ + BaseURL: baseURL, + } +} + +// WithHTTPClient uses the given HTTPClient to issue the request.
+func WithHTTPClient(httpClient core.HTTPClient) *core.HTTPClientOption { + return &core.HTTPClientOption{ + HTTPClient: httpClient, + } +} + +// WithHTTPHeader adds the given http.Header to the request. +func WithHTTPHeader(httpHeader http.Header) *core.HTTPHeaderOption { + return &core.HTTPHeaderOption{ + // Clone the headers so they can't be modified after the option call. + HTTPHeader: httpHeader.Clone(), + } +} + +// WithMaxAttempts configures the maximum number of retry attempts. +func WithMaxAttempts(attempts uint) *core.MaxAttemptsOption { + return &core.MaxAttemptsOption{ + MaxAttempts: attempts, + } +} + +// WithToken sets the 'Authorization: Bearer ' request header. +func WithToken(token string) *core.TokenOption { + return &core.TokenOption{ + Token: token, + } +} + +// WithClientName sets the clientName request header. +// +// The name of the project that is making the request. +func WithClientName(clientName *string) *core.ClientNameOption { + return &core.ClientNameOption{ + ClientName: clientName, + } +} diff --git a/tests/sdk_test.go b/tests/sdk_test.go new file mode 100644 index 0000000..b2a8faf --- /dev/null +++ b/tests/sdk_test.go @@ -0,0 +1,348 @@ +package tests + +import ( + "context" + "errors" + "io" + "os" + "strings" + "testing" + + cohere "github.com/cohere-ai/cohere-go/v2" + client "github.com/cohere-ai/cohere-go/v2/client" + "github.com/stretchr/testify/require" +) + +type MyReader struct { + io.Reader + name string +} + +func (m *MyReader) Name() string { + return m.name +} + +func strPointer(s string) *string { + return &s +} + +func TestNewClient(t *testing.T) { + client := client.NewClient(client.WithToken(os.Getenv("COHERE_API_KEY"))) + + t.Run("TestGenerate", func(t *testing.T) { + prediction, err := client.Generate( + context.TODO(), + &cohere.GenerateRequest{ + Prompt: "count with me!", + }, + ) + + require.NoError(t, err) + print(prediction) + }) + + t.Run("TestGenerateStream", func(t *testing.T) { + stream, err := 
client.GenerateStream( + context.TODO(), + &cohere.GenerateStreamRequest{ + Prompt: "Cohere is", + }, + ) + + require.NoError(t, err) + + // Make sure to close the stream when you're done reading. + // This is easily handled with defer. + defer stream.Close() + + for { + message, err := stream.Recv() + + if errors.Is(err, io.EOF) { + // An io.EOF error means the server is done sending messages + // and should be treated as a success. + break + } + + if message.TextGeneration != nil { + print(message.TextGeneration.Text) + } + } + }) + + // Test Chat + t.Run("TestChat", func(t *testing.T) { + chat, err := client.Chat( + context.TODO(), + &cohere.ChatRequest{ + Message: "2", + }, + ) + + require.NoError(t, err) + print(chat) + }) + + // Test ChatStream + t.Run("TestChatStream", func(t *testing.T) { + stream, err := client.ChatStream( + context.TODO(), + &cohere.ChatStreamRequest{ + Message: "Cohere is", + }, + ) + + require.NoError(t, err) + + // Make sure to close the stream when you're done reading. + // This is easily handled with defer. + defer stream.Close() + + for { + message, err := stream.Recv() + + if errors.Is(err, io.EOF) { + // An io.EOF error means the server is done sending messages + // and should be treated as a success. 
+ break + } + + if message.TextGeneration != nil { + print(message.TextGeneration.Text) + } + } + }) + + t.Run("TestClassify", func(t *testing.T) { + classify, err := client.Classify( + context.TODO(), + &cohere.ClassifyRequest{ + Examples: []*cohere.ClassifyExample{ + { + Text: strPointer("orange"), + Label: strPointer("fruit"), + }, + { + Text: strPointer("pear"), + Label: strPointer("fruit"), + }, + { + Text: strPointer("lettuce"), + Label: strPointer("vegetable"), + }, + { + Text: strPointer("cauliflower"), + Label: strPointer("vegetable"), + }, + }, + Inputs: []string{"Abiu"}, + }, + ) + + require.NoError(t, err) + print(classify) + }) + + t.Run("TestTokenizeDetokenize", func(t *testing.T) { + str := "token mctoken face" + + tokenise, err := client.Tokenize( + context.TODO(), + &cohere.TokenizeRequest{ + Text: str, + Model: strPointer("base"), + }, + ) + + require.NoError(t, err) + print(tokenise) + + detokenise, err := client.Detokenize( + context.TODO(), + &cohere.DetokenizeRequest{ + Tokens: tokenise.Tokens, + }) + + require.NoError(t, err) + print(detokenise) + + require.Equal(t, str, detokenise.Text) + }) + + t.Run("TestSummarize", func(t *testing.T) { + summarise, err := client.Summarize( + context.TODO(), + &cohere.SummarizeRequest{ + Text: "the quick brown fox jumped over the lazy dog and then the dog jumped over the fox the quick brown fox jumped over the lazy dog the quick brown fox jumped over the lazy dog the quick brown fox jumped over the lazy dog the quick brown fox jumped over the lazy dog", + }) + + require.NoError(t, err) + print(summarise) + }) + + t.Run("TestRerank", func(t *testing.T) { + rerank, err := client.Rerank( + context.TODO(), + &cohere.RerankRequest{ + Query: "What is the capital of the United States?", + Documents: []*cohere.RerankRequestDocumentsItem{ + cohere.NewRerankRequestDocumentsItemFromString("Carson City is the capital city of the American state of Nevada."), + cohere.NewRerankRequestDocumentsItemFromString("The 
Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean. Its capital is Saipan."), + cohere.NewRerankRequestDocumentsItemFromString("Washington, D.C. (also known as simply Washington or D.C., and officially as the District of Columbia) is the capital of the United States. It is a federal district."), + cohere.NewRerankRequestDocumentsItemFromString("Capital punishment (the death penalty) has existed in the United States since beforethe United States was a country. As of 2017, capital punishment is legal in 30 of the 50 states."), + }, + }) + + require.NoError(t, err) + print(rerank) + }) + + t.Run("TestEmbed", func(t *testing.T) { + embed, err := client.Embed( + context.TODO(), + &cohere.EmbedRequest{ + Texts: []string{"hello", "goodbye"}, + Model: strPointer("embed-english-v3.0"), + InputType: cohere.EmbedInputTypeSearchDocument.Ptr(), + }) + + require.NoError(t, err) + print(embed) + }) + + t.Run("TestCreateDataset", func(t *testing.T) { + t.Skip("While we have issues with dataset upload") + + dataset, err := client.Datasets.Create( + context.TODO(), + &MyReader{Reader: strings.NewReader(`{"text": "The quick brown fox jumps over the lazy dog"}`), name: "test.jsonl"}, + &MyReader{Reader: strings.NewReader(""), name: "a.jsonl"}, + &cohere.DatasetsCreateRequest{ + Name: strPointer("prompt-completion-dataset"), + Type: cohere.DatasetTypeEmbedResult.Ptr(), + }) + + require.NoError(t, err) + print(dataset) + }) + + t.Run("TestListDatasets", func(t *testing.T) { + datasets, err := client.Datasets.List( + context.TODO(), + &cohere.DatasetsListRequest{}) + + require.NoError(t, err) + print(datasets) + }) + + t.Run("TestGetDatasetUsage", func(t *testing.T) { + t.Skip("While we have issues with dataset upload") + dataset_usage, err := client.Datasets.GetUsage(context.TODO()) + + require.NoError(t, err) + print(dataset_usage) + }) + + t.Run("TestGetDataset", func(t *testing.T) { + t.Skip("While we have issues with dataset upload") + dataset, 
err := client.Datasets.Get(context.TODO(), "id") + + require.NoError(t, err) + print(dataset) + }) + + t.Run("TestUpdateDataset", func(t *testing.T) { + t.Skip("While we have issues with dataset upload") + _, err := client.Datasets.Delete(context.TODO(), "id") + require.NoError(t, err) + }) + + t.Run("TestCreateEmbedJob", func(t *testing.T) { + t.Skip("While we have issues with dataset upload") + job, err := client.EmbedJobs.Create( + context.TODO(), + &cohere.CreateEmbedJobRequest{ + DatasetId: "id", + InputType: cohere.EmbedInputTypeSearchDocument, + }) + + require.NoError(t, err) + print(job) + }) + + t.Run("TestListEmbedJobs", func(t *testing.T) { + embed_jobs, err := client.EmbedJobs.List(context.TODO()) + + require.NoError(t, err) + print(embed_jobs) + }) + + t.Run("TestGetEmbedJob", func(t *testing.T) { + t.Skip("While we have issues with dataset upload") + embed_job, err := client.EmbedJobs.Get(context.TODO(), "id") + + require.NoError(t, err) + print(embed_job) + }) + + t.Run("TestCancelEmbedJob", func(t *testing.T) { + t.Skip("While we have issues with dataset upload") + err := client.EmbedJobs.Cancel(context.TODO(), "id") + + require.NoError(t, err) + }) + + t.Run("TestConnectorCRUD", func(t *testing.T) { + connector, err := client.Connectors.Create( + context.TODO(), + &cohere.CreateConnectorRequest{ + Name: "Example connector", + Url: "https://dummy-connector-o5btz7ucgq-uc.a.run.app/search", + ServiceAuth: &cohere.CreateConnectorServiceAuth{ + Token: "dummy-connector-token", + Type: "bearer", + }, + }) + + require.NoError(t, err) + print(connector) + + updated_connector, err := client.Connectors.Update( + context.TODO(), + connector.Connector.Id, + &cohere.UpdateConnectorRequest{ + Name: strPointer("Example connector renamed"), + }) + + require.NoError(t, err) + print(updated_connector) + + my_connector, err := client.Connectors.Get(context.TODO(), connector.Connector.Id) + + require.NoError(t, err) + print(my_connector) + + connectors, err := 
client.Connectors.List( + context.TODO(), + &cohere.ConnectorsListRequest{}) + + require.NoError(t, err) + print(connectors) + + oauth, err := client.Connectors.OAuthAuthorize( + context.TODO(), + connector.Connector.Id, + &cohere.ConnectorsOAuthAuthorizeRequest{ + AfterTokenRedirect: strPointer("https://test.com"), + }) + + // find a way to test this + require.Error(t, err) + print(oauth) + + delete, err := client.Connectors.Delete(context.TODO(), connector.Connector.Id) + + require.NoError(t, err) + print(delete) + }) +} diff --git a/types.go b/types.go index 6f08540..5c723b7 100644 --- a/types.go +++ b/types.go @@ -12,21 +12,21 @@ import ( type ChatRequest struct { // Accepts a string. // The chat message from the user to the model. - Message string `json:"message"` + Message string `json:"message" url:"message"` // Defaults to `command`. // // The identifier of the model, which can be one of the existing Cohere models or the full ID for a [fine-tuned custom model](https://docs.cohere.com/docs/chat-fine-tuning). // // Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models). - Model *string `json:"model,omitempty"` + Model *string `json:"model,omitempty" url:"model,omitempty"` // When specified, the default Cohere preamble will be replaced with the provided one. - PreambleOverride *string `json:"preamble_override,omitempty"` + PreambleOverride *string `json:"preamble_override,omitempty" url:"preamble_override,omitempty"` // A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`. - ChatHistory []*ChatMessage `json:"chat_history,omitempty"` + ChatHistory []*ChatMessage `json:"chat_history,omitempty" url:"chat_history,omitempty"` // An alternative to `chat_history`. 
Previous conversations can be resumed by providing the conversation's identifier. The contents of `message` and the model's response will be stored as part of this conversation. // // If a conversation with this id does not already exist, a new conversation will be created. - ConversationId *string `json:"conversation_id,omitempty"` + ConversationId *string `json:"conversation_id,omitempty" url:"conversation_id,omitempty"` // Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. // // Dictates how the prompt will be constructed. @@ -34,26 +34,40 @@ type ChatRequest struct { // With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. // // With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned. - PromptTruncation *ChatRequestPromptTruncation `json:"prompt_truncation,omitempty"` + PromptTruncation *ChatRequestPromptTruncation `json:"prompt_truncation,omitempty" url:"prompt_truncation,omitempty"` // Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/docs/creating-and-deploying-a-connector) one. // // When specified, the model's reply will be enriched with information found by quering each of the connectors (RAG). - Connectors []*ChatConnector `json:"connectors,omitempty"` + Connectors []*ChatConnector `json:"connectors,omitempty" url:"connectors,omitempty"` // Defaults to `false`. // // When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated. 
- SearchQueriesOnly *bool `json:"search_queries_only,omitempty"` + SearchQueriesOnly *bool `json:"search_queries_only,omitempty" url:"search_queries_only,omitempty"` // A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information. - Documents []ChatDocument `json:"documents,omitempty"` + Documents []ChatDocument `json:"documents,omitempty" url:"documents,omitempty"` // Defaults to `"accurate"`. // // Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results. - CitationQuality *ChatRequestCitationQuality `json:"citation_quality,omitempty"` + CitationQuality *ChatRequestCitationQuality `json:"citation_quality,omitempty" url:"citation_quality,omitempty"` // Defaults to `0.3`. // // A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations. - Temperature *float64 `json:"temperature,omitempty"` - stream bool + // + // Randomness can be further maximized by increasing the value of the `p` parameter. + Temperature *float64 `json:"temperature,omitempty" url:"temperature,omitempty"` + // The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. + MaxTokens *int `json:"max_tokens,omitempty" url:"max_tokens,omitempty"` + // Ensures only the top `k` most likely tokens are considered for generation at each step. + // Defaults to `0`, min value of `0`, max value of `500`. + K *int `json:"k,omitempty" url:"k,omitempty"` + // Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`. + // Defaults to `0.75`. 
min value of `0.01`, max value of `0.99`. + P *float64 `json:"p,omitempty" url:"p,omitempty"` + // Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" url:"frequency_penalty,omitempty"` + // Defaults to `0.0`, min value of `0.0`, max value of `1.0`. Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + PresencePenalty *float64 `json:"presence_penalty,omitempty" url:"presence_penalty,omitempty"` + stream bool } func (c *ChatRequest) Stream() bool { @@ -86,21 +100,21 @@ func (c *ChatRequest) MarshalJSON() ([]byte, error) { type ChatStreamRequest struct { // Accepts a string. // The chat message from the user to the model. - Message string `json:"message"` + Message string `json:"message" url:"message"` // Defaults to `command`. // // The identifier of the model, which can be one of the existing Cohere models or the full ID for a [fine-tuned custom model](https://docs.cohere.com/docs/chat-fine-tuning). // // Compatible Cohere models are `command` and `command-light` as well as the experimental `command-nightly` and `command-light-nightly` variants. Read more about [Cohere models](https://docs.cohere.com/docs/models). - Model *string `json:"model,omitempty"` + Model *string `json:"model,omitempty" url:"model,omitempty"` // When specified, the default Cohere preamble will be replaced with the provided one. 
- PreambleOverride *string `json:"preamble_override,omitempty"` + PreambleOverride *string `json:"preamble_override,omitempty" url:"preamble_override,omitempty"` // A list of previous messages between the user and the model, meant to give the model conversational context for responding to the user's `message`. - ChatHistory []*ChatMessage `json:"chat_history,omitempty"` + ChatHistory []*ChatMessage `json:"chat_history,omitempty" url:"chat_history,omitempty"` // An alternative to `chat_history`. Previous conversations can be resumed by providing the conversation's identifier. The contents of `message` and the model's response will be stored as part of this conversation. // // If a conversation with this id does not already exist, a new conversation will be created. - ConversationId *string `json:"conversation_id,omitempty"` + ConversationId *string `json:"conversation_id,omitempty" url:"conversation_id,omitempty"` // Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. // // Dictates how the prompt will be constructed. @@ -108,26 +122,40 @@ type ChatStreamRequest struct { // With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. // // With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned. - PromptTruncation *ChatStreamRequestPromptTruncation `json:"prompt_truncation,omitempty"` + PromptTruncation *ChatStreamRequestPromptTruncation `json:"prompt_truncation,omitempty" url:"prompt_truncation,omitempty"` // Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/docs/creating-and-deploying-a-connector) one. 
// // When specified, the model's reply will be enriched with information found by quering each of the connectors (RAG). - Connectors []*ChatConnector `json:"connectors,omitempty"` + Connectors []*ChatConnector `json:"connectors,omitempty" url:"connectors,omitempty"` // Defaults to `false`. // // When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated. - SearchQueriesOnly *bool `json:"search_queries_only,omitempty"` + SearchQueriesOnly *bool `json:"search_queries_only,omitempty" url:"search_queries_only,omitempty"` // A list of relevant documents that the model can use to enrich its reply. See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information. - Documents []ChatDocument `json:"documents,omitempty"` + Documents []ChatDocument `json:"documents,omitempty" url:"documents,omitempty"` // Defaults to `"accurate"`. // // Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results. - CitationQuality *ChatStreamRequestCitationQuality `json:"citation_quality,omitempty"` + CitationQuality *ChatStreamRequestCitationQuality `json:"citation_quality,omitempty" url:"citation_quality,omitempty"` // Defaults to `0.3`. // // A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations. - Temperature *float64 `json:"temperature,omitempty"` - stream bool + // + // Randomness can be further maximized by increasing the value of the `p` parameter. + Temperature *float64 `json:"temperature,omitempty" url:"temperature,omitempty"` + // The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. 
+ MaxTokens *int `json:"max_tokens,omitempty" url:"max_tokens,omitempty"` + // Ensures only the top `k` most likely tokens are considered for generation at each step. + // Defaults to `0`, min value of `0`, max value of `500`. + K *int `json:"k,omitempty" url:"k,omitempty"` + // Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`. + // Defaults to `0.75`. min value of `0.01`, max value of `0.99`. + P *float64 `json:"p,omitempty" url:"p,omitempty"` + // Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" url:"frequency_penalty,omitempty"` + // Defaults to `0.0`, min value of `0.0`, max value of `1.0`. Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + PresencePenalty *float64 `json:"presence_penalty,omitempty" url:"presence_penalty,omitempty"` + stream bool } func (c *ChatStreamRequest) Stream() bool { @@ -161,37 +189,30 @@ type ClassifyRequest struct { // A list of up to 96 texts to be classified. Each one must be a non-empty string. // There is, however, no consistent, universal limit to the length a particular input can be. We perform classification on the first `x` tokens of each input, and `x` varies depending on which underlying model is powering classification. The maximum token length for each model is listed in the "max tokens" column [here](https://docs.cohere.com/docs/models). // Note: by default the `truncate` parameter is set to `END`, so tokens exceeding the limit will be automatically dropped. 
This behavior can be disabled by setting `truncate` to `NONE`, which will result in validation errors for longer texts. - Inputs []string `json:"inputs,omitempty"` + Inputs []string `json:"inputs,omitempty" url:"inputs,omitempty"` // An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...",label: "..."}`. // Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly. - Examples []*ClassifyRequestExamplesItem `json:"examples,omitempty"` + Examples []*ClassifyExample `json:"examples,omitempty" url:"examples,omitempty"` // The identifier of the model. Currently available models are `embed-multilingual-v2.0`, `embed-english-light-v2.0`, and `embed-english-v2.0` (default). Smaller "light" models are faster, while larger models will perform better. [Fine-tuned models](https://docs.cohere.com/docs/fine-tuning) can also be supplied with their full ID. - Model *string `json:"model,omitempty"` + Model *string `json:"model,omitempty" url:"model,omitempty"` // The ID of a custom playground preset. You can create presets in the [playground](https://dashboard.cohere.ai/playground/classify?model=large). If you use a preset, all other parameters become optional, and any included parameters will override the preset's parameters. - Preset *string `json:"preset,omitempty"` + Preset *string `json:"preset,omitempty" url:"preset,omitempty"` // One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. // Passing `START` will discard the start of the input. `END` will discard the end of the input. 
In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. // If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. - Truncate *ClassifyRequestTruncate `json:"truncate,omitempty"` -} - -type DetectLanguageRequest struct { - // List of strings to run the detection on. - Texts []string `json:"texts,omitempty"` - // The identifier of the model to generate with. - Model *string `json:"model,omitempty"` + Truncate *ClassifyRequestTruncate `json:"truncate,omitempty" url:"truncate,omitempty"` } type DetokenizeRequest struct { // The list of tokens to be detokenized. - Tokens []int `json:"tokens,omitempty"` + Tokens []int `json:"tokens,omitempty" url:"tokens,omitempty"` // An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model. - Model *string `json:"model,omitempty"` + Model *string `json:"model,omitempty" url:"model,omitempty"` } type EmbedRequest struct { // An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality. - Texts []string `json:"texts,omitempty"` + Texts []string `json:"texts,omitempty" url:"texts,omitempty"` // Defaults to embed-english-v2.0 // // The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. 
@@ -206,8 +227,8 @@ type EmbedRequest struct { // * `embed-english-v2.0` 4096 // * `embed-english-light-v2.0` 1024 // * `embed-multilingual-v2.0` 768 - Model *string `json:"model,omitempty"` - InputType *EmbedInputType `json:"input_type,omitempty"` + Model *string `json:"model,omitempty" url:"model,omitempty"` + InputType *EmbedInputType `json:"input_type,omitempty" url:"input_type,omitempty"` // Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types. // // * `"float"`: Use this when you want to get back the default float embeddings. Valid for all models. @@ -215,69 +236,77 @@ type EmbedRequest struct { // * `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models. // * `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models. // * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models. - EmbeddingTypes []string `json:"embedding_types,omitempty"` + EmbeddingTypes []EmbedRequestEmbeddingTypesItem `json:"embedding_types,omitempty" url:"embedding_types,omitempty"` // One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. // // Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. // // If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. - Truncate *EmbedRequestTruncate `json:"truncate,omitempty"` + Truncate *EmbedRequestTruncate `json:"truncate,omitempty" url:"truncate,omitempty"` } type GenerateRequest struct { // The input text that serves as the starting point for generating the response. 
// Note: The prompt will be pre-processed and modified before reaching the model. - Prompt string `json:"prompt"` + Prompt string `json:"prompt" url:"prompt"` // The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). // Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. - Model *string `json:"model,omitempty"` + Model *string `json:"model,omitempty" url:"model,omitempty"` // The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`. - NumGenerations *int `json:"num_generations,omitempty"` + NumGenerations *int `json:"num_generations,omitempty" url:"num_generations,omitempty"` // The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. // // This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details. // // Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt. - MaxTokens *int `json:"max_tokens,omitempty"` + MaxTokens *int `json:"max_tokens,omitempty" url:"max_tokens,omitempty"` // One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. // // Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. // // If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. 
- Truncate *GenerateRequestTruncate `json:"truncate,omitempty"` + Truncate *GenerateRequestTruncate `json:"truncate,omitempty" url:"truncate,omitempty"` // A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details. // Defaults to `0.75`, min value of `0.0`, max value of `5.0`. - Temperature *float64 `json:"temperature,omitempty"` + Temperature *float64 `json:"temperature,omitempty" url:"temperature,omitempty"` // Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate). // When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters. - Preset *string `json:"preset,omitempty"` + Preset *string `json:"preset,omitempty" url:"preset,omitempty"` // The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text. - EndSequences []string `json:"end_sequences,omitempty"` + EndSequences []string `json:"end_sequences,omitempty" url:"end_sequences,omitempty"` // The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included the text. - StopSequences []string `json:"stop_sequences,omitempty"` + StopSequences []string `json:"stop_sequences,omitempty" url:"stop_sequences,omitempty"` // Ensures only the top `k` most likely tokens are considered for generation at each step. // Defaults to `0`, min value of `0`, max value of `500`. - K *int `json:"k,omitempty"` + K *int `json:"k,omitempty" url:"k,omitempty"` // Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`. // Defaults to `0.75`. 
min value of `0.01`, max value of `0.99`. - P *float64 `json:"p,omitempty"` - // Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.' - FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` - // Defaults to `0.0`, min value of `0.0`, max value of `1.0`. Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. - PresencePenalty *float64 `json:"presence_penalty,omitempty"` + P *float64 `json:"p,omitempty" url:"p,omitempty"` + // Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + // + // Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models. + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" url:"frequency_penalty,omitempty"` + // Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + // + // Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + // + // Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models. + PresencePenalty *float64 `json:"presence_penalty,omitempty" url:"presence_penalty,omitempty"` // One of `GENERATION|ALL|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`. // // If `GENERATION` is selected, the token likelihoods will only be provided for generated text. 
// // If `ALL` is selected, the token likelihoods will be provided both for the prompt and the generated text. - ReturnLikelihoods *GenerateRequestReturnLikelihoods `json:"return_likelihoods,omitempty"` + ReturnLikelihoods *GenerateRequestReturnLikelihoods `json:"return_likelihoods,omitempty" url:"return_likelihoods,omitempty"` + // Certain models support the `logit_bias` parameter. + // // Used to prevent the model from generating unwanted tokens or to incentivize it to include desired tokens. The format is `{token_id: bias}` where bias is a float between -10 and 10. Tokens can be obtained from text using [Tokenize](/reference/tokenize). // // For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text. - // - // Note: logit bias may not be supported for all custom models. - LogitBias map[string]float64 `json:"logit_bias,omitempty"` - stream bool + LogitBias map[string]float64 `json:"logit_bias,omitempty" url:"logit_bias,omitempty"` + // When enabled, the user's prompt will be sent to the model without any pre-processing. + RawPrompting *bool `json:"raw_prompting,omitempty" url:"raw_prompting,omitempty"` + stream bool } func (g *GenerateRequest) Stream() bool { @@ -310,57 +339,65 @@ func (g *GenerateRequest) MarshalJSON() ([]byte, error) { type GenerateStreamRequest struct { // The input text that serves as the starting point for generating the response. // Note: The prompt will be pre-processed and modified before reaching the model. - Prompt string `json:"prompt"` + Prompt string `json:"prompt" url:"prompt"` // The identifier of the model to generate with. 
Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). // Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID. - Model *string `json:"model,omitempty"` + Model *string `json:"model,omitempty" url:"model,omitempty"` // The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`. - NumGenerations *int `json:"num_generations,omitempty"` + NumGenerations *int `json:"num_generations,omitempty" url:"num_generations,omitempty"` // The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations. // // This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details. // // Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt. - MaxTokens *int `json:"max_tokens,omitempty"` + MaxTokens *int `json:"max_tokens,omitempty" url:"max_tokens,omitempty"` // One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. // // Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. // // If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned. - Truncate *GenerateStreamRequestTruncate `json:"truncate,omitempty"` + Truncate *GenerateStreamRequestTruncate `json:"truncate,omitempty" url:"truncate,omitempty"` // A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. 
See [Temperature](/temperature-wiki) for more details. // Defaults to `0.75`, min value of `0.0`, max value of `5.0`. - Temperature *float64 `json:"temperature,omitempty"` + Temperature *float64 `json:"temperature,omitempty" url:"temperature,omitempty"` // Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate). // When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters. - Preset *string `json:"preset,omitempty"` + Preset *string `json:"preset,omitempty" url:"preset,omitempty"` // The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text. - EndSequences []string `json:"end_sequences,omitempty"` + EndSequences []string `json:"end_sequences,omitempty" url:"end_sequences,omitempty"` // The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included the text. - StopSequences []string `json:"stop_sequences,omitempty"` + StopSequences []string `json:"stop_sequences,omitempty" url:"stop_sequences,omitempty"` // Ensures only the top `k` most likely tokens are considered for generation at each step. // Defaults to `0`, min value of `0`, max value of `500`. - K *int `json:"k,omitempty"` + K *int `json:"k,omitempty" url:"k,omitempty"` // Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`. // Defaults to `0.75`. min value of `0.01`, max value of `0.99`. - P *float64 `json:"p,omitempty"` - // Used to reduce repetitiveness of generated tokens. 
The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.' - FrequencyPenalty *float64 `json:"frequency_penalty,omitempty"` - // Defaults to `0.0`, min value of `0.0`, max value of `1.0`. Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. - PresencePenalty *float64 `json:"presence_penalty,omitempty"` + P *float64 `json:"p,omitempty" url:"p,omitempty"` + // Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation. + // + // Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models. + FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" url:"frequency_penalty,omitempty"` + // Defaults to `0.0`, min value of `0.0`, max value of `1.0`. + // + // Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies. + // + // Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models. + PresencePenalty *float64 `json:"presence_penalty,omitempty" url:"presence_penalty,omitempty"` // One of `GENERATION|ALL|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`. // // If `GENERATION` is selected, the token likelihoods will only be provided for generated text. // // If `ALL` is selected, the token likelihoods will be provided both for the prompt and the generated text. 
- ReturnLikelihoods *GenerateStreamRequestReturnLikelihoods `json:"return_likelihoods,omitempty"` + ReturnLikelihoods *GenerateStreamRequestReturnLikelihoods `json:"return_likelihoods,omitempty" url:"return_likelihoods,omitempty"` + // Certain models support the `logit_bias` parameter. + // // Used to prevent the model from generating unwanted tokens or to incentivize it to include desired tokens. The format is `{token_id: bias}` where bias is a float between -10 and 10. Tokens can be obtained from text using [Tokenize](/reference/tokenize). // // For example, if the value `{'11': -10}` is provided, the model will be very unlikely to include the token 11 (`"\n"`, the newline character) anywhere in the generated text. In contrast `{'11': 10}` will result in generations that nearly only contain that token. Values between -10 and 10 will proportionally affect the likelihood of the token appearing in the generated text. - // - // Note: logit bias may not be supported for all custom models. - LogitBias map[string]float64 `json:"logit_bias,omitempty"` - stream bool + LogitBias map[string]float64 `json:"logit_bias,omitempty" url:"logit_bias,omitempty"` + // When enabled, the user's prompt will be sent to the model without any pre-processing. + RawPrompting *bool `json:"raw_prompting,omitempty" url:"raw_prompting,omitempty"` + stream bool } func (g *GenerateStreamRequest) Stream() bool { @@ -392,53 +429,53 @@ func (g *GenerateStreamRequest) MarshalJSON() ([]byte, error) { type RerankRequest struct { // The identifier of the model to use, one of : `rerank-english-v2.0`, `rerank-multilingual-v2.0` - Model *string `json:"model,omitempty"` + Model *string `json:"model,omitempty" url:"model,omitempty"` // The search query - Query string `json:"query"` + Query string `json:"query" url:"query"` // A list of document objects or strings to rerank. // If a document is provided the text fields is required and all other fields will be preserved in the response. 
// // The total max chunks (length of documents * max_chunks_per_doc) must be less than 10000. // // We recommend a maximum of 1,000 documents for optimal endpoint performance. - Documents []*RerankRequestDocumentsItem `json:"documents,omitempty"` + Documents []*RerankRequestDocumentsItem `json:"documents,omitempty" url:"documents,omitempty"` // The number of most relevant documents or indices to return, defaults to the length of the documents - TopN *int `json:"top_n,omitempty"` + TopN *int `json:"top_n,omitempty" url:"top_n,omitempty"` // - If false, returns results without the doc text - the api will return a list of {index, relevance score} where index is inferred from the list passed into the request. // - If true, returns results with the doc text passed in - the api will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request. - ReturnDocuments *bool `json:"return_documents,omitempty"` + ReturnDocuments *bool `json:"return_documents,omitempty" url:"return_documents,omitempty"` // The maximum number of chunks to produce internally from a document - MaxChunksPerDoc *int `json:"max_chunks_per_doc,omitempty"` + MaxChunksPerDoc *int `json:"max_chunks_per_doc,omitempty" url:"max_chunks_per_doc,omitempty"` } type SummarizeRequest struct { // The text to generate a summary for. Can be up to 100,000 characters long. Currently the only supported language is English. - Text string `json:"text"` + Text string `json:"text" url:"text"` // One of `short`, `medium`, `long`, or `auto` defaults to `auto`. Indicates the approximate length of the summary. If `auto` is selected, the best option will be picked based on the input text. - Length *SummarizeRequestLength `json:"length,omitempty"` + Length *SummarizeRequestLength `json:"length,omitempty" url:"length,omitempty"` // One of `paragraph`, `bullets`, or `auto`, defaults to `auto`. 
Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If `auto` is selected, the best option will be picked based on the input text. - Format *SummarizeRequestFormat `json:"format,omitempty"` + Format *SummarizeRequestFormat `json:"format,omitempty" url:"format,omitempty"` // The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better. - Model *string `json:"model,omitempty"` + Model *string `json:"model,omitempty" url:"model,omitempty"` // One of `low`, `medium`, `high`, or `auto`, defaults to `auto`. Controls how close to the original text the summary is. `high` extractiveness summaries will lean towards reusing sentences verbatim, while `low` extractiveness summaries will tend to paraphrase more. If `auto` is selected, the best option will be picked based on the input text. - Extractiveness *SummarizeRequestExtractiveness `json:"extractiveness,omitempty"` + Extractiveness *SummarizeRequestExtractiveness `json:"extractiveness,omitempty" url:"extractiveness,omitempty"` // Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1. - Temperature *float64 `json:"temperature,omitempty"` + Temperature *float64 `json:"temperature,omitempty" url:"temperature,omitempty"` // A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". Eg. 
"focusing on the next steps" or "written by Yoda" - AdditionalCommand *string `json:"additional_command,omitempty"` + AdditionalCommand *string `json:"additional_command,omitempty" url:"additional_command,omitempty"` } type TokenizeRequest struct { // The string to be tokenized, the minimum text length is 1 character, and the maximum text length is 65536 characters. - Text string `json:"text"` + Text string `json:"text" url:"text"` // An optional parameter to provide the model name. This will ensure that the tokenization uses the tokenizer used by that model. - Model *string `json:"model,omitempty"` + Model *string `json:"model,omitempty" url:"model,omitempty"` } type ApiMeta struct { - ApiVersion *ApiMetaApiVersion `json:"api_version,omitempty"` - BilledUnits *ApiMetaBilledUnits `json:"billed_units,omitempty"` - Warnings []string `json:"warnings,omitempty"` + ApiVersion *ApiMetaApiVersion `json:"api_version,omitempty" url:"api_version,omitempty"` + BilledUnits *ApiMetaBilledUnits `json:"billed_units,omitempty" url:"billed_units,omitempty"` + Warnings []string `json:"warnings,omitempty" url:"warnings,omitempty"` _rawJSON json.RawMessage } @@ -467,9 +504,9 @@ func (a *ApiMeta) String() string { } type ApiMetaApiVersion struct { - Version string `json:"version"` - IsDeprecated *bool `json:"is_deprecated,omitempty"` - IsExperimental *bool `json:"is_experimental,omitempty"` + Version string `json:"version" url:"version"` + IsDeprecated *bool `json:"is_deprecated,omitempty" url:"is_deprecated,omitempty"` + IsExperimental *bool `json:"is_experimental,omitempty" url:"is_experimental,omitempty"` _rawJSON json.RawMessage } @@ -499,13 +536,13 @@ func (a *ApiMetaApiVersion) String() string { type ApiMetaBilledUnits struct { // The number of billed input tokens. - InputTokens *float64 `json:"input_tokens,omitempty"` + InputTokens *float64 `json:"input_tokens,omitempty" url:"input_tokens,omitempty"` // The number of billed output tokens. 
- OutputTokens *float64 `json:"output_tokens,omitempty"` + OutputTokens *float64 `json:"output_tokens,omitempty" url:"output_tokens,omitempty"` // The number of billed search units. - SearchUnits *float64 `json:"search_units,omitempty"` + SearchUnits *float64 `json:"search_units,omitempty" url:"search_units,omitempty"` // The number of billed classifications units. - Classifications *float64 `json:"classifications,omitempty"` + Classifications *float64 `json:"classifications,omitempty" url:"classifications,omitempty"` _rawJSON json.RawMessage } @@ -562,13 +599,13 @@ func (a AuthTokenType) Ptr() *AuthTokenType { // A section of the generated reply which cites external knowledge. type ChatCitation struct { // The index of text that the citation starts at, counting from zero. For example, a generation of `Hello, world!` with a citation on `world` would have a start value of `7`. This is because the citation starts at `w`, which is the seventh character. - Start int `json:"start"` + Start int `json:"start" url:"start"` // The index of text that the citation ends after, counting from zero. For example, a generation of `Hello, world!` with a citation on `world` would have an end value of `11`. This is because the citation ends after `d`, which is the eleventh character. - End int `json:"end"` + End int `json:"end" url:"end"` // The text of the citation. For example, a generation of `Hello, world!` with a citation of `world` would have a text value of `world`. - Text string `json:"text"` + Text string `json:"text" url:"text"` // Identifiers of documents cited by this section of the generated reply. - DocumentIds []string `json:"document_ids,omitempty"` + DocumentIds []string `json:"document_ids,omitempty" url:"document_ids,omitempty"` _rawJSON json.RawMessage } @@ -598,7 +635,7 @@ func (c *ChatCitation) String() string { type ChatCitationGenerationEvent struct { // Citations for the generated reply. 
- Citations []*ChatCitation `json:"citations,omitempty"` + Citations []*ChatCitation `json:"citations,omitempty" url:"citations,omitempty"` _rawJSON json.RawMessage } @@ -628,12 +665,12 @@ func (c *ChatCitationGenerationEvent) String() string { // The connector used for fetching documents. type ChatConnector struct { - // The identifier of the connector. Currently only 'web-search' is supported. - Id string `json:"id"` + // The identifier of the connector. + Id string `json:"id" url:"id"` // An optional override to set the token that Cohere passes to the connector in the Authorization header. - UserAccessToken *string `json:"user_access_token,omitempty"` + UserAccessToken *string `json:"user_access_token,omitempty" url:"user_access_token,omitempty"` // An optional override to set whether or not the request continues if this connector fails. - ContinueOnFailure *bool `json:"continue_on_failure,omitempty"` + ContinueOnFailure *bool `json:"continue_on_failure,omitempty" url:"continue_on_failure,omitempty"` // Provides the connector with different settings at request time. The key/value pairs of this object are specific to each connector. // // The supported options are: @@ -645,7 +682,7 @@ type ChatConnector struct { // // - `{"options": {"site": "cohere.com"}}` would restrict the results to all subdomains at cohere.com // - `{"options": {"site": "txt.cohere.com"}}` would restrict the results to `txt.cohere.com` - Options map[string]interface{} `json:"options,omitempty"` + Options map[string]interface{} `json:"options,omitempty" url:"options,omitempty"` _rawJSON json.RawMessage } @@ -681,9 +718,9 @@ type ChatDocument = map[string]string // A single message in a chat history. Contains the role of the sender, the text contents of the message, and optionally a username. 
type ChatMessage struct { - Role ChatMessageRole `json:"role,omitempty"` - Message string `json:"message"` - UserName *string `json:"user_name,omitempty"` + Role ChatMessageRole `json:"role,omitempty" url:"role,omitempty"` + Message string `json:"message" url:"message"` + UserName *string `json:"user_name,omitempty" url:"user_name,omitempty"` _rawJSON json.RawMessage } @@ -758,6 +795,38 @@ func (c ChatRequestCitationQuality) Ptr() *ChatRequestCitationQuality { return &c } +// (internal) Overrides specified parts of the default Chat or RAG preamble. It is recommended that these options only be used in specific scenarios where the defaults are not adequate. +type ChatRequestPromptOverride struct { + Preamble interface{} `json:"preamble,omitempty" url:"preamble,omitempty"` + TaskDescription interface{} `json:"task_description,omitempty" url:"task_description,omitempty"` + StyleGuide interface{} `json:"style_guide,omitempty" url:"style_guide,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *ChatRequestPromptOverride) UnmarshalJSON(data []byte) error { + type unmarshaler ChatRequestPromptOverride + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = ChatRequestPromptOverride(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *ChatRequestPromptOverride) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + // Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. // // Dictates how the prompt will be constructed. @@ -787,9 +856,42 @@ func (c ChatRequestPromptTruncation) Ptr() *ChatRequestPromptTruncation { return &c } +// (internal) Sets inference and model options for RAG search query and tool use generations. 
Defaults are used when options are not specified here, meaning that other parameters outside of search_options are ignored (such as model= or temperature=). +type ChatRequestSearchOptions struct { + Model interface{} `json:"model,omitempty" url:"model,omitempty"` + Temperature interface{} `json:"temperature,omitempty" url:"temperature,omitempty"` + MaxTokens interface{} `json:"max_tokens,omitempty" url:"max_tokens,omitempty"` + Preamble interface{} `json:"preamble,omitempty" url:"preamble,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *ChatRequestSearchOptions) UnmarshalJSON(data []byte) error { + type unmarshaler ChatRequestSearchOptions + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = ChatRequestSearchOptions(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *ChatRequestSearchOptions) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + type ChatSearchQueriesGenerationEvent struct { // Generated search queries, meant to be used as part of the RAG flow. - SearchQueries []*ChatSearchQuery `json:"search_queries,omitempty"` + SearchQueries []*ChatSearchQuery `json:"search_queries,omitempty" url:"search_queries,omitempty"` _rawJSON json.RawMessage } @@ -820,9 +922,9 @@ func (c *ChatSearchQueriesGenerationEvent) String() string { // The generated search query. Contains the text of the query and a unique identifier for the query. type ChatSearchQuery struct { // The text of the search query. - Text string `json:"text"` + Text string `json:"text" url:"text"` // Unique identifier for the generated search query. Useful for submitting feedback. 
- GenerationId string `json:"generation_id"` + GenerationId string `json:"generation_id" url:"generation_id"` _rawJSON json.RawMessage } @@ -851,11 +953,11 @@ func (c *ChatSearchQuery) String() string { } type ChatSearchResult struct { - SearchQuery *ChatSearchQuery `json:"search_query,omitempty"` + SearchQuery *ChatSearchQuery `json:"search_query,omitempty" url:"search_query,omitempty"` // The connector from which this result comes from. - Connector *ChatConnector `json:"connector,omitempty"` + Connector *ChatSearchResultConnector `json:"connector,omitempty" url:"connector,omitempty"` // Identifiers of documents found by this search query. - DocumentIds []string `json:"document_ids,omitempty"` + DocumentIds []string `json:"document_ids,omitempty" url:"document_ids,omitempty"` _rawJSON json.RawMessage } @@ -883,11 +985,42 @@ func (c *ChatSearchResult) String() string { return fmt.Sprintf("%#v", c) } +// The connector used for fetching documents. +type ChatSearchResultConnector struct { + // The identifier of the connector. + Id string `json:"id" url:"id"` + + _rawJSON json.RawMessage +} + +func (c *ChatSearchResultConnector) UnmarshalJSON(data []byte) error { + type unmarshaler ChatSearchResultConnector + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = ChatSearchResultConnector(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *ChatSearchResultConnector) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + type ChatSearchResultsEvent struct { // Conducted searches and the ids of documents retrieved from each of them. 
- SearchResults []*ChatSearchResult `json:"search_results,omitempty"` + SearchResults []*ChatSearchResult `json:"search_results,omitempty" url:"search_results,omitempty"` // Documents fetched from searches or provided by the user. - Documents []ChatDocument `json:"documents,omitempty"` + Documents []ChatDocument `json:"documents,omitempty" url:"documents,omitempty"` _rawJSON json.RawMessage } @@ -921,9 +1054,9 @@ type ChatStreamEndEvent struct { // - `MAX_TOKENS` - the reply was cut off because the model reached the maximum number of tokens specified by the max_tokens parameter // - `ERROR` - something went wrong when generating the reply // - `ERROR_TOXIC` - the model generated a reply that was deemed toxic - FinishReason ChatStreamEndEventFinishReason `json:"finish_reason,omitempty"` + FinishReason ChatStreamEndEventFinishReason `json:"finish_reason,omitempty" url:"finish_reason,omitempty"` // The consolidated response from the model. Contains the generated reply and all the other information streamed back in the previous events. - Response *ChatStreamEndEventResponse `json:"response,omitempty"` + Response *ChatStreamEndEventResponse `json:"response,omitempty" url:"response,omitempty"` _rawJSON json.RawMessage } @@ -1097,6 +1230,38 @@ func (c ChatStreamRequestCitationQuality) Ptr() *ChatStreamRequestCitationQualit return &c } +// (internal) Overrides specified parts of the default Chat or RAG preamble. It is recommended that these options only be used in specific scenarios where the defaults are not adequate. 
+type ChatStreamRequestPromptOverride struct { + Preamble interface{} `json:"preamble,omitempty" url:"preamble,omitempty"` + TaskDescription interface{} `json:"task_description,omitempty" url:"task_description,omitempty"` + StyleGuide interface{} `json:"style_guide,omitempty" url:"style_guide,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *ChatStreamRequestPromptOverride) UnmarshalJSON(data []byte) error { + type unmarshaler ChatStreamRequestPromptOverride + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = ChatStreamRequestPromptOverride(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *ChatStreamRequestPromptOverride) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + // Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases. // // Dictates how the prompt will be constructed. @@ -1126,9 +1291,42 @@ func (c ChatStreamRequestPromptTruncation) Ptr() *ChatStreamRequestPromptTruncat return &c } +// (internal) Sets inference and model options for RAG search query and tool use generations. Defaults are used when options are not specified here, meaning that other parameters outside of search_options are ignored (such as model= or temperature=). 
+type ChatStreamRequestSearchOptions struct { + Model interface{} `json:"model,omitempty" url:"model,omitempty"` + Temperature interface{} `json:"temperature,omitempty" url:"temperature,omitempty"` + MaxTokens interface{} `json:"max_tokens,omitempty" url:"max_tokens,omitempty"` + Preamble interface{} `json:"preamble,omitempty" url:"preamble,omitempty"` + + _rawJSON json.RawMessage +} + +func (c *ChatStreamRequestSearchOptions) UnmarshalJSON(data []byte) error { + type unmarshaler ChatStreamRequestSearchOptions + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *c = ChatStreamRequestSearchOptions(value) + c._rawJSON = json.RawMessage(data) + return nil +} + +func (c *ChatStreamRequestSearchOptions) String() string { + if len(c._rawJSON) > 0 { + if value, err := core.StringifyJSON(c._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(c); err == nil { + return value + } + return fmt.Sprintf("%#v", c) +} + type ChatStreamStartEvent struct { // Unique identifier for the generated reply. Useful for submitting feedback. - GenerationId string `json:"generation_id"` + GenerationId string `json:"generation_id" url:"generation_id"` _rawJSON json.RawMessage } @@ -1158,7 +1356,7 @@ func (c *ChatStreamStartEvent) String() string { type ChatTextGenerationEvent struct { // The next batch of text generated by the model. 
- Text string `json:"text"` + Text string `json:"text" url:"text"` _rawJSON json.RawMessage } @@ -1186,25 +1384,25 @@ func (c *ChatTextGenerationEvent) String() string { return fmt.Sprintf("%#v", c) } -type ClassifyRequestExamplesItem struct { - Text *string `json:"text,omitempty"` - Label *string `json:"label,omitempty"` +type ClassifyExample struct { + Text *string `json:"text,omitempty" url:"text,omitempty"` + Label *string `json:"label,omitempty" url:"label,omitempty"` _rawJSON json.RawMessage } -func (c *ClassifyRequestExamplesItem) UnmarshalJSON(data []byte) error { - type unmarshaler ClassifyRequestExamplesItem +func (c *ClassifyExample) UnmarshalJSON(data []byte) error { + type unmarshaler ClassifyExample var value unmarshaler if err := json.Unmarshal(data, &value); err != nil { return err } - *c = ClassifyRequestExamplesItem(value) + *c = ClassifyExample(value) c._rawJSON = json.RawMessage(data) return nil } -func (c *ClassifyRequestExamplesItem) String() string { +func (c *ClassifyExample) String() string { if len(c._rawJSON) > 0 { if value, err := core.StringifyJSON(c._rawJSON); err == nil { return value @@ -1245,9 +1443,9 @@ func (c ClassifyRequestTruncate) Ptr() *ClassifyRequestTruncate { } type ClassifyResponse struct { - Id string `json:"id"` - Classifications []*ClassifyResponseClassificationsItem `json:"classifications,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` + Id string `json:"id" url:"id"` + Classifications []*ClassifyResponseClassificationsItem `json:"classifications,omitempty" url:"classifications,omitempty"` + Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"` _rawJSON json.RawMessage } @@ -1276,21 +1474,21 @@ func (c *ClassifyResponse) String() string { } type ClassifyResponseClassificationsItem struct { - Id string `json:"id"` + Id string `json:"id" url:"id"` // The input text that was classified - Input *string `json:"input,omitempty"` + Input *string `json:"input,omitempty" url:"input,omitempty"` // The predicted label 
for the associated query (only filled for single-label models) - Prediction *string `json:"prediction,omitempty"` + Prediction *string `json:"prediction,omitempty" url:"prediction,omitempty"` // An array containing the predicted labels for the associated query (only filled for single-label classification) - Predictions []string `json:"predictions,omitempty"` + Predictions []string `json:"predictions,omitempty" url:"predictions,omitempty"` // The confidence score for the top predicted class (only filled for single-label classification) - Confidence *float64 `json:"confidence,omitempty"` + Confidence *float64 `json:"confidence,omitempty" url:"confidence,omitempty"` // An array containing the confidence scores of all the predictions in the same order - Confidences []float64 `json:"confidences,omitempty"` + Confidences []float64 `json:"confidences,omitempty" url:"confidences,omitempty"` // A map containing each label and its confidence score according to the classifier. All the confidence scores add up to 1 for single-label classification. For multi-label classification the label confidences are independent of each other, so they don't have to sum up to 1. 
- Labels map[string]*ClassifyResponseClassificationsItemLabelsValue `json:"labels,omitempty"` + Labels map[string]*ClassifyResponseClassificationsItemLabelsValue `json:"labels,omitempty" url:"labels,omitempty"` // The type of classification performed - ClassificationType ClassifyResponseClassificationsItemClassificationType `json:"classification_type,omitempty"` + ClassificationType ClassifyResponseClassificationsItemClassificationType `json:"classification_type,omitempty" url:"classification_type,omitempty"` _rawJSON json.RawMessage } @@ -1342,7 +1540,7 @@ func (c ClassifyResponseClassificationsItemClassificationType) Ptr() *ClassifyRe } type ClassifyResponseClassificationsItemLabelsValue struct { - Confidence *float64 `json:"confidence,omitempty"` + Confidence *float64 `json:"confidence,omitempty" url:"confidence,omitempty"` _rawJSON json.RawMessage } @@ -1375,32 +1573,32 @@ func (c *ClassifyResponseClassificationsItemLabelsValue) String() string { type Connector struct { // The unique identifier of the connector (used in both `/connectors` & `/chat` endpoints). // This is automatically created from the name of the connector upon registration. - Id string `json:"id"` + Id string `json:"id" url:"id"` // The organization to which this connector belongs. This is automatically set to // the organization of the user who created the connector. - OrganizationId *string `json:"organization_id,omitempty"` + OrganizationId *string `json:"organization_id,omitempty" url:"organization_id,omitempty"` // A human-readable name for the connector. - Name string `json:"name"` + Name string `json:"name" url:"name"` // A description of the connector. - Description *string `json:"description,omitempty"` + Description *string `json:"description,omitempty" url:"description,omitempty"` // The URL of the connector that will be used to search for documents. 
- Url *string `json:"url,omitempty"` + Url *string `json:"url,omitempty" url:"url,omitempty"` // The UTC time at which the connector was created. - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at" url:"created_at"` // The UTC time at which the connector was last updated. - UpdatedAt time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"updated_at" url:"updated_at"` // A list of fields to exclude from the prompt (fields remain in the document). - Excludes []string `json:"excludes,omitempty"` + Excludes []string `json:"excludes,omitempty" url:"excludes,omitempty"` // The type of authentication/authorization used by the connector. Possible values: [oauth, service_auth] - AuthType *string `json:"auth_type,omitempty"` + AuthType *string `json:"auth_type,omitempty" url:"auth_type,omitempty"` // The OAuth 2.0 configuration for the connector. - Oauth *ConnectorOAuth `json:"oauth,omitempty"` + Oauth *ConnectorOAuth `json:"oauth,omitempty" url:"oauth,omitempty"` // The OAuth status for the user making the request. One of ["valid", "expired", ""]. Empty string (field is omitted) means the user has not authorized the connector yet. - AuthStatus *ConnectorAuthStatus `json:"auth_status,omitempty"` + AuthStatus *ConnectorAuthStatus `json:"auth_status,omitempty" url:"auth_status,omitempty"` // Whether the connector is active or not. - Active *bool `json:"active,omitempty"` + Active *bool `json:"active,omitempty" url:"active,omitempty"` // Whether a chat request should continue or not if the request to this connector fails. - ContinueOnFailure *bool `json:"continue_on_failure,omitempty"` + ContinueOnFailure *bool `json:"continue_on_failure,omitempty" url:"continue_on_failure,omitempty"` _rawJSON json.RawMessage } @@ -1452,12 +1650,16 @@ func (c ConnectorAuthStatus) Ptr() *ConnectorAuthStatus { } type ConnectorOAuth struct { + // The OAuth 2.0 client ID. This field is encrypted at rest. 
+ ClientId *string `json:"client_id,omitempty" url:"client_id,omitempty"` + // The OAuth 2.0 client Secret. This field is encrypted at rest and never returned in a response. + ClientSecret *string `json:"client_secret,omitempty" url:"client_secret,omitempty"` // The OAuth 2.0 /authorize endpoint to use when users authorize the connector. - AuthorizeUrl string `json:"authorize_url"` + AuthorizeUrl string `json:"authorize_url" url:"authorize_url"` // The OAuth 2.0 /token endpoint to use when users authorize the connector. - TokenUrl string `json:"token_url"` + TokenUrl string `json:"token_url" url:"token_url"` // The OAuth scopes to request when users authorize the connector. - Scope *string `json:"scope,omitempty"` + Scope *string `json:"scope,omitempty" url:"scope,omitempty"` _rawJSON json.RawMessage } @@ -1487,15 +1689,15 @@ func (c *ConnectorOAuth) String() string { type CreateConnectorOAuth struct { // The OAuth 2.0 client ID. This fields is encrypted at rest. - ClientId *string `json:"client_id,omitempty"` + ClientId *string `json:"client_id,omitempty" url:"client_id,omitempty"` // The OAuth 2.0 client Secret. This field is encrypted at rest and never returned in a response. - ClientSecret *string `json:"client_secret,omitempty"` + ClientSecret *string `json:"client_secret,omitempty" url:"client_secret,omitempty"` // The OAuth 2.0 /authorize endpoint to use when users authorize the connector. - AuthorizeUrl *string `json:"authorize_url,omitempty"` + AuthorizeUrl *string `json:"authorize_url,omitempty" url:"authorize_url,omitempty"` // The OAuth 2.0 /token endpoint to use when users authorize the connector. - TokenUrl *string `json:"token_url,omitempty"` + TokenUrl *string `json:"token_url,omitempty" url:"token_url,omitempty"` // The OAuth scopes to request when users authorize the connector. 
- Scope *string `json:"scope,omitempty"` + Scope *string `json:"scope,omitempty" url:"scope,omitempty"` _rawJSON json.RawMessage } @@ -1524,7 +1726,7 @@ func (c *CreateConnectorOAuth) String() string { } type CreateConnectorResponse struct { - Connector *Connector `json:"connector,omitempty"` + Connector *Connector `json:"connector,omitempty" url:"connector,omitempty"` _rawJSON json.RawMessage } @@ -1553,9 +1755,9 @@ func (c *CreateConnectorResponse) String() string { } type CreateConnectorServiceAuth struct { - Type AuthTokenType `json:"type,omitempty"` + Type AuthTokenType `json:"type,omitempty" url:"type,omitempty"` // The token that will be used in the HTTP Authorization header when making requests to the connector. This field is encrypted at rest and never returned in a response. - Token string `json:"token"` + Token string `json:"token" url:"token"` _rawJSON json.RawMessage } @@ -1585,8 +1787,8 @@ func (c *CreateConnectorServiceAuth) String() string { // Response from creating an embed job. 
type CreateEmbedJobResponse struct { - JobId string `json:"job_id"` - Meta *ApiMeta `json:"meta,omitempty"` + JobId string `json:"job_id" url:"job_id"` + Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"` _rawJSON json.RawMessage } @@ -1616,23 +1818,25 @@ func (c *CreateEmbedJobResponse) String() string { type Dataset struct { // The dataset ID - Id string `json:"id"` + Id string `json:"id" url:"id"` // The name of the dataset - Name string `json:"name"` + Name string `json:"name" url:"name"` // The creation date - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at" url:"created_at"` // The last update date - UpdatedAt time.Time `json:"updated_at"` + UpdatedAt time.Time `json:"updated_at" url:"updated_at"` + DatasetType DatasetType `json:"dataset_type,omitempty" url:"dataset_type,omitempty"` + ValidationStatus DatasetValidationStatus `json:"validation_status,omitempty" url:"validation_status,omitempty"` // Errors found during validation - ValidationError *string `json:"validation_error,omitempty"` + ValidationError *string `json:"validation_error,omitempty" url:"validation_error,omitempty"` // the avro schema of the dataset - Schema *string `json:"schema,omitempty"` - RequiredFields []string `json:"required_fields,omitempty"` - PreserveFields []string `json:"preserve_fields,omitempty"` + Schema *string `json:"schema,omitempty" url:"schema,omitempty"` + RequiredFields []string `json:"required_fields,omitempty" url:"required_fields,omitempty"` + PreserveFields []string `json:"preserve_fields,omitempty" url:"preserve_fields,omitempty"` // the underlying files that make up the dataset - DatasetParts []*DatasetPart `json:"dataset_parts,omitempty"` + DatasetParts []*DatasetPart `json:"dataset_parts,omitempty" url:"dataset_parts,omitempty"` // warnings found during validation - ValidationWarnings []string `json:"validation_warnings,omitempty"` + ValidationWarnings []string `json:"validation_warnings,omitempty" 
url:"validation_warnings,omitempty"` _rawJSON json.RawMessage } @@ -1662,19 +1866,19 @@ func (d *Dataset) String() string { type DatasetPart struct { // The dataset part ID - Id string `json:"id"` + Id string `json:"id" url:"id"` // The name of the dataset part - Name string `json:"name"` + Name string `json:"name" url:"name"` // The download url of the file - Url *string `json:"url,omitempty"` + Url *string `json:"url,omitempty" url:"url,omitempty"` // The index of the file - Index *int `json:"index,omitempty"` + Index *int `json:"index,omitempty" url:"index,omitempty"` // The size of the file in bytes - SizeBytes *int `json:"size_bytes,omitempty"` + SizeBytes *int `json:"size_bytes,omitempty" url:"size_bytes,omitempty"` // The number of rows in the file - NumRows *int `json:"num_rows,omitempty"` + NumRows *int `json:"num_rows,omitempty" url:"num_rows,omitempty"` // The download url of the original file - OriginalUrl *string `json:"original_url,omitempty"` + OriginalUrl *string `json:"original_url,omitempty" url:"original_url,omitempty"` _rawJSON json.RawMessage } @@ -1783,71 +1987,10 @@ func (d DatasetValidationStatus) Ptr() *DatasetValidationStatus { type DeleteConnectorResponse = map[string]interface{} -type DetectLanguageResponse struct { - // List of languages, one per input text - Results []*DetectLanguageResponseResultsItem `json:"results,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` - - _rawJSON json.RawMessage -} - -func (d *DetectLanguageResponse) UnmarshalJSON(data []byte) error { - type unmarshaler DetectLanguageResponse - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *d = DetectLanguageResponse(value) - d._rawJSON = json.RawMessage(data) - return nil -} - -func (d *DetectLanguageResponse) String() string { - if len(d._rawJSON) > 0 { - if value, err := core.StringifyJSON(d._rawJSON); err == nil { - return value - } - } - if value, err := core.StringifyJSON(d); err == nil { - return value - } - 
return fmt.Sprintf("%#v", d) -} - -type DetectLanguageResponseResultsItem struct { - LanguageName *string `json:"language_name,omitempty"` - LanguageCode *string `json:"language_code,omitempty"` - - _rawJSON json.RawMessage -} - -func (d *DetectLanguageResponseResultsItem) UnmarshalJSON(data []byte) error { - type unmarshaler DetectLanguageResponseResultsItem - var value unmarshaler - if err := json.Unmarshal(data, &value); err != nil { - return err - } - *d = DetectLanguageResponseResultsItem(value) - d._rawJSON = json.RawMessage(data) - return nil -} - -func (d *DetectLanguageResponseResultsItem) String() string { - if len(d._rawJSON) > 0 { - if value, err := core.StringifyJSON(d._rawJSON); err == nil { - return value - } - } - if value, err := core.StringifyJSON(d); err == nil { - return value - } - return fmt.Sprintf("%#v", d) -} - type DetokenizeResponse struct { // A string representing the list of tokens. - Text string `json:"text"` - Meta *ApiMeta `json:"meta,omitempty"` + Text string `json:"text" url:"text"` + Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"` _rawJSON json.RawMessage } @@ -1876,12 +2019,12 @@ func (d *DetokenizeResponse) String() string { } type EmbedByTypeResponse struct { - Id string `json:"id"` + Id string `json:"id" url:"id"` // An object with different embedding types. The length of each embedding type array will be the same as the length of the original `texts` array. - Embeddings *EmbedByTypeResponseEmbeddings `json:"embeddings,omitempty"` + Embeddings *EmbedByTypeResponseEmbeddings `json:"embeddings,omitempty" url:"embeddings,omitempty"` // The text entries for which embeddings were returned. 
- Texts []string `json:"texts,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` + Texts []string `json:"texts,omitempty" url:"texts,omitempty"` + Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"` _rawJSON json.RawMessage } @@ -1912,15 +2055,15 @@ func (e *EmbedByTypeResponse) String() string { // An object with different embedding types. The length of each embedding type array will be the same as the length of the original `texts` array. type EmbedByTypeResponseEmbeddings struct { // An array of float embeddings. - Float [][]float64 `json:"float,omitempty"` + Float [][]float64 `json:"float,omitempty" url:"float,omitempty"` // An array of signed int8 embeddings. Each value is between -128 and 127. - Int8 [][]float64 `json:"int8,omitempty"` + Int8 [][]float64 `json:"int8,omitempty" url:"int8,omitempty"` // An array of unsigned int8 embeddings. Each value is between 0 and 255. - Uint8 [][]float64 `json:"uint8,omitempty"` + Uint8 [][]float64 `json:"uint8,omitempty" url:"uint8,omitempty"` // An array of packed signed binary embeddings. The length of each binary embedding is 1/8 the length of the float embeddings of the provided model. Each value is between -128 and 127. - Binary [][]float64 `json:"binary,omitempty"` + Binary [][]float64 `json:"binary,omitempty" url:"binary,omitempty"` // An array of packed unsigned binary embeddings. The length of each binary embedding is 1/8 the length of the float embeddings of the provided model. Each value is between 0 and 255. - Ubinary [][]float64 `json:"ubinary,omitempty"` + Ubinary [][]float64 `json:"ubinary,omitempty" url:"ubinary,omitempty"` _rawJSON json.RawMessage } @@ -1949,12 +2092,12 @@ func (e *EmbedByTypeResponseEmbeddings) String() string { } type EmbedFloatsResponse struct { - Id string `json:"id"` + Id string `json:"id" url:"id"` // An array of embeddings, where each embedding is an array of floats. The length of the `embeddings` array will be the same as the length of the original `texts` array. 
- Embeddings [][]float64 `json:"embeddings,omitempty"` + Embeddings [][]float64 `json:"embeddings,omitempty" url:"embeddings,omitempty"` // The text entries for which embeddings were returned. - Texts []string `json:"texts,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` + Texts []string `json:"texts,omitempty" url:"texts,omitempty"` + Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"` _rawJSON json.RawMessage } @@ -2018,22 +2161,22 @@ func (e EmbedInputType) Ptr() *EmbedInputType { type EmbedJob struct { // ID of the embed job - JobId string `json:"job_id"` + JobId string `json:"job_id" url:"job_id"` // The name of the embed job - Name *string `json:"name,omitempty"` + Name *string `json:"name,omitempty" url:"name,omitempty"` // The status of the embed job - Status EmbedJobStatus `json:"status,omitempty"` + Status EmbedJobStatus `json:"status,omitempty" url:"status,omitempty"` // The creation date of the embed job - CreatedAt time.Time `json:"created_at"` + CreatedAt time.Time `json:"created_at" url:"created_at"` // ID of the input dataset - InputDatasetId string `json:"input_dataset_id"` + InputDatasetId string `json:"input_dataset_id" url:"input_dataset_id"` // ID of the resulting output dataset - OutputDatasetId *string `json:"output_dataset_id,omitempty"` + OutputDatasetId *string `json:"output_dataset_id,omitempty" url:"output_dataset_id,omitempty"` // ID of the model used to embed - Model string `json:"model"` + Model string `json:"model" url:"model"` // The truncation option used - Truncate EmbedJobTruncate `json:"truncate,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` + Truncate EmbedJobTruncate `json:"truncate,omitempty" url:"truncate,omitempty"` + Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"` _rawJSON json.RawMessage } @@ -2116,6 +2259,37 @@ func (e EmbedJobTruncate) Ptr() *EmbedJobTruncate { return &e } +type EmbedRequestEmbeddingTypesItem string + +const ( + EmbedRequestEmbeddingTypesItemFloat EmbedRequestEmbeddingTypesItem 
= "float" + EmbedRequestEmbeddingTypesItemInt8 EmbedRequestEmbeddingTypesItem = "int8" + EmbedRequestEmbeddingTypesItemUint8 EmbedRequestEmbeddingTypesItem = "uint8" + EmbedRequestEmbeddingTypesItemBinary EmbedRequestEmbeddingTypesItem = "binary" + EmbedRequestEmbeddingTypesItemUbinary EmbedRequestEmbeddingTypesItem = "ubinary" +) + +func NewEmbedRequestEmbeddingTypesItemFromString(s string) (EmbedRequestEmbeddingTypesItem, error) { + switch s { + case "float": + return EmbedRequestEmbeddingTypesItemFloat, nil + case "int8": + return EmbedRequestEmbeddingTypesItemInt8, nil + case "uint8": + return EmbedRequestEmbeddingTypesItemUint8, nil + case "binary": + return EmbedRequestEmbeddingTypesItemBinary, nil + case "ubinary": + return EmbedRequestEmbeddingTypesItemUbinary, nil + } + var t EmbedRequestEmbeddingTypesItem + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (e EmbedRequestEmbeddingTypesItem) Ptr() *EmbedRequestEmbeddingTypesItem { + return &e +} + // One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length. // // Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model. 
@@ -2321,9 +2495,9 @@ func (g GenerateRequestTruncate) Ptr() *GenerateRequestTruncate { } type GenerateStreamEnd struct { - IsFinished bool `json:"is_finished"` - FinishReason *FinishReason `json:"finish_reason,omitempty"` - Response *GenerateStreamEndResponse `json:"response,omitempty"` + IsFinished bool `json:"is_finished" url:"is_finished"` + FinishReason *FinishReason `json:"finish_reason,omitempty" url:"finish_reason,omitempty"` + Response *GenerateStreamEndResponse `json:"response,omitempty" url:"response,omitempty"` _rawJSON json.RawMessage } @@ -2352,9 +2526,9 @@ func (g *GenerateStreamEnd) String() string { } type GenerateStreamEndResponse struct { - Id string `json:"id"` - Prompt *string `json:"prompt,omitempty"` - Generations []*SingleGenerationInStream `json:"generations,omitempty"` + Id string `json:"id" url:"id"` + Prompt *string `json:"prompt,omitempty" url:"prompt,omitempty"` + Generations []*SingleGenerationInStream `json:"generations,omitempty" url:"generations,omitempty"` _rawJSON json.RawMessage } @@ -2384,11 +2558,11 @@ func (g *GenerateStreamEndResponse) String() string { type GenerateStreamError struct { // Refers to the nth generation. Only present when `num_generations` is greater than zero. - Index *int `json:"index,omitempty"` - IsFinished bool `json:"is_finished"` - FinishReason FinishReason `json:"finish_reason,omitempty"` + Index *int `json:"index,omitempty" url:"index,omitempty"` + IsFinished bool `json:"is_finished" url:"is_finished"` + FinishReason FinishReason `json:"finish_reason,omitempty" url:"finish_reason,omitempty"` // Error message - Err string `json:"err"` + Err string `json:"err" url:"err"` _rawJSON json.RawMessage } @@ -2505,10 +2679,10 @@ func (g GenerateStreamRequestTruncate) Ptr() *GenerateStreamRequestTruncate { type GenerateStreamText struct { // A segment of text of the generation. - Text string `json:"text"` + Text string `json:"text" url:"text"` // Refers to the nth generation. 
Only present when `num_generations` is greater than zero, and only when text responses are being streamed. - Index *int `json:"index,omitempty"` - IsFinished bool `json:"is_finished"` + Index *int `json:"index,omitempty" url:"index,omitempty"` + IsFinished bool `json:"is_finished" url:"is_finished"` _rawJSON json.RawMessage } @@ -2641,12 +2815,12 @@ func (g *GenerateStreamedResponse) Accept(visitor GenerateStreamedResponseVisito } type Generation struct { - Id string `json:"id"` + Id string `json:"id" url:"id"` // Prompt used for generations. - Prompt *string `json:"prompt,omitempty"` + Prompt *string `json:"prompt,omitempty" url:"prompt,omitempty"` // List of generated results - Generations []*SingleGeneration `json:"generations,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` + Generations []*SingleGeneration `json:"generations,omitempty" url:"generations,omitempty"` + Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"` _rawJSON json.RawMessage } @@ -2675,7 +2849,7 @@ func (g *Generation) String() string { } type GetConnectorResponse struct { - Connector *Connector `json:"connector,omitempty"` + Connector *Connector `json:"connector,omitempty" url:"connector,omitempty"` _rawJSON json.RawMessage } @@ -2704,7 +2878,9 @@ func (g *GetConnectorResponse) String() string { } type ListConnectorsResponse struct { - Connectors []*Connector `json:"connectors,omitempty"` + Connectors []*Connector `json:"connectors,omitempty" url:"connectors,omitempty"` + // Total number of connectors. 
+ TotalCount *float64 `json:"total_count,omitempty" url:"total_count,omitempty"` _rawJSON json.RawMessage } @@ -2733,7 +2909,7 @@ func (l *ListConnectorsResponse) String() string { } type ListEmbedJobResponse struct { - EmbedJobs []*EmbedJob `json:"embed_jobs,omitempty"` + EmbedJobs []*EmbedJob `json:"embed_jobs,omitempty" url:"embed_jobs,omitempty"` _rawJSON json.RawMessage } @@ -2763,17 +2939,17 @@ func (l *ListEmbedJobResponse) String() string { type NonStreamedChatResponse struct { // Contents of the reply generated by the model. - Text string `json:"text"` + Text string `json:"text" url:"text"` // Unique identifier for the generated reply. Useful for submitting feedback. - GenerationId string `json:"generation_id"` + GenerationId string `json:"generation_id" url:"generation_id"` // Inline citations for the generated reply. - Citations []*ChatCitation `json:"citations,omitempty"` + Citations []*ChatCitation `json:"citations,omitempty" url:"citations,omitempty"` // Documents seen by the model when generating the reply. - Documents []ChatDocument `json:"documents,omitempty"` + Documents []ChatDocument `json:"documents,omitempty" url:"documents,omitempty"` // Generated search queries, meant to be used as part of the RAG flow. - SearchQueries []*ChatSearchQuery `json:"search_queries,omitempty"` + SearchQueries []*ChatSearchQuery `json:"search_queries,omitempty" url:"search_queries,omitempty"` // Documents retrieved from each of the conducted searches. - SearchResults []*ChatSearchResult `json:"search_results,omitempty"` + SearchResults []*ChatSearchResult `json:"search_results,omitempty" url:"search_results,omitempty"` _rawJSON json.RawMessage } @@ -2803,7 +2979,7 @@ func (n *NonStreamedChatResponse) String() string { type OAuthAuthorizeResponse struct { // The OAuth 2.0 redirect url. Redirect the user to this url to authorize the connector. 
- RedirectUrl *string `json:"redirect_url,omitempty"` + RedirectUrl *string `json:"redirect_url,omitempty" url:"redirect_url,omitempty"` _rawJSON json.RawMessage } @@ -2831,6 +3007,36 @@ func (o *OAuthAuthorizeResponse) String() string { return fmt.Sprintf("%#v", o) } +type ParseInfo struct { + Separator *string `json:"separator,omitempty" url:"separator,omitempty"` + Delimiter *string `json:"delimiter,omitempty" url:"delimiter,omitempty"` + + _rawJSON json.RawMessage +} + +func (p *ParseInfo) UnmarshalJSON(data []byte) error { + type unmarshaler ParseInfo + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *p = ParseInfo(value) + p._rawJSON = json.RawMessage(data) + return nil +} + +func (p *ParseInfo) String() string { + if len(p._rawJSON) > 0 { + if value, err := core.StringifyJSON(p._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(p); err == nil { + return value + } + return fmt.Sprintf("%#v", p) +} + type RerankRequestDocumentsItem struct { typeName string String string @@ -2890,7 +3096,7 @@ func (r *RerankRequestDocumentsItem) Accept(visitor RerankRequestDocumentsItemVi type RerankRequestDocumentsItemText struct { // The text of the document to rerank. 
- Text string `json:"text"` + Text string `json:"text" url:"text"` _rawJSON json.RawMessage } @@ -2919,10 +3125,10 @@ func (r *RerankRequestDocumentsItemText) String() string { } type RerankResponse struct { - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty" url:"id,omitempty"` // An ordered list of ranked documents - Results []*RerankResponseResultsItem `json:"results,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` + Results []*RerankResponseResultsItem `json:"results,omitempty" url:"results,omitempty"` + Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"` _rawJSON json.RawMessage } @@ -2952,11 +3158,11 @@ func (r *RerankResponse) String() string { type RerankResponseResultsItem struct { // The doc object which was ranked - Document *RerankResponseResultsItemDocument `json:"document,omitempty"` + Document *RerankResponseResultsItemDocument `json:"document,omitempty" url:"document,omitempty"` // The index of the input document - Index int `json:"index"` + Index int `json:"index" url:"index"` // A relevance score assigned to the ranking - RelevanceScore float64 `json:"relevance_score"` + RelevanceScore float64 `json:"relevance_score" url:"relevance_score"` _rawJSON json.RawMessage } @@ -2987,7 +3193,7 @@ func (r *RerankResponseResultsItem) String() string { // The doc object which was ranked type RerankResponseResultsItemDocument struct { // The text of the document to rerank - Text string `json:"text"` + Text string `json:"text" url:"text"` _rawJSON json.RawMessage } @@ -3017,7 +3223,7 @@ func (r *RerankResponseResultsItemDocument) String() string { type SearchQueriesOnlyResponse struct { // Generated search queries, meant to be used as part of the RAG flow. 
- SearchQueries []*ChatSearchQuery `json:"search_queries,omitempty"` + SearchQueries []*ChatSearchQuery `json:"search_queries,omitempty" url:"search_queries,omitempty"` _rawJSON json.RawMessage } @@ -3046,13 +3252,13 @@ func (s *SearchQueriesOnlyResponse) String() string { } type SingleGeneration struct { - Id string `json:"id"` - Text string `json:"text"` + Id string `json:"id" url:"id"` + Text string `json:"text" url:"text"` // Refers to the nth generation. Only present when `num_generations` is greater than zero. - Index *int `json:"index,omitempty"` - Likelihood *float64 `json:"likelihood,omitempty"` + Index *int `json:"index,omitempty" url:"index,omitempty"` + Likelihood *float64 `json:"likelihood,omitempty" url:"likelihood,omitempty"` // Only returned if `return_likelihoods` is set to `GENERATION` or `ALL`. The likelihood refers to the average log-likelihood of the entire specified string, which is useful for [evaluating the performance of your model](likelihood-eval), especially if you've created a [custom model](/docs/training-custom-models). Individual token likelihoods provide the log-likelihood of each token. The first token will not have a likelihood. - TokenLikelihoods []*SingleGenerationTokenLikelihoodsItem `json:"token_likelihoods,omitempty"` + TokenLikelihoods []*SingleGenerationTokenLikelihoodsItem `json:"token_likelihoods,omitempty" url:"token_likelihoods,omitempty"` _rawJSON json.RawMessage } @@ -3081,11 +3287,12 @@ func (s *SingleGeneration) String() string { } type SingleGenerationInStream struct { - Id string `json:"id"` + Id string `json:"id" url:"id"` // Full text of the generation. - Text string `json:"text"` + Text string `json:"text" url:"text"` // Refers to the nth generation. Only present when `num_generations` is greater than zero. 
- Index *int `json:"index,omitempty"` + Index *int `json:"index,omitempty" url:"index,omitempty"` + FinishReason FinishReason `json:"finish_reason,omitempty" url:"finish_reason,omitempty"` _rawJSON json.RawMessage } @@ -3114,8 +3321,8 @@ func (s *SingleGenerationInStream) String() string { } type SingleGenerationTokenLikelihoodsItem struct { - Token string `json:"token"` - Likelihood float64 `json:"likelihood"` + Token string `json:"token" url:"token"` + Likelihood float64 `json:"likelihood" url:"likelihood"` _rawJSON json.RawMessage } @@ -3393,10 +3600,10 @@ func (s SummarizeRequestLength) Ptr() *SummarizeRequestLength { type SummarizeResponse struct { // Generated ID for the summary - Id *string `json:"id,omitempty"` + Id *string `json:"id,omitempty" url:"id,omitempty"` // Generated summary for the text - Summary *string `json:"summary,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` + Summary *string `json:"summary,omitempty" url:"summary,omitempty"` + Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"` _rawJSON json.RawMessage } @@ -3426,9 +3633,9 @@ func (s *SummarizeResponse) String() string { type TokenizeResponse struct { // An array of tokens, where each token is an integer. - Tokens []int `json:"tokens,omitempty"` - TokenStrings []string `json:"token_strings,omitempty"` - Meta *ApiMeta `json:"meta,omitempty"` + Tokens []int `json:"tokens,omitempty" url:"tokens,omitempty"` + TokenStrings []string `json:"token_strings,omitempty" url:"token_strings,omitempty"` + Meta *ApiMeta `json:"meta,omitempty" url:"meta,omitempty"` _rawJSON json.RawMessage } @@ -3457,7 +3664,7 @@ func (t *TokenizeResponse) String() string { } type UpdateConnectorResponse struct { - Connector *Connector `json:"connector,omitempty"` + Connector *Connector `json:"connector,omitempty" url:"connector,omitempty"` _rawJSON json.RawMessage }