Merge from main
MatKuhr committed Oct 2, 2024
2 parents d9d912e + 771f986 commit 663d6ef
Showing 39 changed files with 1,089 additions and 302 deletions.
5 changes: 5 additions & 0 deletions .changeset/bright-jokes-provide.md
@@ -0,0 +1,5 @@
---
'@sap-ai-sdk/langchain': patch
---

[Fixed Issue] Fix auto completion for Azure OpenAI Embedding models.
5 changes: 5 additions & 0 deletions .changeset/famous-needles-act.md
@@ -0,0 +1,5 @@
---
'@sap-ai-sdk/foundation-models': patch
---

[Fixed Issue] Fix index-based data access in embedding response. Previously, the 0th index data was always returned.
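For context, a minimal sketch of what this fix enables, assuming the embedding response exposes `getEmbedding` with an index into the returned data (the constructor argument and exact signature here are assumptions, not confirmed by this commit):

```ts
import { AzureOpenAiEmbeddingClient } from '@sap-ai-sdk/foundation-models';

// Hypothetical usage: embed two inputs and read each vector back by index.
const embeddingClient = new AzureOpenAiEmbeddingClient('text-embedding-ada-002');

const response = await embeddingClient.run({
  input: ['first text', 'second text']
});

// Before this fix, any index effectively returned the embedding at index 0.
const first = response.getEmbedding(0);
const second = response.getEmbedding(1);
```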
9 changes: 9 additions & 0 deletions .changeset/two-bottles-juggle.md
@@ -0,0 +1,9 @@
---
'@sap-ai-sdk/foundation-models': minor
'@sap-ai-sdk/orchestration': minor
'@sap-ai-sdk/langchain': minor
'@sap-ai-sdk/ai-api': minor
'@sap-ai-sdk/core': minor
---

[Fixed Issue] Fix sending the correct resource group header when a custom resource group is set.
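As the client and test changes later in this commit show, the resource group is configured on the model deployment and now reaches AI Core as the `ai-resource-group` header; a minimal sketch:

```ts
import { AzureOpenAiChatClient } from '@sap-ai-sdk/foundation-models';

// The configured resource group is now sent as the `ai-resource-group` header.
const client = new AzureOpenAiChatClient({
  modelName: 'gpt-4o',
  resourceGroup: 'custom-resource-group'
});

const response = await client.run({
  messages: [{ role: 'user', content: 'Where is the deepest place on earth located' }]
});
```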
7 changes: 7 additions & 0 deletions CONTRIBUTING.md
@@ -92,3 +92,10 @@ The following rule governs code contributions:

- Contributions must be licensed under the [Apache 2.0 License](./LICENSE)
- Due to legal reasons, contributors will be asked to accept a Developer Certificate of Origin (DCO) when they create the first pull request to this project. This happens in an automated fashion during the submission process. SAP uses [the standard DCO text of the Linux Foundation](https://developercertificate.org/).

## Contributing with AI-generated Code

As artificial intelligence evolves, AI-generated code is becoming valuable for many software projects, including open-source initiatives.
While we recognize the potential benefits of incorporating AI-generated content into our open-source projects, there are certain requirements that need to be reflected and adhered to when making contributions.

Please see our [Guideline for AI-generated code contributions to SAP Open Source Software Projects](https://github.com/SAP/.github/blob/main/CONTRIBUTING_USING_GENAI.md) for more details.
12 changes: 2 additions & 10 deletions eslint.config.js
@@ -20,14 +20,12 @@ export default [
'**/*.d.ts',
'**/dist/**/*',
'**/coverage/**/*',
'packages/ai-api/src/client/**/*',
'packages/foundation-models/src/azure-openai/client/inference/schema/on-your-data-system-assigned-managed-identity-authentication-options.ts'
'packages/**/client/**/*'
]
},
{
files: [
'**/test-util/**/*.ts',
'**/packages/orchestration/src/client/**/*'
'**/test-util/**/*.ts'
],
rules: {
'jsdoc/require-jsdoc': 'off'
@@ -39,12 +37,6 @@ export default [
'import/no-internal-modules': 'off'
}
},
{
files: ['packages/foundation-models/src/azure-openai/client/inference/schema/*.ts'],
rules: {
'jsdoc/check-indentation': 'off'
}
},
{
ignores: ['**/dist-cjs/**/*']
}
6 changes: 3 additions & 3 deletions package.json
@@ -29,7 +29,7 @@
"smoke-tests": "pnpm -F=@sap-ai-sdk/smoke-tests",
"schema-tests": "pnpm -F=@sap-ai-sdk/schema-tests",
"check:public-api": "pnpm -r check:public-api",
"check:deps": "pnpm -r -F !./tests/smoke-tests -F !./tests/schema-tests exec depcheck --ignores nock --quiet"
"check:deps": "pnpm -r -F !./tests/smoke-tests -F !./tests/schema-tests exec depcheck --ignores=\"nock,@jest/globals\" --quiet"
},
"devDependencies": {
"@changesets/cli": "^2.27.8",
@@ -47,13 +47,13 @@
"@types/jest": "^29.5.13",
"@types/jsonwebtoken": "^9.0.7",
"@types/mock-fs": "^4.13.4",
"@types/node": "^20.16.9",
"@types/node": "^20.16.10",
"depcheck": "^1.4.7",
"eslint": "^9.11.1",
"glob": "^11.0.0",
"jest": "^30.0.0-alpha.6",
"jsonwebtoken": "^9.0.2",
"mock-fs": "^5.2.0",
"mock-fs": "^5.3.0",
"nock": "^13.5.5",
"prettier": "^3.3.3",
"ts-jest": "^29.2.5",
11 changes: 11 additions & 0 deletions packages/ai-api/src/utils/deployment-resolver.ts
@@ -48,6 +48,17 @@ export type ModelDeployment<ModelNameT = string> =
| ModelNameT
| ((ModelConfig<ModelNameT> | DeploymentIdConfig) & ResourceGroupConfig);

/**
* @internal
*/
export function getResourceGroup(
modelDeployment: ModelDeployment
): string | undefined {
return typeof modelDeployment === 'object'
? modelDeployment.resourceGroup
: undefined;
}

/**
* Type guard to check if the given deployment configuration is a deployment ID configuration.
* @param modelDeployment - Configuration to check.
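The new helper only reads the optional `resourceGroup` from an object-style model deployment configuration; a short sketch of the expected behavior:

```ts
import { getResourceGroup } from '@sap-ai-sdk/ai-api/internal.js';

// Object-style configuration may carry a resource group.
getResourceGroup({ modelName: 'gpt-4o', resourceGroup: 'my-group' }); // 'my-group'

// A plain model name (string) has no resource group attached.
getResourceGroup('gpt-4o'); // undefined
```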
27 changes: 27 additions & 0 deletions packages/core/src/http-client.test.ts
@@ -36,4 +36,31 @@ describe('http-client', () => {
expect(res.status).toBe(200);
expect(res.data).toEqual(mockPromptResponse);
}, 10000);

it('should execute a request to the AI Core service with a custom resource group', async () => {
const mockPrompt = { prompt: 'some test prompt' };
const mockPromptResponse = { completion: 'some test completion' };

const scope = nock(aiCoreDestination.url, {
reqheaders: {
'ai-resource-group': 'custom-resource-group',
'ai-client-type': 'AI SDK JavaScript'
}
})
.post('/v2/some/endpoint', mockPrompt)
.query({ 'api-version': 'mock-api-version' })
.reply(200, mockPromptResponse);

const res = await executeRequest(
{
url: '/some/endpoint',
apiVersion: 'mock-api-version',
resourceGroup: 'custom-resource-group'
},
mockPrompt
);
expect(scope.isDone()).toBe(true);
expect(res.status).toBe(200);
expect(res.data).toEqual(mockPromptResponse);
});
});
12 changes: 8 additions & 4 deletions packages/core/src/http-client.ts
@@ -30,11 +30,14 @@ export interface EndpointOptions {
* The specific endpoint to call.
*/
url: string;

/**
* The API version to use.
*/
apiVersion?: string;
/**
* The resource group to use.
*/
resourceGroup?: string;
}
/**
* Executes a request to the AI Core service.
@@ -49,10 +52,10 @@ export async function executeRequest(
requestConfig?: CustomRequestConfig
): Promise<HttpResponse> {
const aiCoreDestination = await getAiCoreDestination();
const { url, apiVersion } = endpointOptions;
const { url, apiVersion, resourceGroup = 'default' } = endpointOptions;

const mergedRequestConfig = {
...mergeWithDefaultRequestConfig(apiVersion, requestConfig),
...mergeWithDefaultRequestConfig(apiVersion, resourceGroup, requestConfig),
data: JSON.stringify(data)
};

@@ -69,13 +72,14 @@

function mergeWithDefaultRequestConfig(
apiVersion?: string,
resourceGroup?: string,
requestConfig?: CustomRequestConfig
): HttpRequestConfig {
const defaultConfig: HttpRequestConfig = {
method: 'post',
headers: {
'content-type': 'application/json',
'ai-resource-group': 'default',
'ai-resource-group': resourceGroup,
'ai-client-type': 'AI SDK JavaScript'
},
params: apiVersion ? { 'api-version': apiVersion } : {}
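With this change, `executeRequest` falls back to the `default` resource group and otherwise forwards the configured one as the `ai-resource-group` header; a minimal sketch using the mock endpoint names from the test above:

```ts
import { executeRequest } from '@sap-ai-sdk/core';

// No resource group configured: the header `ai-resource-group: default` is sent.
await executeRequest(
  { url: '/some/endpoint', apiVersion: 'mock-api-version' },
  { prompt: 'some test prompt' }
);

// Custom resource group: `ai-resource-group: custom-resource-group` is sent instead.
await executeRequest(
  {
    url: '/some/endpoint',
    apiVersion: 'mock-api-version',
    resourceGroup: 'custom-resource-group'
  },
  { prompt: 'some test prompt' }
);
```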
51 changes: 36 additions & 15 deletions packages/foundation-models/README.md
@@ -4,15 +4,16 @@ This package incorporates generative AI foundation models into your AI activities

## Table of Contents

1. [Installation](#installation)
2. [Prerequisites](#prerequisites)
3. [Usage](#usage)
- [Client Initialization](#client-initialization)
- [Azure OpenAI Client](#azure-openai-client)
- [Chat Client](#chat-client)
- [Embedding Client](#embedding-client)
4. [Support, Feedback, Contribution](#support-feedback-contribution)
5. [License](#license)
- [Table of Contents](#table-of-contents)
- [Installation](#installation)
- [Prerequisites](#prerequisites)
- [Usage](#usage)
- [Client Initialization](#client-initialization)
- [Chat Client](#chat-client)
- [Embedding Client](#embedding-client)
- [Custom Request Configuration](#custom-request-configuration)
- [Support, Feedback, Contribution](#support-feedback-contribution)
- [License](#license)

## Installation

@@ -60,14 +61,12 @@ const chatClient = new AzureOpenAiChatClient({
});
```

### Azure OpenAI Client

The Azure OpenAI client can then be used to send chat completion or embedding requests to models deployed in the SAP generative AI hub.

#### Chat Client
### Chat Client

Use the `AzureOpenAiChatClient` to send chat completion requests to an OpenAI model deployed in SAP generative AI hub.

The client sends requests with Azure OpenAI API version `2024-06-01`.

```ts
import { AzureOpenAiChatClient } from '@sap-ai-sdk/foundation-models';

@@ -124,7 +123,7 @@ logger.info(

Refer to `AzureOpenAiChatCompletionParameters` interface for other parameters that can be passed to the chat completion request.

#### Embedding Client
### Embedding Client

Use the `AzureOpenAiEmbeddingClient` to send embedding requests to an OpenAI model deployed in SAP generative AI hub.

@@ -140,6 +139,28 @@ const response = await embeddingClient.run({
const embedding = response.getEmbedding();
```

### Custom Request Configuration

Set custom request configuration in the `requestConfig` parameter when calling the `run()` method of a chat or embedding client.

```ts
const response = await client.run(
{
...
},
{
headers: {
'x-custom-header': 'custom-value'
// Add more headers here
},
params: {
// Add more parameters here
}
// Add more request configuration here
}
);
```

## Support, Feedback, Contribution

This project is open to feature requests/suggestions, bug reports etc. via [GitHub issues](https://github.com/SAP/ai-sdk-js/issues).
@@ -1,6 +1,7 @@
import nock from 'nock';
import {
mockClientCredentialsGrantCall,
mockDeploymentsList,
mockInference,
parseMockResponse
} from '../../../../test-util/mock-http.js';
@@ -75,4 +76,55 @@ describe('Azure OpenAI chat client', () => {

await expect(client.run(prompt)).rejects.toThrow('status code 400');
});

it('executes a request with the custom resource group', async () => {
const customChatCompletionEndpoint = {
url: 'inference/deployments/1234/chat/completions',
apiVersion,
resourceGroup: 'custom-resource-group'
};

const mockResponse =
parseMockResponse<AzureOpenAiCreateChatCompletionResponse>(
'foundation-models',
'azure-openai-chat-completion-success-response.json'
);

const prompt = {
messages: [
{
role: 'user' as const,
content: 'Where is the deepest place on earth located'
}
]
};

mockDeploymentsList(
{
scenarioId: 'foundation-models',
resourceGroup: 'custom-resource-group',
executableId: 'azure-openai'
},
{ id: '1234', model: { name: 'gpt-4o', version: 'latest' } }
);

mockInference(
{
data: prompt
},
{
data: mockResponse,
status: 200
},
customChatCompletionEndpoint
);

const clientWithResourceGroup = new AzureOpenAiChatClient({
modelName: 'gpt-4o',
resourceGroup: 'custom-resource-group'
});

const response = await clientWithResourceGroup.run(prompt);
expect(response.data).toEqual(mockResponse);
});
});
@@ -1,6 +1,7 @@
import { type CustomRequestConfig, executeRequest } from '@sap-ai-sdk/core';
import {
getDeploymentId,
getResourceGroup,
type ModelDeployment
} from '@sap-ai-sdk/ai-api/internal.js';
import type { AzureOpenAiCreateChatCompletionRequest } from './client/inference/schema/index.js';
@@ -31,10 +32,12 @@ export class AzureOpenAiChatClient {
this.modelDeployment,
'azure-openai'
);
const resourceGroup = getResourceGroup(this.modelDeployment);
const response = await executeRequest(
{
url: `/inference/deployments/${deploymentId}/chat/completions`,
apiVersion
apiVersion,
resourceGroup
},
data,
requestConfig