"use strict";
// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
Object.defineProperty(exports, "__esModule", { value: true });
exports.FileBatches = void 0;
const resource_1 = require("../../core/resource.js");
const pagination_1 = require("../../core/pagination.js");
const headers_1 = require("../../internal/headers.js");
const sleep_1 = require("../../internal/utils/sleep.js");
const Util_1 = require("../../lib/Util.js");
const path_1 = require("../../internal/utils/path.js");
class FileBatches extends resource_1.APIResource {
    /**
     * Create a vector store file batch.
     */
    create(vectorStoreID, body, options) {
        return this._client.post((0, path_1.path) `/vector_stores/${vectorStoreID}/file_batches`, {
            body,
            ...options,
            headers: (0, headers_1.buildHeaders)([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
        });
    }
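    // Usage sketch (illustrative, not part of the generated output): assuming this resource is
    // mounted at `client.vectorStores.fileBatches` on a configured `OpenAI` client and the IDs
    // below are placeholders, a batch can be created from already-uploaded files like so:
    //
    //   const batch = await client.vectorStores.fileBatches.create('vs_abc123', {
    //     file_ids: ['file-abc123', 'file-def456'],
    //   });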
    /**
     * Retrieves a vector store file batch.
     */
    retrieve(batchID, params, options) {
        const { vector_store_id } = params;
        return this._client.get((0, path_1.path) `/vector_stores/${vector_store_id}/file_batches/${batchID}`, {
            ...options,
            headers: (0, headers_1.buildHeaders)([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
        });
    }
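    // Usage sketch (illustrative, same assumptions as above): the batch ID is passed
    // positionally and the owning vector store ID via `params`:
    //
    //   const batch = await client.vectorStores.fileBatches.retrieve('vsfb_abc123', {
    //     vector_store_id: 'vs_abc123',
    //   });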
    /**
     * Cancel a vector store file batch. This attempts to cancel the processing of
     * files in this batch as soon as possible.
     */
    cancel(batchID, params, options) {
        const { vector_store_id } = params;
        return this._client.post((0, path_1.path) `/vector_stores/${vector_store_id}/file_batches/${batchID}/cancel`, {
            ...options,
            headers: (0, headers_1.buildHeaders)([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
        });
    }
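    // Usage sketch (illustrative, placeholder IDs): cancellation mirrors `retrieve`:
    //
    //   await client.vectorStores.fileBatches.cancel('vsfb_abc123', {
    //     vector_store_id: 'vs_abc123',
    //   });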
    /**
     * Create a vector store batch and poll until all files have been processed.
     */
    async createAndPoll(vectorStoreId, body, options) {
        const batch = await this.create(vectorStoreId, body);
        return await this.poll(vectorStoreId, batch.id, options);
    }
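    // Usage sketch (illustrative, placeholder IDs): the returned promise resolves only after
    // polling finishes, so the result is already in a terminal state; `pollIntervalMs` is an
    // optional tuning knob:
    //
    //   const batch = await client.vectorStores.fileBatches.createAndPoll('vs_abc123', {
    //     file_ids: ['file-abc123'],
    //   }, { pollIntervalMs: 2000 });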
    /**
     * Returns a list of vector store files in a batch.
     */
    listFiles(batchID, params, options) {
        const { vector_store_id, ...query } = params;
        return this._client.getAPIList((0, path_1.path) `/vector_stores/${vector_store_id}/file_batches/${batchID}/files`, (pagination_1.CursorPage), { query, ...options, headers: (0, headers_1.buildHeaders)([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]) });
    }
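    // Usage sketch (illustrative, placeholder IDs): the cursor page returned here supports
    // async iteration, so every file in the batch can be walked without manual pagination:
    //
    //   for await (const file of client.vectorStores.fileBatches.listFiles('vsfb_abc123', {
    //     vector_store_id: 'vs_abc123',
    //   })) {
    //     console.log(file.id, file.status);
    //   }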
    /**
     * Wait for the given file batch to be processed.
     *
     * Note: this will return even if one of the files failed to process; you need to
     * check batch.file_counts.failed_count to handle this case.
     */
    async poll(vectorStoreID, batchID, options) {
        const headers = (0, headers_1.buildHeaders)([
            options?.headers,
            {
                'X-Stainless-Poll-Helper': 'true',
                'X-Stainless-Custom-Poll-Interval': options?.pollIntervalMs?.toString() ?? undefined,
            },
        ]);
        while (true) {
            const { data: batch, response } = await this.retrieve(batchID, { vector_store_id: vectorStoreID }, {
                ...options,
                headers,
            }).withResponse();
            switch (batch.status) {
                case 'in_progress':
                    // Prefer an explicit `pollIntervalMs`; otherwise honor the server-suggested
                    // `openai-poll-after-ms` header, falling back to 5 seconds.
                    let sleepInterval = 5000;
                    if (options?.pollIntervalMs) {
                        sleepInterval = options.pollIntervalMs;
                    }
                    else {
                        const headerInterval = response.headers.get('openai-poll-after-ms');
                        if (headerInterval) {
                            const headerIntervalMs = parseInt(headerInterval);
                            if (!isNaN(headerIntervalMs)) {
                                sleepInterval = headerIntervalMs;
                            }
                        }
                    }
                    await (0, sleep_1.sleep)(sleepInterval);
                    break;
                case 'failed':
                case 'cancelled':
                case 'completed':
                    return batch;
            }
        }
    }
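    // Usage sketch (illustrative, placeholder IDs): `poll` is useful when a batch was created
    // elsewhere and only its IDs are at hand; inspect `file_counts` on the result for failures:
    //
    //   const batch = await client.vectorStores.fileBatches.poll('vs_abc123', 'vsfb_abc123');
    //   console.log(batch.status, batch.file_counts);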
    /**
     * Uploads the given files concurrently and then creates a vector store file batch.
     *
     * The concurrency limit is configurable using the `maxConcurrency` parameter.
     */
    async uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options) {
        if (files == null || files.length == 0) {
            throw new Error(`No \`files\` provided to process. If you've already uploaded files you should use \`.createAndPoll()\` instead`);
        }
        const configuredConcurrency = options?.maxConcurrency ?? 5;
        // We cap the number of workers at the number of files (so we don't start any unnecessary workers)
        const concurrencyLimit = Math.min(configuredConcurrency, files.length);
        const client = this._client;
        const fileIterator = files.values();
        const allFileIds = [...fileIds];
        // This code is based on the design described below; the existing libraries don't accommodate our environment limits.
        // https://stackoverflow.com/questions/40639432/what-is-the-best-way-to-limit-concurrency-when-using-es6s-promise-all
        async function processFiles(iterator) {
            for (let item of iterator) {
                const fileObj = await client.files.create({ file: item, purpose: 'assistants' }, options);
                allFileIds.push(fileObj.id);
            }
        }
        // Start workers to process results. All workers share the same iterator, so each file is consumed exactly once.
        const workers = Array(concurrencyLimit).fill(fileIterator).map(processFiles);
        // Wait for all processing to complete.
        await (0, Util_1.allSettledWithThrow)(workers);
        return await this.createAndPoll(vectorStoreId, {
            file_ids: allFileIds,
        });
    }
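    // Usage sketch (illustrative, placeholder paths and IDs): with `fs` imported, `files`
    // accepts uploadable values such as read streams; `maxConcurrency` is optional:
    //
    //   const batch = await client.vectorStores.fileBatches.uploadAndPoll('vs_abc123', {
    //     files: [fs.createReadStream('a.txt'), fs.createReadStream('b.txt')],
    //   }, { maxConcurrency: 5 });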
}
exports.FileBatches = FileBatches;
//# sourceMappingURL=file-batches.js.map