// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from "../../core/resource.mjs";
import { CursorPage } from "../../core/pagination.mjs";
import { buildHeaders } from "../../internal/headers.mjs";
import { sleep } from "../../internal/utils/sleep.mjs";
import { allSettledWithThrow } from "../../lib/Util.mjs";
import { path } from "../../internal/utils/path.mjs";
export class FileBatches extends APIResource {
    /**
     * Create a vector store file batch.
     */
    create(vectorStoreID, body, options) {
        return this._client.post(path`/vector_stores/${vectorStoreID}/file_batches`, {
            body,
            ...options,
            headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
        });
    }
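    // A minimal usage sketch (assuming a configured OpenAI client that exposes
    // this resource as `client.vectorStores.fileBatches`; all IDs are placeholders):
    //
    //   const batch = await client.vectorStores.fileBatches.create('vs_abc123', {
    //     file_ids: ['file-abc123', 'file-def456'],
    //   });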
    /**
     * Retrieves a vector store file batch.
     */
    retrieve(batchID, params, options) {
        const { vector_store_id } = params;
        return this._client.get(path`/vector_stores/${vector_store_id}/file_batches/${batchID}`, {
            ...options,
            headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
        });
    }
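    // Usage sketch: note that the vector store ID is passed inside `params`, not
    // as a positional argument (IDs are placeholders):
    //
    //   const batch = await client.vectorStores.fileBatches.retrieve('vsfb_abc123', {
    //     vector_store_id: 'vs_abc123',
    //   });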
    /**
     * Cancel a vector store file batch. This attempts to cancel the processing of
     * files in this batch as soon as possible.
     */
    cancel(batchID, params, options) {
        const { vector_store_id } = params;
        return this._client.post(path`/vector_stores/${vector_store_id}/file_batches/${batchID}/cancel`, {
            ...options,
            headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
        });
    }
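    // Usage sketch: cancellation is best-effort, so one can follow up with
    // `poll` to observe the terminal status (IDs are placeholders):
    //
    //   await client.vectorStores.fileBatches.cancel('vsfb_abc123', { vector_store_id: 'vs_abc123' });
    //   const batch = await client.vectorStores.fileBatches.poll('vs_abc123', 'vsfb_abc123');
    //   console.log(batch.status); // 'cancelled' once the cancellation takes effect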
    /**
     * Create a vector store batch and poll until all files have been processed.
     */
    async createAndPoll(vectorStoreId, body, options) {
        const batch = await this.create(vectorStoreId, body);
        return await this.poll(vectorStoreId, batch.id, options);
    }
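    // Usage sketch: `pollIntervalMs` overrides both the default interval and any
    // server-suggested interval (IDs are placeholders):
    //
    //   const batch = await client.vectorStores.fileBatches.createAndPoll(
    //     'vs_abc123',
    //     { file_ids: ['file-abc123'] },
    //     { pollIntervalMs: 2000 },
    //   );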
    /**
     * Returns a list of vector store files in a batch.
     */
    listFiles(batchID, params, options) {
        const { vector_store_id, ...query } = params;
        return this._client.getAPIList(path`/vector_stores/${vector_store_id}/file_batches/${batchID}/files`, CursorPage, {
            query,
            ...options,
            headers: buildHeaders([{ 'OpenAI-Beta': 'assistants=v2' }, options?.headers]),
        });
    }
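    // Usage sketch: the returned CursorPage can be iterated with `for await`,
    // which transparently fetches subsequent pages (IDs are placeholders):
    //
    //   for await (const file of client.vectorStores.fileBatches.listFiles('vsfb_abc123', {
    //     vector_store_id: 'vs_abc123',
    //     limit: 20,
    //   })) {
    //     console.log(file.id, file.status);
    //   }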
    /**
     * Wait for the given file batch to be processed.
     *
     * Note: this will return even if one of the files failed to process; you need
     * to check `batch.file_counts.failed` to handle this case.
     */
    async poll(vectorStoreID, batchID, options) {
        const headers = buildHeaders([
            options?.headers,
            {
                'X-Stainless-Poll-Helper': 'true',
                'X-Stainless-Custom-Poll-Interval': options?.pollIntervalMs?.toString() ?? undefined,
            },
        ]);
        while (true) {
            const { data: batch, response } = await this.retrieve(batchID, { vector_store_id: vectorStoreID }, {
                ...options,
                headers,
            }).withResponse();
            switch (batch.status) {
                case 'in_progress': {
                    // Resolve the sleep interval: an explicit `pollIntervalMs` option
                    // wins, then a server-suggested `openai-poll-after-ms` header,
                    // then a 5 second default.
                    let sleepInterval = 5000;
                    if (options?.pollIntervalMs) {
                        sleepInterval = options.pollIntervalMs;
                    }
                    else {
                        const headerInterval = response.headers.get('openai-poll-after-ms');
                        if (headerInterval) {
                            const headerIntervalMs = parseInt(headerInterval);
                            if (!isNaN(headerIntervalMs)) {
                                sleepInterval = headerIntervalMs;
                            }
                        }
                    }
                    await sleep(sleepInterval);
                    break;
                }
                case 'failed':
                case 'cancelled':
                case 'completed':
                    return batch;
            }
        }
    }
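    // Usage sketch, including the failed-file check recommended in the note
    // above (IDs are placeholders):
    //
    //   const batch = await client.vectorStores.fileBatches.poll('vs_abc123', 'vsfb_abc123');
    //   if (batch.file_counts.failed > 0) {
    //     console.warn(`${batch.file_counts.failed} file(s) failed to process`);
    //   }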
    /**
     * Uploads the given files concurrently and then creates a vector store file batch.
     *
     * The concurrency limit is configurable using the `maxConcurrency` parameter.
     */
    async uploadAndPoll(vectorStoreId, { files, fileIds = [] }, options) {
        if (files == null || files.length == 0) {
            throw new Error(`No \`files\` provided to process. If you've already uploaded files you should use \`.createAndPoll()\` instead`);
        }
        const configuredConcurrency = options?.maxConcurrency ?? 5;
        // Cap the number of workers at the number of files so we don't start any
        // unnecessary workers.
        const concurrencyLimit = Math.min(configuredConcurrency, files.length);
        const client = this._client;
        const fileIterator = files.values();
        const allFileIds = [...fileIds];
        // This code is based on the design below; existing libraries don't
        // accommodate our environment limits.
        // https://stackoverflow.com/questions/40639432/what-is-the-best-way-to-limit-concurrency-when-using-es6s-promise-all
        async function processFiles(iterator) {
            // Workers share a single iterator, so each file is uploaded exactly once.
            for (let item of iterator) {
                const fileObj = await client.files.create({ file: item, purpose: 'assistants' }, options);
                allFileIds.push(fileObj.id);
            }
        }
        // Start workers to process results.
        const workers = Array(concurrencyLimit).fill(fileIterator).map(processFiles);
        // Wait for all processing to complete.
        await allSettledWithThrow(workers);
        return await this.createAndPoll(vectorStoreId, {
            file_ids: allFileIds,
        });
    }
}
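// Usage sketch for `uploadAndPoll` (assuming Node streams for the uploads; the
// vector store ID and existing file ID are placeholders):
//
//   import fs from "node:fs";
//
//   const batch = await client.vectorStores.fileBatches.uploadAndPoll(
//     'vs_abc123',
//     {
//       files: [fs.createReadStream('notes.txt'), fs.createReadStream('guide.md')],
//       fileIds: ['file-previously-uploaded'],
//     },
//     { maxConcurrency: 3 },
//   );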
//# sourceMappingURL=file-batches.mjs.map