// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
import { APIResource } from "../../core/resource.mjs";
import * as RunsAPI from "./runs/runs.mjs";
import { Runs } from "./runs/runs.mjs";
import { CursorPage } from "../../core/pagination.mjs";
import { path } from "../../internal/utils/path.mjs";

export class Evals extends APIResource {
  constructor() {
    super(...arguments);
    // Nested sub-resource: run-level operations live under `client.evals.runs`.
    this.runs = new RunsAPI.Runs(this._client);
  }
  /**
   * Create the structure of an evaluation that can be used to test a model's
   * performance. An evaluation is a set of testing criteria and the config for a
   * data source, which dictates the schema of the data used in the evaluation.
   * After creating an evaluation, you can run it on different models and model
   * parameters. We support several types of graders and data sources. For more
   * information, see the [Evals guide](https://platform.openai.com/docs/guides/evals).
   */
  create(body, options) {
    return this._client.post('/evals', { body, ...options });
  }
  /**
   * Get an evaluation by ID.
   */
  retrieve(evalID, options) {
    return this._client.get(path`/evals/${evalID}`, options);
  }
  /**
   * Update certain properties of an evaluation.
   */
  update(evalID, body, options) {
    return this._client.post(path`/evals/${evalID}`, { body, ...options });
  }
  /**
   * List evaluations for a project.
   */
  list(query = {}, options) {
    return this._client.getAPIList('/evals', CursorPage, { query, ...options });
  }
  /**
   * Delete an evaluation.
   */
  delete(evalID, options) {
    return this._client.delete(path`/evals/${evalID}`, options);
  }
}
Evals.Runs = Runs;
//# sourceMappingURL=evals.mjs.map
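
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of the generated module). It
// assumes the package-root `OpenAI` client, which exposes this resource as
// `client.evals`. The request bodies below are abbreviated examples in the
// shape described by the Evals guide, not a full schema reference; extracted
// into its own script with an API key set, the sketch should run as-is.
//
//   import OpenAI from "openai";
//
//   const client = new OpenAI(); // reads OPENAI_API_KEY from the environment
//
//   // Create an eval: a data-source schema plus a list of testing criteria.
//   const evaluation = await client.evals.create({
//     name: "exact-match demo",
//     data_source_config: {
//       type: "custom",
//       item_schema: {
//         type: "object",
//         properties: { input: { type: "string" }, expected: { type: "string" } },
//         required: ["input", "expected"],
//       },
//       include_sample_schema: true,
//     },
//     testing_criteria: [
//       {
//         type: "string_check",
//         name: "exact match",
//         input: "{{ sample.output_text }}",
//         reference: "{{ item.expected }}",
//         operation: "eq",
//       },
//     ],
//   });
//
//   // `list` returns a CursorPage; `for await` follows cursors transparently.
//   for await (const e of client.evals.list({ limit: 20 })) {
//     console.log(e.id, e.name);
//   }
//
//   // Single-eval operations take the eval ID returned by `create`.
//   await client.evals.update(evaluation.id, { name: "renamed eval" });
//   await client.evals.delete(evaluation.id);
// ---------------------------------------------------------------------------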