# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Union, Iterable, Optional
from typing_extensions import Literal

import httpx

from .... import _legacy_response
from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ...._utils import (
    maybe_transform,
    async_maybe_transform,
)
from ...._compat import cached_property
from .checkpoints import (
    Checkpoints,
    AsyncCheckpoints,
    CheckpointsWithRawResponse,
    AsyncCheckpointsWithRawResponse,
    CheckpointsWithStreamingResponse,
    AsyncCheckpointsWithStreamingResponse,
)
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ....pagination import SyncCursorPage, AsyncCursorPage
from ...._base_client import (
    AsyncPaginator,
    make_request_options,
)
from ....types.fine_tuning import job_list_params, job_create_params, job_list_events_params
from ....types.fine_tuning.fine_tuning_job import FineTuningJob
from ....types.fine_tuning.fine_tuning_job_event import FineTuningJobEvent

__all__ = ["Jobs", "AsyncJobs"]


class Jobs(SyncAPIResource):
    @cached_property
    def checkpoints(self) -> Checkpoints:
        return Checkpoints(self._client)

    @cached_property
    def with_raw_response(self) -> JobsWithRawResponse:
        return JobsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> JobsWithStreamingResponse:
        return JobsWithStreamingResponse(self)

    def create(
        self,
        *,
        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]],
        training_file: str,
        hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
        integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        validation_file: Optional[str] | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> FineTuningJob:
        """
        Creates a fine-tuning job which begins the process of creating a new model from
        a given dataset.

        Response includes details of the enqueued job including job status and the name
        of the fine-tuned models once complete.

        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)

        Args:
          model: The name of the model to fine-tune. You can select one of the
              [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).

          training_file: The ID of an uploaded file that contains training data.

              See [upload file](https://platform.openai.com/docs/api-reference/files/create)
              for how to upload a file.

              Your dataset must be formatted as a JSONL file. Additionally, you must upload
              your file with the purpose `fine-tune`.

              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
              for more details.

          hyperparameters: The hyperparameters used for the fine-tuning job.

          integrations: A list of integrations to enable for your fine-tuning job.

          seed: The seed controls the reproducibility of the job. Passing in the same seed and
              job parameters should produce the same results, but may differ in rare cases.
              If a seed is not specified, one will be generated for you.

          suffix: A string of up to 18 characters that will be added to your fine-tuned model
              name.

              For example, a `suffix` of "custom-model-name" would produce a model name like
              `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.

          validation_file: The ID of an uploaded file that contains validation data.

              If you provide this file, the data is used to generate validation metrics
              periodically during fine-tuning. These metrics can be viewed in the fine-tuning
              results file. The same data should not be present in both train and validation
              files.

              Your dataset must be formatted as a JSONL file. You must upload your file with
              the purpose `fine-tune`.

              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
              for more details.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._post(
            "/fine_tuning/jobs",
            body=maybe_transform(
                {
                    "model": model,
                    "training_file": training_file,
                    "hyperparameters": hyperparameters,
                    "integrations": integrations,
                    "seed": seed,
                    "suffix": suffix,
                    "validation_file": validation_file,
                },
                job_create_params.JobCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def retrieve(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> FineTuningJob:
        """
        Get info about a fine-tuning job.

        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}"
            )
        return self._get(
            f"/fine_tuning/jobs/{fine_tuning_job_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def list(
        self,
        *,
        after: str | NotGiven = NOT_GIVEN,
        limit: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> SyncCursorPage[FineTuningJob]:
        """
        List your organization's fine-tuning jobs

        Args:
          after: Identifier for the last job from the previous pagination request.

          limit: Number of fine-tuning jobs to retrieve.
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get_api_list(
            "/fine_tuning/jobs",
            page=SyncCursorPage[FineTuningJob],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                    },
                    job_list_params.JobListParams,
                ),
            ),
            model=FineTuningJob,
        )

    def cancel(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> FineTuningJob:
        """
        Immediately cancel a fine-tune job.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}"
            )
        return self._post(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def list_events(
        self,
        fine_tuning_job_id: str,
        *,
        after: str | NotGiven = NOT_GIVEN,
        limit: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> SyncCursorPage[FineTuningJobEvent]:
        """
        Get status updates for a fine-tuning job.

        Args:
          after: Identifier for the last event from the previous pagination request.

          limit: Number of events to retrieve.
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}"
            )
        return self._get_api_list(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
            page=SyncCursorPage[FineTuningJobEvent],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                    },
                    job_list_events_params.JobListEventsParams,
                ),
            ),
            model=FineTuningJobEvent,
        )
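

# Usage sketch (illustrative only, not part of the generated resource classes): a minimal
# example of driving the synchronous `Jobs` resource above through the standard client,
# which mounts it at `client.fine_tuning.jobs`. The model name, file ID, and hyperparameter
# values below are placeholder assumptions; substitute your own.
def _example_jobs_sync_usage() -> None:  # pragma: no cover - documentation sketch
    from openai import OpenAI  # imported lazily here to avoid a circular import at module load time

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # Create a job from a previously uploaded JSONL training file (purpose="fine-tune").
    job = client.fine_tuning.jobs.create(
        model="gpt-3.5-turbo",
        training_file="file-abc123",  # placeholder file ID
        hyperparameters={"n_epochs": 3},
        suffix="custom-model-name",
    )

    # Check on the job and page through its status events; the returned SyncCursorPage
    # can be iterated directly and fetches additional pages on demand.
    job = client.fine_tuning.jobs.retrieve(job.id)
    for event in client.fine_tuning.jobs.list_events(job.id, limit=10):
        print(event.message)

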
class AsyncJobs(AsyncAPIResource):
    @cached_property
    def checkpoints(self) -> AsyncCheckpoints:
        return AsyncCheckpoints(self._client)

    @cached_property
    def with_raw_response(self) -> AsyncJobsWithRawResponse:
        return AsyncJobsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncJobsWithStreamingResponse:
        return AsyncJobsWithStreamingResponse(self)

    async def create(
        self,
        *,
        model: Union[str, Literal["babbage-002", "davinci-002", "gpt-3.5-turbo"]],
        training_file: str,
        hyperparameters: job_create_params.Hyperparameters | NotGiven = NOT_GIVEN,
        integrations: Optional[Iterable[job_create_params.Integration]] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        validation_file: Optional[str] | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> FineTuningJob:
        """
        Creates a fine-tuning job which begins the process of creating a new model from
        a given dataset.

        Response includes details of the enqueued job including job status and the name
        of the fine-tuned models once complete.

        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)

        Args:
          model: The name of the model to fine-tune. You can select one of the
              [supported models](https://platform.openai.com/docs/guides/fine-tuning/what-models-can-be-fine-tuned).

          training_file: The ID of an uploaded file that contains training data.

              See [upload file](https://platform.openai.com/docs/api-reference/files/create)
              for how to upload a file.

              Your dataset must be formatted as a JSONL file. Additionally, you must upload
              your file with the purpose `fine-tune`.

              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
              for more details.

          hyperparameters: The hyperparameters used for the fine-tuning job.

          integrations: A list of integrations to enable for your fine-tuning job.

          seed: The seed controls the reproducibility of the job. Passing in the same seed and
              job parameters should produce the same results, but may differ in rare cases. If a
              seed is not specified, one will be generated for you.

          suffix: A string of up to 18 characters that will be added to your fine-tuned model
              name.

              For example, a `suffix` of "custom-model-name" would produce a model name like
              `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`.

          validation_file: The ID of an uploaded file that contains validation data.

              If you provide this file, the data is used to generate validation metrics
              periodically during fine-tuning. These metrics can be viewed in the fine-tuning
              results file. The same data should not be present in both train and validation
              files.

              Your dataset must be formatted as a JSONL file. You must upload your file with
              the purpose `fine-tune`.

              See the [fine-tuning guide](https://platform.openai.com/docs/guides/fine-tuning)
              for more details.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return await self._post(
            "/fine_tuning/jobs",
            body=await async_maybe_transform(
                {
                    "model": model,
                    "training_file": training_file,
                    "hyperparameters": hyperparameters,
                    "integrations": integrations,
                    "seed": seed,
                    "suffix": suffix,
                    "validation_file": validation_file,
                },
                job_create_params.JobCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    async def retrieve(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> FineTuningJob:
        """
        Get info about a fine-tuning job.

        [Learn more about fine-tuning](https://platform.openai.com/docs/guides/fine-tuning)

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}"
            )
        return await self._get(
            f"/fine_tuning/jobs/{fine_tuning_job_id}",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def list(
        self,
        *,
        after: str | NotGiven = NOT_GIVEN,
        limit: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncPaginator[FineTuningJob, AsyncCursorPage[FineTuningJob]]:
        """
        List your organization's fine-tuning jobs

        Args:
          after: Identifier for the last job from the previous pagination request.

          limit: Number of fine-tuning jobs to retrieve.
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        return self._get_api_list(
            "/fine_tuning/jobs",
            page=AsyncCursorPage[FineTuningJob],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                    },
                    job_list_params.JobListParams,
                ),
            ),
            model=FineTuningJob,
        )

    async def cancel(
        self,
        fine_tuning_job_id: str,
        *,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> FineTuningJob:
        """
        Immediately cancel a fine-tune job.

        Args:
          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}"
            )
        return await self._post(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/cancel",
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=FineTuningJob,
        )

    def list_events(
        self,
        fine_tuning_job_id: str,
        *,
        after: str | NotGiven = NOT_GIVEN,
        limit: int | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncPaginator[FineTuningJobEvent, AsyncCursorPage[FineTuningJobEvent]]:
        """
        Get status updates for a fine-tuning job.

        Args:
          after: Identifier for the last event from the previous pagination request.

          limit: Number of events to retrieve.

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        if not fine_tuning_job_id:
            raise ValueError(
                f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}"
            )
        return self._get_api_list(
            f"/fine_tuning/jobs/{fine_tuning_job_id}/events",
            page=AsyncCursorPage[FineTuningJobEvent],
            options=make_request_options(
                extra_headers=extra_headers,
                extra_query=extra_query,
                extra_body=extra_body,
                timeout=timeout,
                query=maybe_transform(
                    {
                        "after": after,
                        "limit": limit,
                    },
                    job_list_events_params.JobListEventsParams,
                ),
            ),
            model=FineTuningJobEvent,
        )
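

# Usage sketch (illustrative only): the same flow with the asynchronous `AsyncJobs` resource
# above. `list` and `list_events` return an AsyncPaginator, so results are consumed with
# `async for`. Client setup and IDs are placeholder assumptions, as in the sync sketch above.
async def _example_jobs_async_usage() -> None:  # pragma: no cover - documentation sketch
    from openai import AsyncOpenAI  # imported lazily, see the note in the sync sketch

    client = AsyncOpenAI()

    job = await client.fine_tuning.jobs.create(
        model="gpt-3.5-turbo",
        training_file="file-abc123",  # placeholder file ID
    )

    # Auto-pagination: `async for` transparently requests additional pages as needed.
    async for listed_job in client.fine_tuning.jobs.list(limit=20):
        print(listed_job.id, listed_job.status)

    # Cancel the job created above.
    await client.fine_tuning.jobs.cancel(job.id)

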
class JobsWithRawResponse:
    def __init__(self, jobs: Jobs) -> None:
        self._jobs = jobs

        self.create = _legacy_response.to_raw_response_wrapper(
            jobs.create,
        )
        self.retrieve = _legacy_response.to_raw_response_wrapper(
            jobs.retrieve,
        )
        self.list = _legacy_response.to_raw_response_wrapper(
            jobs.list,
        )
        self.cancel = _legacy_response.to_raw_response_wrapper(
            jobs.cancel,
        )
        self.list_events = _legacy_response.to_raw_response_wrapper(
            jobs.list_events,
        )

    @cached_property
    def checkpoints(self) -> CheckpointsWithRawResponse:
        return CheckpointsWithRawResponse(self._jobs.checkpoints)


class AsyncJobsWithRawResponse:
    def __init__(self, jobs: AsyncJobs) -> None:
        self._jobs = jobs

        self.create = _legacy_response.async_to_raw_response_wrapper(
            jobs.create,
        )
        self.retrieve = _legacy_response.async_to_raw_response_wrapper(
            jobs.retrieve,
        )
        self.list = _legacy_response.async_to_raw_response_wrapper(
            jobs.list,
        )
        self.cancel = _legacy_response.async_to_raw_response_wrapper(
            jobs.cancel,
        )
        self.list_events = _legacy_response.async_to_raw_response_wrapper(
            jobs.list_events,
        )

    @cached_property
    def checkpoints(self) -> AsyncCheckpointsWithRawResponse:
        return AsyncCheckpointsWithRawResponse(self._jobs.checkpoints)


class JobsWithStreamingResponse:
    def __init__(self, jobs: Jobs) -> None:
        self._jobs = jobs

        self.create = to_streamed_response_wrapper(
            jobs.create,
        )
        self.retrieve = to_streamed_response_wrapper(
            jobs.retrieve,
        )
        self.list = to_streamed_response_wrapper(
            jobs.list,
        )
        self.cancel = to_streamed_response_wrapper(
            jobs.cancel,
        )
        self.list_events = to_streamed_response_wrapper(
            jobs.list_events,
        )

    @cached_property
    def checkpoints(self) -> CheckpointsWithStreamingResponse:
        return CheckpointsWithStreamingResponse(self._jobs.checkpoints)


class AsyncJobsWithStreamingResponse:
    def __init__(self, jobs: AsyncJobs) -> None:
        self._jobs = jobs

        self.create = async_to_streamed_response_wrapper(
            jobs.create,
        )
        self.retrieve = async_to_streamed_response_wrapper(
            jobs.retrieve,
        )
        self.list = async_to_streamed_response_wrapper(
            jobs.list,
        )
        self.cancel = async_to_streamed_response_wrapper(
            jobs.cancel,
        )
        self.list_events = async_to_streamed_response_wrapper(
            jobs.list_events,
        )

    @cached_property
    def checkpoints(self) -> AsyncCheckpointsWithStreamingResponse:
        return AsyncCheckpointsWithStreamingResponse(self._jobs.checkpoints)
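

# Usage sketch (illustrative only): the `.with_raw_response` and `.with_streaming_response`
# properties expose the wrapper classes above, giving access to the underlying HTTP response
# alongside the parsed model. The job ID and client setup are placeholder assumptions.
def _example_jobs_response_wrappers() -> None:  # pragma: no cover - documentation sketch
    from openai import OpenAI  # imported lazily, see the note in the sync sketch

    client = OpenAI()

    # Raw response: response headers are available, and `.parse()` returns the FineTuningJob model.
    raw = client.fine_tuning.jobs.with_raw_response.retrieve("ftjob-abc123")
    print(raw.headers.get("x-request-id"))
    job = raw.parse()
    print(job.status)

    # Streaming response: the body is not read eagerly; consume it within a context manager.
    with client.fine_tuning.jobs.with_streaming_response.retrieve("ftjob-abc123") as response:
        print(response.headers.get("x-request-id"))
        job = response.parse()
        print(job.status)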