# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Dict, List, Union, Iterable, Optional, overload
from typing_extensions import Literal

import httpx

from ... import _legacy_response
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import (
    required_args,
    maybe_transform,
    async_maybe_transform,
)
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from ..._streaming import Stream, AsyncStream
from ...types.chat import completion_create_params
from ..._base_client import (
    make_request_options,
)
from ...types.chat_model import ChatModel
from ...types.chat.chat_completion import ChatCompletion
from ...types.chat.chat_completion_chunk import ChatCompletionChunk
from ...types.chat.chat_completion_tool_param import ChatCompletionToolParam
from ...types.chat.chat_completion_message_param import ChatCompletionMessageParam
from ...types.chat.chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion:
        """
        Creates a model response for the given chat conversation.

        Args:
          messages: A list of messages comprising the conversation so far.
              [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion.

              The total length of input tokens and generated tokens is limited by the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        stream: Literal[True],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[ChatCompletionChunk]:
        """
        Creates a model response for the given chat conversation.

        Args:
          messages: A list of messages comprising the conversation so far.
              [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion.

              The total length of input tokens and generated tokens is limited by the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        stream: bool,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
        """
        Creates a model response for the given chat conversation.

        Args:
          messages: A list of messages comprising the conversation so far.
              [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion.

              The total length of input tokens and generated tokens is limited by the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @required_args(["messages", "model"], ["messages", "model", "stream"])
    def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion | Stream[ChatCompletionChunk]:
        return self._post(
            "/chat/completions",
            body=maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "response_format": response_format,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletion,
            stream=stream or False,
            stream_cls=Stream[ChatCompletionChunk],
        )


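# Usage sketch (illustrative only, not part of the generated surface): assuming a
# configured `OpenAI` client that exposes this resource as `client.chat.completions`,
# the non-streaming and streaming overloads above would be exercised roughly like so:
#
#     from openai import OpenAI
#
#     client = OpenAI()
#
#     # Non-streaming call returns a ChatCompletion.
#     completion = client.chat.completions.create(
#         model="gpt-3.5-turbo",
#         messages=[{"role": "user", "content": "Say hello"}],
#     )
#     print(completion.choices[0].message.content)
#
#     # stream=True returns a Stream[ChatCompletionChunk] of data-only SSE deltas.
#     stream = client.chat.completions.create(
#         model="gpt-3.5-turbo",
#         messages=[{"role": "user", "content": "Say hello"}],
#         stream=True,
#     )
#     for chunk in stream:
#         print(chunk.choices[0].delta.content or "", end="")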
class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        return AsyncCompletionsWithStreamingResponse(self)

    @overload
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion:
        """
        Creates a model response for the given chat conversation.

        Args:
          messages: A list of messages comprising the conversation so far.
              [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion.

              The total length of input tokens and generated tokens is limited by the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        stream: Literal[True],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncStream[ChatCompletionChunk]:
        """
        Creates a model response for the given chat conversation.

        Args:
          messages: A list of messages comprising the conversation so far.
              [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion.

              The total length of input tokens and generated tokens is limited by the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

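    # Usage sketch for the streaming overload above (illustrative only): assuming an
    # `AsyncOpenAI` client named `client` and a running event loop, the returned
    # AsyncStream[ChatCompletionChunk] would be consumed roughly like so:
    #
    #     stream = await client.chat.completions.create(
    #         model="gpt-3.5-turbo",
    #         messages=[{"role": "user", "content": "Say hello"}],
    #         stream=True,
    #     )
    #     async for chunk in stream:
    #         print(chunk.choices[0].delta.content or "", end="")
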
    @overload
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        stream: bool,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
        """
        Creates a model response for the given chat conversation.

        Args:
          messages: A list of messages comprising the conversation so far.
              [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).

          model: ID of the model to use. See the
              [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
              table for details on which models work with the Chat API.

          stream: If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
              sent as data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          function_call: Deprecated in favor of `tool_choice`.

              Controls which (if any) function is called by the model. `none` means the model
              will not call a function and instead generates a message. `auto` means the model
              can pick between generating a message or calling a function. Specifying a
              particular function via `{"name": "my_function"}` forces the model to call that
              function.

              `none` is the default when no functions are present. `auto` is the default if
              functions are present.

          functions: Deprecated in favor of `tools`.

              A list of functions the model may generate JSON inputs for.

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the
              tokenizer) to an associated bias value from -100 to 100. Mathematically, the
              bias is added to the logits generated by the model prior to sampling. The exact
              effect will vary per model, but values between -1 and 1 should decrease or
              increase likelihood of selection; values like -100 or 100 should result in a ban
              or exclusive selection of the relevant token.

          logprobs: Whether to return log probabilities of the output tokens or not. If true,
              returns the log probabilities of each output token returned in the `content` of
              `message`.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the chat
              completion.

              The total length of input tokens and generated tokens is limited by the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many chat completion choices to generate for each input message. Note that
              you will be charged based on the number of generated tokens across all of the
              choices. Keep `n` as `1` to minimize costs.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)

          response_format: An object specifying the format that the model must output. Compatible with
              [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
              all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

              Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
              message the model generates is valid JSON.

              **Important:** when using JSON mode, you **must** also instruct the model to
              produce JSON yourself via a system or user message. Without this, the model may
              generate an unending stream of whitespace until the generation reaches the token
              limit, resulting in a long-running and seemingly "stuck" request. Also note that
              the message content may be partially cut off if `finish_reason="length"`, which
              indicates the generation exceeded `max_tokens` or the conversation exceeded the
              max context length.

          seed: This feature is in Beta. If specified, our system will make a best effort to
              sample deterministically, such that repeated requests with the same `seed` and
              parameters should return the same result. Determinism is not guaranteed, and you
              should refer to the `system_fingerprint` response parameter to monitor changes
              in the backend.

          stop: Up to 4 sequences where the API will stop generating further tokens.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          tool_choice: Controls which (if any) tool is called by the model. `none` means the model will
              not call any tool and instead generates a message. `auto` means the model can
              pick between generating a message or calling one or more tools. `required` means
              the model must call one or more tools. Specifying a particular tool via
              `{"type": "function", "function": {"name": "my_function"}}` forces the model to
              call that tool.

              `none` is the default when no tools are present. `auto` is the default if tools
              are present.

          tools: A list of tools the model may call. Currently, only functions are supported as a
              tool. Use this to provide a list of functions the model may generate JSON inputs
              for. A max of 128 functions are supported.

          top_logprobs: An integer between 0 and 20 specifying the number of most likely tokens to
              return at each token position, each with an associated log probability.
              `logprobs` must be set to `true` if this parameter is used.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request
|
||
|
|
||
|
timeout: Override the client-level default timeout for this request, in seconds
|
||
|
"""
|
||
|
...
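
    # Illustrative usage sketch, not generated from the OpenAPI spec: JSON mode as
    # described in the `response_format` docs above. The client construction and the
    # model name are assumptions for the example, not requirements of this module.
    #
    #     from openai import AsyncOpenAI
    #
    #     client = AsyncOpenAI()  # reads OPENAI_API_KEY from the environment
    #     completion = await client.chat.completions.create(
    #         model="gpt-3.5-turbo-1106",
    #         response_format={"type": "json_object"},
    #         messages=[
    #             # JSON mode also requires the prompt itself to ask for JSON.
    #             {"role": "system", "content": "You are a helpful assistant that replies in JSON."},
    #             {"role": "user", "content": "List three primary colors."},
    #         ],
    #     )
    #     print(completion.choices[0].message.content)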

    @required_args(["messages", "model"], ["messages", "model", "stream"])
    async def create(
        self,
        *,
        messages: Iterable[ChatCompletionMessageParam],
        model: Union[str, ChatModel],
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        function_call: completion_create_params.FunctionCall | NotGiven = NOT_GIVEN,
        functions: Iterable[completion_create_params.Function] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[bool] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        response_format: completion_create_params.ResponseFormat | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str]] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        tool_choice: ChatCompletionToolChoiceOptionParam | NotGiven = NOT_GIVEN,
        tools: Iterable[ChatCompletionToolParam] | NotGiven = NOT_GIVEN,
        top_logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
        # The extra values given here take precedence over values defined on the client or passed to this method.
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> ChatCompletion | AsyncStream[ChatCompletionChunk]:
        return await self._post(
            "/chat/completions",
            body=await async_maybe_transform(
                {
                    "messages": messages,
                    "model": model,
                    "frequency_penalty": frequency_penalty,
                    "function_call": function_call,
                    "functions": functions,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "response_format": response_format,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "temperature": temperature,
                    "tool_choice": tool_choice,
                    "tools": tools,
                    "top_logprobs": top_logprobs,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParams,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=ChatCompletion,
            stream=stream or False,
            stream_cls=AsyncStream[ChatCompletionChunk],
        )
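
    # Illustrative usage sketch, not generated from the OpenAPI spec: consuming the
    # streaming variant of `create`, which yields an `AsyncStream[ChatCompletionChunk]`
    # when `stream=True`. Reuses the `AsyncOpenAI` client from the sketch above; the
    # model name and prompt are placeholder assumptions.
    #
    #     stream = await client.chat.completions.create(
    #         model="gpt-3.5-turbo",
    #         messages=[{"role": "user", "content": "Write a haiku about the sea."}],
    #         stream=True,
    #     )
    #     async for chunk in stream:
    #         # Each chunk carries a delta; `content` can be None (e.g. role-only deltas).
    #         print(chunk.choices[0].delta.content or "", end="")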


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )
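
# Illustrative usage sketch, not generated from the OpenAPI spec: the raw-response
# wrappers above expose the HTTP response alongside the parsed object. Assumes a
# configured synchronous client (`client = OpenAI()`); the header name shown is
# just an example.
#
#     raw = client.chat.completions.with_raw_response.create(
#         model="gpt-3.5-turbo",
#         messages=[{"role": "user", "content": "Hello"}],
#     )
#     print(raw.headers.get("x-request-id"))
#     completion = raw.parse()  # -> ChatCompletion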


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = to_streamed_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )
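
# Illustrative usage sketch, not generated from the OpenAPI spec: the streaming-response
# wrappers above are intended to be used as context managers so the connection is
# released promptly. Assumes a configured synchronous client (`client = OpenAI()`);
# `iter_lines` is an assumption based on the package README rather than something
# defined in this file.
#
#     with client.chat.completions.with_streaming_response.create(
#         model="gpt-3.5-turbo",
#         messages=[{"role": "user", "content": "Hello"}],
#     ) as response:
#         print(response.headers.get("content-type"))
#         for line in response.iter_lines():
#             print(line)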