
from __future__ import annotations

from typing import Dict, List, Union, Iterable, Optional
from typing_extensions import Literal, overload

import httpx

from .. import _legacy_response
from ..types import completion_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import required_args, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.completion import Completion
from ..types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        """
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.

For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
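A minimal usage sketch (assumes an `OpenAI` client has already been constructed
and authenticated; the model and prompt below are illustrative):

    response = client.completions.with_raw_response.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say hello",
    )
    print(response.headers.get("x-request-id"))
    completion = response.parse()  # the parsed `Completion` object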
        """
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        """
An alternative to `.with_raw_response` that doesn't eagerly read the response body.

For more information, see https://www.github.com/openai/openai-python#with_streaming_response
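A minimal usage sketch (assumes a configured `OpenAI` client; the model and
prompt below are illustrative):

    with client.completions.with_streaming_response.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say hello",
    ) as response:
        print(response.headers.get("x-request-id"))
        completion = response.parse()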
        """
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion:
        """
Creates a completion for the provided prompt and parameters.

Args:
  model: ID of the model to use. You can use the
      [List models](https://platform.openai.com/docs/api-reference/models/list) API to
      see all of your available models, or see our
      [Model overview](https://platform.openai.com/docs/models) for descriptions of
      them.

  prompt: The prompt(s) to generate completions for, encoded as a string, array of
      strings, array of tokens, or array of token arrays.

      Note that <|endoftext|> is the document separator that the model sees during
      training, so if a prompt is not specified the model will generate as if from the
      beginning of a new document.

  best_of: Generates `best_of` completions server-side and returns the "best" (the one with
      the highest log probability per token). Results cannot be streamed.

      When used with `n`, `best_of` controls the number of candidate completions and
      `n` specifies how many to return – `best_of` must be greater than `n`.

      **Note:** Because this parameter generates many completions, it can quickly
      consume your token quota. Use carefully and ensure that you have reasonable
      settings for `max_tokens` and `stop`.

  echo: Echo back the prompt in addition to the completion

  frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
      existing frequency in the text so far, decreasing the model's likelihood to
      repeat the same line verbatim.

      [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

  logit_bias: Modify the likelihood of specified tokens appearing in the completion.

      Accepts a JSON object that maps tokens (specified by their token ID in the GPT
      tokenizer) to an associated bias value from -100 to 100. You can use this
      [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
      Mathematically, the bias is added to the logits generated by the model prior to
      sampling. The exact effect will vary per model, but values between -1 and 1
      should decrease or increase likelihood of selection; values like -100 or 100
      should result in a ban or exclusive selection of the relevant token.

      As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
      from being generated.

  logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
      well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
      list of the 5 most likely tokens. The API will always return the `logprob` of
      the sampled token, so there may be up to `logprobs+1` elements in the response.

      The maximum value for `logprobs` is 5.

  max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
      completion.

      The token count of your prompt plus `max_tokens` cannot exceed the model's
      context length.
      [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
      for counting tokens.

  n: How many completions to generate for each prompt.

      **Note:** Because this parameter generates many completions, it can quickly
      consume your token quota. Use carefully and ensure that you have reasonable
      settings for `max_tokens` and `stop`.

  presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
      whether they appear in the text so far, increasing the model's likelihood to
      talk about new topics.

      [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

  seed: If specified, our system will make a best effort to sample deterministically,
      such that repeated requests with the same `seed` and parameters should return
      the same result.

      Determinism is not guaranteed, and you should refer to the `system_fingerprint`
      response parameter to monitor changes in the backend.

  stop: Not supported with latest reasoning models `o3` and `o4-mini`.

      Up to 4 sequences where the API will stop generating further tokens. The
      returned text will not contain the stop sequence.

  stream: Whether to stream back partial progress. If set, tokens will be sent as
      data-only
      [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
      as they become available, with the stream terminated by a `data: [DONE]`
      message.
      [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

  stream_options: Options for streaming response. Only set this when you set `stream: true`.

  suffix: The suffix that comes after a completion of inserted text.

      This parameter is only supported for `gpt-3.5-turbo-instruct`.

  temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
      make the output more random, while lower values like 0.2 will make it more
      focused and deterministic.

      We generally recommend altering this or `top_p` but not both.

  top_p: An alternative to sampling with temperature, called nucleus sampling, where the
      model considers the results of the tokens with top_p probability mass. So 0.1
      means only the tokens comprising the top 10% probability mass are considered.

      We generally recommend altering this or `temperature` but not both.

  user: A unique identifier representing your end-user, which can help OpenAI to monitor
      and detect abuse.
      [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

  extra_headers: Send extra headers

  extra_query: Add additional query parameters to the request

  extra_body: Add additional JSON properties to the request

  timeout: Override the client-level default timeout for this request, in seconds
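
  A minimal non-streaming usage sketch (assumes a configured `OpenAI` client;
  the model, prompt, and `max_tokens` value are illustrative):

      from openai import OpenAI

      client = OpenAI()  # reads OPENAI_API_KEY from the environment
      completion = client.completions.create(
          model="gpt-3.5-turbo-instruct",
          prompt="Write a one-line tagline for an ice cream shop.",
          max_tokens=32,
      )
      print(completion.choices[0].text)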
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Stream[Completion]:
        """
Creates a completion for the provided prompt and parameters.

Args:
  model: ID of the model to use. You can use the
      [List models](https://platform.openai.com/docs/api-reference/models/list) API to
      see all of your available models, or see our
      [Model overview](https://platform.openai.com/docs/models) for descriptions of
      them.

  prompt: The prompt(s) to generate completions for, encoded as a string, array of
      strings, array of tokens, or array of token arrays.

      Note that <|endoftext|> is the document separator that the model sees during
      training, so if a prompt is not specified the model will generate as if from the
      beginning of a new document.

  stream: Whether to stream back partial progress. If set, tokens will be sent as
      data-only
      [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
      as they become available, with the stream terminated by a `data: [DONE]`
      message.
      [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

  best_of: Generates `best_of` completions server-side and returns the "best" (the one with
      the highest log probability per token). Results cannot be streamed.

      When used with `n`, `best_of` controls the number of candidate completions and
      `n` specifies how many to return – `best_of` must be greater than `n`.

      **Note:** Because this parameter generates many completions, it can quickly
      consume your token quota. Use carefully and ensure that you have reasonable
      settings for `max_tokens` and `stop`.

  echo: Echo back the prompt in addition to the completion

  frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
      existing frequency in the text so far, decreasing the model's likelihood to
      repeat the same line verbatim.

      [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

  logit_bias: Modify the likelihood of specified tokens appearing in the completion.

      Accepts a JSON object that maps tokens (specified by their token ID in the GPT
      tokenizer) to an associated bias value from -100 to 100. You can use this
      [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
      Mathematically, the bias is added to the logits generated by the model prior to
      sampling. The exact effect will vary per model, but values between -1 and 1
      should decrease or increase likelihood of selection; values like -100 or 100
      should result in a ban or exclusive selection of the relevant token.

      As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
      from being generated.

  logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
      well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
      list of the 5 most likely tokens. The API will always return the `logprob` of
      the sampled token, so there may be up to `logprobs+1` elements in the response.

      The maximum value for `logprobs` is 5.

  max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
      completion.

      The token count of your prompt plus `max_tokens` cannot exceed the model's
      context length.
      [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
      for counting tokens.

  n: How many completions to generate for each prompt.

      **Note:** Because this parameter generates many completions, it can quickly
      consume your token quota. Use carefully and ensure that you have reasonable
      settings for `max_tokens` and `stop`.

  presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
      whether they appear in the text so far, increasing the model's likelihood to
      talk about new topics.

      [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

  seed: If specified, our system will make a best effort to sample deterministically,
      such that repeated requests with the same `seed` and parameters should return
      the same result.

      Determinism is not guaranteed, and you should refer to the `system_fingerprint`
      response parameter to monitor changes in the backend.

  stop: Not supported with latest reasoning models `o3` and `o4-mini`.

      Up to 4 sequences where the API will stop generating further tokens. The
      returned text will not contain the stop sequence.

  stream_options: Options for streaming response. Only set this when you set `stream: true`.

  suffix: The suffix that comes after a completion of inserted text.

      This parameter is only supported for `gpt-3.5-turbo-instruct`.

  temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
      make the output more random, while lower values like 0.2 will make it more
      focused and deterministic.

      We generally recommend altering this or `top_p` but not both.

  top_p: An alternative to sampling with temperature, called nucleus sampling, where the
      model considers the results of the tokens with top_p probability mass. So 0.1
      means only the tokens comprising the top 10% probability mass are considered.

      We generally recommend altering this or `temperature` but not both.

  user: A unique identifier representing your end-user, which can help OpenAI to monitor
      and detect abuse.
      [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

  extra_headers: Send extra headers

  extra_query: Add additional query parameters to the request

  extra_body: Add additional JSON properties to the request

  timeout: Override the client-level default timeout for this request, in seconds
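
  A minimal streaming usage sketch (assumes a configured `OpenAI` client; the
  prompt is illustrative). With `stream=True` this overload returns a
  `Stream[Completion]` that yields chunks as they arrive:

      stream = client.completions.create(
          model="gpt-3.5-turbo-instruct",
          prompt="Count from one to five.",
          stream=True,
      )
      for chunk in stream:
          print(chunk.choices[0].text, end="")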
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: bool,
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        """
        Creates a completion for the provided prompt and parameters.

        See the first overload above for the full argument reference.
        """
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | Stream[Completion]:
        return self._post(
            "/completions",
            body=maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "stream_options": stream_options,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=Stream[Completion],
        )


class AsyncCompletions(AsyncAPIResource):
    @cached_property
    def with_raw_response(self) -> AsyncCompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return AsyncCompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> AsyncCompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return AsyncCompletionsWithStreamingResponse(self)

    @overload
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion:
        """
        Creates a completion for the provided prompt and parameters.

        See `Completions.create` above for the full argument reference.
        """
        ...

    @overload
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> AsyncStream[Completion]:
        """
        Creates a completion for the provided prompt and parameters.

        See `Completions.create` above for the full argument reference.
        """
        ...

    @overload
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: bool,
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        """
        Creates a completion for the provided prompt and parameters.

        See `Completions.create` above for the full argument reference.
        """
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    async def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, List[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | NotGiven = NOT_GIVEN,
        echo: Optional[bool] | NotGiven = NOT_GIVEN,
        frequency_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        logit_bias: Optional[Dict[str, int]] | NotGiven = NOT_GIVEN,
        logprobs: Optional[int] | NotGiven = NOT_GIVEN,
        max_tokens: Optional[int] | NotGiven = NOT_GIVEN,
        n: Optional[int] | NotGiven = NOT_GIVEN,
        presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
        seed: Optional[int] | NotGiven = NOT_GIVEN,
        stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
        stream: Optional[Literal[False]] | Literal[True] | NotGiven = NOT_GIVEN,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | NotGiven = NOT_GIVEN,
        suffix: Optional[str] | NotGiven = NOT_GIVEN,
        temperature: Optional[float] | NotGiven = NOT_GIVEN,
        top_p: Optional[float] | NotGiven = NOT_GIVEN,
        user: str | NotGiven = NOT_GIVEN,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
    ) -> Completion | AsyncStream[Completion]:
        return await self._post(
            "/completions",
            body=await async_maybe_transform(
                {
                    "model": model,
                    "prompt": prompt,
                    "best_of": best_of,
                    "echo": echo,
                    "frequency_penalty": frequency_penalty,
                    "logit_bias": logit_bias,
                    "logprobs": logprobs,
                    "max_tokens": max_tokens,
                    "n": n,
                    "presence_penalty": presence_penalty,
                    "seed": seed,
                    "stop": stop,
                    "stream": stream,
                    "stream_options": stream_options,
                    "suffix": suffix,
                    "temperature": temperature,
                    "top_p": top_p,
                    "user": user,
                },
                completion_create_params.CompletionCreateParamsStreaming
                if stream
                else completion_create_params.CompletionCreateParamsNonStreaming,
            ),
            options=make_request_options(
                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
            ),
            cast_to=Completion,
            stream=stream or False,
            stream_cls=AsyncStream[Completion],
        )


class CompletionsWithRawResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = _legacy_response.to_raw_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithRawResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = _legacy_response.async_to_raw_response_wrapper(
            completions.create,
        )


class CompletionsWithStreamingResponse:
    def __init__(self, completions: Completions) -> None:
        self._completions = completions

        self.create = to_streamed_response_wrapper(
            completions.create,
        )


class AsyncCompletionsWithStreamingResponse:
    def __init__(self, completions: AsyncCompletions) -> None:
        self._completions = completions

        self.create = async_to_streamed_response_wrapper(
            completions.create,
        )