diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/ChatWithFunctions.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/ChatWithFunctions.kt index 233a24d57..9a6391a1c 100644 --- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/ChatWithFunctions.kt +++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/ChatWithFunctions.kt @@ -29,7 +29,7 @@ fun chatFunctions(descriptors: List<SerialDescriptor>): List<FunctionObject> = descriptors.map(::chatFunction) fun chatFunction(fnName: String, schema: JsonObject): FunctionObject = - FunctionObject(fnName, schema, "Generated function for $fnName") + FunctionObject(fnName, "Generated function for $fnName", schema) @AiDsl suspend fun ChatApi.prompt(
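The hunk above only reorders constructor arguments: the regenerated FunctionObject now takes the description before the JSON schema. A minimal call-site sketch under that assumption (the function name and schema contents are hypothetical):

import com.xebia.functional.openai.models.FunctionObject
import kotlinx.serialization.json.buildJsonObject
import kotlinx.serialization.json.put

// Positional arguments are now (name, description, parameters).
val schema = buildJsonObject { put("type", "object") } // illustrative schema only
val fn = FunctionObject("extractPerson", "Generated function for extractPerson", schema)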
diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/StreamedFunction.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/StreamedFunction.kt index 47d62c0d0..89c78bfec 100644 --- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/StreamedFunction.kt +++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/StreamedFunction.kt @@ -61,65 +61,72 @@ sealed class StreamedFunction<out A> { val schema = function.parameters // we create an example from the schema from which we can expect and infer the paths // as the LLM is sending us chunks with malformed JSON - val example = createExampleFromSchema(schema) - chat - .createChatCompletionStream(request) - .onCompletion { - val newMessages = prompt.messages + messages - newMessages.addToMemory( - scope, - prompt.configuration.messagePolicy.addMessagesToConversation - ) - } - .collect { responseChunk -> - // Each chunk is emitted from the LLM and it will include a delta.parameters with - // the function is streaming, the JSON received will be partial and usually malformed - // and needs to be inspected and clean up to stream properties before - // the final result is ready + if (schema != null) { + val example = createExampleFromSchema(schema) + chat + .createChatCompletionStream(request) + .onCompletion { + val newMessages = prompt.messages + messages + newMessages.addToMemory( + scope, + prompt.configuration.messagePolicy.addMessagesToConversation + ) + } + .collect { responseChunk -> + // Each chunk is emitted from the LLM and it will include a delta.parameters with + // the function is streaming, the JSON received will be partial and usually malformed + // and needs to be inspected and clean up to stream properties before + // the final result is ready - // every response chunk contains a list of choices - if (responseChunk.choices.isNotEmpty()) { - // the delta contains the last emission while emitting the json character by character - val delta = responseChunk.choices.first().delta - // at any point the delta may be the last one - val finishReason = responseChunk.choices.first().finishReason - val toolCalls = delta.toolCalls.orEmpty() - toolCalls.forEach { toolCall -> - val fn = toolCall.function - val functionName = fn?.name - val arguments = fn?.arguments.orEmpty() - if (functionName != null) - // update the function name with the latest one - functionCall = functionCall.copy(name = functionName) - if (arguments.isNotEmpty()) { - // update the function arguments with the latest ones - functionCall = mergeArgumentsWithDelta(functionCall, toolCall) - // once we have info about the args we detect the last property referenced - // while streaming the arguments for the function call - val currentArg = getLastReferencedPropertyInArguments(functionCall) - if (currentProperty != currentArg && currentArg != null) { - // if the current property is different than the last one - // we update the path - // a change of property happens and we try to stream it + streamProperty( + path, + currentProperty, + functionCall.arguments, + streamedProperties + ) + path = findPropertyPath(example, currentArg) ?: listOf(currentArg) + } + // update the current property being evaluated + currentProperty = currentArg + } + if (finishReason != null) { + // the stream is finished and we try to stream the last property + // because the previous chunk may had a partial property whose body + // may had not been fully streamed streamProperty(path, currentProperty, functionCall.arguments, streamedProperties) - path = findPropertyPath(example, currentArg) ?: listOf(currentArg) } - // update the current property being evaluated - currentProperty = currentArg } if (finishReason != null) { - // the stream is finished and we try to stream the last property - // because the previous chunk may had a partial property whose body - // may had not been fully streamed - streamProperty(path, currentProperty, functionCall.arguments, streamedProperties) + // we stream the result + streamResult(functionCall, messages, serializer) } } - if (finishReason != null) { - // we stream the result - streamResult(functionCall, messages, serializer) - } } - } + } } private suspend fun <A> FlowCollector<StreamedFunction<A>>.streamResult( diff --git a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/assistants/AssistantThread.kt b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/assistants/AssistantThread.kt index bed765abc..1d690c7f8 100644 --- a/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/assistants/AssistantThread.kt +++ b/core/src/commonMain/kotlin/com/xebia/functional/xef/llm/assistants/AssistantThread.kt @@ -117,7 +117,8 @@ class AssistantThread( instructions = "", tools = emptyList(), fileIds = emptyList(), - metadata = null + metadata = null, + usage = null ) ) ) diff --git a/core/src/commonTest/kotlin/com/xebia/functional/xef/data/TestChatApi.kt b/core/src/commonTest/kotlin/com/xebia/functional/xef/data/TestChatApi.kt index 85716e860..08d7836be 100644 --- a/core/src/commonTest/kotlin/com/xebia/functional/xef/data/TestChatApi.kt +++ b/core/src/commonTest/kotlin/com/xebia/functional/xef/data/TestChatApi.kt @@ -47,7 +47,8 @@ class TestChatApi( ) ), finishReason
= CreateChatCompletionResponseChoicesInner.FinishReason.stop, - index = 0 + index = 0, + logprobs = null ) ), usage = CompletionUsage(0, 0, 0) diff --git a/openai-client/client/.openapi-generator/FILES b/openai-client/client/.openapi-generator/FILES index 5f7278a46..e28c5e5d0 100644 --- a/openai-client/client/.openapi-generator/FILES +++ b/openai-client/client/.openapi-generator/FILES @@ -1,12 +1,9 @@ -src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantApi.kt src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantsApi.kt src/commonMain/kotlin/com/xebia/functional/openai/apis/AudioApi.kt src/commonMain/kotlin/com/xebia/functional/openai/apis/ChatApi.kt src/commonMain/kotlin/com/xebia/functional/openai/apis/CompletionsApi.kt -src/commonMain/kotlin/com/xebia/functional/openai/apis/EditsApi.kt src/commonMain/kotlin/com/xebia/functional/openai/apis/EmbeddingsApi.kt src/commonMain/kotlin/com/xebia/functional/openai/apis/FilesApi.kt -src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTunesApi.kt src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTuningApi.kt src/commonMain/kotlin/com/xebia/functional/openai/apis/ImagesApi.kt src/commonMain/kotlin/com/xebia/functional/openai/apis/ModelsApi.kt @@ -40,6 +37,8 @@ src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionResponseM src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionRole.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDelta.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionStreamResponseDeltaFunctionCall.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTokenLogprob.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTokenLogprobTopLogprobsInner.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTool.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CompletionUsage.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateAssistantFileRequest.kt @@ -50,6 +49,7 @@ src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionReq src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInnerLogprobs.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt @@ -57,17 +57,10 @@ src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInner.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionResponseChoicesInnerLogprobs.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditRequest.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditRequestModel.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponse.kt 
-src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEditResponseChoicesInner.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequestModel.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingResponseUsage.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequest.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequestHyperparameters.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuneRequestModel.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequest.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequestHyperparameters.kt src/commonMain/kotlin/com/xebia/functional/openai/models/CreateFineTuningJobRequestModel.kt @@ -99,9 +92,6 @@ src/commonMain/kotlin/com/xebia/functional/openai/models/DeleteThreadResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/Embedding.kt src/commonMain/kotlin/com/xebia/functional/openai/models/Error.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ErrorResponse.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/FineTune.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneEvent.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuneHyperparams.kt src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJob.kt src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobError.kt src/commonMain/kotlin/com/xebia/functional/openai/models/FineTuningJobEvent.kt @@ -112,8 +102,6 @@ src/commonMain/kotlin/com/xebia/functional/openai/models/ImagesResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantFilesResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ListAssistantsResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ListFilesResponse.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuneEventsResponse.kt -src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTunesResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ListFineTuningJobEventsResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessageFilesResponse.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ListMessagesResponse.kt @@ -140,10 +128,12 @@ src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyMessageRequest.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyRunRequest.kt src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyThreadRequest.kt src/commonMain/kotlin/com/xebia/functional/openai/models/OpenAIFile.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunCompletionUsage.kt src/commonMain/kotlin/com/xebia/functional/openai/models/RunObject.kt src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectLastError.kt src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredAction.kt src/commonMain/kotlin/com/xebia/functional/openai/models/RunObjectRequiredActionSubmitToolOutputs.kt +src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepCompletionUsage.kt src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsMessageCreationObjectMessageCreation.kt 
src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObject.kt src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepDetailsToolCallsCodeObjectCodeInterpreter.kt diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantsApi.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantsApi.kt index 2026e272a..e89bd2d66 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantsApi.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AssistantsApi.kt @@ -26,6 +26,7 @@ import com.xebia.functional.openai.models.ListRunStepsResponse import com.xebia.functional.openai.models.ListRunsResponse import com.xebia.functional.openai.models.MessageFileObject import com.xebia.functional.openai.models.MessageObject +import com.xebia.functional.openai.models.ModifyAssistantRequest import com.xebia.functional.openai.models.ModifyMessageRequest import com.xebia.functional.openai.models.ModifyRunRequest import com.xebia.functional.openai.models.ModifyThreadRequest @@ -957,6 +958,38 @@ return request(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() } + /** + * Modifies an assistant. + * + * @param assistantId The ID of the assistant to modify. + * @param modifyAssistantRequest + * @return AssistantObject + */ + @Suppress("UNCHECKED_CAST") + open suspend fun modifyAssistant( + assistantId: kotlin.String, + modifyAssistantRequest: ModifyAssistantRequest + ): HttpResponse<AssistantObject> { + + val localVariableAuthNames = listOf("ApiKeyAuth") + + val localVariableBody = modifyAssistantRequest + + val localVariableQuery = mutableMapOf<kotlin.String, kotlin.collections.List<kotlin.String>>() + val localVariableHeaders = mutableMapOf<kotlin.String, kotlin.String>() + + val localVariableConfig = + RequestConfig( + RequestMethod.POST, + "/assistants/{assistant_id}".replace("{" + "assistant_id" + "}", "$assistantId"), + query = localVariableQuery, + headers = localVariableHeaders, + requiresAuthentication = true, + ) + + return jsonRequest(localVariableConfig, localVariableBody, localVariableAuthNames).wrap() + } + /** * Modifies a message. *
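The new modifyAssistant call follows the same POST pattern as the other generated assistant endpoints. A usage sketch, assuming the generated HttpResponse wrapper exposes a suspend body() accessor (the id and instructions below are hypothetical):

import com.xebia.functional.openai.apis.AssistantsApi
import com.xebia.functional.openai.models.AssistantObject
import com.xebia.functional.openai.models.ModifyAssistantRequest

// Sketch only: updates an existing assistant's instructions via the new endpoint.
suspend fun renameAssistant(api: AssistantsApi): AssistantObject =
  api
    .modifyAssistant(
      assistantId = "asst_abc123", // hypothetical id
      modifyAssistantRequest = ModifyAssistantRequest(instructions = "Answer tersely.") // hypothetical field
    )
    .body() // unwraps HttpResponse<AssistantObject>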
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AudioApi.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AudioApi.kt index 5016df4f4..76d3961ea 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AudioApi.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/AudioApi.kt @@ -80,6 +80,14 @@ open class AudioApi : ApiClient { @SerialName(value = "vtt") vtt("vtt") } + /** enum for parameter timestampGranularities */ + @Serializable + enum class TimestampGranularitiesCreateTranscription(val value: kotlin.String) { + + @SerialName(value = "word") word("word"), + @SerialName(value = "segment") segment("segment") + } + /** * Transcribes audio into the input language. * @@ -100,6 +108,10 @@ * deterministic. If set to 0, the model will use * [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase * the temperature until certain thresholds are hit. (optional, default to 0) + * @param timestampGranularities The timestamp granularities to populate for this transcription. + * Any of these options: `word`, or `segment`. Note: There is no additional + * latency for segment timestamps, but generating word timestamps incurs additional latency. + * (optional, default to segment) * @return CreateTranscriptionResponse */ @Suppress("UNCHECKED_CAST") @@ -109,7 +121,9 @@ language: kotlin.String? = null, prompt: kotlin.String? = null, responseFormat: ResponseFormatCreateTranscription? = ResponseFormatCreateTranscription.json, - temperature: kotlin.Double? = 0.toDouble() + temperature: kotlin.Double? = 0.toDouble(), + timestampGranularities: kotlin.collections.List<TimestampGranularitiesCreateTranscription>? = + TimestampGranularitiesCreateTranscription.segment.asListOfOne() ): HttpResponse<CreateTranscriptionResponse> { val localVariableAuthNames = listOf("ApiKeyAuth") @@ -121,6 +135,7 @@ prompt?.apply { appendGen("prompt", prompt) } responseFormat?.apply { appendGen("response_format", responseFormat) } temperature?.apply { appendGen("temperature", temperature) } + timestampGranularities?.onEach { appendGen("timestamp_granularities[]", it) } } val localVariableQuery = mutableMapOf<kotlin.String, kotlin.collections.List<kotlin.String>>()
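Because timestampGranularities defaults to segment.asListOfOne(), existing call sites keep today's behavior and word-level timestamps are opt-in. A small sketch of building the parameter (illustrative only):

import com.xebia.functional.openai.apis.AudioApi.TimestampGranularitiesCreateTranscription
import com.xebia.functional.openai.infrastructure.asListOfOne

// Default mirrors the API: segment-level timestamps only.
val defaultGranularities = TimestampGranularitiesCreateTranscription.segment.asListOfOne()

// Opting into word-level timestamps as well (word granularity adds latency; segment does not).
val wordAndSegment =
  listOf(
    TimestampGranularitiesCreateTranscription.word,
    TimestampGranularitiesCreateTranscription.segment
  )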
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FilesApi.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FilesApi.kt index c575e1714..cd57560f9 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FilesApi.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FilesApi.kt @@ -47,11 +47,12 @@ open class FilesApi : ApiClient { } /** - * Upload a file that can be used across various endpoints/features. The size of all the files - * uploaded by one organization can be up to 100 GB. The size of individual files for can be a - * maximum of 512MB. See the [Assistants Tools guide](/docs/assistants/tools) to learn more about - * the types of files supported. The Fine-tuning API only supports `.jsonl` files. - * Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + * Upload a file that can be used across various endpoints. The size of all the files uploaded by + * one organization can be up to 100 GB. The size of individual files can be a maximum of 512 MB + * or 2 million tokens for Assistants. See the [Assistants Tools guide](/docs/assistants/tools) to + * learn more about the types of files supported. The Fine-tuning API only supports + * `.jsonl` files. Please [contact us](https://help.openai.com/) if you need to increase + * these storage limits. * * @param file The File object (not file name) to be uploaded. * @param purpose The intended purpose of the uploaded file. Use \\\"fine-tune\\\" for diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTuningApi.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTuningApi.kt index 65fe14a1b..c2ad103f7 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTuningApi.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/apis/FineTuningApi.kt @@ -70,9 +70,9 @@ open class FineTuningApi : ApiClient { } /** - * Creates a job that fine-tunes a specified model from a given dataset. Response includes details - * of the enqueued job including job status and the name of the fine-tuned models once complete. - * [Learn more about fine-tuning](/docs/guides/fine-tuning) + * Creates a fine-tuning job which begins the process of creating a new model from a given + * dataset. Response includes details of the enqueued job including job status and the name of the + * fine-tuned models once complete. [Learn more about fine-tuning](/docs/guides/fine-tuning) * * @param createFineTuningJobRequest * @return FineTuningJob */ diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/extensions.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/extensions.kt new file mode 100644 index 000000000..2c8ed5c01 --- /dev/null +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/infrastructure/extensions.kt @@ -0,0 +1,3 @@ +package com.xebia.functional.openai.infrastructure + +fun <A> A.asListOfOne(): List<A> = listOf(this) diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctions.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctions.kt index a2d3ea524..1943246fe 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctions.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionFunctions.kt @@ -13,9 +13,9 @@ import kotlinx.serialization.encoding.* /** * @param name The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores * and dashes, with a maximum length of 64. - * @param parameters * @param description A description of what the function does, used by the model to choose when and * how to call the function. + * @param parameters */ @Serializable @Deprecated(message = "This schema is deprecated.") @@ -23,8 +23,8 @@ data class ChatCompletionFunctions( /* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. */ @SerialName(value = "name") @Required val name: kotlin.String, - @SerialName(value = "parameters") @Required val parameters: kotlinx.serialization.json.JsonObject, /* A description of what the function does, used by the model to choose when and how to call the function. */ - @SerialName(value = "description") val description: kotlin.String? = null + @SerialName(value = "description") val description: kotlin.String? = null, + @SerialName(value = "parameters") val parameters: kotlinx.serialization.json.JsonObject? = null ) diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoice.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoice.kt index 5f4cce7ea..bfe448811 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoice.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionNamedToolChoice.kt @@ -20,8 +20,8 @@ data class ChatCompletionNamedToolChoice( /* The type of the tool. Currently, only `function` is supported. */ - @SerialName(value = "type") val type: ChatCompletionNamedToolChoice.Type? = null, - @SerialName(value = "function") val function: ChatCompletionNamedToolChoiceFunction? = null + @SerialName(value = "type") @Required val type: ChatCompletionNamedToolChoice.Type, + @SerialName(value = "function") @Required val function: ChatCompletionNamedToolChoiceFunction ) { /** diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTokenLogprob.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTokenLogprob.kt new file mode 100644 index 000000000..65d36a3a9 --- /dev/null +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTokenLogprob.kt @@ -0,0 +1,40 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param token The token. + * @param logprob The log probability of this token. + * @param bytes A list of integers representing the UTF-8 bytes representation of the token. Useful + * in instances where characters are represented by multiple tokens and their byte representations + * must be combined to generate the correct text representation. Can be `null` if there is no + * bytes representation for the token. + * @param topLogprobs List of the most likely tokens and their log probability, at this token + * position. In rare cases, there may be fewer than the number of requested `top_logprobs` + * returned. + */ +@Serializable +data class ChatCompletionTokenLogprob( + + /* The token. */ + @SerialName(value = "token") @Required val token: kotlin.String, + + /* The log probability of this token. */ + @SerialName(value = "logprob") @Required val logprob: kotlin.Double, + + /* A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. */ + @SerialName(value = "bytes") @Required val bytes: kotlin.collections.List<kotlin.Int>?, + + /* List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. */ + @SerialName(value = "top_logprobs") + @Required + val topLogprobs: kotlin.collections.List<ChatCompletionTokenLogprobTopLogprobsInner> +)
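The per-token payload composes naturally: summing logprob over a choice's content tokens gives the log probability of the whole sampled sequence. A small sketch (the helper name is hypothetical):

import com.xebia.functional.openai.models.ChatCompletionTokenLogprob
import kotlin.math.exp

// Per-token log probabilities sum to the log probability of the sequence,
// so exponentiating the sum yields the joint probability of the choice's content.
fun sequenceProbability(tokens: List<ChatCompletionTokenLogprob>): Double =
  exp(tokens.sumOf { it.logprob })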
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTokenLogprobTopLogprobsInner.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTokenLogprobTopLogprobsInner.kt new file mode 100644 index 000000000..f435683a1 --- /dev/null +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ChatCompletionTokenLogprobTopLogprobsInner.kt @@ -0,0 +1,32 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. + */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * @param token The token. + * @param logprob The log probability of this token. + * @param bytes A list of integers representing the UTF-8 bytes representation of the token. Useful + * in instances where characters are represented by multiple tokens and their byte representations + * must be combined to generate the correct text representation. Can be `null` if there is no + * bytes representation for the token. + */ +@Serializable +data class ChatCompletionTokenLogprobTopLogprobsInner( + + /* The token. */ + @SerialName(value = "token") @Required val token: kotlin.String, + + /* The log probability of this token. */ + @SerialName(value = "logprob") @Required val logprob: kotlin.Double, + + /* A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. */ + @SerialName(value = "bytes") @Required val bytes: kotlin.collections.List<kotlin.Int>? +) diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequest.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequest.kt index 15f0ab0c0..53d5ece6d 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequest.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequest.kt @@ -17,22 +17,31 @@ import kotlinx.serialization.encoding.* * @param frequencyPenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on * their existing frequency in the text so far, decreasing the model's likelihood to repeat the * same line verbatim. - * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + * [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) * @param logitBias Modify the likelihood of specified tokens appearing in the completion. Accepts a * JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated * bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the * model prior to sampling. The exact effect will vary per model, but values between -1 and 1 * should decrease or increase likelihood of selection; values like -100 or 100 should result in a * ban or exclusive selection of the relevant token. + * @param logprobs Whether to return log probabilities of the output tokens or not. If true, returns + * the log probabilities of each output token returned in the `content` of `message`. This option + * is currently not available on the `gpt-4-vision-preview` model. + * @param topLogprobs An integer between 0 and 5 specifying the number of most likely tokens to + * return at each token position, each with an associated log probability. `logprobs` must be set + * to `true` if this parameter is used. - * @param maxTokens The maximum number of [tokens](/tokenizer) to generate in the chat completion. - * The total length of input tokens and generated tokens is limited by the model's context length. + * @param maxTokens The maximum number of [tokens](/tokenizer) that can be generated in the chat + * completion. The total length of input tokens and generated tokens is limited by the model's + * context length.
* [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) * for counting tokens. - * @param n How many chat completion choices to generate for each input message. + * @param n How many chat completion choices to generate for each input message. Note that you will + * be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` + * to minimize costs. * @param presencePenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on * whether they appear in the text so far, increasing the model's likelihood to talk about new * topics. - * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + * [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) * @param responseFormat * @param seed This feature is in Beta. If specified, our system will make a best effort to sample * deterministically, such that repeated requests with the same `seed` and parameters should * return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. @@ -74,25 +83,31 @@ data class CreateChatCompletionRequest( @Required val model: ai.xef.openai.OpenAIModel<CreateChatCompletionRequestModel>, - /* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ + /* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) */ @SerialName(value = "frequency_penalty") val frequencyPenalty: kotlin.Double? = (0).toDouble(), /* Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. */ @SerialName(value = "logit_bias") val logitBias: kotlin.collections.Map<kotlin.String, kotlin.Int>? = null, + /* Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. This option is currently not available on the `gpt-4-vision-preview` model. */ + @SerialName(value = "logprobs") val logprobs: kotlin.Boolean? = false, + + /* An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. */ + @SerialName(value = "top_logprobs") val topLogprobs: kotlin.Int? = null, + + /* The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.
The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. */ @SerialName(value = "max_tokens") val maxTokens: kotlin.Int? = null, - /* How many chat completion choices to generate for each input message. */ + /* How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. */ @SerialName(value = "n") val n: kotlin.Int? = 1, - /* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ + /* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) */ @SerialName(value = "presence_penalty") val presencePenalty: kotlin.Double? = (0).toDouble(), @SerialName(value = "response_format") val responseFormat: CreateChatCompletionRequestResponseFormat? = null, - /* This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. */ + /* This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. */ @SerialName(value = "seed") val seed: kotlin.Int? = null, @SerialName(value = "stop") val stop: com.xebia.functional.openai.models.ext.chat.create.CreateChatCompletionRequestStop? = diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestFunctionCall.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestFunctionCall.kt index b96726326..17cc03e75 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestFunctionCall.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestFunctionCall.kt @@ -15,7 +15,7 @@ import kotlinx.serialization.encoding.* * `none` means the model will not call a function and instead generates a message. `auto` means the * model can pick between generating a message or calling a function. Specifying a particular * function via `{\"name\": \"my_function\"}` forces the model to call that function. `none` is the - * default when no functions are present. `auto`` is the default if functions are present. + * default when no functions are present. `auto` is the default if functions are present. * * @param name The name of the function to call. 
*/ diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestModel.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestModel.kt index f7dfb7e4c..f2d76810d 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestModel.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestModel.kt @@ -14,11 +14,13 @@ import kotlinx.serialization.* * which models work with the Chat API. * * Values: - * gpt_4_1106_preview,gpt_4_vision_preview,gpt_4,gpt_4_0314,gpt_4_0613,gpt_4_32k,gpt_4_32k_0314,gpt_4_32k_0613,gpt_3_5_turbo_1106,gpt_3_5_turbo,gpt_3_5_turbo_16k,gpt_3_5_turbo_0301,gpt_3_5_turbo_0613,gpt_3_5_turbo_16k_0613 + * gpt_4_0125_preview,gpt_4_turbo_preview,gpt_4_1106_preview,gpt_4_vision_preview,gpt_4,gpt_4_0314,gpt_4_0613,gpt_4_32k,gpt_4_32k_0314,gpt_4_32k_0613,gpt_3_5_turbo,gpt_3_5_turbo_16k,gpt_3_5_turbo_0301,gpt_3_5_turbo_0613,gpt_3_5_turbo_1106,gpt_3_5_turbo_0125,gpt_3_5_turbo_16k_0613 */ @Serializable enum class CreateChatCompletionRequestModel(val value: kotlin.String) { + @SerialName(value = "gpt-4-0125-preview") gpt_4_0125_preview("gpt-4-0125-preview"), + @SerialName(value = "gpt-4-turbo-preview") gpt_4_turbo_preview("gpt-4-turbo-preview"), @SerialName(value = "gpt-4-1106-preview") gpt_4_1106_preview("gpt-4-1106-preview"), @SerialName(value = "gpt-4-vision-preview") gpt_4_vision_preview("gpt-4-vision-preview"), @SerialName(value = "gpt-4") gpt_4("gpt-4"), @@ -27,11 +29,12 @@ enum class CreateChatCompletionRequestModel(val value: kotlin.String) { @SerialName(value = "gpt-4-32k") gpt_4_32k("gpt-4-32k"), @SerialName(value = "gpt-4-32k-0314") gpt_4_32k_0314("gpt-4-32k-0314"), @SerialName(value = "gpt-4-32k-0613") gpt_4_32k_0613("gpt-4-32k-0613"), - @SerialName(value = "gpt-3.5-turbo-1106") gpt_3_5_turbo_1106("gpt-3.5-turbo-1106"), @SerialName(value = "gpt-3.5-turbo") gpt_3_5_turbo("gpt-3.5-turbo"), @SerialName(value = "gpt-3.5-turbo-16k") gpt_3_5_turbo_16k("gpt-3.5-turbo-16k"), @SerialName(value = "gpt-3.5-turbo-0301") gpt_3_5_turbo_0301("gpt-3.5-turbo-0301"), @SerialName(value = "gpt-3.5-turbo-0613") gpt_3_5_turbo_0613("gpt-3.5-turbo-0613"), + @SerialName(value = "gpt-3.5-turbo-1106") gpt_3_5_turbo_1106("gpt-3.5-turbo-1106"), + @SerialName(value = "gpt-3.5-turbo-0125") gpt_3_5_turbo_0125("gpt-3.5-turbo-0125"), @SerialName(value = "gpt-3.5-turbo-16k-0613") gpt_3_5_turbo_16k_0613("gpt-3.5-turbo-16k-0613"); /** diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt index 4b8ca8628..a5a571694 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionRequestResponseFormat.kt @@ -11,14 +11,15 @@ import kotlinx.serialization.descriptors.* import kotlinx.serialization.encoding.* /** - * An object specifying the format that the model must output. Setting to `{ \"type\": - * \"json_object\" }` enables JSON mode, which guarantees the message the model generates is valid - * JSON. 
**Important:** when using JSON mode, you **must** also instruct the model to produce JSON - * yourself via a system or user message. Without this, the model may generate an unending stream of - * whitespace until the generation reaches the token limit, resulting in increased latency and - * appearance of a \"stuck\" request. Also note that the message content may be partially cut off if - * `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the - * conversation exceeded the max context length. + * An object specifying the format that the model must output. Compatible with + * [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than + * `gpt-3.5-turbo-1106`. Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which + * guarantees the message the model generates is valid JSON. **Important:** when using JSON mode, + * you **must** also instruct the model to produce JSON yourself via a system or user message. + * Without this, the model may generate an unending stream of whitespace until the generation + * reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note + * that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates + * the generation exceeded `max_tokens` or the conversation exceeded the max context length. * * @param type Must be one of `text` or `json_object`. */ diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt index fdc09bccd..b36004471 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInner.kt @@ -18,6 +18,7 @@ import kotlinx.serialization.encoding.* * (deprecated) if the model called a function. * @param index The index of the choice in the list of choices. * @param message + * @param logprobs */ @Serializable data class CreateChatCompletionResponseChoicesInner( @@ -29,7 +30,10 @@ data class CreateChatCompletionResponseChoicesInner( /* The index of the choice in the list of choices. */ @SerialName(value = "index") @Required val index: kotlin.Int, - @SerialName(value = "message") @Required val message: ChatCompletionResponseMessage + @SerialName(value = "message") @Required val message: ChatCompletionResponseMessage, + @SerialName(value = "logprobs") + @Required + val logprobs: CreateChatCompletionResponseChoicesInnerLogprobs? ) { /** diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInnerLogprobs.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInnerLogprobs.kt new file mode 100644 index 000000000..f9877ede1 --- /dev/null +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionResponseChoicesInnerLogprobs.kt @@ -0,0 +1,25 @@ +/** + * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). + * Do not edit this file manually. 
+ */ +@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport") + +package com.xebia.functional.openai.models + +import kotlinx.serialization.* +import kotlinx.serialization.descriptors.* +import kotlinx.serialization.encoding.* + +/** + * Log probability information for the choice. + * + * @param content A list of message content tokens with log probability information. + */ +@Serializable +data class CreateChatCompletionResponseChoicesInnerLogprobs( + + /* A list of message content tokens with log probability information. */ + @SerialName(value = "content") + @Required + val content: kotlin.collections.List<ChatCompletionTokenLogprob>? +) diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt index ea46c7897..48f7bc8fd 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateChatCompletionStreamResponseChoicesInner.kt @@ -18,6 +18,7 @@ import kotlinx.serialization.encoding.* * flag from our content filters, `tool_calls` if the model called a tool, or `function_call` * (deprecated) if the model called a function. * @param index The index of the choice in the list of choices. + * @param logprobs */ @Serializable data class CreateChatCompletionStreamResponseChoicesInner( @@ -29,7 +30,9 @@ val finishReason: CreateChatCompletionStreamResponseChoicesInner.FinishReason?, /* The index of the choice in the list of choices. */ - @SerialName(value = "index") @Required val index: kotlin.Int + @SerialName(value = "index") @Required val index: kotlin.Int, + @SerialName(value = "logprobs") + val logprobs: CreateChatCompletionResponseChoicesInnerLogprobs? = null ) { /** diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt index b946b1e49..d7a2f94ca 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequest.kt @@ -23,22 +23,22 @@ import kotlinx.serialization.encoding.* * @param frequencyPenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on * their existing frequency in the text so far, decreasing the model's likelihood to repeat the * same line verbatim. - * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + * [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) * @param logitBias Modify the likelihood of specified tokens appearing in the completion. Accepts a * JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an * associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) - * (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias - * is added to the logits generated by the model prior to sampling.
The exact effect will vary per - * model, but values between -1 and 1 should decrease or increase likelihood of selection; values - * like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an - * example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being - * generated. - * @param logprobs Include the log probabilities on the `logprobs` most likely tokens, as well the - * chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely - * tokens. The API will always return the `logprob` of the sampled token, so there may be up to - * `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. - * @param maxTokens The maximum number of [tokens](/tokenizer) to generate in the completion. The - * token count of your prompt plus `max_tokens` cannot exceed the model's context length. + * to convert text to token IDs. Mathematically, the bias is added to the logits generated by the + * model prior to sampling. The exact effect will vary per model, but values between -1 and 1 + * should decrease or increase likelihood of selection; values like -100 or 100 should result in a + * ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": + * -100}` to prevent the <|endoftext|> token from being generated. + * @param logprobs Include the log probabilities on the `logprobs` most likely output tokens, as + * well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 + * most likely tokens. The API will always return the `logprob` of the sampled token, so there may + * be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. + * @param maxTokens The maximum number of [tokens](/tokenizer) that can be generated in the + * completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context + * length. * [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) * for counting tokens. * @param n How many completions to generate for each prompt. **Note:** Because this parameter @@ -47,7 +47,7 @@ import kotlinx.serialization.encoding.* * @param presencePenalty Number between -2.0 and 2.0. Positive values penalize new tokens based on * whether they appear in the text so far, increasing the model's likelihood to talk about new * topics. - * [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) + * [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) * @param seed If specified, our system will make a best effort to sample deterministically, such * that repeated requests with the same `seed` and parameters should return the same result. * Determinism is not guaranteed, and you should refer to the `system_fingerprint` response @@ -84,23 +84,23 @@ data class CreateCompletionRequest( /* Echo back the prompt in addition to the completion */ @SerialName(value = "echo") val echo: kotlin.Boolean? = false, - /* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ + /* Number between -2.0 and 2.0. 
Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ + /* Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) */ @SerialName(value = "frequency_penalty") val frequencyPenalty: kotlin.Double? = (0).toDouble(), - /* Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated. */ + /* Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass `{\"50256\": -100}` to prevent the <|endoftext|> token from being generated. */ @SerialName(value = "logit_bias") val logitBias: kotlin.collections.Map<kotlin.String, kotlin.Int>? = null, - /* Include the log probabilities on the `logprobs` most likely tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. */ + /* Include the log probabilities on the `logprobs` most likely output tokens, as well the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response. The maximum value for `logprobs` is 5. */ @SerialName(value = "logprobs") val logprobs: kotlin.Int? = null, - /* The maximum number of [tokens](/tokenizer) to generate in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. */ + /* The maximum number of [tokens](/tokenizer) that can be generated in the completion. The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. */ @SerialName(value = "max_tokens") val maxTokens: kotlin.Int? = 16, /* How many completions to generate for each prompt. **Note:** Because this parameter generates many completions, it can quickly consume your token quota.
Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`. */ @SerialName(value = "n") val n: kotlin.Int? = 1, - /* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/gpt/parameter-details) */ + /* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) */ @SerialName(value = "presence_penalty") val presencePenalty: kotlin.Double? = (0).toDouble(), /* If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. */ diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequestModel.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequestModel.kt index 338577756..74d1efb30 100644 --- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequestModel.kt +++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequestModel.kt @@ -13,22 +13,14 @@ import kotlinx.serialization.* * all of your available models, or see our [Model overview](/docs/models/overview) for descriptions * of them. 
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequestModel.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequestModel.kt
index 338577756..74d1efb30 100644
--- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequestModel.kt
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateCompletionRequestModel.kt
@@ -13,22 +13,14 @@ import kotlinx.serialization.*
  * all of your available models, or see our [Model overview](/docs/models/overview) for descriptions
  * of them.
  *
- * Values:
- * babbage_002,davinci_002,gpt_3_5_turbo_instruct,text_davinci_003,text_davinci_002,text_davinci_001,code_davinci_002,text_curie_001,text_babbage_001,text_ada_001
+ * Values: gpt_3_5_turbo_instruct,davinci_002,babbage_002
  */
 @Serializable
 enum class CreateCompletionRequestModel(val value: kotlin.String) {
-  @SerialName(value = "babbage-002") babbage_002("babbage-002"),
-  @SerialName(value = "davinci-002") davinci_002("davinci-002"),
   @SerialName(value = "gpt-3.5-turbo-instruct") gpt_3_5_turbo_instruct("gpt-3.5-turbo-instruct"),
-  @SerialName(value = "text-davinci-003") text_davinci_003("text-davinci-003"),
-  @SerialName(value = "text-davinci-002") text_davinci_002("text-davinci-002"),
-  @SerialName(value = "text-davinci-001") text_davinci_001("text-davinci-001"),
-  @SerialName(value = "code-davinci-002") code_davinci_002("code-davinci-002"),
-  @SerialName(value = "text-curie-001") text_curie_001("text-curie-001"),
-  @SerialName(value = "text-babbage-001") text_babbage_001("text-babbage-001"),
-  @SerialName(value = "text-ada-001") text_ada_001("text-ada-001");
+  @SerialName(value = "davinci-002") davinci_002("davinci-002"),
+  @SerialName(value = "babbage-002") babbage_002("babbage-002");

   /**
    * Override [toString()] to avoid using the enum variable name as the value, and instead use the
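With the legacy completion models dropped, `gpt-3.5-turbo-instruct` is the main remaining target for this request. A minimal usage sketch, assuming `CreateCompletionRequest` also exposes `model` and `prompt` constructor parameters with the shapes used below (they sit outside the hunks above):

```kotlin
import com.xebia.functional.openai.models.CreateCompletionRequest
import com.xebia.functional.openai.models.CreateCompletionRequestModel

// Hypothetical usage; `model` and `prompt` are assumed constructor
// parameters that do not appear in the hunks above.
val completionRequest = CreateCompletionRequest(
  model = CreateCompletionRequestModel.gpt_3_5_turbo_instruct,
  prompt = "Summarize unified diffs in one sentence.",
  maxTokens = 64,                     // prompt tokens + max_tokens must fit the context window
  logitBias = mapOf("50256" to -100), // ban <|endoftext|>, per the logit_bias docs above
  frequencyPenalty = 0.5              // values above 0 discourage verbatim repetition
)
```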
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt
index a4163496a..a125b00ab 100644
--- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequest.kt
@@ -15,6 +15,8 @@ import kotlinx.serialization.encoding.*
  * @param model
  * @param encodingFormat The format to return the embeddings in. Can be either `float` or
  *   [`base64`](https://pypi.org/project/pybase64/).
+ * @param dimensions The number of dimensions the resulting output embeddings should have. Only
+ *   supported in `text-embedding-3` and later models.
  * @param user A unique identifier representing your end-user, which can help OpenAI to monitor and
  *   detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
  */
@@ -31,6 +33,9 @@ data class CreateEmbeddingRequest(
   @SerialName(value = "encoding_format")
   val encodingFormat: CreateEmbeddingRequest.EncodingFormat? = EncodingFormat.float,

+  /* The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. */
+  @SerialName(value = "dimensions") val dimensions: kotlin.Int? = null,
+
   /* A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). */
   @SerialName(value = "user") val user: kotlin.String? = null
 ) {
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequestModel.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequestModel.kt
index dab33db04..cba12810f 100644
--- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequestModel.kt
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateEmbeddingRequestModel.kt
@@ -13,12 +13,14 @@ import kotlinx.serialization.*
  * all of your available models, or see our [Model overview](/docs/models/overview) for descriptions
  * of them.
  *
- * Values: text_embedding_ada_002
+ * Values: text_embedding_ada_002,text_embedding_3_small,text_embedding_3_large
  */
 @Serializable
 enum class CreateEmbeddingRequestModel(val value: kotlin.String) {
-  @SerialName(value = "text-embedding-ada-002") text_embedding_ada_002("text-embedding-ada-002");
+  @SerialName(value = "text-embedding-ada-002") text_embedding_ada_002("text-embedding-ada-002"),
+  @SerialName(value = "text-embedding-3-small") text_embedding_3_small("text-embedding-3-small"),
+  @SerialName(value = "text-embedding-3-large") text_embedding_3_large("text-embedding-3-large");

   /**
    * Override [toString()] to avoid using the enum variable name as the value, and instead use the
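The new `dimensions` knob only applies to the `text-embedding-3` family. A short sketch of a request that shortens the returned vectors; treating `input` as a plain string is an assumption, since its generated type is not part of this diff:

```kotlin
import com.xebia.functional.openai.models.CreateEmbeddingRequest
import com.xebia.functional.openai.models.CreateEmbeddingRequestModel

// Sketch: `input`'s generated type is assumed to accept a plain string here.
val embeddingRequest = CreateEmbeddingRequest(
  input = "The quick brown fox",
  model = CreateEmbeddingRequestModel.text_embedding_3_small,
  dimensions = 256 // only honored by text-embedding-3 and later; leave null for ada-002
)
```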
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategories.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategories.kt
index 23794f79e..d761e56ec 100644
--- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategories.kt
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateModerationResponseResultsInnerCategories.kt
@@ -15,7 +15,7 @@ import kotlinx.serialization.encoding.*
  *
  * @param hate Content that expresses, incites, or promotes hate based on race, gender, ethnicity,
  *   religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed
- *   at non-protected groups (e.g., chess players) is harrassment.
+ *   at non-protected groups (e.g., chess players) is harassment.
  * @param hateThreatening Hateful content that also includes violence or serious harm towards the
  *   targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation,
  *   disability status, or caste.
@@ -40,7 +40,7 @@ import kotlinx.serialization.encoding.*
 @Serializable
 data class CreateModerationResponseResultsInnerCategories(

-  /* Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harrassment. */
+  /* Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. */
   @SerialName(value = "hate") @Required val hate: kotlin.Boolean,

   /* Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. */
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateRunRequest.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateRunRequest.kt
index 9a8e88d94..193cd1c87 100644
--- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateRunRequest.kt
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateRunRequest.kt
@@ -16,8 +16,11 @@ import kotlinx.serialization.encoding.*
  * @param model The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If
  *   a value is provided here, it will override the model associated with the assistant. If not, the
  *   model associated with the assistant will be used.
- * @param instructions Override the default system message of the assistant. This is useful for
- *   modifying the behavior on a per-run basis.
+ * @param instructions Overrides the [instructions](/docs/api-reference/assistants/createAssistant)
+ *   of the assistant. This is useful for modifying the behavior on a per-run basis.
+ * @param additionalInstructions Appends additional instructions at the end of the instructions for
+ *   the run. This is useful for modifying the behavior on a per-run basis without overriding other
+ *   instructions.
  * @param tools Override the tools the assistant can use for this run. This is useful for modifying
  *   the behavior on a per-run basis.
  * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful
@@ -33,9 +36,12 @@ data class CreateRunRequest(
   /* The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. */
   @SerialName(value = "model") val model: kotlin.String? = null,

-  /* Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. */
+  /* Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. */
   @SerialName(value = "instructions") val instructions: kotlin.String? = null,

+  /* Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. */
+  @SerialName(value = "additional_instructions") val additionalInstructions: kotlin.String? = null,
+
   /* Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. */
   @SerialName(value = "tools") val tools:
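`additional_instructions` is additive while `instructions` is a wholesale override, so the two cover different per-run needs. A sketch, assuming the generated class keeps a required `assistantId` parameter (it sits outside the hunks above):

```kotlin
import com.xebia.functional.openai.models.CreateRunRequest

// `instructions` would replace the assistant's own instructions wholesale;
// `additional_instructions` is appended and leaves them intact.
val runRequest = CreateRunRequest(
  assistantId = "asst_abc123", // assumed required field, not shown in this diff
  additionalInstructions = "For this run only, answer in Spanish."
)
```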
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequest.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequest.kt
index 30954a9ac..1dafaeeec 100644
--- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequest.kt
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/CreateSpeechRequest.kt
@@ -14,9 +14,11 @@ import kotlinx.serialization.encoding.*
  * @param model
  * @param input The text to generate audio for. The maximum length is 4096 characters.
  * @param voice The voice to use when generating the audio. Supported voices are `alloy`, `echo`,
- *   `fable`, `onyx`, `nova`, and `shimmer`.
- * @param responseFormat The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and
- *   `flac`.
+ *   `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the
+ *   [Text to speech guide](/docs/guides/text-to-speech/voice-options).
+ * @param responseFormat The format to return audio in. Supported formats are `mp3`, `opus`, `aac`,
+ *   `flac`, `pcm`, and `wav`. The `pcm` audio format, similar to `wav` but without a header,
+ *   utilizes a 24kHz sample rate, mono channel, and 16-bit depth in signed little-endian format.
  * @param speed The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the
  *   default.
  */
@@ -29,10 +31,10 @@ data class CreateSpeechRequest(
   /* The text to generate audio for. The maximum length is 4096 characters. */
   @SerialName(value = "input") @Required val input: kotlin.String,

-  /* The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. */
+  /* The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options). */
   @SerialName(value = "voice") @Required val voice: CreateSpeechRequest.Voice,

-  /* The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`. */
+  /* The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `pcm`, and `wav`. The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz sample rate, mono channel, and 16-bit depth in signed little-endian format. */
   @SerialName(value = "response_format")
   val responseFormat: CreateSpeechRequest.ResponseFormat? = ResponseFormat.mp3,
@@ -42,7 +44,8 @@ data class CreateSpeechRequest(

   /**
    * The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`,
-   * `onyx`, `nova`, and `shimmer`.
+   * `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the
+   * [Text to speech guide](/docs/guides/text-to-speech/voice-options).
    *
    * Values: alloy,echo,fable,onyx,nova,shimmer
    */
@@ -56,15 +59,19 @@ data class CreateSpeechRequest(
     @SerialName(value = "shimmer") shimmer("shimmer")
   }

   /**
-   * The format to audio in. Supported formats are `mp3`, `opus`, `aac`, and `flac`.
+   * The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `pcm`, and
+   * `wav`. The `pcm` audio format, similar to `wav` but without a header, utilizes a 24kHz sample
+   * rate, mono channel, and 16-bit depth in signed little-endian format.
    *
-   * Values: mp3,opus,aac,flac
+   * Values: mp3,opus,aac,flac,pcm,wav
    */
   @Serializable
   enum class ResponseFormat(val value: kotlin.String) {
     @SerialName(value = "mp3") mp3("mp3"),
     @SerialName(value = "opus") opus("opus"),
     @SerialName(value = "aac") aac("aac"),
-    @SerialName(value = "flac") flac("flac")
+    @SerialName(value = "flac") flac("flac"),
+    @SerialName(value = "pcm") pcm("pcm"),
+    @SerialName(value = "wav") wav("wav")
   }
 }
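Because `pcm` is headerless, the payload is pure sample data at a fixed 24kHz/mono/16-bit layout, which makes duration arithmetic trivial. A sketch; the `model` field's enum values (e.g. `tts_1`) are assumed, since they are not part of this diff:

```kotlin
import com.xebia.functional.openai.models.CreateSpeechRequest
import com.xebia.functional.openai.models.CreateSpeechRequestModel

val speechRequest = CreateSpeechRequest(
  model = CreateSpeechRequestModel.tts_1, // assumed enum value; not shown in this diff
  input = "Hello from xef!",
  voice = CreateSpeechRequest.Voice.nova,
  responseFormat = CreateSpeechRequest.ResponseFormat.pcm
)

// Headerless pcm at 24kHz, mono, 16-bit: 48,000 bytes per second of audio.
fun pcmDurationSeconds(bytes: ByteArray): Double = bytes.size / (24_000.0 * 2)
```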
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/FunctionObject.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/FunctionObject.kt
index 1028588d6..97e434fe4 100644
--- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/FunctionObject.kt
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/FunctionObject.kt
@@ -13,17 +13,17 @@ import kotlinx.serialization.encoding.*
 /**
  * @param name The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores
  *   and dashes, with a maximum length of 64.
- * @param parameters
  * @param description A description of what the function does, used by the model to choose when and
  *   how to call the function.
+ * @param parameters
  */
 @Serializable
 data class FunctionObject(

   /* The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. */
   @SerialName(value = "name") @Required val name: kotlin.String,

-  @SerialName(value = "parameters") @Required val parameters: kotlinx.serialization.json.JsonObject,

   /* A description of what the function does, used by the model to choose when and how to call the function. */
-  @SerialName(value = "description") val description: kotlin.String? = null
+  @SerialName(value = "description") val description: kotlin.String? = null,
+  @SerialName(value = "parameters") val parameters: kotlinx.serialization.json.JsonObject? = null
 )
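With `parameters` now optional and moved behind `description`, positional call sites silently change meaning; named arguments sidestep that, and a schema-less declaration becomes legal. A minimal sketch using only the constructor shown above:

```kotlin
import com.xebia.functional.openai.models.FunctionObject
import kotlinx.serialization.json.buildJsonObject
import kotlinx.serialization.json.put

// Legal now that `parameters` is nullable: a function that takes no arguments.
val ping = FunctionObject(name = "ping", description = "Health check, no arguments")

// Named arguments keep this safe against the positional reorder.
val getWeather = FunctionObject(
  name = "get_weather",
  description = "Look up the current weather for a city",
  parameters = buildJsonObject {
    put("type", "object")
    put("properties", buildJsonObject { put("city", buildJsonObject { put("type", "string") }) })
  }
)
```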
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyAssistantRequest.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyAssistantRequest.kt
index 3d1482f38..61184c856 100644
--- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyAssistantRequest.kt
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/ModifyAssistantRequest.kt
@@ -20,7 +20,7 @@ import kotlinx.serialization.encoding.*
  *   assistant. Tools can be of types `code_interpreter`, `retrieval`, or `function`.
  * @param fileIds A list of [File](/docs/api-reference/files) IDs attached to this assistant. There
  *   can be a maximum of 20 files attached to the assistant. Files are ordered by their creation
- *   date in ascending order. If a file was previosuly attached to the list but does not show up in
+ *   date in ascending order. If a file was previously attached to the list but does not show up in
  *   the list, it will be deleted from the assistant.
  * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful
  *   for storing additional information about the object in a structured format. Keys can be a
@@ -45,7 +45,7 @@ data class ModifyAssistantRequest(
     kotlin.collections.List? = arrayListOf(),

-  /* A list of [File](/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previosuly attached to the list but does not show up in the list, it will be deleted from the assistant. */
+  /* A list of [File](/docs/api-reference/files) IDs attached to this assistant. There can be a maximum of 20 files attached to the assistant. Files are ordered by their creation date in ascending order. If a file was previously attached to the list but does not show up in the list, it will be deleted from the assistant. */
   @SerialName(value = "file_ids")
   val fileIds: kotlin.collections.List<kotlin.String>? = arrayListOf(),
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunCompletionUsage.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunCompletionUsage.kt
new file mode 100644
index 000000000..915ece198
--- /dev/null
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunCompletionUsage.kt
@@ -0,0 +1,32 @@
+/**
+ * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+ * Do not edit this file manually.
+ */
+@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")
+
+package com.xebia.functional.openai.models
+
+import kotlinx.serialization.*
+import kotlinx.serialization.descriptors.*
+import kotlinx.serialization.encoding.*
+
+/**
+ * Usage statistics related to the run. This value will be `null` if the run is not in a terminal
+ * state (i.e. `in_progress`, `queued`, etc.).
+ *
+ * @param completionTokens Number of completion tokens used over the course of the run.
+ * @param promptTokens Number of prompt tokens used over the course of the run.
+ * @param totalTokens Total number of tokens used (prompt + completion).
+ */
+@Serializable
+data class RunCompletionUsage(
+
+  /* Number of completion tokens used over the course of the run. */
+  @SerialName(value = "completion_tokens") @Required val completionTokens: kotlin.Int,
+
+  /* Number of prompt tokens used over the course of the run. */
+  @SerialName(value = "prompt_tokens") @Required val promptTokens: kotlin.Int,
+
+  /* Total number of tokens used (prompt + completion). */
+  @SerialName(value = "total_tokens") @Required val totalTokens: kotlin.Int
+)
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObject.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObject.kt
index e74e3cc4d..48599ba33 100644
--- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObject.kt
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunObject.kt
@@ -39,6 +39,7 @@ import kotlinx.serialization.encoding.*
  * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful
  *   for storing additional information about the object in a structured format. Keys can be a
  *   maximum of 64 characters long and values can be a maximum of 512 characters long.
+ * @param usage
  */
 @Serializable
 data class RunObject(
@@ -97,7 +98,8 @@ data class RunObject(
   val fileIds: kotlin.collections.List<kotlin.String> = arrayListOf(),

   /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */
-  @SerialName(value = "metadata") @Required val metadata: kotlinx.serialization.json.JsonObject?
+  @SerialName(value = "metadata") @Required val metadata: kotlinx.serialization.json.JsonObject?,
+  @SerialName(value = "usage") @Required val usage: RunCompletionUsage?
 ) {

   /**
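Since `usage` stays `null` until the run reaches a terminal state, callers should read it defensively. A sketch; `run.id` is an assumed `RunObject` field outside these hunks:

```kotlin
import com.xebia.functional.openai.models.RunObject

// `usage` is null while the run is queued or in progress, so guard every read.
fun usageSummary(run: RunObject): String =
  run.usage
    ?.let { "prompt=${it.promptTokens} completion=${it.completionTokens} total=${it.totalTokens}" }
    ?: "run ${run.id} has no usage yet (not in a terminal state)" // id assumed, not in this diff
```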
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepCompletionUsage.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepCompletionUsage.kt
new file mode 100644
index 000000000..952a75f47
--- /dev/null
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepCompletionUsage.kt
@@ -0,0 +1,32 @@
+/**
+ * Please note: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
+ * Do not edit this file manually.
+ */
+@file:Suppress("ArrayInDataClass", "EnumEntryName", "RemoveRedundantQualifierName", "UnusedImport")
+
+package com.xebia.functional.openai.models
+
+import kotlinx.serialization.*
+import kotlinx.serialization.descriptors.*
+import kotlinx.serialization.encoding.*
+
+/**
+ * Usage statistics related to the run step. This value will be `null` while the run step's status
+ * is `in_progress`.
+ *
+ * @param completionTokens Number of completion tokens used over the course of the run step.
+ * @param promptTokens Number of prompt tokens used over the course of the run step.
+ * @param totalTokens Total number of tokens used (prompt + completion).
+ */
+@Serializable
+data class RunStepCompletionUsage(
+
+  /* Number of completion tokens used over the course of the run step. */
+  @SerialName(value = "completion_tokens") @Required val completionTokens: kotlin.Int,
+
+  /* Number of prompt tokens used over the course of the run step. */
+  @SerialName(value = "prompt_tokens") @Required val promptTokens: kotlin.Int,
+
+  /* Total number of tokens used (prompt + completion). */
+  @SerialName(value = "total_tokens") @Required val totalTokens: kotlin.Int
+)
diff --git a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObject.kt b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObject.kt
index 7a3a06fac..96a5a8674 100644
--- a/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObject.kt
+++ b/openai-client/client/src/commonMain/kotlin/com/xebia/functional/openai/models/RunStepObject.kt
@@ -14,7 +14,7 @@ import kotlinx.serialization.encoding.*
  * Represents a step in execution of a run.
  *
  * @param id The identifier of the run step, which can be referenced in API endpoints.
- * @param `object` The object type, which is always `thread.run.step``.
+ * @param `object` The object type, which is always `thread.run.step`.
  * @param createdAt The Unix timestamp (in seconds) for when the run step was created.
  * @param assistantId The ID of the [assistant](/docs/api-reference/assistants) associated with the
  *   run step.
@@ -33,6 +33,7 @@ import kotlinx.serialization.encoding.*
  * @param metadata Set of 16 key-value pairs that can be attached to an object. This can be useful
  *   for storing additional information about the object in a structured format. Keys can be a
  *   maximum of 64 characters long and values can be a maximum of 512 characters long.
+ * @param usage
  */
 @Serializable
 data class RunStepObject(
@@ -40,7 +41,7 @@ data class RunStepObject(
   /* The identifier of the run step, which can be referenced in API endpoints. */
   @SerialName(value = "id") @Required val id: kotlin.String,

-  /* The object type, which is always `thread.run.step``. */
+  /* The object type, which is always `thread.run.step`. */
   @SerialName(value = "object") @Required val `object`: RunStepObject.`Object`,

   /* The Unix timestamp (in seconds) for when the run step was created. */
@@ -78,11 +79,12 @@ data class RunStepObject(
   @SerialName(value = "completed_at") @Required val completedAt: kotlin.Int?,

   /* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. */
-  @SerialName(value = "metadata") val metadata: kotlinx.serialization.json.JsonObject? = null
+  @SerialName(value = "metadata") val metadata: kotlinx.serialization.json.JsonObject? = null,
+  @SerialName(value = "usage") @Required val usage: RunStepCompletionUsage?
 ) {

   /**
-   * The object type, which is always `thread.run.step``.
+   * The object type, which is always `thread.run.step`.
    *
    * Values: thread_run_step
    */
diff --git a/openai-client/client/src/jvmTest/kotlin/ArbUtils.kt b/openai-client/client/src/jvmTest/kotlin/ArbUtils.kt
index 1c9ae5da8..4b023bf34 100644
--- a/openai-client/client/src/jvmTest/kotlin/ArbUtils.kt
+++ b/openai-client/client/src/jvmTest/kotlin/ArbUtils.kt
@@ -27,7 +27,7 @@ val functionObjectArb = arbitrary {
   val name = idArb.bind()
   val fields = Arb.list(jsonObjectFieldArb).map { JsonObject(it.toMap()) }.bind()
   val description = Arb.string().orNull(0.5).bind()
-  FunctionObject(name, fields, description)
+  FunctionObject(name, description, fields)
 }

 val contentImageUrlDetailArb =
diff --git a/openai-client/client/src/jvmTest/kotlin/com/xebia/functional/openai/models/ext/assistant/AssistantToolsSpec.kt b/openai-client/client/src/jvmTest/kotlin/com/xebia/functional/openai/models/ext/assistant/AssistantToolsSpec.kt
index ed7927e6b..18243a804 100644
--- a/openai-client/client/src/jvmTest/kotlin/com/xebia/functional/openai/models/ext/assistant/AssistantToolsSpec.kt
+++ b/openai-client/client/src/jvmTest/kotlin/com/xebia/functional/openai/models/ext/assistant/AssistantToolsSpec.kt
@@ -30,7 +30,7 @@ class AssistantToolsSpec {
           JsonObject(
             listOfNotNull(
                 "name" to JsonPrimitive(fo.name),
-                "parameters" to fo.parameters,
+                "parameters" to (fo.parameters ?: JsonObject(emptyMap())),
                 fo.description?.let { "description" to JsonPrimitive(it) }
               )
               .toMap()
diff --git a/openai-client/generator/config/openai-api-commit b/openai-client/generator/config/openai-api-commit
index 024053509..7965a2453 100644
--- a/openai-client/generator/config/openai-api-commit
+++ b/openai-client/generator/config/openai-api-commit
@@ -1 +1 @@
-a7a0b63d1fac3acac9a223d57da1219c8a97c01e
\ No newline at end of file
+44306c2f4534144e169721e2ad160e00b6823bc7
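The updated test treats a missing schema as an empty object when building the expected JSON. The same null-coalescing pattern, extracted as a standalone sketch:

```kotlin
import kotlinx.serialization.json.JsonObject
import kotlinx.serialization.json.JsonPrimitive

// Mirrors the expectation built in AssistantToolsSpec: a null `parameters`
// is encoded as `{}` rather than dropped or serialized as `null`.
fun expectedFunctionJson(name: String, parameters: JsonObject?, description: String?): JsonObject =
  JsonObject(
    listOfNotNull(
      "name" to JsonPrimitive(name),
      "parameters" to (parameters ?: JsonObject(emptyMap())),
      description?.let { "description" to JsonPrimitive(it) }
    ).toMap()
  )
```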
diff --git a/openai-client/generator/src/main/java/ai/xef/openai/generator/KMMGeneratorConfig.java b/openai-client/generator/src/main/java/ai/xef/openai/generator/KMMGeneratorConfig.java
index d43cc8b35..72b6e170e 100644
--- a/openai-client/generator/src/main/java/ai/xef/openai/generator/KMMGeneratorConfig.java
+++ b/openai-client/generator/src/main/java/ai/xef/openai/generator/KMMGeneratorConfig.java
@@ -1,5 +1,6 @@
 package ai.xef.openai.generator;

+import io.swagger.v3.oas.models.media.Schema;
 import org.openapitools.codegen.*;
 import org.openapitools.codegen.languages.KotlinClientCodegen;
 import org.openapitools.codegen.model.ModelMap;
@@ -8,6 +9,9 @@ import org.openapitools.codegen.model.OperationsMap;

 import java.util.*;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
 import static java.util.Map.entry;

 public class KMMGeneratorConfig extends KotlinClientCodegen {
@@ -132,4 +136,25 @@ public OperationsMap postProcessOperationsWithModels(OperationsMap objs, List<ModelMap> allModels) {
+         * `EnumType.defaultEnumValue.asListOfOne()`
+         */
+        return matcher.replaceFirst("$1.asListOfOne()");
+      } else {
+        return defaultValue;
+      }
+    } else {
+      return defaultValue;
+    }
+  }
 }
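The generator hunk above rewrites generated default values for enum lists, but only the replacement string `$1.asListOfOne()` and the surrounding control flow are fully legible, so this is a hedged Kotlin sketch of that rewrite; the method name (presumably a `toDefaultValue` override, given the `Schema` and `Pattern` imports) and the regex itself are assumptions:

```kotlin
// Hedged sketch of the generator's default-value rewrite. The input shape
// "arrayListOf(EnumType.defaultEnumValue)" is an assumption; only the
// "$1.asListOfOne()" replacement is visible in the hunk above.
val enumListDefault = Regex("""arrayListOf\((.+)\)""")

fun rewriteDefault(defaultValue: String?): String? =
  if (defaultValue != null && enumListDefault.containsMatchIn(defaultValue))
    // e.g. "arrayListOf(EnumType.defaultEnumValue)" becomes
    //      "EnumType.defaultEnumValue.asListOfOne()"
    enumListDefault.replaceFirst(defaultValue, "$1.asListOfOne()")
  else defaultValue
```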