| Method | Route |
|---|---|
| POST | /v1/chat/completions |
import 'package:servicestack/servicestack.dart';
/**
* Annotations for the message, when applicable, as when using the web search tool.
*/
// @DataContract
class UrlCitation implements IConvertible
{
/**
* The index of the last character of the URL citation in the message.
*/
// @DataMember(Name="end_index")
int end_index = 0;
/**
* The index of the first character of the URL citation in the message.
*/
// @DataMember(Name="start_index")
int start_index = 0;
/**
* The title of the web resource.
*/
// @DataMember(Name="title")
String title = "";
/**
* The URL of the web resource.
*/
// @DataMember(Name="url")
String url = "";
UrlCitation({this.end_index,this.start_index,this.title,this.url});
UrlCitation.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
end_index = json['end_index'];
start_index = json['start_index'];
title = json['title'];
url = json['url'];
return this;
}
Map<String, dynamic> toJson() => {
'end_index': end_index,
'start_index': start_index,
'title': title,
'url': url
};
getTypeName() => "UrlCitation";
TypeContext? context = _ctx;
}
/**
* Annotations for the message, when applicable, as when using the web search tool.
*/
// @DataContract
class ChoiceAnnotation implements IConvertible
{
/**
* The type of the URL citation. Always url_citation.
*/
// @DataMember(Name="type")
String type = "";
/**
* A URL citation when using web search.
*/
// @DataMember(Name="url_citation")
UrlCitation? url_citation;
ChoiceAnnotation({this.type,this.url_citation});
ChoiceAnnotation.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
type = json['type'];
url_citation = JsonConverters.fromJson(json['url_citation'],'UrlCitation',context!);
return this;
}
Map<String, dynamic> toJson() => {
'type': type,
'url_citation': JsonConverters.toJson(url_citation,'UrlCitation',context!)
};
getTypeName() => "ChoiceAnnotation";
TypeContext? context = _ctx;
}
/**
* If the audio output modality is requested, this object contains data about the audio response from the model.
*/
// @DataContract
class ChoiceAudio implements IConvertible
{
/**
* Base64 encoded audio bytes generated by the model, in the format specified in the request.
*/
// @DataMember(Name="data")
String data = "";
/**
* The Unix timestamp (in seconds) for when this audio response will no longer be accessible on the server for use in multi-turn conversations.
*/
// @DataMember(Name="expires_at")
int expires_at = 0;
/**
* Unique identifier for this audio response.
*/
// @DataMember(Name="id")
String id = "";
/**
* Transcript of the audio generated by the model.
*/
// @DataMember(Name="transcript")
String transcript = "";
ChoiceAudio({this.data,this.expires_at,this.id,this.transcript});
ChoiceAudio.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
data = json['data'];
expires_at = json['expires_at'];
id = json['id'];
transcript = json['transcript'];
return this;
}
Map<String, dynamic> toJson() => {
'data': data,
'expires_at': expires_at,
'id': id,
'transcript': transcript
};
getTypeName() => "ChoiceAudio";
TypeContext? context = _ctx;
}
/**
* The tool calls generated by the model, such as function calls.
*/
// @DataContract
class ToolCall implements IConvertible
{
/**
* The ID of the tool call.
*/
// @DataMember(Name="id")
String id = "";
/**
* The type of the tool. Currently, only `function` is supported.
*/
// @DataMember(Name="type")
String type = "";
/**
* The function that the model called.
*/
// @DataMember(Name="function")
String function = "";
ToolCall({this.id,this.type,this.function});
ToolCall.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
id = json['id'];
type = json['type'];
function = json['function'];
return this;
}
Map<String, dynamic> toJson() => {
'id': id,
'type': type,
'function': function
};
getTypeName() => "ToolCall";
TypeContext? context = _ctx;
}
// @DataContract
class ChoiceMessage implements IConvertible
{
/**
* The contents of the message.
*/
// @DataMember(Name="content")
String content = "";
/**
* The refusal message generated by the model.
*/
// @DataMember(Name="refusal")
String? refusal;
/**
* The reasoning process used by the model.
*/
// @DataMember(Name="reasoning")
String? reasoning;
/**
* The role of the author of this message.
*/
// @DataMember(Name="role")
String role = "";
/**
* Annotations for the message, when applicable, as when using the web search tool.
*/
// @DataMember(Name="annotations")
List<ChoiceAnnotation>? annotations;
/**
* If the audio output modality is requested, this object contains data about the audio response from the model.
*/
// @DataMember(Name="audio")
ChoiceAudio? audio;
/**
* The tool calls generated by the model, such as function calls.
*/
// @DataMember(Name="tool_calls")
List<ToolCall>? tool_calls;
ChoiceMessage({this.content,this.refusal,this.reasoning,this.role,this.annotations,this.audio,this.tool_calls});
ChoiceMessage.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
content = json['content'];
refusal = json['refusal'];
reasoning = json['reasoning'];
role = json['role'];
annotations = JsonConverters.fromJson(json['annotations'],'List<ChoiceAnnotation>',context!);
audio = JsonConverters.fromJson(json['audio'],'ChoiceAudio',context!);
tool_calls = JsonConverters.fromJson(json['tool_calls'],'List<ToolCall>',context!);
return this;
}
Map<String, dynamic> toJson() => {
'content': content,
'refusal': refusal,
'reasoning': reasoning,
'role': role,
'annotations': JsonConverters.toJson(annotations,'List<ChoiceAnnotation>',context!),
'audio': JsonConverters.toJson(audio,'ChoiceAudio',context!),
'tool_calls': JsonConverters.toJson(tool_calls,'List<ToolCall>',context!)
};
getTypeName() => "ChoiceMessage";
TypeContext? context = _ctx;
}
// @DataContract
class Choice implements IConvertible
{
/**
* The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `tool_calls` if the model called a tool.
*/
// @DataMember(Name="finish_reason")
String finish_reason = "";
/**
* The index of the choice in the list of choices.
*/
// @DataMember(Name="index")
int index = 0;
/**
* A chat completion message generated by the model.
*/
// @DataMember(Name="message")
ChoiceMessage? message;
Choice({this.finish_reason,this.index,this.message});
Choice.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
finish_reason = json['finish_reason'];
index = json['index'];
message = JsonConverters.fromJson(json['message'],'ChoiceMessage',context!);
return this;
}
Map<String, dynamic> toJson() => {
'finish_reason': finish_reason,
'index': index,
'message': JsonConverters.toJson(message,'ChoiceMessage',context!)
};
getTypeName() => "Choice";
TypeContext? context = _ctx;
}
/**
* Usage statistics for the completion request.
*/
// @DataContract
class AiCompletionUsage implements IConvertible
{
/**
* When using Predicted Outputs, the number of tokens in the prediction that appeared in the completion.
*/
// @DataMember(Name="accepted_prediction_tokens")
int accepted_prediction_tokens = 0;
/**
* Audio input tokens generated by the model.
*/
// @DataMember(Name="audio_tokens")
int audio_tokens = 0;
/**
* Tokens generated by the model for reasoning.
*/
// @DataMember(Name="reasoning_tokens")
int reasoning_tokens = 0;
/**
* When using Predicted Outputs, the number of tokens in the prediction that did not appear in the completion.
*/
// @DataMember(Name="rejected_prediction_tokens")
int rejected_prediction_tokens = 0;
AiCompletionUsage({this.accepted_prediction_tokens,this.audio_tokens,this.reasoning_tokens,this.rejected_prediction_tokens});
AiCompletionUsage.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
accepted_prediction_tokens = json['accepted_prediction_tokens'];
audio_tokens = json['audio_tokens'];
reasoning_tokens = json['reasoning_tokens'];
rejected_prediction_tokens = json['rejected_prediction_tokens'];
return this;
}
Map<String, dynamic> toJson() => {
'accepted_prediction_tokens': accepted_prediction_tokens,
'audio_tokens': audio_tokens,
'reasoning_tokens': reasoning_tokens,
'rejected_prediction_tokens': rejected_prediction_tokens
};
getTypeName() => "AiCompletionUsage";
TypeContext? context = _ctx;
}
/**
* Breakdown of tokens used in the prompt.
*/
// @DataContract
class AiPromptUsage implements IConvertible
{
/**
* When using Predicted Outputs, the number of tokens in the prediction that appeared in the completion.
*/
// @DataMember(Name="accepted_prediction_tokens")
int accepted_prediction_tokens = 0;
/**
* Audio input tokens present in the prompt.
*/
// @DataMember(Name="audio_tokens")
int audio_tokens = 0;
/**
* Cached tokens present in the prompt.
*/
// @DataMember(Name="cached_tokens")
int cached_tokens = 0;
AiPromptUsage({this.accepted_prediction_tokens,this.audio_tokens,this.cached_tokens});
AiPromptUsage.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
accepted_prediction_tokens = json['accepted_prediction_tokens'];
audio_tokens = json['audio_tokens'];
cached_tokens = json['cached_tokens'];
return this;
}
Map<String, dynamic> toJson() => {
'accepted_prediction_tokens': accepted_prediction_tokens,
'audio_tokens': audio_tokens,
'cached_tokens': cached_tokens
};
getTypeName() => "AiPromptUsage";
TypeContext? context = _ctx;
}
/**
* Usage statistics for the completion request.
*/
// @DataContract
class AiUsage implements IConvertible
{
/**
* Number of tokens in the generated completion.
*/
// @DataMember(Name="completion_tokens")
int completion_tokens = 0;
/**
* Number of tokens in the prompt.
*/
// @DataMember(Name="prompt_tokens")
int prompt_tokens = 0;
/**
* Total number of tokens used in the request (prompt + completion).
*/
// @DataMember(Name="total_tokens")
int total_tokens = 0;
/**
* Breakdown of tokens used in a completion.
*/
// @DataMember(Name="completion_tokens_details")
AiCompletionUsage? completion_tokens_details;
/**
* Breakdown of tokens used in the prompt.
*/
// @DataMember(Name="prompt_tokens_details")
AiPromptUsage? prompt_tokens_details;
AiUsage({this.completion_tokens,this.prompt_tokens,this.total_tokens,this.completion_tokens_details,this.prompt_tokens_details});
AiUsage.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
completion_tokens = json['completion_tokens'];
prompt_tokens = json['prompt_tokens'];
total_tokens = json['total_tokens'];
completion_tokens_details = JsonConverters.fromJson(json['completion_tokens_details'],'AiCompletionUsage',context!);
prompt_tokens_details = JsonConverters.fromJson(json['prompt_tokens_details'],'AiPromptUsage',context!);
return this;
}
Map<String, dynamic> toJson() => {
'completion_tokens': completion_tokens,
'prompt_tokens': prompt_tokens,
'total_tokens': total_tokens,
'completion_tokens_details': JsonConverters.toJson(completion_tokens_details,'AiCompletionUsage',context!),
'prompt_tokens_details': JsonConverters.toJson(prompt_tokens_details,'AiPromptUsage',context!)
};
getTypeName() => "AiUsage";
TypeContext? context = _ctx;
}
// @DataContract
class ChatResponse implements IConvertible
{
/**
* A unique identifier for the chat completion.
*/
// @DataMember(Name="id")
String id = "";
/**
* A list of chat completion choices. Can be more than one if n is greater than 1.
*/
// @DataMember(Name="choices")
List<Choice> choices = [];
/**
* The Unix timestamp (in seconds) of when the chat completion was created.
*/
// @DataMember(Name="created")
int created = 0;
/**
* The model used for the chat completion.
*/
// @DataMember(Name="model")
String model = "";
/**
* This fingerprint represents the backend configuration that the model runs with.
*/
// @DataMember(Name="system_fingerprint")
String? system_fingerprint;
/**
* The object type, which is always chat.completion.
*/
// @DataMember(Name="object")
String object = "";
/**
* Specifies the processing type used for serving the request.
*/
// @DataMember(Name="service_tier")
String? service_tier;
/**
* Usage statistics for the completion request.
*/
// @DataMember(Name="usage")
AiUsage? usage;
/**
* The provider used for the chat completion.
*/
// @DataMember(Name="provider")
String? provider;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format.
*/
// @DataMember(Name="metadata")
Map<String,String?>? metadata;
// @DataMember(Name="responseStatus")
ResponseStatus? responseStatus;
ChatResponse({this.id,this.choices,this.created,this.model,this.system_fingerprint,this.object,this.service_tier,this.usage,this.provider,this.metadata,this.responseStatus});
ChatResponse.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
id = json['id'];
choices = JsonConverters.fromJson(json['choices'],'List<Choice>',context!);
created = json['created'];
model = json['model'];
system_fingerprint = json['system_fingerprint'];
object = json['object'];
service_tier = json['service_tier'];
usage = JsonConverters.fromJson(json['usage'],'AiUsage',context!);
provider = json['provider'];
metadata = JsonConverters.toStringMap(json['metadata']);
responseStatus = JsonConverters.fromJson(json['responseStatus'],'ResponseStatus',context!);
return this;
}
Map<String, dynamic> toJson() => {
'id': id,
'choices': JsonConverters.toJson(choices,'List<Choice>',context!),
'created': created,
'model': model,
'system_fingerprint': system_fingerprint,
'object': object,
'service_tier': service_tier,
'usage': JsonConverters.toJson(usage,'AiUsage',context!),
'provider': provider,
'metadata': metadata,
'responseStatus': JsonConverters.toJson(responseStatus,'ResponseStatus',context!)
};
getTypeName() => "ChatResponse";
TypeContext? context = _ctx;
}
// @DataContract
abstract class AiContent
{
/**
* The type of the content part.
*/
// @DataMember(Name="type")
String type = "";
AiContent({this.type});
AiContent.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
type = json['type'];
return this;
}
Map<String, dynamic> toJson() => {
'type': type
};
getTypeName() => "AiContent";
TypeContext? context = _ctx;
}
/**
* A list of messages comprising the conversation so far.
*/
// @DataContract
class AiMessage implements IConvertible
{
/**
* The contents of the message.
*/
// @DataMember(Name="content")
List<AiContent>? content;
/**
* The role of the author of this message. Valid values are `system`, `user`, `assistant` and `tool`.
*/
// @DataMember(Name="role")
String role = "";
/**
* An optional name for the participant. Provides the model information to differentiate between participants of the same role.
*/
// @DataMember(Name="name")
String? name;
/**
* The tool calls generated by the model, such as function calls.
*/
// @DataMember(Name="tool_calls")
List<ToolCall>? tool_calls;
/**
* Tool call that this message is responding to.
*/
// @DataMember(Name="tool_call_id")
String? tool_call_id;
AiMessage({this.content,this.role,this.name,this.tool_calls,this.tool_call_id});
AiMessage.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
content = JsonConverters.fromJson(json['content'],'List<AiContent>',context!);
role = json['role'];
name = json['name'];
tool_calls = JsonConverters.fromJson(json['tool_calls'],'List<ToolCall>',context!);
tool_call_id = json['tool_call_id'];
return this;
}
Map<String, dynamic> toJson() => {
'content': JsonConverters.toJson(content,'List<AiContent>',context!),
'role': role,
'name': name,
'tool_calls': JsonConverters.toJson(tool_calls,'List<ToolCall>',context!),
'tool_call_id': tool_call_id
};
getTypeName() => "AiMessage";
TypeContext? context = _ctx;
}
/**
* Parameters for audio output. Required when audio output is requested with modalities: [audio]
*/
// @DataContract
class AiChatAudio implements IConvertible
{
/**
* Specifies the output audio format. Must be one of wav, mp3, flac, opus, or pcm16.
*/
// @DataMember(Name="format")
String format = "";
/**
* The voice the model uses to respond. Supported voices are alloy, ash, ballad, coral, echo, fable, nova, onyx, sage, and shimmer.
*/
// @DataMember(Name="voice")
String voice = "";
AiChatAudio({this.format,this.voice});
AiChatAudio.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
format = json['format'];
voice = json['voice'];
return this;
}
Map<String, dynamic> toJson() => {
'format': format,
'voice': voice
};
getTypeName() => "AiChatAudio";
TypeContext? context = _ctx;
}
enum ResponseFormat
{
Text,
JsonObject,
}
// @DataContract
class AiResponseFormat implements IConvertible
{
/**
* An object specifying the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106.
*/
// @DataMember(Name="response_format")
ResponseFormat? response_format;
AiResponseFormat({this.response_format});
AiResponseFormat.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
response_format = JsonConverters.fromJson(json['response_format'],'ResponseFormat',context!);
return this;
}
Map<String, dynamic> toJson() => {
'response_format': JsonConverters.toJson(response_format,'ResponseFormat',context!)
};
getTypeName() => "AiResponseFormat";
TypeContext? context = _ctx;
}
enum ToolType
{
Function,
}
// @DataContract
class Tool implements IConvertible
{
/**
* The type of the tool. Currently, only function is supported.
*/
// @DataMember(Name="type")
ToolType? type;
Tool({this.type});
Tool.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
type = JsonConverters.fromJson(json['type'],'ToolType',context!);
return this;
}
Map<String, dynamic> toJson() => {
'type': JsonConverters.toJson(type,'ToolType',context!)
};
getTypeName() => "Tool";
TypeContext? context = _ctx;
}
/**
* Chat Completions API (OpenAI-Compatible)
*/
// @DataContract
class ChatCompletion implements IPost, IConvertible
{
/**
* The messages to generate chat completions for.
*/
// @DataMember(Name="messages")
List<AiMessage> messages = [];
/**
* ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API
*/
// @DataMember(Name="model")
String model = "";
/**
* Parameters for audio output. Required when audio output is requested with modalities: [audio]
*/
// @DataMember(Name="audio")
AiChatAudio? audio;
/**
* Modify the likelihood of specified tokens appearing in the completion.
*/
// @DataMember(Name="logit_bias")
Map<int,int?>? logit_bias;
/**
* Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format.
*/
// @DataMember(Name="metadata")
Map<String,String?>? metadata;
/**
* Constrains effort on reasoning for reasoning models. Currently supported values are minimal, low, medium, and high (none, default). Reducing reasoning effort can result in faster responses and fewer tokens used on reasoning in a response.
*/
// @DataMember(Name="reasoning_effort")
String? reasoning_effort;
/**
* An object specifying the format that the model must output. Compatible with GPT-4 Turbo and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. Setting Type to ResponseFormat.JsonObject enables JSON mode, which guarantees the message the model generates is valid JSON.
*/
// @DataMember(Name="response_format")
AiResponseFormat? response_format;
/**
* Specifies the processing type used for serving the request.
*/
// @DataMember(Name="service_tier")
String? service_tier;
/**
* A stable identifier used to help detect users of your application that may be violating OpenAI's usage policies. The IDs should be a string that uniquely identifies each user.
*/
// @DataMember(Name="safety_identifier")
String? safety_identifier;
/**
* Up to 4 sequences where the API will stop generating further tokens.
*/
// @DataMember(Name="stop")
List<String>? stop;
/**
* Output types that you would like the model to generate. Most models are capable of generating text, which is the default:
*/
// @DataMember(Name="modalities")
List<String>? modalities;
/**
* Used by OpenAI to cache responses for similar requests to optimize your cache hit rates.
*/
// @DataMember(Name="prompt_cache_key")
String? prompt_cache_key;
/**
* A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.
*/
// @DataMember(Name="tools")
List<Tool>? tools;
/**
* Constrains the verbosity of the model's response. Lower values will result in more concise responses, while higher values will result in more verbose responses. Currently supported values are low, medium, and high.
*/
// @DataMember(Name="verbosity")
String? verbosity;
/**
* What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
*/
// @DataMember(Name="temperature")
double? temperature;
/**
* An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
*/
// @DataMember(Name="max_completion_tokens")
int? max_completion_tokens;
/**
* An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
*/
// @DataMember(Name="top_logprobs")
int? top_logprobs;
/**
* An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
*/
// @DataMember(Name="top_p")
double? top_p;
/**
* Number between `-2.0` and `2.0`. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
*/
// @DataMember(Name="frequency_penalty")
double? frequency_penalty;
/**
* Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
*/
// @DataMember(Name="presence_penalty")
double? presence_penalty;
/**
* This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.
*/
// @DataMember(Name="seed")
int? seed;
/**
* How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.
*/
// @DataMember(Name="n")
int? n;
/**
* Whether or not to store the output of this chat completion request for use in our model distillation or evals products.
*/
// @DataMember(Name="store")
bool? store;
/**
* Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the content of message.
*/
// @DataMember(Name="logprobs")
bool? logprobs;
/**
* Whether to enable parallel function calling during tool use.
*/
// @DataMember(Name="parallel_tool_calls")
bool? parallel_tool_calls;
/**
* Whether to enable thinking mode for some Qwen models and providers.
*/
// @DataMember(Name="enable_thinking")
bool? enable_thinking;
/**
* If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a `data: [DONE]` message.
*/
// @DataMember(Name="stream")
bool? stream;
ChatCompletion({this.messages,this.model,this.audio,this.logit_bias,this.metadata,this.reasoning_effort,this.response_format,this.service_tier,this.safety_identifier,this.stop,this.modalities,this.prompt_cache_key,this.tools,this.verbosity,this.temperature,this.max_completion_tokens,this.top_logprobs,this.top_p,this.frequency_penalty,this.presence_penalty,this.seed,this.n,this.store,this.logprobs,this.parallel_tool_calls,this.enable_thinking,this.stream});
ChatCompletion.fromJson(Map<String, dynamic> json) { fromMap(json); }
fromMap(Map<String, dynamic> json) {
messages = JsonConverters.fromJson(json['messages'],'List<AiMessage>',context!);
model = json['model'];
audio = JsonConverters.fromJson(json['audio'],'AiChatAudio',context!);
logit_bias = JsonConverters.fromJson(json['logit_bias'],'Map<int,int?>',context!);
metadata = JsonConverters.toStringMap(json['metadata']);
reasoning_effort = json['reasoning_effort'];
response_format = JsonConverters.fromJson(json['response_format'],'AiResponseFormat',context!);
service_tier = json['service_tier'];
safety_identifier = json['safety_identifier'];
stop = JsonConverters.fromJson(json['stop'],'List<String>',context!);
modalities = JsonConverters.fromJson(json['modalities'],'List<String>',context!);
prompt_cache_key = json['prompt_cache_key'];
tools = JsonConverters.fromJson(json['tools'],'List<Tool>',context!);
verbosity = json['verbosity'];
temperature = JsonConverters.toDouble(json['temperature']);
max_completion_tokens = json['max_completion_tokens'];
top_logprobs = json['top_logprobs'];
top_p = JsonConverters.toDouble(json['top_p']);
frequency_penalty = JsonConverters.toDouble(json['frequency_penalty']);
presence_penalty = JsonConverters.toDouble(json['presence_penalty']);
seed = json['seed'];
n = json['n'];
store = json['store'];
logprobs = json['logprobs'];
parallel_tool_calls = json['parallel_tool_calls'];
enable_thinking = json['enable_thinking'];
stream = json['stream'];
return this;
}
Map<String, dynamic> toJson() => {
'messages': JsonConverters.toJson(messages,'List<AiMessage>',context!),
'model': model,
'audio': JsonConverters.toJson(audio,'AiChatAudio',context!),
'logit_bias': JsonConverters.toJson(logit_bias,'Map<int,int?>',context!),
'metadata': metadata,
'reasoning_effort': reasoning_effort,
'response_format': JsonConverters.toJson(response_format,'AiResponseFormat',context!),
'service_tier': service_tier,
'safety_identifier': safety_identifier,
'stop': JsonConverters.toJson(stop,'List<String>',context!),
'modalities': JsonConverters.toJson(modalities,'List<String>',context!),
'prompt_cache_key': prompt_cache_key,
'tools': JsonConverters.toJson(tools,'List<Tool>',context!),
'verbosity': verbosity,
'temperature': temperature,
'max_completion_tokens': max_completion_tokens,
'top_logprobs': top_logprobs,
'top_p': top_p,
'frequency_penalty': frequency_penalty,
'presence_penalty': presence_penalty,
'seed': seed,
'n': n,
'store': store,
'logprobs': logprobs,
'parallel_tool_calls': parallel_tool_calls,
'enable_thinking': enable_thinking,
'stream': stream
};
getTypeName() => "ChatCompletion";
TypeContext? context = _ctx;
}
TypeContext _ctx = TypeContext(library: 'blazor_vue.web_templates.io', types: <String, TypeInfo> {
'UrlCitation': TypeInfo(TypeOf.Class, create:() => UrlCitation()),
'ChoiceAnnotation': TypeInfo(TypeOf.Class, create:() => ChoiceAnnotation()),
'ChoiceAudio': TypeInfo(TypeOf.Class, create:() => ChoiceAudio()),
'ToolCall': TypeInfo(TypeOf.Class, create:() => ToolCall()),
'ChoiceMessage': TypeInfo(TypeOf.Class, create:() => ChoiceMessage()),
'List<ChoiceAnnotation>': TypeInfo(TypeOf.Class, create:() => <ChoiceAnnotation>[]),
'List<ToolCall>': TypeInfo(TypeOf.Class, create:() => <ToolCall>[]),
'Choice': TypeInfo(TypeOf.Class, create:() => Choice()),
'AiCompletionUsage': TypeInfo(TypeOf.Class, create:() => AiCompletionUsage()),
'AiPromptUsage': TypeInfo(TypeOf.Class, create:() => AiPromptUsage()),
'AiUsage': TypeInfo(TypeOf.Class, create:() => AiUsage()),
'ChatResponse': TypeInfo(TypeOf.Class, create:() => ChatResponse()),
'List<Choice>': TypeInfo(TypeOf.Class, create:() => <Choice>[]),
'AiContent': TypeInfo(TypeOf.AbstractClass),
'AiMessage': TypeInfo(TypeOf.Class, create:() => AiMessage()),
'List<AiContent>': TypeInfo(TypeOf.Class, create:() => <AiContent>[]),
'AiChatAudio': TypeInfo(TypeOf.Class, create:() => AiChatAudio()),
'ResponseFormat': TypeInfo(TypeOf.Enum, enumValues:ResponseFormat.values),
'AiResponseFormat': TypeInfo(TypeOf.Class, create:() => AiResponseFormat()),
'ToolType': TypeInfo(TypeOf.Enum, enumValues:ToolType.values),
'Tool': TypeInfo(TypeOf.Class, create:() => Tool()),
'ChatCompletion': TypeInfo(TypeOf.Class, create:() => ChatCompletion()),
'List<AiMessage>': TypeInfo(TypeOf.Class, create:() => <AiMessage>[]),
'Map<int,int?>': TypeInfo(TypeOf.Class, create:() => Map<int,int?>()),
'List<Tool>': TypeInfo(TypeOf.Class, create:() => <Tool>[]),
});
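The generated DTOs above can be used directly for serialization. Below is a minimal usage sketch, assuming the code is saved as dtos.dart (a hypothetical filename) and using a placeholder model id; the message content parts are left unset because the concrete AiContent subclasses are defined outside this section.

```dart
import 'dart:convert';

import 'dtos.dart'; // the generated DTOs above (hypothetical filename)

void main() {
  // Build a request DTO. The concrete AiContent part types are defined
  // outside this section, so the message content is left unset here.
  final request = ChatCompletion(
    model: 'gpt-4o-mini', // placeholder model id
    messages: [AiMessage(role: 'user')],
    temperature: 0.7,
  );

  // Serialize to the wire format declared by the @DataMember names.
  print(jsonEncode(request.toJson()));

  // Parse a truncated, hypothetical response payload into ChatResponse.
  const payload = '{"id":"chatcmpl-123","object":"chat.completion",'
      '"created":0,"model":"gpt-4o-mini","choices":[]}';
  final response =
      ChatResponse.fromJson(jsonDecode(payload) as Map<String, dynamic>);
  print('${response.model}: ${response.choices.length} choice(s)');
}
```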
To override the Content-Type in your clients, use the HTTP Accept header, append the .xml suffix, or add ?format=xml to the query string.
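For example, the sketch below posts a JSON request body and uses the Accept header to choose the response representation. It assumes package:http is added as a dependency, the generated DTOs are saved as dtos.dart (hypothetical filename), and a placeholder model id; any authentication the endpoint may require is omitted.

```dart
import 'dart:convert';

import 'package:http/http.dart' as http;

import 'dtos.dart'; // the generated DTOs above (hypothetical filename)

Future<void> main() async {
  final uri =
      Uri.parse('https://blazor-vue.web-templates.io/v1/chat/completions');

  // Message content built in the OpenAI-compatible wire shape, since the
  // concrete AiContent part types are defined outside this section.
  final body = jsonEncode({
    'model': 'gpt-4o-mini', // placeholder model id
    'messages': [
      {'role': 'user', 'content': 'What is the capital of France?'}
    ],
  });

  final res = await http.post(
    uri,
    headers: {
      'Content-Type': 'application/json',
      // The Accept header selects the response representation; use
      // application/xml (or append ?format=xml) for the XML samples below.
      'Accept': 'application/json',
    },
    body: body,
  );

  // Deserialize the JSON response into the generated ChatResponse DTO.
  final chat =
      ChatResponse.fromJson(jsonDecode(res.body) as Map<String, dynamic>);
  print(chat.choices.first.message?.content);
}
```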
The following are sample HTTP requests and responses. The placeholders shown need to be replaced with actual values.
POST /v1/chat/completions HTTP/1.1
Host: blazor-vue.web-templates.io
Accept: application/xml
Content-Type: application/xml
Content-Length: length
<ChatCompletion xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.datacontract.org/2004/07/ServiceStack.AI">
<audio>
<format>String</format>
<voice>String</voice>
</audio>
<enable_thinking>false</enable_thinking>
<frequency_penalty>0</frequency_penalty>
<logit_bias xmlns:d2p1="http://schemas.microsoft.com/2003/10/Serialization/Arrays">
<d2p1:KeyValueOfintint>
<d2p1:Key>0</d2p1:Key>
<d2p1:Value>0</d2p1:Value>
</d2p1:KeyValueOfintint>
</logit_bias>
<logprobs>false</logprobs>
<max_completion_tokens>0</max_completion_tokens>
<messages>
<AiMessage>
<content>
<AiContent i:nil="true" />
</content>
<name>String</name>
<role>String</role>
<tool_call_id>String</tool_call_id>
<tool_calls>
<ToolCall>
<function>String</function>
<id>String</id>
<type>String</type>
</ToolCall>
</tool_calls>
</AiMessage>
</messages>
<metadata xmlns:d2p1="http://schemas.microsoft.com/2003/10/Serialization/Arrays">
<d2p1:KeyValueOfstringstring>
<d2p1:Key>String</d2p1:Key>
<d2p1:Value>String</d2p1:Value>
</d2p1:KeyValueOfstringstring>
</metadata>
<modalities xmlns:d2p1="http://schemas.microsoft.com/2003/10/Serialization/Arrays">
<d2p1:string>String</d2p1:string>
</modalities>
<model>String</model>
<n>0</n>
<parallel_tool_calls>false</parallel_tool_calls>
<presence_penalty>0</presence_penalty>
<prompt_cache_key>String</prompt_cache_key>
<reasoning_effort>String</reasoning_effort>
<response_format>
<response_format>Text</response_format>
</response_format>
<safety_identifier>String</safety_identifier>
<seed>0</seed>
<service_tier>String</service_tier>
<stop xmlns:d2p1="http://schemas.microsoft.com/2003/10/Serialization/Arrays">
<d2p1:string>String</d2p1:string>
</stop>
<store>false</store>
<stream>false</stream>
<temperature>0</temperature>
<tools>
<Tool>
<type>Function</type>
</Tool>
</tools>
<top_logprobs>0</top_logprobs>
<top_p>0</top_p>
<verbosity>String</verbosity>
</ChatCompletion>
HTTP/1.1 200 OK
Content-Type: application/xml
Content-Length: length
<ChatResponse xmlns:i="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.datacontract.org/2004/07/ServiceStack.AI">
<choices>
<Choice>
<finish_reason>String</finish_reason>
<index>0</index>
<message>
<annotations>
<ChoiceAnnotation>
<type>String</type>
<url_citation>
<end_index>0</end_index>
<start_index>0</start_index>
<title>String</title>
<url>String</url>
</url_citation>
</ChoiceAnnotation>
</annotations>
<audio>
<data>String</data>
<expires_at>0</expires_at>
<id>String</id>
<transcript>String</transcript>
</audio>
<content>String</content>
<reasoning>String</reasoning>
<refusal>String</refusal>
<role>String</role>
<tool_calls>
<ToolCall>
<function>String</function>
<id>String</id>
<type>String</type>
</ToolCall>
</tool_calls>
</message>
</Choice>
</choices>
<created>0</created>
<id>String</id>
<metadata xmlns:d2p1="http://schemas.microsoft.com/2003/10/Serialization/Arrays">
<d2p1:KeyValueOfstringstring>
<d2p1:Key>String</d2p1:Key>
<d2p1:Value>String</d2p1:Value>
</d2p1:KeyValueOfstringstring>
</metadata>
<model>String</model>
<object>String</object>
<provider>String</provider>
<responseStatus xmlns:d2p1="http://schemas.servicestack.net/types">
<d2p1:ErrorCode>String</d2p1:ErrorCode>
<d2p1:Message>String</d2p1:Message>
<d2p1:StackTrace>String</d2p1:StackTrace>
<d2p1:Errors>
<d2p1:ResponseError>
<d2p1:ErrorCode>String</d2p1:ErrorCode>
<d2p1:FieldName>String</d2p1:FieldName>
<d2p1:Message>String</d2p1:Message>
<d2p1:Meta xmlns:d5p1="http://schemas.microsoft.com/2003/10/Serialization/Arrays">
<d5p1:KeyValueOfstringstring>
<d5p1:Key>String</d5p1:Key>
<d5p1:Value>String</d5p1:Value>
</d5p1:KeyValueOfstringstring>
</d2p1:Meta>
</d2p1:ResponseError>
</d2p1:Errors>
<d2p1:Meta xmlns:d3p1="http://schemas.microsoft.com/2003/10/Serialization/Arrays">
<d3p1:KeyValueOfstringstring>
<d3p1:Key>String</d3p1:Key>
<d3p1:Value>String</d3p1:Value>
</d3p1:KeyValueOfstringstring>
</d2p1:Meta>
</responseStatus>
<service_tier>String</service_tier>
<system_fingerprint>String</system_fingerprint>
<usage>
<completion_tokens>0</completion_tokens>
<completion_tokens_details>
<accepted_prediction_tokens>0</accepted_prediction_tokens>
<audio_tokens>0</audio_tokens>
<reasoning_tokens>0</reasoning_tokens>
<rejected_prediction_tokens>0</rejected_prediction_tokens>
</completion_tokens_details>
<prompt_tokens>0</prompt_tokens>
<prompt_tokens_details>
<accepted_prediction_tokens>0</accepted_prediction_tokens>
<audio_tokens>0</audio_tokens>
<cached_tokens>0</cached_tokens>
</prompt_tokens_details>
<total_tokens>0</total_tokens>
</usage>
</ChatResponse>