func GetBaseChatRequestApiFormatEnumStringValues() []string
GetBaseChatRequestApiFormatEnumStringValues Enumerates the set of values in String for BaseChatRequestApiFormatEnum
func GetBaseChatResponseApiFormatEnumStringValues() []string
GetBaseChatResponseApiFormatEnumStringValues Enumerates the set of values in String for BaseChatResponseApiFormatEnum
func GetChatContentTypeEnumStringValues() []string
GetChatContentTypeEnumStringValues Enumerates the set of values in String for ChatContentTypeEnum
func GetCohereChatRequestCitationQualityEnumStringValues() []string
GetCohereChatRequestCitationQualityEnumStringValues Enumerates the set of values in String for CohereChatRequestCitationQualityEnum
func GetCohereChatRequestPromptTruncationEnumStringValues() []string
GetCohereChatRequestPromptTruncationEnumStringValues Enumerates the set of values in String for CohereChatRequestPromptTruncationEnum
func GetCohereChatResponseFinishReasonEnumStringValues() []string
GetCohereChatResponseFinishReasonEnumStringValues Enumerates the set of values in String for CohereChatResponseFinishReasonEnum
func GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues() []string
GetCohereLlmInferenceRequestReturnLikelihoodsEnumStringValues Enumerates the set of values in String for CohereLlmInferenceRequestReturnLikelihoodsEnum
func GetCohereLlmInferenceRequestTruncateEnumStringValues() []string
GetCohereLlmInferenceRequestTruncateEnumStringValues Enumerates the set of values in String for CohereLlmInferenceRequestTruncateEnum
func GetCohereMessageRoleEnumStringValues() []string
GetCohereMessageRoleEnumStringValues Enumerates the set of values in String for CohereMessageRoleEnum
func GetCohereResponseFormatTypeEnumStringValues() []string
GetCohereResponseFormatTypeEnumStringValues Enumerates the set of values in String for CohereResponseFormatTypeEnum
func GetEmbedTextDetailsInputTypeEnumStringValues() []string
GetEmbedTextDetailsInputTypeEnumStringValues Enumerates the set of values in String for EmbedTextDetailsInputTypeEnum
func GetEmbedTextDetailsTruncateEnumStringValues() []string
GetEmbedTextDetailsTruncateEnumStringValues Enumerates the set of values in String for EmbedTextDetailsTruncateEnum
func GetImageUrlDetailEnumStringValues() []string
GetImageUrlDetailEnumStringValues Enumerates the set of values in String for ImageUrlDetailEnum
func GetLlmInferenceRequestRuntimeTypeEnumStringValues() []string
GetLlmInferenceRequestRuntimeTypeEnumStringValues Enumerates the set of values in String for LlmInferenceRequestRuntimeTypeEnum
func GetLlmInferenceResponseRuntimeTypeEnumStringValues() []string
GetLlmInferenceResponseRuntimeTypeEnumStringValues Enumerates the set of values in String for LlmInferenceResponseRuntimeTypeEnum
func GetMessageRoleEnumStringValues() []string
GetMessageRoleEnumStringValues Enumerates the set of values in String for MessageRoleEnum
func GetServingModeServingTypeEnumStringValues() []string
GetServingModeServingTypeEnumStringValues Enumerates the set of values in String for ServingModeServingTypeEnum
func GetSummarizeTextDetailsExtractivenessEnumStringValues() []string
GetSummarizeTextDetailsExtractivenessEnumStringValues Enumerates the set of values in String for SummarizeTextDetailsExtractivenessEnum
func GetSummarizeTextDetailsFormatEnumStringValues() []string
GetSummarizeTextDetailsFormatEnumStringValues Enumerates the set of values in String for SummarizeTextDetailsFormatEnum
func GetSummarizeTextDetailsLengthEnumStringValues() []string
GetSummarizeTextDetailsLengthEnumStringValues Enumerates the set of values in String for SummarizeTextDetailsLengthEnum
func GetToolCallTypeEnumStringValues() []string
GetToolCallTypeEnumStringValues Enumerates the set of values in String for ToolCallTypeEnum
func GetToolChoiceTypeEnumStringValues() []string
GetToolChoiceTypeEnumStringValues Enumerates the set of values in String for ToolChoiceTypeEnum
func GetToolDefinitionTypeEnumStringValues() []string
GetToolDefinitionTypeEnumStringValues Enumerates the set of values in String for ToolDefinitionTypeEnum
AssistantMessage Represents a single instance of assistant message.
type AssistantMessage struct { // Contents of the chat message. Content []ChatContent `mandatory:"false" json:"content"` // An optional name for the participant. Provides the model information to differentiate between participants of the same role. Name *string `mandatory:"false" json:"name"` // The tool calls generated by the model, such as function calls. ToolCalls []ToolCall `mandatory:"false" json:"toolCalls"` }
func (m AssistantMessage) GetContent() []ChatContent
GetContent returns Content
func (m AssistantMessage) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m AssistantMessage) String() string
func (m *AssistantMessage) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m AssistantMessage) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
BaseChatRequest The base class to use for the chat inference request.
type BaseChatRequest interface { }
BaseChatRequestApiFormatEnum Enum with underlying type: string
type BaseChatRequestApiFormatEnum string
Set of constants representing the allowable values for BaseChatRequestApiFormatEnum
const ( BaseChatRequestApiFormatCohere BaseChatRequestApiFormatEnum = "COHERE" BaseChatRequestApiFormatGeneric BaseChatRequestApiFormatEnum = "GENERIC" )
func GetBaseChatRequestApiFormatEnumValues() []BaseChatRequestApiFormatEnum
GetBaseChatRequestApiFormatEnumValues Enumerates the set of values for BaseChatRequestApiFormatEnum
func GetMappingBaseChatRequestApiFormatEnum(val string) (BaseChatRequestApiFormatEnum, bool)
GetMappingBaseChatRequestApiFormatEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
BaseChatResponse The base class that creates the chat response.
type BaseChatResponse interface { }
BaseChatResponseApiFormatEnum Enum with underlying type: string
type BaseChatResponseApiFormatEnum string
Set of constants representing the allowable values for BaseChatResponseApiFormatEnum
const ( BaseChatResponseApiFormatCohere BaseChatResponseApiFormatEnum = "COHERE" BaseChatResponseApiFormatGeneric BaseChatResponseApiFormatEnum = "GENERIC" )
func GetBaseChatResponseApiFormatEnumValues() []BaseChatResponseApiFormatEnum
GetBaseChatResponseApiFormatEnumValues Enumerates the set of values for BaseChatResponseApiFormatEnum
func GetMappingBaseChatResponseApiFormatEnum(val string) (BaseChatResponseApiFormatEnum, bool)
GetMappingBaseChatResponseApiFormatEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
ChatChoice Represents a single instance of the chat response.
type ChatChoice struct { // The index of the chat. Index *int `mandatory:"true" json:"index"` Message Message `mandatory:"true" json:"message"` // The reason why the model stopped generating tokens. // Stops if the model hits a natural stop point or a provided stop sequence. Returns the length if the tokens reach the specified maximum number of tokens. FinishReason *string `mandatory:"true" json:"finishReason"` Logprobs *Logprobs `mandatory:"false" json:"logprobs"` }
func (m ChatChoice) String() string
func (m *ChatChoice) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m ChatChoice) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
ChatContent The base class for the chat content.
type ChatContent interface { }
ChatContentTypeEnum Enum with underlying type: string
type ChatContentTypeEnum string
Set of constants representing the allowable values for ChatContentTypeEnum
const ( ChatContentTypeText ChatContentTypeEnum = "TEXT" ChatContentTypeImage ChatContentTypeEnum = "IMAGE" )
func GetChatContentTypeEnumValues() []ChatContentTypeEnum
GetChatContentTypeEnumValues Enumerates the set of values for ChatContentTypeEnum
func GetMappingChatContentTypeEnum(val string) (ChatContentTypeEnum, bool)
GetMappingChatContentTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
ChatDetails Details of the conversation for the model to respond.
type ChatDetails struct { // The OCID of compartment in which to call the Generative AI service to chat. CompartmentId *string `mandatory:"true" json:"compartmentId"` ServingMode ServingMode `mandatory:"true" json:"servingMode"` ChatRequest BaseChatRequest `mandatory:"true" json:"chatRequest"` }
func (m ChatDetails) String() string
func (m *ChatDetails) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m ChatDetails) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
ChatRequest wrapper for the Chat operation
Click https://docs.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/Chat.go.html to see an example of how to use ChatRequest.
type ChatRequest struct { // Details of the conversation for the model to respond. ChatDetails `contributesTo:"body"` // A token that uniquely identifies a request so it can be retried in case of a timeout or // server error without risk of executing that same action again. Retry tokens expire after 24 // hours, but can be invalidated before that, in case of conflicting operations. For example, if a resource is deleted and purged from the system, then a retry of the original creation request // is rejected. OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"` // The client request ID for tracing. OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"` // Metadata about the request. This information will not be transmitted to the service, but // represents information that the SDK will consume to drive retry behavior. RequestMetadata common.RequestMetadata }
func (request ChatRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
BinaryRequestBody implements the OCIRequest interface
func (request ChatRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
HTTPRequest implements the OCIRequest interface
func (request ChatRequest) RetryPolicy() *common.RetryPolicy
RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (request ChatRequest) String() string
func (request ChatRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
ChatResponse wrapper for the Chat operation
type ChatResponse struct { // The underlying http response RawResponse *http.Response // The ChatResult instance ChatResult `presentIn:"body"` // For optimistic concurrency control. See `if-match`. Etag *string `presentIn:"header" name:"etag"` // Unique Oracle-assigned identifier for the request. If you need to contact // Oracle about a particular request, please provide the request ID. OpcRequestId *string `presentIn:"header" name:"opc-request-id"` }
func (response ChatResponse) HTTPResponse() *http.Response
HTTPResponse implements the OCIResponse interface
func (response ChatResponse) String() string
ChatResult The response to the chat conversation.
type ChatResult struct { // The OCID of the model that's used in this inference request. ModelId *string `mandatory:"true" json:"modelId"` // The version of the model. ModelVersion *string `mandatory:"true" json:"modelVersion"` ChatResponse BaseChatResponse `mandatory:"true" json:"chatResponse"` }
func (m ChatResult) String() string
func (m *ChatResult) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m ChatResult) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
Choice Represents a single instance of the generated text.
type Choice struct { // The index of the generated text. Index *int `mandatory:"true" json:"index"` // The generated text. Text *string `mandatory:"true" json:"text"` // The reason why the model stopped generating tokens. // Stops if the model hits a natural stop point or a provided stop sequence. Returns the length if the tokens reach the specified maximum number of tokens. FinishReason *string `mandatory:"true" json:"finishReason"` Logprobs *Logprobs `mandatory:"false" json:"logprobs"` }
func (m Choice) String() string
func (m Choice) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
Citation A section of the generated response which cites the documents that were used for generating the response.
type Citation struct { // Counting from zero, the index of the text where the citation starts. Start *int `mandatory:"true" json:"start"` // Counting from zero, the index of the text that the citation ends after. End *int `mandatory:"true" json:"end"` // The text of the citation. Text *string `mandatory:"true" json:"text"` // Identifiers for the documents cited in the current generated response. DocumentIds []string `mandatory:"true" json:"documentIds"` }
func (m Citation) String() string
func (m Citation) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
CohereChatBotMessage A message that represents a single chat dialog as CHATBOT role.
type CohereChatBotMessage struct { // Contents of the chat message. Message *string `mandatory:"false" json:"message"` // A list of tool calls generated by the model. ToolCalls []CohereToolCall `mandatory:"false" json:"toolCalls"` }
func (m CohereChatBotMessage) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m CohereChatBotMessage) String() string
func (m CohereChatBotMessage) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
CohereChatRequest Details for the chat request for Cohere models.
type CohereChatRequest struct { // The text that the user inputs for the model to respond to. Message *string `mandatory:"true" json:"message"` // The list of previous messages between the user and the model. The chat history gives the model context for responding to the user's inputs. ChatHistory []CohereMessage `mandatory:"false" json:"chatHistory"` // A list of relevant documents that the model can refer to for generating grounded responses to the user's requests. // Some example keys that you can add to the dictionary are "text", "author", and "date". Keep the total word count of the strings in the dictionary to 300 words or less. // Example: // `[ // { "title": "Tall penguins", "snippet": "Emperor penguins are the tallest." }, // { "title": "Penguin habitats", "snippet": "Emperor penguins only live in Antarctica." } // ]` Documents []interface{} `mandatory:"false" json:"documents"` ResponseFormat CohereResponseFormat `mandatory:"false" json:"responseFormat"` // When set to true, the response contains only a list of generated search queries without the search results and the model will not respond to the user's message. IsSearchQueriesOnly *bool `mandatory:"false" json:"isSearchQueriesOnly"` // If specified, the default Cohere preamble is replaced with the provided preamble. A preamble is an initial guideline message that can change the model's overall chat behavior and conversation style. Default preambles vary for different models. // Example: `You are a travel advisor. Answer with a pirate tone.` PreambleOverride *string `mandatory:"false" json:"preambleOverride"` // Whether to stream the partial progress of the model's response. When set to true, as tokens become available, they are sent as data-only server-sent events. IsStream *bool `mandatory:"false" json:"isStream"` // The maximum number of output tokens that the model will generate for the response. 
MaxTokens *int `mandatory:"false" json:"maxTokens"` // The maximum number of input tokens to send to the model. If not specified, max_input_tokens is the model's context length limit minus a small buffer. MaxInputTokens *int `mandatory:"false" json:"maxInputTokens"` // A number that sets the randomness of the generated output. A lower temperature means less random generations. // Use lower numbers for tasks such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs. Temperature *float64 `mandatory:"false" json:"temperature"` // A sampling method in which the model chooses the next token randomly from the top k most likely tokens. A higher value for k generates more random output, which makes the output text sound more natural. The default value for k is 0 which disables this method and considers all tokens. To set a number for the likely tokens, choose an integer between 1 and 500. // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20 but only the probabilities of the top 10 add up to the value of p, then only the top 10 tokens are chosen. TopK *int `mandatory:"false" json:"topK"` // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step. // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1.0 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k. 
TopP *float64 `mandatory:"false" json:"topP"` // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. Set to 0 to disable. FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"` // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable. PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"` // If specified, the backend will make a best effort to sample tokens deterministically, so that repeated requests with the same seed and parameters yield the same result. However, determinism cannot be fully guaranteed. Seed *int `mandatory:"false" json:"seed"` // Returns the full prompt that was sent to the model when True. IsEcho *bool `mandatory:"false" json:"isEcho"` // A list of available tools (functions) that the model may suggest invoking before producing a text response. Tools []CohereTool `mandatory:"false" json:"tools"` // A list of results from invoking tools recommended by the model in the previous chat turn. ToolResults []CohereToolResult `mandatory:"false" json:"toolResults"` // When enabled, the model will issue (potentially multiple) tool calls in a single step, before it receives the tool responses and directly answers the user's original message. IsForceSingleStep *bool `mandatory:"false" json:"isForceSingleStep"` // Stop the model generation when it reaches a stop sequence defined in this parameter. 
StopSequences []string `mandatory:"false" json:"stopSequences"` // When enabled, the user’s `message` will be sent to the model without any preprocessing. IsRawPrompting *bool `mandatory:"false" json:"isRawPrompting"` // Defaults to OFF. Dictates how the prompt will be constructed. With `promptTruncation` set to AUTO_PRESERVE_ORDER, some elements from `chatHistory` and `documents` will be dropped to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved. With `prompt_truncation` set to OFF, no elements will be dropped. PromptTruncation CohereChatRequestPromptTruncationEnum `mandatory:"false" json:"promptTruncation,omitempty"` // When FAST is selected, citations are generated at the same time as the text output and the request will be completed sooner. May result in less accurate citations. CitationQuality CohereChatRequestCitationQualityEnum `mandatory:"false" json:"citationQuality,omitempty"` }
func (m CohereChatRequest) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m CohereChatRequest) String() string
func (m *CohereChatRequest) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m CohereChatRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
CohereChatRequestCitationQualityEnum Enum with underlying type: string
type CohereChatRequestCitationQualityEnum string
Set of constants representing the allowable values for CohereChatRequestCitationQualityEnum
const ( CohereChatRequestCitationQualityAccurate CohereChatRequestCitationQualityEnum = "ACCURATE" CohereChatRequestCitationQualityFast CohereChatRequestCitationQualityEnum = "FAST" )
func GetCohereChatRequestCitationQualityEnumValues() []CohereChatRequestCitationQualityEnum
GetCohereChatRequestCitationQualityEnumValues Enumerates the set of values for CohereChatRequestCitationQualityEnum
func GetMappingCohereChatRequestCitationQualityEnum(val string) (CohereChatRequestCitationQualityEnum, bool)
GetMappingCohereChatRequestCitationQualityEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
CohereChatRequestPromptTruncationEnum Enum with underlying type: string
type CohereChatRequestPromptTruncationEnum string
Set of constants representing the allowable values for CohereChatRequestPromptTruncationEnum
const ( CohereChatRequestPromptTruncationOff CohereChatRequestPromptTruncationEnum = "OFF" CohereChatRequestPromptTruncationAutoPreserveOrder CohereChatRequestPromptTruncationEnum = "AUTO_PRESERVE_ORDER" )
func GetCohereChatRequestPromptTruncationEnumValues() []CohereChatRequestPromptTruncationEnum
GetCohereChatRequestPromptTruncationEnumValues Enumerates the set of values for CohereChatRequestPromptTruncationEnum
func GetMappingCohereChatRequestPromptTruncationEnum(val string) (CohereChatRequestPromptTruncationEnum, bool)
GetMappingCohereChatRequestPromptTruncationEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
CohereChatResponse The response to the chat conversation.
type CohereChatResponse struct { // Contents of the response that the model generates. Text *string `mandatory:"true" json:"text"` // The list of previous messages between the user and the model. The chat history gives the model context for responding to the user's inputs. ChatHistory []CohereMessage `mandatory:"false" json:"chatHistory"` // Inline citations for the generated response. Citations []Citation `mandatory:"false" json:"citations"` // If set to true, a search for documents is required. IsSearchRequired *bool `mandatory:"false" json:"isSearchRequired"` // If there is an error during the streaming scenario, then the `errorMessage` parameter contains details for the error. ErrorMessage *string `mandatory:"false" json:"errorMessage"` // The generated search queries. SearchQueries []SearchQuery `mandatory:"false" json:"searchQueries"` // The documents that the model can refer to when generating a response. Each document is a JSON string that represents the field and values of the document. // Example: // '[ // { // "id": "doc_0", // "snippet": "Emperor penguins are the tallest.", // "title": "Tall penguins" // }, // { // "id": "doc_1", // "snippet": "Emperor penguins only live in Antarctica.", // "title": "Penguin habitats" // } // ]' Documents []interface{} `mandatory:"false" json:"documents"` // A list of tool calls generated by the model. ToolCalls []CohereToolCall `mandatory:"false" json:"toolCalls"` // The full prompt that was sent to the model if isEcho is true when request. Prompt *string `mandatory:"false" json:"prompt"` // Why the generation stopped. FinishReason CohereChatResponseFinishReasonEnum `mandatory:"true" json:"finishReason"` }
func (m CohereChatResponse) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m CohereChatResponse) String() string
func (m *CohereChatResponse) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m CohereChatResponse) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
CohereChatResponseFinishReasonEnum Enum with underlying type: string
type CohereChatResponseFinishReasonEnum string
Set of constants representing the allowable values for CohereChatResponseFinishReasonEnum
const ( CohereChatResponseFinishReasonComplete CohereChatResponseFinishReasonEnum = "COMPLETE" CohereChatResponseFinishReasonErrorToxic CohereChatResponseFinishReasonEnum = "ERROR_TOXIC" CohereChatResponseFinishReasonErrorLimit CohereChatResponseFinishReasonEnum = "ERROR_LIMIT" CohereChatResponseFinishReasonError CohereChatResponseFinishReasonEnum = "ERROR" CohereChatResponseFinishReasonUserCancel CohereChatResponseFinishReasonEnum = "USER_CANCEL" CohereChatResponseFinishReasonMaxTokens CohereChatResponseFinishReasonEnum = "MAX_TOKENS" )
func GetCohereChatResponseFinishReasonEnumValues() []CohereChatResponseFinishReasonEnum
GetCohereChatResponseFinishReasonEnumValues Enumerates the set of values for CohereChatResponseFinishReasonEnum
func GetMappingCohereChatResponseFinishReasonEnum(val string) (CohereChatResponseFinishReasonEnum, bool)
GetMappingCohereChatResponseFinishReasonEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
CohereLlmInferenceRequest Details for the text generation request for Cohere models.
type CohereLlmInferenceRequest struct { // Represents the prompt to be completed. The trailing white spaces are trimmed before completion. Prompt *string `mandatory:"true" json:"prompt"` // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available. IsStream *bool `mandatory:"false" json:"isStream"` // The number of generated texts that will be returned. NumGenerations *int `mandatory:"false" json:"numGenerations"` // Whether or not to return the user prompt in the response. This option only applies to non-stream results. IsEcho *bool `mandatory:"false" json:"isEcho"` // The maximum number of tokens to predict for each response. Includes input plus output tokens. MaxTokens *int `mandatory:"false" json:"maxTokens"` // A number that sets the randomness of the generated output. A lower temperature means a less random generations. // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs. Temperature *float64 `mandatory:"false" json:"temperature"` // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output making the output text sound more natural. Default value is 0 which disables this method and considers all tokens. To set a number for the likely tokens, choose an integer between 1 and 500. // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen. 
TopK *int `mandatory:"false" json:"topK"` // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step. // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1.0 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k. TopP *float64 `mandatory:"false" json:"topP"` // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. Set to 0 to disable. FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"` // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Greater numbers encourage the model to use new tokens, while lower numbers encourage the model to repeat the tokens. // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable. PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"` // The generated text is cut at the end of the earliest occurrence of this stop sequence. The generated text will include this stop sequence. StopSequences []string `mandatory:"false" json:"stopSequences"` // Specifies how and if the token likelihoods are returned with the response. ReturnLikelihoods CohereLlmInferenceRequestReturnLikelihoodsEnum `mandatory:"false" json:"returnLikelihoods,omitempty"` // For an input that's longer than the maximum token length, specifies which part of the input text will be truncated. 
Truncate CohereLlmInferenceRequestTruncateEnum `mandatory:"false" json:"truncate,omitempty"` }
func (m CohereLlmInferenceRequest) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m CohereLlmInferenceRequest) String() string
func (m CohereLlmInferenceRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing the API request. Calling this function directly is not recommended.
CohereLlmInferenceRequestReturnLikelihoodsEnum Enum with underlying type: string
type CohereLlmInferenceRequestReturnLikelihoodsEnum string
Set of constants representing the allowable values for CohereLlmInferenceRequestReturnLikelihoodsEnum
const ( CohereLlmInferenceRequestReturnLikelihoodsNone CohereLlmInferenceRequestReturnLikelihoodsEnum = "NONE" CohereLlmInferenceRequestReturnLikelihoodsAll CohereLlmInferenceRequestReturnLikelihoodsEnum = "ALL" CohereLlmInferenceRequestReturnLikelihoodsGeneration CohereLlmInferenceRequestReturnLikelihoodsEnum = "GENERATION" )
func GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues() []CohereLlmInferenceRequestReturnLikelihoodsEnum
GetCohereLlmInferenceRequestReturnLikelihoodsEnumValues Enumerates the set of values for CohereLlmInferenceRequestReturnLikelihoodsEnum
func GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum(val string) (CohereLlmInferenceRequestReturnLikelihoodsEnum, bool)
GetMappingCohereLlmInferenceRequestReturnLikelihoodsEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
CohereLlmInferenceRequestTruncateEnum Enum with underlying type: string
type CohereLlmInferenceRequestTruncateEnum string
Set of constants representing the allowable values for CohereLlmInferenceRequestTruncateEnum
const ( CohereLlmInferenceRequestTruncateNone CohereLlmInferenceRequestTruncateEnum = "NONE" CohereLlmInferenceRequestTruncateStart CohereLlmInferenceRequestTruncateEnum = "START" CohereLlmInferenceRequestTruncateEnd CohereLlmInferenceRequestTruncateEnum = "END" )
func GetCohereLlmInferenceRequestTruncateEnumValues() []CohereLlmInferenceRequestTruncateEnum
GetCohereLlmInferenceRequestTruncateEnumValues Enumerates the set of values for CohereLlmInferenceRequestTruncateEnum
func GetMappingCohereLlmInferenceRequestTruncateEnum(val string) (CohereLlmInferenceRequestTruncateEnum, bool)
GetMappingCohereLlmInferenceRequestTruncateEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
CohereLlmInferenceResponse The generated text result to return.
type CohereLlmInferenceResponse struct { // Each prompt in the input array has an array of GeneratedText, controlled by numGenerations parameter in the request. GeneratedTexts []GeneratedText `mandatory:"true" json:"generatedTexts"` // The date and time that the model was created in an RFC3339 formatted datetime string. TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"` // Represents the original prompt. Applies only to non-stream responses. Prompt *string `mandatory:"false" json:"prompt"` }
func (m CohereLlmInferenceResponse) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m CohereLlmInferenceResponse) String() string
func (m CohereLlmInferenceResponse) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
CohereMessage A message that represents a single chat dialog.
type CohereMessage interface { }
CohereMessageRoleEnum Enum with underlying type: string
type CohereMessageRoleEnum string
Set of constants representing the allowable values for CohereMessageRoleEnum
const ( CohereMessageRoleChatbot CohereMessageRoleEnum = "CHATBOT" CohereMessageRoleUser CohereMessageRoleEnum = "USER" CohereMessageRoleSystem CohereMessageRoleEnum = "SYSTEM" CohereMessageRoleTool CohereMessageRoleEnum = "TOOL" )
func GetCohereMessageRoleEnumValues() []CohereMessageRoleEnum
GetCohereMessageRoleEnumValues Enumerates the set of values for CohereMessageRoleEnum
func GetMappingCohereMessageRoleEnum(val string) (CohereMessageRoleEnum, bool)
GetMappingCohereMessageRoleEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
CohereParameterDefinition A definition of tool parameter.
type CohereParameterDefinition struct { // The type of the parameter. Must be a valid Python type. Type *string `mandatory:"true" json:"type"` // The description of the parameter. Description *string `mandatory:"false" json:"description"` // Denotes whether the parameter is always present (required) or not. Defaults to not required. IsRequired *bool `mandatory:"false" json:"isRequired"` }
func (m CohereParameterDefinition) String() string
func (m CohereParameterDefinition) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
CohereResponseFormat Specify the format the model output is guaranteed to be of
type CohereResponseFormat interface { }
CohereResponseFormatTypeEnum Enum with underlying type: string
type CohereResponseFormatTypeEnum string
Set of constants representing the allowable values for CohereResponseFormatTypeEnum
const ( CohereResponseFormatTypeJsonObject CohereResponseFormatTypeEnum = "JSON_OBJECT" CohereResponseFormatTypeText CohereResponseFormatTypeEnum = "TEXT" )
func GetCohereResponseFormatTypeEnumValues() []CohereResponseFormatTypeEnum
GetCohereResponseFormatTypeEnumValues Enumerates the set of values for CohereResponseFormatTypeEnum
func GetMappingCohereResponseFormatTypeEnum(val string) (CohereResponseFormatTypeEnum, bool)
GetMappingCohereResponseFormatTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
CohereResponseJsonFormat The json object format for the model structured output
type CohereResponseJsonFormat struct {
// The schema used by the structured output, described as a JSON Schema object.
Schema *interface{} `mandatory:"false" json:"schema"`
}
func (m CohereResponseJsonFormat) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m CohereResponseJsonFormat) String() string
func (m CohereResponseJsonFormat) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
CohereResponseTextFormat The text format for cohere model response
type CohereResponseTextFormat struct { }
func (m CohereResponseTextFormat) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m CohereResponseTextFormat) String() string
func (m CohereResponseTextFormat) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
CohereSystemMessage A message that represents a single chat dialog as SYSTEM role.
type CohereSystemMessage struct { // Contents of the chat message. Message *string `mandatory:"true" json:"message"` }
func (m CohereSystemMessage) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m CohereSystemMessage) String() string
func (m CohereSystemMessage) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
CohereTool A definition of tool (function).
type CohereTool struct { // The name of the tool to be called. Valid names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit. Name *string `mandatory:"true" json:"name"` // The description of what the tool does, the model uses the description to choose when and how to call the function. Description *string `mandatory:"true" json:"description"` // The input parameters of the tool. ParameterDefinitions map[string]CohereParameterDefinition `mandatory:"false" json:"parameterDefinitions"` }
func (m CohereTool) String() string
func (m CohereTool) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
CohereToolCall A tool call generated by the model.
type CohereToolCall struct { // Name of the tool to call. Name *string `mandatory:"true" json:"name"` // The parameters to use when invoking a tool. Parameters *interface{} `mandatory:"true" json:"parameters"` }
func (m CohereToolCall) String() string
func (m CohereToolCall) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
CohereToolMessage A message that represents a single chat dialog as TOOL role.
type CohereToolMessage struct { // A list of results from invoking tools recommended by the model in the previous chat turn. ToolResults []CohereToolResult `mandatory:"true" json:"toolResults"` }
func (m CohereToolMessage) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m CohereToolMessage) String() string
func (m CohereToolMessage) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
CohereToolResult The result from invoking tools recommended by the model in the previous chat turn.
type CohereToolResult struct { Call *CohereToolCall `mandatory:"true" json:"call"` // An array of objects returned by tool. Outputs []interface{} `mandatory:"true" json:"outputs"` }
func (m CohereToolResult) String() string
func (m CohereToolResult) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
CohereUserMessage A message that represents a single chat dialog as USER role.
type CohereUserMessage struct { // Contents of the chat message. Message *string `mandatory:"true" json:"message"` }
func (m CohereUserMessage) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m CohereUserMessage) String() string
func (m CohereUserMessage) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
DedicatedServingMode The model's serving mode is dedicated serving and has an endpoint on a dedicated AI cluster.
type DedicatedServingMode struct { // The OCID of the endpoint to use. EndpointId *string `mandatory:"true" json:"endpointId"` }
func (m DedicatedServingMode) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m DedicatedServingMode) String() string
func (m DedicatedServingMode) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
Document The input of the document to rerank.
type Document struct { // The text of the document to rerank. Text *string `mandatory:"true" json:"text"` }
func (m Document) String() string
func (m Document) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
DocumentRank An object that contains a relevance score, an index and the text for a document.
type DocumentRank struct { // Index of the document in documents array. Index *int `mandatory:"false" json:"index"` // The relevance score for the document at that index. RelevanceScore *float64 `mandatory:"false" json:"relevanceScore"` Document *Document `mandatory:"false" json:"document"` }
func (m DocumentRank) String() string
func (m DocumentRank) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
EmbedTextDetails Details for the request to embed texts.
type EmbedTextDetails struct { // Provide a list of strings or one base64 encoded image with `input_type` setting to `IMAGE`. If text embedding, each string can be words, a phrase, or a paragraph. The maximum length of each string entry in the list is 512 tokens. Inputs []string `mandatory:"true" json:"inputs"` ServingMode ServingMode `mandatory:"true" json:"servingMode"` // The OCID of compartment in which to call the Generative AI service to create text embeddings. CompartmentId *string `mandatory:"true" json:"compartmentId"` // Whether or not to include the original inputs in the response. Results are index-based. IsEcho *bool `mandatory:"false" json:"isEcho"` // For an input that's longer than the maximum token length, specifies which part of the input text will be truncated. Truncate EmbedTextDetailsTruncateEnum `mandatory:"false" json:"truncate,omitempty"` // Specifies the input type. InputType EmbedTextDetailsInputTypeEnum `mandatory:"false" json:"inputType,omitempty"` }
func (m EmbedTextDetails) String() string
func (m *EmbedTextDetails) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m EmbedTextDetails) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
EmbedTextDetailsInputTypeEnum Enum with underlying type: string
type EmbedTextDetailsInputTypeEnum string
Set of constants representing the allowable values for EmbedTextDetailsInputTypeEnum
const ( EmbedTextDetailsInputTypeSearchDocument EmbedTextDetailsInputTypeEnum = "SEARCH_DOCUMENT" EmbedTextDetailsInputTypeSearchQuery EmbedTextDetailsInputTypeEnum = "SEARCH_QUERY" EmbedTextDetailsInputTypeClassification EmbedTextDetailsInputTypeEnum = "CLASSIFICATION" EmbedTextDetailsInputTypeClustering EmbedTextDetailsInputTypeEnum = "CLUSTERING" EmbedTextDetailsInputTypeImage EmbedTextDetailsInputTypeEnum = "IMAGE" )
func GetEmbedTextDetailsInputTypeEnumValues() []EmbedTextDetailsInputTypeEnum
GetEmbedTextDetailsInputTypeEnumValues Enumerates the set of values for EmbedTextDetailsInputTypeEnum
func GetMappingEmbedTextDetailsInputTypeEnum(val string) (EmbedTextDetailsInputTypeEnum, bool)
GetMappingEmbedTextDetailsInputTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
EmbedTextDetailsTruncateEnum Enum with underlying type: string
type EmbedTextDetailsTruncateEnum string
Set of constants representing the allowable values for EmbedTextDetailsTruncateEnum
const ( EmbedTextDetailsTruncateNone EmbedTextDetailsTruncateEnum = "NONE" EmbedTextDetailsTruncateStart EmbedTextDetailsTruncateEnum = "START" EmbedTextDetailsTruncateEnd EmbedTextDetailsTruncateEnum = "END" )
func GetEmbedTextDetailsTruncateEnumValues() []EmbedTextDetailsTruncateEnum
GetEmbedTextDetailsTruncateEnumValues Enumerates the set of values for EmbedTextDetailsTruncateEnum
func GetMappingEmbedTextDetailsTruncateEnum(val string) (EmbedTextDetailsTruncateEnum, bool)
GetMappingEmbedTextDetailsTruncateEnum performs a case-insensitive comparison on the enum value and returns the desired enum.
EmbedTextRequest wrapper for the EmbedText operation
Click https://docs.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/EmbedText.go.html to see an example of how to use EmbedTextRequest.
type EmbedTextRequest struct { // Details for generating the embed response. EmbedTextDetails `contributesTo:"body"` // A token that uniquely identifies a request so it can be retried in case of a timeout or // server error without risk of executing that same action again. Retry tokens expire after 24 // hours, but can be invalidated before that, in case of conflicting operations. For example, if a resource is deleted and purged from the system, then a retry of the original creation request // is rejected. OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"` // The client request ID for tracing. OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"` // Metadata about the request. This information will not be transmitted to the service, but // represents information that the SDK will consume to drive retry behavior. RequestMetadata common.RequestMetadata }
func (request EmbedTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
BinaryRequestBody implements the OCIRequest interface
func (request EmbedTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
HTTPRequest implements the OCIRequest interface
func (request EmbedTextRequest) RetryPolicy() *common.RetryPolicy
RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (request EmbedTextRequest) String() string
func (request EmbedTextRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
EmbedTextResponse wrapper for the EmbedText operation
type EmbedTextResponse struct { // The underlying http response RawResponse *http.Response // The EmbedTextResult instance EmbedTextResult `presentIn:"body"` // For optimistic concurrency control. See `if-match`. Etag *string `presentIn:"header" name:"etag"` // Unique Oracle-assigned identifier for the request. If you need to contact // Oracle about a particular request, please provide the request ID. OpcRequestId *string `presentIn:"header" name:"opc-request-id"` }
func (response EmbedTextResponse) HTTPResponse() *http.Response
HTTPResponse implements the OCIResponse interface
func (response EmbedTextResponse) String() string
EmbedTextResult The generated embedded result to return.
type EmbedTextResult struct { // A unique identifier for the generated result. Id *string `mandatory:"true" json:"id"` // The embeddings corresponding to inputs. Embeddings [][]float32 `mandatory:"true" json:"embeddings"` // The original inputs. Only present if "isEcho" is set to true. Inputs []string `mandatory:"false" json:"inputs"` // The OCID of the model used in this inference request. ModelId *string `mandatory:"false" json:"modelId"` // The version of the model. ModelVersion *string `mandatory:"false" json:"modelVersion"` }
func (m EmbedTextResult) String() string
func (m EmbedTextResult) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
FunctionCall The function call generated by the model.
type FunctionCall struct { // The ID of the tool call. Id *string `mandatory:"true" json:"id"` // The name of the function to call. Name *string `mandatory:"false" json:"name"` // The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. Arguments *string `mandatory:"false" json:"arguments"` }
func (m FunctionCall) GetId() *string
GetId returns Id
func (m FunctionCall) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m FunctionCall) String() string
func (m FunctionCall) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
FunctionDefinition A function the model may call.
type FunctionDefinition struct { // The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. Name *string `mandatory:"false" json:"name"` // A description of what the function does, used by the model to choose when and how to call the function. Description *string `mandatory:"false" json:"description"` // The parameters the functions accepts, described as a JSON Schema object. Omitting parameters defines a function with an empty parameter list. Parameters *interface{} `mandatory:"false" json:"parameters"` }
func (m FunctionDefinition) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m FunctionDefinition) String() string
func (m FunctionDefinition) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
GenerateTextDetails Details for the request to generate text.
type GenerateTextDetails struct { // The OCID of compartment in which to call the Generative AI service to generate text. CompartmentId *string `mandatory:"true" json:"compartmentId"` ServingMode ServingMode `mandatory:"true" json:"servingMode"` InferenceRequest LlmInferenceRequest `mandatory:"true" json:"inferenceRequest"` }
func (m GenerateTextDetails) String() string
func (m *GenerateTextDetails) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m GenerateTextDetails) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
GenerateTextRequest wrapper for the GenerateText operation
Click https://docs.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/GenerateText.go.html to see an example of how to use GenerateTextRequest.
type GenerateTextRequest struct { // Details for generating the text response. GenerateTextDetails `contributesTo:"body"` // A token that uniquely identifies a request so it can be retried in case of a timeout or // server error without risk of executing that same action again. Retry tokens expire after 24 // hours, but can be invalidated before that, in case of conflicting operations. For example, if a resource is deleted and purged from the system, then a retry of the original creation request // is rejected. OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"` // The client request ID for tracing. OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"` // Metadata about the request. This information will not be transmitted to the service, but // represents information that the SDK will consume to drive retry behavior. RequestMetadata common.RequestMetadata }
func (request GenerateTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
BinaryRequestBody implements the OCIRequest interface
func (request GenerateTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
HTTPRequest implements the OCIRequest interface
func (request GenerateTextRequest) RetryPolicy() *common.RetryPolicy
RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (request GenerateTextRequest) String() string
func (request GenerateTextRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
GenerateTextResponse wrapper for the GenerateText operation
type GenerateTextResponse struct { // The underlying http response RawResponse *http.Response // The GenerateTextResult instance GenerateTextResult `presentIn:"body"` // For optimistic concurrency control. See `if-match`. Etag *string `presentIn:"header" name:"etag"` // Unique Oracle-assigned identifier for the request. If you need to contact // Oracle about a particular request, please provide the request ID. OpcRequestId *string `presentIn:"header" name:"opc-request-id"` }
func (response GenerateTextResponse) HTTPResponse() *http.Response
HTTPResponse implements the OCIResponse interface
func (response GenerateTextResponse) String() string
GenerateTextResult The generated text result to return.
type GenerateTextResult struct { // The OCID of the model used in this inference request. ModelId *string `mandatory:"true" json:"modelId"` // The version of the model. ModelVersion *string `mandatory:"true" json:"modelVersion"` InferenceResponse LlmInferenceResponse `mandatory:"true" json:"inferenceResponse"` }
func (m GenerateTextResult) String() string
func (m *GenerateTextResult) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m GenerateTextResult) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
GeneratedText The text generated during each run.
type GeneratedText struct { // A unique identifier for this text generation. Id *string `mandatory:"true" json:"id"` // The generated text. Text *string `mandatory:"true" json:"text"` // The overall likelihood of the generated text. // When a large language model generates a new token for the output text, a likelihood is assigned to all tokens, where tokens with higher likelihoods are more likely to follow the current token. For example, it's more likely that the word favorite is followed by the word food or book rather than the word zebra. A lower likelihood means that it's less likely that token follows the current token. Likelihood *float64 `mandatory:"true" json:"likelihood"` // The reason why the model stopped generating tokens. // A model stops generating tokens if the model hits a natural stop point or reaches a provided stop sequence. FinishReason *string `mandatory:"false" json:"finishReason"` // A collection of generated tokens and their corresponding likelihoods. TokenLikelihoods []TokenLikelihood `mandatory:"false" json:"tokenLikelihoods"` }
func (m GeneratedText) String() string
func (m GeneratedText) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. It is not recommended to call this function directly.
GenerativeAiInferenceClient a client for GenerativeAiInference
type GenerativeAiInferenceClient struct { common.BaseClient // contains filtered or unexported fields }
func NewGenerativeAiInferenceClientWithConfigurationProvider(configProvider common.ConfigurationProvider) (client GenerativeAiInferenceClient, err error)
NewGenerativeAiInferenceClientWithConfigurationProvider Creates a new default GenerativeAiInference client with the given configuration provider. The configuration provider will be used for the default signer as well as for reading the region.
func NewGenerativeAiInferenceClientWithOboToken(configProvider common.ConfigurationProvider, oboToken string) (client GenerativeAiInferenceClient, err error)
NewGenerativeAiInferenceClientWithOboToken Creates a new default GenerativeAiInference client with the given configuration provider. The obo token will be added to default headers and signed; the configuration provider will be used for the signer as well as for reading the region.
func (client GenerativeAiInferenceClient) Chat(ctx context.Context, request ChatRequest) (response ChatResponse, err error)
Chat Creates a response for the given conversation.
Click https://docs.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/Chat.go.html to see an example of how to use Chat API. A default retry strategy applies to this operation Chat()
func (client *GenerativeAiInferenceClient) ConfigurationProvider() *common.ConfigurationProvider
ConfigurationProvider the ConfigurationProvider used in this client, or null if none set
func (client GenerativeAiInferenceClient) EmbedText(ctx context.Context, request EmbedTextRequest) (response EmbedTextResponse, err error)
EmbedText Produces embeddings for the inputs. An embedding is a numeric representation of a piece of text. This text can be a phrase, a sentence, or one or more paragraphs. The Generative AI embedding model transforms each phrase, sentence, or paragraph that you input, into an array with 1024 numbers. You can use these embeddings for finding similarity in your input text such as finding phrases that are similar in context or category. Embeddings are mostly used for semantic searches where the search function focuses on the meaning of the text that it's searching through rather than finding results based on keywords.
Click https://docs.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/EmbedText.go.html to see an example of how to use EmbedText API. A default retry strategy applies to this operation EmbedText()
func (client GenerativeAiInferenceClient) GenerateText(ctx context.Context, request GenerateTextRequest) (response GenerateTextResponse, err error)
GenerateText Generates a text response based on the user prompt.
Click https://docs.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/GenerateText.go.html to see an example of how to use GenerateText API. A default retry strategy applies to this operation GenerateText()
func (client GenerativeAiInferenceClient) RerankText(ctx context.Context, request RerankTextRequest) (response RerankTextResponse, err error)
RerankText Reranks the text responses based on the input documents and a prompt. Rerank assigns an index and a relevance score to each document, indicating which document is most related to the prompt.
Click https://docs.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/RerankText.go.html to see an example of how to use RerankText API. A default retry strategy applies to this operation RerankText()
func (client *GenerativeAiInferenceClient) SetRegion(region string)
SetRegion overrides the region of this client.
func (client GenerativeAiInferenceClient) SummarizeText(ctx context.Context, request SummarizeTextRequest) (response SummarizeTextResponse, err error)
SummarizeText Summarizes the input text.
Click https://docs.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/SummarizeText.go.html to see an example of how to use SummarizeText API. A default retry strategy applies to this operation SummarizeText()
GenericChatRequest Details for the chat request.
type GenericChatRequest struct { // The series of messages in a chat request. Includes the previous messages in a conversation. Each message includes a role (`USER` or `CHATBOT`) and content. Messages []Message `mandatory:"false" json:"messages"` // Whether to stream back partial progress. If set to true, as tokens become available, they are sent as data-only server-sent events. IsStream *bool `mandatory:"false" json:"isStream"` // The number of generated texts that will be returned. NumGenerations *int `mandatory:"false" json:"numGenerations"` // If specified, the backend will make a best effort to sample tokens deterministically, so that repeated requests with the same seed and parameters yield the same result. However, determinism cannot be fully guaranteed. Seed *int `mandatory:"false" json:"seed"` // Whether to include the user prompt in the response. Applies only to non-stream results. IsEcho *bool `mandatory:"false" json:"isEcho"` // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output making the output text sound more natural. Default value is -1 which means to consider all tokens. Setting to 0 disables this method and considers all tokens. // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen. TopK *int `mandatory:"false" json:"topK"` // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step. // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. 
Set to 1 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k. TopP *float64 `mandatory:"false" json:"topP"` // A number that sets the randomness of the generated output. A lower temperature means less random generations. // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs. Temperature *float64 `mandatory:"false" json:"temperature"` // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. Set to 0 to disable. FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"` // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable. PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"` // List of strings that stop the generation if they are generated for the response text. The returned output will not contain the stop strings. Stop []string `mandatory:"false" json:"stop"` // Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens. // For example, if the log probability is 5, the API returns a list of the 5 most likely tokens. 
The API returns the log probability of the sampled token, so there might be up to logprobs+1 elements in the response. LogProbs *int `mandatory:"false" json:"logProbs"` // The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus `maxTokens` must not exceed the model's context length. // Not setting a value for maxTokens results in the possible use of model's full context length. MaxTokens *int `mandatory:"false" json:"maxTokens"` // Modifies the likelihood of specified tokens that appear in the completion. // Example: '{"6395": 2, "8134": 1, "21943": 0.5, "5923": -100}' LogitBias *interface{} `mandatory:"false" json:"logitBias"` ToolChoice ToolChoice `mandatory:"false" json:"toolChoice"` // A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. Tools []ToolDefinition `mandatory:"false" json:"tools"` }
func (m GenericChatRequest) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m GenericChatRequest) String() string
func (m *GenericChatRequest) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m GenericChatRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
GenericChatResponse The response for a chat conversation.
type GenericChatResponse struct { // The Unix timestamp (in seconds) of when the response text was generated. TimeCreated *common.SDKTime `mandatory:"true" json:"timeCreated"` // A list of generated texts. Can be more than one if n is greater than 1. Choices []ChatChoice `mandatory:"true" json:"choices"` }
func (m GenericChatResponse) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m GenericChatResponse) String() string
func (m GenericChatResponse) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
ImageContent Represents a single instance of chat image content.
type ImageContent struct { ImageUrl *ImageUrl `mandatory:"false" json:"imageUrl"` }
func (m ImageContent) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m ImageContent) String() string
func (m ImageContent) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
ImageUrl Provide a base64 encoded image.
type ImageUrl struct { // The base64 encoded image data. // Example for a png image: // `{ // "type": "IMAGE", // "imageUrl": { // "url": "data:image/png;base64,<base64 encoded image content>" // } // }` Url *string `mandatory:"true" json:"url"` // The default value is AUTO and only AUTO is supported. This option controls how to convert the base64 encoded image to tokens. Detail ImageUrlDetailEnum `mandatory:"false" json:"detail,omitempty"` }
func (m ImageUrl) String() string
func (m ImageUrl) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
ImageUrlDetailEnum Enum with underlying type: string
type ImageUrlDetailEnum string
Set of constants representing the allowable values for ImageUrlDetailEnum
const ( ImageUrlDetailAuto ImageUrlDetailEnum = "AUTO" ImageUrlDetailHigh ImageUrlDetailEnum = "HIGH" ImageUrlDetailLow ImageUrlDetailEnum = "LOW" )
func GetImageUrlDetailEnumValues() []ImageUrlDetailEnum
GetImageUrlDetailEnumValues Enumerates the set of values for ImageUrlDetailEnum
func GetMappingImageUrlDetailEnum(val string) (ImageUrlDetailEnum, bool)
GetMappingImageUrlDetailEnum performs a case-insensitive comparison on the enum value and returns the desired enum
LlamaLlmInferenceRequest Details for the text generation request for Llama models.
type LlamaLlmInferenceRequest struct { // Represents the prompt to be completed. The trailing white spaces are trimmed before completion. Prompt *string `mandatory:"false" json:"prompt"` // Whether to stream back partial progress. If set, tokens are sent as data-only server-sent events as they become available. IsStream *bool `mandatory:"false" json:"isStream"` // The number of generated texts that will be returned. NumGenerations *int `mandatory:"false" json:"numGenerations"` // Whether or not to return the user prompt in the response. Applies only to non-stream results. IsEcho *bool `mandatory:"false" json:"isEcho"` // An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output making the output text sound more natural. Default value is -1 which means to consider all tokens. Setting to 0 disables this method and considers all tokens. // If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen. TopK *int `mandatory:"false" json:"topK"` // If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step. // To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k. TopP *float64 `mandatory:"false" json:"topP"` // A number that sets the randomness of the generated output. A lower temperature means less random generations. // Use lower numbers for tasks with a correct answer such as question answering or summarizing. 
High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs. Temperature *float64 `mandatory:"false" json:"temperature"` // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. Set to 0 to disable. FrequencyPenalty *float64 `mandatory:"false" json:"frequencyPenalty"` // To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. // Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable. PresencePenalty *float64 `mandatory:"false" json:"presencePenalty"` // List of strings that stop the generation if they are generated for the response text. The returned output will not contain the stop strings. Stop []string `mandatory:"false" json:"stop"` // Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens. // For example, if the log probability is 5, the API returns a list of the 5 most likely tokens. The API returns the log probability of the sampled token, so there might be up to logprobs+1 elements in the response. LogProbs *int `mandatory:"false" json:"logProbs"` // The maximum number of tokens that can be generated per output sequence. The token count of the prompt plus `maxTokens` cannot exceed the model's context length. MaxTokens *int `mandatory:"false" json:"maxTokens"` }
func (m LlamaLlmInferenceRequest) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m LlamaLlmInferenceRequest) String() string
func (m LlamaLlmInferenceRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
LlamaLlmInferenceResponse The generated text result to return.
type LlamaLlmInferenceResponse struct { // The Unix timestamp (in seconds) of when the generation was created. Created *common.SDKTime `mandatory:"true" json:"created"` // A list of generated texts. Can be more than one if n is greater than 1. Choices []Choice `mandatory:"true" json:"choices"` }
func (m LlamaLlmInferenceResponse) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m LlamaLlmInferenceResponse) String() string
func (m LlamaLlmInferenceResponse) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
LlmInferenceRequest The base class for the inference requests.
type LlmInferenceRequest interface { }
LlmInferenceRequestRuntimeTypeEnum Enum with underlying type: string
type LlmInferenceRequestRuntimeTypeEnum string
Set of constants representing the allowable values for LlmInferenceRequestRuntimeTypeEnum
const ( LlmInferenceRequestRuntimeTypeCohere LlmInferenceRequestRuntimeTypeEnum = "COHERE" LlmInferenceRequestRuntimeTypeLlama LlmInferenceRequestRuntimeTypeEnum = "LLAMA" )
func GetLlmInferenceRequestRuntimeTypeEnumValues() []LlmInferenceRequestRuntimeTypeEnum
GetLlmInferenceRequestRuntimeTypeEnumValues Enumerates the set of values for LlmInferenceRequestRuntimeTypeEnum
func GetMappingLlmInferenceRequestRuntimeTypeEnum(val string) (LlmInferenceRequestRuntimeTypeEnum, bool)
GetMappingLlmInferenceRequestRuntimeTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum
LlmInferenceResponse The base class for inference responses.
type LlmInferenceResponse interface { }
LlmInferenceResponseRuntimeTypeEnum Enum with underlying type: string
type LlmInferenceResponseRuntimeTypeEnum string
Set of constants representing the allowable values for LlmInferenceResponseRuntimeTypeEnum
const ( LlmInferenceResponseRuntimeTypeCohere LlmInferenceResponseRuntimeTypeEnum = "COHERE" LlmInferenceResponseRuntimeTypeLlama LlmInferenceResponseRuntimeTypeEnum = "LLAMA" )
func GetLlmInferenceResponseRuntimeTypeEnumValues() []LlmInferenceResponseRuntimeTypeEnum
GetLlmInferenceResponseRuntimeTypeEnumValues Enumerates the set of values for LlmInferenceResponseRuntimeTypeEnum
func GetMappingLlmInferenceResponseRuntimeTypeEnum(val string) (LlmInferenceResponseRuntimeTypeEnum, bool)
GetMappingLlmInferenceResponseRuntimeTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum
Logprobs Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens. For example, if the log probability is 5, the API returns a list of the 5 most likely tokens. The API returns the log probability of the sampled token, so there might be up to logprobs+1 elements in the response.
type Logprobs struct { // The text offset. TextOffset []int `mandatory:"false" json:"textOffset"` // The logarithmic probabilities of the output token. TokenLogprobs []float64 `mandatory:"false" json:"tokenLogprobs"` // The list of output tokens. Tokens []string `mandatory:"false" json:"tokens"` // The logarithmic probabilities of each of the top k tokens. TopLogprobs []map[string]string `mandatory:"false" json:"topLogprobs"` }
func (m Logprobs) String() string
func (m Logprobs) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
Message A message that represents a single chat dialog.
type Message interface { // Contents of the chat message. GetContent() []ChatContent }
MessageRoleEnum Enum with underlying type: string
type MessageRoleEnum string
Set of constants representing the allowable values for MessageRoleEnum
const ( MessageRoleSystem MessageRoleEnum = "SYSTEM" MessageRoleUser MessageRoleEnum = "USER" MessageRoleAssistant MessageRoleEnum = "ASSISTANT" MessageRoleTool MessageRoleEnum = "TOOL" )
func GetMappingMessageRoleEnum(val string) (MessageRoleEnum, bool)
GetMappingMessageRoleEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetMessageRoleEnumValues() []MessageRoleEnum
GetMessageRoleEnumValues Enumerates the set of values for MessageRoleEnum
OnDemandServingMode The model's serving mode is on-demand serving on a shared infrastructure.
type OnDemandServingMode struct { // The unique ID of a model to use. You can use the ListModels API to list the available models. ModelId *string `mandatory:"true" json:"modelId"` }
func (m OnDemandServingMode) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m OnDemandServingMode) String() string
func (m OnDemandServingMode) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
RerankTextDetails Details required for a rerank request.
type RerankTextDetails struct { // Input query for search in the documents. Input *string `mandatory:"true" json:"input"` // The OCID of the compartment to call into the Generative AI service LLMs. CompartmentId *string `mandatory:"true" json:"compartmentId"` ServingMode ServingMode `mandatory:"true" json:"servingMode"` // A list of document strings to rerank based on the query asked. Documents []string `mandatory:"true" json:"documents"` // The number of most relevant documents or indices to return. Defaults to the length of the documents. TopN *int `mandatory:"false" json:"topN"` // Whether or not to return the documents in the response. IsEcho *bool `mandatory:"false" json:"isEcho"` // The maximum number of chunks to produce internally from a document. MaxChunksPerDocument *int `mandatory:"false" json:"maxChunksPerDocument"` }
func (m RerankTextDetails) String() string
func (m *RerankTextDetails) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m RerankTextDetails) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
RerankTextRequest wrapper for the RerankText operation
Click https://docs.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/RerankText.go.html to see an example of how to use RerankTextRequest.
type RerankTextRequest struct { // Details required for the rerank request. RerankTextDetails `contributesTo:"body"` // A token that uniquely identifies a request so it can be retried in case of a timeout or // server error without risk of executing that same action again. Retry tokens expire after 24 // hours, but can be invalidated before that, in case of conflicting operations. For example, if a resource is deleted and purged from the system, then a retry of the original creation request // is rejected. OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"` // The client request ID for tracing. OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"` // Metadata about the request. This information will not be transmitted to the service, but // represents information that the SDK will consume to drive retry behavior. RequestMetadata common.RequestMetadata }
func (request RerankTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
BinaryRequestBody implements the OCIRequest interface
func (request RerankTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
HTTPRequest implements the OCIRequest interface
func (request RerankTextRequest) RetryPolicy() *common.RetryPolicy
RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (request RerankTextRequest) String() string
func (request RerankTextRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
RerankTextResponse wrapper for the RerankText operation
type RerankTextResponse struct { // The underlying http response RawResponse *http.Response // The RerankTextResult instance RerankTextResult `presentIn:"body"` // For optimistic concurrency control. See `if-match`. Etag *string `presentIn:"header" name:"etag"` // Unique Oracle-assigned identifier for the request. If you need to contact // Oracle about a particular request, please provide the request ID. OpcRequestId *string `presentIn:"header" name:"opc-request-id"` }
func (response RerankTextResponse) HTTPResponse() *http.Response
HTTPResponse implements the OCIResponse interface
func (response RerankTextResponse) String() string
RerankTextResult The rerank response to return to the caller.
type RerankTextResult struct { // A unique identifier for this `RerankResult`. Id *string `mandatory:"true" json:"id"` // Top n documents with their index and relevance score. DocumentRanks []DocumentRank `mandatory:"true" json:"documentRanks"` // The OCID of the model used in the rerank request. ModelId *string `mandatory:"false" json:"modelId"` // The version of the model. ModelVersion *string `mandatory:"false" json:"modelVersion"` }
func (m RerankTextResult) String() string
func (m RerankTextResult) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
SearchQuery The generated search query.
type SearchQuery struct { // The text of the search query. Text *string `mandatory:"true" json:"text"` }
func (m SearchQuery) String() string
func (m SearchQuery) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
ServingMode The model's serving mode, which is either on-demand serving or dedicated serving.
type ServingMode interface { }
ServingModeServingTypeEnum Enum with underlying type: string
type ServingModeServingTypeEnum string
Set of constants representing the allowable values for ServingModeServingTypeEnum
const ( ServingModeServingTypeOnDemand ServingModeServingTypeEnum = "ON_DEMAND" ServingModeServingTypeDedicated ServingModeServingTypeEnum = "DEDICATED" )
func GetMappingServingModeServingTypeEnum(val string) (ServingModeServingTypeEnum, bool)
GetMappingServingModeServingTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetServingModeServingTypeEnumValues() []ServingModeServingTypeEnum
GetServingModeServingTypeEnumValues Enumerates the set of values for ServingModeServingTypeEnum
SummarizeTextDetails Details for the request to summarize text.
type SummarizeTextDetails struct { // The input string to be summarized. Input *string `mandatory:"true" json:"input"` ServingMode ServingMode `mandatory:"true" json:"servingMode"` // The OCID of the compartment in which to call the Generative AI service to summarize text. CompartmentId *string `mandatory:"true" json:"compartmentId"` // Whether or not to include the original inputs in the response. IsEcho *bool `mandatory:"false" json:"isEcho"` // A number that sets the randomness of the generated output. Lower temperatures mean less random generations. // Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0, and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs. Temperature *float64 `mandatory:"false" json:"temperature"` // A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". For example, "focusing on the next steps" or "written by Yoda". AdditionalCommand *string `mandatory:"false" json:"additionalCommand"` // Indicates the approximate length of the summary. If "AUTO" is selected, the best option will be picked based on the input text. Length SummarizeTextDetailsLengthEnum `mandatory:"false" json:"length,omitempty"` // Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If "AUTO" is selected, the best option will be picked based on the input text. Format SummarizeTextDetailsFormatEnum `mandatory:"false" json:"format,omitempty"` // Controls how close to the original text the summary is. High extractiveness summaries will lean towards reusing sentences verbatim, while low extractiveness summaries will tend to paraphrase more. Extractiveness SummarizeTextDetailsExtractivenessEnum `mandatory:"false" json:"extractiveness,omitempty"` }
func (m SummarizeTextDetails) String() string
func (m *SummarizeTextDetails) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m SummarizeTextDetails) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
SummarizeTextDetailsExtractivenessEnum Enum with underlying type: string
type SummarizeTextDetailsExtractivenessEnum string
Set of constants representing the allowable values for SummarizeTextDetailsExtractivenessEnum
const ( SummarizeTextDetailsExtractivenessLow SummarizeTextDetailsExtractivenessEnum = "LOW" SummarizeTextDetailsExtractivenessMedium SummarizeTextDetailsExtractivenessEnum = "MEDIUM" SummarizeTextDetailsExtractivenessHigh SummarizeTextDetailsExtractivenessEnum = "HIGH" SummarizeTextDetailsExtractivenessAuto SummarizeTextDetailsExtractivenessEnum = "AUTO" )
func GetMappingSummarizeTextDetailsExtractivenessEnum(val string) (SummarizeTextDetailsExtractivenessEnum, bool)
GetMappingSummarizeTextDetailsExtractivenessEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetSummarizeTextDetailsExtractivenessEnumValues() []SummarizeTextDetailsExtractivenessEnum
GetSummarizeTextDetailsExtractivenessEnumValues Enumerates the set of values for SummarizeTextDetailsExtractivenessEnum
SummarizeTextDetailsFormatEnum Enum with underlying type: string
type SummarizeTextDetailsFormatEnum string
Set of constants representing the allowable values for SummarizeTextDetailsFormatEnum
const ( SummarizeTextDetailsFormatParagraph SummarizeTextDetailsFormatEnum = "PARAGRAPH" SummarizeTextDetailsFormatBullets SummarizeTextDetailsFormatEnum = "BULLETS" SummarizeTextDetailsFormatAuto SummarizeTextDetailsFormatEnum = "AUTO" )
func GetMappingSummarizeTextDetailsFormatEnum(val string) (SummarizeTextDetailsFormatEnum, bool)
GetMappingSummarizeTextDetailsFormatEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetSummarizeTextDetailsFormatEnumValues() []SummarizeTextDetailsFormatEnum
GetSummarizeTextDetailsFormatEnumValues Enumerates the set of values for SummarizeTextDetailsFormatEnum
SummarizeTextDetailsLengthEnum Enum with underlying type: string
type SummarizeTextDetailsLengthEnum string
Set of constants representing the allowable values for SummarizeTextDetailsLengthEnum
const ( SummarizeTextDetailsLengthShort SummarizeTextDetailsLengthEnum = "SHORT" SummarizeTextDetailsLengthMedium SummarizeTextDetailsLengthEnum = "MEDIUM" SummarizeTextDetailsLengthLong SummarizeTextDetailsLengthEnum = "LONG" SummarizeTextDetailsLengthAuto SummarizeTextDetailsLengthEnum = "AUTO" )
func GetMappingSummarizeTextDetailsLengthEnum(val string) (SummarizeTextDetailsLengthEnum, bool)
GetMappingSummarizeTextDetailsLengthEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetSummarizeTextDetailsLengthEnumValues() []SummarizeTextDetailsLengthEnum
GetSummarizeTextDetailsLengthEnumValues Enumerates the set of values for SummarizeTextDetailsLengthEnum
SummarizeTextRequest wrapper for the SummarizeText operation
Click https://docs.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/generativeaiinference/SummarizeText.go.html to see an example of how to use SummarizeTextRequest.
type SummarizeTextRequest struct { // Details for summarizing the text. SummarizeTextDetails `contributesTo:"body"` // A token that uniquely identifies a request so it can be retried in case of a timeout or // server error without risk of executing that same action again. Retry tokens expire after 24 // hours, but can be invalidated before that, in case of conflicting operations. For example, if a resource is deleted and purged from the system, then a retry of the original creation request // is rejected. OpcRetryToken *string `mandatory:"false" contributesTo:"header" name:"opc-retry-token"` // The client request ID for tracing. OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"` // Metadata about the request. This information will not be transmitted to the service, but // represents information that the SDK will consume to drive retry behavior. RequestMetadata common.RequestMetadata }
func (request SummarizeTextRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool)
BinaryRequestBody implements the OCIRequest interface
func (request SummarizeTextRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error)
HTTPRequest implements the OCIRequest interface
func (request SummarizeTextRequest) RetryPolicy() *common.RetryPolicy
RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (request SummarizeTextRequest) String() string
func (request SummarizeTextRequest) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called while constructing an API request. Calling this function directly is not recommended.
SummarizeTextResponse wrapper for the SummarizeText operation
type SummarizeTextResponse struct { // The underlying http response RawResponse *http.Response // The SummarizeTextResult instance SummarizeTextResult `presentIn:"body"` // For optimistic concurrency control. See `if-match`. Etag *string `presentIn:"header" name:"etag"` // Unique Oracle-assigned identifier for the request. If you need to contact // Oracle about a particular request, please provide the request ID. OpcRequestId *string `presentIn:"header" name:"opc-request-id"` }
func (response SummarizeTextResponse) HTTPResponse() *http.Response
HTTPResponse implements the OCIResponse interface
func (response SummarizeTextResponse) String() string
SummarizeTextResult Summarize text result to return to caller.
type SummarizeTextResult struct { // A unique identifier for this SummarizeTextResult. Id *string `mandatory:"true" json:"id"` // Summary result corresponding to input. Summary *string `mandatory:"true" json:"summary"` // The original input. Only included if "isEcho" is set to true. Input *string `mandatory:"false" json:"input"` // The OCID of the model used in this inference request. ModelId *string `mandatory:"false" json:"modelId"` // The version of the model. ModelVersion *string `mandatory:"false" json:"modelVersion"` }
func (m SummarizeTextResult) String() string
func (m SummarizeTextResult) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.
SystemMessage Represents a single instance of system message.
type SystemMessage struct { // Contents of the chat message. Content []ChatContent `mandatory:"false" json:"content"` // An optional name for the participant. Provides the model information to differentiate between participants of the same role. Name *string `mandatory:"false" json:"name"` }
func (m SystemMessage) GetContent() []ChatContent
GetContent returns Content
func (m SystemMessage) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m SystemMessage) String() string
func (m *SystemMessage) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m SystemMessage) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.
TextContent Represents a single instance of text in the chat content.
type TextContent struct { // The text content. Text *string `mandatory:"false" json:"text"` }
func (m TextContent) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m TextContent) String() string
func (m TextContent) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.
TokenLikelihood An object that contains the returned token and its corresponding likelihood.
type TokenLikelihood struct { // A word, part of a word, or a punctuation mark. // For example, apple is a token and friendship is made up of two tokens, friend and ship. When you run a model, you can set the maximum number of output tokens. Estimate three tokens per word. Token *string `mandatory:"false" json:"token"` // The likelihood of this token during generation. Likelihood *float64 `mandatory:"false" json:"likelihood"` }
func (m TokenLikelihood) String() string
func (m TokenLikelihood) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.
ToolCall The tool call generated by the model, such as a function call.
type ToolCall interface { // The ID of the tool call. GetId() *string }
ToolCallTypeEnum Enum with underlying type: string
type ToolCallTypeEnum string
Set of constants representing the allowable values for ToolCallTypeEnum
const ( ToolCallTypeFunction ToolCallTypeEnum = "FUNCTION" )
func GetMappingToolCallTypeEnum(val string) (ToolCallTypeEnum, bool)
GetMappingToolCallTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetToolCallTypeEnumValues() []ToolCallTypeEnum
GetToolCallTypeEnumValues Enumerates the set of values for ToolCallTypeEnum
ToolChoice The tool choice for a tool.
type ToolChoice interface { }
ToolChoiceAuto The model can pick between generating a message or calling one or more tools.
type ToolChoiceAuto struct { }
func (m ToolChoiceAuto) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m ToolChoiceAuto) String() string
func (m ToolChoiceAuto) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.
ToolChoiceFunction The tool choice for a function.
type ToolChoiceFunction struct { // The function name. Name *string `mandatory:"false" json:"name"` }
func (m ToolChoiceFunction) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m ToolChoiceFunction) String() string
func (m ToolChoiceFunction) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.
ToolChoiceNone The model will not call any tool and instead generates a message.
type ToolChoiceNone struct { }
func (m ToolChoiceNone) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m ToolChoiceNone) String() string
func (m ToolChoiceNone) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.
ToolChoiceRequired The model must call one or more tools.
type ToolChoiceRequired struct { }
func (m ToolChoiceRequired) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m ToolChoiceRequired) String() string
func (m ToolChoiceRequired) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.
ToolChoiceTypeEnum Enum with underlying type: string
type ToolChoiceTypeEnum string
Set of constants representing the allowable values for ToolChoiceTypeEnum
const ( ToolChoiceTypeNone ToolChoiceTypeEnum = "NONE" ToolChoiceTypeAuto ToolChoiceTypeEnum = "AUTO" ToolChoiceTypeRequired ToolChoiceTypeEnum = "REQUIRED" ToolChoiceTypeFunction ToolChoiceTypeEnum = "FUNCTION" )
func GetMappingToolChoiceTypeEnum(val string) (ToolChoiceTypeEnum, bool)
GetMappingToolChoiceTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetToolChoiceTypeEnumValues() []ToolChoiceTypeEnum
GetToolChoiceTypeEnumValues Enumerates the set of values for ToolChoiceTypeEnum
ToolDefinition A tool the model may call.
type ToolDefinition interface { }
ToolDefinitionTypeEnum Enum with underlying type: string
type ToolDefinitionTypeEnum string
Set of constants representing the allowable values for ToolDefinitionTypeEnum
const ( ToolDefinitionTypeFunction ToolDefinitionTypeEnum = "FUNCTION" )
func GetMappingToolDefinitionTypeEnum(val string) (ToolDefinitionTypeEnum, bool)
GetMappingToolDefinitionTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetToolDefinitionTypeEnumValues() []ToolDefinitionTypeEnum
GetToolDefinitionTypeEnumValues Enumerates the set of values for ToolDefinitionTypeEnum
ToolMessage Represents a single instance of tool message.
type ToolMessage struct { // Contents of the chat message. Content []ChatContent `mandatory:"false" json:"content"` // Tool call that this message is responding to. ToolCallId *string `mandatory:"false" json:"toolCallId"` }
func (m ToolMessage) GetContent() []ChatContent
GetContent returns Content
func (m ToolMessage) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m ToolMessage) String() string
func (m *ToolMessage) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m ToolMessage) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.
UserMessage Represents a single instance of user message.
type UserMessage struct { // Contents of the chat message. Content []ChatContent `mandatory:"false" json:"content"` // An optional name for the participant. Provides the model information to differentiate between participants of the same role. Name *string `mandatory:"false" json:"name"` }
func (m UserMessage) GetContent() []ChatContent
GetContent returns Content
func (m UserMessage) MarshalJSON() (buff []byte, e error)
MarshalJSON marshals to json representation
func (m UserMessage) String() string
func (m *UserMessage) UnmarshalJSON(data []byte) (e error)
UnmarshalJSON unmarshals from json
func (m UserMessage) ValidateEnumValue() (bool, error)
ValidateEnumValue returns an error when providing an unsupported enum value. This function is called during the API request construction process. Calling this function directly is not recommended.