Class: OCI::GenerativeAiInference::Models::GenericChatRequest

Inherits:
BaseChatRequest
  • Object
Defined in:
lib/oci/generative_ai_inference/models/generic_chat_request.rb

Overview

Details for the chat request.

Constant Summary

Constants inherited from BaseChatRequest

BaseChatRequest::API_FORMAT_ENUM

Instance Attribute Summary

Attributes inherited from BaseChatRequest

#api_format

Class Method Summary

Instance Method Summary

Methods inherited from BaseChatRequest

get_subtype

Constructor Details

#initialize(attributes = {}) ⇒ GenericChatRequest

Initializes the object

Parameters:

  • attributes (Hash) (defaults to: {})

    Model attributes in the form of a hash

Options Hash (attributes):

  • :messages (Array<OCI::GenerativeAiInference::Models::Message>) The value to assign to the #messages property
  • :is_stream (BOOLEAN) The value to assign to the #is_stream property
  • :num_generations (Integer) The value to assign to the #num_generations property
  • :seed (Integer) The value to assign to the #seed property
  • :is_echo (BOOLEAN) The value to assign to the #is_echo property
  • :top_k (Integer) The value to assign to the #top_k property
  • :top_p (Float) The value to assign to the #top_p property
  • :temperature (Float) The value to assign to the #temperature property
  • :frequency_penalty (Float) The value to assign to the #frequency_penalty property
  • :presence_penalty (Float) The value to assign to the #presence_penalty property
  • :stop (Array<String>) The value to assign to the #stop property
  • :log_probs (Integer) The value to assign to the #log_probs property
  • :max_tokens (Integer) The value to assign to the #max_tokens property
  • :logit_bias (Object) The value to assign to the #logit_bias property
  • :tool_choice (OCI::GenerativeAiInference::Models::ToolChoice) The value to assign to the #tool_choice property
  • :tools (Array<OCI::GenerativeAiInference::Models::ToolDefinition>) The value to assign to the #tools property

# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 168

def initialize(attributes = {})
  return unless attributes.is_a?(Hash)

  attributes['apiFormat'] = 'GENERIC'

  super(attributes)

  # convert string to symbol for hash key
  attributes = attributes.each_with_object({}) { |(k, v), h| h[k.to_sym] = v }

  self.messages = attributes[:'messages'] if attributes[:'messages']

  self.is_stream = attributes[:'isStream'] unless attributes[:'isStream'].nil?
  self.is_stream = false if is_stream.nil? && !attributes.key?(:'isStream') # rubocop:disable Style/StringLiterals

  raise 'You cannot provide both :isStream and :is_stream' if attributes.key?(:'isStream') && attributes.key?(:'is_stream')

  self.is_stream = attributes[:'is_stream'] unless attributes[:'is_stream'].nil?
  self.is_stream = false if is_stream.nil? && !attributes.key?(:'isStream') && !attributes.key?(:'is_stream') # rubocop:disable Style/StringLiterals

  self.num_generations = attributes[:'numGenerations'] if attributes[:'numGenerations']

  raise 'You cannot provide both :numGenerations and :num_generations' if attributes.key?(:'numGenerations') && attributes.key?(:'num_generations')

  self.num_generations = attributes[:'num_generations'] if attributes[:'num_generations']

  self.seed = attributes[:'seed'] if attributes[:'seed']

  self.is_echo = attributes[:'isEcho'] unless attributes[:'isEcho'].nil?
  self.is_echo = false if is_echo.nil? && !attributes.key?(:'isEcho') # rubocop:disable Style/StringLiterals

  raise 'You cannot provide both :isEcho and :is_echo' if attributes.key?(:'isEcho') && attributes.key?(:'is_echo')

  self.is_echo = attributes[:'is_echo'] unless attributes[:'is_echo'].nil?
  self.is_echo = false if is_echo.nil? && !attributes.key?(:'isEcho') && !attributes.key?(:'is_echo') # rubocop:disable Style/StringLiterals

  self.top_k = attributes[:'topK'] if attributes[:'topK']

  raise 'You cannot provide both :topK and :top_k' if attributes.key?(:'topK') && attributes.key?(:'top_k')

  self.top_k = attributes[:'top_k'] if attributes[:'top_k']

  self.top_p = attributes[:'topP'] if attributes[:'topP']
  self.top_p = 1.0 if top_p.nil? && !attributes.key?(:'topP') # rubocop:disable Style/StringLiterals

  raise 'You cannot provide both :topP and :top_p' if attributes.key?(:'topP') && attributes.key?(:'top_p')

  self.top_p = attributes[:'top_p'] if attributes[:'top_p']
  self.top_p = 1.0 if top_p.nil? && !attributes.key?(:'topP') && !attributes.key?(:'top_p') # rubocop:disable Style/StringLiterals

  self.temperature = attributes[:'temperature'] if attributes[:'temperature']
  self.temperature = 1.0 if temperature.nil? && !attributes.key?(:'temperature') # rubocop:disable Style/StringLiterals

  self.frequency_penalty = attributes[:'frequencyPenalty'] if attributes[:'frequencyPenalty']
  self.frequency_penalty = 0.0 if frequency_penalty.nil? && !attributes.key?(:'frequencyPenalty') # rubocop:disable Style/StringLiterals

  raise 'You cannot provide both :frequencyPenalty and :frequency_penalty' if attributes.key?(:'frequencyPenalty') && attributes.key?(:'frequency_penalty')

  self.frequency_penalty = attributes[:'frequency_penalty'] if attributes[:'frequency_penalty']
  self.frequency_penalty = 0.0 if frequency_penalty.nil? && !attributes.key?(:'frequencyPenalty') && !attributes.key?(:'frequency_penalty') # rubocop:disable Style/StringLiterals

  self.presence_penalty = attributes[:'presencePenalty'] if attributes[:'presencePenalty']
  self.presence_penalty = 0.0 if presence_penalty.nil? && !attributes.key?(:'presencePenalty') # rubocop:disable Style/StringLiterals

  raise 'You cannot provide both :presencePenalty and :presence_penalty' if attributes.key?(:'presencePenalty') && attributes.key?(:'presence_penalty')

  self.presence_penalty = attributes[:'presence_penalty'] if attributes[:'presence_penalty']
  self.presence_penalty = 0.0 if presence_penalty.nil? && !attributes.key?(:'presencePenalty') && !attributes.key?(:'presence_penalty') # rubocop:disable Style/StringLiterals

  self.stop = attributes[:'stop'] if attributes[:'stop']

  self.log_probs = attributes[:'logProbs'] if attributes[:'logProbs']

  raise 'You cannot provide both :logProbs and :log_probs' if attributes.key?(:'logProbs') && attributes.key?(:'log_probs')

  self.log_probs = attributes[:'log_probs'] if attributes[:'log_probs']

  self.max_tokens = attributes[:'maxTokens'] if attributes[:'maxTokens']

  raise 'You cannot provide both :maxTokens and :max_tokens' if attributes.key?(:'maxTokens') && attributes.key?(:'max_tokens')

  self.max_tokens = attributes[:'max_tokens'] if attributes[:'max_tokens']

  self.logit_bias = attributes[:'logitBias'] if attributes[:'logitBias']

  raise 'You cannot provide both :logitBias and :logit_bias' if attributes.key?(:'logitBias') && attributes.key?(:'logit_bias')

  self.logit_bias = attributes[:'logit_bias'] if attributes[:'logit_bias']

  self.tool_choice = attributes[:'toolChoice'] if attributes[:'toolChoice']

  raise 'You cannot provide both :toolChoice and :tool_choice' if attributes.key?(:'toolChoice') && attributes.key?(:'tool_choice')

  self.tool_choice = attributes[:'tool_choice'] if attributes[:'tool_choice']

  self.tools = attributes[:'tools'] if attributes[:'tools']
end
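
As a usage sketch (not part of the generated reference): the constructor accepts either the camelCase JSON names or the snake_case Ruby names, and raises if both spellings of the same attribute are given. This example assumes the UserMessage and TextContent model classes that accompany this model in the SDK.

require 'oci'

# Build a chat request using snake_case keys; 'apiFormat' is set to
# 'GENERIC' automatically by the constructor.
request = OCI::GenerativeAiInference::Models::GenericChatRequest.new(
  messages: [
    OCI::GenerativeAiInference::Models::UserMessage.new(
      content: [OCI::GenerativeAiInference::Models::TextContent.new(text: 'Hello!')]
    )
  ],
  is_stream: false,
  max_tokens: 256,
  temperature: 0.7
)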

Instance Attribute Details

#frequency_penalty ⇒ Float

To reduce repetitiveness of generated tokens, this number penalizes new tokens based on their frequency in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens. Set to 0 to disable.

Returns:

  • (Float)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 56

def frequency_penalty
  @frequency_penalty
end

#is_echo ⇒ BOOLEAN

Whether to include the user prompt in the response. Applies only to non-stream results.

Returns:

  • (BOOLEAN)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 31

def is_echo
  @is_echo
end

#is_stream ⇒ BOOLEAN

Whether to stream back partial progress. If set to true, as tokens become available, they are sent as data-only server-sent events.

Returns:

  • (BOOLEAN)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 18

def is_stream
  @is_stream
end

#log_probs ⇒ Integer

Includes the logarithmic probabilities for the most likely output tokens and the chosen tokens.

For example, if logProbs is set to 5, the API returns a list of the 5 most likely tokens. The API also returns the log probability of the sampled token, so there might be up to logProbs+1 elements in the response.

Returns:

  • (Integer)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 74

def log_probs
  @log_probs
end

#logit_bias ⇒ Object

Modifies the likelihood of specified tokens that appear in the completion.

Example: '{"8134": 1, "21943": 0.5, "5923": -100}'

Returns:

  • (Object)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 87

def logit_bias
  @logit_bias
end
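
To illustrate the expected shape (an assumption inferred from the example above; the SDK types this attribute only as Object): logit_bias is a map from tokenizer token IDs to bias values, reusing the request object from the constructor sketch earlier.

# Hypothetical illustration: token IDs from the model's tokenizer mapped to
# bias values. Values near -100 effectively ban a token; positive values
# promote it.
request.logit_bias = { '8134' => 1, '21943' => 0.5, '5923' => -100 }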

#max_tokens ⇒ Integer

The maximum number of tokens that can be generated per output sequence. The token count of your prompt plus maxTokens must not exceed the model's context length. If no value is set for maxTokens, the model may use up to its full context length.

Returns:

  • (Integer)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 80

def max_tokens
  @max_tokens
end

#messages ⇒ Array<OCI::GenerativeAiInference::Models::Message>

The series of messages in a chat request. Includes the previous messages in a conversation. Each message includes a role (USER or CHATBOT) and content.

Returns:

  • (Array<OCI::GenerativeAiInference::Models::Message>)

# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 14

def messages
  @messages
end

#num_generations ⇒ Integer

The number of generated texts that will be returned.

Returns:

  • (Integer)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 22

def num_generations
  @num_generations
end

#presence_penalty ⇒ Float

To reduce repetitiveness of generated tokens, this number penalizes new tokens based on whether they've appeared in the generated text so far. Values > 0 encourage the model to use new tokens and values < 0 encourage the model to repeat tokens.

Similar to frequency penalty, a penalty is applied to previously present tokens, except that this penalty is applied equally to all tokens that have already appeared, regardless of how many times they've appeared. Set to 0 to disable.

Returns:

  • (Float)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 63

def presence_penalty
  @presence_penalty
end
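
To make the frequency/presence distinction concrete, a common formulation (an assumption for illustration; this SDK model does not specify the backend's exact arithmetic) adjusts a candidate token's logit as follows:

# Sketch of how the two penalties are commonly combined: the frequency
# penalty scales with the token's occurrence count so far, while the
# presence penalty is a flat deduction once the token has appeared at all.
def penalized_logit(logit, count, frequency_penalty, presence_penalty)
  logit -= frequency_penalty * count
  logit -= presence_penalty if count.positive?
  logit
end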

#seed ⇒ Integer

If specified, the backend will make a best effort to sample tokens deterministically, so that repeated requests with the same seed and parameters yield the same result. However, determinism cannot be fully guaranteed.

Returns:

  • (Integer)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 27

def seed
  @seed
end

#stop ⇒ Array<String>

A list of strings that stops generation when any of them is generated in the response text. The returned output will not contain the stop strings.

Returns:

  • (Array<String>)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 67

def stop
  @stop
end

#temperature ⇒ Float

A number that sets the randomness of the generated output. A lower temperature means less random generations.

Use lower numbers for tasks with a correct answer such as question answering or summarizing. High temperatures can generate hallucinations or factually incorrect information. Start with temperatures lower than 1.0 and increase the temperature for more creative outputs, as you regenerate the prompts to refine the outputs.

Returns:

  • (Float)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 52

def temperature
  @temperature
end

#tool_choice ⇒ OCI::GenerativeAiInference::Models::ToolChoice

Returns:

  • (OCI::GenerativeAiInference::Models::ToolChoice)

# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 90

def tool_choice
  @tool_choice
end

#tools ⇒ Array<OCI::GenerativeAiInference::Models::ToolDefinition>

A list of tools the model may call. Use this to provide a list of functions the model may generate JSON inputs for. A maximum of 128 functions is supported.

Returns:

  • (Array<OCI::GenerativeAiInference::Models::ToolDefinition>)

# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 94

def tools
  @tools
end

#top_k ⇒ Integer

An integer that sets up the model to use only the top k most likely tokens in the generated output. A higher k introduces more randomness into the output, making the output text sound more natural. The default value is -1, which means to consider all tokens. Setting to 0 also disables this method and considers all tokens.

If also using top p, then the model considers only the top tokens whose probabilities add up to p percent and ignores the rest of the k tokens. For example, if k is 20, but the probabilities of the top 10 add up to .75, then only the top 10 tokens are chosen.

Returns:

  • (Integer)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 38

def top_k
  @top_k
end

#top_p ⇒ Float

If set to a probability 0.0 < p < 1.0, it ensures that only the most likely tokens, with total probability mass of p, are considered for generation at each step.

To eliminate tokens with low likelihood, assign p a minimum percentage for the next token's likelihood. For example, when p is set to 0.75, the model eliminates the bottom 25 percent for the next token. Set to 1 to consider all tokens and set to 0 to disable. If both k and p are enabled, p acts after k.

Returns:

  • (Float)


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 45

def top_p
  @top_p
end
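
Putting the two sampling controls together, here is a hypothetical sketch of the filtering order described above (top k first, then top p); the actual backend implementation is not exposed by this SDK.

# probs is a Hash of token => probability, e.g. { 'a' => 0.5, 'b' => 0.3 }.
def candidate_tokens(probs, top_k, top_p)
  sorted = probs.sort_by { |_, p| -p }
  sorted = sorted.first(top_k) if top_k.positive? # top k filter, if enabled
  kept = []
  mass = 0.0
  sorted.each do |token, p|
    kept << token
    mass += p
    break if mass >= top_p # nucleus (top p) filter acts after k
  end
  kept
end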

Class Method Details

.attribute_map ⇒ Object

Attribute mapping from ruby-style variable name to JSON key.



# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 97

def self.attribute_map
  {
    # rubocop:disable Style/SymbolLiteral
    'api_format': :'apiFormat',
    'messages': :'messages',
    'is_stream': :'isStream',
    'num_generations': :'numGenerations',
    'seed': :'seed',
    'is_echo': :'isEcho',
    'top_k': :'topK',
    'top_p': :'topP',
    'temperature': :'temperature',
    'frequency_penalty': :'frequencyPenalty',
    'presence_penalty': :'presencePenalty',
    'stop': :'stop',
    'log_probs': :'logProbs',
    'max_tokens': :'maxTokens',
    'logit_bias': :'logitBias',
    'tool_choice': :'toolChoice',
    'tools': :'tools'
    # rubocop:enable Style/SymbolLiteral
  }
end

.swagger_types ⇒ Object

Attribute type mapping.



# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 122

def self.swagger_types
  {
    # rubocop:disable Style/SymbolLiteral
    'api_format': :'String',
    'messages': :'Array<OCI::GenerativeAiInference::Models::Message>',
    'is_stream': :'BOOLEAN',
    'num_generations': :'Integer',
    'seed': :'Integer',
    'is_echo': :'BOOLEAN',
    'top_k': :'Integer',
    'top_p': :'Float',
    'temperature': :'Float',
    'frequency_penalty': :'Float',
    'presence_penalty': :'Float',
    'stop': :'Array<String>',
    'log_probs': :'Integer',
    'max_tokens': :'Integer',
    'logit_bias': :'Object',
    'tool_choice': :'OCI::GenerativeAiInference::Models::ToolChoice',
    'tools': :'Array<OCI::GenerativeAiInference::Models::ToolDefinition>'
    # rubocop:enable Style/SymbolLiteral
  }
end

Instance Method Details

#==(other) ⇒ Object

Checks equality by comparing each attribute.

Parameters:

  • other (Object)

    the other object to be compared



# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 273

def ==(other)
  return true if equal?(other)

  self.class == other.class &&
    api_format == other.api_format &&
    messages == other.messages &&
    is_stream == other.is_stream &&
    num_generations == other.num_generations &&
    seed == other.seed &&
    is_echo == other.is_echo &&
    top_k == other.top_k &&
    top_p == other.top_p &&
    temperature == other.temperature &&
    frequency_penalty == other.frequency_penalty &&
    presence_penalty == other.presence_penalty &&
    stop == other.stop &&
    log_probs == other.log_probs &&
    max_tokens == other.max_tokens &&
    logit_bias == other.logit_bias &&
    tool_choice == other.tool_choice &&
    tools == other.tools
end

#build_from_hash(attributes) ⇒ Object

Builds the object from a hash

Parameters:

  • attributes (Hash)

    Model attributes in the form of a hash

Returns:

  • (Object)

    Returns the model itself



# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 319

def build_from_hash(attributes)
  return nil unless attributes.is_a?(Hash)

  self.class.swagger_types.each_pair do |key, type|
    if type =~ /^Array<(.*)>/i
      # check to ensure the input is an array given that the attribute
      # is documented as an array but the input is not
      if attributes[self.class.attribute_map[key]].is_a?(Array)
        public_method("#{key}=").call(
          attributes[self.class.attribute_map[key]]
            .map { |v| OCI::Internal::Util.convert_to_type(Regexp.last_match(1), v) }
        )
      end
    elsif !attributes[self.class.attribute_map[key]].nil?
      public_method("#{key}=").call(
        OCI::Internal::Util.convert_to_type(type, attributes[self.class.attribute_map[key]])
      )
    end
    # or else data not found in attributes(hash), not an issue as the data can be optional
  end

  self
end
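
A hypothetical round trip, relying only on the methods documented on this page: #to_hash emits the camelCase JSON keys that #build_from_hash reads back through attribute_map. The final equality assumes nested models such as Message also compare attribute-wise, as models in this SDK generally do.

copy = OCI::GenerativeAiInference::Models::GenericChatRequest.new
copy.build_from_hash(request.to_hash) # keys are camelCase symbols, e.g. :isStream
copy == request                       # => true (attribute-wise equality)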

#eql?(other) ⇒ Boolean

Parameters:

  • other (Object)

    the other object to be compared

Returns:

  • (Boolean)

See Also:

  • `==` method


# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 299

def eql?(other)
  self == other
end

#hash ⇒ Fixnum

Calculates hash code according to all attributes.

Returns:

  • (Fixnum)

    Hash code



# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 308

def hash
  [api_format, messages, is_stream, num_generations, seed, is_echo, top_k, top_p, temperature, frequency_penalty, presence_penalty, stop, log_probs, max_tokens, logit_bias, tool_choice, tools].hash
end
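
Because #eql? and #hash are both attribute-based, two requests with equal attributes collapse to one entry when used as Hash keys or Set members. A small sketch, reusing the request object from earlier:

require 'set'

# dup copies the instance variables, so the copy is eql? to the original
# and hashes identically.
Set.new([request, request.dup]).size # => 1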

#to_hash ⇒ Hash

Returns the object in the form of a hash

Returns:

  • (Hash)

    Returns the object in the form of a hash



# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 352

def to_hash
  hash = {}
  self.class.attribute_map.each_pair do |attr, param|
    value = public_method(attr).call
    next if value.nil? && !instance_variable_defined?("@#{attr}")

    hash[param] = _to_hash(value)
  end
  hash
end

#to_s ⇒ String

Returns the string representation of the object

Returns:

  • (String)

    String representation of the object



# File 'lib/oci/generative_ai_inference/models/generic_chat_request.rb', line 346

def to_s
  to_hash.to_s
end