diff --git a/.gitignore b/.gitignore index a8f240b..270185c 100644 --- a/.gitignore +++ b/.gitignore @@ -57,3 +57,4 @@ Gemfile.lock # Used by RuboCop. Remote config files pulled in from inherit_from directive. # .rubocop-https?--* site/ +CLAUDE.md \ No newline at end of file diff --git a/instructor-rb.gemspec b/instructor-rb.gemspec index ddb81ef..a026b35 100644 --- a/instructor-rb.gemspec +++ b/instructor-rb.gemspec @@ -31,10 +31,10 @@ Gem::Specification.new do |spec| spec.require_paths = ['lib'] - spec.add_dependency 'activesupport', '~> 7.0' - spec.add_dependency 'anthropic', '~> 0.2' - spec.add_dependency 'easy_talk', '~> 0.2' - spec.add_dependency 'ruby-openai', '~> 7' + spec.add_dependency 'activesupport', '>= 6.0' + spec.add_dependency 'easy_talk', '~> 2' + spec.add_dependency 'ruby-anthropic', '~> 0.4' + spec.add_dependency 'ruby-openai', '~> 8' spec.add_development_dependency 'pry-byebug', '~> 3.10' spec.add_development_dependency 'rake', '~> 13.1' spec.add_development_dependency 'rspec', '~> 3.0' diff --git a/lib/instructor.rb b/lib/instructor.rb index 4bf0260..a7f6cb3 100644 --- a/lib/instructor.rb +++ b/lib/instructor.rb @@ -5,42 +5,60 @@ require 'easy_talk' require 'active_support/all' require_relative 'instructor/version' +require_relative 'instructor/mode' require_relative 'instructor/openai/patch' require_relative 'instructor/openai/response' +require_relative 'instructor/openai/mode' +require_relative 'instructor/anthropic/mode' require_relative 'instructor/anthropic/patch' require_relative 'instructor/anthropic/response' -require_relative 'instructor/mode' # Instructor makes it easy to reliably get structured data like JSON from Large Language Models (LLMs) # like GPT-3.5, GPT-4, GPT-4-Vision module Instructor - @mode = nil - class Error < ::StandardError; end # The ValidationError class represents an error that occurs during validation. 
class ValidationError < ::StandardError; end - def self.mode - @mode - end - # Patches the OpenAI client to add the following functionality: # - Retries on exceptions # - Accepts and validates a response model # - Accepts a validation_context argument # # @param openai_client [OpenAI::Client] The OpenAI client to be patched. - # @param mode [Symbol] The mode to be used. Default is `Instructor::Mode::TOOLS.function`. + # @param mode [Symbol] The mode to be used. Default is `Instructor::Mode::TOOLS_STRICT`. # @return [OpenAI::Client] The patched OpenAI client. - def self.from_openai(openai_client, mode: Instructor::Mode::TOOLS.function) - @mode = mode + # @example Using tools strict mode (default) + # client = Instructor.from_openai(openai_client) + # @example Using standard tools mode + # client = Instructor.from_openai(openai_client, mode: Instructor::Mode::TOOLS) + # @example Using JSON mode + # client = Instructor.from_openai(openai_client, mode: Instructor::Mode::JSON) + def self.from_openai(openai_client, mode: Instructor::Mode::TOOLS_STRICT) + Instructor::OpenAI.mode = mode openai_client.prepend(Instructor::OpenAI::Patch) end + # Patches the Anthropic client to add the following functionality: + # - Retries on exceptions + # - Accepts and validates a response model + # - Accepts a validation_context argument + # - Supports multiple extraction modes + # # @param anthropic_client [Anthropic::Client] The Anthropic client to be patched. + # @param mode [Symbol] The mode to be used. Default is `Instructor::Mode::ANTHROPIC_TOOLS`. # @return [Anthropic::Client] The patched Anthropic client. 
- def self.from_anthropic(anthropic_client) + # @example Using tools mode (default) - forces specific tool use + # client = Instructor.from_anthropic(anthropic_client) + # @example Using JSON mode - prompt-based extraction + # client = Instructor.from_anthropic(anthropic_client, mode: Instructor::Mode::ANTHROPIC_JSON) + # @example Using reasoning tools mode - allows Claude to reason + # client = Instructor.from_anthropic(anthropic_client, mode: Instructor::Mode::ANTHROPIC_REASONING_TOOLS) + # @example Using parallel tools mode - multiple tools + # client = Instructor.from_anthropic(anthropic_client, mode: Instructor::Mode::ANTHROPIC_PARALLEL_TOOLS) + def self.from_anthropic(anthropic_client, mode: Instructor::Mode::ANTHROPIC_TOOLS) + Instructor::Anthropic.mode = mode anthropic_client.prepend(Instructor::Anthropic::Patch) end end diff --git a/lib/instructor/anthropic/mode.rb b/lib/instructor/anthropic/mode.rb new file mode 100644 index 0000000..9bc6e1b --- /dev/null +++ b/lib/instructor/anthropic/mode.rb @@ -0,0 +1,23 @@ +# frozen_string_literal: true + +require_relative '../mode' + +module Instructor + # Anthropic-specific mode configuration and management + module Anthropic + # Sets the current mode for Anthropic API interactions + # + # @param mode [Symbol] The mode to use (from Instructor::Mode constants) + # @return [Symbol] The mode that was set + def self.mode=(mode) + @mode = mode + end + + # Gets the current mode for Anthropic API interactions + # + # @return [Symbol] The current mode, defaults to ANTHROPIC_TOOLS + def self.mode + @mode ||= Instructor::Mode::ANTHROPIC_TOOLS + end + end +end diff --git a/lib/instructor/anthropic/patch.rb b/lib/instructor/anthropic/patch.rb index 1b6f064..b879946 100644 --- a/lib/instructor/anthropic/patch.rb +++ b/lib/instructor/anthropic/patch.rb @@ -10,22 +10,141 @@ module Anthropic module Patch include Instructor::Base::Patch - # Sends a message request to the API and processes the response. 
+ # Sends a chat request to the API and processes the response. # - # @param parameters [Hash] The parameters for the chat request as expected by the OpenAI client. + # @param parameters [Hash] The parameters for the chat request as expected by the Anthropic client. # @param response_model [Class] The response model class. # @param max_retries [Integer] The maximum number of retries. Default is 0. # @param validation_context [Hash] The validation context for the parameters. Optional. # @return [Object] The processed response. def messages(parameters:, response_model: nil, max_retries: 0, validation_context: nil) - with_retries(max_retries, [JSON::ParserError, Instructor::ValidationError, Faraday::ParsingError]) do - model = determine_model(response_model) + return super(parameters:) if response_model.nil? + + model = determine_model(response_model) + current_mode = Instructor::Anthropic.mode + + set_max_tokens(parameters) + + # Mode-specific parameter preparation + if tool_mode?(current_mode) function = build_function(model) - parameters[:max_tokens] = 1024 unless parameters.key?(:max_tokens) - parameters = prepare_parameters(parameters, validation_context, function) - ::Anthropic.configuration.extra_headers = { 'anthropic-beta' => 'tools-2024-04-04' } - response = ::Anthropic::Client.json_post(path: '/messages', parameters:) - process_response(response, model) + parameters = prepare_tool_parameters(parameters, validation_context, function, current_mode) + set_extra_headers + elsif json_mode?(current_mode) + parameters = prepare_json_parameters(parameters, validation_context, model) + else + raise ArgumentError, "Invalid Anthropic mode: #{current_mode}" + end + + response = super(parameters:) + process_response(response, model) + end + + private + + # Checks if the current mode is a tool-based mode + # + # @param mode [Symbol] The mode to check + # @return [Boolean] true if mode uses tools + def tool_mode?(mode) + Instructor::Mode.tool_mode?(mode) && 
mode.to_s.start_with?('anthropic') + end + + # Checks if the current mode is a JSON-based mode + # + # @param mode [Symbol] The mode to check + # @return [Boolean] true if mode uses JSON prompting + def json_mode?(mode) + mode == Instructor::Mode::ANTHROPIC_JSON + end + + def set_max_tokens(parameters) + parameters[:max_tokens] = 1024 unless parameters.key?(:max_tokens) + end + + def set_extra_headers + ::Anthropic.configuration.extra_headers = { 'anthropic-beta' => 'tools-2024-04-04' } + end + + def function_name(function) + function[:name] + end + + # Prepares parameters for tool-based modes + # + # @param parameters [Hash] Original parameters + # @param validation_context [Hash] Validation context + # @param function [Hash] Function/tool definition + # @param mode [Symbol] Current mode + # @return [Hash] Prepared parameters with tools and tool_choice + def prepare_tool_parameters(parameters, validation_context, function, mode) + parameters = apply_validation_context(parameters, validation_context) + parameters = parameters.merge(tools: [function]) + + tool_choice = resolve_tool_choice(function_name(function), mode) + parameters.merge!(tool_choice:) if tool_choice + + parameters + end + + # Prepares parameters for JSON mode (prompt-based) + # + # @param parameters [Hash] Original parameters + # @param validation_context [Hash] Validation context + # @param model [Class] Response model class + # @return [Hash] Prepared parameters with JSON schema in system prompt + def prepare_json_parameters(parameters, validation_context, model) + parameters = apply_validation_context(parameters, validation_context) + + # Generate JSON schema prompt + json_schema_message = <<~PROMPT.strip + As a genius expert, your task is to understand the content and provide + the parsed objects in json that match the following json_schema: + + #{JSON.pretty_generate(model.json_schema)} + + Make sure to return an instance of the JSON, not the schema itself. 
+ PROMPT + + # Inject into system messages + system_messages = build_system_messages(parameters[:system], json_schema_message) + parameters.merge(system: system_messages) + end + + # Builds system messages array combining existing and schema messages + # + # @param existing_system [String, Array, nil] Existing system messages + # @param schema_message [String] JSON schema instruction message + # @return [Array] Array of system message hashes + def build_system_messages(existing_system, schema_message) + messages = [] + + # Add existing system messages + if existing_system.is_a?(String) + messages << { type: 'text', text: existing_system } + elsif existing_system.is_a?(Array) + messages.concat(existing_system) + end + + # Add schema message + messages << { type: 'text', text: schema_message } + + messages + end + + # Resolves tool_choice based on mode + # + # @param function_name [String] Name of the function/tool + # @param mode [Symbol] Current mode + # @return [Hash, nil] Tool choice configuration or nil + def resolve_tool_choice(function_name, mode) + case mode + when Instructor::Mode::ANTHROPIC_TOOLS + # Force specific tool use + { type: 'tool', name: function_name } + when Instructor::Mode::ANTHROPIC_REASONING_TOOLS, Instructor::Mode::ANTHROPIC_PARALLEL_TOOLS + # Allow Claude to reason/choose + { type: 'auto' } end end @@ -35,7 +154,7 @@ def messages(parameters:, response_model: nil, max_retries: 0, validation_contex # @param model [Class] The response model class. # @return [Object] The processed response. def process_response(response, model) - parsed_response = Response.new(response).parse + parsed_response = Response.create(response).parse iterable? ? 
process_multiple_responses(parsed_response, model) : process_single_response(parsed_response, model) end diff --git a/lib/instructor/anthropic/response.rb b/lib/instructor/anthropic/response.rb index 900d934..61f987a 100644 --- a/lib/instructor/anthropic/response.rb +++ b/lib/instructor/anthropic/response.rb @@ -2,55 +2,122 @@ module Instructor module Anthropic - # The Response class represents the response received from the OpenAI API. - # It takes the raw response and provides convenience methods to access the chat completions, - # tool calls, function responses, and parsed arguments. - class Response - # Initializes a new instance of the Response class. + module Response + # Factory method to create the appropriate response type based on the mode # - # @param response [Hash] The response received from the OpenAI API. - def initialize(response) - @response = response - end - - # Parses the function response(s) and returns the parsed arguments. - # - # @return [Array, Hash] The parsed arguments. - # @raise [StandardError] if the api response contains an error. - def parse - raise StandardError, error_message if error? + # @param response [Hash] The response received from the Anthropic API + # @return [ToolResponse, JsonResponse] The appropriate response object + def self.create(response) + current_mode = Instructor::Anthropic.mode - if single_response? 
- arguments.first + if tool_mode?(current_mode) + ToolResponse.new(response) + elsif json_mode?(current_mode) + JsonResponse.new(response) else - arguments + raise ArgumentError, "Invalid Anthropic mode: #{current_mode}" end end - private - - def content - @response['content'] + # Checks if the current mode is a tool-based mode + # + # @param mode [Symbol] The mode to check + # @return [Boolean] true if mode uses tools + def self.tool_mode?(mode) + Instructor::Mode.tool_mode?(mode) && mode.to_s.start_with?('anthropic') end - def tool_calls - content.is_a?(Array) && content.select { |c| c['type'] == 'tool_use' } + # Checks if the current mode is a JSON-based mode + # + # @param mode [Symbol] The mode to check + # @return [Boolean] true if mode uses JSON prompting + def self.json_mode?(mode) + mode == Instructor::Mode::ANTHROPIC_JSON end - def single_response? - tool_calls&.size == 1 - end + # Base class for Anthropic API responses with common error handling + class BaseResponse + def initialize(response) + @response = response + end - def arguments - tool_calls.map { |tc| tc['input'] } + def error? + @response['type'] == 'error' + end + + def error_message + "#{@response.dig('error', 'type')} - #{@response.dig('error', 'message')}" + end end - def error? - @response['type'] == 'error' + # Tool-based response handler for ANTHROPIC_TOOLS, ANTHROPIC_REASONING_TOOLS, and ANTHROPIC_PARALLEL_TOOLS modes + class ToolResponse < BaseResponse + # Parses the tool response(s) and returns the parsed arguments. + # + # @return [Array, Hash] The parsed arguments. + # @raise [StandardError] if the api response contains an error. + def parse + raise StandardError, error_message if error? + + if single_response? + arguments.first + else + arguments + end + end + + private + + def content + @response['content'] + end + + def tool_calls + content.is_a?(Array) && content.select { |c| c['type'] == 'tool_use' } + end + + def single_response? 
+ tool_calls&.size == 1 + end + + def arguments + tool_calls.map { |tc| tc['input'] } + end end - def error_message - "#{@response.dig('error', 'type')} - #{@response.dig('error', 'message')}" + # JSON-based response handler for ANTHROPIC_JSON mode + class JsonResponse < BaseResponse + # Parses the JSON content from the response. + # + # @return [Hash] The parsed JSON data. + # @raise [StandardError] if the api response contains an error. + def parse + raise StandardError, error_message if error? + + # Extract text content from response + text_content = extract_text_content + + # Parse JSON from the text + JSON.parse(text_content) + rescue JSON::ParserError => e + raise StandardError, "Failed to parse JSON response: #{e.message}" + end + + private + + def extract_text_content + content = @response['content'] + + if content.is_a?(Array) + # Find first text content block + text_block = content.find { |c| c['type'] == 'text' } + text_block&.dig('text') || '' + elsif content.is_a?(String) + content + else + '' + end + end end end end diff --git a/lib/instructor/base/patch.rb b/lib/instructor/base/patch.rb index d8d8360..ccbbfca 100644 --- a/lib/instructor/base/patch.rb +++ b/lib/instructor/base/patch.rb @@ -2,7 +2,6 @@ module Instructor module Base - # The `Patch` module provides common methods for patching and modifying the client behavior. module Patch # Generates the function name for the API request. # You can customize the function name for the LLM by adding a `title` key to the schema. @@ -18,11 +17,12 @@ module Patch # end # ``` # The function name will be `User`. - # If the `title` key is not present, the function name will be the model's name. + # If the `title` key is not present, the function name will be a modified version of the model's name. # @param model [Class] The response model class. # @return [String] The generated function name. 
def generate_function_name(model) - model.schema.fetch(:title, model.name) + model_name = model.json_schema.fetch('title', model.name) + model_name.gsub('::', '_').gsub(/[^a-zA-Z0-9_]/, '').downcase end # Generates the description for the function. diff --git a/lib/instructor/mode.rb b/lib/instructor/mode.rb index df5d789..ad7672a 100644 --- a/lib/instructor/mode.rb +++ b/lib/instructor/mode.rb @@ -1,22 +1,132 @@ # frozen_string_literal: true -require 'ostruct' - module Instructor - # This module defines constants related to different modes of operation. - # It provides options for tool behavior, function types, and JSON modes. - # Currently supported modes are: - # - tools: select between function, auto, required, and none. - # more modes will be added in the near future. + # Mode module for patching LLM API clients. + # + # Each mode determines how the library formats and structures requests + # to different provider APIs and how it processes their responses. + # + # @example Using OpenAI tools mode + # client = Instructor.from_openai(openai_client, mode: Instructor::Mode::TOOLS) + # + # @example Using Anthropic tools mode + # client = Instructor.from_anthropic(anthropic_client, mode: Instructor::Mode::ANTHROPIC_TOOLS) module Mode - tool_options = %w[function auto required none].index_by(&:itself) - TOOL_BEHAVIOR = OpenStruct.new(tool_options) - - FUNCTIONS = 'function_call' - PARALLEL_TOOLS = 'parallel_tool_call' - TOOLS = TOOL_BEHAVIOR - JSON = 'json_mode' - MD_JSON = 'markdown_json_mode' - JSON_SCHEMA = 'json_schema_mode' + # OpenAI Modes + + # Deprecated: Use TOOLS instead + # @deprecated Use {TOOLS} instead + FUNCTIONS = :function_call + + # Parallel tool calling mode for OpenAI + PARALLEL_TOOLS = :parallel_tool_call + + # Standard tool calling mode for OpenAI (recommended) + TOOLS = :tool_call + + # Strict mode for OpenAI tools with enhanced validation + TOOLS_STRICT = :tools_strict + + # JSON mode for OpenAI + JSON = :json_mode + + # JSON schema mode for 
OpenAI + JSON_SCHEMA = :json_schema_mode + + # Markdown JSON mode for OpenAI + MD_JSON = :markdown_json_mode + + # Anthropic Modes + + # Tool calling mode for Anthropic Claude models + ANTHROPIC_TOOLS = :anthropic_tools + + # JSON mode for Anthropic Claude models + ANTHROPIC_JSON = :anthropic_json + + # Parallel tool calling mode for Anthropic + ANTHROPIC_PARALLEL_TOOLS = :anthropic_parallel_tools + + # Reasoning tools mode for Anthropic (extended thinking) + ANTHROPIC_REASONING_TOOLS = :anthropic_reasoning_tools + + # Track if deprecation warning has been shown + @functions_deprecation_shown = false + + class << self + # Returns a set of all tool-based modes. + # + # Tool modes use function/tool calling APIs to structure outputs. + # These modes are recommended for complex, nested data structures. + # + # @return [Set] Set of tool mode symbols + def tool_modes + Set[ + FUNCTIONS, + PARALLEL_TOOLS, + TOOLS, + TOOLS_STRICT, + ANTHROPIC_TOOLS, + ANTHROPIC_REASONING_TOOLS, + ANTHROPIC_PARALLEL_TOOLS + ] + end + + # Returns a set of all JSON-based modes. + # + # JSON modes use JSON output formatting to structure responses. + # These modes are simpler and work with more models. + # + # @return [Set] Set of JSON mode symbols + def json_modes + Set[ + JSON, + MD_JSON, + JSON_SCHEMA, + ANTHROPIC_JSON + ] + end + + # Checks if the given mode is a tool-based mode. + # + # @param mode [Symbol] The mode to check + # @return [Boolean] true if mode is tool-based + def tool_mode?(mode) + tool_modes.include?(mode) + end + + # Checks if the given mode is a JSON-based mode. + # + # @param mode [Symbol] The mode to check + # @return [Boolean] true if mode is JSON-based + def json_mode?(mode) + json_modes.include?(mode) + end + + # Warn about FUNCTIONS mode deprecation. + # + # Shows the warning only once per session to avoid spamming logs. 
+ # + # @return [void] + def warn_mode_functions_deprecation + return if @functions_deprecation_shown + + warn 'DEPRECATION WARNING: The FUNCTIONS mode is deprecated and will be removed in future versions. ' \ + 'Please use TOOLS mode instead.' + @functions_deprecation_shown = true + end + + # Validates that a mode is supported. + # + # @param mode [Symbol] The mode to validate + # @raise [ArgumentError] if mode is not supported + # @return [void] + def validate_mode!(mode) + all_modes = tool_modes + json_modes + return if all_modes.include?(mode) + + raise ArgumentError, "Unsupported mode: #{mode}. Supported modes: #{all_modes.to_a.join(', ')}" + end + end end end diff --git a/lib/instructor/openai/mode.rb b/lib/instructor/openai/mode.rb new file mode 100644 index 0000000..df0d068 --- /dev/null +++ b/lib/instructor/openai/mode.rb @@ -0,0 +1,42 @@ +# frozen_string_literal: true + +require_relative '../mode' + +module Instructor + # This module defines constants related to different modes of operation for the OpenAI api. + # It provides options for tool behavior, function types, and JSON modes. + # Currently supported modes are: + # - tools: select between function, auto, required, and none. + module OpenAI + def self.mode=(mode) + @mode = mode + end + + def self.mode + @mode ||= Instructor::Mode::TOOLS_STRICT + end + + # @deprecated Use {Instructor::Mode} instead. This module will be removed in a future version. + module Mode + # @deprecated Use {Instructor::Mode::TOOLS_STRICT} instead + STRUCTURED_OUTPUT = :structured_output + + # @deprecated Use {Instructor::Mode::TOOLS} instead + FUNCTION_CALLING = :function_calling + + # @deprecated Use {Instructor::Mode.tool_mode?} instead + def self.structured_output? + warn 'DEPRECATION WARNING: Instructor::OpenAI::Mode.structured_output? is deprecated. ' \ + 'Use Instructor::Mode::TOOLS_STRICT instead.' 
+ Instructor::OpenAI.mode == STRUCTURED_OUTPUT + end + + # @deprecated Use {Instructor::Mode.tool_mode?} instead + def self.function_calling? + warn 'DEPRECATION WARNING: Instructor::OpenAI::Mode.function_calling? is deprecated. ' \ + 'Use Instructor::Mode::TOOLS instead.' + Instructor::OpenAI.mode == FUNCTION_CALLING + end + end + end +end diff --git a/lib/instructor/openai/patch.rb b/lib/instructor/openai/patch.rb index 76a7eaa..3813db0 100644 --- a/lib/instructor/openai/patch.rb +++ b/lib/instructor/openai/patch.rb @@ -1,7 +1,6 @@ # frozen_string_literal: true require 'instructor/base/patch' - # The Instructor module provides functionality for interacting with OpenAI's chat API. module Instructor module OpenAI @@ -13,21 +12,33 @@ module Patch # # @param parameters [Hash] The parameters for the chat request as expected by the OpenAI client. # @param response_model [Class] The response model class. - # @param max_retries [Integer] The maximum number of retries. Default is 0. # @param validation_context [Hash] The validation context for the parameters. Optional. # @return [Object] The processed response. - def chat(parameters:, response_model: nil, max_retries: 0, validation_context: nil) - return json_post(path: '/chat/completions', parameters:) if response_model.nil? + def chat(parameters:, response_model: nil, tool_choice: :auto, validation_context: nil) + return super(parameters:) if response_model.nil? + + model = determine_model(response_model) + current_mode = Instructor::OpenAI.mode - with_retries(max_retries, [JSON::ParserError, Instructor::ValidationError, Faraday::ParsingError]) do - model = determine_model(response_model) + # Handle structured output modes (TOOLS_STRICT, JSON_SCHEMA, etc.) + if structured_output_mode?(current_mode) + schema = build_schema(model) + parameters = prepare_response_format(parameters, validation_context, schema) + # Handle tool calling modes (TOOLS, PARALLEL_TOOLS, FUNCTIONS, etc.) 
+ elsif tool_calling_mode?(current_mode) function = build_function(model) parameters = prepare_parameters(parameters, validation_context, function) - tool_choice = resolve_tool_choice(function[:function][:name]) - parameters.merge!(tool_choice:) - response = json_post(path: '/chat/completions', parameters:) - process_response(response, model) + tool_choice = resolve_tool_choice(tool_choice, function_name(function)) + parameters.merge!(tool_choice:) if tool_choice + else + raise ArgumentError, "Invalid mode: #{current_mode}" end + response = super(parameters:) + process_response(response, model) + end + + def mode + Instructor::OpenAI.mode end # Processes the API response. @@ -36,23 +47,51 @@ def chat(parameters:, response_model: nil, max_retries: 0, validation_context: n # @param model [Class] The response model class. # @return [Object] The processed response. def process_response(response, model) - parsed_response = Response.new(response).parse + response_object = Response.create(response) + raise ArgumentError, response_object.refusal if response_object.refusal.present? + + parsed_response = response_object.parse iterable? ? 
process_multiple_responses(parsed_response, model) : process_single_response(parsed_response, model) end private - def resolve_tool_choice(function_name) - case Instructor.mode - when Instructor::Mode::TOOLS.function - { type: 'function', function: { name: function_name } } - when Instructor::Mode::TOOLS.auto - 'auto' - when Instructor::Mode::TOOLS.required - 'required' - when Instructor::Mode::TOOLS.none - 'none' - end + # Checks if the current mode is a structured output mode + # + # @param mode [Symbol] The mode to check + # @return [Boolean] true if mode uses structured output (response_format) + def structured_output_mode?(mode) + # Support both new modes and legacy :structured_output symbol + mode == Instructor::Mode::TOOLS_STRICT || + mode == Instructor::Mode::JSON_SCHEMA || + mode == :structured_output + end + + # Checks if the current mode is a tool calling mode + # + # @param mode [Symbol] The mode to check + # @return [Boolean] true if mode uses tool calling + def tool_calling_mode?(mode) + # Support both new modes and legacy :function_calling symbol + Instructor::Mode.tool_mode?(mode) || + mode == :function_calling + end + + def function_name(function) + function[:function][:name] + end + + def resolve_tool_choice(tool_choice, function_name) + string_choices = { + auto: 'auto', + required: 'required', + none: 'none' + } + + return string_choices[tool_choice] if string_choices.key?(tool_choice) + + # For :force or any other value, return the function hash + { type: 'function', function: { name: function_name } } end # Builds the function details for the API request. 
# Builds the response_format payload for OpenAI structured output modes.
#
# @param model [Class] the response model class
# @return [Hash] a json_schema response_format hash with strict schema
#   validation enabled
def build_schema(model)
  {
    type: 'json_schema',
    json_schema: {
      name: generate_function_name(model),
      schema: model.json_schema,
      strict: true
    }
  }
end

# Prepares the request parameters for structured output mode by applying
# the validation context to the messages and attaching the response_format
# schema. Returns a new hash; the input parameters are not mutated.
#
# @param parameters [Hash] the original request parameters
# @param validation_context [Hash, nil] values interpolated into messages
# @param schema [Hash] the response_format hash from #build_schema
# @return [Hash] the merged parameters
def prepare_response_format(parameters, validation_context, schema)
  parameters = apply_validation_context(parameters, validation_context)
  parameters.merge(response_format: schema)
end

# Factory: wraps a raw API response in the response object matching the
# currently configured mode.
#
# @param response [Hash] the raw response received from the OpenAI API
# @return [StructuredResponse, ToolResponse] the wrapped response
# @raise [ArgumentError] when the configured mode is not recognized
def self.create(response)
  current_mode = Instructor::OpenAI.mode

  if structured_output_mode?(current_mode)
    StructuredResponse.new(response)
  elsif tool_calling_mode?(current_mode)
    ToolResponse.new(response)
  else
    raise ArgumentError, "Invalid mode: #{current_mode}"
  end
end
# Response wrappers for the patched OpenAI client. The factory
# (Response.create) selects the subclass that matches the configured mode.
module Response
  # True when +mode+ extracts via response_format (structured output),
  # including the legacy :structured_output symbol.
  #
  # @param mode [Symbol] the mode to check
  # @return [Boolean]
  def self.structured_output_mode?(mode)
    mode == Instructor::Mode::TOOLS_STRICT ||
      mode == Instructor::Mode::JSON_SCHEMA ||
      mode == :structured_output
  end

  # True when +mode+ extracts via tool calling, including the legacy
  # :function_calling symbol.
  #
  # @param mode [Symbol] the mode to check
  # @return [Boolean]
  def self.tool_calling_mode?(mode)
    Instructor::Mode.tool_mode?(mode) ||
      mode == :function_calling
  end

  # Common accessors shared by all OpenAI response wrappers.
  class BaseResponse
    # @param response [Hash] the raw OpenAI API response
    def initialize(response)
      @response = response
    end

    # @return [Array, nil] the 'choices' array of the response
    def chat_completions
      @response['choices']
    end

    # @return [String, nil] the refusal message of the first choice, if any
    def refusal
      chat_completions&.dig(0, 'message', 'refusal')
    end
  end

  # Wraps tool-calling responses and parses the function-call arguments.
  class ToolResponse < BaseResponse
    # @return [Array, nil] the tool calls of the first choice
    def tool_calls
      chat_completions&.dig(0, 'message', 'tool_calls')
    end

    # @return [Array<Hash>, nil] the 'function' hash of each tool call
    def function_responses
      tool_calls&.map { |tool_call| tool_call['function'] }
    end

    # @return [Hash, nil] the first function response
    def function_response
      function_responses&.first
    end

    # @return [Boolean] true when exactly one function response is present
    def single_response?
      function_responses&.size == 1
    end

    # Parses the JSON arguments of the function response(s).
    #
    # @return [Hash, Array<Hash>] parsed arguments (an array when the model
    #   returned multiple tool calls)
    # @raise [JSON::ParserError] when the arguments are not valid JSON
    def parse
      if single_response?
        JSON.parse(function_response['arguments'])
      else
        function_responses.map { |res| JSON.parse(res['arguments']) }
      end
    end

    # Returns the raw (unparsed) arguments of the named function.
    #
    # @param function_name [String] the function name to look up
    # @return [String, nil] the arguments string, or nil if not found
    def by_function_name(function_name)
      function_responses&.find { |res| res['name'] == function_name }&.dig('arguments')
    end
  end

  # Wraps structured-output responses and parses the message content.
  class StructuredResponse < BaseResponse
    # @return [String, nil] the content of the first choice's message
    def content
      chat_completions&.dig(0, 'message', 'content')
    end

    # Parses the message content as JSON.
    #
    # Fix: this previously rescued every StandardError and returned nil,
    # which silently fed a nil payload downstream and bypassed the
    # JSON::ParserError-driven retry mechanism. It now raises, mirroring
    # the Anthropic JsonResponse ("Failed to parse JSON ...") behavior.
    #
    # @return [Hash] the parsed JSON data
    # @raise [JSON::ParserError] when the content is missing or not valid JSON
    def parse
      JSON.parse(content.to_s)
    rescue JSON::ParserError => e
      raise JSON::ParserError, "Failed to parse JSON from response content: #{e.message}"
    end
  end
end
after each test + after do + Instructor::Anthropic.mode = nil + end + let(:user_model) do Class.new do include EasyTalk::Model @@ -55,26 +60,6 @@ def self.name end end - context 'when an exception occurs' do - let(:client) { patched_client.new } - let(:max_retries) { 3 } - let(:parameters) { {} } - let(:response_model) { double } - - before do - allow(client).to receive(:determine_model).and_return(double) - allow(client).to receive(:build_function).and_return(double) - allow(client).to receive(:prepare_parameters).and_return({}) - allow(client).to receive(:process_response).and_return(double) - allow(::Anthropic::Client).to receive(:json_post).and_raise(JSON::ParserError) - end - - it 'retries the specified number of times' do - expect { client.messages(parameters:, response_model:, max_retries:) }.to raise_error(JSON::ParserError) - expect(::Anthropic::Client).to have_received(:json_post).exactly(max_retries).times - end - end - context 'with validation context' do let(:client) { patched_client.new } let(:parameters) do @@ -132,4 +117,43 @@ def self.name end.to raise_error(Instructor::ValidationError) end end + + describe 'mode support' do + it 'uses ANTHROPIC_TOOLS mode by default' do + expect(Instructor::Anthropic.mode).to eq(Instructor::Mode::ANTHROPIC_TOOLS) + end + + it 'accepts mode parameter on initialization' do + Instructor.from_anthropic(Anthropic::Client, mode: Instructor::Mode::ANTHROPIC_JSON) + expect(Instructor::Anthropic.mode).to eq(Instructor::Mode::ANTHROPIC_JSON) + end + + context 'with ANTHROPIC_TOOLS mode' do + it 'sets the mode correctly' do + Instructor.from_anthropic(Anthropic::Client, mode: Instructor::Mode::ANTHROPIC_TOOLS) + expect(Instructor::Anthropic.mode).to eq(Instructor::Mode::ANTHROPIC_TOOLS) + end + end + + context 'with ANTHROPIC_JSON mode' do + it 'sets the mode correctly' do + Instructor.from_anthropic(Anthropic::Client, mode: Instructor::Mode::ANTHROPIC_JSON) + expect(Instructor::Anthropic.mode).to 
eq(Instructor::Mode::ANTHROPIC_JSON) + end + end + + context 'with ANTHROPIC_REASONING_TOOLS mode' do + it 'sets the mode correctly' do + Instructor.from_anthropic(Anthropic::Client, mode: Instructor::Mode::ANTHROPIC_REASONING_TOOLS) + expect(Instructor::Anthropic.mode).to eq(Instructor::Mode::ANTHROPIC_REASONING_TOOLS) + end + end + + context 'with ANTHROPIC_PARALLEL_TOOLS mode' do + it 'sets the mode correctly' do + Instructor.from_anthropic(Anthropic::Client, mode: Instructor::Mode::ANTHROPIC_PARALLEL_TOOLS) + expect(Instructor::Anthropic.mode).to eq(Instructor::Mode::ANTHROPIC_PARALLEL_TOOLS) + end + end + end end diff --git a/spec/anthropic/response_spec.rb b/spec/anthropic/response_spec.rb new file mode 100644 index 0000000..0f6a2c0 --- /dev/null +++ b/spec/anthropic/response_spec.rb @@ -0,0 +1,188 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe Instructor::Anthropic::Response do + after { Instructor::Anthropic.mode = nil } + + describe '.create' do + let(:response) { { 'content' => [] } } + + context 'when in ANTHROPIC_TOOLS mode' do + before { Instructor::Anthropic.mode = Instructor::Mode::ANTHROPIC_TOOLS } + + it 'returns a ToolResponse instance' do + expect(described_class.create(response)).to be_a(described_class::ToolResponse) + end + end + + context 'when in ANTHROPIC_REASONING_TOOLS mode' do + before { Instructor::Anthropic.mode = Instructor::Mode::ANTHROPIC_REASONING_TOOLS } + + it 'returns a ToolResponse instance' do + expect(described_class.create(response)).to be_a(described_class::ToolResponse) + end + end + + context 'when in ANTHROPIC_PARALLEL_TOOLS mode' do + before { Instructor::Anthropic.mode = Instructor::Mode::ANTHROPIC_PARALLEL_TOOLS } + + it 'returns a ToolResponse instance' do + expect(described_class.create(response)).to be_a(described_class::ToolResponse) + end + end + + context 'when in ANTHROPIC_JSON mode' do + before { Instructor::Anthropic.mode = Instructor::Mode::ANTHROPIC_JSON } + + it 'returns a 
JsonResponse instance' do + expect(described_class.create(response)).to be_a(described_class::JsonResponse) + end + end + end + + describe Instructor::Anthropic::Response::ToolResponse do + subject(:response_object) { described_class.new(response) } + + let(:response) do + { + 'id' => 'msg_123', + 'type' => 'message', + 'role' => 'assistant', + 'content' => [ + { + 'type' => 'tool_use', + 'id' => 'toolu_456', + 'name' => 'User', + 'input' => { 'name' => 'Jason', 'age' => 25 } + } + ] + } + end + + describe '#parse' do + it 'returns the tool input for single response' do + expect(response_object.parse).to eq({ 'name' => 'Jason', 'age' => 25 }) + end + + context 'with multiple tool uses' do + let(:response) do + { + 'id' => 'msg_123', + 'type' => 'message', + 'content' => [ + { + 'type' => 'tool_use', + 'name' => 'User', + 'input' => { 'name' => 'Jason', 'age' => 25 } + }, + { + 'type' => 'tool_use', + 'name' => 'User', + 'input' => { 'name' => 'Alice', 'age' => 30 } + } + ] + } + end + + it 'returns an array of tool inputs' do + expected = [ + { 'name' => 'Jason', 'age' => 25 }, + { 'name' => 'Alice', 'age' => 30 } + ] + expect(response_object.parse).to eq(expected) + end + end + + context 'with error response' do + let(:response) do + { + 'type' => 'error', + 'error' => { + 'type' => 'invalid_request_error', + 'message' => 'Invalid request' + } + } + end + + it 'raises an error with the error message' do + expect { response_object.parse }.to raise_error(StandardError, /invalid_request_error.*Invalid request/) + end + end + end + end + + describe Instructor::Anthropic::Response::JsonResponse do + subject(:response_object) { described_class.new(response) } + + describe '#parse' do + context 'with text content containing JSON' do + let(:response) do + { + 'id' => 'msg_123', + 'type' => 'message', + 'content' => [ + { + 'type' => 'text', + 'text' => '{"name": "Jason", "age": 25}' + } + ] + } + end + + it 'parses the JSON from text content' do + 
expect(response_object.parse).to eq({ 'name' => 'Jason', 'age' => 25 }) + end + end + + context 'with string content' do + let(:response) do + { + 'id' => 'msg_123', + 'type' => 'message', + 'content' => '{"name": "Jason", "age": 25}' + } + end + + it 'parses the JSON from string content' do + expect(response_object.parse).to eq({ 'name' => 'Jason', 'age' => 25 }) + end + end + + context 'with invalid JSON' do + let(:response) do + { + 'id' => 'msg_123', + 'type' => 'message', + 'content' => [ + { + 'type' => 'text', + 'text' => 'not valid json' + } + ] + } + end + + it 'raises an error' do + expect { response_object.parse }.to raise_error(StandardError, /Failed to parse JSON/) + end + end + + context 'with error response' do + let(:response) do + { + 'type' => 'error', + 'error' => { + 'type' => 'invalid_request_error', + 'message' => 'Invalid request' + } + } + end + + it 'raises an error with the error message' do + expect { response_object.parse }.to raise_error(StandardError, /invalid_request_error.*Invalid request/) + end + end + end + end +end diff --git a/spec/examples/autoticketer_spec.rb b/spec/examples/autoticketer_spec.rb index e591fa4..dc70da3 100644 --- a/spec/examples/autoticketer_spec.rb +++ b/spec/examples/autoticketer_spec.rb @@ -8,7 +8,7 @@ c.include AutoticketerModels end - let(:client) { Instructor.from_openai(OpenAI::Client).new } + let(:client) { Instructor.from_openai(OpenAI::Client, mode: :function_calling).new } let(:data) do <<~DATA @@ -62,52 +62,55 @@ def generate(data) expect(result.as_json).to include_json( { - "items": [ - { + "items": [ + { "id": 1, - "name": 'Improve Authentication System', - "description": 'Work on front-end revamp and back-end optimization', - "priority": 'High', - "assignees": %w[ - Bob - Carol + "name": "Improve Authentication System", + "description": "Work on front-end revamp and back-end optimization for the authentication system", + "priority": "High", + "assignees": [ + "Bob", + "Carol" ], "subtasks": [ - { 
- "id": 2, - "name": 'Front-end Revamp' - }, - { - "id": 3, - "name": 'Back-end Optimization' - } - ] - }, - { + { + "id": 2, + "name": "Front-end Revamp" + }, + { + "id": 3, + "name": "Back-end Optimization" + } + ], + "dependencies": nil + }, + { "id": 4, - "name": 'Integrate Authentication System with New Billing System', - "description": 'Integrate authentication system with the new billing system', - "priority": 'Medium', + "name": "Integrate Authentication with Billing System", + "description": "Integrate the improved authentication system with the new billing system", + "priority": "Medium", "assignees": [ - 'Bob' + "Bob" ], + "subtasks": nil, "dependencies": [ - 1 + 1 ] - }, - { + }, + { "id": 5, - "name": 'Update User Documentation', - "description": 'Update user documentation to reflect changes', - "priority": 'Low', + "name": "Update User Documentation", + "description": "Update user documentation to reflect changes in authentication and billing systems", + "priority": "Low", "assignees": [ - 'Carol' + "Carol" ], + "subtasks": nil, "dependencies": [ - 2 + 2 ] - } - ] + } + ] } ) end diff --git a/spec/features/anthropic/basic_use_spec.rb b/spec/features/anthropic/basic_use_spec.rb new file mode 100644 index 0000000..bdfe3a6 --- /dev/null +++ b/spec/features/anthropic/basic_use_spec.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe 'running an Anthropic function call' do + let(:user_model) do + Class.new do + include EasyTalk::Model + + def self.name + 'User' + end + + define_schema do + property :name, String + property :age, Integer + end + end + end + + let(:client) { Instructor.from_anthropic(Anthropic::Client).new } + + let(:parameters) do + { + model: 'claude-sonnet-4-20250514', + messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] + } + end + + let(:response_model) { user_model } + + it 'returns a single object with the expected valid attribute values', vcr: 
'features/anthropic/basic_spec/valid_response' do + user = client.messages(parameters:, response_model:) + + expect(user.name).to eq('Jason') + expect(user.age).to eq(25) + end +end diff --git a/spec/features/basic_use_spec.rb b/spec/features/openai/basic_use_spec.rb similarity index 82% rename from spec/features/basic_use_spec.rb rename to spec/features/openai/basic_use_spec.rb index bddd5a2..996a085 100644 --- a/spec/features/basic_use_spec.rb +++ b/spec/features/openai/basic_use_spec.rb @@ -18,7 +18,7 @@ def self.name end end - let(:client) { Instructor.from_openai(OpenAI::Client).new } + let(:client) { Instructor.from_openai(OpenAI::Client, mode: :function_calling).new } let(:parameters) do { @@ -29,7 +29,7 @@ def self.name let(:response_model) { user_model } - it 'returns a single object with the expected valid attribute values', vcr: 'basic_spec/valid_response' do + it 'returns a single object with the expected valid attribute values', vcr: 'features/openai/basic_spec/valid_response' do user = client.chat(parameters:, response_model:) expect(user.name).to eq('Jason') diff --git a/spec/features/iterable_spec.rb b/spec/features/openai/iterable_spec.rb similarity index 97% rename from spec/features/iterable_spec.rb rename to spec/features/openai/iterable_spec.rb index 1311af8..dc1c455 100644 --- a/spec/features/iterable_spec.rb +++ b/spec/features/openai/iterable_spec.rb @@ -18,7 +18,7 @@ def self.name end end - let(:client) { Instructor.from_openai(OpenAI::Client, mode: Instructor::Mode::TOOLS.required).new } + let(:client) { Instructor.from_openai(OpenAI::Client, mode: Instructor::Mode::TOOLS).new } let(:parameters) do { diff --git a/spec/features/openai/structured_output_spec.rb b/spec/features/openai/structured_output_spec.rb new file mode 100644 index 0000000..77d43b3 --- /dev/null +++ b/spec/features/openai/structured_output_spec.rb @@ -0,0 +1,38 @@ +# frozen_string_literal: true + +require 'spec_helper' + +RSpec.describe 'running an OpenAI function call' 
do + let(:user_model) do + Class.new do + include EasyTalk::Model + + def self.name + 'User' + end + + define_schema do + property :name, String + property :age, Integer + end + end + end + + let(:client) { Instructor.from_openai(OpenAI::Client).new } + + let(:parameters) do + { + model: 'gpt-4o-mini', + messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] + } + end + + let(:response_model) { user_model } + + it 'returns a single object with the expected valid attribute values', vcr: 'structured_output/valid_response' do + user = client.chat(parameters: parameters, response_model: response_model) + + expect(user.name).to eq('Jason') + expect(user.age).to eq(25) + end +end diff --git a/spec/instructor/mode_spec.rb b/spec/instructor/mode_spec.rb new file mode 100644 index 0000000..422b400 --- /dev/null +++ b/spec/instructor/mode_spec.rb @@ -0,0 +1,168 @@ +# frozen_string_literal: true + +require 'spec_helper' +require 'instructor/mode' + +RSpec.describe Instructor::Mode do + describe 'OpenAI mode constants' do + it 'defines FUNCTIONS constant' do + expect(described_class::FUNCTIONS).to eq(:function_call) + end + + it 'defines PARALLEL_TOOLS constant' do + expect(described_class::PARALLEL_TOOLS).to eq(:parallel_tool_call) + end + + it 'defines TOOLS constant' do + expect(described_class::TOOLS).to eq(:tool_call) + end + + it 'defines TOOLS_STRICT constant' do + expect(described_class::TOOLS_STRICT).to eq(:tools_strict) + end + + it 'defines JSON constant' do + expect(described_class::JSON).to eq(:json_mode) + end + + it 'defines JSON_SCHEMA constant' do + expect(described_class::JSON_SCHEMA).to eq(:json_schema_mode) + end + + it 'defines MD_JSON constant' do + expect(described_class::MD_JSON).to eq(:markdown_json_mode) + end + end + + describe 'Anthropic mode constants' do + it 'defines ANTHROPIC_TOOLS constant' do + expect(described_class::ANTHROPIC_TOOLS).to eq(:anthropic_tools) + end + + it 'defines ANTHROPIC_JSON constant' do + 
expect(described_class::ANTHROPIC_JSON).to eq(:anthropic_json) + end + + it 'defines ANTHROPIC_PARALLEL_TOOLS constant' do + expect(described_class::ANTHROPIC_PARALLEL_TOOLS).to eq(:anthropic_parallel_tools) + end + + it 'defines ANTHROPIC_REASONING_TOOLS constant' do + expect(described_class::ANTHROPIC_REASONING_TOOLS).to eq(:anthropic_reasoning_tools) + end + end + + describe '.tool_modes' do + it 'returns a Set of all tool-based modes' do + expected_modes = Set[ + :function_call, + :parallel_tool_call, + :tool_call, + :tools_strict, + :anthropic_tools, + :anthropic_reasoning_tools, + :anthropic_parallel_tools + ] + expect(described_class.tool_modes).to eq(expected_modes) + end + + it 'returns a Set object' do + expect(described_class.tool_modes).to be_a(Set) + end + end + + describe '.json_modes' do + it 'returns a Set of all JSON-based modes' do + expected_modes = Set[ + :json_mode, + :markdown_json_mode, + :json_schema_mode, + :anthropic_json + ] + expect(described_class.json_modes).to eq(expected_modes) + end + + it 'returns a Set object' do + expect(described_class.json_modes).to be_a(Set) + end + end + + describe '.tool_mode?' do + it 'returns true for tool-based modes' do + expect(described_class.tool_mode?(described_class::TOOLS)).to be true + expect(described_class.tool_mode?(described_class::TOOLS_STRICT)).to be true + expect(described_class.tool_mode?(described_class::ANTHROPIC_TOOLS)).to be true + end + + it 'returns false for JSON-based modes' do + expect(described_class.tool_mode?(described_class::JSON)).to be false + expect(described_class.tool_mode?(described_class::ANTHROPIC_JSON)).to be false + end + + it 'returns false for unknown modes' do + expect(described_class.tool_mode?(:unknown_mode)).to be false + end + end + + describe '.json_mode?' 
do + it 'returns true for JSON-based modes' do + expect(described_class.json_mode?(described_class::JSON)).to be true + expect(described_class.json_mode?(described_class::JSON_SCHEMA)).to be true + expect(described_class.json_mode?(described_class::ANTHROPIC_JSON)).to be true + end + + it 'returns false for tool-based modes' do + expect(described_class.json_mode?(described_class::TOOLS)).to be false + expect(described_class.json_mode?(described_class::ANTHROPIC_TOOLS)).to be false + end + + it 'returns false for unknown modes' do + expect(described_class.json_mode?(:unknown_mode)).to be false + end + end + + describe '.warn_mode_functions_deprecation' do + it 'warns about FUNCTIONS mode deprecation' do + # Reset the warning flag + described_class.instance_variable_set(:@functions_deprecation_shown, false) + + expect { described_class.warn_mode_functions_deprecation } + .to output(/DEPRECATION WARNING.*FUNCTIONS mode is deprecated/i).to_stderr + end + + it 'only warns once per session' do + # Reset the warning flag + described_class.instance_variable_set(:@functions_deprecation_shown, false) + + # First call should warn + expect { described_class.warn_mode_functions_deprecation } + .to output(/DEPRECATION WARNING/i).to_stderr + + # Second call should not warn + expect { described_class.warn_mode_functions_deprecation } + .not_to output.to_stderr + end + end + + describe '.validate_mode!' 
do + it 'does not raise error for valid tool modes' do + expect { described_class.validate_mode!(described_class::TOOLS) }.not_to raise_error + expect { described_class.validate_mode!(described_class::ANTHROPIC_TOOLS) }.not_to raise_error + end + + it 'does not raise error for valid JSON modes' do + expect { described_class.validate_mode!(described_class::JSON) }.not_to raise_error + expect { described_class.validate_mode!(described_class::ANTHROPIC_JSON) }.not_to raise_error + end + + it 'raises ArgumentError for unsupported modes' do + expect { described_class.validate_mode!(:invalid_mode) } + .to raise_error(ArgumentError, /Unsupported mode: invalid_mode/) + end + + it 'includes list of supported modes in error message' do + expect { described_class.validate_mode!(:invalid_mode) } + .to raise_error(ArgumentError, /Supported modes:/) + end + end +end diff --git a/spec/instructor_spec.rb b/spec/instructor_spec.rb index ce89810..c22a276 100644 --- a/spec/instructor_spec.rb +++ b/spec/instructor_spec.rb @@ -3,13 +3,30 @@ require 'spec_helper' RSpec.describe Instructor, '.class' do + after { Instructor::OpenAI.mode = nil } + it 'returns the default mode after patching' do described_class.from_openai(OpenAI::Client) - expect(described_class.mode).to eq(Instructor::Mode::TOOLS.function) + expect(Instructor::OpenAI.mode).to eq(Instructor::Mode::TOOLS_STRICT) + end + + it 'changes the mode to TOOLS_STRICT' do + described_class.from_openai(OpenAI::Client, mode: Instructor::Mode::TOOLS_STRICT) + expect(Instructor::OpenAI.mode).to eq(Instructor::Mode::TOOLS_STRICT) + end + + it 'changes the mode to TOOLS' do + described_class.from_openai(OpenAI::Client, mode: Instructor::Mode::TOOLS) + expect(Instructor::OpenAI.mode).to eq(Instructor::Mode::TOOLS) + end + + it 'supports legacy structured_output mode for backward compatibility' do + described_class.from_openai(OpenAI::Client, mode: :structured_output) + expect(Instructor::OpenAI.mode).to eq(:structured_output) end - it 
'changes the the mode' do - described_class.from_openai(OpenAI::Client, mode: Instructor::Mode::TOOLS.auto) - expect(described_class.mode).to eq(Instructor::Mode::TOOLS.auto) + it 'supports legacy function_calling mode for backward compatibility' do + described_class.from_openai(OpenAI::Client, mode: :function_calling) + expect(Instructor::OpenAI.mode).to eq(:function_calling) end end diff --git a/spec/openai/mode_spec.rb b/spec/openai/mode_spec.rb new file mode 100644 index 0000000..25ffb8b --- /dev/null +++ b/spec/openai/mode_spec.rb @@ -0,0 +1,87 @@ +# frozen_string_literal: true + +require 'spec_helper' +require 'instructor/openai/mode' + +RSpec.describe Instructor::OpenAI do + # Reset the mode after each test to avoid test pollution + after do + described_class.mode = nil + end + + describe '.mode and .mode=' do + it 'allows setting and getting the mode' do + described_class.mode = :test_mode + expect(described_class.mode).to eq(:test_mode) + end + + it 'returns TOOLS_STRICT when mode is not set' do + expect(described_class.mode).to eq(Instructor::Mode::TOOLS_STRICT) + end + + it 'accepts Instructor::Mode constants' do + described_class.mode = Instructor::Mode::TOOLS + expect(described_class.mode).to eq(Instructor::Mode::TOOLS) + end + + it 'accepts Instructor::Mode JSON modes' do + described_class.mode = Instructor::Mode::JSON + expect(described_class.mode).to eq(Instructor::Mode::JSON) + end + end +end + +RSpec.describe Instructor::OpenAI::Mode do + # Reset the mode after each test to avoid test pollution + after do + Instructor::OpenAI.mode = nil + end + + describe 'deprecated constants' do + it 'defines STRUCTURED_OUTPUT constant for backward compatibility' do + expect(described_class::STRUCTURED_OUTPUT).to eq(:structured_output) + end + + it 'defines FUNCTION_CALLING constant for backward compatibility' do + expect(described_class::FUNCTION_CALLING).to eq(:function_calling) + end + end + + describe '.structured_output? 
(deprecated)' do + it 'returns true when mode is set to STRUCTURED_OUTPUT' do + Instructor::OpenAI.mode = Instructor::OpenAI::Mode::STRUCTURED_OUTPUT + expect { expect(described_class.structured_output?).to be true } + .to output(/DEPRECATION WARNING/i).to_stderr + end + + it 'returns false when mode is set to something else' do + Instructor::OpenAI.mode = Instructor::OpenAI::Mode::FUNCTION_CALLING + expect { expect(described_class.structured_output?).to be false } + .to output(/DEPRECATION WARNING/i).to_stderr + end + + it 'warns about deprecation' do + expect { described_class.structured_output? } + .to output(/DEPRECATION WARNING.*TOOLS_STRICT/i).to_stderr + end + end + + describe '.function_calling? (deprecated)' do + it 'returns true when mode is set to FUNCTION_CALLING' do + Instructor::OpenAI.mode = Instructor::OpenAI::Mode::FUNCTION_CALLING + expect { expect(described_class.function_calling?).to be true } + .to output(/DEPRECATION WARNING/i).to_stderr + end + + it 'returns false when mode is set to something else' do + Instructor::OpenAI.mode = Instructor::OpenAI::Mode::STRUCTURED_OUTPUT + expect { expect(described_class.function_calling?).to be false } + .to output(/DEPRECATION WARNING/i).to_stderr + end + + it 'warns about deprecation' do + expect { described_class.function_calling? 
} + .to output(/DEPRECATION WARNING.*TOOLS/i).to_stderr + end + end +end diff --git a/spec/openai/patch_spec.rb b/spec/openai/patch_spec.rb index 8d14b6c..f20f486 100644 --- a/spec/openai/patch_spec.rb +++ b/spec/openai/patch_spec.rb @@ -29,6 +29,11 @@ def self.instructions expect(patched_client).to eq(OpenAI::Client) end + it 'is in TOOLS_STRICT mode by default' do + client = patched_client.new + expect(client.mode).to eq(Instructor::Mode::TOOLS_STRICT) + end + context 'when generating description' do let(:client) { patched_client.new } @@ -73,7 +78,7 @@ def self.name describe 'when setting the function_name' do it 'returns the function_name based on the schema title' do client = patched_client.new - expect(client.generate_function_name(user_model)).to eq('SomeUser') + expect(client.generate_function_name(user_model)).to eq('someuser') end it 'returns the class name when the schema title is not defined' do @@ -88,16 +93,18 @@ def self.name end client = patched_client.new - expect(client.generate_function_name(model)).to eq('User') + expect(client.generate_function_name(model)).to eq('user') end end + end - it 'returns an object with the expected valid attribute values', vcr: 'patching_spec/valid_response' do + context 'in structured output mode' do + it 'returns an object with the expected valid attribute values', vcr: 'openai/patch/valid_response' do client = patched_client.new user = client.chat( parameters: { - model: 'gpt-3.5-turbo', + model: 'gpt-4o-2024-08-06', messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] }, response_model: user_model @@ -106,51 +113,52 @@ def self.name expect(user.name).to eq('Jason') expect(user.age).to eq(25) end - end - - context 'with retry mechanism' do - let(:client) { patched_client.new } - let(:parameters) { { key: 'value' } } - let(:max_retries) { 3 } - before do - allow(client).to receive(:json_post).and_return('choices' => - [{ 'index' => 0, - 'message' => - { 'role' => 'assistant', - 'tool_calls' => [{ 
'id' => 'call_85vQq30Nt8xU1mly2Y2Y1tL2', 'type' => 'function', - 'function' => { 'name' => 'User', 'arguments' => '\"bad:json\"' } }] } }]) - end + context 'with validation context' do + let(:client) { patched_client.new } + let(:parameters) do + { + model: 'gpt-4o-2024-08-06', + messages: [ + { + role: 'user', + content: 'Answer the question: %s with the text chunk: %s' + } + ] + } + end - it 'retries the chat method when parsing fails' do - expect do - client.chat(parameters:, response_model: user_model, max_retries:) - end.to raise_error(JSON::ParserError) + it 'returns an object with the expected valid attribute values', vcr: 'openai/patch/with_validation_context' do + user = client.chat( + parameters:, + response_model: user_model, + validation_context: { question: 'What is your name and age?', + text_chunk: 'my name is Jason and I turned 25 years old yesterday' } + ) - expect(client).to have_received(:json_post).exactly(max_retries).times + expect(user.name).to eq('Jason') + expect(user.age).to eq(25) + end end end - context 'with validation context' do - let(:client) { patched_client.new } - let(:parameters) do - { - model: 'gpt-3.5-turbo', - messages: [ - { - role: 'user', - content: 'Answer the question: %s with the text chunk: %s' - } - ] - } + context 'in function calling mode' do + subject(:patched_client) { Instructor.from_openai(OpenAI::Client, mode: :function_calling) } + + it 'is in function calling mode' do + client = patched_client.new + expect(client.mode).to eq(:function_calling) end - it 'returns an object with the expected valid attribute values', vcr: 'patching_spec/with_validation_context' do + it 'returns an object with the expected valid attribute values', vcr: 'openai/patch/valid_function_calling_response' do + client = patched_client.new + user = client.chat( - parameters:, - response_model: user_model, - validation_context: { question: 'What is your name and age?', - text_chunk: 'my name is Jason and I turned 25 years old yesterday' } + 
parameters: { + model: 'gpt-4o-2024-08-06', + messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] + }, + response_model: user_model ) expect(user.name).to eq('Jason') @@ -177,23 +185,23 @@ def self.name let(:client) { patched_client.new } let(:parameters) do { - model: 'gpt-3.5-turbo', + model: 'gpt-4o-2024-08-06', messages: [{ role: 'user', content: 'Extract Jason is 25 years old' }] } end - it 'raises an error when the response model is invalid', vcr: 'patching_spec/invalid_response' do + it 'raises an argument error when the model refuses to respond', vcr: 'openai/patch/invalid_response' do expect do client.chat(parameters:, response_model: invalid_model) - end.to raise_error(Instructor::ValidationError) + end.to raise_error(ArgumentError, "I'm sorry, I can't assist with that request.") end end - describe 'when the client is used ia a standard manner' do - it 'does not raise an error when the client is used in a standard manner', vcr: 'patching_spec/standard_usage' do + describe 'when the client is used in standard chat mode (not function calling)' do + it 'does not raise an error when the client is used in a standard manner', vcr: 'openai/patch/standard_usage' do response = patched_client.new.chat( parameters: { - model: 'gpt-3.5-turbo', + model: 'gpt-4o-2024-08-06', messages: [{ role: 'user', content: 'How is the weather today in New York?' 
}] } ) diff --git a/spec/openai/response_spec.rb b/spec/openai/response_spec.rb index fa1a5ac..f2c34c0 100644 --- a/spec/openai/response_spec.rb +++ b/spec/openai/response_spec.rb @@ -3,61 +3,270 @@ require 'spec_helper' RSpec.describe Instructor::OpenAI::Response do - subject(:response_object) { described_class.new(response) } - - let(:response) do - { 'id' => 'chatcmpl-9DEGpBfHqcS17uJtx1vxpRMEb4DtK', - 'object' => 'chat.completion', - 'created' => 1_712_940_147, - 'model' => 'gpt-3.5-turbo-0125', - 'choices' => [ - { 'index' => 0, - 'message' => - { 'role' => 'assistant', - 'content' => nil, - 'tool_calls' => [ - { - 'id' => 'call_ljjAxRNujNWmDhrlJW2DLprK', - 'type' => 'function', - 'function' => { 'name' => 'User', 'arguments' => '{"name": "Jason", "age": 25}' } - } - ] }, - 'logprobs' => nil, - 'finish_reason' => 'tool_calls' } - ], - 'usage' => { - 'prompt_tokens' => 63, - 'completion_tokens' => 32, - 'total_tokens' => 95 - }, - 'system_fingerprint' => 'fp_c2295e73ad' } - end + describe '.create' do + let(:response) { { 'choices' => [] } } - it 'returns a chat completion' do - expect(response_object.chat_completions).to eq(response['choices']) - end + after { Instructor::OpenAI.mode = nil } - it 'returns the tool calls' do - expect(response_object.tool_calls).to eq(response['choices'][0]['message']['tool_calls']) - end + context 'when in structured output mode' do + before { Instructor::OpenAI.mode = Instructor::Mode::TOOLS_STRICT } - it 'returns the function responses' do - expect(response_object.function_responses).to eq([response['choices'][0]['message']['tool_calls'][0]['function']]) - end + it 'returns a StructuredResponse instance' do + expect(described_class.create(response)).to be_a(described_class::StructuredResponse) + end + end + + context 'when in function calling mode' do + before { Instructor::OpenAI.mode = Instructor::Mode::TOOLS } - it 'returns the function arguments by function name' do - expect(response_object.by_function_name('User')).to 
eq('{"name": "Jason", "age": 25}') + it 'returns a ToolResponse instance' do + expect(described_class.create(response)).to be_a(described_class::ToolResponse) + end + end + + context 'when in legacy structured output mode' do + before { Instructor::OpenAI.mode = :structured_output } + + it 'returns a StructuredResponse instance for backward compatibility' do + expect(described_class.create(response)).to be_a(described_class::StructuredResponse) + end + end + + context 'when in legacy function calling mode' do + before { Instructor::OpenAI.mode = :function_calling } + + it 'returns a ToolResponse instance for backward compatibility' do + expect(described_class.create(response)).to be_a(described_class::ToolResponse) + end + end end - it 'single response' do - expect(response_object.single_response?).to eq(true) + describe Instructor::OpenAI::Response::BaseResponse do + subject(:response_object) { described_class.new(response) } + + let(:response) do + { 'id' => 'chatcmpl-base', + 'object' => 'chat.completion', + 'created' => 1_712_940_147, + 'choices' => [ + { 'index' => 0, + 'message' => { + 'role' => 'assistant', + 'refusal' => 'I cannot assist with that request' + }, + 'finish_reason' => 'stop' } + ] } + end + + it 'returns chat completions' do + expect(response_object.chat_completions).to eq(response['choices']) + end + + it 'returns refusal message' do + expect(response_object.refusal).to eq('I cannot assist with that request') + end + + context 'with empty response' do + let(:response) { {} } + + it 'handles missing choices gracefully' do + expect(response_object.chat_completions).to be_nil + end + + it 'handles missing refusal gracefully' do + expect(response_object.refusal).to be_nil + end + end end - it 'parses the response' do - expect(response_object.parse).to eq('name' => 'Jason', 'age' => 25) + describe Instructor::OpenAI::Response::ToolResponse do + subject(:response_object) { described_class.new(response) } + + let(:response) do + { 'id' => 
'chatcmpl-9DEGpBfHqcS17uJtx1vxpRMEb4DtK', + 'object' => 'chat.completion', + 'created' => 1_712_940_147, + 'model' => 'gpt-3.5-turbo-0125', + 'choices' => [ + { 'index' => 0, + 'message' => + { 'role' => 'assistant', + 'content' => nil, + 'tool_calls' => [ + { + 'id' => 'call_ljjAxRNujNWmDhrlJW2DLprK', + 'type' => 'function', + 'function' => { 'name' => 'User', 'arguments' => '{"name": "Jason", "age": 25}' } + } + ] }, + 'logprobs' => nil, + 'finish_reason' => 'tool_calls' } + ], + 'usage' => { + 'prompt_tokens' => 63, + 'completion_tokens' => 32, + 'total_tokens' => 95 + }, + 'system_fingerprint' => 'fp_c2295e73ad' } + end + + it 'returns a chat completion' do + expect(response_object.chat_completions).to eq(response['choices']) + end + + it 'returns the tool calls' do + expect(response_object.tool_calls).to eq(response['choices'][0]['message']['tool_calls']) + end + + it 'returns the function responses' do + expect(response_object.function_responses).to eq([response['choices'][0]['message']['tool_calls'][0]['function']]) + end + + it 'returns the function arguments by function name' do + expect(response_object.by_function_name('User')).to eq('{"name": "Jason", "age": 25}') + end + + it 'single response' do + expect(response_object.single_response?).to eq(true) + end + + it 'parses the response' do + expect(response_object.parse).to eq('name' => 'Jason', 'age' => 25) + end + + it 'returns the first function response' do + expect(response_object.function_response).to eq(response['choices'][0]['message']['tool_calls'][0]['function']) + end + + context 'with multiple function responses' do + let(:response) do + { 'id' => 'chatcmpl-multi', + 'object' => 'chat.completion', + 'created' => 1_712_940_147, + 'choices' => [ + { 'index' => 0, + 'message' => { + 'role' => 'assistant', + 'content' => nil, + 'tool_calls' => [ + { + 'id' => 'call_1', + 'type' => 'function', + 'function' => { 'name' => 'User1', 'arguments' => '{"name": "Alice", "age": 30}' } + }, + { + 'id' => 
'call_2', + 'type' => 'function', + 'function' => { 'name' => 'User2', 'arguments' => '{"name": "Bob", "age": 25}' } + } + ] + }, + 'finish_reason' => 'tool_calls' } + ] } + end + + it 'identifies multiple responses' do + expect(response_object.single_response?).to eq(false) + end + + it 'returns all function responses' do + expect(response_object.function_responses.size).to eq(2) + end + + it 'parses multiple responses correctly' do + expected_result = [ + { 'name' => 'Alice', 'age' => 30 }, + { 'name' => 'Bob', 'age' => 25 } + ] + expect(response_object.parse).to eq(expected_result) + end + + it 'returns the correct function by name' do + expect(response_object.by_function_name('User2')).to eq('{"name": "Bob", "age": 25}') + end + end + + context 'with invalid JSON in arguments' do + let(:response) do + { 'choices' => [ + { 'message' => { + 'tool_calls' => [ + { + 'function' => { 'name' => 'User', 'arguments' => '{invalid json}' } + } + ] + } } + ] } + end + + it 'raises a JSON::ParserError when parsing invalid JSON' do + expect { response_object.parse }.to raise_error(JSON::ParserError) + end + end + + context 'with empty response' do + let(:response) { {} } + + it 'handles missing data gracefully' do + expect(response_object.tool_calls).to be_nil + expect(response_object.function_responses).to be_nil + expect(response_object.function_response).to be_nil + expect(response_object.single_response?).to eq(false) + end + end end - it 'returns the first function response' do - expect(response_object.function_response).to eq(response['choices'][0]['message']['tool_calls'][0]['function']) + describe Instructor::OpenAI::Response::StructuredResponse do + subject(:response_object) { described_class.new(response) } + + let(:response) do + { 'id' => 'chatcmpl-123', + 'object' => 'chat.completion', + 'created' => 1_712_940_147, + 'model' => 'gpt-3.5-turbo-0125', + 'choices' => [ + { 'index' => 0, + 'message' => { + 'role' => 'assistant', + 'content' => '{"name": "Jason", 
"age": 25}' + }, + 'finish_reason' => 'stop' } + ] } + end + + it 'returns chat completions' do + expect(response_object.chat_completions).to eq(response['choices']) + end + + it 'returns content' do + expect(response_object.content).to eq('{"name": "Jason", "age": 25}') + end + + it 'parses the response' do + expect(response_object.parse).to eq('name' => 'Jason', 'age' => 25) + end + + context 'with invalid JSON content' do + let(:response) do + { 'choices' => [ + { 'message' => { + 'content' => '{invalid json}' + } } + ] } + end + + it 'returns nil when parsing invalid JSON' do + expect(response_object.parse).to be_nil + end + end + + context 'with empty response' do + let(:response) { {} } + + it 'handles missing content gracefully' do + expect(response_object.content).to be_nil + expect(response_object.parse).to be_nil + end + end end end diff --git a/spec/vcr_cassettes/anthropic_patch/invalid_response.yml b/spec/vcr_cassettes/anthropic_patch/invalid_response.yml index ecd85ee..829d579 100644 --- a/spec/vcr_cassettes/anthropic_patch/invalid_response.yml +++ b/spec/vcr_cassettes/anthropic_patch/invalid_response.yml @@ -6,15 +6,15 @@ http_interactions: body: encoding: UTF-8 string: '{"model":"claude-3-opus-20240229","messages":[{"role":"user","content":"Extract - Jason is 25 years old"}],"max_tokens":1024,"tools":[{"name":"Invalidmodel","description":"Correctly - extracted `InvalidModel` with all the required parameters with correct types","input_schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"string"}},"required":["name","age"]}}]}' + Jason is 25 years old"}],"max_tokens":1024,"tools":[{"name":"invalidmodel","description":"Correctly + extracted `InvalidModel` with all the required parameters with correct types","input_schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"additionalProperties":false,"required":["name","age"]}}],"tool_choice":{"type":"tool","name":"invalidmodel"}}' headers: - 
Content-Type: - - application/json X-Api-Key: - - + - "" Anthropic-Version: - '2023-06-01' + Content-Type: + - application/json Anthropic-Beta: - tools-2024-04-04 Accept-Encoding: @@ -29,47 +29,57 @@ http_interactions: message: OK headers: Date: - - Thu, 09 May 2024 20:20:11 GMT + - Thu, 02 Oct 2025 18:26:43 GMT Content-Type: - application/json Transfer-Encoding: - chunked Connection: - keep-alive + Anthropic-Ratelimit-Input-Tokens-Limit: + - '20000' + Anthropic-Ratelimit-Input-Tokens-Remaining: + - '20000' + Anthropic-Ratelimit-Input-Tokens-Reset: + - '2025-10-02T18:26:43Z' + Anthropic-Ratelimit-Output-Tokens-Limit: + - '4000' + Anthropic-Ratelimit-Output-Tokens-Remaining: + - '4000' + Anthropic-Ratelimit-Output-Tokens-Reset: + - '2025-10-02T18:26:44Z' Anthropic-Ratelimit-Requests-Limit: - - '5' + - '50' Anthropic-Ratelimit-Requests-Remaining: - - '4' + - '49' Anthropic-Ratelimit-Requests-Reset: - - '2024-05-09T20:21:30Z' + - '2025-10-02T18:26:43Z' Anthropic-Ratelimit-Tokens-Limit: - - '10000' + - '24000' Anthropic-Ratelimit-Tokens-Remaining: - - '9000' + - '24000' Anthropic-Ratelimit-Tokens-Reset: - - '2024-05-09T20:21:30Z' + - '2025-10-02T18:26:43Z' Request-Id: - - req_015hdwz7zx57NRsSTpN2HTV9 - X-Cloud-Trace-Context: - - 0bd4780909a278c548f8cc6bf3bb8178 + - req_011CTiqqaoJaTXcyT7Kw9tqY + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Anthropic-Organization-Id: + - ae455ef8-0ea5-4779-a1ff-1228616561d2 + X-Envoy-Upstream-Service-Time: + - '2099' Via: - 1.1 google Cf-Cache-Status: - DYNAMIC + X-Robots-Tag: + - none Server: - cloudflare Cf-Ray: - - 881463470a0c8dd8-MIA + - 98863fe2ee4fb6f2-BOG body: encoding: ASCII-8BIT - string: '{"id":"msg_01EKfajCRuz7bheKbQE5Dkqr","type":"message","role":"assistant","model":"claude-3-opus-20240229","stop_sequence":null,"usage":{"input_tokens":489,"output_tokens":206},"content":[{"type":"text","text":"\nThe - key parts of the request are:\n- Extract information from text\n- The text - contains a 
name \"Jason\" and age \"25 years old\"\n\nThe Invalidmodel tool - looks relevant because it can extract a name and age. Let''s check if the - required parameters are provided:\n- name: The text contains the name \"Jason\", - so this can be provided \n- age: The text specifies Jason is \"25 years old\", - so the age can be provided as \"25 years old\"\n\nAll the required parameters - are present in the input text, so we can proceed with calling the tool.\n"},{"type":"tool_use","id":"toolu_01B6t1vi4tF9qEAx6RMH1sPU","name":"Invalidmodel","input":{"name":"Jason","age":"25 - years old"}}],"stop_reason":"tool_use"}' - recorded_at: Thu, 09 May 2024 20:20:11 GMT -recorded_with: VCR 6.2.0 + string: '{"id":"msg_018K2FPihpHjeeH4pHEEJFMu","type":"message","role":"assistant","model":"claude-3-opus-20240229","content":[{"type":"tool_use","id":"toolu_01QqqJm7JF5evuaWrytuMye4","name":"invalidmodel","input":{"name":"Jason","age":""}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":397,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":50,"service_tier":"standard"}}' + recorded_at: Thu, 02 Oct 2025 18:26:43 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/anthropic_patch/valid_response.yml b/spec/vcr_cassettes/anthropic_patch/valid_response.yml index 9f52663..bac4b78 100644 --- a/spec/vcr_cassettes/anthropic_patch/valid_response.yml +++ b/spec/vcr_cassettes/anthropic_patch/valid_response.yml @@ -6,17 +6,15 @@ http_interactions: body: encoding: UTF-8 string: '{"model":"claude-3-opus-20240229","messages":[{"role":"user","content":"Extract - Jason is 25 years old"}],"max_tokens":1024,"tools":[{"name":"User","description":"Correctly - extracted `User` with all the required parameters with correct types","input_schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}]}' + Jason is 25 years 
old"}],"max_tokens":1024,"tools":[{"name":"user","description":"Correctly + extracted `User` with all the required parameters with correct types","input_schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"additionalProperties":false,"required":["name","age"]}}],"tool_choice":{"type":"tool","name":"user"}}' headers: - Content-Type: - - application/json X-Api-Key: - - + - "" Anthropic-Version: - '2023-06-01' - Anthropic-Beta: - - tools-2024-04-04 + Content-Type: + - application/json Accept-Encoding: - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 Accept: @@ -29,45 +27,57 @@ http_interactions: message: OK headers: Date: - - Thu, 09 May 2024 16:30:05 GMT + - Thu, 02 Oct 2025 18:26:39 GMT Content-Type: - application/json Transfer-Encoding: - chunked Connection: - keep-alive + Anthropic-Ratelimit-Input-Tokens-Limit: + - '20000' + Anthropic-Ratelimit-Input-Tokens-Remaining: + - '20000' + Anthropic-Ratelimit-Input-Tokens-Reset: + - '2025-10-02T18:26:37Z' + Anthropic-Ratelimit-Output-Tokens-Limit: + - '4000' + Anthropic-Ratelimit-Output-Tokens-Remaining: + - '4000' + Anthropic-Ratelimit-Output-Tokens-Reset: + - '2025-10-02T18:26:39Z' Anthropic-Ratelimit-Requests-Limit: - - '5' + - '50' Anthropic-Ratelimit-Requests-Remaining: - - '4' + - '49' Anthropic-Ratelimit-Requests-Reset: - - '2024-05-09T16:31:30Z' + - '2025-10-02T18:26:37Z' Anthropic-Ratelimit-Tokens-Limit: - - '10000' + - '24000' Anthropic-Ratelimit-Tokens-Remaining: - - '9000' + - '24000' Anthropic-Ratelimit-Tokens-Reset: - - '2024-05-09T16:31:30Z' + - '2025-10-02T18:26:37Z' Request-Id: - - req_01LRMuxYDXrJtQa7EBuwcwrJ - X-Cloud-Trace-Context: - - 12b1b2d6fb75df0730777980cbf8e87a + - req_011CTiqqCXrU2N3eefz43owj + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Anthropic-Organization-Id: + - ae455ef8-0ea5-4779-a1ff-1228616561d2 + X-Envoy-Upstream-Service-Time: + - '2399' Via: - 1.1 google Cf-Cache-Status: - DYNAMIC + X-Robots-Tag: + - none Server: - 
cloudflare Cf-Ray: - - 881312395967a677-MIA + - 98863fc2481fb6f5-BOG body: encoding: ASCII-8BIT - string: '{"id":"msg_01GmF8irRChwxKSGNXWB64YA","type":"message","role":"assistant","model":"claude-3-opus-20240229","stop_sequence":null,"usage":{"input_tokens":486,"output_tokens":171},"content":[{"type":"text","text":"\nThe - user has provided a sentence that appears to contain information in the format - needed for the User function. Let''s break it down:\n\nName: The name \"Jason\" - is provided directly in the input.\nAge: The age of 25 years old is provided - directly after the name.\n\nBoth of the required parameters for the User function - (name and age) are present in the input. Since we have all the necessary information, - we can proceed with calling the User function.\n"},{"type":"tool_use","id":"toolu_01Um2D6sgPCrZmh7gNYn5Luu","name":"User","input":{"name":"Jason","age":25}}],"stop_reason":"tool_use"}' - recorded_at: Thu, 09 May 2024 16:30:05 GMT -recorded_with: VCR 6.2.0 + string: '{"id":"msg_01Xm6f5uDrgqZGqik9GizgRH","type":"message","role":"assistant","model":"claude-3-opus-20240229","content":[{"type":"tool_use","id":"toolu_01VPsjUwGExCrwnttYjie5Jc","name":"user","input":{"name":"Jason","age":25}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":394,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":50,"service_tier":"standard"}}' + recorded_at: Thu, 02 Oct 2025 18:26:38 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/anthropic_patch/with_validation_context.yml b/spec/vcr_cassettes/anthropic_patch/with_validation_context.yml index ed154f9..f2f5644 100644 --- a/spec/vcr_cassettes/anthropic_patch/with_validation_context.yml +++ b/spec/vcr_cassettes/anthropic_patch/with_validation_context.yml @@ -7,15 +7,15 @@ http_interactions: encoding: UTF-8 string: 
'{"model":"claude-3-opus-20240229","messages":[{"role":"user","content":"Answer the question: What is your name and age? with the text chunk: my name is Jason - and I turned 25 years old yesterday"}],"max_tokens":1024,"tools":[{"name":"User","description":"Correctly - extracted `User` with all the required parameters with correct types","input_schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}]}' + and I turned 25 years old yesterday"}],"max_tokens":1024,"tools":[{"name":"user","description":"Correctly + extracted `User` with all the required parameters with correct types","input_schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"additionalProperties":false,"required":["name","age"]}}],"tool_choice":{"type":"tool","name":"user"}}' headers: - Content-Type: - - application/json X-Api-Key: - - + - "" Anthropic-Version: - '2023-06-01' + Content-Type: + - application/json Anthropic-Beta: - tools-2024-04-04 Accept-Encoding: @@ -30,46 +30,57 @@ http_interactions: message: OK headers: Date: - - Thu, 09 May 2024 20:17:49 GMT + - Thu, 02 Oct 2025 18:26:41 GMT Content-Type: - application/json Transfer-Encoding: - chunked Connection: - keep-alive + Anthropic-Ratelimit-Input-Tokens-Limit: + - '20000' + Anthropic-Ratelimit-Input-Tokens-Remaining: + - '20000' + Anthropic-Ratelimit-Input-Tokens-Reset: + - '2025-10-02T18:26:40Z' + Anthropic-Ratelimit-Output-Tokens-Limit: + - '4000' + Anthropic-Ratelimit-Output-Tokens-Remaining: + - '4000' + Anthropic-Ratelimit-Output-Tokens-Reset: + - '2025-10-02T18:26:42Z' Anthropic-Ratelimit-Requests-Limit: - - '5' + - '50' Anthropic-Ratelimit-Requests-Remaining: - - '4' + - '49' Anthropic-Ratelimit-Requests-Reset: - - '2024-05-09T20:18:30Z' + - '2025-10-02T18:26:40Z' Anthropic-Ratelimit-Tokens-Limit: - - '10000' + - '24000' Anthropic-Ratelimit-Tokens-Remaining: - - '9000' + - '24000' Anthropic-Ratelimit-Tokens-Reset: - - 
'2024-05-09T20:18:30Z' + - '2025-10-02T18:26:40Z' Request-Id: - - req_01X62QzcyrjKKb1pHKVrvFj8 - X-Cloud-Trace-Context: - - dba7eed0f154afe4bd161be4c4f86725 + - req_011CTiqqPygUW3tHS7CHHZxm + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Anthropic-Organization-Id: + - ae455ef8-0ea5-4779-a1ff-1228616561d2 + X-Envoy-Upstream-Service-Time: + - '2142' Via: - 1.1 google Cf-Cache-Status: - DYNAMIC + X-Robots-Tag: + - none Server: - cloudflare Cf-Ray: - - 88145fda2ef03716-MIA + - 98863fd3192fb6f6-BOG body: encoding: ASCII-8BIT - string: '{"id":"msg_01NSQ3xhUyjHFLhFd6TMVx42","type":"message","role":"assistant","model":"claude-3-opus-20240229","stop_sequence":null,"usage":{"input_tokens":507,"output_tokens":180},"content":[{"type":"text","text":"\nThe - relevant tool to use here is the User function, since the question is asking - for a name and age, which matches the parameters of that function.\n\nAnalyzing - the provided text for the required parameters:\nname: The text says \"my name - is Jason\", so the name is provided and is \"Jason\"\nage: The text says \"I - turned 25 years old yesterday\". 
So the age is provided and is 25.\n\nSince - both required parameters are provided, I can proceed with calling the User - function.\n"},{"type":"tool_use","id":"toolu_01V2yurTTibtau42B4QHBuCJ","name":"User","input":{"name":"Jason","age":25}}],"stop_reason":"tool_use"}' - recorded_at: Thu, 09 May 2024 20:17:49 GMT -recorded_with: VCR 6.2.0 + string: '{"id":"msg_01PE6KXxPCHbK6pw3saJijV9","type":"message","role":"assistant","model":"claude-3-opus-20240229","content":[{"type":"tool_use","id":"toolu_01MB3EMEWEZEsMM7N1hBvF5V","name":"user","input":{"name":"Jason","age":25}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":415,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":50,"service_tier":"standard"}}' + recorded_at: Thu, 02 Oct 2025 18:26:41 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/autoticketer/generate.yml b/spec/vcr_cassettes/autoticketer/generate.yml index 050db88..4722e09 100644 --- a/spec/vcr_cassettes/autoticketer/generate.yml +++ b/spec/vcr_cassettes/autoticketer/generate.yml @@ -27,16 +27,17 @@ http_interactions: all these changes. It''s a low-priority task but still important.\n\nCarol: I can take that on once the front-end changes for the authentication system are done. So, it would be dependent on that.\n\nAlice: Sounds like a plan. 
- Let''s get these tasks modeled out and get started.\n"}],"tools":[{"type":"function","function":{"name":"Actionitems","description":"Correctly - extracted `ActionItems` with all the required parameters with correct types","parameters":{"type":"object","properties":{"items":{"type":"array","items":{"type":"object","properties":{"id":{"type":"integer","description":"Unique + Let''s get these tasks modeled out and get started.\n"}],"tools":[{"type":"function","function":{"name":"autoticketermodels_actionitems","description":"Correctly + extracted `AutoticketerModels::ActionItems` with all the required parameters + with correct types","parameters":{"type":"object","properties":{"items":{"type":"array","items":{"type":"object","properties":{"id":{"type":"integer","description":"Unique identifier for the ticket"},"name":{"type":"string","description":"Title of the ticket"},"description":{"type":"string","description":"Detailed description of the ticket"},"priority":{"type":"string","description":"Priority level"},"assignees":{"type":"array","items":{"type":"string"},"description":"List of users assigned to the ticket"},"subtasks":{"anyOf":[{"type":"array","items":{"type":"object","properties":{"id":{"type":"integer","description":"Unique identifier for the subtask"},"name":{"type":"string","description":"Informative - title of the subtask"}},"required":["id","name"]},"description":"List of subtasks - associated with the ticket"},{"type":"null"}]},"dependencies":{"anyOf":[{"type":"array","items":{"type":"integer"},"description":"List - of ticket IDs that this ticket depends on"},{"type":"null"}]}},"required":["id","name","description","priority","assignees"]}}},"required":["items"]}}}]}' + title of the subtask"}},"additionalProperties":false,"required":["id","name"]},"description":"List + of subtasks associated with the ticket"},{"type":"null"}]},"dependencies":{"anyOf":[{"type":"array","items":{"type":"integer"},"description":"List + of ticket IDs that this ticket 
depends on"},{"type":"null"}]}},"additionalProperties":false,"required":["id","name","description","priority","assignees","subtasks","dependencies"]}}},"additionalProperties":false,"required":["items"]}}}],"tool_choice":"auto"}' headers: Content-Type: - application/json @@ -54,62 +55,66 @@ http_interactions: message: OK headers: Date: - - Fri, 12 Apr 2024 18:42:05 GMT + - Thu, 02 Oct 2025 18:26:47 GMT Content-Type: - application/json Transfer-Encoding: - chunked Connection: - keep-alive - Access-Control-Allow-Origin: - - "*" - Cache-Control: - - no-cache, must-revalidate - Openai-Model: - - gpt-3.5-turbo-0125 + Access-Control-Expose-Headers: + - X-Request-ID Openai-Organization: - user-jtftkqrbreteg5pmdrfzchv6 Openai-Processing-Ms: - - '2596' + - '1317' + Openai-Project: + - proj_2AuZKbF2zHX6djDZFwWGbqic Openai-Version: - '2020-10-01' - Strict-Transport-Security: - - max-age=15724800; includeSubDomains + X-Envoy-Upstream-Service-Time: + - '1649' X-Ratelimit-Limit-Requests: - '10000' X-Ratelimit-Limit-Tokens: - - '60000' + - '200000' X-Ratelimit-Remaining-Requests: - '9999' X-Ratelimit-Remaining-Tokens: - - '59579' + - '199594' X-Ratelimit-Reset-Requests: - 8.64s X-Ratelimit-Reset-Tokens: - - 421ms + - 121ms X-Request-Id: - - req_5585e8e921a606e6ff650b185bd50af9 + - req_36fe982099f24602bbcad5585701ff31 + X-Openai-Proxy-Wasm: + - v0.1 Cf-Cache-Status: - DYNAMIC Set-Cookie: - - __cf_bm=KyXMepIPH_DkWI0HlByAgyNFg0LiLphryKrP44161j0-1712947325-1.0.1.1-Xp1q6Yv31R6tOR_H91DXSzza.2NNen_DgNFpYGetiQ_IWnTPv099_aS20TkatHa3NSBOTMjQNOopWTyvShjcBg; - path=/; expires=Fri, 12-Apr-24 19:12:05 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=IGr4MJxM2CcdEs5k00mMzaYbqBKdbDBbN1uWnp2s18g-1759429607-1.0.1.1-Kyi0KbLJBMwpSoDjduE5uapGVa9XN0ltVL9PneuDtB3arLblZqVdgdQFh3xh9.xXzvGFMWeN6Lt_1txor4nyqQaCJnaajit0acvFKmn.yg4; + path=/; expires=Thu, 02-Oct-25 18:56:47 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - 
_cfuvid=al3HkpS19.N3AhU0yXzSU9vvJjrStn7bzkM3TbjPvnQ-1712947325495-0.0.1.1-604800000; + - _cfuvid=lQ5PU5T92z1Gd.K4Pj17.ZnDgLP0xDsH_jJa6kMJrlg-1759429607167-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + X-Content-Type-Options: + - nosniff Server: - cloudflare Cf-Ray: - - 87355a9f884d09ce-MIA + - 98863ff2d9facdca-LIM Alt-Svc: - h3=":443"; ma=86400 body: encoding: ASCII-8BIT string: | { - "id": "chatcmpl-9DG8YYiZR7tfZpLP5xtm6hnOMak9J", + "id": "chatcmpl-CMIInradzc8iow4sAKMw38uJtdyOm", "object": "chat.completion", - "created": 1712947322, + "created": 1759429605, "model": "gpt-3.5-turbo-0125", "choices": [ { @@ -119,25 +124,38 @@ http_interactions: "content": null, "tool_calls": [ { - "id": "call_Tw9YOum9ssHUSXOndPSYsAn6", + "id": "call_8TTXQfUkHW3NPz5n5nIqbz4o", "type": "function", "function": { - "name": "Actionitems", - "arguments": "{\"items\":[{\"id\":1,\"name\":\"Improve Authentication System\",\"description\":\"Work on front-end revamp and back-end optimization\",\"priority\":\"High\",\"assignees\":[\"Bob\",\"Carol\"],\"subtasks\":[{\"id\":2,\"name\":\"Front-end Revamp\"},{\"id\":3,\"name\":\"Back-end Optimization\"}]},{\"id\":4,\"name\":\"Integrate Authentication System with New Billing System\",\"description\":\"Integrate authentication system with the new billing system\",\"priority\":\"Medium\",\"assignees\":[\"Bob\"],\"dependencies\":[1]},{\"id\":5,\"name\":\"Update User Documentation\",\"description\":\"Update user documentation to reflect changes\",\"priority\":\"Low\",\"assignees\":[\"Carol\"],\"dependencies\":[2]}]}" + "name": "autoticketermodels_actionitems", + "arguments": "{\"items\":[{\"id\":1,\"name\":\"Improve Authentication System\",\"description\":\"Work on front-end revamp and back-end optimization for the authentication system\",\"priority\":\"High\",\"assignees\":[\"Bob\",\"Carol\"],\"subtasks\":[{\"id\":2,\"name\":\"Front-end 
Revamp\"},{\"id\":3,\"name\":\"Back-end Optimization\"}],\"dependencies\":null},{\"id\":4,\"name\":\"Integrate Authentication with Billing System\",\"description\":\"Integrate the improved authentication system with the new billing system\",\"priority\":\"Medium\",\"assignees\":[\"Bob\"],\"subtasks\":null,\"dependencies\":[1]},{\"id\":5,\"name\":\"Update User Documentation\",\"description\":\"Update user documentation to reflect changes in authentication and billing systems\",\"priority\":\"Low\",\"assignees\":[\"Carol\"],\"subtasks\":null,\"dependencies\":[2]}]}" } } - ] + ], + "refusal": null, + "annotations": [] }, "logprobs": null, "finish_reason": "tool_calls" } ], "usage": { - "prompt_tokens": 525, - "completion_tokens": 147, - "total_tokens": 672 + "prompt_tokens": 534, + "completion_tokens": 174, + "total_tokens": 708, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } }, - "system_fingerprint": "fp_c2295e73ad" + "service_tier": "default", + "system_fingerprint": null } - recorded_at: Fri, 12 Apr 2024 18:42:05 GMT -recorded_with: VCR 6.2.0 + recorded_at: Thu, 02 Oct 2025 18:26:47 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/features/anthropic/basic_spec/valid_response.yml b/spec/vcr_cassettes/features/anthropic/basic_spec/valid_response.yml new file mode 100644 index 0000000..6c33efb --- /dev/null +++ b/spec/vcr_cassettes/features/anthropic/basic_spec/valid_response.yml @@ -0,0 +1,83 @@ +--- +http_interactions: +- request: + method: post + uri: https://api.anthropic.com/v1/messages + body: + encoding: UTF-8 + string: '{"model":"claude-sonnet-4-20250514","messages":[{"role":"user","content":"Extract + Jason is 25 years old"}],"max_tokens":1024,"tools":[{"name":"user","description":"Correctly + extracted `User` with all the required parameters with correct 
types","input_schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"additionalProperties":false,"required":["name","age"]}}],"tool_choice":{"type":"tool","name":"user"}}' + headers: + X-Api-Key: + - "" + Anthropic-Version: + - '2023-06-01' + Content-Type: + - application/json + Accept-Encoding: + - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 + Accept: + - "*/*" + User-Agent: + - Ruby + response: + status: + code: 200 + message: OK + headers: + Date: + - Thu, 02 Oct 2025 18:23:59 GMT + Content-Type: + - application/json + Transfer-Encoding: + - chunked + Connection: + - keep-alive + Anthropic-Ratelimit-Input-Tokens-Limit: + - '30000' + Anthropic-Ratelimit-Input-Tokens-Remaining: + - '30000' + Anthropic-Ratelimit-Input-Tokens-Reset: + - '2025-10-02T18:23:59Z' + Anthropic-Ratelimit-Output-Tokens-Limit: + - '8000' + Anthropic-Ratelimit-Output-Tokens-Remaining: + - '8000' + Anthropic-Ratelimit-Output-Tokens-Reset: + - '2025-10-02T18:24:00Z' + Anthropic-Ratelimit-Requests-Limit: + - '50' + Anthropic-Ratelimit-Requests-Remaining: + - '49' + Anthropic-Ratelimit-Requests-Reset: + - '2025-10-02T18:23:59Z' + Anthropic-Ratelimit-Tokens-Limit: + - '38000' + Anthropic-Ratelimit-Tokens-Remaining: + - '38000' + Anthropic-Ratelimit-Tokens-Reset: + - '2025-10-02T18:23:59Z' + Request-Id: + - req_011CTiqdXbEgyTzEgLrUr9i8 + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + Anthropic-Organization-Id: + - ae455ef8-0ea5-4779-a1ff-1228616561d2 + X-Envoy-Upstream-Service-Time: + - '1552' + Via: + - 1.1 google + Cf-Cache-Status: + - DYNAMIC + X-Robots-Tag: + - none + Server: + - cloudflare + Cf-Ray: + - 98863be43c0ab6f2-BOG + body: + encoding: ASCII-8BIT + string: 
'{"id":"msg_015ovaRcwJExGq8jDeKpYvyu","type":"message","role":"assistant","model":"claude-sonnet-4-20250514","content":[{"type":"tool_use","id":"toolu_013RZsUB5nPpn3kEQFWehLAZ","name":"user","input":{"name":"Jason","age":25}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":413,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":50,"service_tier":"standard"}}' + recorded_at: Thu, 02 Oct 2025 18:23:59 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/basic_spec/valid_response.yml b/spec/vcr_cassettes/features/openai/basic_spec/valid_response.yml similarity index 53% rename from spec/vcr_cassettes/basic_spec/valid_response.yml rename to spec/vcr_cassettes/features/openai/basic_spec/valid_response.yml index 2accdd6..f4344c6 100644 --- a/spec/vcr_cassettes/basic_spec/valid_response.yml +++ b/spec/vcr_cassettes/features/openai/basic_spec/valid_response.yml @@ -6,8 +6,8 @@ http_interactions: body: encoding: UTF-8 string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"Extract - Jason is 25 years old"}],"tools":[{"type":"function","function":{"name":"User","description":"Correctly - extracted `User` with all the required parameters with correct types","parameters":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}}],"tool_choice":{"type":"function","function":{"name":"User"}}}' + Jason is 25 years old"}],"tools":[{"type":"function","function":{"name":"user","description":"Correctly + extracted `User` with all the required parameters with correct types","parameters":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"additionalProperties":false,"required":["name","age"]}}}],"tool_choice":"auto"}' headers: Content-Type: - application/json @@ -25,56 +25,66 @@ http_interactions: message: OK headers: Date: - - Fri, 17 May 2024 21:43:36 
GMT + - Thu, 02 Oct 2025 17:44:33 GMT Content-Type: - application/json Transfer-Encoding: - chunked Connection: - keep-alive + Access-Control-Expose-Headers: + - X-Request-ID Openai-Organization: - user-jtftkqrbreteg5pmdrfzchv6 Openai-Processing-Ms: - - '279' + - '364' + Openai-Project: + - proj_2AuZKbF2zHX6djDZFwWGbqic Openai-Version: - '2020-10-01' - Strict-Transport-Security: - - max-age=15724800; includeSubDomains + X-Envoy-Upstream-Service-Time: + - '672' X-Ratelimit-Limit-Requests: - '10000' X-Ratelimit-Limit-Tokens: - - '60000' + - '200000' X-Ratelimit-Remaining-Requests: - '9999' X-Ratelimit-Remaining-Tokens: - - '59975' + - '199990' X-Ratelimit-Reset-Requests: - 8.64s X-Ratelimit-Reset-Tokens: - - 25ms + - 3ms X-Request-Id: - - req_0beb8aa55a830e8bd7e8eed01a05f3c0 + - req_0079f4705016431fb3753c0179a64e32 + X-Openai-Proxy-Wasm: + - v0.1 Cf-Cache-Status: - DYNAMIC Set-Cookie: - - __cf_bm=XgFm9EbhV9_dCJnOyPugQGI.kXwrbCdRMOUM.aYIj44-1715982216-1.0.1.1-73M1D9t7hChSuX90po2Iyk26I1LElUZMiexlToP_fUTSu5kWd4KCfUbThlUttI2K0ZX4gHtk1JR13lJ6Au4oKQ; - path=/; expires=Fri, 17-May-24 22:13:36 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=0Ef.5_4EJlQngDf4FsMIpn2Niz_wrbftieqPzB6MZ.U-1759427073-1.0.1.1-cUW32ZUngQGiGectXpmrl3GsbVmfYLjYE0rfGNacu97oMOnJlLkqkDNahXNJxxakkXtZHqvdfvnC_XzhD_9drBs5wN6tzL8SuxuuMkJ3kTQ; + path=/; expires=Thu, 02-Oct-25 18:14:33 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=l1RnAIP4_bCW3R17g4Yh9oJuKE.stp0kZcHGI_O8ddE-1715982216818-0.0.1.1-604800000; + - _cfuvid=IeJL8C.9IbwwQlnIInrmqShndsK27ugGy6akONGcC0Y-1759427073432-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + X-Content-Type-Options: + - nosniff Server: - cloudflare Cf-Ray: - - 8856c8b47de36db3-MIA + - 988602220c0fcdcd-LIM Alt-Svc: - h3=":443"; ma=86400 body: encoding: ASCII-8BIT string: | { - "id": "chatcmpl-9PzeSAbfLl2wMV4tuBGPZjIW35bHD", + "id": 
"chatcmpl-CMHdx3eTlqxPa7izBgYKATRSkMD96", "object": "chat.completion", - "created": 1715982216, + "created": 1759427073, "model": "gpt-3.5-turbo-0125", "choices": [ { @@ -84,25 +94,38 @@ http_interactions: "content": null, "tool_calls": [ { - "id": "call_6t9IRWIHRRjInUG3kMAflgRi", + "id": "call_1A79rpLlJGYwWINOUdXEJIhH", "type": "function", "function": { - "name": "User", - "arguments": "{\"name\":\"Jason\",\"age\":25}" + "name": "user", + "arguments": "{\"name\": \"Jason\", \"age\": 25}" } } - ] + ], + "refusal": null, + "annotations": [] }, "logprobs": null, - "finish_reason": "stop" + "finish_reason": "tool_calls" } ], "usage": { - "prompt_tokens": 71, - "completion_tokens": 9, - "total_tokens": 80 + "prompt_tokens": 63, + "completion_tokens": 32, + "total_tokens": 95, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } }, + "service_tier": "default", "system_fingerprint": null } - recorded_at: Fri, 17 May 2024 21:43:36 GMT -recorded_with: VCR 6.2.0 + recorded_at: Thu, 02 Oct 2025 17:44:33 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/iterable_spec/valid_response.yml b/spec/vcr_cassettes/iterable_spec/valid_response.yml index d10a011..7022dc2 100644 --- a/spec/vcr_cassettes/iterable_spec/valid_response.yml +++ b/spec/vcr_cassettes/iterable_spec/valid_response.yml @@ -7,8 +7,8 @@ http_interactions: encoding: UTF-8 string: '{"model":"gpt-3.5-turbo","messages":[{"role":"system","content":"Extract the names and ages of all the users"},{"role":"user","content":"Extract `Jason - is 25 and Peter is 32`"}],"tools":[{"type":"function","function":{"name":"Users","description":"Correctly - extracted `Users` with all the required parameters with correct 
types","parameters":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}}],"tool_choice":"required"}' + is 25 and Peter is 32`"}],"tools":[{"type":"function","function":{"name":"users","description":"Correctly + extracted `Users` with all the required parameters with correct types","parameters":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"additionalProperties":false,"required":["name","age"]}}}],"tool_choice":"auto"}' headers: Content-Type: - application/json @@ -26,56 +26,66 @@ http_interactions: message: OK headers: Date: - - Sat, 18 May 2024 01:35:47 GMT + - Thu, 02 Oct 2025 17:54:18 GMT Content-Type: - application/json Transfer-Encoding: - chunked Connection: - keep-alive + Access-Control-Expose-Headers: + - X-Request-ID Openai-Organization: - user-jtftkqrbreteg5pmdrfzchv6 Openai-Processing-Ms: - - '901' + - '470' + Openai-Project: + - proj_2AuZKbF2zHX6djDZFwWGbqic Openai-Version: - '2020-10-01' - Strict-Transport-Security: - - max-age=15724800; includeSubDomains + X-Envoy-Upstream-Service-Time: + - '973' X-Ratelimit-Limit-Requests: - '10000' X-Ratelimit-Limit-Tokens: - - '60000' + - '200000' X-Ratelimit-Remaining-Requests: - '9999' X-Ratelimit-Remaining-Tokens: - - '59962' + - '199977' X-Ratelimit-Reset-Requests: - 8.64s X-Ratelimit-Reset-Tokens: - - 38ms + - 6ms X-Request-Id: - - req_e541e0199ba8ab4969cebaa93ecf7621 + - req_1b4ba4b1233645d5bd6a3810b2569021 + X-Openai-Proxy-Wasm: + - v0.1 Cf-Cache-Status: - DYNAMIC Set-Cookie: - - __cf_bm=Vw9tLN_5v0HADGwDxZlp_Xsteou0sxwUhw2uxhb2w.k-1715996147-1.0.1.1-44Ok_xULGiNWAg1PtLfVhxfNW1n4v.YZwAxEAFUFk7R2vJwYJA0bOiew0M7VI.F3mFVplHj4A_VfVZgbXyOm1Q; - path=/; expires=Sat, 18-May-24 02:05:47 GMT; domain=.api.openai.com; HttpOnly; + - __cf_bm=T26qsAL1bpG8M5FFG.CqOIaxHDI4LGNK_4g7tB5mOkM-1759427658-1.0.1.1-7zTw8L822FFCfltgm9ArMTQVTt31KKq3nr5_BM.D_F38n_91whw19VlSIG7Vaa15.flCS.ohgfCX.HjH4mCPsEM61YIG0890mT0XPU_1X7o; + path=/; 
expires=Thu, 02-Oct-25 18:24:18 GMT; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - - _cfuvid=798B92b1HxrrOu9V8vLMsh5LvISIS._fwZvtGrxmwzs-1715996147279-0.0.1.1-604800000; + - _cfuvid=MWbjt_1N5c5cTugcUUPPFMwdRm4EPg7kDV0tal9xfks-1759427658692-0.0.1.1-604800000; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + X-Content-Type-Options: + - nosniff Server: - cloudflare Cf-Ray: - - 88581cc9aeae335e-MIA + - 98861068ebfab519-SCL Alt-Svc: - h3=":443"; ma=86400 body: encoding: ASCII-8BIT string: | { - "id": "chatcmpl-9Q3H8gCBtHqh2I1IS8JRDnVEhy5vM", + "id": "chatcmpl-CMHnOZQ8vPcC3rSJDgNtDZL48e7Sp", "object": "chat.completion", - "created": 1715996146, + "created": 1759427658, "model": "gpt-3.5-turbo-0125", "choices": [ { @@ -85,33 +95,46 @@ http_interactions: "content": null, "tool_calls": [ { - "id": "call_ZjPBhmR9P86MQv1GuKct4pyx", + "id": "call_onWASUOpPDGJcpYWLLeXBFI4", "type": "function", "function": { - "name": "Users", + "name": "users", "arguments": "{\"name\": \"Jason\", \"age\": 25}" } }, { - "id": "call_yaCW68EMq8VYIACaXdAZrc9z", + "id": "call_Gk66l0lk2Ll41PDE3gAVQOjx", "type": "function", "function": { - "name": "Users", + "name": "users", "arguments": "{\"name\": \"Peter\", \"age\": 32}" } } - ] + ], + "refusal": null, + "annotations": [] }, "logprobs": null, - "finish_reason": "stop" + "finish_reason": "tool_calls" } ], "usage": { - "prompt_tokens": 80, - "completion_tokens": 47, - "total_tokens": 127 + "prompt_tokens": 78, + "completion_tokens": 49, + "total_tokens": 127, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } }, + "service_tier": "default", "system_fingerprint": null } - recorded_at: Sat, 18 May 2024 01:35:47 GMT -recorded_with: VCR 6.2.0 + recorded_at: Thu, 02 Oct 2025 
17:54:18 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/openai/patch/invalid_response.yml b/spec/vcr_cassettes/openai/patch/invalid_response.yml new file mode 100644 index 0000000..96d589c --- /dev/null +++ b/spec/vcr_cassettes/openai/patch/invalid_response.yml @@ -0,0 +1,120 @@ +--- +http_interactions: +- request: + method: post + uri: https://api.openai.com/v1/chat/completions + body: + encoding: UTF-8 + string: '{"model":"gpt-4o-2024-08-06","messages":[{"role":"user","content":"Extract + Jason is 25 years old"}],"response_format":{"type":"json_schema","json_schema":{"name":"invalidmodel","schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"string"}},"additionalProperties":false,"required":["name","age"]},"strict":true}}}' + headers: + Content-Type: + - application/json + Authorization: + - Bearer + Accept-Encoding: + - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 + Accept: + - "*/*" + User-Agent: + - Ruby + response: + status: + code: 200 + message: OK + headers: + Date: + - Thu, 02 Oct 2025 18:26:54 GMT + Content-Type: + - application/json + Transfer-Encoding: + - chunked + Connection: + - keep-alive + Access-Control-Expose-Headers: + - X-Request-ID + Openai-Organization: + - user-jtftkqrbreteg5pmdrfzchv6 + Openai-Processing-Ms: + - '688' + Openai-Project: + - proj_2AuZKbF2zHX6djDZFwWGbqic + Openai-Version: + - '2020-10-01' + X-Envoy-Upstream-Service-Time: + - '982' + X-Ratelimit-Limit-Requests: + - '500' + X-Ratelimit-Limit-Tokens: + - '30000' + X-Ratelimit-Remaining-Requests: + - '499' + X-Ratelimit-Remaining-Tokens: + - '29990' + X-Ratelimit-Reset-Requests: + - 120ms + X-Ratelimit-Reset-Tokens: + - 20ms + X-Request-Id: + - req_72221d443be24c55bd6e9c6643114c8c + X-Openai-Proxy-Wasm: + - v0.1 + Cf-Cache-Status: + - DYNAMIC + Set-Cookie: + - __cf_bm=EppHJY0jbSzYTfcxIOgBx.arD35mxWyNDM0bpWK3RFw-1759429614-1.0.1.1-_nIgd0puynbXVghNULQB9jQpBcTy9uVvCT.mYyRfOSzhLp0ryDrfo8yt2b0NdFVToTYuNO_hFcuB.gzid33W8XZJXNAryLLupP4R5g76LAk; + 
path=/; expires=Thu, 02-Oct-25 18:56:54 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=s7EqU8O8P9e2FBM.v0iODN8KjDqKzAZXByvJBVY35vg-1759429614425-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + X-Content-Type-Options: + - nosniff + Server: + - cloudflare + Cf-Ray: + - 98864027a815cdcd-LIM + Alt-Svc: + - h3=":443"; ma=86400 + body: + encoding: ASCII-8BIT + string: | + { + "id": "chatcmpl-CMIIvCjhOlibDNxidyIMGFGuoUahd", + "object": "chat.completion", + "created": 1759429613, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": null, + "refusal": "I'm sorry, I can't assist with that request.", + "annotations": [] + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 51, + "completion_tokens": 9, + "total_tokens": 60, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": "fp_f33640a400" + } + recorded_at: Thu, 02 Oct 2025 18:26:54 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/openai/patch/standard_usage.yml b/spec/vcr_cassettes/openai/patch/standard_usage.yml new file mode 100644 index 0000000..0cec4dc --- /dev/null +++ b/spec/vcr_cassettes/openai/patch/standard_usage.yml @@ -0,0 +1,120 @@ +--- +http_interactions: +- request: + method: post + uri: https://api.openai.com/v1/chat/completions + body: + encoding: UTF-8 + string: '{"model":"gpt-4o-2024-08-06","messages":[{"role":"user","content":"How + is the weather today in New York?"}]}' + headers: + Content-Type: + - application/json + Authorization: + - Bearer + Accept-Encoding: + - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 + Accept: + - 
"*/*" + User-Agent: + - Ruby + response: + status: + code: 200 + message: OK + headers: + Date: + - Thu, 02 Oct 2025 18:26:55 GMT + Content-Type: + - application/json + Transfer-Encoding: + - chunked + Connection: + - keep-alive + Access-Control-Expose-Headers: + - X-Request-ID + Openai-Organization: + - user-jtftkqrbreteg5pmdrfzchv6 + Openai-Processing-Ms: + - '971' + Openai-Project: + - proj_2AuZKbF2zHX6djDZFwWGbqic + Openai-Version: + - '2020-10-01' + X-Envoy-Upstream-Service-Time: + - '1012' + X-Ratelimit-Limit-Requests: + - '500' + X-Ratelimit-Limit-Tokens: + - '30000' + X-Ratelimit-Remaining-Requests: + - '499' + X-Ratelimit-Remaining-Tokens: + - '29988' + X-Ratelimit-Reset-Requests: + - 120ms + X-Ratelimit-Reset-Tokens: + - 24ms + X-Request-Id: + - req_476eef1d647745fcb76e4390f93797df + X-Openai-Proxy-Wasm: + - v0.1 + Cf-Cache-Status: + - DYNAMIC + Set-Cookie: + - __cf_bm=Wl23zLjsQj._9mi_OsikYU6fIPo8FBPIVAPB0ZX4Znk-1759429615-1.0.1.1-5S7BLGOAyatj9skoyzYeCc8uBJMzy0DD.bH.PaFxxahmCap0lPzPDF8QelD0BarlTUA9TX9kiaSG9JjJ4i2Gz5kNnAwZyH0qDzAcWjuefU0; + path=/; expires=Thu, 02-Oct-25 18:56:55 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=aAGfJwBsKgkdOezAIhi8yyG374vPS9TNP7kwtvooEg8-1759429615857-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + X-Content-Type-Options: + - nosniff + Server: + - cloudflare + Cf-Ray: + - 98864033dd2bcdc9-LIM + Alt-Svc: + - h3=":443"; ma=86400 + body: + encoding: ASCII-8BIT + string: | + { + "id": "chatcmpl-CMIIwutr3dd0Pz9I2YrFnj7wcNkiQ", + "object": "chat.completion", + "created": 1759429614, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "I'm sorry, but I don't have real-time capabilities, so I can't provide current weather updates. 
You might want to check a reliable weather website or app for the most up-to-date information on New York's weather.", + "refusal": null, + "annotations": [] + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 16, + "completion_tokens": 43, + "total_tokens": 59, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": "fp_f33640a400" + } + recorded_at: Thu, 02 Oct 2025 18:26:55 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/openai/patch/valid_function_calling_response.yml b/spec/vcr_cassettes/openai/patch/valid_function_calling_response.yml new file mode 100644 index 0000000..a62496c --- /dev/null +++ b/spec/vcr_cassettes/openai/patch/valid_function_calling_response.yml @@ -0,0 +1,131 @@ +--- +http_interactions: +- request: + method: post + uri: https://api.openai.com/v1/chat/completions + body: + encoding: UTF-8 + string: '{"model":"gpt-4o-2024-08-06","messages":[{"role":"user","content":"Extract + Jason is 25 years old"}],"tools":[{"type":"function","function":{"name":"someuser","description":"Extract + the user''s name and age.","parameters":{"type":"object","title":"SomeUser","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"additionalProperties":false,"required":["name","age"]}}}],"tool_choice":"auto"}' + headers: + Content-Type: + - application/json + Authorization: + - Bearer + Accept-Encoding: + - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 + Accept: + - "*/*" + User-Agent: + - Ruby + response: + status: + code: 200 + message: OK + headers: + Date: + - Thu, 02 Oct 2025 18:26:52 GMT + Content-Type: + - application/json + Transfer-Encoding: + - chunked + Connection: + - keep-alive + Access-Control-Expose-Headers: + - X-Request-ID + Openai-Organization: + - 
user-jtftkqrbreteg5pmdrfzchv6 + Openai-Processing-Ms: + - '710' + Openai-Project: + - proj_2AuZKbF2zHX6djDZFwWGbqic + Openai-Version: + - '2020-10-01' + X-Envoy-Upstream-Service-Time: + - '1027' + X-Ratelimit-Limit-Requests: + - '500' + X-Ratelimit-Limit-Tokens: + - '30000' + X-Ratelimit-Remaining-Requests: + - '499' + X-Ratelimit-Remaining-Tokens: + - '29990' + X-Ratelimit-Reset-Requests: + - 120ms + X-Ratelimit-Reset-Tokens: + - 20ms + X-Request-Id: + - req_aee091027c2b4f2a9c0a1c867bf5bd62 + X-Openai-Proxy-Wasm: + - v0.1 + Cf-Cache-Status: + - DYNAMIC + Set-Cookie: + - __cf_bm=Z2Js4xNfSChgn2OknbCwGEi_7JpfiFpksx8sTQje4S0-1759429612-1.0.1.1-c227WIDGRSDhrzaew2NPfO2AyDxBEsyLZ1sxD75_kLkFcek6hlCVXvGItYvOSi3D3Lz.IX9ieRUp8lK6TlWhliIgC6Yz2uQETJrR3zmHSWI; + path=/; expires=Thu, 02-Oct-25 18:56:52 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=YjBe9jNTCqJUGM0g5vYGvPHsRWKvabWIFf81zosGxUI-1759429612504-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + X-Content-Type-Options: + - nosniff + Server: + - cloudflare + Cf-Ray: + - 9886401c7b833d82-LIM + Alt-Svc: + - h3=":443"; ma=86400 + body: + encoding: ASCII-8BIT + string: | + { + "id": "chatcmpl-CMIItvQtT3bXTJptkIELHBTUKP7Hn", + "object": "chat.completion", + "created": 1759429611, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": null, + "tool_calls": [ + { + "id": "call_Hn8OyfibX1myrSsqjyGdO5rV", + "type": "function", + "function": { + "name": "someuser", + "arguments": "{\"name\":\"Jason\",\"age\":25}" + } + } + ], + "refusal": null, + "annotations": [] + }, + "logprobs": null, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 59, + "completion_tokens": 18, + "total_tokens": 77, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + 
"reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": "fp_1827dd0c55" + } + recorded_at: Thu, 02 Oct 2025 18:26:52 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/openai/patch/valid_response.yml b/spec/vcr_cassettes/openai/patch/valid_response.yml new file mode 100644 index 0000000..125c231 --- /dev/null +++ b/spec/vcr_cassettes/openai/patch/valid_response.yml @@ -0,0 +1,120 @@ +--- +http_interactions: +- request: + method: post + uri: https://api.openai.com/v1/chat/completions + body: + encoding: UTF-8 + string: '{"model":"gpt-4o-2024-08-06","messages":[{"role":"user","content":"Extract + Jason is 25 years old"}],"response_format":{"type":"json_schema","json_schema":{"name":"someuser","schema":{"type":"object","title":"SomeUser","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"additionalProperties":false,"required":["name","age"]},"strict":true}}}' + headers: + Content-Type: + - application/json + Authorization: + - Bearer + Accept-Encoding: + - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 + Accept: + - "*/*" + User-Agent: + - Ruby + response: + status: + code: 200 + message: OK + headers: + Date: + - Thu, 02 Oct 2025 18:26:49 GMT + Content-Type: + - application/json + Transfer-Encoding: + - chunked + Connection: + - keep-alive + Access-Control-Expose-Headers: + - X-Request-ID + Openai-Organization: + - user-jtftkqrbreteg5pmdrfzchv6 + Openai-Processing-Ms: + - '548' + Openai-Project: + - proj_2AuZKbF2zHX6djDZFwWGbqic + Openai-Version: + - '2020-10-01' + X-Envoy-Upstream-Service-Time: + - '885' + X-Ratelimit-Limit-Requests: + - '500' + X-Ratelimit-Limit-Tokens: + - '30000' + X-Ratelimit-Remaining-Requests: + - '499' + X-Ratelimit-Remaining-Tokens: + - '29990' + X-Ratelimit-Reset-Requests: + - 120ms + X-Ratelimit-Reset-Tokens: + - 20ms + X-Request-Id: + - req_d618253540094960a181d7db1d74a4ff + X-Openai-Proxy-Wasm: + - 
v0.1 + Cf-Cache-Status: + - DYNAMIC + Set-Cookie: + - __cf_bm=MmnlDInXLbVCQf3_HNGC6pToXEnCv5m6OwXHJ7nXF8c-1759429609-1.0.1.1-OwnRpxUlj1ulDNWd_oljcOMDDjZCtcHKgwqOILgPJ1Q5wiFbq.6otO4c0Ge0qlDq2oKzcvikDZzisXTghSzERkfBoFfZBeY3S7VkVqfY4fc; + path=/; expires=Thu, 02-Oct-25 18:56:49 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=ZG7t57fXKlrqEgI.CcGZVcWoFLrOdrcGMf0Nl8Vt_Do-1759429609264-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + X-Content-Type-Options: + - nosniff + Server: + - cloudflare + Cf-Ray: + - 98864006fa14a9ea-LIM + Alt-Svc: + - h3=":443"; ma=86400 + body: + encoding: ASCII-8BIT + string: | + { + "id": "chatcmpl-CMIIqbeMLhL5kbVjDJzvVrTXVe9tL", + "object": "chat.completion", + "created": 1759429608, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{\"name\":\"Jason\",\"age\":25}", + "refusal": null, + "annotations": [] + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 56, + "completion_tokens": 9, + "total_tokens": 65, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": "fp_f33640a400" + } + recorded_at: Thu, 02 Oct 2025 18:26:49 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/openai/patch/with_validation_context.yml b/spec/vcr_cassettes/openai/patch/with_validation_context.yml new file mode 100644 index 0000000..856c294 --- /dev/null +++ b/spec/vcr_cassettes/openai/patch/with_validation_context.yml @@ -0,0 +1,121 @@ +--- +http_interactions: +- request: + method: post + uri: https://api.openai.com/v1/chat/completions + body: + encoding: UTF-8 + string: 
'{"model":"gpt-4o-2024-08-06","messages":[{"role":"user","content":"Answer + the question: What is your name and age? with the text chunk: my name is Jason + and I turned 25 years old yesterday"}],"response_format":{"type":"json_schema","json_schema":{"name":"someuser","schema":{"type":"object","title":"SomeUser","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"additionalProperties":false,"required":["name","age"]},"strict":true}}}' + headers: + Content-Type: + - application/json + Authorization: + - Bearer + Accept-Encoding: + - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 + Accept: + - "*/*" + User-Agent: + - Ruby + response: + status: + code: 200 + message: OK + headers: + Date: + - Thu, 02 Oct 2025 18:26:50 GMT + Content-Type: + - application/json + Transfer-Encoding: + - chunked + Connection: + - keep-alive + Access-Control-Expose-Headers: + - X-Request-ID + Openai-Organization: + - user-jtftkqrbreteg5pmdrfzchv6 + Openai-Processing-Ms: + - '521' + Openai-Project: + - proj_2AuZKbF2zHX6djDZFwWGbqic + Openai-Version: + - '2020-10-01' + X-Envoy-Upstream-Service-Time: + - '703' + X-Ratelimit-Limit-Requests: + - '500' + X-Ratelimit-Limit-Tokens: + - '30000' + X-Ratelimit-Remaining-Requests: + - '499' + X-Ratelimit-Remaining-Tokens: + - '29967' + X-Ratelimit-Reset-Requests: + - 120ms + X-Ratelimit-Reset-Tokens: + - 66ms + X-Request-Id: + - req_585b962d84f74d7296edacb1b1538544 + X-Openai-Proxy-Wasm: + - v0.1 + Cf-Cache-Status: + - DYNAMIC + Set-Cookie: + - __cf_bm=os9ngTYy.bEv0rdOe3O70TSBGVGPP.qnocDz4K4rWnA-1759429610-1.0.1.1-iGBSVrlfb_rkWRdWEXNfzKOSBlnR2VTkxvMbftyFkVVDGW3mAKCXAGzakc_W3SA.ZuhDSUmwl0emvPBisco40uSbyUdoDRrQuWGAd574k5A; + path=/; expires=Thu, 02-Oct-25 18:56:50 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=Wtpo_G2tLZxKHnGz0VxyPUHzN8HNbx.o4myPXvoMAN8-1759429610700-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; 
preload + X-Content-Type-Options: + - nosniff + Server: + - cloudflare + Cf-Ray: + - 98864013f8e6e99f-SCL + Alt-Svc: + - h3=":443"; ma=86400 + body: + encoding: ASCII-8BIT + string: | + { + "id": "chatcmpl-CMIIs7bsngTsytJ6U3qdl377zUKHy", + "object": "chat.completion", + "created": 1759429610, + "model": "gpt-4o-2024-08-06", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{\"name\":\"Jason\",\"age\":25}", + "refusal": null, + "annotations": [] + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 77, + "completion_tokens": 9, + "total_tokens": 86, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + "system_fingerprint": "fp_f33640a400" + } + recorded_at: Thu, 02 Oct 2025 18:26:50 GMT +recorded_with: VCR 6.3.1 diff --git a/spec/vcr_cassettes/patching_spec/invalid_response.yml b/spec/vcr_cassettes/patching_spec/invalid_response.yml deleted file mode 100644 index 2cefe2a..0000000 --- a/spec/vcr_cassettes/patching_spec/invalid_response.yml +++ /dev/null @@ -1,108 +0,0 @@ ---- -http_interactions: -- request: - method: post - uri: https://api.openai.com/v1/chat/completions - body: - encoding: UTF-8 - string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"Extract - Jason is 25 years old"}],"tools":[{"type":"function","function":{"name":"InvalidModel","description":"Correctly - extracted `InvalidModel` with all the required parameters with correct types","parameters":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"string"}},"required":["name","age"]}}}],"tool_choice":{"type":"function","function":{"name":"InvalidModel"}}}' - headers: - Content-Type: - - application/json - Authorization: - - Bearer - Accept-Encoding: - - 
gzip;q=1.0,deflate;q=0.6,identity;q=0.3 - Accept: - - "*/*" - User-Agent: - - Ruby - response: - status: - code: 200 - message: OK - headers: - Date: - - Fri, 17 May 2024 21:38:40 GMT - Content-Type: - - application/json - Transfer-Encoding: - - chunked - Connection: - - keep-alive - Openai-Organization: - - user-jtftkqrbreteg5pmdrfzchv6 - Openai-Processing-Ms: - - '464' - Openai-Version: - - '2020-10-01' - Strict-Transport-Security: - - max-age=15724800; includeSubDomains - X-Ratelimit-Limit-Requests: - - '10000' - X-Ratelimit-Limit-Tokens: - - '60000' - X-Ratelimit-Remaining-Requests: - - '9997' - X-Ratelimit-Remaining-Tokens: - - '59975' - X-Ratelimit-Reset-Requests: - - 24.331s - X-Ratelimit-Reset-Tokens: - - 25ms - X-Request-Id: - - req_bc8846f72bc7e45aea0089199393efb5 - Cf-Cache-Status: - - DYNAMIC - Set-Cookie: - - __cf_bm=SN9Aw.9Y6MoQjya5MwMHqAccv0nbP0idUZewJvSCYSA-1715981920-1.0.1.1-tLf4tiuhqDimwnjiJh23BSsopzWUVqlcWRMmkmlEpnOPseRbPQUcDZPk.854YQh8SbisfZlF3eif8Ny5eEDUZw; - path=/; expires=Fri, 17-May-24 22:08:40 GMT; domain=.api.openai.com; HttpOnly; - Secure; SameSite=None - - _cfuvid=hJJnzWXK76oChvkPWG7J4e8h4Ib9K4tdSN1o14Ku07c-1715981920951-0.0.1.1-604800000; - path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - Server: - - cloudflare - Cf-Ray: - - 8856c17a5d5309c2-MIA - Alt-Svc: - - h3=":443"; ma=86400 - body: - encoding: ASCII-8BIT - string: | - { - "id": "chatcmpl-9PzZgf0aZfhyh1YHH15sZnvUAKr6T", - "object": "chat.completion", - "created": 1715981920, - "model": "gpt-3.5-turbo-0125", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": null, - "tool_calls": [ - { - "id": "call_ZB4NXbxzAdnfPXZQI8FdfQ2q", - "type": "function", - "function": { - "name": "InvalidModel", - "arguments": "{\"name\":\"Jason\",\"age\":25}" - } - } - ] - }, - "logprobs": null, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 74, - "completion_tokens": 9, - "total_tokens": 83 - }, - "system_fingerprint": null - } - 
recorded_at: Fri, 17 May 2024 21:38:40 GMT -recorded_with: VCR 6.2.0 diff --git a/spec/vcr_cassettes/patching_spec/standard_usage.yml b/spec/vcr_cassettes/patching_spec/standard_usage.yml deleted file mode 100644 index a454916..0000000 --- a/spec/vcr_cassettes/patching_spec/standard_usage.yml +++ /dev/null @@ -1,97 +0,0 @@ ---- -http_interactions: -- request: - method: post - uri: https://api.openai.com/v1/chat/completions - body: - encoding: UTF-8 - string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"How - is the weather today in New York?"}]}' - headers: - Content-Type: - - application/json - Authorization: - - Bearer - Accept-Encoding: - - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 - Accept: - - "*/*" - User-Agent: - - Ruby - response: - status: - code: 200 - message: OK - headers: - Date: - - Mon, 20 May 2024 20:18:44 GMT - Content-Type: - - application/json - Transfer-Encoding: - - chunked - Connection: - - keep-alive - Openai-Organization: - - user-jtftkqrbreteg5pmdrfzchv6 - Openai-Processing-Ms: - - '1141' - Openai-Version: - - '2020-10-01' - Strict-Transport-Security: - - max-age=15724800; includeSubDomains - X-Ratelimit-Limit-Requests: - - '10000' - X-Ratelimit-Limit-Tokens: - - '60000' - X-Ratelimit-Remaining-Requests: - - '9999' - X-Ratelimit-Remaining-Tokens: - - '59973' - X-Ratelimit-Reset-Requests: - - 8.64s - X-Ratelimit-Reset-Tokens: - - 27ms - X-Request-Id: - - req_9cd156b89ffbc49d042594df684cd71c - Cf-Cache-Status: - - DYNAMIC - Set-Cookie: - - __cf_bm=iw7.dE814kHN0QINKJy3dRtjL44VSwwBZFyDHvN2pNo-1716236324-1.0.1.1-_sZi0S6Fm2fn7rSXA7NQ22IbWSrA0yBXtvFZ7BgT06q2J6upyZqiU7WDAqYfK3_DCCsi.xd2.aDu1erMGdb9dg; - path=/; expires=Mon, 20-May-24 20:48:44 GMT; domain=.api.openai.com; HttpOnly; - Secure; SameSite=None - - _cfuvid=l.4Wi13joGQwrPn0H.rLjBrxZJCHqY15_r5zTRua9H0-1716236324578-0.0.1.1-604800000; - path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - Server: - - cloudflare - Cf-Ray: - - 886f047cea6374ac-MIA - Alt-Svc: - - 
h3=":443"; ma=86400 - body: - encoding: ASCII-8BIT - string: | - { - "id": "chatcmpl-9R3kx1j8twG05UJGM5ReVEpjUUHtL", - "object": "chat.completion", - "created": 1716236323, - "model": "gpt-3.5-turbo-0125", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "I'm sorry, I cannot provide real-time weather information. I recommend checking a reliable weather website or app for the most up-to-date weather conditions in New York." - }, - "logprobs": null, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 16, - "completion_tokens": 33, - "total_tokens": 49 - }, - "system_fingerprint": null - } - recorded_at: Mon, 20 May 2024 20:18:44 GMT -recorded_with: VCR 6.2.0 diff --git a/spec/vcr_cassettes/patching_spec/valid_response.yml b/spec/vcr_cassettes/patching_spec/valid_response.yml deleted file mode 100644 index d89112e..0000000 --- a/spec/vcr_cassettes/patching_spec/valid_response.yml +++ /dev/null @@ -1,108 +0,0 @@ ---- -http_interactions: -- request: - method: post - uri: https://api.openai.com/v1/chat/completions - body: - encoding: UTF-8 - string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"Extract - Jason is 25 years old"}],"tools":[{"type":"function","function":{"name":"SomeUser","description":"Extract - the user''s name and age.","parameters":{"type":"object","title":"SomeUser","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}}],"tool_choice":{"type":"function","function":{"name":"SomeUser"}}}' - headers: - Content-Type: - - application/json - Authorization: - - Bearer - Accept-Encoding: - - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 - Accept: - - "*/*" - User-Agent: - - Ruby - response: - status: - code: 200 - message: OK - headers: - Date: - - Fri, 17 May 2024 21:38:39 GMT - Content-Type: - - application/json - Transfer-Encoding: - - chunked - Connection: - - keep-alive - Openai-Organization: - - user-jtftkqrbreteg5pmdrfzchv6 - Openai-Processing-Ms: - - 
'484' - Openai-Version: - - '2020-10-01' - Strict-Transport-Security: - - max-age=15724800; includeSubDomains - X-Ratelimit-Limit-Requests: - - '10000' - X-Ratelimit-Limit-Tokens: - - '60000' - X-Ratelimit-Remaining-Requests: - - '9999' - X-Ratelimit-Remaining-Tokens: - - '59975' - X-Ratelimit-Reset-Requests: - - 8.64s - X-Ratelimit-Reset-Tokens: - - 25ms - X-Request-Id: - - req_5c69bf259dd0f0bb146f5e62bddb82cd - Cf-Cache-Status: - - DYNAMIC - Set-Cookie: - - __cf_bm=oC7QqBACb1oJFAJGRqB4BRd.IS5EGaew.qR54Kf2IbA-1715981919-1.0.1.1-gaQXQc17imcz8t4AXhsl0KkoKTjZwzl9WfJwjaY1BJChjWBBkwmCmFgh9J0KYMCf2Ftiwai.tOTVjye4iTMo7g; - path=/; expires=Fri, 17-May-24 22:08:39 GMT; domain=.api.openai.com; HttpOnly; - Secure; SameSite=None - - _cfuvid=Z9nW6TFZQpVmsPdzvKij3JJHzCgCrePIBUzh.6dEIf4-1715981919401-0.0.1.1-604800000; - path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - Server: - - cloudflare - Cf-Ray: - - 8856c1705d194974-MIA - Alt-Svc: - - h3=":443"; ma=86400 - body: - encoding: ASCII-8BIT - string: | - { - "id": "chatcmpl-9PzZfA5SLIFg19i7NuvQzj2tmiFmZ", - "object": "chat.completion", - "created": 1715981919, - "model": "gpt-3.5-turbo-0125", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": null, - "tool_calls": [ - { - "id": "call_OmMlsCcbpCSf7p9PuJyi3hpg", - "type": "function", - "function": { - "name": "SomeUser", - "arguments": "{\"name\":\"Jason\",\"age\":25}" - } - } - ] - }, - "logprobs": null, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 70, - "completion_tokens": 9, - "total_tokens": 79 - }, - "system_fingerprint": null - } - recorded_at: Fri, 17 May 2024 21:38:39 GMT -recorded_with: VCR 6.2.0 diff --git a/spec/vcr_cassettes/patching_spec/with_validation_context.yml b/spec/vcr_cassettes/patching_spec/with_validation_context.yml deleted file mode 100644 index 0195330..0000000 --- a/spec/vcr_cassettes/patching_spec/with_validation_context.yml +++ /dev/null @@ -1,109 +0,0 @@ ---- -http_interactions: 
-- request: - method: post - uri: https://api.openai.com/v1/chat/completions - body: - encoding: UTF-8 - string: '{"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"Answer - the question: What is your name and age? with the text chunk: my name is Jason - and I turned 25 years old yesterday"}],"tools":[{"type":"function","function":{"name":"SomeUser","description":"Extract - the user''s name and age.","parameters":{"type":"object","title":"SomeUser","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"required":["name","age"]}}}],"tool_choice":{"type":"function","function":{"name":"SomeUser"}}}' - headers: - Content-Type: - - application/json - Authorization: - - Bearer - Accept-Encoding: - - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 - Accept: - - "*/*" - User-Agent: - - Ruby - response: - status: - code: 200 - message: OK - headers: - Date: - - Fri, 17 May 2024 21:38:40 GMT - Content-Type: - - application/json - Transfer-Encoding: - - chunked - Connection: - - keep-alive - Openai-Organization: - - user-jtftkqrbreteg5pmdrfzchv6 - Openai-Processing-Ms: - - '380' - Openai-Version: - - '2020-10-01' - Strict-Transport-Security: - - max-age=15724800; includeSubDomains - X-Ratelimit-Limit-Requests: - - '10000' - X-Ratelimit-Limit-Tokens: - - '60000' - X-Ratelimit-Remaining-Requests: - - '9998' - X-Ratelimit-Remaining-Tokens: - - '59952' - X-Ratelimit-Reset-Requests: - - 16.422s - X-Ratelimit-Reset-Tokens: - - 48ms - X-Request-Id: - - req_b75552a51b70614ba55e34bc8f46df80 - Cf-Cache-Status: - - DYNAMIC - Set-Cookie: - - __cf_bm=f9c6ADDiOU1NPe3yv0iVc04Ol9CdLniiKCNTBo4Uk14-1715981920-1.0.1.1-u0cGWsKcgvFBXiPMxrO30A09FPBs3nQPk.s_FIAALRxQ52Yb0oPM7Jj_TzCbKRs2L03szG9fhbn462mYBWFzhg; - path=/; expires=Fri, 17-May-24 22:08:40 GMT; domain=.api.openai.com; HttpOnly; - Secure; SameSite=None - - _cfuvid=QibJLZjeMze7JqBsIiNn8haLvSu3cGDyhzKtW0yu9Ws-1715981920116-0.0.1.1-604800000; - path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None - Server: - - 
cloudflare - Cf-Ray: - - 8856c175cf710331-MIA - Alt-Svc: - - h3=":443"; ma=86400 - body: - encoding: ASCII-8BIT - string: | - { - "id": "chatcmpl-9PzZfj3nY4z0btlRlNo23t4uooL0p", - "object": "chat.completion", - "created": 1715981919, - "model": "gpt-3.5-turbo-0125", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": null, - "tool_calls": [ - { - "id": "call_ul0rzkJ5DEOx2O52v05JQT5H", - "type": "function", - "function": { - "name": "SomeUser", - "arguments": "{\"name\":\"Jason\",\"age\":25}" - } - } - ] - }, - "logprobs": null, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 91, - "completion_tokens": 9, - "total_tokens": 100 - }, - "system_fingerprint": null - } - recorded_at: Fri, 17 May 2024 21:38:40 GMT -recorded_with: VCR 6.2.0 diff --git a/spec/vcr_cassettes/structured_output/valid_response.yml b/spec/vcr_cassettes/structured_output/valid_response.yml new file mode 100644 index 0000000..bb9b7f2 --- /dev/null +++ b/spec/vcr_cassettes/structured_output/valid_response.yml @@ -0,0 +1,120 @@ +--- +http_interactions: +- request: + method: post + uri: https://api.openai.com/v1/chat/completions + body: + encoding: UTF-8 + string: '{"model":"gpt-4o-mini","messages":[{"role":"user","content":"Extract + Jason is 25 years old"}],"response_format":{"type":"json_schema","json_schema":{"name":"user","schema":{"type":"object","properties":{"name":{"type":"string"},"age":{"type":"integer"}},"additionalProperties":false,"required":["name","age"]},"strict":true}}}' + headers: + Content-Type: + - application/json + Authorization: + - Bearer + Accept-Encoding: + - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 + Accept: + - "*/*" + User-Agent: + - Ruby + response: + status: + code: 200 + message: OK + headers: + Date: + - Thu, 02 Oct 2025 17:54:20 GMT + Content-Type: + - application/json + Transfer-Encoding: + - chunked + Connection: + - keep-alive + Access-Control-Expose-Headers: + - X-Request-ID + Openai-Organization: + - 
user-jtftkqrbreteg5pmdrfzchv6 + Openai-Processing-Ms: + - '667' + Openai-Project: + - proj_2AuZKbF2zHX6djDZFwWGbqic + Openai-Version: + - '2020-10-01' + X-Envoy-Upstream-Service-Time: + - '984' + X-Ratelimit-Limit-Requests: + - '10000' + X-Ratelimit-Limit-Tokens: + - '200000' + X-Ratelimit-Remaining-Requests: + - '9999' + X-Ratelimit-Remaining-Tokens: + - '199990' + X-Ratelimit-Reset-Requests: + - 8.64s + X-Ratelimit-Reset-Tokens: + - 3ms + X-Request-Id: + - req_a887988b6ee1471da8dc658941e86a53 + X-Openai-Proxy-Wasm: + - v0.1 + Cf-Cache-Status: + - DYNAMIC + Set-Cookie: + - __cf_bm=FCZjOE5tRPJaqB4SEnz.w.pvIsE9zKayED3ip5Jj2Xg-1759427660-1.0.1.1-0GXVQxaY1GXTMV8bC1kp7msQQwLsh3f2kaLvB9j_q6ftYLBT9b7HTgL5_QWmRPU79jYBYxb2c0NBrug3p7a8qwLAe0UiZycw.XsciRFiXzw; + path=/; expires=Thu, 02-Oct-25 18:24:20 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=SJ81V28kTYt4dhIdX3ocz7.nDAySK4LEGfDRxTCQOYk-1759427660746-0.0.1.1-604800000; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Strict-Transport-Security: + - max-age=31536000; includeSubDomains; preload + X-Content-Type-Options: + - nosniff + Server: + - cloudflare + Cf-Ray: + - 988610756b83a9ea-LIM + Alt-Svc: + - h3=":443"; ma=86400 + body: + encoding: ASCII-8BIT + string: | + { + "id": "chatcmpl-CMHnQSxZUlfjSQjB8FCK9VYesI180", + "object": "chat.completion", + "created": 1759427660, + "model": "gpt-4o-mini-2024-07-18", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "{\"name\":\"Jason\",\"age\":25}", + "refusal": null, + "annotations": [] + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 49, + "completion_tokens": 9, + "total_tokens": 58, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "service_tier": "default", + 
"system_fingerprint": "fp_560af6e559" + } + recorded_at: Thu, 02 Oct 2025 17:54:20 GMT +recorded_with: VCR 6.3.1