# Streaming

*Nagendra Dhanakeerthi edited this page Oct 30, 2024 · 1 revision*
The Streaming API allows you to receive responses in real-time chunks rather than waiting for the complete response.
# Open a streamed chat completion and echo each text delta as it arrives.
client = ChatGPT::Client.new
story_prompt = [
  { role: "user", content: "Tell me a story" }
]

client.chat_stream(story_prompt) do |chunk|
  # Each chunk carries an incremental "delta"; dig returns nil when absent.
  print chunk.dig("choices", 0, "delta", "content")
end
# Streaming with explicit request options (model, sampling, length cap).
request_options = {
  model: 'gpt-3.5-turbo',
  temperature: 0.7,
  max_tokens: 100
}

client.chat_stream(messages, request_options) do |chunk|
  # Process chunk
end
# Shape of each chunk yielded to the streaming block.
# "content" carries the incremental text delta; "finish_reason" stays nil
# until the final chunk of the stream.
{
"choices" => [{
"delta" => {
"content" => "chunk of text"
},
"index" => 0,
"finish_reason" => nil
}]
}
# Wrap the stream in begin/rescue so API failures surface as a printed
# message instead of an unhandled exception.
begin
  client.chat_stream(messages) do |chunk|
    delta = chunk.dig("choices", 0, "delta", "content")
    print delta
  end
rescue ChatGPT::APIError => e
  puts "Error: #{e.message}"
end
- Buffer Management
# Collect streamed deltas into a StringIO buffer (cheaper than repeated
# string concatenation), then read the whole reply back at the end.
buffer = StringIO.new
client.chat_stream(messages) do |chunk|
  piece = chunk.dig("choices", 0, "delta", "content")
  buffer << piece if piece
end
complete_response = buffer.string
- Progress Indicators
# Animate a "Processing..." indicator while chunks arrive.
#
# NOTE(review): the original example printed each content delta AND then
# emitted "\rProcessing..." — the carriage return overwrote the streamed
# text on the same terminal line, garbling both. A progress indicator must
# own the line; capture content separately (see Buffer Management) if you
# also need the text.
dots = 0
client.chat_stream(messages) do |chunk|
  dots = (dots + 1) % 4
  # Trailing spaces erase leftover dots from the previous frame.
  print "\rProcessing#{'.' * dots}#{' ' * (3 - dots)}"
end
- Error Recovery
# Stream a chat completion, retrying a bounded number of times when the
# API rate-limits the request.
#
# @param messages [Array<Hash>] chat messages to send
# @param max_attempts [Integer] total tries before the error is re-raised
# @yield [chunk] each streamed chunk, forwarded to the caller's block
# @raise [ChatGPT::RateLimitError] when every attempt was rate-limited
#
# NOTE(review): assumes a `client` method/variable is in scope — confirm
# against the surrounding page's setup (`ChatGPT::Client.new`).
def stream_with_retry(messages, max_attempts = 3)
  attempts = 0
  begin
    attempts += 1
    client.chat_stream(messages) do |chunk|
      yield chunk if block_given?
    end
  rescue ChatGPT::RateLimitError
    # The exception binding was unused, so it is dropped. Retry while
    # attempts remain; otherwise bare `raise` re-raises the original error.
    retry if attempts < max_attempts
    raise
  end
end
# Start a streamed completion and print every delta the moment it arrives.
prompt = [
  { role: "user", content: "Tell me a story" }
]
client.chat_stream(prompt) do |chunk|
  delta = chunk.dig("choices", 0, "delta", "content")
  print delta
end
# Accumulate the complete reply while echoing each delta to the terminal.
full_response = ""
client.chat_stream(messages) do |chunk|
  delta = chunk.dig("choices", 0, "delta", "content")
  next unless delta

  full_response += delta
  print delta
end
# Run each delta through custom transformation logic before display.
client.chat_stream(messages) do |chunk|
  raw = chunk.dig("choices", 0, "delta", "content")
  next unless raw

  print process_content(raw) # Your processing logic
end