Commit 4d86541

docs: Update README with client usage examples and enhance API interaction details
Signed-off-by: Eden Reich <eden.reich@gmail.com>
1 parent fe5ab08 commit 4d86541

File tree

1 file changed: +71 −39 lines changed

README.md

Lines changed: 71 additions & 39 deletions
@@ -21,41 +21,63 @@ Run `cargo add inference-gateway-sdk`.
 
 ### Creating a Client
 
+Here is a full example of how to create a client and interact with the Inference Gateway API:
+
 ```rust
 use inference_gateway_sdk::{
     GatewayError,
     InferenceGatewayAPI,
     InferenceGatewayClient,
     Message,
     Provider,
-    MessageRole,
+    MessageRole
 };
 use log::info;
+use std::env;
 
-fn main() -> Result<(), GatewayError> {
+#[tokio::main]
+async fn main() -> Result<(), GatewayError> {
+    if env::var("RUST_LOG").is_err() {
+        env::set_var("RUST_LOG", "info");
+    }
     env_logger::init();
 
+    // Create a client
     let client = InferenceGatewayClient::new("http://localhost:8080");
 
-    // List available models
-    let models = client.list_models()?;
+    // List all models from all providers
+    let models = client.list_models().await?;
     for provider_models in models {
         info!("Provider: {:?}", provider_models.provider);
         for model in provider_models.models {
             info!("Model: {:?}", model.name);
         }
     }
 
-    let response = client.generate_content(
-        Provider::Ollama,
-        "llama2",
-        vec![Message {
-            role: MessageRole::User,
-            content: "Tell me a joke".to_string(),
-        }],
-    )?;
+    // List models for a specific provider
+    let resp = client.list_models_by_provider(Provider::Groq).await?;
+    let models = resp.models;
+    info!("Provider: {:?}", resp.provider);
+    for model in models {
+        info!("Model: {:?}", model.name);
+    }
+
+    // Generate content - choose from the available providers and models
+    let resp = client.generate_content(Provider::Groq, "deepseek-r1-distill-llama-70b", vec![
+        Message {
+            role: MessageRole::System,
+            content: "You are a helpful assistant.".to_string(),
+        },
+        Message {
+            role: MessageRole::User,
+            content: "Tell me a funny joke".to_string(),
+        },
+    ]).await?;
+
+    log::info!("Generated from provider: {:?}", resp.provider);
+    log::info!("Generated response role: {:?}", resp.response.role);
+    log::info!("Generated content: {:?}", resp.response.content);
 
-    info!("Response: {:?}", response);
     Ok(())
 }
 ```
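The new example drives everything from `main`. The same calls also fold into a reusable function; here is a minimal sketch, where `ask` is an illustrative name rather than an SDK method, and the response field types are assumed from the `{:?}` logging above:

```rust
use inference_gateway_sdk::{
    GatewayError, InferenceGatewayAPI, InferenceGatewayClient, Message, MessageRole, Provider,
};

// Illustrative helper (not part of the SDK): send a single user prompt
// and return the generated text, using only the calls shown above.
async fn ask(
    client: &InferenceGatewayClient,
    provider: Provider,
    model: &str,
    prompt: &str,
) -> Result<String, GatewayError> {
    let resp = client
        .generate_content(provider, model, vec![Message {
            role: MessageRole::User,
            content: prompt.to_string(),
        }])
        .await?;
    Ok(resp.response.content)
}
```

Called as `ask(&client, Provider::Groq, "deepseek-r1-distill-llama-70b", "Tell me a joke").await?`, it returns just the generated text.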
@@ -70,16 +92,14 @@ use inference_gateway_sdk::{
     InferenceGatewayAPI,
     InferenceGatewayClient,
     Message,
-    Provider,
-    MessageRole
 };
 use log::info;
 
 #[tokio::main]
 async fn main() -> Result<(), GatewayError> {
-    // ...create a client
+    // ...Create a client
 
-    // List all providers and models
+    // List all models from all providers
     let models = client.list_models().await?;
     for provider_models in models {
         info!("Provider: {:?}", provider_models.provider);
@@ -88,13 +108,7 @@ fn main() -> Result<(), GatewayError> {
         }
     }
 
-    // List models for a specific provider
-    let resp = client.list_models_by_provider(Provider::Ollama).await?;
-    let models = resp.models;
-    info!("Provider: {:?}", resp.provider);
-    for model in models {
-        info!("Model: {:?}", model.name);
-    }
+    // ...
 }
 ```
 
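The Usage section now keeps only the cross-provider listing, with the per-provider example moved to its own section below. If the listed names are needed programmatically rather than logged, the same loop can collect them; a sketch assuming `model.name` is a `String`, as the `{:?}` logging suggests:

```rust
// Inside main: collect (provider, model name) pairs instead of logging them.
// Field types are assumed from the README's `{:?}` usage.
let mut catalog: Vec<(String, String)> = Vec::new();
for provider_models in client.list_models().await? {
    let provider_label = format!("{:?}", provider_models.provider);
    for model in provider_models.models {
        catalog.push((provider_label.clone(), model.name));
    }
}
log::info!("{} models available", catalog.len());
```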
@@ -103,38 +117,56 @@ fn main() -> Result<(), GatewayError> {
 To list all available models from a specific provider, use the `list_models_by_provider` method:
 
 ```rust
-// ...rest of the imports
+use inference_gateway_sdk::{
+    GatewayError,
+    InferenceGatewayAPI,
+    InferenceGatewayClient,
+    Provider,
+};
 use log::info;
 
-// ...main function
+// ...inside the main function
+
+// List models for a specific provider
 let resp = client.list_models_by_provider(Provider::Ollama).await?;
 let models = resp.models;
 info!("Provider: {:?}", resp.provider);
 for model in models {
     info!("Model: {:?}", model.name);
 }
+
+// ...rest of the main function
 ```
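The listing call can also serve as a pre-flight check before generating with a specific model. A minimal sketch, where `is_available` is an illustrative helper, not an SDK method:

```rust
use inference_gateway_sdk::{GatewayError, InferenceGatewayAPI, InferenceGatewayClient, Provider};

// Illustrative pre-flight check (not part of the SDK): is a model
// currently served by the given provider?
async fn is_available(
    client: &InferenceGatewayClient,
    provider: Provider,
    model_name: &str,
) -> Result<bool, GatewayError> {
    let resp = client.list_models_by_provider(provider).await?;
    Ok(resp.models.iter().any(|m| m.name == model_name))
}
```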
 
 ### Generating Content
 
 To generate content using a model, use the `generate_content` method:
 
 ```rust
-// ...rest of the imports
-use log::info;
+use inference_gateway_sdk::{
+    GatewayError,
+    InferenceGatewayAPI,
+    InferenceGatewayClient,
+    Message,
+    Provider,
+    MessageRole
+};
 
-// ...main function
-let response = client.generate_content(
-    Provider::Ollama,
-    "llama2",
-    vec![Message {
-        role: MessageRole::User,
-        content: "Tell me a joke".to_string(),
-    }],
-).await?;
+// Generate content - choose from the available providers and models
+let resp = client.generate_content(Provider::Groq, "deepseek-r1-distill-llama-70b", vec![
+    Message {
+        role: MessageRole::System,
+        content: "You are a helpful assistant.".to_string(),
+    },
+    Message {
+        role: MessageRole::User,
+        content: "Tell me a funny joke".to_string(),
+    },
+]).await?;
 
-info!("Provider: {:?}", response.provider);
-info!("Response: {:?}", response.response);
+log::info!("Generated from provider: {:?}", resp.provider);
+log::info!("Generated response role: {:?}", resp.response.role);
+log::info!("Generated content: {:?}", resp.response.content);
 ```
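Because `generate_content` takes the full message history, a follow-up turn is the same call with the previous reply appended. A sketch under two assumptions this diff does not confirm: that `MessageRole` has an `Assistant` variant and that `Message` implements `Clone`:

```rust
// Multi-turn sketch (inside main): assumes MessageRole::Assistant exists
// and Message: Clone; neither is confirmed by this diff.
let mut history = vec![
    Message {
        role: MessageRole::User,
        content: "Tell me a funny joke".to_string(),
    },
];
let resp = client
    .generate_content(Provider::Groq, "deepseek-r1-distill-llama-70b", history.clone())
    .await?;

// Append the reply, then ask a follow-up with the full history.
history.push(Message {
    role: MessageRole::Assistant,
    content: resp.response.content.clone(),
});
history.push(Message {
    role: MessageRole::User,
    content: "Now explain why it is funny".to_string(),
});
let follow_up = client
    .generate_content(Provider::Groq, "deepseek-r1-distill-llama-70b", history)
    .await?;
log::info!("Follow-up: {:?}", follow_up.response.content);
```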

### Health Check
