@@ -181,3 +181,86 @@ func getCompletionBody(r *http.Request) (openai.CompletionRequest, error) {
 	}
 	return completion, nil
 }
+
+// TestCompletionWithO1Model Tests that the O1 model is not supported for the completion endpoint.
+func TestCompletionWithO1Model(t *testing.T) {
+	config := openai.DefaultConfig("whatever")
+	config.BaseURL = "http://localhost/v1"
+	client := openai.NewClientWithConfig(config)
+
+	_, err := client.CreateCompletion(
+		context.Background(),
+		openai.CompletionRequest{
+			MaxTokens: 5,
+			Model:     openai.O1,
+		},
+	)
+	if !errors.Is(err, openai.ErrCompletionUnsupportedModel) {
+		t.Fatalf("CreateCompletion should return ErrCompletionUnsupportedModel for O1 model, but returned: %v", err)
+	}
+}
+
+// TestCompletionWithGPT4DotModels Tests that the newer GPT-4.x models are not supported for the completion endpoint.
+func TestCompletionWithGPT4DotModels(t *testing.T) {
+	config := openai.DefaultConfig("whatever")
+	config.BaseURL = "http://localhost/v1"
+	client := openai.NewClientWithConfig(config)
+
+	models := []string{
+		openai.GPT4Dot1,
+		openai.GPT4Dot120250414,
+		openai.GPT4Dot1Mini,
+		openai.GPT4Dot1Mini20250414,
+		openai.GPT4Dot1Nano,
+		openai.GPT4Dot1Nano20250414,
+		openai.GPT4Dot5Preview,
+		openai.GPT4Dot5Preview20250227,
+	}
+
+	for _, model := range models {
+		t.Run(model, func(t *testing.T) {
+			_, err := client.CreateCompletion(
+				context.Background(),
+				openai.CompletionRequest{
+					MaxTokens: 5,
+					Model:     model,
+				},
+			)
+			if !errors.Is(err, openai.ErrCompletionUnsupportedModel) {
+				t.Fatalf("CreateCompletion should return ErrCompletionUnsupportedModel for %s model, but returned: %v", model, err)
+			}
+		})
+	}
+}
+
+// TestCompletionWithGPT4oModels Tests that the GPT-4o models are not supported for the completion endpoint.
+func TestCompletionWithGPT4oModels(t *testing.T) {
+	config := openai.DefaultConfig("whatever")
+	config.BaseURL = "http://localhost/v1"
+	client := openai.NewClientWithConfig(config)
+
+	models := []string{
+		openai.GPT4o,
+		openai.GPT4o20240513,
+		openai.GPT4o20240806,
+		openai.GPT4o20241120,
+		openai.GPT4oLatest,
+		openai.GPT4oMini,
+		openai.GPT4oMini20240718,
+	}
+
+	for _, model := range models {
+		t.Run(model, func(t *testing.T) {
+			_, err := client.CreateCompletion(
+				context.Background(),
+				openai.CompletionRequest{
+					MaxTokens: 5,
+					Model:     model,
+				},
+			)
+			if !errors.Is(err, openai.ErrCompletionUnsupportedModel) {
+				t.Fatalf("CreateCompletion should return ErrCompletionUnsupportedModel for %s model, but returned: %v", model, err)
+			}
+		})
+	}
+}
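
These tests point `BaseURL` at an unreachable localhost stub and still expect the sentinel error, which indicates the unsupported-model check happens client-side before any request is sent. Callers can rely on the same `errors.Is` pattern. Below is a minimal, non-authoritative sketch of such a check; the `github.com/sashabaranov/go-openai` import path and the placeholder API key are assumptions for illustration and are not part of this diff.

```go
package main

import (
	"context"
	"errors"
	"fmt"

	// Assumed module path for the client library exercised by these tests.
	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// Placeholder key; a real caller would supply their own credentials.
	config := openai.DefaultConfig("your-api-key")
	client := openai.NewClientWithConfig(config)

	_, err := client.CreateCompletion(
		context.Background(),
		openai.CompletionRequest{
			Model:     openai.O1, // chat-only model, expected to be rejected
			MaxTokens: 5,
			Prompt:    "Say hello",
		},
	)
	switch {
	case errors.Is(err, openai.ErrCompletionUnsupportedModel):
		fmt.Println("model not supported by the completions endpoint; use the chat API instead")
	case err != nil:
		fmt.Println("request failed:", err)
	default:
		fmt.Println("completion succeeded")
	}
}
```

Because the rejection is local, no network round-trip is needed to surface the error, which is why the tests above can safely use an unreachable base URL.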