Skip to content

Commit ea6bf0a

Browse files
committed
refactor(openai-gateway): streamline provider interfaces
Remove model handling from OpenAIProvider and its implementations across all related classes, allowing LLMProvider to serve as the main reference for specific providers. This change simplifies the OpenAIProvider interface by removing unnecessary methods and attributes concerned with model-support checks. Provider selection is now based on LLMProvider identity, which simplifies the codebase and the interactions within the gateway. Tests and usages are updated accordingly.
1 parent c927b0b commit ea6bf0a

File tree

16 files changed

+101
-242
lines changed

16 files changed

+101
-242
lines changed

gradle.properties

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ android.nonTransitiveRClass=true
1414
#publishing for kmmbridge
1515
## Darwin Publish require from - nextVersion parameter must be a valid semver string. Current value: 0.1.4.
1616
## So we need set version to 0.1 or 0.2 ......
17-
LIBRARY_VERSION=0.2.2
17+
LIBRARY_VERSION=0.2.3
1818
GROUP=com.tddworks
1919

2020
# POM

openai-client/openai-client-darwin/src/appleMain/kotlin/com/tddworks/openai/darwin/api/DarwinOpenAI.kt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,4 +19,4 @@ object DarwinOpenAI {
1919
apiKey: () -> String = { "CONFIG_API_KEY" },
2020
baseUrl: () -> String = { OpenAI.BASE_URL },
2121
) = initOpenAI(OpenAIConfig(apiKey, baseUrl))
22-
}
22+
}
Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
1+
package com.tddworks.openai.gateway.api
2+
3+
enum class LLMProvider {
4+
ANTHROPIC,
5+
DEEPSEEK,
6+
GEMINI,
7+
MOONSHOT,
8+
OLLAMA,
9+
OPENAI
10+
}
Lines changed: 44 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,22 +1,59 @@
11
package com.tddworks.openai.gateway.api
22

3-
import com.tddworks.openai.api.chat.api.Chat
4-
import com.tddworks.openai.api.chat.api.OpenAIModel
5-
import com.tddworks.openai.api.images.api.Images
6-
import com.tddworks.openai.api.legacy.completions.api.Completions
3+
import com.tddworks.common.network.api.ktor.api.ListResponse
4+
import com.tddworks.openai.api.chat.api.ChatCompletion
5+
import com.tddworks.openai.api.chat.api.ChatCompletionChunk
6+
import com.tddworks.openai.api.chat.api.ChatCompletionRequest
7+
import com.tddworks.openai.api.images.api.Image
8+
import com.tddworks.openai.api.images.api.ImageCreate
9+
import com.tddworks.openai.api.legacy.completions.api.Completion
10+
import com.tddworks.openai.api.legacy.completions.api.CompletionRequest
11+
import kotlinx.coroutines.flow.Flow
712

813
/**
914
* Interface for connecting to the OpenAI Gateway to chat.
1015
*/
11-
interface OpenAIGateway : Chat, Completions, Images {
16+
interface OpenAIGateway {
1217
fun updateProvider(
1318
id: String,
1419
name: String,
15-
config: OpenAIProviderConfig,
16-
models: List<OpenAIModel>
20+
config: OpenAIProviderConfig
1721
)
1822

1923
fun addProvider(provider: OpenAIProvider): OpenAIGateway
2024
fun removeProvider(id: String)
2125
fun getProviders(): List<OpenAIProvider>
26+
27+
/**
28+
* Creates an image given a prompt.
29+
* Get images as URLs or base64-encoded JSON.
30+
* @param request image creation request.
31+
* @return list of images.
32+
*/
33+
suspend fun generate(request: ImageCreate, provider: LLMProvider): ListResponse<Image>
34+
35+
/**
36+
* Fetch a completion.
37+
* @param request The request to fetch a completion.
38+
* @param provider The provider to use for the completion.
39+
* @return The completion
40+
*/
41+
suspend fun completions(request: CompletionRequest, provider: LLMProvider): Completion
42+
43+
/**
44+
* Fetch a chat completion.
45+
* @param request The request to fetch a chat completion.
46+
* @param provider The provider to use for the chat completion.
47+
* @return The chat completion
48+
*/
49+
suspend fun chatCompletions(request: ChatCompletionRequest, provider: LLMProvider): ChatCompletion
50+
51+
52+
/**
53+
* Stream a chat completion.
54+
* @param request The request to stream a chat completion.
55+
* @param provider The provider to use for the chat completion.
56+
* @return The chat completion chunks as a stream
57+
*/
58+
fun streamChatCompletions(request: ChatCompletionRequest, provider: LLMProvider): Flow<ChatCompletionChunk>
2259
}

openai-gateway/openai-gateway-core/src/commonMain/kotlin/com/tddworks/openai/gateway/api/OpenAIProvider.kt

Lines changed: 0 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -20,23 +20,10 @@ interface OpenAIProvider : Chat, Completions, Images {
2020
*/
2121
val name: String
2222

23-
/**
24-
* The models supported by the provider.
25-
*/
26-
val models: List<OpenAIModel>
27-
2823
/**
2924
* The configuration for the provider.
3025
*/
3126
val config: OpenAIProviderConfig
3227

33-
/**
34-
* Determines if the provided model is supported or not.
35-
*
36-
* @param model The model to check for support
37-
* @return true if the model is supported, false otherwise
38-
*/
39-
fun supports(model: OpenAIModel): Boolean
40-
4128
companion object
4229
}

openai-gateway/openai-gateway-core/src/commonMain/kotlin/com/tddworks/openai/gateway/api/internal/AnthropicOpenAIProvider.kt

Lines changed: 5 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,13 @@ package com.tddworks.openai.gateway.api.internal
22

33
import com.tddworks.anthropic.api.Anthropic
44
import com.tddworks.anthropic.api.AnthropicConfig
5-
import com.tddworks.anthropic.api.AnthropicModel
6-
import com.tddworks.anthropic.api.messages.api.*
5+
import com.tddworks.anthropic.api.messages.api.ContentBlockStop
6+
import com.tddworks.anthropic.api.messages.api.Ping
7+
import com.tddworks.anthropic.api.messages.api.toAnthropicRequest
8+
import com.tddworks.anthropic.api.messages.api.toOpenAIChatCompletion
9+
import com.tddworks.anthropic.api.messages.api.toOpenAIChatCompletionChunk
710
import com.tddworks.common.network.api.ktor.api.ListResponse
811
import com.tddworks.openai.api.chat.api.ChatCompletionRequest
9-
import com.tddworks.openai.api.chat.api.OpenAIModel
1012
import com.tddworks.openai.api.images.api.Image
1113
import com.tddworks.openai.api.images.api.ImageCreate
1214
import com.tddworks.openai.api.legacy.completions.api.Completion
@@ -23,9 +25,6 @@ import com.tddworks.openai.api.chat.api.ChatCompletionChunk as OpenAIChatComplet
2325
class AnthropicOpenAIProvider(
2426
override var id: String = "anthropic",
2527
override var name: String = "Anthropic",
26-
override var models: List<OpenAIModel> = AnthropicModel.availableModels.map {
27-
OpenAIModel(it.value)
28-
},
2928
override var config: AnthropicOpenAIProviderConfig,
3029

3130
private val client: Anthropic = Anthropic.create(
@@ -38,15 +37,6 @@ class AnthropicOpenAIProvider(
3837

3938
) : OpenAIProvider {
4039

41-
/**
42-
* Check if the given OpenAIModel is supported by the available models.
43-
* @param model The OpenAIModel to check for support.
44-
* @return true if the model is supported, false otherwise.
45-
*/
46-
override fun supports(model: OpenAIModel): Boolean {
47-
return models.any { it.value == model.value }
48-
}
49-
5040
/**
5141
* Override function to fetch completions from OpenAI API based on the given ChatCompletionRequest
5242
* @param request the ChatCompletionRequest object containing information needed to generate completions
@@ -85,9 +75,6 @@ class AnthropicOpenAIProvider(
8575
fun OpenAIProvider.Companion.anthropic(
8676
id: String = "anthropic",
8777
config: AnthropicOpenAIProviderConfig,
88-
models: List<OpenAIModel> = AnthropicModel.availableModels.map {
89-
OpenAIModel(it.value)
90-
},
9178
client: Anthropic = Anthropic.create(
9279
AnthropicConfig(
9380
apiKey = config.apiKey,
@@ -99,7 +86,6 @@ fun OpenAIProvider.Companion.anthropic(
9986
return AnthropicOpenAIProvider(
10087
id = id,
10188
config = config,
102-
models = models,
10389
client = client
10490
)
10591
}

openai-gateway/openai-gateway-core/src/commonMain/kotlin/com/tddworks/openai/gateway/api/internal/DefaultOpenAIGateway.kt

Lines changed: 12 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -7,11 +7,11 @@ import com.tddworks.openai.api.OpenAI
77
import com.tddworks.openai.api.chat.api.ChatCompletion
88
import com.tddworks.openai.api.chat.api.ChatCompletionChunk
99
import com.tddworks.openai.api.chat.api.ChatCompletionRequest
10-
import com.tddworks.openai.api.chat.api.OpenAIModel
1110
import com.tddworks.openai.api.images.api.Image
1211
import com.tddworks.openai.api.images.api.ImageCreate
1312
import com.tddworks.openai.api.legacy.completions.api.Completion
1413
import com.tddworks.openai.api.legacy.completions.api.CompletionRequest
14+
import com.tddworks.openai.gateway.api.LLMProvider
1515
import com.tddworks.openai.gateway.api.OpenAIGateway
1616
import com.tddworks.openai.gateway.api.OpenAIProvider
1717
import com.tddworks.openai.gateway.api.OpenAIProviderConfig
@@ -34,8 +34,7 @@ class DefaultOpenAIGateway(
3434
override fun updateProvider(
3535
id: String,
3636
name: String,
37-
config: OpenAIProviderConfig,
38-
models: List<OpenAIModel>
37+
config: OpenAIProviderConfig
3938
) {
4039
availableProviders.removeAll { it.id == id }
4140

@@ -44,28 +43,24 @@ class DefaultOpenAIGateway(
4443
id = id,
4544
name = name,
4645
config = config,
47-
models = models
4846
)
4947

5048
is AnthropicOpenAIProviderConfig -> AnthropicOpenAIProvider(
5149
id = id,
5250
name = name,
5351
config = config,
54-
models = models
5552
)
5653

5754
is OllamaOpenAIProviderConfig -> OllamaOpenAIProvider(
5855
id = id,
5956
name = name,
6057
config = config,
61-
models = models
6258
)
6359

6460
is AzureAIProviderConfig -> DefaultOpenAIProvider(
6561
id = id,
6662
name = name,
6763
config = config,
68-
models = models,
6964
openAI = OpenAI.azure(config)
7065
)
7166

@@ -94,8 +89,10 @@ class DefaultOpenAIGateway(
9489
* @param request The request containing the model for which completions are needed.
9590
* @return A ChatCompletion object containing the completions for the provided request.
9691
*/
97-
override suspend fun chatCompletions(request: ChatCompletionRequest): ChatCompletion {
98-
return availableProviders.firstOrNull { it.supports(request.model) }
92+
override suspend fun chatCompletions(request: ChatCompletionRequest, provider: LLMProvider): ChatCompletion {
93+
return availableProviders.firstOrNull {
94+
it.id.equals(provider.name, true) || it.name.equals(provider.name, true)
95+
}
9996
?.chatCompletions(request)
10097
?: throwNoProviderFound(request.model.value)
10198
}
@@ -107,9 +104,9 @@ class DefaultOpenAIGateway(
107104
* @param request a ChatCompletionRequest object containing the model for which completions are requested
108105
* @return a Flow of ChatCompletionChunk objects representing the completions for the input model
109106
*/
110-
override fun streamChatCompletions(request: ChatCompletionRequest): Flow<ChatCompletionChunk> {
107+
override fun streamChatCompletions(request: ChatCompletionRequest, provider: LLMProvider): Flow<ChatCompletionChunk> {
111108
return availableProviders.firstOrNull {
112-
it.supports(request.model)
109+
it.id.equals(provider.name, true) || it.name.equals(provider.name, true)
113110
}?.streamChatCompletions(request)
114111
?: throwNoProviderFound(request.model.value)
115112
}
@@ -120,16 +117,16 @@ class DefaultOpenAIGateway(
120117
* @param request The request containing the model for which completions are needed.
121118
* @return A Completion object containing the completions for the provided request.
122119
*/
123-
override suspend fun completions(request: CompletionRequest): Completion {
120+
override suspend fun completions(request: CompletionRequest, provider: LLMProvider): Completion {
124121
return availableProviders.firstOrNull {
125-
it.supports(request.model)
122+
it.id.equals(provider.name, true) || it.name.equals(provider.name, true)
126123
}?.completions(request)
127124
?: throwNoProviderFound(request.model.value)
128125
}
129126

130-
override suspend fun generate(request: ImageCreate): ListResponse<Image> {
127+
override suspend fun generate(request: ImageCreate, provider: LLMProvider): ListResponse<Image> {
131128
return availableProviders.firstOrNull {
132-
it.supports(request.model)
129+
it.id.equals(provider.name, true) || it.name.equals(provider.name, true)
133130
}?.generate(request)
134131
?: throwNoProviderFound(request.model.value)
135132
}

openai-gateway/openai-gateway-core/src/commonMain/kotlin/com/tddworks/openai/gateway/api/internal/DefaultOpenAIProvider.kt

Lines changed: 2 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,6 @@ import com.tddworks.openai.api.OpenAI
99
import com.tddworks.openai.api.chat.api.ChatCompletion
1010
import com.tddworks.openai.api.chat.api.ChatCompletionChunk
1111
import com.tddworks.openai.api.chat.api.ChatCompletionRequest
12-
import com.tddworks.openai.api.chat.api.OpenAIModel
13-
import com.tddworks.openai.api.chat.api.OpenAIModel.Companion.availableModels
1412
import com.tddworks.openai.api.images.api.Image
1513
import com.tddworks.openai.api.images.api.ImageCreate
1614
import com.tddworks.openai.api.legacy.completions.api.Completion
@@ -23,15 +21,10 @@ import kotlinx.serialization.ExperimentalSerializationApi
2321
class DefaultOpenAIProvider(
2422
override val id: String = "openai",
2523
override val name: String = "OpenAI",
26-
override val models: List<OpenAIModel> = availableModels,
2724
override val config: OpenAIProviderConfig,
2825
private val openAI: OpenAI = OpenAI.default(config.toOpenAIConfig()),
2926
) : OpenAIProvider {
3027

31-
override fun supports(model: OpenAIModel): Boolean {
32-
return models.any { it.value == model.value }
33-
}
34-
3528
override suspend fun chatCompletions(request: ChatCompletionRequest): ChatCompletion {
3629
return openAI.chatCompletions(request)
3730
}
@@ -52,23 +45,21 @@ class DefaultOpenAIProvider(
5245
fun OpenAIProvider.Companion.openAI(
5346
id: String = "openai",
5447
config: OpenAIProviderConfig,
55-
models: List<OpenAIModel>,
5648
openAI: OpenAI = OpenAI.default(config.toOpenAIConfig())
5749
): OpenAIProvider {
5850
return DefaultOpenAIProvider(
5951
id = id,
60-
config = config, models = models, openAI = openAI
52+
config = config, openAI = openAI
6153
)
6254
}
6355

6456
fun OpenAIProvider.Companion.azure(
6557
id: String = "azure",
6658
config: OpenAIProviderConfig,
67-
models: List<OpenAIModel>,
6859
openAI: OpenAI = OpenAI.azure(config as AzureAIProviderConfig)
6960
): OpenAIProvider {
7061
return DefaultOpenAIProvider(
7162
id = id,
72-
config = config, models = models, openAI = openAI
63+
config = config, openAI = openAI
7364
)
7465
}

openai-gateway/openai-gateway-core/src/commonMain/kotlin/com/tddworks/openai/gateway/api/internal/GeminiOpenAIProvider.kt

Lines changed: 2 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -20,16 +20,9 @@ class GeminiOpenAIProvider(
2020
override val id: String = "gemini",
2121
override val name: String = "Gemini",
2222
override val config: GeminiOpenAIProviderConfig,
23-
override val models: List<OpenAIModel> = GeminiModel.availableModels.map {
24-
OpenAIModel(it.value)
25-
},
2623
val client: Gemini
2724
) : OpenAIProvider {
2825

29-
override fun supports(model: OpenAIModel): Boolean {
30-
return models.any { it.value == model.value }
31-
}
32-
3326
override suspend fun chatCompletions(request: ChatCompletionRequest): ChatCompletion {
3427
val geminiRequest = request.toGeminiGenerateContentRequest()
3528
return client.generateContent(geminiRequest).toOpenAIChatCompletion()
@@ -53,11 +46,9 @@ class GeminiOpenAIProvider(
5346
}
5447

5548
fun OpenAIProvider.Companion.gemini(
56-
id: String = "gemini", models: List<OpenAIModel> = GeminiModel.availableModels.map {
57-
OpenAIModel(it.value)
58-
}, config: GeminiOpenAIProviderConfig, client: Gemini
49+
id: String = "gemini", config: GeminiOpenAIProviderConfig, client: Gemini
5950
): OpenAIProvider {
6051
return GeminiOpenAIProvider(
61-
id = id, models = models, config = config, client = client
52+
id = id, config = config, client = client
6253
)
6354
}

0 commit comments

Comments (0)