Skip to content

Commit c5657d0

Browse files
Merge pull request #548 from VipinDevelops/fix/#546
[FIX]: Handle Error response from AI API
2 parents 592647b + 6c0bd76 commit c5657d0

File tree

2 files changed

+108
-58
lines changed

2 files changed

+108
-58
lines changed

backend/analytics_server/mhq/service/ai/ai_analytics_service.py

Lines changed: 42 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
import requests
33
from http import HTTPStatus
44
from enum import Enum
5-
from typing import Dict, List
5+
from typing import Dict, List, Union
66

77

88
class AIProvider(Enum):
@@ -44,7 +44,30 @@ def __init__(self, llm: LLM, access_token: str):
4444
def _get_message(self, message: str, role: str = "user"):
4545
return {"role": role, "content": message}
4646

47+
def _handle_api_response(self, response) -> Dict[str, Union[str, int]]:
48+
"""
49+
Handles the API response, returning a success or error structure that the frontend can use.
50+
"""
51+
if response.status_code == HTTPStatus.OK:
52+
return {
53+
"status": "success",
54+
"data": response.json()["choices"][0]["message"]["content"],
55+
}
56+
elif response.status_code == HTTPStatus.UNAUTHORIZED:
57+
return {
58+
"status": "error",
59+
"message": "Unauthorized Access: Your access token is either missing, expired, or invalid. Please ensure that you are providing a valid token. ",
60+
}
61+
else:
62+
return {
63+
"status": "error",
64+
"message": f"Unexpected error: {response.text}",
65+
}
66+
4767
def _open_ai_fetch_completion_open_ai(self, messages: List[Dict[str, str]]):
68+
"""
69+
Handles the request to OpenAI API for fetching completions.
70+
"""
4871
payload = {
4972
"model": self.LLM_NAME_TO_MODEL_MAP[self._llm],
5073
"temperature": 0.6,
@@ -53,13 +76,12 @@ def _open_ai_fetch_completion_open_ai(self, messages: List[Dict[str, str]]):
5376
api_url = "https://api.openai.com/v1/chat/completions"
5477
response = requests.post(api_url, headers=self._headers, json=payload)
5578

56-
print(payload, api_url, response)
57-
if response.status_code != HTTPStatus.OK:
58-
raise Exception(response.json())
59-
60-
return response.json()
79+
return self._handle_api_response(response)
6180

6281
def _fireworks_ai_fetch_completions(self, messages: List[Dict[str, str]]):
82+
"""
83+
Handles the request to Fireworks AI API for fetching completions.
84+
"""
6385
payload = {
6486
"model": self.LLM_NAME_TO_MODEL_MAP[self._llm],
6587
"temperature": 0.6,
@@ -73,28 +95,28 @@ def _fireworks_ai_fetch_completions(self, messages: List[Dict[str, str]]):
7395
api_url = "https://api.fireworks.ai/inference/v1/chat/completions"
7496
response = requests.post(api_url, headers=self._headers, json=payload)
7597

76-
if response.status_code != HTTPStatus.OK:
77-
raise Exception(response.json())
78-
79-
return response.json()
80-
81-
def _fetch_completion(self, messages: List[Dict[str, str]]):
98+
return self._handle_api_response(response)
8299

100+
def _fetch_completion(
    self, messages: List[Dict[str, str]]
) -> Dict[str, Union[str, int]]:
    """
    Dispatch a completion request to whichever AI provider is configured.

    Args:
        messages: Chat messages, each a dict with "role" and "content".

    Returns:
        The provider handler's response dict, or an error dict when the
        configured provider is not recognized.
    """
    # Map each supported provider to its request handler.
    provider_handlers = {
        AIProvider.FIREWORKS_AI: self._fireworks_ai_fetch_completions,
        AIProvider.OPEN_AI: self._open_ai_fetch_completion_open_ai,
    }
    handler = provider_handlers.get(self._ai_provider)
    if handler is not None:
        return handler(messages)
    return {
        "status": "error",
        "message": f"Invalid AI provider {self._ai_provider}",
    }
94116

95117
def get_dora_metrics_score(
96118
self, four_keys_data: Dict[str, float]
97-
) -> Dict[str, str]:
119+
) -> Dict[str, Union[str, int]]:
98120
"""
99121
Calculate the DORA metrics score using input data and an LLM (Language Learning Model).
100122

web-server/pages/api/internal/ai/dora_metrics.ts

Lines changed: 66 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -64,50 +64,78 @@ const postSchema = yup.object().shape({
6464
});
6565

6666
const endpoint = new Endpoint(nullSchema);
67-
6867
endpoint.handle.POST(postSchema, async (req, res) => {
6968
const { data, model, access_token } = req.payload;
69+
const dora_data = data as TeamDoraMetricsApiResponseType;
70+
71+
try {
72+
const [
73+
doraMetricsScore,
74+
leadTimeSummary,
75+
CFRSummary,
76+
MTTRSummary,
77+
deploymentFrequencySummary,
78+
doraTrendSummary
79+
] = await Promise.all(
80+
[
81+
getDoraMetricsScore,
82+
getLeadTimeSummary,
83+
getCFRSummary,
84+
getMTTRSummary,
85+
getDeploymentFrequencySummary,
86+
getDoraTrendsCorrelationSummary
87+
].map((fn) => fn(dora_data, model, access_token))
88+
);
89+
90+
const aggregatedData = {
91+
...doraMetricsScore,
92+
...leadTimeSummary,
93+
...CFRSummary,
94+
...MTTRSummary,
95+
...deploymentFrequencySummary,
96+
...doraTrendSummary
97+
};
7098

71-
const dora_data = data as unknown as TeamDoraMetricsApiResponseType;
72-
73-
const [
74-
dora_metrics_score,
75-
lead_time_trends_summary,
76-
change_failure_rate_trends_summary,
77-
mean_time_to_recovery_trends_summary,
78-
deployment_frequency_trends_summary,
79-
dora_trend_summary
80-
] = await Promise.all(
81-
[
82-
getDoraMetricsScore,
83-
getLeadTimeSummary,
84-
getCFRSummary,
85-
getMTTRSummary,
86-
getDeploymentFrequencySummary,
87-
getDoraTrendsCorrelationSummary
88-
].map((f) => f(dora_data, model, access_token))
89-
);
99+
const compiledSummary = await getDORACompiledSummary(
100+
aggregatedData,
101+
model,
102+
access_token
103+
);
90104

91-
const aggregated_dora_data = {
92-
...dora_metrics_score,
93-
...lead_time_trends_summary,
94-
...change_failure_rate_trends_summary,
95-
...mean_time_to_recovery_trends_summary,
96-
...deployment_frequency_trends_summary,
97-
...dora_trend_summary
98-
} as AggregatedDORAData;
99-
100-
const dora_compiled_summary = await getDORACompiledSummary(
101-
aggregated_dora_data,
102-
model,
103-
access_token
104-
);
105+
const responses = {
106+
...aggregatedData,
107+
...compiledSummary
108+
};
105109

106-
res.send({
107-
...aggregated_dora_data,
108-
...dora_compiled_summary
109-
});
110+
const { status, message } = checkForErrors(responses);
111+
112+
if (status === 'error') {
113+
return res.status(400).send({ message });
114+
}
115+
116+
const simplifiedData = Object.fromEntries(
117+
Object.entries(responses).map(([key, value]) => [key, value.data])
118+
);
119+
120+
return res.status(200).send(simplifiedData);
121+
} catch (error) {
122+
return res.status(500).send({
123+
message: 'Internal Server Error',
124+
error: error.message
125+
});
126+
}
110127
});
128+
const checkForErrors = (
129+
responses: Record<string, { status: string; message: string }>
130+
): { status: string; message: string } => {
131+
const errorResponse = Object.values(responses).find(
132+
(value) => value.status === 'error'
133+
);
134+
135+
return errorResponse
136+
? { status: 'error', message: errorResponse.message }
137+
: { status: 'success', message: '' };
138+
};
111139

112140
const getDoraMetricsScore = (
113141
dora_data: TeamDoraMetricsApiResponseType,

0 commit comments

Comments
 (0)