|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Close tab
Inbox (1,540) - [EMAIL] - Jiminny Mail
Inbox (1,540) - [EMAIL] - Jiminny Mail
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:
call/ai-activity-type
.
COMPARE_TO_GROUND_TRUTH
Determines if the baseline is the original data (
es
) or an
llm
.
SKIP_UNREPLAYABLE_ON_COLLECT
If
True
, discards records where the API returns "Cannot find activity."
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Limits the probe request to 20 seconds.
Constant
ENDPOINT_PATH
COMPARE_TO_GROUND_TRUTH
SKIP_UNREPLAYABLE_ON_COLLECT
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Purpose
The specific API route being tested:
call/ai-activity-type
.
Determines if the baseline is the original data (
es
) or an
llm
.
If
True
, discards records where the API returns "Cannot find activity."
Limits the probe request to 20 seconds.
Copy table
Execution Workflow
Execution Workflow
Parsing:
The script uses
create_parser
to handle CLI arguments like
--data-set
,
--populate-ground-truth-llm
, and
--compare-to-ground-truth
.
Initialization:
Sets up directory paths for inputs and ground truths based on the selected dataset name.
Run:
Executes the evaluation via
asyncio.run(run_evaluation(...))
, which orchestrates the collection, endpoint calling, and result scoring phases.
Copy prompt
Edit
You said are there soem issues with this PR
You said
are there soem issues with this PR
Listen
Show more options
Show thinking
Gemini said
Gemini said
Yes, there are several issues and code smells in this Pull Request, ranging from performance bottlenecks to architectural anti-patterns.
Here is a breakdown of the most notable issues, categorized by impact:
Performance & Async Anti-Patterns
Performance & Async Anti-Patterns
Sequential Requests Defeating Async (Lines 86-106):
Inside
_collect_from_es
, the code loops through
raw_samples
and
await
s a
session.post
request for each sample one by one. This means the script waits for one HTTP request to finish before starting the next, completely defeating the purpose of using
aiohttp
for asynchronous concurrency.
Fix:
Collect these coroutines into a list and use
asyncio.gather()
(ideally wrapped in an
asyncio.Semaphore
to avoid overwhelming the endpoint) to run the replayability probes concurrently.
Blocking File I/O in Async Functions (Lines 110-114, 184-188, 268-269):
Standard synchronous file operations (
with open(...)
,
json.dump
,
json.load
) are used inside
async def
functions. In Python, this blocks the entire event loop while the disk is reading/writing.
Fix:
Use an asynchronous file I/O library like
aiofiles
, or run the blocking I/O operations in a separate thread using
asyncio.to_thread()
.
Architecture & State Management
Architecture & State Management
Mutating Class-Level Variables (Lines 323-328):
The script parses CLI arguments and directly mutates the class attributes (e.g.,
AiActivityTypeEvaluator.DATA_SET = args.data_set
). This is a major anti-pattern. If you ever try to run tests in parallel or create multiple instances of this evaluator in a single process, the state will bleed across them.
Fix:
Pass these arguments into the
__init__
constructor of the instance (
self.data_set = args.data_set
) rather than modifying the class definition globally.
Code Quality & Edge Cases
Code Quality & Edge Cases
Broad Exception Catching (Line 107):
Using a bare
except Exception:
during the replayability probe is dangerous. It will catch
aiohttp.ClientError
(which is intended), but it will also silently swallow
TimeoutError
or even memory errors, making debugging incredibly difficult if the endpoint goes down.
Fix:
Scope the exception to specific expected errors, like
except (aiohttp.ClientError, asyncio.TimeoutError):
.
Inefficient Size Calculation (Line 95):
len(json.dumps(request_body)) < min_size
requires serializing the entire dictionary to a string
just
to check its length. Since
min_size
defaults to
0
, the
if min_size
short-circuit saves it for now, but if anyone sets a minimum size, this will become an unnecessary CPU bottleneck on large datasets.
Potential Path Traversal Risk (Lines 110, 113, 251):...
|
Firefox
|
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf — Work...
|
app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d0 app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d097-495b-8dfd-d4c25a5e8843...
|
8648
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Close tab
Inbox (1,540) - [EMAIL] - Jiminny Mail
Inbox (1,540) - [EMAIL] - Jiminny Mail
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:
call/ai-activity-type
.
COMPARE_TO_GROUND_TRUTH
Determines if the baseline is the original data (
es
) or an
llm
.
SKIP_UNREPLAYABLE_ON_COLLECT
If
True
, discards records where the API returns "Cannot find activity."
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Limits the probe request to 20 seconds.
Constant
ENDPOINT_PATH
COMPARE_TO_GROUND_TRUTH
SKIP_UNREPLAYABLE_ON_COLLECT
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Purpose
The specific API route being tested:
call/ai-activity-type
.
Determines if the baseline is the original data (
es
) or an
llm
.
If
True
, discards records where the API returns "Cannot find activity."
Limits the probe request to 20 seconds.
Copy table
Execution Workflow
Execution Workflow
Parsing:
The script uses
create_parser
to handle CLI arguments like
--data-set
,
--populate-ground-truth-llm
, and
--compare-to-ground-truth
.
Initialization:
Sets up directory paths for inputs and ground truths based on the selected dataset name.
Run:
Executes the evaluation via
asyncio.run(run_evaluation(...))
, which orchestrates the collection, endpoint calling, and result scoring phases.
Copy prompt
Edit
You said are there soem issues with this PR
You said
are there soem issues with this PR
Listen
Show more options
Show thinking
Gemini said
Gemini said
Yes, there are several issues and code smells in this Pull Request, ranging from performance bottlenecks to architectural anti-patterns.
Here is a breakdown of the most notable issues, categorized by impact:
Performance & Async Anti-Patterns
Performance & Async Anti-Patterns
Sequential Requests Defeating Async (Lines 86-106):
Inside
_collect_from_es
, the code loops through
raw_samples
and
await
s a
session.post
request for each sample one by one. This means the script waits for one HTTP request to finish before starting the next, completely defeating the purpose of using
aiohttp
for asynchronous concurrency.
Fix:
Collect these coroutines into a list and use
asyncio.gather()
(ideally wrapped in an
asyncio.Semaphore
to avoid overwhelming the endpoint) to run the replayability probes concurrently.
Blocking File I/O in Async Functions (Lines 110-114, 184-188, 268-269):
Standard synchronous file operations (
with open(...)
,
json.dump
,
json.load
) are used inside
async def
functions. In Python, this blocks the entire event loop while the disk is reading/writing.
Fix:
Use an asynchronous file I/O library like
aiofiles
, or run the blocking I/O operations in a separate thread using
asyncio.to_thread()
.
Architecture & State Management
Architecture & State Management
Mutating Class-Level Variables (Lines 323-328):
The script parses CLI arguments and directly mutates the class attributes (e.g.,
AiActivityTypeEvaluator.DATA_SET = args.data_set
). This is a major anti-pattern. If you ever try to run tests in parallel or create multiple instances of this evaluator in a single process, the state will bleed across them.
Fix:
Pass these arguments into the
__init__
constructor of the instance (
self.data_set = args.data_set
) rather than modifying the class definition globally.
Code Quality & Edge Cases
Code Quality & Edge Cases
Broad Exception Catching (Line 107):
Using a bare
except Exception:
during the replayability probe is dangerous. It will catch
aiohttp.ClientError
(which is intended), but it will also silently swallow
TimeoutError
or even memory errors, making debugging incredibly difficult if the endpoint goes down.
Fix:
Scope the exception to specific expected errors, like
except (aiohttp.ClientError, asyncio.TimeoutError):
.
Inefficient Size Calculation (Line 95):
len(json.dumps(request_body)) < min_size
requires serializing the entire dictionary to a string
just
to check its length. Since
min_size
defaults to
0
, the
if min_size
short-circuit saves it for now, but if anyone sets a minimum size, this will become an unnecessary CPU bottleneck on large datasets.
Potential Path Traversal Risk (Lines 110, 113, 251):
The script writes and reads files directly using
f"{es_id}.json"
and
f"gt_{req_id}.json"
. If
es_id
or
req_id
originate from a database and ever contain characters like
../
, it could result in files being read from or written to unintended directories.
Fix:
Sanitize the IDs or strictly enforce that they only contain alphanumeric characters.
Would you like me to rewrite the
_collect_from_es
method to show how to fix the sequential async loop and blocking file I/O?
Enter a prompt for Gemini
encrypted
Enter a prompt for Gemini
encrypted
Open upload file menu
Tools
Open mode picker...
|
Firefox
|
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf — Work...
|
app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d0 app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d097-495b-8dfd-d4c25a5e8843...
|
8647
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpapp.staging.jiminny.com/ai-reports/pdf/e2f4890a-d097-495b-8dfd-d4c25a5e8843~ Google Gemini1of 3• < 40 ll • | Daily -Platform • 8m leftA 100%C 8• Tue 14 Apr 9:57:50- | + Automatic Zoom~Platform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t(8 Jiminnyg Ask Jiminny test report - 8 Apr XInbox (1,540) - lukas.kovalik@umir+ New labsocc ce Cdcseciie3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringOesesare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio.Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiAsk Jiminny Test Report8 Apr 2026ProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageJIMINNY-...
|
NULL
|
NULL
|
NULL
|
8646
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpapp.staging.jiminny.com/ai-reports/pdf/e2f4890a-d097-495b-8dfd-d4c25a5e8843~ Google Gemini2 of 3C 40 lbl | Daily - Platform • 8 m leftA 7 10%C2 &• Tue 14 Apr 9:57:47- | + Automatic ZoomPlatform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t(8 Jiminnyg Ask Jiminny test report - 8 Apr XInbox (1,540) - lukas.kovalik@umir+ New labsoccce cdcselnde.3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringOesesare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await s asession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio.Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiPro vYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pagestrateictoltor salesenacilemtent coachineeand pipeline vistity. Belntv asyutilizis ofthekey themes and trends observed across our accounts.Driving Adoption and Coachingclienrane ie marine mtving beyond basic al recording to leverage jiminy for structured• Manager Accountability: Many teams are shifting coaching responsibility to managers byusing automated nudges to highlight specific calls for review.• Peer-to-Peer Learning: Organizations like Insuritas and PipeDrive are creating "best of'"playlists to share successful objection handling and sales techniques, fostering a culture ofreflection.• Self-Coaching: Several clients, including Funding Circle and Edozo, are implementing self-coaching workflows where reps identify their own calls for review based on specific criterialike urgency or discovery quality.CRM Integration and Data AccuracySeamless integration with CRM platforms remains a primary value driver for our users:• Salesforce & HubSpot: Clients such as Productsup, Unbiased, and PostHog prioritize deepCRM integration to ensure Jiminny serves as a single source of truth.• Technical Troubleshooting: We are actively addressing integration challenges, such asSalesforce mapping errors and Notetaker configuration issues, to ensure data flows correctlybetween systems.• Roadmap Requests: There is significant interest in upcoming features, including native Zohointegration and automated field mapping for Salesforce.Strategic Al UtilizationThe "Ask Jiminny" feature and Al-driven insights are becoming central to how teams managetheir pipeline:• Deal Visibility: Users at Specright and Cision are using "Ask Jiminny" at the opportunity levelto generate summaries, identify risks, and determine next steps without needing to listen toevery call....
|
NULL
|
NULL
|
NULL
|
8645
|
|
DiaFilelEditViewTabsBookmarksHistoryExtensionsWind DiaFilelEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)SInbox (2)SRD-677XC Jy 19798(7 Service-oi Font. AmrCostI Clhttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=JY-18909• Sminm0 Projects# AWSE3 SSHEl DutadogPlatform TeamQ Search board800$ Jy-19240 / A JY-18900READY FOR DEV 1ooeeheTxWirtexceed Fontawesomepackage limitsMAINTENANCISReady for Dev©-20564 1 П ****=PriorityStor..% JY-20574 [AI) Ensure PDF formatting is good= Medium= Medium% JY-20586 (QA) Manual Testing= Medium|% JY-20626 Integration testing= MediumLinked work itemsis cloned byJY-20458 CRUD reports with Ask. JiminnyIs tested byJY-20649 Generate Ask Jiminny Report and display it in AI Reports• JY-20656 Expired Ask Jiminny report template is automatically disabled an…..© JY-20660 Non-creator cannot delete generated Ask Jiminny report from A...JY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...JY-20655 Generated Ask Jiminny Report emall is sent to recipients0 JY-20654 Generated Ask Jiminny Reoort is visible only to creator and shar...JY-20650 Generated Ask Jiminny Report PDF includes branding, report se..•**_AssigneeStatus3 Steliyan ...i2 Stellyan -INDEYVINDEV vAnellya A...& Unassign...READY FOR DEVREADY FOR DEVSDEPLOYEDVDONEDONEDONEDONEDONEMDONEWUnEActions9:57 AM Daily - Platform> 0.Daily - Platform • 8 m left8•ue 14 Apr 9:57CE: DepeDependol• Far DepeD Tasks7 Insights & Coachin• Del0ux.D Al Bookmarks© 2XZ Improve Story~ DetailsAssignee32 Stellyan GeorgievAsuntoneReporter2 Galya DimitrovaUerClo eiene@ Open with VS Code|3 branches32 commits1 pull request1 build failingRelessesProduction7 See all deploymentsComponentsPlatformSub-ProductAooton5 days agcUrEhGroup: QueriesDEPLOY &fix connome ouring teleature fiag releaseDeployed0E -20585 15 + ****=AI Review - Q1CTNIHEMANTANORCODeployed19982 1No such column "Type' onentty 'Event"PLATTORM STABTUTTYDeployed8 -20384 05 11 ****=Missed deletion of stale0 m40048009(Pipedrive) DI > status notlabeled, date data missingREDUCE CHURNNDeployed0 -20614 05 П ****=:Stefka StoyanovaSteliyan GeorgievKovalik12:238 • Tue 14 Apr 9:57:47ChatNikolay YankovNikolay Ivanov...
|
NULL
|
NULL
|
NULL
|
8644
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Close tab
Inbox (1,540) - [EMAIL] - Jiminny Mail
Inbox (1,540) - [EMAIL] - Jiminny Mail
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:
call/ai-activity-type
.
COMPARE_TO_GROUND_TRUTH
Determines if the baseline is the original data (
es
) or an
llm
.
SKIP_UNREPLAYABLE_ON_COLLECT
If
True
, discards records where the API returns "Cannot find activity."
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Limits the probe request to 20 seconds.
Constant
ENDPOINT_PATH
COMPARE_TO_GROUND_TRUTH
SKIP_UNREPLAYABLE_ON_COLLECT
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Purpose
The specific API route being tested:
call/ai-activity-type
.
Determines if the baseline is the original data (
es
) or an
llm
.
If
True
, discards records where the API returns "Cannot find activity."
Limits the probe request to 20 seconds.
Copy table
Execution Workflow
Execution Workflow
Parsing:
The script uses
create_parser
to handle CLI arguments like
--data-set
,
--populate-ground-truth-llm
, and
--compare-to-ground-truth
.
Initialization:
Sets up directory paths for inputs and ground truths based on the selected dataset name.
Run:
Executes the evaluation via
asyncio.run(run_evaluation(...))
, which orchestrates the collection, endpoint calling, and result scoring phases.
Copy prompt
Edit
You said are there soem issues with this PR
You said
are there soem issues with this PR
Listen
Show more options
Show thinking
Gemini said
Gemini said
Yes, there are several issues and code smells in this Pull Request, ranging from performance bottlenecks to architectural anti-patterns.
Here is a breakdown of the most notable issues, categorized by impact:
Performance & Async Anti-Patterns
Performance & Async Anti-Patterns
Sequential Requests Defeating Async (Lines 86-106):
Inside
_collect_from_es
, the code loops through
raw_samples
and
await
s a
session.post
request for each sample one by one. This means the script waits for one HTTP request to finish before starting the next, completely defeating the purpose of using
aiohttp
for asynchronous concurrency.
Fix:
Collect these coroutines into a list and use
asyncio.gather()
(ideally wrapped in an
asyncio.Semaphore
to avoid overwhelming the endpoint) to run the replayability probes concurrently.
Blocking File I/O in Async Functions (Lines 110-114, 184-188, 268-269):
Standard synchronous file operations (
with open(...)
,
json.dump
,
json.load
) are used inside
async def
functions. In Python, this blocks the entire event loop while the disk is reading/writing.
Fix:
Use an asynchronous file I/O library like
aiofiles
, or run the blocking I/O operations in a separate thread using
asyncio.to_thread()
.
Architecture & State Management
Architecture & State Management
Mutating Class-Level Variables (Lines 323-328):
The script parses CLI arguments and directly mutates the class attributes (e.g.,
AiActivityTypeEvaluator.DATA_SET = args.data_set
). This is a major anti-pattern. If you ever try to run tests in parallel or create multiple instances of this evaluator in a single process, the state will bleed across them.
Fix:
Pass these arguments into the
__init__
constructor of the instance (
self.data_set = args.data_set
) rather than modifying the class definition globally.
Code Quality & Edge Cases
Code Quality & Edge Cases
Broad Exception Catching (Line 107):
Using a bare
except Exception:
during the replayability probe is dangerous. It will catch
aiohttp.ClientError
(which is intended), but it will also silently swallow
TimeoutError...
|
Firefox
|
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf — Work...
|
app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d0 app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d097-495b-8dfd-d4c25a5e8843...
|
8643
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Close tab
Inbox (1,540) - [EMAIL] - Jiminny Mail
Inbox (1,540) - [EMAIL] - Jiminny Mail
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:
call/ai-activity-type
.
COMPARE_TO_GROUND_TRUTH
Determines if the baseline is the original data (
es
) or an
llm
.
SKIP_UNREPLAYABLE_ON_COLLECT
If
True
, discards records where the API returns "Cannot find activity."
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Limits the probe request to 20 seconds.
Constant
ENDPOINT_PATH
COMPARE_TO_GROUND_TRUTH
SKIP_UNREPLAYABLE_ON_COLLECT
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Purpose
The specific API route being tested:
call/ai-activity-type
.
Determines if the baseline is the original data (
es
) or an
llm
.
If
True
, discards records where the API returns "Cannot find activity."
Limits the probe request to 20 seconds.
Copy table
Execution Workflow
Execution Workflow
Parsing:
The script uses
create_parser
to handle CLI arguments like
--data-set
,
--populate-ground-truth-llm
, and
--compare-to-ground-truth
.
Initialization:
Sets up directory paths for inputs and ground truths based on the selected dataset name.
Run:
Executes the evaluation via
asyncio.run(run_evaluation(...))
, which orchestrates the collection, endpoint calling, and result scoring phases.
Copy prompt
Edit
You said are there soem issues with this PR
You said
are there soem issues with this PR
Listen
Show more options
Show thinking
Gemini said
Gemini said
Yes, there are several issues and code smells in this Pull Request, ranging from performance bottlenecks to architectural anti-patterns.
Here is a breakdown of the most notable issues, categorized by impact:
Performance & Async Anti-Patterns
Performance & Async Anti-Patterns
Sequential Requests Defeating Async (Lines 86-106):
Inside
_collect_from_es
, the code loops through
raw_samples
and
await
s a
session.post
request for each sample one by one. This means the script waits for one HTTP request to finish before starting the next, completely defeating the purpose of using
aiohttp
for asynchronous concurrency.
Fix:
Collect these coroutines into a list and use
asyncio.gather()
(ideally wrapped in an
asyncio.Semaphore
to avoid overwhelming the endpoint) to run the replayability probes concurrently.
Blocking File I/O in Async Functions (Lines 110-114, 184-188, 268-269):
Standard synchronous file operations (
with open(...)
,
json.dump
,
json.load
) are used inside
async def
functions. In Python, this blocks the entire event loop while the disk is reading/writing.
Fix:
Use an asynchronous file I/O library like
aiofiles
, or run the blocking I/O operations in a separate thread using
asyncio.to_thread()
.
Architecture & State Management
Architecture & State Management
Mutating Class-Level Variables (Lines 323-328):
The script parses CLI arguments and directly mutates the class attributes (e.g.,
AiActivityTypeEvaluator.DATA_SET = args.data_set
). This is a major anti-pattern. If you ever try to run tests in parallel or create multiple instances of this evaluator in a single process, the state will bleed across them.
Fix:
Pass these arguments into the
__init__
constructor of the instance (
self.data_set = args.data_set
) rather than modifying the class definition globally.
Code Quality & Edge Cases
Code Quality & Edge Cases
Broad Exception Catching (Line 107):
Using a bare
except Exception:
during the replayability probe is dangerous. It will catch
aiohttp.ClientError
(which is intended), but it will also silently swallow
TimeoutError
or even memory errors, making debugging incredibly difficult if the endpoint goes down.
Fix:
Scope the exception to specific expected errors, like
except (aiohttp.ClientError, asyncio.TimeoutError):
.
Inefficient Size Calculation (Line 95):
len(json.dumps(request_body)) < min_size
requires serializing the entire dictionary to a string
just
to check its length. Since
min_size
defaults to...
|
Firefox
|
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf — Work...
|
app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d0 app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d097-495b-8dfd-d4c25a5e8843...
|
8642
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpapp.staging.jiminny.com/ai-reports/pdf/e2f4890a-d097-495b-8dfd-d4c25a5e8843~ Google Gemini2 of 3C. 40 lbl | Daily - Platform • 8mleft A 100% C/2 &• Tue 14 Apr 9:57:38- | + Automatic ZoomPlatform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t(8 Jiminny@ Ask jirminy test report - 8 Api *Inbox (1,540) - lukas.kovalik@umir+ New labsoccce cdcselnde.3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoesesare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio.Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageBased on the analysis of recent customer interactions, Jiminny is currently being utilized as astrategic tool for sales enablement, coaching, and pipeline visibility. Below is a synthesis of thekey themes and trends observed across our accounts.Driving Adoption and CoachingClreorsmanecie masing y moving beyond basic all recording to leverage Jiminy for structured• Manager Accountability: Many teams are shifting coaching responsibility to managers byusing automated nudges to highlight specific calls for review.• Peer-to-Peer Learning: Organizations like Insuritas and PipeDrive are creating "best of"playlists to share successful objection handling and sales techniques, fostering a culture ofreflection.• Self-Coaching: Several clients, including Funding Circle and Edozo, are implementing self-coaching workflows where reps identify their own calls for review based on specific criterialike urgency or discovery quality.CRM Integration and Data AccuracySeamless integration with CRM platforms remains a primary value driver for our users:• Salesforce & HubSpot: Clients such as Productsup, Unbiased, and PostHog prioritize deepCRM integration to ensure Jiminny serves as a single source of truth.• Technical Troubleshooting: We are actively addressing integration challenges, such asSalesforce mapping errors and Notetaker configuration issues, to ensure data flows correctlybetween systems.• Roadmap Requests: There is significant interest in upcoming features, including native Zohointegration and automated field mapping for Salesforce.Strategic Al UtilizationThe "Ask Jiminny" feature and Al-driven insights are becoming central to how teams managetheir pipeline:• Deal Visibility: Users at Specright and Cision are using "Ask Jiminny" at the opportunity levelto generate summaries, identify risks, and determine next steps without needing to listen toevery call....
|
NULL
|
NULL
|
NULL
|
8641
|
|
DiaFilelEditViewTabsBookmarksHistoryExtensionsWind DiaFilelEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)Inbox (2)PlactormSRD-677C Jy 19796(7 Service-oie Font. AmrCostI ChActionshttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=Jy-18909• SminmProjects# AWSE SSHEl DatadogPlatform TeamQ Search board800$ Jy-19240 / A JY-18900READY FOR DEV 1oohenetxwirtexceed Fontawesomepackage limitsMAINTENANCSReady for Dev8-20564 1 П ****=PriorityStor..% JY-20574 [AI) Ensure PDF formatting is good= Medium= Medium% JY-20586 (QA) Manual Testing= Medium|% JY-20626 Integration testing= MediumLinked work itemsis cloned byJY-20458 CRUD reports with Ask. Jiminnyis tested byJY-20649 Generate Ask Jiminny Report and display it in AI ReportsJY-20656 Expired Ask Jiminny report template is automatically disabled an..JY-20660 Non-creator cannot delete generated Ask Jiminny report from A.JY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...JY-20655 Generated Ask Jiminny Report emall is sent to recipients0 JY-20654 Generated Ask Jiminny Reoort is visible onty to creator and shar..JY-20650 Generated Ask Jiminny Report PDF includes branding, report se..* Claude3 Circieci•"*_AssigneeStatus32 Steliyan ...32 Stellyan .-.INOEYVINDEV vAnellya A...& Unassign...READY FOR DEVREADY FOR DEVSDEPLOYEDVDONEDONE MDONEDONE MDONEDONEWUnE9:57 AM Daily - Platform> 0.Daily - Platform • 8 m leftС Ct: DepeActionsDependolD Tasks2 Insights & Coachin• DevDux.© 2In Dev vXz Improve StoryDetailsAssignee3 Stellyan GeorgievReporter2 Galya DimitrovaUeyele enecne@ Open with VS Code|3 branches32 commits1 pull request1 build failing5 days agcUrEhRelessesProductionSee all deploymentsComponentsPlatformSub-ProductAootcons8•• Far DepelGroup: Queriesfix connume ouring toleature fiag releaseDeployed0E -20585 15 * ****=AI Review - Q1CTNIHEWANANORCONDeployed-199821 @ ••=•=No such column "Type' onentty 'Event"PLATTORM STABILITYDeployed2038405 +600=Missed deletion of staleadsese(Pipedrive) DI > status notlabeled, date data missingREDUCE CHURNDeployed0 -20614 05 П ****=D Al BookmarksStefka Stoyanovaiyan GeorgievKovalik12:138 • Tue 14 Apr 9:57:37ChatNikolay YankovNikolay Ivanov...
|
NULL
|
NULL
|
NULL
|
8640
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Close tab
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Close tab
Inbox (1,540) - [EMAIL] - Jiminny Mail
Inbox (1,540) - [EMAIL] - Jiminny Mail
New Tab...
|
Firefox
|
Jiminny — Work
|
app.staging.jiminny.com/ai-reports
|
8639
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Close tab
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Close tab
Inbox (1,540) - [EMAIL] - Jiminny Mail
Inbox (1,540) - [EMAIL] - Jiminny Mail
New Tab...
|
Firefox
|
Jiminny — Work
|
app.staging.jiminny.com/ai-reports
|
8638
|
|
Nikolay Yankov (Presenting)
Nikolay Yankov (Presen Nikolay Yankov (Presenting)
Nikolay Yankov (Presenting)
People
6
Take notes with Gemini
Take notes with Gemini
Gemini
Gemini
Unpin Nikolay Yankov's presentation from your main screen
You can't unmute someone else's presentation
More options for Nikolay Yankov
Zoom in
Open in new window
Enter Full Screen
Pin Stefka Stoyanova to your main screen
Mute Stefka Stoyanova's microphone
More options for Stefka Stoyanova
Stefka Stoyanova
Pin Nikolay Yankov to your main screen
Mute Nikolay Yankov's microphone
More options for Nikolay Yankov
Nikolay Yankov
Pin Steliyan Georgiev to your main screen
You can't unmute someone else
More options for Steliyan Georgiev
Steliyan Georgiev
Pin Nikolay Ivanov to your main screen
You can't unmute someone else
More options for Nikolay Ivanov
Nikolay Ivanov
You’re continuously framed
Backgrounds and effects
More options for Lukas Kovalik
Lukas Kovalik
Others might see more of your background. Click to view your full video.
9:57
AM
Daily - Platform
Daily - Platform
Audio settings
Turn on microphone
Video settings
Turn off camera
Nikolay Yankov is presenting
Send a reaction
Turn on captions
Raise hand (ctrl + ⌘ + h)
More options
Leave call
Meeting details
Chat with everyone
Meeting tools
Your microphone is off.
meet.google.com / Daily - Platform
DiaFileEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)Inbox (2)PlactormSRD-677X(7 (5RD-67)C Jy 19796(7 Service-oie Font. AmrCostI Chhttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=Jy-18909• SminmProjects# AWSE SSHPlatform TeamQ Search board800$ Jy-19240 / A JY-18900READY FOR DEV 1exceed Fontawesomepackage limitsMAINTENANCSReady for Dev8-20564 1 П ****=% JY-20574 [AI) Ensure PDF formatting is good% JY-20586 (QA) Manual Testing% JY-20626 Integration testingLinked work itemsis cloned byJY-20458 CRUD reports with Ask. Jiminnyis tested byEl Datadog3 CircieciPriority= Medium= Medium32 SteliyanSteliyan...= Medii= Medi4h8 UnaDEPLOYEDVDONEJY-20656 Expired Ask Jiminny report template is automatically disabled an.DONE MJY-20660 Non-creator cannot delete generated Ask Jiminny report from A.DONEJY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...DONE MJY-20655 Generated Ask Jiminny Report emall is sent to recipientsDONE0 JY-20654 Generated Ask Jiminny Reoort is visible onty to creator and sihar......DONEJY-20650 Generated Ask Jiminny Report PDF includes branding, report se..WUnEActionsStatusINOEYVINDEV vREADY FOR DEV <READY FOR DEVS9:57 AM Daily - PlatformlaalDaily - Platform - 8 m left100% <478• Tue 14 Apr 9:57:34ChatС Ct: DepeQ Actions sDependolD Tasks7 Insights & Coachin• DelDux.Xz Improve Story~ DetailsAssignee3 Stellyan GeorgievReporter2 Galya DimitrovaUeyele enecne@ Open with VS Code|3 branches32 commits1 pull request1 build failing5 days agcUrEhRelessesProductionSee all deploymentsComponentsPlatformSub-ProductAootcons8•• Far DepelD Al BookmarksGroup: Querieshix connoie ouring toDeployed0 -20ses 15 4 •***=•AI Review - Q1CTNIHEWANANORCONDeployed-19982 1 @ •..•=No such column "Type' onentty 'Event"PLATTORM STABTLITYDeployed8 -20384 05 ^ ****=|Missed deletion of staleadsese(Pipedrive) DI > status notQStefka StoyanovaNikolay YankovSteliyan GeorgievNikolay IvanovKovalik:12:10...
|
Dia
|
Work: Meet - Daily - Pla…
|
NULL
|
8637
|
|
FirefoxFilePlatform Sprint 1 Q2 - Platform Tea@ Jy FirefoxFilePlatform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t(8 Jiminnyg Ask Jiminny test report - 8 Apr 20)M Inbox (1,540) - lukas.kovalik@j+ New labHistoryBookmarksProfilesToolsWindowHelp1lidll.cooele.com nailuo tinoox~ Google Gemini...socc ce Cdcseciie3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.=MailChatMeetare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio.Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiPro vYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageM GmailComposeInboxStarred1,540onoozeeSentDraftsCategoriesÔPurchasesv MoreLabelsApp emailsGithubappextensionvuejs9.449215IvOlesSharedSentryUtilities100,15%Q Ask Gmail_ * Veselin .. sonarqub. 5• # steliyan., sonarqub. 3• & Datadog• # Google 2• # Datadog•# Datadog• # The Jiminny Team_ * Nikolay .. sonarqub. 4• # Datadog• * steliya.. sonarqu. 6• & steliyan., sonarqub. 21• * The Jiminny Team• * ilian-j... sonarqu. 7• & steliya.. claude[. 34• * noreply@salesforce..• # Google 2._ * The Jiminny Team• * Datadog• * Veselin .. sonarqub. 3• * github-actions[bot]• & Twilio_ * stellyan. .. Nikolay 16• * Atlassian• # Atlassian• # Salesforce• # Datadog* Veselin .. sonarqub. 3Enable desktop notifications for Jiminny Mail.OK No thanksI Daily - Platform • 8m leftTue 14 Apr 9:57:24O JIMINNY1-50 of 25,263[jiminny/infrastructure] JY-20627 Increas cpu resources to default and processing-4 workers (PR #729) - Increase max task count on processing-4 to 40 Increase max ta...[jiminny/prophet] JY-20674: panorama pdf add header section (PR #467) - You can view, comment on, or merge this pull request online at: https:/github.com/jiminny/proph…..Your Weekly Digest from Datadog - The Weekly Datadog Digest for Jiminny, Inc. Here's what happened in Datadog since April 06. Metric Alerts 8 Total Events 8 Alerts Recover...Security alert - New sign-in to your account [EMAIL] We noticed a new sign-in to your Google Account. If this was you, you don't need to do anything. If not, .Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 10. Metric Alerts 5 Total Events 5 Alerts Recovered [Recovered] E...Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 09. Metric Alerts 4 Total Events 4 Alerts Recovered [Recovered) D...Your Ask Jiminny test report report is ready - Hi there, Your latest Ask Jiminny Report has been generated and is now available. Report details: Name: Ask Jiminny test report Peri....Por Ask Jiminny tes...[jiminny/prophet] Jy 19798 evaluation for ai activity types (PR #468) - You can view, comment on, or merge this pull request online at: [URL_WITH_CREDENTIALS] - Reply to this email directly, view i...[jiminny/prophet] JY020575: panorama reports absolute links (PR #466) - You can view, comment on, or merge this pull request online at: [URL_WITH_CREDENTIALS] pushed 1 commit. 44d1cad Update swagger-ui to v5.32.2 — View it on GitHu...Your Twilio account has been funded - We charged the Jiminny (Production) Credit Card (**** **** **** 6609) $4965.78 to bring the balance to -$217.51. Visit the Twilio Payme...[jiminny/prophet] JY-19982: Allow multiple LLM evals with one command run (PR #464) - eg python src/eval/eval-ask-anything-chat.py \ --steps run validate evaluate expo...YOur API oken s about to expire - Auassian ou Ap noken sabonco exore a ukas Kovalk YOUAPROKen Wun aoelfest aolexolesae4026-04-070000004UCOAVAre you our Rovo Competition winner!? - Votes are in! Here are your top Rovo demos Top ways real teams are using Rovo Al We asked how your teams are using Rovo Al in ...ATTENTION: Update to Communications for Certificate Rotations - Product & Service Notification What's changing? Salesforce is updating our communication policy regard...Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 06. Metric Alerts 3 Total Events 3 Alerts Recovered [Recovered] D...Ljiminny/intrastructurej Jt-20623 Add sos queue tor panorama reports (PK #(28) - You can view, comment on, or merge this pull request online at: nttps.//gitnub.com/jiml..Lukas Kovalik, here is your weekly update for 7 Apr - Your weekly update Space updates Check on 3 work items without updates for 28 days JY-20584 [BE] Change search report...9.33 AMI8:55 AM1:37AMApr 11Apr 11Apr 10Apr 10Apr 9Apr9Apr 9Apr9Apr 9Apr 9Apr 9Apr 9Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 7Apr 7Apr 7Apr 7...
|
NULL
|
NULL
|
NULL
|
8636
|
|
DiaFileEditViewTabsBookmarksHistoryExtensionsWindo DiaFileEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)Inbox (2)SRD-677XF7 (SRD-67C Jy 19798(7 Service-oir Font. AmrCostI Clhttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=JY-18909• Siminm0 ProjectsE3 SSHEl DatadogPlatform Team200$ Jy-19240 / [ JY-18900READY FOR DEV 1oohesnetixwirtexceed Fontawesomepackage limits(MAINTENANCISReady for Dev8-20564 1 П ****=Priority% JY-20574 [AI) Ensure PDF formatting is good= Medium= Medium% JY-20586 (QA) Manual Testing= Medium% JY-20626 Integration testing= MediumLinked work itemsis cloned byA JY-20458 CRUD reports with Ask Jiminnyis tested byJY-20649 Generate Ask Jiminny Report and display it in AI ReportsJY-20656 Expired Ask Jiminny report template is automatically disabled an.JY-20660 Non-creator cannot delete generated Ask Jiminny report from A…JY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...JY-20655 Generated Ask Jiminny Report emall is sent to recipients0 JY-20654 Generated Ask Jiminny Reoort is visible onty to creator and shar...JY-20650 Generated Ask Jiminny Report PDF includes branding, report se..AssigneeStatus3 Steliyan...i2 Stellyan .-.INDEYVINDEV vAnellya A...READY FOR DEV <& Unassign...READY FOR DEVSDEPLOYEDDONEDONE MDONEDONE MDONEDONEWUnEActions9:57 AM Daily - PlatformlaalDaily - Platform - 8 m left100% <478• Tue 14 Apr 9:57:246ChatCE: DeperDependolD TasksInsights & CoachinD DevD ux.© 2Xz Improve Story~ Details32 Stellyan GeorgievAsentoeReporter2 Galya DimitrovaUeyele encne@ Open with VS Code|3 branches32 commits1 pull request1 build failing5 days agcRelessesProduction2 See all deploymentsComponentsPlatformSub-ProductAoorcons8•• Far DepeGroup: QueriesDEPLOY &leature fiag reieaseDeployed0 -20585 15 * ****=AI Review - Q1CTNMEUANANORCORDeployed_19982 1 0 •***=No such column "Type' onentty 'Event"PLATTORM STABTUTTYDeployed9 -20384 05 ^ ****=|Missed deletion of stale408099(Pipedrive] DI > status notlabeled, date data missingREDUCE CHURNDeployed0 -20614 05 П ****=:D Al BookmarksStefka StoyanovaNikolay Yankovan GeorgievNikolay Ivanovovalik12:00...
|
NULL
|
NULL
|
NULL
|
8635
|
|
Nikolay Yankov (Presenting)
Nikolay Yankov (Presen Nikolay Yankov (Presenting)
Nikolay Yankov (Presenting)
People
6
Take notes with Gemini
Take notes with Gemini
Gemini
Gemini
Zoom in
Open in new window
Enter Full Screen
Stefka Stoyanova...
|
Dia
|
Work: Meet - Daily - Pla…
|
NULL
|
8634
|
|
Nikolay Yankov (Presenting)
Nikolay Yankov (Presen Nikolay Yankov (Presenting)
Nikolay Yankov (Presenting)
People
6
Take notes with Gemini
Take notes with Gemini
Gemini
Gemini
Zoom in
Open in new window
Enter Full Screen
Stefka Stoyanova...
|
Dia
|
Work: Meet - Daily - Pla…
|
NULL
|
8633
|
|
FirefoxFileEoitViewHistoryBookmarksPlatform Sprint FirefoxFileEoitViewHistoryBookmarksPlatform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t(8 Jiminnyg Ask Jiminny test report - 8 Apr 20)M Inbox (1,540) - lukas.kovalik@j+ New labProfilesToolsWindowHelp1dll.eooele.com nalluo tinoox~ Google Geminisocc ce Cdcseciie3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses=MailChatMeetare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio.Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiPro vYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageM GmailComposeInboxStarred1,540onoozeeSentDraftsCategoriesPurchasesv MoreLabelsApp emailsGithubappextensionvuejs9.449215IvOlesSharedSentryUtilities100,15%Q Ask Gmail_ * Veselin .. sonarqub. 5• # steliyan., sonarqub. 3• & Datadog• # Google 2• # Datadog•# Datadog• # The Jiminny Team_ * Nikolay .. sonarqub. 4• # Datadog• * steliya.. sonarqu. 6• & steliyan., sonarqub. 21• * The Jiminny Team• * ilian-j... sonarqu. 7• & steliya.. claude[. 34• * noreply@salesforce..• # Google 2._ * The Jiminny Team• * Datadog• * Veselin .. sonarqub. 3• * github-actions[bot]• & Twilio_ * stellyan. .. Nikolay 16• * Atlassian• # Atlassian• # Salesforce• # Datadog* Veselin .. sonarqub. 3Enable desktop notifications for Jiminny Mail.OK No thankshhl| Daily - Platform • 8m leftA100% C8 • Tue 14 Apr 9:57:21*O JIMINNY1-50 of 25,263[jiminny/infrastructure] JY-20627 Increas cpu resources to default and processing-4 workers (PR #729) - Increase max task count on processing-4 to 40 Increase max ta...[jiminny/prophet] JY-20674: panorama pdf add header section (PR #467) - You can view, comment on, or merge this pull request online at: https:/github.com/jiminny/proph…..Your Weekly Digest from Datadog - The Weekly Datadog Digest for Jiminny, Inc. Here's what happened in Datadog since April 06. Metric Alerts 8 Total Events 8 Alerts Recover...Security alert - New sign-in to your account [EMAIL] We noticed a new sign-in to your Google Account. If this was you, you don't need to do anything. If not, .Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 10. Metric Alerts 5 Total Events 5 Alerts Recovered [Recovered] E...Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 09. Metric Alerts 4 Total Events 4 Alerts Recovered [Recovered] D...Your Ask Jiminny test report report is ready - Hi there, Your latest Ask Jiminny Report has been generated and is now available. Report details: Name: Ask Jiminny test report Peri....Por Ask Jiminny tes...[jiminny/prophet] Jy 19798 evaluation for ai activity types (PR #468) - You can view, comment on, or merge this pull request online at: [URL_WITH_CREDENTIALS] - Reply to this email directly, view i...[jiminny/prophet] JY020575: panorama reports absolute links (PR #466) - You can view, comment on, or merge this pull request online at: [URL_WITH_CREDENTIALS] pushed 1 commit. 44d1cad Update swagger-ui to v5.32.2 — View it on GitHu...Your Twilio account has been funded - We charged the Jiminny (Production) Credit Card (**** **** **** 6609) $4965.78 to bring the balance to -$217.51. Visit the Twilio Payme...[jiminny/prophet] JY-19982: Allow multiple LLM evals with one command run (PR #464) - eg python src/eval/eval-ask-anything-chat.py \ --steps run validate evaluate expo...YOur API oken s about to expire - Auassian ou Ap noken sabonco exore a ukas Kovalk YOUAPROKen Wun aoelfest aolexolesae4026-04-070000004UCOAVAre you our Rovo Competition winner!? - Votes are in! Here are your top Rovo demos Top ways real teams are using Rovo Al We asked how your teams are using Rovo Al in ...ATTENTION: Update to Communications for Certificate Rotations - Product & Service Notification What's changing? Salesforce is updating our communication policy regard...Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 06. Metric Alerts 3 Total Events 3 Alerts Recovered [Recovered] D...[jiminny/infrastructure] JY-20623 Add SQS queue for panorama reports (PR #728) - You can view, comment on, or merge this pull request online at: https://github.com/jimi...Lukas Kovalik, here is your weekly update for 7 Apr - Your weekly update Space updates Check on 3 work items without updates for 28 days JY-20584 [BE] Change search report...9.33 AM8:55 AM1:37AMApr 11Apr 11Apr 10Apr 10Apr 9Apr9Apr 9Apr9Apr 9Apr 9Apr 9Apr 9Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 7Apr 7Apr 7Apr 7...
|
NULL
|
NULL
|
NULL
|
8632
|
|
FirefoxFileEoitViewHistoryBookmarksPlatform Sprint FirefoxFileEoitViewHistoryBookmarksPlatform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t(8 Jiminnyg Ask Jiminny test report - 8 Apr 201M Inbox (1,540) - lukas.kovalik@j+ New labProfilesToolsWindowHelp1dll.eoocle.com nalluo tinoox~ Google Geminisocc ce Cdcseciie3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringOeses=MailChatMeetare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio.Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiPro vYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne about peodleYour privacy & GeminiSummarize pageM GmailComposeInboxStarred1,540onoozeeSentDrafts+DCategoriesPurchasesv MoreLabelsApp emailsGithubappextensionvuejs9.449215JKHIvOlesSharedSentryUtilities100,15%Q Ask Gmail_ * Veselin .. sonarqub. 5• # steliyan., sonarqub. 3• * Datadog• # Google 2• # Datadog•# Datadog#:0 # The Jiminny Team_ * Nikolay .. sonarqub. 4• # Datadog• * steliya.. sonarqu. 6• & steliyan., sonarqub. 21• * The Jiminny Team• * ilian-j... sonarqu. 7• & steliya.. claude[. 34• * noreply@salesforce..• # Google 2._ * The Jiminny Team• * Datadog• * Veselin .. sonarqub. 3• * github-actions[bot]• & Twilio_ * stellyan. .. Nikolay 16• * Atlassian• # Atlassian• # Salesforce• # Datadog* Veselin .. sonarqub. 3Enable desktop notifications for Jiminny Mail.OK No thankshhl| Daily - Platform • 8m leftA100% C8 • Tue 14 Apr 9:57:15*O JIMINNY1-50 of 25,263[jiminny/infrastructure] JY-20627 Increas cpu resources to default and processing-4 workers (PR #729) - Increase max task count on processing-4 to 40 Increase max ta...[jiminny/prophet] JY-20674: panorama pdf add header section (PR #467) - You can view, comment on, or merge this pull request online at: https:/github.com/jiminny/proph…..Your Weekly Digest from Datadog - The Weekly Datadog Digest for Jiminny, Inc. Here's what happened in Datadog since April 06. Metric Alerts 8 Total Events 8 Alerts Recover...Security alert - New sign-in to your account [EMAIL] We noticed a new sign-in to your Google Account. If this was you, you don't need to do anything. If not, .Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 10. Metric Alerts 5 Total Events 5 Alerts Recovered [Recovered] E...Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 09. Metric Alerts 4 Total Events 4 Alerts Recovered [Recovered] D...Your Ask Jiminny test report report is ready - Hi there, Your latest Ask Jiminny Report has been generated and is now available. Report details: Name: Ask Jiminny test r...Eal Ask Jiminny tes,[jiminny/prophet] Jy 19798 evaluation for ai activity types (PR #468) - You can view, comment on, or merge this pull request online at: [URL_WITH_CREDENTIALS] - Reply to this email directly, view i...[jiminny/prophet] JY020575: panorama reports absolute links (PR #466) - You can view, comment on, or merge this pull request online at: [URL_WITH_CREDENTIALS] pushed 1 commit. 44d1cad Update swagger-ui to v5.32.2 — View it on GitHu...Your Twilio account has been funded - We charged the Jiminny (Production) Credit Card (**** **** **** 6609) $4965.78 to bring the balance to -$217.51. Visit the Twilio Payme...[jiminny/prophet] JY-19982: Allow multiple LLM evals with one command run (PR #464) - eg python src/eval/eval-ask-anything-chat.py \ --steps run validate evaluate expo...YOur API oken s about to expire - Auassian ou Ap noken sabonco exore a ukas Kovalk YOUAPROKen Wun aoelfest aolexolesae4026-04-070000004UCOAVAre you our Rovo Competition winner!? - Votes are in! Here are your top Rovo demos Top ways real teams are using Rovo Al We asked how your teams are using Rovo Al in ...ATTENTION: Update to Communications for Certificate Rotations - Product & Service Notification What's changing? Salesforce is updating our communication policy regard...Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 06. Metric Alerts 3 Total Events 3 Alerts Recovered [Recovered] D...[jiminny/infrastructure] JY-20623 Add SQS queue for panorama reports (PR #728) - You can view, comment on, or merge this pull request online at: https://github.com/jimi...Lukas Kovalik, here is your weekly update for 7 Apr - Your weekly update Space updates Check on 3 work items without updates for 28 days JY-20584 [BE] Change search report...9.33 AM8:55 AM1:37AMApr 11Apr 11Apr 10Apr 9Apr9Apr 9Apr9Apr 9Apr 9Apr 9Apr 9Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 8Apr 7Apr 7Apr 7Apr 7...
|
NULL
|
NULL
|
NULL
|
8631
|
|
FirefoxFileEoitViewHistoryBookmarksPlatform Sprint FirefoxFileEoitViewHistoryBookmarksPlatform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t(8 Jiminnyg Ask Jiminny test report - 8 Apr 20:Inbox 1,540) - lukas.kovallk@+ New labProfilesToolsWindowHelp1dll.eooele.com nalluo tinoox~ Google Geminisocc ce Cdcseciie3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses=MailChatMeetare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await s asession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio.Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiPro vYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageM GmailComposeInboxStarredonoozeeSentDraftsCategoriesPurchasesv MoreLabelsApp emailsGithubappextensionvuejsIvOlesSharedSentryUtilities1,5407389.449215100,15%C Ask GmailH0• & TwilioAand hAll Hhddl1b. 3D.4b.2ce..b. 3• * Atlassian• # Atlassian• # Salesforce• * Datadog• * Veselin... sonarqub. 3• # Jira>0 hbl© | Daily - Platform • 8m leftA100% C•8• Tue 14 Apr 9:57:03O JIMINNY1-50 01 6.2631[jiminny/infrastructure] JY-20627 Increas cpu resources to default and processing-4 workers (PR #729) - Increase max task count on processing-4 to 40 Increase max ta...[jiminny/prophet] JY-20674: panorama pdf add header section (PR #467) - You can view, comment on, or merge this pull request online at: https:/github.com/jiminny/proph…..Your Weekly Digest from Datadog - The Weekly Datadog Digest for Jiminny, Inc. Here's what happened in Datadog since April 06. Metric Alerts 8 Total Events 8 Alerts Recover...Security alert - New sign-in to your account [EMAIL] We noticed a new sign-in to your Google Account. If this was you, you don't need to do anything. If not, .Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 10. Metric Alerts 5 Total Events 5 Alerts Recovered [Recovered] E...Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 09. Metric Alerts 4 Total Events 4 Alerts Recovered [Recovered] D...Your Ask Jiminny test report report is ready - Hi there, Your latest Ask Jiminny Report has been generated and is now available. Report details: Name: Ask Jiminny test report Peri....PE Ask Jiminny tes...[jiminny/prophet] Jy 19798 evaluation for ai activity types (PR #468) - You can view, comment on, or merge this pull request online at: [URL_WITH_CREDENTIALS] - Reply to this email directly, view i...[jiminny/prophet] JY020575: panorama reports absolute links (PR #466) - You can view, comment on, or merge this pull request online at: [URL_WITH_CREDENTIALS] pushed 1 commit. 44d1cad Update swagger-ui to v5.32.2 — View it on GitHu...Your Twilio account has been funded - We charged the Jiminny (Production) Credit Card (**** **** **** 6609) $4965.78 to bring the balance to -$217.51. Visit the Twilio Payme...[jiminny/prophet] JY-19982: Allow multiple LLM evals with one command run (PR #464) - eg python src/eval/eval-ask-anything-chat.py \ --steps run validate evaluate expo...YOur API oken s about to expire - Auassian ou Ap noken sabonco exore a ukas Kovalk YOUAPROKen Wun aoelfest aolexolesae4026-04-070000004UCOAVAre you our Rovo Competition winner!? - Votes are in! Here are your top Rovo demos Top ways real teams are using Rovo Al We asked how your teams are using Rovo Al in ...ATTENTION: Update to Communications for Certificate Rotations - Product & Service Notification What's changing? Salesforce is updating our communication policy regard...Your Daily Digest from Datadog - The Daily Digest for Jiminny, Inc. Here's what happened in Datadog on April 06. Metric Alerts 3 Total Events 3 Alerts Recovered [Recovered] D...Ljiminny/intrastructurej Jt-20623 Add sos queue tor panorama reports (PK #(28) - You can view, comment on, or merge this pull request online at: nttps.//gitnub.com/jiml..Lukas Kovalik, here is your weekly update for 7 Apr - Your weekly update Space updates Check on 3 work items without updates for 28 days JY-20584 [BE] Change search report...9.33 AMI8:55 AM1:37AMApr 11Apr 11Apr 10Apr 10Apr 9Apr9Apr 9Apr9Apr 9Apr9Apr 9Apr 8Apr 8Apr 8Apr oApr 8Apr 8Apr 8Apr 8Apr 8Apr 7Apr 7Apr 7Apr 7...
|
NULL
|
NULL
|
NULL
|
8630
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Jiminny Mail
Jiminny Mail
Close tab
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:
call/ai-activity-type
.
COMPARE_TO_GROUND_TRUTH
Determines if the baseline is the original data (
es
) or an
llm
.
SKIP_UNREPLAYABLE_ON_COLLECT
If
True
, discards records where the API returns "Cannot find activity."
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Limits the probe request to 20 seconds.
Constant
ENDPOINT_PATH
COMPARE_TO_GROUND_TRUTH
SKIP_UNREPLAYABLE_ON_COLLECT
REPLAYABILITY_CHECK_TIMEOUT_SECONDS...
|
Firefox
|
Jiminny Mail — Work
|
mail.google.com/mail/u/0/#inbox
|
8629
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindowHelpSearch with Google or enter addressImport bookmarks...Sprint BoardT SRD QueueGithuh~ Google GeminiPlatform Sprint 1 Q2 - Platform Tea( Jy 19798 evaluation for ai activity t8 Jiminny8 Ask Jiminny test report - 8 Apr 202New Tab+ New Tabselecteu uataset name.3. Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringphases.are there soem issueswith this PRshow thinkingYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es , the code loopsthrough raw_samples and awaits asession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather()(ideally wrapped in anohore to avoidi• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageFirefoxSearch with Google or enter addressPlatform Sprint1 Q2 - Platfor...Inbox (1,539) - [EMAIL] - Jiminny MailInbox (1,539)-Meet - Daily -JY-20543 addJiminnyWikipediaYouTubeDaily - Platform • 9m left100% CD8•Tue 14 Apr 9:56:59Ö:22°CNew York CityReddit...
|
NULL
|
NULL
|
NULL
|
8628
|
|
DiaFilelEditViewTabsBookmarksHistoryExtensionsWind DiaFilelEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)Inbox (2)PlactormSRD-677C Jy 19798(7 Service-oir Font. AmrCostI Clhttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=JY-18909• Siminm0 ProjectsB AWSE3 SSHEl DatadogPlatform TeamQ Search board200$ Jy-19240 / [ JY-18900READY FOR DEV 1oohesnetixwirtexceed Fontawesomepackage limits(MAINTENANCISReady for Dev8-20564 1 П ****=PriorityStor..% JY-20574 [AI) Ensure PDF formatting is good= Medium= Medium% JY-20586 (QA) Manual Testing= Medium% JY-20626 Integration testing= MediumLinked work itemsis cloned byJY-20458 CRUD reports with Ask. Jiminnyis tested byJY-20649 Generate Ask Jiminny Report and display it in AI ReportsJY-20656 Expired Ask Jiminny report template is automatically disabled an.JY-20660 Non-creator cannot delete generated Ask Jiminny report from A…JY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...JY-20655 Generated Ask Jiminny Report emall is sent to recipients0 JY-20654 Generated Ask Jiminny Reoort is visible onty to creator and shar...JY-20650 Generated Ask Jiminny Report PDF includes branding, report se..AssigneeStatus3 Steliyan...i2 Stellyan .-.INDEYVINDEV vAnellya A...READY FOR DEV& Unassign...READY FOR DEVSDEPLOYEDDONEDONEDONEDONE MDONEDONEWUnEActions9:56 AM | Daily - Platform•10> 0.Daily - Platform • 9 m left8•CE: DepeDependol• Far DepeD Tasks2 Insights & CoachinD DevD ux.©2|In Dev vXz Improve Story~ Details32 Stellyan GeorgievAsuntoeReporter2 Galya DimitrovaUeyele encne@ Open with VS Code|3 branches32 commits1 pull request1 build failingRelessesProduction2 See all deploymentsComponentsPlatformSub-ProductAoorcons5 days agcD Al BookmarksGroup: QueriesDEPLOY &leature fiag releaseDeployed0 -20585 15 * ****=AI Review - Q1CTNMEUANANORCORDeployed_19982 1 0 •***=No such column "Type' onentty 'Event"PLATTORM STABTUTTYDeployed9 -20384 05 П ****=Missed deletion of stale0 m400R 4.0000(Pipedrive] DI > status notlabeled, date data missingREDUCE CHURNDeployed0 -20614 05 П ****=:Stefka StoyanovaSteliyan Georgievalik11:358 • Tue 14 Apr 9:56:59ChatNikolay YankovNikolay Ivanov...
|
NULL
|
NULL
|
NULL
|
8627
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindowHelpSearch with Google or enter address→ Import bookmarks..Sprint BoardTSRD QueueGithuh~ Google GeminiPlatform Sprint 1 Q2 - Platform Tea( Jy 19798 evaluation for ai activity t8 Jiminny8 Ask Jiminny test report - 8 Apr XNew Tab+ New Tabselecteu uataset name.Run: Executes the evaluation viaAsk liminnv test renort - 8 Aor 2026 - Ask|Jiminny test report - 13 Aor 2026,001p(...)).ion,oringssueswith this PRshow thinkingYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es , the code loopsthrough raw_samples and awaits asession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather()(ideally wrapped in anohore to avoidi• Enter a prompt for GeminiPro vYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageFirefoxPlatform Sprint1 Q2 - Platfor...Search with Google or enter addressMInbox (1,539)-Meet - Daily -PlatformJY-20543 addAJ reports...JiminnyWikipediaYouTubeDaily - Platform • 9m left100% C8•Tue 14 Apr 9:56:57Ö:22°CNew York CityReddit...
|
NULL
|
NULL
|
NULL
|
8626
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Close tab
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:...
|
Firefox
|
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf — Work...
|
NULL
|
8625
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Close tab
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM...
|
Firefox
|
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf — Work...
|
NULL
|
8624
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Close tab
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:
call/ai-activity-type
.
COMPARE_TO_GROUND_TRUTH
Determines if the baseline is the original data (
es
) or an
llm
.
SKIP_UNREPLAYABLE_ON_COLLECT
If
True
, discards records where the API returns "Cannot find activity."
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Limits the probe request to 20 seconds.
Constant
ENDPOINT_PATH
COMPARE_TO_GROUND_TRUTH
SKIP_UNREPLAYABLE_ON_COLLECT
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Purpose
The specific API route being tested:
call/ai-activity-type
.
Determines if the baseline is the original data (
es
) or an
llm
.
If
True
, discards records where the API returns "Cannot find activity."
Limits the probe request to 20 seconds.
Copy table
Execution Workflow
Execution Workflow
Parsing:
The script uses
create_parser
to handle CLI arguments like
--data-set
,
--populate-ground-truth-llm
, and
--compare-to-ground-truth
.
Initialization:
Sets up directory paths for inputs and ground truths based on the selected dataset name.
Run:
Executes the evaluation via
asyncio.run(run_evaluation(...))
, which orchestrates the collection, endpoint calling, and result scoring phases.
Copy prompt
Edit
You said are there soem issues with this PR
You said
are there soem issues with this PR
Listen
Show more options
Show thinking
Gemini said
Gemini said
Yes, there are several issues and code smells in this Pull Request, ranging from performance bottlenecks to architectural anti-patterns.
Here is a breakdown of the most notable issues, categorized by impact:
Performance & Async Anti-Patterns
Performance & Async Anti-Patterns
Sequential Requests Defeating Async (Lines 86-106):
Inside
_collect_from_es
, the code loops through
raw_samples
and
await
s a
session.post
request for each sample one by one. This means the script waits for one HTTP request to finish before starting the next, completely defeating the purpose of using
aiohttp
for asynchronous concurrency.
Fix:
Collect these coroutines into a list and use
asyncio.gather()
(ideally wrapped in an
asyncio.Semaphore
to avoid overwhelming the endpoint) to run the replayability probes concurrently.
Blocking File I/O in Async Functions (Lines 110-114, 184-188, 268-269):
Standard synchronous file operations (
with open(...)
,
json.dump
,
json.load
) are used inside
async def
functions. In Python, this blocks the entire event loop while the disk is reading/writing.
Fix:
Use an asynchronous file I/O library like
aiofiles
, or run the blocking I/O operations in a separate thread using
asyncio.to_thread()
.
Architecture & State Management
Architecture & State Management
Mutating Class-Level Variables (Lines 323-328):
The script parses CLI arguments and directly mutates the class attributes (e.g.,
AiActivityTypeEvaluator.DATA_SET = args.data_set
). This is a major anti-pattern. If you ever try to run tests in parallel or create multiple instances of this evaluator in a single process, the state will bleed across them.
Fix:
Pass these arguments into the
__init__
constructor of the instance (
self.data_set = args.data_set
) rather than modifying the class definition globally.
Code Quality & Edge Cases
Code Quality & Edge Cases
Broad Exception Catching (Line 107):
Using a bare
except Exception:
during the replayability probe is dangerous. It will catch
aiohttp.ClientError
(which is intended), but it will also silently swallow
TimeoutError
or even memory errors, making debugging incredibly difficult if the endpoint goes down.
Fix:
Scope the exception to specific expected errors, like
except (aiohttp.ClientError, asyncio.TimeoutError):
.
Inefficient Size Calculation (Line 95):...
|
Firefox
|
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf — Work...
|
app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d0 app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d097-495b-8dfd-d4c25a5e8843...
|
8623
|
|
DiaFilelEditViewTabsBookmarksHistoryExtensionsWind DiaFilelEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)Inbox (2)PlactormSRD-677C Jy 19798(7 Service-oir Font. AmrCostI Clhttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=JY-18909• Siminm0 ProjectsB AWSE3 SSHEl DatadogPlatform TeamQ Search board200$ Jy-19240 / [ JY-18900READY FOR DEV 1oohesnetixwirtexceed Fontawesomepackage limits(MAINTENANCISReady for Dev8-20564 1 П ****=PriorityStor..% JY-20574 [AI) Ensure PDF formatting is good= Medium= Medium% JY-20586 (QA) Manual Testing= Medium% JY-20626 Integration testing= MediumLinked work itemsis cloned byA JY-20458 CRUD reports with Ask Jiminnyis tested byJY-20649 Generate Ask Jiminny Report and display it in AI ReportsJY-20656 Expired Ask Jiminny report template is automatically disabled an.JY-20660 Non-creator cannot delete generated Ask Jiminny report from A…JY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...JY-20655 Generated Ask Jiminny Report emall is sent to recipients0 JY-20654 Generated Ask Jiminny Reoort is visible onty to creator and shar...JY-20650 Generated Ask Jiminny Report PDF includes branding, report se..AssigneeStatus3 Steliyan...i2 Stellyan .-.INDEYVINDEV vAnellya A...READY FOR DEV& Unassign...READY FOR DEVSDEPLOYEDDONEDONE MDONEDONE MDONEDONEWUnEActions9:56 AM | Daily - Platform> 0.Daily - Platform • 9 m left8•CE: DepeDependol• Far DepeD TasksInsights & CoachinD DevD ux.©2|In Dev vXz Improve Story~ Details32 Stellyan GeorgievAsuntoeReporter2 Galya DimitrovaUeyele encne@ Open with VS Code|3 branches32 commits1 pull request1 build failingRelessesProduction2 See all deploymentsComponentsPlatformSub-ProductAoorcons5 days agcD Al BookmarksGroup: QueriesDEPLOY &leature fiag releaseDeployed0 -20585 15 * ****=AI Review - Q1CTNMEUANANORCORDeployed_19982 1 0 •***=No such column "Type' onentty 'Event"PLATTORM STABTUTTYDeployed9 -20384 05 ^ ****=|Missed deletion of stale408099(Pipedrive] DI > status notlabeled, date data missingREDUCE CHURNDeployed0 -20614 05 П ****=:Stefka Stoyanovateliyan Georgievalik11:168 • Tue 14 Apr 9:56:40ChatNikolay YankovNikolay Ivanov...
|
NULL
|
NULL
|
NULL
|
8622
|
|
DiaFilelEditViewTabsBookmarksHistoryExtensionsWind DiaFilelEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)Inbox (2)PlactormSRD-677C Jy 19798(7 Service-oir Font. AmrCostI Clhttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=JY-18909• Siminm0 ProjectsB AWSE3 SSHEl DatadogPlatform TeamQ Search board200$ Jy-19240 / [ JY-18900READY FOR DEV 1oohesnetixwirtexceed Fontawesomepackage limits(MAINTENANCISReady for Dev8-20564 1 П ****=PriorityStor..% JY-20574 [AI) Ensure PDF formatting is good= Medium= Medium% JY-20586 (QA) Manual Testing= Medium% JY-20626 Integration testing= MediumLinked work itemsis cloned byA JY-20458 CRUD reports with Ask Jiminnyis tested byJY-20649 Generate Ask Jiminny Report and display it in AI ReportsJY-20656 Expired Ask Jiminny report template is automatically disabled an.JY-20660 Non-creator cannot delete generated Ask Jiminny report from A…JY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...JY-20655 Generated Ask Jiminny Report emall is sent to recipients0 JY-20654 Generated Ask Jiminny Reoort is visible onty to creator and shar...JY-20650 Generated Ask Jiminny Report PDF includes branding, report se..AssigneeStatus3 Steliyan...i2 Stellyan .-.INDEYVINDEV vAnellya A...READY FOR DEV& Unassign...READY FOR DEVSDEPLOYEDDONEDONE MDONEDONE MDONEDONEWUnEActions9:56 AM | Daily - Platform> 0.Daily - Platform • 9 m left8•CE: DepeDependol• Far DepeD TasksInsights & CoachinD DevD ux.©2|In Dev vXz Improve Story~ Details32 Stellyan GeorgievAsuntoeReporter2 Galya DimitrovaUeyele encne@ Open with VS Code|3 branches32 commits1 pull request1 build failingRelessesProduction2 See all deploymentsComponentsPlatformSub-ProductAoorcons5 days agcD Al BookmarksGroup: QueriesDEPLOY &leature fiag releaseDeployed0 -20585 15 * ****=AI Review - Q1CTNMEUANANORCORDeployed_19982 1 0 •***=No such column "Type' onentty 'Event"PLATTORM STABTUTTYDeployed9 -20384 05 ^ ****=|Missed deletion of stale408099(Pipedrive] DI > status notlabeled, date data missingREDUCE CHURNDeployed0 -20614 05 П ****=:Stefka Stoyanovateliyan Georgievalik11:128 • Tue 14 Apr 9:56:36ChatNikolay YankovNikolay Ivanov...
|
NULL
|
NULL
|
NULL
|
8621
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf
Close tab
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:
call/ai-activity-type
.
COMPARE_TO_GROUND_TRUTH
Determines if the baseline is the original data (
es
) or an
llm
.
SKIP_UNREPLAYABLE_ON_COLLECT
If
True
, discards records where the API returns "Cannot find activity."
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Limits the probe request to 20 seconds.
Constant
ENDPOINT_PATH
COMPARE_TO_GROUND_TRUTH
SKIP_UNREPLAYABLE_ON_COLLECT
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Purpose
The specific API route being tested:
call/ai-activity-type
.
Determines if the baseline is the original data (
es
) or an
llm
.
If
True
, discards records where the API returns "Cannot find activity."
Limits the probe request to 20 seconds.
Copy table
Execution Workflow
Execution Workflow
Parsing:
The script uses
create_parser
to handle CLI arguments like
--data-set
,
--populate-ground-truth-llm
, and
--compare-to-ground-truth
.
Initialization:
Sets up directory paths for inputs and ground truths based on the selected dataset name.
Run:
Executes the evaluation via
asyncio.run(run_evaluation(...))
, which orchestrates the collection, endpoint calling, and result scoring phases.
Copy prompt
Edit
You said are there soem issues with this PR
You said
are there soem issues with this PR
Listen
Show more options
Show thinking
Gemini said
Gemini said
Yes, there are several issues and code smells in this Pull Request, ranging from performance bottlenecks to architectural anti-patterns.
Here is a breakdown of the most notable issues, categorized by impact:
Performance & Async Anti-Patterns
Performance & Async Anti-Patterns
Sequential Requests Defeating Async (Lines 86-106):
Inside
_collect_from_es
, the code loops through
raw_samples
and
await
s a
session.post
request for each sample one by one. This means the script waits for one HTTP request to finish before starting the next, completely defeating the purpose of using
aiohttp
for asynchronous concurrency.
Fix:
Collect these coroutines into a list and use
asyncio.gather()
(ideally wrapped in an
asyncio.Semaphore
to avoid overwhelming the endpoint) to run the replayability probes concurrently.
Blocking File I/O in Async Functions (Lines 110-114, 184-188, 268-269):
Standard synchronous file operations (
with open(...)
,
json.dump
,
json.load
) are used inside
async def
functions. In Python, this blocks the entire event loop while the disk is reading/writing.
Fix:
Use an asynchronous file I/O library like
aiofiles
, or run the blocking I/O operations in a separate thread using
asyncio.to_thread()
.
Architecture & State Management
Architecture & State Management
Mutating Class-Level Variables (Lines 323-328):
The script parses CLI arguments and directly mutates the class attributes (e.g.,
AiActivityTypeEvaluator.DATA_SET = args.data_set
). This is a major anti-pattern. If you ever try to run tests in parallel or create multiple instances of this evaluator in a single process, the state will bleed across them.
Fix:
Pass these arguments into the
__init__
constructor of the instance (
self.data_set = args.data_set
) rather than modifying the class definition globally.
Code Quality & Edge Cases
Code Quality & Edge Cases
Broad Exception Catching (Line 107):
Using a bare
except Exception:
during the replayability probe is dangerous. It will catch
aiohttp.ClientError
(which is intended), but it will also silently swallow
TimeoutError
or even memory errors, making debugging incredibly difficult if the endpoint goes down.
Fix:
Scope the exception to specific expected errors, like
except (aiohttp.ClientError, asyncio.TimeoutError):
.
Inefficient Size Calculation (Line 95):
len(json.dumps(request_body)) < min_size
requires serializing the entire dictionary to a string
just
to check its length. Since
min_size
defaults to
0
, the
if min_size
short-circuit saves it for now, but if anyone sets a minimum size, this will become an unnecessary CPU bottleneck on large datasets.
Potential Path Traversal Risk (Lines 110, 113, 251):
The script writes and reads files directly using
f"{es_id}.json"
and
f"gt_{req_id}.json"
. If
es_id
or
req_id
originate from a database and ever contain characters like
../
, it could result in files being read from or written to unintended directories.
Fix:
Sanitize the IDs or strictly enforce that they only contain alphanumeric characters.
Would you like me to rewrite the
_collect_from_es
method to show how to fix the sequential async loop and blocking file I/O?
Enter a prompt for Gemini
encrypted
Enter a prompt for Gemini
encrypted
Open upload file menu
Tools...
|
Firefox
|
Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny Ask Jiminny test report - 8 Apr 2026 - Ask Jiminny test report - 13 Apr 2026.pdf — Work...
|
app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d0 app.staging.jiminny.com/ai-reports/pdf/e2f4890a-d097-495b-8dfd-d4c25a5e8843...
|
8620
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpapp.staging.jiminny.com/ai-reports/pdf/e2f4890a-d097-495b-8dfd-d4c25a5e8843libd• | Daily - Platform • 9m left100% Ca8 • Tue 14 Apr 9:56:11~ Google GeminiPlatform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t(8 Jiminny•app.staging.minny.com/ai-rep+ New Tabsocc ce Cdcseciie3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.are there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio.Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize page...
|
NULL
|
NULL
|
NULL
|
8619
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpdoo.sidemne.minny.com/dl-teoorls~ Google Gemini.. XC< 40Wl © | Daily -Platform 9mleft A 100%Ca &• Tue 14 Apr 9:56:10Platform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t8 Jiminny+ New labsocc ce Cdcseciie3. Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.Al Reports• Report nameNAME -Ask Jiminny Test Report - 8 Apr 2026S03 Ask Jiminny reportsPeriodiReport Type10• Clear all |FREQUENCY+DailySHAREDICCanDATEY09/04/9016PrevieiNsP C шare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio. Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageGapp.staging.jiminny.com/ai-...
|
NULL
|
NULL
|
NULL
|
8618
|
|
DiaFilelEditViewTabsBookmarksHistoryExtensionsWind DiaFilelEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)Inbox (2)PlactormSRD-677C Jy 19798(7 Service-oir Font. AmrCostI Clhttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=JY-18909• Siminm0 ProjectsB AWSE3 SSHEl DatadogPlatform TeamQ Search board200$ Jy-19240 / [ JY-18900READY FOR DEV 1oohesnetixwirtexceed Fontawesomepackage limits(MAINTENANCISReady for Dev8-20564 1 П ****=PriorityStor..% JY-20574 [AI) Ensure PDF formatting is good= Medium= Medium% JY-20586 (QA) Manual Testing= Medium% JY-20626 Integration testing= MediumLinked work itemsis cloned byA JY-20458 CRUD reports with Ask Jiminnyis tested byJY-20649 Generate Ask Jiminny Report and display it in AI ReportsJY-20656 Expired Ask Jiminny report template is automatically disabled an.JY-20660 Non-creator cannot delete generated Ask Jiminny report from A…JY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...JY-20655 Generated Ask Jiminny Report emall is sent to recipients0 JY-20654 Generated Ask Jiminny Reoort is visible onty to creator and shar...JY-20650 Generated Ask Jiminny Report PDF includes branding, report se..AssigneeStatus3 Steliyan...i2 Stellyan .-.INDEYVINDEV vAnellya A...READY FOR DEV& Unassign...READY FOR DEVSDEPLOYEDDONEDONEDONEDONE MDONEDONEWUnEActions9:56 AM | Daily - Platform> 0.Daily - Platform - 9m left8•CE: DepeDependol• Far DepeD TasksInsights & CoachinD DevD ux.©2|In Dev vXz Improve Story~ Details32 Stellyan GeorgievAsentoeReporter2 Galya DimitrovaUeyele encne@ Open with VS Code|3 branches32 commits1 pull request1 build failingRelessesProduction2 See all deploymentsComponentsPlatformSub-ProductAoorcons5 days agcD Al BookmarksGroup: QueriesDEPLOY &leature fiag releaseDeployed0 -20585 15 * ****=AI Review - Q1CTNMEUANANORCORDeployed_19982 1 0 •***=No such column "Type' onentty 'Event"PLATTORM STABTUTTYDeployed9 -20384 05 ^ ****=|Missed deletion of stalea080se(Pipedrive] DI > status notlabeled, date data missingREDUCE CHURNDeployed0 -20614 05 П ****=:Stefka StoyanovaSteliyan Georgievvalik10:468 • Tue 14 Apr 9:56:10Chat6Nikolay YankovNikolay Ivanov...
|
NULL
|
NULL
|
NULL
|
8617
|
|
FirefoxEditHistoryBookmarksProfilesToolsWindow Hel FirefoxEditHistoryBookmarksProfilesToolsWindow Helpdoo.sideme.minny.com/ di-tcoorls~ Google Gemini.. X40 lil © I Daily -Platform• 9mleft CA 100% C2 3• Tue 14 Apr 9:56:10Platform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t8 Jiminny+ New labsocc ce Cdcseciie3. Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.Al Reports• Report nameNAME -Ask Jiminny Test Report - 8 Apr 2026Eã Ask Jiminny reportsPeriodiReport Type10• Clear all |FREQUENCYDailySHAREDIDATEY09/04/9016ACTIONSP COEare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio. Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageJY-18909-automated-reports-ask-iiminny = 869453)...
|
NULL
|
NULL
|
NULL
|
8616
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Close tab
New Tab
Customize sidebar...
|
Firefox
|
Jiminny — Work
|
app.staging.jiminny.com/ai-reports
|
8615
|
|
Nikolay Yankov (Presenting)
Nikolay Yankov (Presen Nikolay Yankov (Presenting)
Nikolay Yankov (Presenting)
People
6
Take notes with Gemini
Take notes with Gemini
Gemini
Gemini
Unpin Nikolay Yankov's presentation from your main screen
You can't unmute someone else's presentation
More options for Nikolay Yankov
Zoom in
Open in new window
Enter Full Screen
Pin Stefka Stoyanova to your main screen
You can't unmute someone else
More options for Stefka Stoyanova
Stefka Stoyanova
Pin Nikolay Yankov to your main screen
Mute Nikolay Yankov's microphone
More options for Nikolay Yankov
Nikolay Yankov
Pin Steliyan Georgiev to your main screen
Mute Steliyan Georgiev's microphone
More options for Steliyan Georgiev
Steliyan Georgiev
Pin Nikolay Ivanov to your main screen
You can't unmute someone else
More options for Nikolay Ivanov
Nikolay Ivanov
You’re continuously framed
Backgrounds and effects
More options for Lukas Kovalik
Lukas Kovalik
Others might see more of your background. Click to view your full video.
9:55
AM
Daily - Platform
Daily - Platform
Audio settings
Turn on microphone
Video settings
Turn off camera
Nikolay Yankov is presenting
Send a reaction
Turn on captions
Raise hand (ctrl + ⌘ + h)
More options
Leave call
Meeting details
Chat with everyone
Meeting tools
Turn on microphone (⌘ + d)
Your microphone is off.
meet.google.com / Daily - Platform
FirefoxEditHistoryBookmarksProfilesToolsWindow Helpdoo.sideme.minny.com/di-teoorls~ Google Gemini.. X‹ 40 lil © I Daily - Platform • 10mleft (A 100% C2 2• Tue 14 Apr 9:55:58Platform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t8 Jiminny- New laosocc ce Cdcseciie3. Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.Al Reports• Report nameNAME -Ask Jiminny Test Report - 8 Apr 2026{03 Ask Jiminny reportsPeriodiReport Type10• Clear all |FREQUENCYDailySHAREDDATEY09/04/9016ACTIONSare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio. Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiPro vYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageJY-18909-automated-reports-ask-iiminny = 869453)...
|
Dia
|
Work: Meet - Daily - Pla…
|
NULL
|
8614
|
|
DiaFileEditViewTabsBookmarksHistoryExtensionsWindo DiaFileEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)Inbox (2)SRD-677XF7 (SRD-67C Jy 19798(7 Service-oir Font. AmrCostI Clhttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=JY-18909• Siminm0 ProjectsE3 SSHEl DatadogPlatform Team200$ Jy-19240 / [ JY-18900READY FOR DEV 1oohesnetixwirtexceed Fontawesomepackage limits(MAINTENANCISReady for Dev8-20564 1 П ****=PriorityStor..% JY-20574 [AI) Ensure PDF formatting is good= Medium= Medium% JY-20586 (QA) Manual Testing= Medium% JY-20626 Integration testing= MediumLinked work itemsis cloned byJY-20458 CRUD reports with Ask. Jiminnyis tested byJY-20649 Generate Ask Jiminny Report and display it in AI ReportsJY-20656 Expired Ask Jiminny report template is automatically disabled an.JY-20660 Non-creator cannot delete generated Ask Jiminny report from A…JY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...JY-20655 Generated Ask Jiminny Report emall is sent to recipients0 JY-20654 Generated Ask Jiminny Reoort is visible onty to creator and shar...JY-20650 Generated Ask Jiminny Report PDF includes branding, report se..Assignee3 Steliyan...i2 Stellyan .-.Anellya A...& Unassign...DEPLOYEDDONEDONE MDONEDONE MDONEDONEWUnEActionsStatusINDEYVINDEV vREADY FOR DEV <READY FOR DEVSTurn on microphone (88 + d)9:55 AM | Daily - Platform(ablDaily - Platform - 10 m left100% [8• Tue 14 Apr 9:55:57ChatCE: DeperDependolD TasksInsights & Coachin• DelD ux.© 2Xz Improve Story~ Details32 Stellyan GeorgievAsentoeReporter2 Galya DimitrovaUeyele encne@ Open with VS Code|3 branches32 commits1 pull request1 build failing5 days agcRelessesProduction2 See all deploymentsComponentsPlatformSub-ProductAoorcons8•• Far DepeGroup: QueriesDEPLOY &leature fiag releaseDeployed12006515 * *00=AI Review - Q1CTNMEUANANORCORDeployed_19982 |1 0 •*=•=No such column "Type' onentty 'Event"PLATTORM STABTUTTYDeployed9 -20384 05 11 ****=Missed deletion of stale0 -400R3 409009(Pipedrive] DI > status notlabeled, date data missingREDUCE CHURNDeployed0 -20614 05 П ****=:Stefka StoyanovaNikolay YankovSteliyan GeorgievNikolay lvanov#Lukas Kovalik10:33...
|
NULL
|
NULL
|
NULL
|
8613
|
|
Nikolay Yankov (Presenting)
Nikolay Yankov (Presen Nikolay Yankov (Presenting)
Nikolay Yankov (Presenting)
People
6
Take notes with Gemini
Take notes with Gemini
Gemini
Gemini
Zoom in
Open in new window
Enter Full Screen
Stefka Stoyanova
Nikolay Yankov
Steliyan Georgiev
Nikolay Ivanov
Lukas Kovalik
Others might see more of your background. Click to view your full video.
9:55
AM
Daily - Platform
Daily - Platform
Audio settings
Turn on microphone
Video settings
Turn off camera
Nikolay Yankov is presenting
Send a reaction
Turn on captions
Raise hand (ctrl + ⌘ + h)
More options
Leave call
Meeting details
Chat with everyone
Meeting tools
Your microphone is off.
meet.google.com / Daily - Platform...
|
Dia
|
Work: Meet - Daily - Pla…
|
NULL
|
8612
|
|
Nikolay Yankov (Presenting)
Nikolay Yankov (Presen Nikolay Yankov (Presenting)
Nikolay Yankov (Presenting)
People
6
Take notes with Gemini
Take notes with Gemini
Gemini
Gemini
Zoom in
Open in new window
Enter Full Screen
Stefka Stoyanova
Nikolay Yankov
Steliyan Georgiev
Nikolay Ivanov
Lukas Kovalik
Others might see more of your background. Click to view your full video.
9:55
AM
Daily - Platform
Daily - Platform
Audio settings
Turn on microphone
Video settings
Turn off camera
Nikolay Yankov is presenting
Send a reaction
Turn on captions
Raise hand (ctrl + ⌘ + h)
More options
Leave call
Meeting details
Chat with everyone
Meeting tools
Your microphone is off.
meet.google.com / Daily - Platform...
|
Dia
|
Work: Meet - Daily - Pla…
|
NULL
|
8611
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpdoo.sidemne.minny.com/dl-teoorls~ Google Gemini.. XC< 40Wl © | Daily -Platform • 10mleft A 100%C &• Tue 14 Apr 9:55:48Platform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t8 Jiminny+ New labsocc ce Cdcseciie3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.Al Reports• Report nameNAME -Ask Jiminny Test Report - 8 Apr 20263 Ask Jiminny reportsPeriodiReport Type© Clear all |10FREQUENCY+DailySHAREDIDATE09/04/9016ACTIONSare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio. Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageJY-18909-automated-reports-ask-iiminny = 869453)...
|
NULL
|
NULL
|
NULL
|
8610
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Close tab
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:
call/ai-activity-type
.
COMPARE_TO_GROUND_TRUTH
Determines if the baseline is the original data (
es
) or an
llm
.
SKIP_UNREPLAYABLE_ON_COLLECT
If
True
, discards records where the API returns "Cannot find activity."
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Limits the probe request to 20 seconds.
Constant
ENDPOINT_PATH
COMPARE_TO_GROUND_TRUTH
SKIP_UNREPLAYABLE_ON_COLLECT
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Purpose
The specific API route being tested:
call/ai-activity-type
.
Determines if the baseline is the original data (
es
) or an
llm
.
If
True
, discards records where the API returns "Cannot find activity."
Limits the probe request to 20 seconds.
Copy table
Execution Workflow
Execution Workflow
Parsing:
The script uses
create_parser
to handle CLI arguments like
--data-set
,
--populate-ground-truth-llm
, and
--compare-to-ground-truth
.
Initialization:
Sets up directory paths for inputs and ground truths based on the selected dataset name.
Run:
Executes the evaluation via
asyncio.run(run_evaluation(...))
, which orchestrates the collection, endpoint calling, and result scoring phases.
Copy prompt
Edit
You said are there soem issues with this PR
You said
are there soem issues with this PR
Listen
Show more options
Show thinking
Gemini said
Gemini said
Yes, there are several issues and code smells in this Pull Request, ranging from performance bottlenecks to architectural anti-patterns.
Here is a breakdown of the most notable issues, categorized by impact:
Performance & Async Anti-Patterns
Performance & Async Anti-Patterns
Sequential Requests Defeating Async (Lines 86-106):
Inside
_collect_from_es
, the code loops through
raw_samples
and
await
s a
session.post
request for each sample one by one. This means the script waits for one HTTP request to finish before starting the next, completely defeating the purpose of using
aiohttp
for asynchronous concurrency.
Fix:
Collect these coroutines into a list and use
asyncio.gather()
(ideally wrapped in an
asyncio.Semaphore
to avoid overwhelming the endpoint) to run the replayability probes concurrently.
Blocking File I/O in Async Functions (Lines 110-114, 184-188, 268-269):
Standard synchronous file operations (
with open(...)
,
json.dump
,
json.load
) are used inside
async def
functions. In Python, this blocks the entire event loop while the disk is reading/writing.
Fix:
Use an asynchronous file I/O library like
aiofiles
, or run the blocking I/O operations in a separate thread using
asyncio.to_thread()
.
Architecture & State Management
Architecture & State Management
Mutating Class-Level Variables (Lines 323-328):
The script parses CLI arguments and directly mutates the class attributes (e.g.,
AiActivityTypeEvaluator.DATA_SET = args.data_set
). This is a major anti-pattern. If you ever try to run tests in parallel or create multiple instances of this evaluator in a single process, the state will bleed across them.
Fix:
Pass these arguments into the
__init__
constructor of the instance (
self.data_set = args.data_set
) rather than modifying the class definition globally.
Code Quality & Edge Cases
Code Quality & Edge Cases
Broad Exception Catching (Line 107):
Using a bare
except Exception:
during the replayability probe is dangerous. It will catch
aiohttp.ClientError
(which is intended), but it will also silently swallow
TimeoutError
or even memory errors, making debugging incredibly difficult if the endpoint goes down.
Fix:
Scope the exception to specific expected errors, like
except (aiohttp.ClientError, asyncio.TimeoutError):
.
Inefficient Size Calculation (Line 95):
len(json.dumps(request_body)) < min_size
requires serializing the entire dictionary to a string
just
to check its length. Since
min_size
defaults to
0
, the
if min_size
short-circuit saves it for now, but if anyone sets a minimum size, this will become an unnecessary CPU bottleneck on large datasets.
Potential Path Traversal Risk (Lines 110, 113, 251):
The script writes and reads files directly using
f"{es_id}.json"
and
f"gt_{req_id}.json"
. If
es_id
or
req_id
originate from a database and ever contain characters like
../
, it could result in files being read from or written to unintended directories.
Fix:
Sanitize the IDs or strictly enforce that they only contain alphanumeric characters.
Would you like me to rewrite the
_collect_from_es
method to show how to fix the sequential async loop and blocking file I/O?
Enter a prompt for Gemini
encrypted
Enter a prompt for Gemini
encrypted
Open upload file menu...
|
Firefox
|
Jiminny — Work
|
app.staging.jiminny.com/ai-reports
|
8609
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpdoo.sidemne.minny.com/dl-teoorls~ Google Gemini.. XC< 40Wl © | Daily -Platform • 10mleft A 100%C &• Tue 14 Apr 9:55:40Platform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t8 Jiminny+ New labsocc ce Cdcseciie3. Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.Al Reports• Report nameNAME -Ask Jiminny Test Report - 8 Apr 20263 Ask Jiminny reportsPeriodiReport Type© Clear all |10FREQUENCY +DailySHAREDIDATE09/04/9016ACTIONSare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio. Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageJY-18909-automated-reports-ask-iiminny = 869453)...
|
NULL
|
NULL
|
NULL
|
8608
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Close tab
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:
call/ai-activity-type
.
COMPARE_TO_GROUND_TRUTH
Determines if the baseline is the original data (
es
) or an
llm
.
SKIP_UNREPLAYABLE_ON_COLLECT
If
True
, discards records where the API returns "Cannot find activity."
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Limits the probe request to 20 seconds.
Constant
ENDPOINT_PATH
COMPARE_TO_GROUND_TRUTH
SKIP_UNREPLAYABLE_ON_COLLECT
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Purpose
The specific API route being tested:
call/ai-activity-type
.
Determines if the baseline is the original data (
es
) or an
llm
.
If
True
, discards records where the API returns "Cannot find activity."
Limits the probe request to 20 seconds.
Copy table
Execution Workflow
Execution Workflow
Parsing:
The script uses
create_parser
to handle CLI arguments like
--data-set
,
--populate-ground-truth-llm
, and
--compare-to-ground-truth
.
Initialization:
Sets up directory paths for inputs and ground truths based on the selected dataset name.
Run:
Executes the evaluation via
asyncio.run(run_evaluation(...))
, which orchestrates the collection, endpoint calling, and result scoring phases.
Copy prompt
Edit
You said are there soem issues with this PR
You said
are there soem issues with this PR
Listen
Show more options
Show thinking
Gemini said
Gemini said
Yes, there are several issues and code smells in this Pull Request, ranging from performance bottlenecks to architectural anti-patterns.
Here is a breakdown of the most notable issues, categorized by impact:
Performance & Async Anti-Patterns
Performance & Async Anti-Patterns
Sequential Requests Defeating Async (Lines 86-106):
Inside
_collect_from_es
, the code loops through
raw_samples
and
await
s a
session.post
request for each sample one by one. This means the script waits for one HTTP request to finish before starting the next, completely defeating the purpose of using
aiohttp
for asynchronous concurrency.
Fix:
Collect these coroutines into a list and use
asyncio.gather()
(ideally wrapped in an
asyncio.Semaphore
to avoid overwhelming the endpoint) to run the replayability probes concurrently.
Blocking File I/O in Async Functions (Lines 110-114, 184-188, 268-269):
Standard synchronous file operations (
with open(...)
,
json.dump
,
json.load
) are used inside
async def
functions. In Python, this blocks the entire event loop while the disk is reading/writing.
Fix:
Use an asynchronous file I/O library like
aiofiles
, or run the blocking I/O operations in a separate thread using
asyncio.to_thread()
.
Architecture & State Management
Architecture & State Management
Mutating Class-Level Variables (Lines 323-328):
The script parses CLI arguments and directly mutates the class attributes (e.g.,
AiActivityTypeEvaluator.DATA_SET = args.data_set
). This is a major anti-pattern. If you ever try to run tests in parallel or create multiple instances of this evaluator in a single process, the state will bleed across them.
Fix:
Pass these arguments into the
__init__
constructor of the instance (
self.data_set = args.data_set
) rather than modifying the class definition globally.
Code Quality & Edge Cases
Code Quality & Edge Cases
Broad Exception Catching (Line 107):
Using a bare
except Exception:
during the replayability probe is dangerous. It will catch
aiohttp.ClientError
(which is intended), but it will also silently swallow
TimeoutError
or even memory errors, making debugging incredibly difficult if the endpoint goes down.
Fix:
Scope the exception to specific expected errors, like
except (aiohttp.ClientError, asyncio.TimeoutError):
.
Inefficient Size Calculation (Line 95):
len(json.dumps(request_body)) < min_size
requires serializing the entire dictionary to a string
just
to check its length. Since
min_size
defaults to...
|
Firefox
|
Jiminny — Work
|
app.staging.jiminny.com/ai-reports
|
8607
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpdoo.sidemne.minny.com/dl-teoorls~ Google Gemini.. XC< 40Wl © | Daily -Platform • 10mleft A 100%C &• Tue 14 Apr 9:55:39Platform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t8 Jiminny+ New labsocc ce Cdcseciie3. Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.Al Reports• Report nameNAME -Ask Jiminny Test Report - 8 Apr 2026S03 Ask Jiminny reportsPeriodiReport Type10• Clear all |FREQUENCY+DailySHAREDICCanDATEY09/04/9016ACTIONSare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio.Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageGapp.staging.jiminny.com/a...
|
NULL
|
NULL
|
NULL
|
8606
|
|
DiaFilelEditViewTabsBookmarksHistoryExtensionsWind DiaFilelEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)Inbox (2)PlactormSRD-677C Jy 19798(7 Service-oir Font. Amrhttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=JY-18909• Siminm0 ProjectsB AWSE3 SSHEl DatadogPlatform TeamQ Search board200$ Jy-19240 / [ JY-18900READY FOR DEV 1oohesnetixwirtexceed Fontawesomepackage limits(MAINTENANCISReady for Dev8-20564 1 П ****=Priority% JY-20574 [AI) Ensure PDF formatting is good= Medium= Medium% JY-20586 (QA) Manual Testing= Medium% JY-20626 Integration testing= MediumLinked work itemsis cloned byA JY-20458 CRUD reports with Ask Jiminnyis tested byJY-20649 Generate Ask Jiminny Report and display it in AI ReportsJY-20656 Expired Ask Jiminny report template is automatically disabled an.JY-20660 Non-creator cannot delete generated Ask Jiminny report from A…JY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...JY-20655 Generated Ask Jiminny Report emall is sent to recipients0 JY-20654 Generated Ask Jiminny Reoort is visible onty to creator and shar...JY-20650 Generated Ask Jiminny Report PDF includes branding, report se..CostI ClActionsAssignee3 Steliyan...i2 Stellyan .-.Anellya A...& Unassign...DEPLOYEDDONEDONE MDONEDONE MDONEDONEWUnEStatusINDEYVINDEV vREADY FOR DEVREADY FOR DEVS9:55 AM | Daily - PlatformDaily - Platform - 10 m left8•CE: DepeDependol• Far DepeD Tasks2 Insights & CoachinD DevD ux.©2|In Dev vXz Improve Story~ Details32 Stellyan GeorgievAsentoeReporter2 Galya DimitrovaUeyele encne@ Open with VS Code|3 branches32 commits1 pull request1 build failingRelessesProduction2 See all deploymentsComponentsPlatformSub-ProductAoorcons5 days agcD Al BookmarksGroup: QueriesDEPLOY 8leature fiag releaseDeployed0 -20585 15 * ****=AI Review - Q1CTNMEUANANORCORDeployed_19982 1 0 •***=No such column "Type' onentty 'Event"PLATTORM STABTUTTYDeployed9 -20384 05 ^ ****=|Missed deletion of staleaesese(Pipedrive] DI > status notlabeled, date data missingREDUCE CHURNDeployed0 -20614 05 П ****=:Stefka StoyanovaSteliyan GeorgievValik10:158 • Tue 14 Apr 9:55:39ChatNikolay YankovNikolay Ivanov...
|
NULL
|
NULL
|
NULL
|
8605
|
|
JY-20543 add AJ reports User pilot tracking by Lak JY-20543 add AJ reports User pilot tracking by LakyLak · Pull Request #11932 · jiminny/app
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Platform Sprint 1 Q2 - Platform Team - Scrum Board - Jira
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jy 19798 evaluation for ai activity types by nikolaybiaivanov · Pull Request #468 · jiminny/prophet
Jiminny
Jiminny
Close tab
New Tab
Customize sidebar
Close Google Gemini (⌃X)
Tabs from other devices
Open history (⇧⌘H)
Open bookmarks (⌘B)
AI Chat settings
Close
WORK, Google Account: [EMAIL]
Main menu
New Chat
Open menu for conversation actions.
Conversation with Gemini
Conversation with Gemini
Copy prompt
You said I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected. Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
You said
I’m on page “<tabTitle>Jy 19798 evaluation for ai activity types by nikol</tabTitle>” with “<selection>@@ -0,0 +1,330 @@1+#!/usr/bin/env python32+import asyncio3+import json4+import os5+from pathlib import Path6+from typing import Any, Dict, List7+8+import aiohttp9+10+from src.component.call_processing.schemes.ai_activity_type_request import AiActivityTypeRequest11+from src.component.call_processing.schemes.ai_activity_type_response import AiActivityTypeResponse12+from src.eval.abstract_evaluator import AbstractEvaluator13+from src.eval.collectors import collect_test_set as es_collect_test_set14+from src.eval.common_evaluator_runner import create_parser, run_evaluation15+from src.eval.endpoints import run_endpoint as run_endpoint_http16+from src.eval.models import EvaluationResult17+18+19+class AiActivityTypeEvaluator(AbstractEvaluator):20+ENDPOINT_PATH = "call/ai-activity-type"21+REQUEST_MODEL = AiActivityTypeRequest22+RESPONSE_MODEL = AiActivityTypeResponse23+OUTPUT_SUBFOLDER = "ai_activity_type"24+USES_LLM_EVALUATION = False25+26+READS_FROM_DATASET_FOLDER = True27+28+DATA_SET: str = "initial"29+30+MINIMUM_REQUEST_BODY_SIZE: int = 031+32+SKIP_UNREPLAYABLE_ON_COLLECT: bool = True33+REPLAYABILITY_CHECK_TIMEOUT_SECONDS: float = 20.034+35+POPULATE_GROUND_TRUTH_LLM: bool = False36+GROUND_TRUTH_LLM_ENDPOINT: str = "OPENAI:gpt-5_2"37+38+COMPARE_TO_GROUND_TRUTH: str = "es"39+40+def __init__(self, **kwargs: Any) -> None:41+super().__init__(**kwargs)42+_repo_root = Path(__file__).parents[2]43+_datasets_root = _repo_root / "eval_output" / self.OUTPUT_SUBFOLDER / "eval_datasets"44+_dataset_dir = _datasets_root / self.__class__.DATA_SET45+self._dataset_dir = _dataset_dir46+self._inputs_dir = _dataset_dir / "input"47+self._ground_truths_dir = _dataset_dir / "ground_truth"48+49+async def collect_test_set(self) -> List[Dict[str, Any]]:50+if self.es_host_name:51+return await self._collect_from_es()52+return await self._collect_from_files()53+54+async def _collect_from_es(self) -> List[Dict[str, Any]]:55+if self._inputs_dir.exists() or self._ground_truths_dir.exists():56+raise FileExistsError(57+f"Dataset folder already exists: {self._inputs_dir.parent}. "58+"Choose a different --data-set name or remove the folder manually."59+ )60+61+raw_samples = await es_collect_test_set(62+es_host_name=self.es_host_name,63+es_port=self.es_port,64+endpoint_path=self.ENDPOINT_PATH,65+limit=self.limit,66+language_filter=self.language_filter,67+min_prompt_length=self.min_prompt_length,68+is_json_response=self.IS_JSON_RESPONSE,69+exclude_languages=self.exclude_languages,70+ )71+72+self._inputs_dir.mkdir(parents=True, exist_ok=True)73+self._ground_truths_dir.mkdir(parents=True, exist_ok=True)74+75+min_size = self.MINIMUM_REQUEST_BODY_SIZE76+samples: List[Dict[str, Any]] = []77+skipped_no_response = 078+skipped_too_small = 079+skipped_unreplayable = 080+skipped_probe_errors = 081+82+endpoint_url = f"{self.api_host}/{self.ENDPOINT_PATH}"83+84+timeout = aiohttp.ClientTimeout(total=self.REPLAYABILITY_CHECK_TIMEOUT_SECONDS)85+async with aiohttp.ClientSession(timeout=timeout) as session:86+for sample in raw_samples:87+es_id = sample["id"]88+request_body = sample["request_body"]89+original_response = sample.get("original_response")90+91+if not original_response:92+skipped_no_response += 193+continue94+95+if min_size and len(json.dumps(request_body)) < min_size:96+skipped_too_small += 197+continue98+99+if self.SKIP_UNREPLAYABLE_ON_COLLECT:100+try:101+async with session.post(endpoint_url, json=request_body) as resp:102+if resp.status != 200:103+body_text = await resp.text()104+if "Cannot find activity" in body_text:105+skipped_unreplayable += 1106+continue107+except Exception:108+skipped_probe_errors += 1109+110+with open(self._inputs_dir / f"{es_id}.json", "w") as f:111+json.dump(request_body, f, indent=2)112+113+with open(self._ground_truths_dir / f"gt_{es_id}.json", "w") as f:114+json.dump(original_response, f, indent=2)115+116+samples.append(117+ {118+"id": es_id,119+"request_body": request_body,120+"original_response": original_response,121+"ground_truth": original_response,122+ }123+ )124+125+print(f"Wrote {len(samples)} samples to {self._inputs_dir.parent}")126+if skipped_no_response:127+print(f"Skipped {skipped_no_response} records with no response body")128+if skipped_too_small:129+print(130+f"Skipped {skipped_too_small} records whose request body was smaller "131+f"than {min_size} characters"132+ )133+if skipped_unreplayable:134+print(f"Skipped {skipped_unreplayable} records that are not replayable (missing activity in ES)")135+if skipped_probe_errors:136+print(f"Warning: replayability probe errored for {skipped_probe_errors} records (kept those records)")137+138+if self.__class__.POPULATE_GROUND_TRUTH_LLM:139+label_results = await run_endpoint_http(140+test_set=samples,141+endpoint_url=endpoint_url,142+request_model=self.REQUEST_MODEL,143+force_llm_endpoint=self.__class__.GROUND_TRUTH_LLM_ENDPOINT,144+is_streaming_response=self.IS_STREAMING_RESPONSE,145+timeout_seconds=300.0,146+parallel_requests=self.parallel_requests,147+return_prompt_in_run=self.RETURN_PROMPT_IN_RUN,148+ )149+150+for i, r in enumerate(label_results):151+forced = self.__class__.GROUND_TRUTH_LLM_ENDPOINT152+model = forced.split(":", 1)[1] if ":" in forced else forced153+samples[i]["ground_truth_llm_model"] = model154+155+if not r.get("success", True):156+samples[i]["ground_truth_llm"] = None157+samples[i]["ground_truth_llm_reasoning"] = None158+samples[i]["ground_truth_llm_error_type"] = r.get("error_type")159+samples[i]["ground_truth_llm_error_message"] = r.get("error_message")160+continue161+162+resp = r.get("new_response") or {}163+samples[i]["ground_truth_llm"] = resp.get("ai_activity_type")164+samples[i]["ground_truth_llm_reasoning"] = resp.get("ai_activity_type_reasoning")165+166+return samples167+168+async def _collect_from_files(self) -> List[Dict[str, Any]]:169+if not self._inputs_dir.exists():170+raise FileNotFoundError(171+f"Dataset folder not found: {self._inputs_dir}. "172+"Run --steps collect first or check your --data-set name."173+ )174+175+samples: List[Dict[str, Any]] = []176+for filename in sorted(os.listdir(self._inputs_dir)):177+input_path = self._inputs_dir / filename178+gt_path = self._ground_truths_dir / f"gt_{filename}"179+180+if not gt_path.exists():181+print(f"Warning: ground truth not found for {filename}, skipping")182+continue183+184+with open(input_path, "r") as f:185+request_body: Dict[str, Any] = json.load(f)186+187+with open(gt_path, "r") as f:188+ground_truth: Dict[str, Any] = json.load(f)189+190+samples.append(191+ {192+"id": Path(filename).stem,193+"request_body": request_body,194+"original_response": None,195+"ground_truth": ground_truth,196+ }197+ )198+199+print(f"Loaded {len(samples)} samples from {self._inputs_dir.parent}")200+return samples201+202+async def evaluate_against_ground_truth(self, results: List[Dict[str, Any]]) -> List[EvaluationResult]:203+evaluation_results: List[EvaluationResult] = []204+compare_to = self.__class__.COMPARE_TO_GROUND_TRUTH205+206+for res in results:207+req_id = res["request_id"]208+is_success = res.get("success", True)209+210+if not is_success:211+evaluation_results.append(212+EvaluationResult(213+request_id=req_id,214+request_body=res["request_body"],215+new_response=res.get("new_response"),216+valid_schema=False,217+quality_score=0.0,218+quality_feedback=f"Request failed: {res.get('error_type')} - {res.get('error_message')}",219+response_time=res.get("response_time"),220+success=False,221+error_type=res.get("error_type"),222+error_message=res.get("error_message"),223+ground_truth_match=False,224+ )225+ )226+continue227+228+pred = (res.get("new_response") or {}).get("ai_activity_type")229+230+if compare_to == "llm":231+if "ground_truth_llm" not in res:232+evaluation_results.append(233+EvaluationResult(234+request_id=req_id,235+request_body=res["request_body"],236+new_response=res.get("new_response"),237+valid_schema=res.get("valid_schema", True),238+quality_score=0.0,239+quality_feedback=(240+"Missing ground_truth_llm in run results. Ensure collect step populated it and "241+"endpoints.py passes it through."242+ ),243+response_time=res.get("response_time"),244+success=True,245+ground_truth_match=False,246+ )247+ )248+continue249+gt_label = res.get("ground_truth_llm")250+else:251+gt_path = self._ground_truths_dir / f"gt_{req_id}.json"252+if not gt_path.exists():253+evaluation_results.append(254+EvaluationResult(255+request_id=req_id,256+request_body=res["request_body"],257+new_response=res.get("new_response"),258+valid_schema=res.get("valid_schema", True),259+quality_score=0.0,260+quality_feedback=f"Ground truth file not found: {gt_path}",261+response_time=res.get("response_time"),262+success=True,263+ground_truth_match=False,264+ )265+ )266+continue267+268+with open(gt_path, "r") as f:269+ground_truth_dict: Dict[str, Any] = json.load(f)270+271+gt_label = (ground_truth_dict or {}).get("ai_activity_type")272+273+matched = pred == gt_label274+275+evaluation_results.append(276+EvaluationResult(277+request_id=req_id,278+request_body=res["request_body"],279+new_response=res.get("new_response"),280+valid_schema=res.get("valid_schema", True),281+quality_score=1.0 if matched else 0.0,282+quality_feedback="PASS" if matched else f"FAIL\nExpected: {gt_label}\nGot: {pred}",283+response_time=res.get("response_time"),284+success=True,285+ground_truth_match=matched,286+ )287+ )288+289+return evaluation_results290+291+292+if __name__ == "__main__":293+parser = create_parser(AiActivityTypeEvaluator)294+parser.add_argument(295+"--data-set",296+type=str,297+default="initial",298+dest="data_set",299+help="Dataset folder under eval_output/ai_activity_type/eval_datasets/ (default: initial)",300+ )301+302+parser.add_argument(303+"--populate-ground-truth-llm",304+action="store_true",305+dest="populate_ground_truth_llm",306+ )307+parser.add_argument(308+"--ground-truth-llm-endpoint",309+type=str,310+default="OPENAI:gpt-5_2",311+dest="ground_truth_llm_endpoint",312+ )313+314+parser.add_argument(315+"--compare-to-ground-truth",316+type=str,317+choices=["es", "llm"],318+default="es",319+dest="compare_to_ground_truth",320+ )321+322+args = parser.parse_args()323+AiActivityTypeEvaluator.DATA_SET = args.data_set324+325+AiActivityTypeEvaluator.POPULATE_GROUND_TRUTH_LLM = bool(args.populate_ground_truth_llm)326+AiActivityTypeEvaluator.GROUND_TRUTH_LLM_ENDPOINT = args.ground_truth_llm_endpoint327+328+AiActivityTypeEvaluator.COMPARE_TO_GROUND_TRUTH = args.compare_to_ground_truth329+330+asyncio.run(run_evaluation(AiActivityTypeEvaluator, args))</selection>” selected.
Please summarize the selection using precise and concise language. Use headers and bulleted lists in the summary, to make it scannable. Maintain the meaning and factual accuracy.
Expand
Listen
Show more options
Gemini said
Gemini said
This Python script defines the
AiActivityTypeEvaluator
class, a specialized tool for evaluating AI-driven activity type classification. It extends a base evaluation framework to handle data collection, ground-truth comparison, and performance scoring.
Core Functionality
Core Functionality
Data Collection (
collect_test_set
):
* Supports fetching raw samples from
Elasticsearch (ES)
or loading them from
local JSON files
.
Implements a
replayability check
: It probes the live API endpoint to ensure the activity exists before including it in the dataset.
Filters data based on request body size, language, and the presence of an original response.
Ground Truth Management:
Saves inputs and "gold standard" responses to a local directory structure (
eval_output/ai_activity_type/...
).
Features an optional mode to populate ground truth using a high-order LLM (e.g.,
gpt-5_2
) if the
POPULATE_GROUND_TRUTH_LLM
flag is set.
Evaluation Logic (
evaluate_against_ground_truth
):
Compares the model's predicted
ai_activity_type
against either the original ES response or the LLM-generated ground truth.
Assigns a
quality_score
of
1.0
for a match and
0.0
for a mismatch or request failure.
Key Configuration Constants
Key Configuration Constants
Constant
Purpose
ENDPOINT_PATH
The specific API route being tested:
call/ai-activity-type
.
COMPARE_TO_GROUND_TRUTH
Determines if the baseline is the original data (
es
) or an
llm
.
SKIP_UNREPLAYABLE_ON_COLLECT
If
True
, discards records where the API returns "Cannot find activity."
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Limits the probe request to 20 seconds.
Constant
ENDPOINT_PATH
COMPARE_TO_GROUND_TRUTH
SKIP_UNREPLAYABLE_ON_COLLECT
REPLAYABILITY_CHECK_TIMEOUT_SECONDS
Purpose
The specific API route being tested:
call/ai-activity-type
.
Determines if the baseline is the original data (
es
) or an
llm
.
If
True
, discards records where the API returns "Cannot find activity."
Limits the probe request to 20 seconds.
Copy table
Execution Workflow
Execution Workflow
Parsing:
The script uses
create_parser
to handle CLI arguments like
--data-set
,
--populate-ground-truth-llm
, and
--compare-to-ground-truth
.
Initialization:
Sets up directory paths for inputs and ground truths based on the selected dataset name.
Run:
Executes the evaluation via
asyncio.run(run_evaluation(...))
, which orchestrates the collection, endpoint calling, and result scoring phases.
Copy prompt
Edit
You said are there soem issues with this PR
You said
are there soem issues with this PR
Listen
Show more options
Show thinking
Gemini said
Gemini said
Yes, there are several issues and code smells in this Pull Request, ranging from performance bottlenecks to architectural anti-patterns.
Here is a breakdown of the most notable issues, categorized by impact:
Performance & Async Anti-Patterns
Performance & Async Anti-Patterns
Sequential Requests Defeating Async (Lines 86-106):
Inside
_collect_from_es
, the code loops through
raw_samples
and
await
s a
session.post
request for each sample one by one. This means the script waits for one HTTP request to finish before starting the next, completely defeating the purpose of using
aiohttp
for asynchronous concurrency.
Fix:
Collect these coroutines into a list and use
asyncio.gather()
(ideally wrapped in an
asyncio.Semaphore
to avoid overwhelming the endpoint) to run the replayability probes concurrently.
Blocking File I/O in Async Functions (Lines 110-114, 184-188, 268-269):
Standard synchronous file operations (
with open(...)
,
json.dump
,
json.load
) are used inside
async def
functions. In Python, this blocks the entire event loop while the disk is reading/writing.
Fix:
Use an asynchronous file I/O library like
aiofiles
, or run the blocking I/O operations in a separate thread using
asyncio.to_thread()
.
Architecture & State Management
Architecture & State Management
Mutating Class-Level Variables (Lines 323-328):
The script parses CLI arguments and directly mutates the class attributes (e.g.,
AiActivityTypeEvaluator.DATA_SET = args.data_set
). This is a major anti-pattern. If you ever try to run tests in parallel or create multiple instances of this evaluator in a single process, the state will bleed across them.
Fix:
Pass these arguments into the
__init__
constructor of the instance (
self.data_set = args.data_set
) rather than modifying the class definition globally.
Code Quality & Edge Cases
Code Quality & Edge Cases
Broad Exception Catching (Line 107):
Using a bare
except Exception:
during the replayability probe is dangerous. It will catch
aiohttp.ClientError
(which is intended), but it will also silently swallow
TimeoutError
or even memory errors, making debugging incredibly difficult if the endpoint goes down.
Fix:
Scope the exception to specific expected errors, like
except (aiohttp.ClientError, asyncio.TimeoutError):
.
Inefficient Size Calculation (Line 95):
len(json.dumps(request_body)) < min_size
requires serializing the entire dictionary to a string
just
to check its length. Since
min_size
defaults to
0
, the
if min_size
short-circuit saves it for now, but if anyone sets a minimum size, this will become an unnecessary CPU bottleneck on large datasets.
Potential Path Traversal Risk (Lines 110, 113, 251):
The script writes and reads files directly using
f"{es_id}.json"
and
f"gt_{req_id}.json"
. If...
|
Firefox
|
Jiminny — Work
|
app.staging.jiminny.com/ai-reports
|
8604
|
|
DiaFilelEditViewTabsBookmarksHistoryExtensionsWind DiaFilelEditViewTabsBookmarksHistoryExtensionsWindowHelpmeet.google.com / Daily - PlatformNikolay Yankov (Presenting)Inbox (2)PlactormSRD-677C Jy 19798(7 Service-oir Font. Amrhttps://jiminny.atlassian.net/jira/software/c/projects/JY/boards/377selectedissue=JY-18909Sminm0 ProjectsB AWSE3 SSHEl DatadogPlatform TeamQ Search board200$ Jy-19240 / [ JY-18900READY FOR DEV 1oohesnetixwirtexceed Fontawesomepackage limits(MAINTENANCISReady for Dev8-20564 1 П ****=Priority% JY-20574 [AI) Ensure PDF formatting is good= Medium= Medium% JY-20586 (QA) Manual Testing= Medium% JY-20626 Integration testing= MediumLinked work itemsis cloned byJY-20458 CRUD reports with Ask. Jiminnyis tested byJY-20649 Generate Ask Jiminny Report and display it in AI ReportsJY-20656 Expired Ask Jiminny report template is automatically disabled an.JY-20660 Non-creator cannot delete generated Ask Jiminny report from A…JY-20651 Generated Ask Jiminny Report PDF displays Jiminny logo when n...JY-20655 Generated Ask Jiminny Report emall is sent to recipients0 JY-20654 Generated Ask Jiminny Reoort is visible onty to creator and shar...JY-20650 Generated Ask Jiminny Report PDF includes branding, report se..CostI ClActionsAssignee3 Steliyan...i2 Stellyan .-.Anellya A...& Unassign...DEPLOYEDDONEDONE MDONEDONE MDONEDONEWUnEStatusINDEYVINDEV vREADY FOR DEVREADY FOR DEVS9:55 AM | Daily - PlatformDaily - Platform - 10 m left8•CE: DepeDependol• Far DepeD TasksInsights & CoachinD DevD ux.©2|In Dev vXz Improve Story~ Details32 Stellyan GeorgievAsuntoeReporter2 Galya DimitrovaUeyele encne@ Open with VS Code|3 branches32 commits1 pull request1 build failingRelessesProduction2 See all deploymentsComponentsPlatformSub-ProductAoorcons5 days agcD Al BookmarksGroup: QueriesDEPLOY 8leature fiag releaseDeployed12006515 * *00=AI Review - Q1CTNMEUANANORCORDeployed19982 1 @ •..•=No such column "Type' onentty 'Event"PLATTORM STABTUTTYDeployed9 -20384 05 ^ ****=|Missed deletion of staleaeeese(Pipedrive) DI › status notlabeled, date data missingREDUCE CHURNDeployed0 -20614 05 П ****=:Stefka StoyanovaSteliyan Georgievvalik10:128 • Tue 14 Apr 9:55:36ChatNikolay YankovNikolay Ivanov...
|
NULL
|
NULL
|
NULL
|
8603
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpdoo.sidemne.minny.com/dl-teoorls~ Google Gemini.. XC< 40Wl © | Daily -Platform • 10mleft A 100%C &• Tue 14 Apr 9:55:33Platform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t8 Jiminny+ New labsocc ce Cdcseciie3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.Al Reports• Report nameNAME -Ask Jiminny Test Report - 8 Apr 20263 Ask Jiminny reportsPeriodiReport Type10• Clear all |FREQUENCY +DailySHAREDIDATE09/04/9016ACTIONSare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio. Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageJY-18909-automated-reports-ask-iiminny = 869453)...
|
NULL
|
NULL
|
NULL
|
8602
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpdoo.sidemne.minny.com/dl-teoorls~ Google Gemini.. X•< 40 Wl © | Daily -Platform • 10mleft A 100%C &• Tue 14 Apr 9:55:27Platform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t8 Jiminny+ New labsocc ce Cdcseciie3. Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.Al Reports• Report nameNAME -Ask Jiminny Test Report - 8 Apr 2026S03 Ask Jiminny reportsPeriodiReport Type© Clear all |10FREQUENCY+DailySHAREDICCanDATEY09/04/9016ACTIONSP Croare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio. Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne about peodleYour privacy & GeminiSummarize pageGapp.staging.jiminny.com/a...
|
NULL
|
NULL
|
NULL
|
8601
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpdoo.sidemne.minny.com/dl-teoorls~ Google Gemini.. XC< 40Wl © | Daily - Platform • 10mleft A 100%C &• Tue 14 Apr 9:55:21Platform Sprint 1 Q2 - Platform Tea@ Jy 19798 evaluation for ai activity t8 Jiminny+ New labsocc ce Cdcseciie3. Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringoeses.Al Reports• Report nameNAME -Ask Jiminny Test Report - 8 Apr 20263 Ask Jiminny reportsPeriodiReport Type10© Clear allFREQUENCYDailySHAREDLKamDATEY09/04/9016ACTIONS• Co dare there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await sasession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio. Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiPro vYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageJY-18909-automated-reports-ask-iiminny = 869453)...
|
NULL
|
NULL
|
NULL
|
8600
|
|
FirefoxFileEoitViewHistoryBookmarksProfilesToolsWi FirefoxFileEoitViewHistoryBookmarksProfilesToolsWindow Helpdoo.sidemne.minny.com/dl-teoorls~ Google Gemini.. X•< 40 ll © | Daily - Platform • 10m leftA 100%C &• Tue 14 Apr 9:55:18Platform Sprint 1 Q2 - Platform TeaJy 19798 evaluation for ai activity t•Jiminny+ New labAl Reports• Report nameNAME €S03 Ask Jiminny reportsPeriodiReport Type© Clear all |socc ce Cdcseciie3.Run: Executes the evaluation viaasyncio.run(run_evaluation(...)),which orchestrates the collection,endpoint calling, and result scoringolcses.FREQUENCY #DATE €ACTIONSI10are there soem issueswith this PRShow thinking vYes, there are several issues and code smellsin this Pull Request, ranging from performancebottlenecks to architectural anti-patterns.Here is a breakdown of the most notableissues, categorized by impact:Performance & Async Anti-Patterns• Sequential Requests Defeating Async(Lines 86-106): Inside_collect_from_es, the code loopsthrough raw_samples and await s asession.post request for each sampleone by one. This means the script waitsfor one HTTP request to finish beforestarting the next, completely defeatingthe purpose of using aiohttp forasynchronous concurrency.• Fix: Collect these coroutines into alist and use asyncio.gather ()(ideally wrapped in anasyncio. Semaphore to avoidoverwhelming the endpoint) to run• Enter a prompt for GeminiProvYour Jiminny chats aren't used to improve our models. Gemini is Alane can make mistakes, Inciualne aoout peodleYour privacy & GeminiSummarize pageapp.staging.fiminnv.com/ai-rgports k-iiminny - 869453...
|
NULL
|
NULL
|
NULL
|
8599
|