diff --git a/tests/unit/vertexai/test_evaluation.py b/tests/unit/vertexai/test_evaluation.py
index 48fc5683e3..e2c8c0bb7f 100644
--- a/tests/unit/vertexai/test_evaluation.py
+++ b/tests/unit/vertexai/test_evaluation.py
@@ -2925,6 +2925,9 @@ def test_rubric_generation_parsing_additional_fields(self):
             critique_metric=metric_prompt_template_examples.MetricPromptTemplateExamples.Pointwise.COHERENCE,
         )
         dataset_with_rubrics = rbm.generate_rubrics(_TEST_EVAL_DATASET_PROMPT_RESPONSE)
-        expected = _EXPECTED_EVAL_DATASET_PROMPT_RESPONSE_WITH_RUBRICS
+        expected = _EXPECTED_EVAL_DATASET_PROMPT_RESPONSE_WITH_RUBRICS.copy()
         expected["desc"] = ["test_desc", "test_desc", "test_desc"]
+        expected["desc"] = expected["desc"].astype(object)
+
+        assert dataset_with_rubrics.equals(expected)
         assert dataset_with_rubrics.equals(expected)
diff --git a/vertexai/evaluation/utils.py b/vertexai/evaluation/utils.py
index 05148d1623..564ecb9b1d 100644
--- a/vertexai/evaluation/utils.py
+++ b/vertexai/evaluation/utils.py
@@ -209,7 +209,7 @@ def _load_jsonl(filepath: str) -> "pd.DataFrame":
         )
     if filepath.startswith(_GCS_PREFIX):
         file_contents = _read_gcs_file_contents(filepath)
-        return pd.read_json(file_contents, lines=True)
+        return pd.read_json(io.StringIO(file_contents), lines=True)
     else:
         with open(filepath, "r") as f:
             return pd.read_json(f, lines=True)
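
Context for the vertexai/evaluation/utils.py change: pandas 2.1+ deprecates passing a literal JSON string to pd.read_json and asks callers to wrap the string in a StringIO object, which is what the GCS branch now does. Below is a minimal sketch of the behavior, not the SDK's code; the JSONL payload and column names are made up for illustration.

# Minimal sketch: reading JSONL held in memory as a string.
# The payload and columns here are illustrative, not taken from the SDK.
import io

import pandas as pd

jsonl_contents = '{"prompt": "p1", "response": "r1"}\n{"prompt": "p2", "response": "r2"}\n'

# Passing the literal string directly (pd.read_json(jsonl_contents, lines=True))
# raises a FutureWarning on pandas >= 2.1 and is slated for removal; wrapping it
# in a file-like object avoids the deprecated code path.
df = pd.read_json(io.StringIO(jsonl_contents), lines=True)
print(df)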