diff --git a/generated_proto/sample/__init__.py b/generated_proto/sample/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/generated_proto/sample/sample_pb2.py b/generated_proto/sample/sample_pb2.py deleted file mode 100644 index 47c07074d..000000000 --- a/generated_proto/sample/sample_pb2.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Generated by the protocol buffer compiler. DO NOT EDIT! -# NO CHECKED-IN PROTOBUF GENCODE -# source: sample.proto -# Protobuf Python Version: 5.29.2 -"""Generated protocol buffer code.""" - -from google.protobuf import descriptor as _descriptor -from google.protobuf import descriptor_pool as _descriptor_pool -from google.protobuf import runtime_version as _runtime_version -from google.protobuf import symbol_database as _symbol_database -from google.protobuf.internal import builder as _builder - -_runtime_version.ValidateProtobufRuntimeVersion( - _runtime_version.Domain.PUBLIC, 5, 29, 2, "", "sample.proto" -) -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\n\x0csample.proto"5\n\x06Sample\x12\x11\n\ttimestamp\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t' -) - -_globals = globals() -_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) -_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sample_pb2", _globals) -if not _descriptor._USE_C_DESCRIPTORS: - DESCRIPTOR._loaded_options = None - _globals["_SAMPLE"]._serialized_start = 16 - _globals["_SAMPLE"]._serialized_end = 69 -# @@protoc_insertion_point(module_scope) diff --git a/generated_proto/testrun/ta_testrun_pb2.py b/generated_proto/testrun/ta_testrun_pb2.py index 0de6f6acf..ba133248f 100644 --- a/generated_proto/testrun/ta_testrun_pb2.py +++ b/generated_proto/testrun/ta_testrun_pb2.py @@ -20,7 +20,7 @@ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile( - b'\n\x10ta_testrun.proto"\xcd\x02\n\x07TestRun\x12\x11\n\ttimestamp\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x11\n\tclassname\x18\x03 \x01(\t\x12\x11\n\ttestsuite\x18\x04 \x01(\t\x12\x15\n\rcomputed_name\x18\x05 \x01(\t\x12!\n\x07outcome\x18\x06 \x01(\x0e\x32\x10.TestRun.Outcome\x12\x17\n\x0f\x66\x61ilure_message\x18\x07 \x01(\t\x12\x10\n\x08\x64uration\x18\x08 \x01(\x02\x12\x0e\n\x06repoid\x18\n \x01(\x03\x12\x12\n\ncommit_sha\x18\x0b \x01(\t\x12\x0e\n\x06\x62ranch\x18\x0c \x01(\t\x12\r\n\x05\x66lags\x18\r \x03(\t\x12\x10\n\x08\x66ilename\x18\x0e \x01(\t\x12\x11\n\tframework\x18\x0f \x01(\t".\n\x07Outcome\x12\n\n\x06PASSED\x10\x00\x12\n\n\x06\x46\x41ILED\x10\x01\x12\x0b\n\x07SKIPPED\x10\x02' + b'\n\x10ta_testrun.proto"\xda\x02\n\x07TestRun\x12\x11\n\ttimestamp\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x11\n\tclassname\x18\x03 \x01(\t\x12\x11\n\ttestsuite\x18\x04 \x01(\t\x12\x15\n\rcomputed_name\x18\x05 \x01(\t\x12!\n\x07outcome\x18\x06 \x01(\x0e\x32\x10.TestRun.Outcome\x12\x17\n\x0f\x66\x61ilure_message\x18\x07 \x01(\t\x12\x18\n\x10\x64uration_seconds\x18\x08 \x01(\x02\x12\x0e\n\x06repoid\x18\n \x01(\x03\x12\x12\n\ncommit_sha\x18\x0b \x01(\t\x12\x13\n\x0b\x62ranch_name\x18\x0c \x01(\t\x12\r\n\x05\x66lags\x18\r \x03(\t\x12\x10\n\x08\x66ilename\x18\x0e \x01(\t\x12\x11\n\tframework\x18\x0f \x01(\t".\n\x07Outcome\x12\n\n\x06PASSED\x10\x00\x12\n\n\x06\x46\x41ILED\x10\x01\x12\x0b\n\x07SKIPPED\x10\x02' ) _globals = globals() @@ -29,7 +29,7 @@ if not _descriptor._USE_C_DESCRIPTORS: DESCRIPTOR._loaded_options = None 
_globals["_TESTRUN"]._serialized_start = 21 - _globals["_TESTRUN"]._serialized_end = 354 - _globals["_TESTRUN_OUTCOME"]._serialized_start = 308 - _globals["_TESTRUN_OUTCOME"]._serialized_end = 354 + _globals["_TESTRUN"]._serialized_end = 367 + _globals["_TESTRUN_OUTCOME"]._serialized_start = 321 + _globals["_TESTRUN_OUTCOME"]._serialized_end = 367 # @@protoc_insertion_point(module_scope) diff --git a/generated_proto/testrun/ta_testrun_pb2.pyi b/generated_proto/testrun/ta_testrun_pb2.pyi new file mode 100644 index 000000000..bd9eb0d61 --- /dev/null +++ b/generated_proto/testrun/ta_testrun_pb2.pyi @@ -0,0 +1,83 @@ +from typing import ClassVar as _ClassVar +from typing import Iterable as _Iterable +from typing import Optional as _Optional +from typing import Union as _Union + +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf.internal import containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper + +DESCRIPTOR: _descriptor.FileDescriptor + +class TestRun(_message.Message): + __slots__ = ( + "timestamp", + "name", + "classname", + "testsuite", + "computed_name", + "outcome", + "failure_message", + "duration_seconds", + "repoid", + "commit_sha", + "branch_name", + "flags", + "filename", + "framework", + ) + class Outcome(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = () + PASSED: _ClassVar[TestRun.Outcome] + FAILED: _ClassVar[TestRun.Outcome] + SKIPPED: _ClassVar[TestRun.Outcome] + + PASSED: TestRun.Outcome + FAILED: TestRun.Outcome + SKIPPED: TestRun.Outcome + TIMESTAMP_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + CLASSNAME_FIELD_NUMBER: _ClassVar[int] + TESTSUITE_FIELD_NUMBER: _ClassVar[int] + COMPUTED_NAME_FIELD_NUMBER: _ClassVar[int] + OUTCOME_FIELD_NUMBER: _ClassVar[int] + FAILURE_MESSAGE_FIELD_NUMBER: _ClassVar[int] + DURATION_SECONDS_FIELD_NUMBER: _ClassVar[int] + REPOID_FIELD_NUMBER: _ClassVar[int] + COMMIT_SHA_FIELD_NUMBER: _ClassVar[int] + BRANCH_NAME_FIELD_NUMBER: _ClassVar[int] + FLAGS_FIELD_NUMBER: _ClassVar[int] + FILENAME_FIELD_NUMBER: _ClassVar[int] + FRAMEWORK_FIELD_NUMBER: _ClassVar[int] + timestamp: int + name: str + classname: str + testsuite: str + computed_name: str + outcome: TestRun.Outcome + failure_message: str + duration_seconds: float + repoid: int + commit_sha: str + branch_name: str + flags: _containers.RepeatedScalarFieldContainer[str] + filename: str + framework: str + def __init__( + self, + timestamp: _Optional[int] = ..., + name: _Optional[str] = ..., + classname: _Optional[str] = ..., + testsuite: _Optional[str] = ..., + computed_name: _Optional[str] = ..., + outcome: _Optional[_Union[TestRun.Outcome, str]] = ..., + failure_message: _Optional[str] = ..., + duration_seconds: _Optional[float] = ..., + repoid: _Optional[int] = ..., + commit_sha: _Optional[str] = ..., + branch_name: _Optional[str] = ..., + flags: _Optional[_Iterable[str]] = ..., + filename: _Optional[str] = ..., + framework: _Optional[str] = ..., + ) -> None: ... 
diff --git a/protobuf/sample.proto b/protobuf/sample.proto deleted file mode 100644 index 6411c78a6..000000000 --- a/protobuf/sample.proto +++ /dev/null @@ -1,8 +0,0 @@ -syntax = "proto2"; - -message Sample { - optional string timestamp = 1; - optional string id = 2; - optional string name = 3; -} - diff --git a/protobuf/ta_testrun.proto b/protobuf/ta_testrun.proto index 08c38435b..c0530b875 100644 --- a/protobuf/ta_testrun.proto +++ b/protobuf/ta_testrun.proto @@ -1,7 +1,7 @@ syntax = "proto2"; message TestRun { - optional string timestamp = 1; + optional int64 timestamp = 1; optional string name = 2; optional string classname = 3; optional string testsuite = 4; @@ -16,12 +16,12 @@ message TestRun { optional Outcome outcome = 6; optional string failure_message = 7; - optional float duration = 8; + optional float duration_seconds = 8; optional int64 repoid = 10; optional string commit_sha = 11; - optional string branch = 12; + optional string branch_name = 12; repeated string flags = 13; diff --git a/requirements.in b/requirements.in index 0be765301..e02c6e0e5 100644 --- a/requirements.in +++ b/requirements.in @@ -28,7 +28,7 @@ pre-commit polars==1.12.0 proto-plus>=1.25.0 psycopg2>=2.9.10 -protobuf>=5.29.2` +protobuf>=5.29.2 pydantic>=2.9.0 PyJWT>=2.4.0 pytest diff --git a/services/tests/test_bigquery.py b/services/tests/test_bigquery.py index 04a3288e9..9a9198d69 100644 --- a/services/tests/test_bigquery.py +++ b/services/tests/test_bigquery.py @@ -4,7 +4,7 @@ import polars as pl import pytest -import generated_proto.sample.sample_pb2 as sample_pb2 +import generated_proto.testrun.ta_testrun_pb2 as ta_testrun_pb2 from services.bigquery import BigQueryService fake_private_key = """-----BEGIN RSA PRIVATE KEY----- @@ -55,12 +55,12 @@ def test_bigquery_service(): results = bigquery_service.query(sql) assert len(results) == 2 - assert set([row["timestamp"] for row in results]) == { + assert {row["timestamp"] for row in results} == { datetime.fromisoformat("2025-01-01T00:00:00Z"), datetime.fromisoformat("2024-12-30T00:00:00Z"), } - assert set([row["name"] for row in results]) == {"name", "name2"} - assert set([row["id"] for row in results]) == {1, 2} + assert {row["name"] for row in results} == {"name", "name2"} + assert {row["id"] for row in results} == {1, 2} @pytest.mark.skip(reason="This test requires being run using actual working creds") @@ -78,33 +78,51 @@ def test_bigquery_service_polars(): ) assert len(results) == 2 - assert set(results["timestamp"].to_list()) == { + assert {x for x in results["timestamp"].to_list()} == { datetime.fromisoformat("2025-01-01T00:00:00Z"), datetime.fromisoformat("2024-12-30T00:00:00Z"), } - assert set(results["name"].to_list()) == {"name", "name2"} - assert set(results["id"].to_list()) == {1, 2} + assert {x for x in results["name"].to_list()} == {"name", "name2"} + assert {x for x in results["id"].to_list()} == {1, 2} # this test should only be run manually when making changes to the way we write to bigquery # the reason it's not automated is because vcrpy does not seem to work with the gRPC requests @pytest.mark.skip(reason="This test requires being run using actual working creds") def test_bigquery_service_write(): - table_name = "codecov-dev.test_dataset.sample_table" + table_name = "codecov-dev.test_dataset.testruns" bigquery_service = BigQueryService(gcp_config) bigquery_service.query(f"TRUNCATE TABLE `{table_name}`") data = [ - sample_pb2.Sample( - timestamp="2025-01-01T00:00:00Z", - id="1", + ta_testrun_pb2.TestRun( + timestamp=int( + 
datetime.fromisoformat("2025-01-01T00:00:00.000000Z").timestamp() + * 1000000 + ), name="name", + classname="classname", + testsuite="testsuite", + computed_name="computed_name", + outcome=ta_testrun_pb2.TestRun.Outcome.PASSED, + failure_message="failure_message", + duration_seconds=1.0, + filename="filename", ), - sample_pb2.Sample( - timestamp="2024-12-30T00:00:00Z", - id="2", + ta_testrun_pb2.TestRun( + timestamp=int( + datetime.fromisoformat("2024-12-30T00:00:00.000000Z").timestamp() + * 1000000 + ), name="name2", + classname="classname2", + testsuite="testsuite2", + computed_name="computed_name2", + outcome=ta_testrun_pb2.TestRun.Outcome.FAILED, + failure_message="failure_message2", + duration_seconds=2.0, + filename="filename2", ), ] @@ -112,8 +130,8 @@ def test_bigquery_service_write(): bigquery_service.write( "test_dataset", - "test_table", - sample_pb2, + "testruns", + ta_testrun_pb2, serialized_data, ) @@ -121,10 +139,10 @@ def test_bigquery_service_write(): assert len(results) == 2 - assert set([row["timestamp"] for row in results]) == set( + assert {row["timestamp"] for row in results} == set( [ datetime.fromisoformat("2025-01-01T00:00:00Z"), datetime.fromisoformat("2024-12-30T00:00:00Z"), ] ) - assert set([row["name"] for row in results]) == set(["name", "name2"]) + assert {row["name"] for row in results} == set(["name", "name2"]) diff --git a/ta_storage/base.py b/ta_storage/base.py index 6a6df471d..f8b3a4629 100644 --- a/ta_storage/base.py +++ b/ta_storage/base.py @@ -1,5 +1,6 @@ from abc import ABC, abstractmethod -from typing import Any + +from test_results_parser import Testrun from database.models.reports import Upload @@ -8,11 +9,12 @@ class TADriver(ABC): @abstractmethod def write_testruns( self, + timestamp: int, repo_id: int, - commit_id: str, - branch: str, + commit_sha: str, + branch_name: str, upload: Upload, framework: str | None, - testruns: list[dict[str, Any]], + testruns: list[Testrun], ): pass diff --git a/ta_storage/bq.py b/ta_storage/bq.py index a46e01b8e..6617e31f4 100644 --- a/ta_storage/bq.py +++ b/ta_storage/bq.py @@ -1,6 +1,8 @@ -from typing import Any, cast +from datetime import datetime +from typing import Literal, TypedDict, cast from shared.config import get_config +from test_results_parser import Testrun import generated_proto.testrun.ta_testrun_pb2 as ta_testrun_pb2 from database.models.reports import Upload @@ -16,51 +18,68 @@ ) -def outcome_to_int(outcome: str) -> int: +def outcome_to_int( + outcome: Literal["pass", "skip", "failure", "error"], +) -> ta_testrun_pb2.TestRun.Outcome: match outcome: case "pass": - return 0 + return ta_testrun_pb2.TestRun.Outcome.PASSED case "skip": - return 2 + return ta_testrun_pb2.TestRun.Outcome.SKIPPED case "failure" | "error": - return 1 + return ta_testrun_pb2.TestRun.Outcome.FAILED case _: raise ValueError(f"Invalid outcome: {outcome}") +class TransformedTestrun(TypedDict): + name: str + classname: str + testsuite: str + computed_name: str + outcome: int + failure_message: str + duration: float + filename: str + + class BQDriver(TADriver): def write_testruns( self, + timestamp: int | None, repo_id: int, - commit_id: str, - branch: str, + commit_sha: str, + branch_name: str, upload: Upload, framework: str | None, - testruns: list[dict[str, Any]], + testruns: list[Testrun], ): bq_service = get_bigquery_service() - testruns = [ - {k: v for k, v in testrun.items() if k != "build_url"} - for testrun in testruns - ] - - for testrun in testruns: - testrun["outcome"] = outcome_to_int(testrun["outcome"]) + if 
timestamp is None:
+            timestamp = int(datetime.now().timestamp() * 1000000)
 
         flag_names = upload.flag_names
 
         testruns_pb: list[bytes] = []
-        for testrun in testruns:
+        for t in testruns:
             test_run = ta_testrun_pb2.TestRun(
+                timestamp=timestamp,
                 repoid=repo_id,
-                commit_sha=commit_id,
-                framework=framework or "",
-                branch=branch,
+                commit_sha=commit_sha,
+                framework=framework,
+                branch_name=branch_name,
                 flags=list(flag_names),
-                **testrun,
+                classname=t["classname"],
+                name=t["name"],
+                testsuite=t["testsuite"],
+                computed_name=t["computed_name"],
+                outcome=outcome_to_int(t["outcome"]),
+                failure_message=t["failure_message"],
+                duration_seconds=t["duration"],
+                filename=t["filename"],
             )
-            print(test_run)
             testruns_pb.append(test_run.SerializeToString())
 
         bq_service.write(DATASET_NAME, TESTRUN_TABLE_NAME, ta_testrun_pb2, testruns_pb)
diff --git a/ta_storage/pg.py b/ta_storage/pg.py
index c68bc6606..becf4fd7b 100644
--- a/ta_storage/pg.py
+++ b/ta_storage/pg.py
@@ -1,9 +1,9 @@
 from datetime import date, datetime
 from typing import Any, Literal, TypedDict
 
-from msgpack import unpackb
 from sqlalchemy.dialects.postgresql import insert
 from sqlalchemy.orm import Session
+from test_results_parser import Testrun
 
 from database.models import (
     DailyTestRollup,
@@ -13,7 +13,6 @@
     TestInstance,
     Upload,
 )
-from services.redis import get_redis_connection
 from services.test_results import generate_flags_hash, generate_test_id
 from ta_storage.base import TADriver
 
@@ -33,67 +32,6 @@ class DailyTotals(TypedDict):
     avg_duration_seconds: float
 
 
-def persist_intermediate_results(
-    db_session: Session,
-    repoid: int,
-    commitid: str,
-    branch: str | None,
-    uploads: dict[int, Upload],
-    flaky_test_set: set[str],
-) -> None:
-    tests_to_write: dict[str, dict[str, Any]] = {}
-    test_instances_to_write: list[dict[str, Any]] = []
-    daily_totals: dict[str, DailyTotals] = dict()
-    test_flag_bridge_data: list[dict] = []
-
-    for upload in uploads.values():
-        redis_client = get_redis_connection()
-
-        intermediate_key = f"ta/intermediate/{repoid}/{commitid}/{upload.id}"
-        msgpacked_intermediate_result = redis_client.get(intermediate_key)
-        if msgpacked_intermediate_result is None:
-            continue
-
-        intermediate_result = unpackb(msgpacked_intermediate_result)
-
-        repo_flag_ids = get_repo_flag_ids(db_session, repoid, upload.flag_names)
-
-        for parsed_junit in intermediate_result:
-            framework = parsed_junit["framework"]
-            for testrun in parsed_junit["testruns"]:
-                modify_structures(
-                    tests_to_write,
-                    test_instances_to_write,
-                    test_flag_bridge_data,
-                    daily_totals,
-                    testrun,
-                    upload,
-                    repoid,
-                    branch,
-                    commitid,
-                    repo_flag_ids,
-                    flaky_test_set,
-                    framework,
-                )
-
-    if len(tests_to_write) > 0:
-        save_tests(db_session, tests_to_write)
-
-    if len(test_flag_bridge_data) > 0:
-        save_test_flag_bridges(db_session, test_flag_bridge_data)
-
-    if len(daily_totals) > 0:
-        save_daily_test_rollups(db_session, daily_totals)
-
-    if len(test_instances_to_write) > 0:
-        save_test_instances(db_session, test_instances_to_write)
-
-        upload.state = "v2_persisted"
-    db_session.commit()
-
-    redis_client.delete(intermediate_key)
-
-
 def get_repo_flag_ids(db_session: Session, repoid: int, flags: list[str]) -> set[int]:
     if not flags:
         return set()
@@ -113,14 +51,14 @@ def modify_structures(
     test_instances_to_write: list[dict[str, Any]],
     test_flag_bridge_data: list[dict],
     daily_totals: dict[str, DailyTotals],
-    testrun: dict[str, Any],
+    testrun: Testrun,
     upload: Upload,
     repoid: int,
     branch: str | None,
-    commitid: str,
+ commit_sha: str, repo_flag_ids: set[int], flaky_test_set: set[str], - framework: str, + framework: str | None, ): flags_hash = generate_flags_hash(upload.flag_names) test_id = generate_test_id( @@ -134,7 +72,7 @@ def modify_structures( tests_to_write[test_id] = test test_instance = generate_test_instance_dict( - test_id, upload, testrun, commitid, branch, repoid + test_id, upload, testrun, commit_sha, branch, repoid ) test_instances_to_write.append(test_instance) @@ -158,7 +96,7 @@ def modify_structures( testrun["duration"], testrun["outcome"], branch, - commitid, + commit_sha, flaky_test_set, ) @@ -166,9 +104,9 @@ def modify_structures( def generate_test_dict( test_id: str, repoid: int, - testrun: dict[str, Any], + testrun: Testrun, flags_hash: str, - framework: str, + framework: str | None, ) -> dict[str, Any]: return { "id": test_id, @@ -185,8 +123,8 @@ def generate_test_dict( def generate_test_instance_dict( test_id: str, upload: Upload, - testrun: dict[str, Any], - commitid: str, + testrun: Testrun, + commit_sha: str, branch: str | None, repoid: int, ) -> dict[str, Any]: @@ -196,7 +134,7 @@ def generate_test_instance_dict( "duration_seconds": testrun["duration"], "outcome": testrun["outcome"], "failure_message": testrun["failure_message"], - "commitid": commitid, + "commitid": commit_sha, "branch": branch, "reduced_error_id": None, "repoid": repoid, @@ -206,7 +144,7 @@ def generate_test_instance_dict( def update_daily_totals( daily_totals: dict, test_id: str, - duration_seconds: float, + duration_seconds: float | None, outcome: Literal["pass", "failure", "error", "skip"], ): daily_totals[test_id]["last_duration_seconds"] = duration_seconds @@ -216,11 +154,22 @@ def update_daily_totals( # (old_avg * num of values used to compute old avg) + new value # ------------------------------------------------------------- # num of values used to compute old avg + 1 - daily_totals[test_id]["avg_duration_seconds"] = ( - daily_totals[test_id]["avg_duration_seconds"] - * (daily_totals[test_id]["pass_count"] + daily_totals[test_id]["fail_count"]) - + duration_seconds - ) / (daily_totals[test_id]["pass_count"] + daily_totals[test_id]["fail_count"] + 1) + if ( + duration_seconds is not None + and daily_totals[test_id]["avg_duration_seconds"] is not None + ): + daily_totals[test_id]["avg_duration_seconds"] = ( + daily_totals[test_id]["avg_duration_seconds"] + * ( + daily_totals[test_id]["pass_count"] + + daily_totals[test_id]["fail_count"] + ) + + duration_seconds + ) / ( + daily_totals[test_id]["pass_count"] + + daily_totals[test_id]["fail_count"] + + 1 + ) if outcome == "pass": daily_totals[test_id]["pass_count"] += 1 @@ -234,10 +183,10 @@ def create_daily_totals( daily_totals: dict, test_id: str, repoid: int, - duration_seconds: float, + duration_seconds: float | None, outcome: Literal["pass", "failure", "error", "skip"], branch: str | None, - commitid: str, + commit_sha: str, flaky_test_set: set[str], ): daily_totals[test_id] = { @@ -254,7 +203,7 @@ def create_daily_totals( "branch": branch, "date": date.today(), "latest_run": datetime.now(), - "commits_where_fail": [commitid] + "commits_where_fail": [commit_sha] if (outcome == "failure" or outcome == "error") else [], } @@ -335,12 +284,13 @@ def __init__(self, db_session: Session, flaky_test_set: set): def write_testruns( self, + timestamp: int | None, repo_id: int, - commit_id: str, - branch: str, + commit_sha: str, + branch_name: str, upload: Upload, framework: str | None, - testruns: list[dict[str, Any]], + testruns: list[Testrun], ): 
tests_to_write: dict[str, dict[str, Any]] = {} test_instances_to_write: list[dict[str, Any]] = [] @@ -358,15 +308,14 @@ def write_testruns( testrun, upload, repo_id, - branch, - commit_id, + branch_name, + commit_sha, repo_flag_ids, self.flaky_test_set, framework, ) if len(tests_to_write) > 0: - print(tests_to_write) save_tests(self.db_session, tests_to_write) if len(test_flag_bridge_data) > 0: diff --git a/ta_storage/tests/test_bq.py b/ta_storage/tests/test_bq.py index 73e572623..43f20e83a 100644 --- a/ta_storage/tests/test_bq.py +++ b/ta_storage/tests/test_bq.py @@ -1,6 +1,8 @@ +from datetime import datetime from unittest.mock import MagicMock, patch import pytest +from test_results_parser import Testrun import generated_proto.testrun.ta_testrun_pb2 as ta_testrun_pb2 from database.tests.factories import RepositoryFlagFactory, UploadFactory @@ -36,7 +38,7 @@ def test_bigquery_driver(dbsession, mock_bigquery_service): upload.flags.append(repo_flag_2) dbsession.flush() - test_data = [ + test_data: list[Testrun] = [ { "name": "test_name", "classname": "test_class", @@ -61,7 +63,10 @@ def test_bigquery_driver(dbsession, mock_bigquery_service): }, ] + timestamp = int(datetime.now().timestamp() * 1000000) + bq.write_testruns( + timestamp, upload.report.commit.repoid, upload.report.commit.commitid, upload.report.commit.branch, @@ -77,10 +82,11 @@ def test_bigquery_driver(dbsession, mock_bigquery_service): ta_testrun_pb2, [ ta_testrun_pb2.TestRun( + timestamp=timestamp, name="test_name", classname="test_class", testsuite="test_suite", - duration=100.0, + duration_seconds=100.0, outcome=ta_testrun_pb2.TestRun.Outcome.PASSED, filename="test_file", computed_name="test_computed_name", @@ -88,14 +94,15 @@ def test_bigquery_driver(dbsession, mock_bigquery_service): repoid=upload.report.commit.repoid, commit_sha=upload.report.commit.commitid, framework="pytest", - branch=upload.report.commit.branch, + branch_name=upload.report.commit.branch, flags=["flag1", "flag2"], ).SerializeToString(), ta_testrun_pb2.TestRun( + timestamp=timestamp, name="test_name2", classname="test_class2", testsuite="test_suite2", - duration=100.0, + duration_seconds=100.0, outcome=ta_testrun_pb2.TestRun.Outcome.FAILED, filename="test_file2", computed_name="test_computed_name2", @@ -103,7 +110,7 @@ def test_bigquery_driver(dbsession, mock_bigquery_service): repoid=upload.report.commit.repoid, commit_sha=upload.report.commit.commitid, framework="pytest", - branch=upload.report.commit.branch, + branch_name=upload.report.commit.branch, flags=["flag1", "flag2"], ).SerializeToString(), ], diff --git a/ta_storage/tests/test_pg.py b/ta_storage/tests/test_pg.py index 421408dc6..370cd1b42 100644 --- a/ta_storage/tests/test_pg.py +++ b/ta_storage/tests/test_pg.py @@ -25,6 +25,7 @@ def test_pg_driver(dbsession): dbsession.flush() pg.write_testruns( + None, upload.report.commit.repoid, upload.report.commit.id, upload.report.commit.branch, @@ -36,7 +37,7 @@ def test_pg_driver(dbsession): "classname": "test_class", "testsuite": "test_suite", "duration": 100.0, - "outcome": "passed", + "outcome": "pass", "build_url": "https://example.com/build/123", "filename": "test_file", "computed_name": "test_computed_name", @@ -47,7 +48,7 @@ def test_pg_driver(dbsession): "classname": "test_class2", "testsuite": "test_suite2", "duration": 100.0, - "outcome": "failed", + "outcome": "failure", "build_url": "https://example.com/build/123", "filename": "test_file2", "computed_name": "test_computed_name2",
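Note on the fixture change above ("passed" → "pass", "failed" → "failure"): both drivers now expect the parser's literal outcome strings ("pass", "failure", "error", "skip") — the PG rollup counters compare against them directly, and `outcome_to_int` in `ta_storage/bq.py` raises `ValueError` for anything else. A small usage sketch of that mapping, assuming the module imports as laid out in this diff:

```python
import generated_proto.testrun.ta_testrun_pb2 as ta_testrun_pb2
from ta_storage.bq import outcome_to_int

# the only accepted literals; "passed"/"failed" would raise ValueError
assert outcome_to_int("pass") == ta_testrun_pb2.TestRun.Outcome.PASSED
assert outcome_to_int("skip") == ta_testrun_pb2.TestRun.Outcome.SKIPPED
assert outcome_to_int("failure") == ta_testrun_pb2.TestRun.Outcome.FAILED
assert outcome_to_int("error") == ta_testrun_pb2.TestRun.Outcome.FAILED
```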