From 85f415c62ef06bc21bfb2c9cab0f557a0a40f43f Mon Sep 17 00:00:00 2001 From: theOehrly <23384863+theOehrly@users.noreply.github.com> Date: Fri, 22 Sep 2023 19:19:04 +0200 Subject: [PATCH] test --- .github/workflows/tests.yml | 17 +- fastf1/tests/test_cache.py | 120 -------- fastf1/tests/test_laps.py | 367 ----------------------- fastf1/tests/test_laps_summary.py | 25 -- fastf1/tests/test_livetiming.py | 40 --- fastf1/tests/test_mvapi.py | 42 --- fastf1/tests/test_plotting.py | 26 -- fastf1/tests/test_project_structure.py | 14 - fastf1/tests/test_telemetry.py | 387 ------------------------- fastf1/tests/test_utils.py | 47 --- 10 files changed, 16 insertions(+), 1069 deletions(-) delete mode 100644 fastf1/tests/test_cache.py delete mode 100644 fastf1/tests/test_laps.py delete mode 100644 fastf1/tests/test_laps_summary.py delete mode 100644 fastf1/tests/test_livetiming.py delete mode 100644 fastf1/tests/test_mvapi.py delete mode 100644 fastf1/tests/test_plotting.py delete mode 100644 fastf1/tests/test_project_structure.py delete mode 100644 fastf1/tests/test_telemetry.py delete mode 100644 fastf1/tests/test_utils.py diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3839e5fd9..d48d28588 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [ '3.8', '3.9', '3.10', '3.11'] + python-version: ['3.11'] name: Tests on ${{ matrix.python-version }} steps: - name: Setup python @@ -53,6 +53,21 @@ jobs: restore-keys: | fastf1-${{ matrix.python-version }} + - name: Cache multiple paths + uses: whywaita/actions-cache-s3@v2 + with: + path: ./test_cache + key: fastf1-${{ matrix.python-version }}-${{ hashFiles('*.*') }} + restore-keys: | + fastf1-${{ matrix.python-version }} + aws-s3-bucket: ${{ secrets.CLOUDFLARE_R2_BUCKET_NAME }} + aws-access-key-id: ${{ secrets.CLOUDFLARE_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.CLOUDFLARE_SECRET_ACCESS_KEY }} + aws-region: auto # Optional + aws-endpoint: https://b9261d7f75d36ec3c0ae559737628538.eu.r2.cloudflarestorage.com # Optional + aws-s3-bucket-endpoint: false # Optional + aws-s3-force-path-style: true # Optional + - name: Run tests run: | pytest -ra diff --git a/fastf1/tests/test_cache.py b/fastf1/tests/test_cache.py deleted file mode 100644 index 0ef7e164c..000000000 --- a/fastf1/tests/test_cache.py +++ /dev/null @@ -1,120 +0,0 @@ -import logging -import os - -import fastf1._api -from fastf1 import Cache -from fastf1.logger import LoggingManager -import fastf1.testing - - -def test_enable_cache(tmpdir): - fastf1.testing.run_in_subprocess(_test_enable_cache, tmpdir) - - -def _test_enable_cache(tmpdir): - Cache.enable_cache(tmpdir) - - -def test_cache_used_and_clear(tmpdir): - fastf1.testing.run_in_subprocess(_test_cache_used_and_clear, tmpdir) - - -def _test_cache_used_and_clear(tmpdir): - # this test requires using requests_mock to allow running offline - # other tests can depend on fastf1's internal cache (which is tested here) - # for offline running, after they've had one online run - import fastf1 - import requests_mock - - with requests_mock.Mocker() as mocker: - # create a custom requests session here so that requests_mock is - # properly used - - Cache.ci_mode(False) - LoggingManager.debug = True - # special, relevant on Linux only. 
- # ci mode does not propagate to subprocess on windows - - # enable fastf1's own pickle cache - Cache.enable_cache(tmpdir, use_requests_cache=False) - - with open('fastf1/testing/reference_data/' - 'schedule_2020.json', 'rb') as fobj: - content = fobj.read() - mocker.get('https://raw.githubusercontent.com/theOehrly/f1schedule/' - 'master/schedule_2020.json', - content=content, status_code=200) - - with open('fastf1/testing/reference_data/' - 'Index2020.json', 'rb') as fobj: - content = fobj.read() - mocker.get('https://livetiming.formula1.com/static/2020/Index.json', - content=content, status_code=200) - - # create mock repsonses for general api requests - with open('fastf1/testing/reference_data/2020_05_FP2/' - 'ergast_race.raw', 'rb') as fobj: - content = fobj.read() - mocker.get('https://ergast.com/api/f1/2020/5.json', - content=content, status_code=200) - - with open('fastf1/testing/reference_data/2020_05_FP2/' - 'ergast_race_result.raw', 'rb') as fobj: - content = fobj.read() - mocker.get('https://ergast.com/api/f1/2020/5/results.json', - content=content, status_code=200) - - # rainy and short session, good for fast test/quick loading - session = fastf1.get_session(2020, 5, 'FP2') - - # create mock responses for f1 api requests - req_pages = ['timing_data', 'timing_app_data', 'track_status', - 'session_status', 'car_data', 'position', - 'weather_data', 'driver_list', 'race_control_messages', - 'session_info'] - for p in req_pages: - with open(f'fastf1/testing/reference_data/' - f'2020_05_FP2/{p}.raw', 'rb') as fobj: - lines = fobj.readlines() - - # ensure correct newline character (as expected by api parser) - # strip all newline characters and terminate each line with \r\n - # needs to work despite os and git newline character substitution - content = b'' - for line in lines: - content += line.strip(b'\n').strip(b'\r') + b'\r\n' - - path = fastf1._api.base_url \ - + session.api_path \ - + fastf1._api.pages[p] - - mocker.get(path, content=content, status_code=200) - - # load the data - session.load() - - # check cache directory, pickled results should now exist - cache_dir_path = os.path.join(tmpdir, session.api_path[8:]) - dir_list = os.listdir(cache_dir_path) - expected_dir_list = ['car_data.ff1pkl', 'position_data.ff1pkl', - 'driver_info.ff1pkl', - 'session_status_data.ff1pkl', - 'timing_app_data.ff1pkl', - '_extended_timing_data.ff1pkl', - 'track_status_data.ff1pkl', - 'weather_data.ff1pkl', - 'race_control_messages.ff1pkl', - 'session_info.ff1pkl'] - # test both ways round - assert all(elem in expected_dir_list for elem in dir_list) - assert all(elem in dir_list for elem in expected_dir_list) - - # recreate session and reload data - # this should use the cache this time - log_handle = fastf1.testing.capture_log(logging.INFO) - session = fastf1.get_session(2020, 5, 'FP2') - session.load() - assert "Using cached data for" in log_handle.text - - Cache.clear_cache(tmpdir) # should delete pickle files - assert os.listdir(cache_dir_path) == [] diff --git a/fastf1/tests/test_laps.py b/fastf1/tests/test_laps.py deleted file mode 100644 index ebf4bac04..000000000 --- a/fastf1/tests/test_laps.py +++ /dev/null @@ -1,367 +0,0 @@ -import pytest - -import datetime - -import pandas as pd -import pandas - -import fastf1 -from fastf1.testing.reference_values import LAP_DTYPES, ensure_data_type - - -def test_constructor(): - laps = fastf1.core.Laps({'example': (1, 2, 3, 4, 5, 6)}) - sliced = laps.iloc[:2] - assert isinstance(sliced, fastf1.core.Laps) - - -def test_constructor_sliced(): - laps = 
fastf1.core.Laps({'example': (1, 2, 3, 4, 5, 6)}) - single = laps.iloc[:2].iloc[0] - assert isinstance(single, fastf1.core.Lap) - - -def test_base_class_view_laps(): - laps = fastf1.core.Laps() - bcv = laps.base_class_view - assert isinstance(bcv, pandas.DataFrame) - - -@pytest.mark.f1telapi -def test_dtypes_from_api(reference_laps_data): - session, laps = reference_laps_data - ensure_data_type(LAP_DTYPES, laps) - - -def test_dtypes_default_columns(): - laps = fastf1.core.Laps(force_default_cols=True) - ensure_data_type(LAP_DTYPES, laps) - - -@pytest.mark.f1telapi -def test_dtypes_pick(reference_laps_data): - session, laps = reference_laps_data - drv = list(laps['Driver'].unique())[1] # some driver - ensure_data_type(LAP_DTYPES, laps.pick_drivers(drv)) - ensure_data_type(LAP_DTYPES, laps.pick_quicklaps()) - ensure_data_type(LAP_DTYPES, laps.iloc[:2]) - ensure_data_type(LAP_DTYPES, - laps.pick_drivers(drv).iloc[:3].pick_quicklaps()) - - -@pytest.mark.f1telapi -def test_laps_get_car_data(reference_laps_data): - session, laps = reference_laps_data - drv_laps = laps.pick_drivers('BOT') - car = drv_laps.get_car_data() - assert car.shape == (26559, 10) - assert not car.isna().sum().sum() # sum rows then columns - for col in ('Speed', 'RPM', 'nGear', 'Throttle', 'Brake', 'DRS', - 'Time', 'SessionTime', 'Date', 'Source'): - assert col in car.columns - - -@pytest.mark.f1telapi -def test_laps_get_pos_data(reference_laps_data): - session, laps = reference_laps_data - drv_laps = laps.pick_drivers('BOT') - pos = drv_laps.get_pos_data() - assert pos.shape == (29330, 8) - assert not pos.isna().sum().sum() - for col in ('X', 'Y', 'Z', 'Status', 'Time', 'SessionTime', 'Date', - 'Source'): - assert col in pos.columns - - -@pytest.mark.f1telapi -def test_laps_get_telemetry(reference_laps_data): - session, laps = reference_laps_data - drv_laps = laps.pick_drivers('BOT') - tel = drv_laps.get_telemetry() - assert tel.shape == (55788, 18) - assert not tel.isna().sum().sum() - for col in ('Speed', 'RPM', 'nGear', 'Throttle', 'Brake', 'DRS', - 'X', 'Y', 'Z', 'Status', 'Time', 'SessionTime', 'Date', - 'Source', 'Distance', 'DriverAhead'): - assert col in tel.columns - - -@pytest.mark.f1telapi -def test_laps_get_weather_data(reference_laps_data): - session, laps = reference_laps_data - wd = laps.get_weather_data() - assert wd.shape == (926, 8) - for col in ('AirTemp', 'Humidity', 'Pressure', 'Rainfall', - 'TrackTemp', 'WindDirection', 'WindSpeed', 'Time'): - assert col in wd.columns - - # test that an empty laps object returns empty weather data - no_laps = fastf1.core.Laps() - no_laps.session = session - no_wd = no_laps.get_weather_data() - assert isinstance(no_wd, pd.DataFrame) - assert no_wd.empty - for col in ('AirTemp', 'Humidity', 'Pressure', 'Rainfall', - 'TrackTemp', 'WindDirection', 'WindSpeed', 'Time'): - assert col in wd.columns - - -@pytest.mark.f1telapi -def test_lap_get_car_data(reference_laps_data): - session, laps = reference_laps_data - drv_laps = laps.pick_fastest() - car = drv_laps.get_car_data() - assert car.shape == (340, 10) - assert not car.isna().sum().sum() # sum rows then columns - for col in ('Speed', 'RPM', 'nGear', 'Throttle', 'Brake', 'DRS', - 'Time', 'SessionTime', 'Date', 'Source'): - assert col in car.columns - - -@pytest.mark.f1telapi -def test_lap_get_pos_data(reference_laps_data): - session, laps = reference_laps_data - drv_laps = laps.pick_fastest() - pos = drv_laps.get_pos_data() - assert pos.shape == (377, 8) - assert not pos.isna().sum().sum() - for col in ('X', 'Y', 'Z', 
'Status', 'Time', 'SessionTime', 'Date', - 'Source'): - assert col in pos.columns - - -@pytest.mark.f1telapi -def test_lap_get_telemetry(reference_laps_data): - session, laps = reference_laps_data - drv_laps = laps.pick_fastest() - tel = drv_laps.get_telemetry() - assert tel.shape == (719, 18) - # DistanceToDriverAhead may contain nan values - assert not tel.loc[:, tel.columns != 'DistanceToDriverAhead']\ - .isna().sum().sum() - for col in ('Speed', 'RPM', 'nGear', 'Throttle', 'Brake', 'DRS', - 'X', 'Y', 'Z', 'Status', 'Time', 'SessionTime', 'Date', - 'Source', 'Distance', 'DriverAhead'): - assert col in tel.columns - - -@pytest.mark.f1telapi -def test_lap_get_weather_data(reference_laps_data): - session, laps = reference_laps_data - # check a valid lap - fastest = laps.pick_fastest() - wd = fastest.get_weather_data() - assert wd.shape == (8, ) - for col in ('AirTemp', 'Humidity', 'Pressure', 'Rainfall', - 'TrackTemp', 'WindDirection', 'WindSpeed', 'Time'): - assert col in wd.index - - # create a 'fake' lap for which no weather data exists - # should use last known value - lap = fastf1.core.Lap(index=fastest.index, dtype='object') - lap.session = session - lap['Time'] = datetime.timedelta(days=1/24*3) - lap['LapStartTime'] = lap['Time'] - datetime.timedelta(seconds=30) - wd_last = lap.get_weather_data() - pd.testing.assert_series_equal(wd_last, session.weather_data.iloc[-1]) - - -@pytest.mark.f1telapi -def test_laps_pick_laps(reference_laps_data): - session, laps = reference_laps_data - - # one lap - one_lap = laps.pick_laps(10) - assert one_lap.shape == (19, 31) - ensure_data_type(LAP_DTYPES, one_lap) - - # multiple laps - mul_laps = laps.pick_laps([10, 20, 30]) - assert mul_laps.shape == (54, 31) - ensure_data_type(LAP_DTYPES, mul_laps) - - # invalid input - try: - _ = laps.pick_laps(2.5) - except ValueError as e: - assert str(e) == "Invalid value 2.5 in `lap_numbers`" - - -@pytest.mark.f1telapi -def test_laps_pick_drivers(reference_laps_data): - session, laps = reference_laps_data - - # one driver - one_driver_abv = laps.pick_drivers("HAM") - one_driver_number = laps.pick_drivers(44) - assert one_driver_abv.shape == (53, 31) - ensure_data_type(LAP_DTYPES, one_driver_abv) - pd.testing.assert_frame_equal(one_driver_abv, one_driver_number) - - # multiple drivers - mul_driver_mixed = laps.pick_drivers([5, "BOT", 7]) - assert mul_driver_mixed.shape == (112, 31) - ensure_data_type(LAP_DTYPES, mul_driver_mixed) - - -@pytest.mark.f1telapi -def test_laps_pick_teams(reference_laps_data): - session, laps = reference_laps_data - - # one team - one_lap = laps.pick_teams("Mercedes") - assert one_lap.shape == (106, 31) - ensure_data_type(LAP_DTYPES, one_lap) - - # multiple teams - mul_teams = laps.pick_teams(["Mercedes", "Ferrari"]) - # both Ferraris DNF - assert mul_teams.shape == (136, 31) - ensure_data_type(LAP_DTYPES, mul_teams) - - -@pytest.mark.f1telapi -def test_laps_pick_compounds(reference_laps_data): - session, laps = reference_laps_data - - # one team - one_compound = laps.pick_compounds("HARD") - assert one_compound.shape == (172, 31) - ensure_data_type(LAP_DTYPES, one_compound) - - # multiple compounds - mul_compounds = laps.pick_compounds(["SOFT", "MEDIUM"]) - assert mul_compounds.shape == (754, 31) - ensure_data_type(LAP_DTYPES, mul_compounds) - - -@pytest.mark.f1telapi -def test_laps_pick_track_status(reference_laps_data): - session, laps = reference_laps_data - - # equals - equals = laps.pick_track_status('2', how="equals") - assert equals.shape == (48, 31) - 
ensure_data_type(LAP_DTYPES, equals) - - # contains - contains = laps.pick_track_status('4', how="contains") - assert contains.shape == (115, 31) - ensure_data_type(LAP_DTYPES, contains) - - # excludes - excludes = laps.pick_track_status('4', how="excludes") - assert excludes.shape == (811, 31) - ensure_data_type(LAP_DTYPES, excludes) - - # any - any_ = laps.pick_track_status('12', how="any") - assert any_.shape == (848, 31) - ensure_data_type(LAP_DTYPES, any_) - - # none - none = laps.pick_track_status('46', how="none") - assert none.shape == (811, 31) - ensure_data_type(LAP_DTYPES, none) - - -@pytest.mark.f1telapi -@pytest.mark.parametrize("source", ["session_status", "timing_data"]) -def test_split_quali_laps(source): - session = fastf1.get_session(2023, 2, 'Q') - session.load(telemetry=False, weather=False) - - if source == "session_status": - # delete precalculated split times (from api parser) - session._session_split_times = None - - q1, q2, q3 = session.laps.split_qualifying_sessions() - - assert len(q1['DriverNumber'].unique()) == 20 - assert len(q2['DriverNumber'].unique()) == 15 - assert len(q3['DriverNumber'].unique()) == 10 - - -@pytest.mark.f1telapi -@pytest.mark.parametrize("source", ["session_status", "timing_data"]) -def test_split_sprint_shootout_laps(source): - session = fastf1.get_session(2023, 4, 'SS') - session.load(telemetry=False, weather=False) - - if source == "session_status": - # delete precalculated split times (from api parser) - session._session_split_times = None - - q1, q2, q3 = session.laps.split_qualifying_sessions() - - assert len(q1['DriverNumber'].unique()) == 20 - - # Logan Sargeant was 15th in Q1 but crashed and couldn't participate in Q2 - assert len(q2['DriverNumber'].unique()) == 14 - assert len(q3['DriverNumber'].unique()) == 9 - - -@pytest.mark.f1telapi -@pytest.mark.parametrize("source", ["session_status", "timing_data"]) -def test_calculated_quali_results(source): - session = fastf1.get_session(2023, 4, 'Q') - session.load(telemetry=False, weather=False) - - # copy and delete (!) before recalculating - ergast_results = session.results.copy() - session.results.loc[:, ('Q1', 'Q2', 'Q3')] = pd.NaT - - if source == "session_status": - # delete precalculated split times (from api parser) - session._session_split_times = None - - session._calculate_quali_like_session_results(force=True) - - # Note that differences may exist if one or more drivers didn't set a - # proper lap time in any of the Quali sessions. In this case, Ergast may - # still return a (very slow) lap time, while the calculation will return - # NaT. This is acceptable. Testing is done on a session where this is not - # an issue. - pd.testing.assert_frame_equal(ergast_results, session.results) - - -@pytest.mark.f1telapi -@pytest.mark.parametrize("source", ["session_status", "timing_data"]) -def test_quali_q3_cancelled(source): - session = fastf1.get_session(2023, 4, 'Q') - session.load(telemetry=False, weather=False) - - # Remove Q3 to simulate cancelled Q3. If a future race has a cancelled Q3, - # that would be a better test case. The last one was the US GP in 2015, so - # no lap data is available. 
- session.session_status.drop([13, 14, 15, 16], inplace=True) - session.results['Q3'] = pd.NaT - if source == "session_status": - # delete precalculated split times (from api parser) - session._session_split_times = None - else: - session._session_split_times.pop(-1) - - # Test split_qualifying_sessions() - q1, q2, q3 = session.laps.split_qualifying_sessions() - - assert len(q1['DriverNumber'].unique()) == 20 - assert len(q2['DriverNumber'].unique()) == 15 - assert q3 is None - - # Test _calculate_quali_like_session_results() - # copy and delete (!) before recalculating - orig_results = session.results.copy() - session.results.loc[:, ('Q1', 'Q2', 'Q3')] = pd.NaT - session._calculate_quali_like_session_results(force=True) - - # Note that differences may exist if one or more drivers didn't set a - # proper lap time in any of the Quali sessions. In this case, Ergast may - # still return a (very slow) lap time, while the calculation will return - # NaT. This is acceptable. Testing is done on a session where this is not - # an issue. - pd.testing.assert_series_equal( - session.results['Q1'].sort_values(), orig_results['Q1'].sort_values()) - pd.testing.assert_series_equal( - session.results['Q2'].sort_values(), orig_results['Q2'].sort_values()) - assert session.results['Q3'].isna().all() diff --git a/fastf1/tests/test_laps_summary.py b/fastf1/tests/test_laps_summary.py deleted file mode 100644 index 46634e85f..000000000 --- a/fastf1/tests/test_laps_summary.py +++ /dev/null @@ -1,25 +0,0 @@ -# test api laps data stuff; only make sure that nothing crashes - -import pytest -import fastf1 as ff1 - - -@pytest.mark.f1telapi -@pytest.mark.slow -def test_2019(): - for evn in range(1, 22): # 21 races - for ses in ('FP1', 'FP2', 'FP3', 'Q', 'R'): - if evn == 17 and ses == 'FP3': - continue # session did not take place - - session = ff1.get_session(2019, evn, ses) - session.load(telemetry=False) - - -@pytest.mark.f1telapi -@pytest.mark.slow -def test_2020(): - for evn in range(1, 20): # 19 races - for ses in ('FP1', 'FP2', 'FP3', 'Q', 'R'): - session = ff1.get_session(2020, evn, ses) - session.load(telemetry=False) diff --git a/fastf1/tests/test_livetiming.py b/fastf1/tests/test_livetiming.py deleted file mode 100644 index 611900255..000000000 --- a/fastf1/tests/test_livetiming.py +++ /dev/null @@ -1,40 +0,0 @@ -import os - -import fastf1.events -from fastf1.livetiming.data import LiveTimingData - - -def test_file_loading_w_errors(): - # load file with many errors and invalid data without crashing - livedata = LiveTimingData('fastf1/testing/reference_data/livedata/with_errors.txt') - livedata.load() - - -def test_file_loading(): - # load a valid file - livedata = LiveTimingData('fastf1/testing/reference_data/livedata/2021_1_FP3.txt') - livedata.load() - - session = fastf1.get_session(2021, 1, 'Practice 3') - session.load(livedata=livedata) - - assert session.laps.shape == (273, 31) - assert session.car_data['44'].shape == (17362, 10) - - -def test_duplicate_removal(tmpdir): - # create a temporary file with two identical lines of data - tmpfile = os.path.join(tmpdir, 'tmpfile.txt') - data = "['TimingAppData', {'Lines': {'22': {'Stints': {'0': {" \ - "'LapFlags': 0, 'Compound': 'UNKNOWN', 'New': 'false'," \ - "'TyresNotChanged': '0', 'TotalLaps': 0, 'StartLaps':" \ - "0}}}}}, '2021-03-27T12:00:32.086Z']\n" - with open(tmpfile, 'w') as fobj: - fobj.write(data) - fobj.write(data) - - livedata = LiveTimingData(tmpfile) - assert len(livedata.get('TimingAppData')) == 1 - - livedata = LiveTimingData(tmpfile, 
remove_duplicates=False) - assert len(livedata.get('TimingAppData')) == 2 diff --git a/fastf1/tests/test_mvapi.py b/fastf1/tests/test_mvapi.py deleted file mode 100644 index 0f842d2c6..000000000 --- a/fastf1/tests/test_mvapi.py +++ /dev/null @@ -1,42 +0,0 @@ -import requests_mock - -from fastf1 import get_session -from fastf1.mvapi import get_circuit_info - - -def _setup_mocker(mocker): - with open('fastf1/testing/reference_data/2020_05_FP2/mvapi_circuits.raw', - 'rb') as fobj: - content = fobj.read() - mocker.get('https://api.multiviewer.app/api/v1/circuits/39/2020', - content=content, status_code=200) - - -def test_get_circuit_info(): - with requests_mock.Mocker() as mocker: - _setup_mocker(mocker) - circuit_info = get_circuit_info(year=2020, circuit_key=39) - - assert circuit_info is not None - - for col, dtype in (("X", 'float64'), ("Y", 'float64'), ("Number", 'int64'), - ("Letter", 'object'), ("Angle", 'float64'), - ("Distance", 'float64')): - assert col in circuit_info.corners.columns - assert circuit_info.corners.dtypes[col] == dtype - - -def test_get_circuit_info_warns_no_telemetry(caplog): - session = get_session(2020, 'Italy', 'R') - session.load(telemetry=False) - - with requests_mock.Mocker() as mocker: - _setup_mocker(mocker) - session.get_circuit_info() - - assert "Failed to generate marker distance information" in caplog.text - - -def test_get_circuit_info_invalid_key(caplog): - get_circuit_info(year=2020, circuit_key=0) - assert "Failed to load circuit info" in caplog.text diff --git a/fastf1/tests/test_plotting.py b/fastf1/tests/test_plotting.py deleted file mode 100644 index 57049bf58..000000000 --- a/fastf1/tests/test_plotting.py +++ /dev/null @@ -1,26 +0,0 @@ -import pytest -from fastf1.plotting import TEAM_COLORS, TEAM_TRANSLATE, DRIVER_TRANSLATE, DRIVER_COLORS - - -def test_team_colors_dict_warning(): - with pytest.raises(KeyError): - with pytest.warns(UserWarning): - TEAM_COLORS['Ferrari'] - - with pytest.warns(UserWarning): - TEAM_COLORS.get('Ferrari', None) - - TEAM_COLORS['ferrari'] - TEAM_COLORS.get('ferrari', None) - - -def test_team_color_name_abbreviation_integrity(): - for value in TEAM_TRANSLATE.values(): - assert value in TEAM_COLORS - assert len(TEAM_COLORS) == len(TEAM_TRANSLATE) - - -def test_driver_color_name_abbreviation_integrity(): - for value in DRIVER_TRANSLATE.values(): - assert value in DRIVER_COLORS - assert len(DRIVER_COLORS) == len(DRIVER_TRANSLATE) diff --git a/fastf1/tests/test_project_structure.py b/fastf1/tests/test_project_structure.py deleted file mode 100644 index 1a7e422ab..000000000 --- a/fastf1/tests/test_project_structure.py +++ /dev/null @@ -1,14 +0,0 @@ -import pytest -import subprocess - -pytestmark = pytest.mark.prjdoc - - -def test_readme_renders(): - # verify that the readme file renders without errors for pypi too - ret = subprocess.call('python -m readme_renderer README.md', shell=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) - - if ret != 0: - raise Exception("README fails to render correctly!") diff --git a/fastf1/tests/test_telemetry.py b/fastf1/tests/test_telemetry.py deleted file mode 100644 index 15cf53dd5..000000000 --- a/fastf1/tests/test_telemetry.py +++ /dev/null @@ -1,387 +0,0 @@ -import pytest - -import pandas -import numpy - -import fastf1.core -from fastf1.testing.reference_values import \ - (CAR_DATA_DTYPES, POS_DATA_DTYPES, ensure_data_type) - - -def test_constructor(): - tel = fastf1.core.Telemetry({'example': (1, 2, 3, 4, 5, 6)}) - sliced = tel.iloc[:2] - assert isinstance(sliced, 
fastf1.core.Telemetry) - - -def test_base_class_view(): - tel = fastf1.core.Telemetry({'example': (1, 2, 3, 4, 5, 6)}) - bcv = tel.base_class_view - assert isinstance(bcv, pandas.DataFrame) - - -def test_metadata_propagation_slicing(): - class Example: - pass - e = Example() - - tel = fastf1.core.Telemetry({'example': (1, 2, 3, 4, 5, 6)}, session=e) - partial = tel.iloc[:2] - assert hasattr(partial, 'session') - assert isinstance(partial.session, Example) - - -def test_merging_with_metadata_propagation(): - class Example: - pass - e = Example() - - tel1 = fastf1.core.Telemetry({'example_1': (1, 2, 3, 4, 5, 6)}, session=e) - tel2 = fastf1.core.Telemetry({'example_2': (1, 2, 3, 4, 5, 6)}, session=e) - merged = tel1.merge(tel2, left_index=True, right_index=True) - assert hasattr(merged, 'session') - assert isinstance(merged.session, Example) - assert merged.session is e - assert all(col in merged.columns for col in ('example_1', 'example_2')) - - -def test_joining_with_metadata_propagation(): - class Example: - pass - - e = Example() - - tel1 = fastf1.core.Telemetry({'example_1': (1, 2, 3, 4, 5, 6)}, session=e) - tel2 = fastf1.core.Telemetry({'example_2': (1, 2, 3, 4, 5, 6)}, session=e) - joined = tel1.join(tel2) - assert hasattr(joined, 'session') - assert isinstance(joined.session, Example) - assert joined.session is e - assert all(col in joined.columns for col in ('example_1', 'example_2')) - - -@pytest.mark.f1telapi -def test_merge_channels_with_metadata_propagation(reference_laps_data): - session, laps = reference_laps_data - lap = laps.pick_fastest() - car_data = lap.get_car_data() - pos_data = lap.get_pos_data() - - for freq in ('original', 10): - merged = car_data.merge_channels(pos_data, frequency=freq) - for attr in fastf1.core.Telemetry._metadata: - assert getattr(merged, attr, None) is not None - assert merged.session is session - - -@pytest.mark.f1telapi -def test_resample_channels_with_metadata_propagation(reference_laps_data): - session, laps = reference_laps_data - lap = laps.pick_fastest() - car_data = lap.get_car_data() - - for freq in ('0.5S', '0.1S'): - resampled = car_data.resample_channels(rule=freq) - for attr in fastf1.core.Telemetry._metadata: - assert getattr(resampled, attr, None) is not None - assert resampled.session is session - - -@pytest.mark.f1telapi -def test_dtypes_from_api(reference_laps_data): - session, laps = reference_laps_data - for drv in session.car_data.keys(): - ensure_data_type(CAR_DATA_DTYPES, session.car_data[drv]) - - for drv in session.pos_data.keys(): - ensure_data_type(POS_DATA_DTYPES, session.pos_data[drv]) - - -@pytest.mark.f1telapi -def test_slice_by_time(reference_laps_data): - session, laps = reference_laps_data - drv = list(session.car_data.keys())[1] # some driver - test_data = session.car_data[drv] - t0 = test_data['SessionTime'].iloc[1000] - t1 = test_data['SessionTime'].iloc[2000] - - slice1 = test_data.slice_by_time(t0, t1) - assert slice1['SessionTime'].iloc[0] == t0 - assert slice1['SessionTime'].iloc[-1] == t1 - assert len(slice1) == 1001 - ensure_data_type(CAR_DATA_DTYPES, slice1) - - dt = pandas.Timedelta(100, 'ms') - slice2 = test_data.slice_by_time(t0-dt, t1+dt, interpolate_edges=True) - assert slice2['SessionTime'].iloc[0] == t0 - dt - assert slice2['SessionTime'].iloc[-1] == t1 + dt - assert len(slice2) == 1003 - ensure_data_type(CAR_DATA_DTYPES, slice2) - - -@pytest.mark.f1telapi -def test_slice_by_mask(reference_laps_data): - session, laps = reference_laps_data - drv = list(session.car_data.keys())[1] # some driver - 
test_data = session.car_data[drv] - mask = numpy.array([False, ] * len(test_data)) - mask[200:500] = True - - slice1 = test_data.slice_by_mask(mask) - assert len(slice1) == 300 - assert slice1['SessionTime'].iloc[0] == test_data['SessionTime'].iloc[200] - - slice2 = test_data.slice_by_mask(mask, pad=2, pad_side='both') - ref_mask = numpy.array([False, ] * len(test_data)) - ref_mask[198:502] = True - assert len(slice2) == 304 - assert slice2['SessionTime'].iloc[0] == test_data['SessionTime'].iloc[198] - - -@pytest.mark.f1telapi -def test_slice_by_lap(reference_laps_data): - session, laps = reference_laps_data - drv = list(session.car_data.keys())[1] # some driver - test_data = session.car_data[drv] - test_laps = laps.pick_drivers(drv) - - lap2 = test_laps[test_laps['LapNumber'] == 2].iloc[0] - lap3 = test_laps[test_laps['LapNumber'] == 3].iloc[0] - lap2_3 = test_laps[(test_laps['LapNumber'] == 2) | (test_laps['LapNumber'] == 3)] - - tel2 = test_data.slice_by_lap(lap2) - tel3 = test_data.slice_by_lap(lap3) - tel2_3 = test_data.slice_by_lap(lap2_3) - - assert len(tel2) > 0 - assert len(tel3) > 0 - assert len(tel2_3) > 0 - assert len(tel2_3) == len(tel2) + len(tel3) - - -@pytest.mark.f1telapi -def test_merging_original_freq(reference_laps_data): - session, laps = reference_laps_data - lap = laps.pick_fastest() - drv = lap['DriverNumber'] - test_car_data = session.car_data[drv].slice_by_lap(lap) - test_pos_data = session.pos_data[drv].slice_by_lap(lap) - merged = test_car_data.merge_channels(test_pos_data, frequency='original') - - ensure_data_type(CAR_DATA_DTYPES, merged) - ensure_data_type(POS_DATA_DTYPES, merged) - - # test that all channels still exist - channels = set(test_car_data.columns).union(set(test_pos_data.columns)) - for ch in channels: - assert ch in merged.columns - - # test that merged number of samples is within 1% of sum of samples of the individual objects - # some samples can overlap and therefore be combined during merging but should only happen for very few - assert round((len(test_car_data) + len(test_pos_data)) / len(merged), 2) == 1.0 - - # no values should be nan; everything should be interpolated - assert not pandas.isnull(merged.to_numpy()).any() - - # check correct timing - assert merged['Time'].iloc[0] == pandas.Timedelta(0) - assert merged['SessionTime'].iloc[0] != pandas.Timedelta(0) - - -@pytest.mark.f1telapi -def test_merging_10_hz(reference_laps_data): - session, laps = reference_laps_data - lap = laps.pick_fastest() - drv = lap['DriverNumber'] - test_car_data = session.car_data[drv].slice_by_lap(lap) - test_pos_data = session.pos_data[drv].slice_by_lap(lap) - merged = test_car_data.merge_channels(test_pos_data, frequency=10) - - ensure_data_type(CAR_DATA_DTYPES, merged) - ensure_data_type(POS_DATA_DTYPES, merged) - - # test that all channels still exist - channels = set(test_car_data.columns).union(set(test_pos_data.columns)) - for ch in channels: - assert ch in merged.columns - - # assert correct number of samples for duration at 10 Hz within +-1 sample - n_samples_target = round(test_car_data['Time'].iloc[-1].total_seconds() * 10, 0) - assert len(merged) in (n_samples_target-1, n_samples_target, n_samples_target+1) - - # no values should be nan; everything should be interpolated - assert not pandas.isnull(merged.to_numpy()).any() - - # check correct timing - assert merged['Time'].iloc[0] == pandas.Timedelta(0) - assert merged['SessionTime'].iloc[0] != pandas.Timedelta(0) - - -def test_drop_unknown_channels(caplog): - 
fastf1.core.Telemetry.register_new_channel("test_keep", "discrete") - data = {"Speed": [200, 202, 203], - "test_keep": [1, 2, 3], - "test_drop": [1, 2, 3]} - - tel = fastf1.core.Telemetry(data, drop_unknown_channels=False) - assert "Speed" in tel.columns - assert "test_keep" in tel.columns - assert "test_drop" in tel.columns - - tel = fastf1.core.Telemetry(data, drop_unknown_channels=True) - assert "Speed" in tel.columns - assert "test_keep" in tel.columns - assert "test_drop" not in tel.columns - assert "unknown telemetry channels have been dropped" in caplog.text - - fastf1.core.Telemetry._CHANNELS.pop("test_keep") # clean up - - -@pytest.mark.f1telapi -def test_resampling_down(reference_laps_data): - session, laps = reference_laps_data - lap = laps.pick_fastest() - drv = lap['DriverNumber'] - test_data = session.car_data[drv].slice_by_lap(lap) - - test_data = test_data.resample_channels(rule='0.5S') - - # assert correct number of samples for duration at 2 Hz within +-1 sample - n_samples_target = round(test_data['Time'].iloc[-1].total_seconds() * 2, 0) - assert len(test_data) in (n_samples_target - 1, n_samples_target, n_samples_target + 1) - - # no values should be nan; everything should be interpolated - assert not pandas.isnull(test_data.to_numpy()).any() - - # check correct timing - assert test_data['Time'].iloc[0] == pandas.Timedelta(0) - assert test_data['SessionTime'].iloc[0] != pandas.Timedelta(0) - - -@pytest.mark.f1telapi -def test_resampling_up(reference_laps_data): - session, laps = reference_laps_data - lap = laps.pick_fastest() - drv = lap['DriverNumber'] - test_data = session.car_data[drv].slice_by_lap(lap) - - test_data = test_data.resample_channels(rule='0.05S') - - # assert correct number of samples for duration at 20 Hz within +-1 sample - n_samples_target = round(test_data['Time'].iloc[-1].total_seconds() * 20, 0) - assert len(test_data) in (n_samples_target - 1, n_samples_target, n_samples_target + 1) - - # no values should be nan; everything should be interpolated - assert not pandas.isnull(test_data.to_numpy()).any() - - # check correct timing - assert test_data['Time'].iloc[0] == pandas.Timedelta(0) - assert test_data['SessionTime'].iloc[0] != pandas.Timedelta(0) - - -@pytest.mark.f1telapi -def test_add_driver_ahead(reference_laps_data): - session, laps = reference_laps_data - test_data = laps.pick_fastest().get_car_data() - test_data = test_data.add_driver_ahead() - # only first value may be NaN - assert test_data['DistanceToDriverAhead'].isnull().sum() <= 1 - - -@pytest.mark.f1telapi -def test_add_driver_ahead_resampled(reference_laps_data): - session, laps = reference_laps_data - test_data = laps.pick_fastest().get_car_data()\ - .resample_channels(rule='0.5S') - test_data = test_data.add_driver_ahead() - # only first value may be NaN - assert test_data['DistanceToDriverAhead'].isnull().sum() <= 1 - - -@pytest.mark.f1telapi -def test_add_track_status(reference_laps_data): - session, laps = reference_laps_data - - test_data = laps.pick_drivers('VER').get_telemetry() - test_data = test_data.add_track_status() - - # Get statuses - statuses = session.track_status['Status'] - - # First and last track statuses must be equal to first and last - # statuses - assert test_data['TrackStatus'].iloc[0] == statuses.iloc[0] - assert test_data['TrackStatus'].iloc[-1] == statuses.iloc[-1] - - -def create_sample_car_data(): - # create sample telemetry for testing the .add_* methods - # which work with distance, only time and speed really needs - # to make sense for that - t0 = 
pandas.Timestamp(year=2020, month=5, day=7, hour=14) - t1 = pandas.Timestamp(year=2020, month=5, day=7, hour=14, minute=1) - dates = pandas.date_range(t0, t1, freq='261 ms') - session_times = dates - t0 - times = session_times - t = numpy.linspace(0, len(dates)*0.261, len(dates)) - speed = 80 + numpy.sqrt(320*t) - - tel = fastf1.core.Telemetry({ - 'Time': times, 'SessionTime': session_times, 'Date': dates, - 'Source': 'car', 'Speed': speed, 'RPM': 9000, 'nGear': 7, - 'Throttle': 100, 'Brake': 0, 'DRS': 0 - }) - return tel - - -def test_add_distance(): - car_data = create_sample_car_data() - car_data = car_data.add_distance() - - # check that the results make sense - assert 'Distance' in car_data.columns - assert car_data['Distance'].max() == car_data['Distance'].iloc[-1] - # distance is analytically verified (+-10m due to numeric integration) - assert car_data['Distance'].max().round(0) == 2867 - - # set all distance values to zero and check that they stay zero - car_data['Distance'] = 0 - car_data = car_data.add_distance(drop_existing=False) - assert pandas.unique(car_data['Distance']) == [0, ] - - # now without drop_existing=False - car_data = car_data.add_distance() - assert car_data['Distance'].max().round(0) == 2867 - - -def test_add_relative_distance(): - # test with no existing distance column first - car_data = create_sample_car_data() - car_data = car_data.add_relative_distance() - - # check that the results make sense - assert 'RelativeDistance' in car_data.columns - assert car_data['RelativeDistance'].max() == \ - car_data['RelativeDistance'].iloc[-1] - assert car_data['RelativeDistance'].max() == 1.0 - assert car_data['RelativeDistance'].min() == 0.0 - - # set all distance values to zero and check that they stay zero - car_data['RelativeDistance'] = 0 - car_data = car_data.add_relative_distance(drop_existing=False) - assert pandas.unique(car_data['RelativeDistance']) == [0, ] - - # now with drop existing = True - car_data = car_data.add_relative_distance() - assert car_data['RelativeDistance'].max().round(0) == 1.0 - - # test with already existing distance column - car_data = create_sample_car_data() - car_data = car_data.add_distance().add_relative_distance() - - # check that the results make sense - assert 'RelativeDistance' in car_data.columns - assert car_data['RelativeDistance'].max() == \ - car_data['RelativeDistance'].iloc[-1] - assert car_data['RelativeDistance'].max() == 1.0 - assert car_data['RelativeDistance'].min() == 0.0 diff --git a/fastf1/tests/test_utils.py b/fastf1/tests/test_utils.py deleted file mode 100644 index 7debcef6a..000000000 --- a/fastf1/tests/test_utils.py +++ /dev/null @@ -1,47 +0,0 @@ -import datetime - -from fastf1.utils import to_datetime, to_timedelta - - -def test_to_timedelta(): - cases = [ - ('13:24:46.320215', - datetime.timedelta(hours=13, minutes=24, - seconds=46, microseconds=320215)), - ('13:24:46.32', - datetime.timedelta(hours=13, minutes=24, - seconds=46, microseconds=320000)), - ('13:24:46.', - datetime.timedelta(hours=13, minutes=24, - seconds=46, microseconds=0)), - ('13:24:46', datetime.timedelta(hours=13, minutes=24, seconds=46)), - ('24:46', datetime.timedelta(minutes=24, seconds=46)), - ('4:46', datetime.timedelta(minutes=4, seconds=46)), - ('46', datetime.timedelta(seconds=46)), - ('4:46.5264', datetime.timedelta(minutes=4, seconds=46, - microseconds=526400)), - - ] - for ts, expected in cases: - assert to_timedelta(ts) == expected - - -def test_to_datetime(): - cases = [ - ('2020-12-13T13:27:15.320653Z', - 
datetime.datetime(2020, 12, 13, 13, 27, 15, 320653)),
-        ('2020-12-13T13:27:15.320000Z',
-         datetime.datetime(2020, 12, 13, 13, 27, 15, 320000)),
-        ('2020-12-13T13:27:15.320000',
-         datetime.datetime(2020, 12, 13, 13, 27, 15, 320000)),
-        ('2020-12-13T13:27:15.32Z',
-         datetime.datetime(2020, 12, 13, 13, 27, 15, 320000)),
-        ('2020-12-13T13:27:15',
-         datetime.datetime(2020, 12, 13, 13, 27, 15, 0)),
-        ('2020-12-13T13:27:15.',
-         datetime.datetime(2020, 12, 13, 13, 27, 15, 0)),
-        (datetime.datetime(2020, 12, 13, 13, 27, 15, 0),
-         datetime.datetime(2020, 12, 13, 13, 27, 15, 0))
-    ]
-    for ts, expected in cases:
-        assert to_datetime(ts) == expected