diff --git a/repro.py b/repro.py
new file mode 100644
index 00000000000000..c0dad6b2cb3bbe
--- /dev/null
+++ b/repro.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python3
+import os
+import time
+import subprocess
+
+import cereal.messaging as messaging
+from msgq.visionipc import VisionIpcClient, VisionStreamType, VisionBuf
+from openpilot.common.basedir import BASEDIR
+from openpilot.common.params import Params
+
+import cv2
+import numpy as np
+
+run_cnt = 0
+frame_cnt = 0
+def is_tearing(img):
+  global run_cnt
+  global frame_cnt
+  frame_cnt += 1
+  image = np.array(img.data[:img.uv_offset], dtype=np.uint8).reshape((-1, img.stride))[:img.height, :img.width]
+
+  sobel_x = cv2.Sobel(image, cv2.CV_64F, 1, 0, ksize=3) # Horizontal edges
+  sobel_y = cv2.Sobel(image, cv2.CV_64F, 0, 1, ksize=3) # Vertical edges
+
+  edge_magnitude = np.sqrt(sobel_x**2 + sobel_y**2)
+  edge_magnitude = cv2.normalize(edge_magnitude, None, 0, 255, cv2.NORM_MINMAX)
+
+  _, binary_mask = cv2.threshold(edge_magnitude.astype(np.uint8), 30, 255, cv2.THRESH_BINARY)
+  row_sums = np.sum(binary_mask, axis=1)
+
+  tearing_regions = row_sums > np.mean(row_sums) + 1.5 * np.std(row_sums)
+
+  tearing = False
+  output_image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
+  for i, is_tearing in enumerate(tearing_regions):
+    if is_tearing and 630 < i < 675:
+      cv2.line(output_image, (0, i), (output_image.shape[1], i), (0, 0, 255), 1)
+      tearing = True
+
+  #cv2.imwrite(f"/data/tmp/frame_{run_cnt:03}_{frame_cnt:03}_{tearing}.png", image)
+  #cv2.imwrite(f"/data/tmp/frame_{run_cnt:03}_{frame_cnt:03}_debug.png", output_image)
+  return tearing
+
+if __name__ == "__main__":
+  if False:
+    v = VisionIpcClient("camerad", VisionStreamType.VISION_STREAM_ROAD, True)
+    assert v.connect(False)
+    while True:
+      img = v.recv()
+      assert img is not None
+      print("is_tearing", is_tearing(img))
+    exit()
+
+  tearing_run_cnt = 0
+  manager_path = BASEDIR + "/system/manager/manager.py"
+  params = Params()
+  for _ in range(30):
+    run_cnt += 1
+    print(f"====== {run_cnt} ======")
+
+    v = VisionIpcClient("camerad", VisionStreamType.VISION_STREAM_ROAD, True)
+    try:
+      proc = subprocess.Popen(["python", manager_path], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+      while not v.connect(False):
+        time.sleep(0.1)
+      time.sleep(2)
+
+      tearing_cnt = 0
+      for _ in range(30):
+        while (img := v.recv()) is None:
+          time.sleep(0.1)
+        tearing_cnt += is_tearing(img)
+      tore = tearing_cnt >= 1
+      tearing_run_cnt += tore
+      print(" - tore ", tore, tearing_cnt)
+      print(" - route", params.get("CurrentRoute", encoding="utf8"))
+      print(f" - {tearing_run_cnt:03} / {run_cnt:03} ({tearing_run_cnt/run_cnt:.2%})")
+
+
+    finally:
+      proc.terminate()
+      if proc.wait(60) is None:
+        proc.kill()
+      #break
diff --git a/system/camerad/cameras/camera_qcom2.cc b/system/camerad/cameras/camera_qcom2.cc
index ef5a03085b55c3..25b9422e8fb087 100644
--- a/system/camerad/cameras/camera_qcom2.cc
+++ b/system/camerad/cameras/camera_qcom2.cc
@@ -55,7 +55,7 @@ class CameraState {
 
   float fl_pix = 0;
 
-  CameraState(SpectraMaster *master, const CameraConfig &config) : camera(master, config, true /*config.stream_type == VISION_STREAM_ROAD*/) {};
+  CameraState(SpectraMaster *master, const CameraConfig &config) : camera(master, config, config.stream_type == VISION_STREAM_ROAD) {};
   ~CameraState();
   void init(VisionIpcServer *v, cl_device_id device_id, cl_context ctx);
   void update_exposure_score(float desired_ev, int exp_t, int exp_g_idx, float exp_gain);
diff --git a/system/camerad/cameras/spectra.cc b/system/camerad/cameras/spectra.cc
index b262ddba9a57ab..d8fef9b65a6b4d 100644
--- a/system/camerad/cameras/spectra.cc
+++ b/system/camerad/cameras/spectra.cc
@@ -666,11 +666,14 @@ void SpectraCamera::config_ife(int idx, int request_id, bool init) {
   assert(ret == 0);
 }
 
+#include <deque>
+#include <fstream>
 void SpectraCamera::enqueue_buffer(int i, bool dp) {
   int ret;
   uint64_t request_id = request_ids[i];
 
   if (buf_handle_raw[i] && sync_objs[i]) {
+    double st = millis_since_boot();
     // wait
     struct cam_sync_wait sync_wait = {0};
     sync_wait.sync_obj = sync_objs[i];
@@ -680,10 +683,34 @@ void SpectraCamera::enqueue_buffer(int i, bool dp) {
       LOGE("failed to wait for sync: %d %d", ret, sync_wait.sync_obj);
       // TODO: handle frame drop cleanly
     }
+    double et = millis_since_boot();
 
     buf.frame_metadata[i].timestamp_end_of_isp = (uint64_t)nanos_since_boot();
     buf.frame_metadata[i].timestamp_eof = buf.frame_metadata[i].timestamp_sof + sensor->readout_time_ns;
     if (dp) buf.queue(i);
+    if (buf.stream_type == VISION_STREAM_ROAD) {
+      double dt = et-st;
+      printf("%.2f ms\n", dt);
+      static std::deque<double> dq;
+      dq.push_back(dt);
+      if (dq.size() > 20*3) {
+        dq.pop_front();
+
+        int cnt = 0;
+        for (int j = 0; j < dq.size(); j++) {
+          if (dq.at(j) < 1.0f) cnt++;
+        }
+
+        if (cnt > 3) {
+          printf("BAD!!!!\n");
+          std::ofstream("/tmp/bad", std::ios::app).close();
+        } else {
+          std::ofstream("/tmp/good", std::ios::app).close();
+        }
+      }
+
+    }
+
     // destroy old output fence
     for (auto so : {sync_objs, sync_objs_bps_out}) {
       if (so[i] == 0) continue;
diff --git a/system/hardware/tici/tests/test_power_draw.py b/system/hardware/tici/tests/test_power_draw.py
index 0ef34549b58472..8598b2faa20bbb 100644
--- a/system/hardware/tici/tests/test_power_draw.py
+++ b/system/hardware/tici/tests/test_power_draw.py
@@ -31,7 +31,7 @@ def name(self):
 
 
 PROCS = [
-  Proc(['camerad'], 2.1, msgs=['roadCameraState', 'wideRoadCameraState', 'driverCameraState']),
+  Proc(['camerad'], 1.75, msgs=['roadCameraState', 'wideRoadCameraState', 'driverCameraState']),
   Proc(['modeld'], 1.12, atol=0.2, msgs=['modelV2']),
   Proc(['dmonitoringmodeld'], 0.5, msgs=['driverStateV2']),
   Proc(['encoderd'], 0.23, msgs=[]),
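
Note on the spectra.cc debug instrumentation (commentary, not part of the patch): it times the CAM_SYNC_WAIT for each road-camera frame and keeps a rolling window of the last 20*3 samples; once the window is full, more than 3 waits finishing in under 1 ms is treated as "BAD", on the assumption that a near-instant wait means the fence was already signaled before camerad waited on it. Below is a minimal standalone sketch of that heuristic; the thresholds are copied from the diff, while the function name record_wait_ms and the constant names are illustrative, not from the patch.

# Sketch of the rolling-window check added to enqueue_buffer() for VISION_STREAM_ROAD.
# Window size (20*3), 1.0 ms cutoff, and the >3 count come from the diff; everything
# else here is an assumed, illustrative layout.
from collections import deque

WINDOW = 20 * 3        # ~3 s of road frames at 20 fps
FAST_WAIT_MS = 1.0     # a wait this short implies the sync fence was already signaled
MAX_FAST_WAITS = 3

_waits: deque = deque(maxlen=WINDOW)

def record_wait_ms(dt_ms: float):
  """Feed one sync-wait duration; returns True ("BAD"), False ("good"),
  or None while the window is still filling."""
  _waits.append(dt_ms)
  if len(_waits) < WINDOW:
    return None
  fast = sum(1 for d in _waits if d < FAST_WAIT_MS)
  return fast > MAX_FAST_WAITS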