Merge pull request #282 from mahkoh/jorth/1.6.0-nvidia-vblank
metal: emulate vblank events on the nvidia driver (backport)
mahkoh authored Oct 4, 2024 · 2 parents f004afd + 9e66e45 · commit 85da8ff
Showing 8 changed files with 73 additions and 16 deletions.
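For readers skimming the diff: the core of this change is a runtime fallback in MetalConnector::queue_sequence. When the DRM queue-sequence ioctl fails with EOPNOTSUPP — which is what the proprietary nvidia driver returns — the CRTC and its connector are flagged with needs_vblank_emulation, and vblank events are thereafter synthesized from page-flip completions and output damage instead of hardware CRTC sequence events. The sketch below illustrates only that fallback pattern; the Crtc and QueueError types and the closure standing in for the ioctl are illustrative assumptions, not the compositor's real API.

use std::cell::Cell;

/// Stand-in for the DRM error the commit matches on
/// (DrmError::QueueSequence(OsError(c::EOPNOTSUPP)) in the real code).
#[derive(Debug)]
#[allow(dead_code)]
enum QueueError {
    NotSupported,
    Other,
}

/// Stand-in for the relevant bits of MetalCrtc state.
struct Crtc {
    needs_vblank_emulation: Cell<bool>,
    have_queued_sequence: Cell<bool>,
}

impl Crtc {
    /// Try to use hardware sequence events; switch to emulation when the
    /// driver reports that queueing a sequence is not supported.
    fn queue_sequence(&self, queue_ioctl: impl Fn() -> Result<(), QueueError>) {
        if self.needs_vblank_emulation.get() {
            // Already emulating: vblanks are derived from page-flip completions.
            return;
        }
        match queue_ioctl() {
            Ok(()) => self.have_queued_sequence.set(true),
            Err(QueueError::NotSupported) => {
                // One-time switch, mirroring crtc.needs_vblank_emulation.set(true)
                // in the real diff; the connector is flagged the same way.
                self.needs_vblank_emulation.set(true);
            }
            Err(e) => eprintln!("could not queue a CRTC sequence: {:?}", e),
        }
    }
}

fn main() {
    let crtc = Crtc {
        needs_vblank_emulation: Cell::new(false),
        have_queued_sequence: Cell::new(false),
    };
    // Simulate a driver that rejects the ioctl, as nvidia does.
    crtc.queue_sequence(|| Err(QueueError::NotSupported));
    assert!(crtc.needs_vblank_emulation.get());
    assert!(!crtc.have_queued_sequence.get());
}

In the actual change, the same flag also lives on ConnectorData, and OutputNode::vblank() keeps the output damaged (or registers an on_attach hook) whenever vblank listeners exist, so emulated vblank events keep firing as long as someone is listening.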
2 changes: 2 additions & 0 deletions release-notes.md
@@ -1,5 +1,7 @@
# Unreleased

- Emulate vblank events on the nvidia driver.

# 1.6.0 (2024-09-25)

- Various bugfixes.
23 changes: 19 additions & 4 deletions src/backends/metal/video.rs
@@ -740,8 +740,19 @@ impl MetalConnector {

fn queue_sequence(&self) {
if let Some(crtc) = self.crtc.get() {
if crtc.needs_vblank_emulation.get() {
return;
}
if let Err(e) = self.master.queue_sequence(crtc.id) {
log::error!("Could not queue a CRTC sequence: {}", ErrorFmt(e));
log::error!("Could not queue a CRTC sequence: {}", ErrorFmt(&e));
if let DrmError::QueueSequence(OsError(c::EOPNOTSUPP)) = e {
if let Some(node) = self.state.root.outputs.get(&self.connector_id) {
log::warn!("{}: Switching to vblank emulation", self.kernel_id());
crtc.needs_vblank_emulation.set(true);
node.global.connector.needs_vblank_emulation.set(true);
node.vblank();
}
}
} else {
crtc.have_queued_sequence.set(true);
}
@@ -944,6 +955,7 @@ pub struct MetalCrtc {

pub mode_blob: CloneCell<Option<Rc<PropBlob>>>,
pub have_queued_sequence: Cell<bool>,
pub needs_vblank_emulation: Cell<bool>,
}

impl Debug for MetalCrtc {
@@ -1291,6 +1303,7 @@ fn create_crtc(
vrr_enabled: props.get("VRR_ENABLED")?.map(|v| v == 1),
mode_blob: Default::default(),
have_queued_sequence: Cell::new(false),
needs_vblank_emulation: Cell::new(false),
})
}

@@ -1955,6 +1968,10 @@ impl MetalBackend {
connector.queue_sequence();
}
self.update_u32_sequence(&connector, sequence);
let time_ns = tv_sec as u64 * 1_000_000_000 + tv_usec as u64 * 1000;
if crtc.needs_vblank_emulation.get() {
self.handle_drm_sequence_event(dev, crtc_id, time_ns as _, connector.sequence.get());
}
connector.can_present.set(true);
if let Some(fb) = connector.next_framebuffer.take() {
*connector.active_framebuffer.borrow_mut() = Some(fb);
@@ -1976,9 +1993,7 @@ impl MetalBackend {
{
connector.schedule_present();
}
connector
.next_flip_nsec
.set(tv_sec as u64 * 1_000_000_000 + tv_usec as u64 * 1000 + dd.refresh as u64);
connector.next_flip_nsec.set(time_ns + dd.refresh as u64);
{
let mut flags = KIND_HW_COMPLETION;
if connector.presentation_is_sync.get() {
1 change: 1 addition & 0 deletions src/compositor.rs
@@ -507,6 +507,7 @@ fn create_dummy_output(state: &Rc<State>) {
drm_dev: None,
async_event: Default::default(),
damaged: Cell::new(false),
needs_vblank_emulation: Cell::new(false),
});
let schedule = Rc::new(OutputSchedule::new(
&state.ring,
43 changes: 31 additions & 12 deletions src/cpu_worker.rs
@@ -11,7 +11,7 @@ use {
ptr_ext::MutPtrExt, queue::AsyncQueue, stack::Stack,
},
},
parking_lot::Mutex,
parking_lot::{Condvar, Mutex},
std::{
any::Any,
cell::{Cell, RefCell},
@@ -113,18 +113,25 @@ enum Job {

unsafe impl Send for Job {}

#[derive(Default)]
struct CompletedJobsExchange {
queue: VecDeque<CpuJobId>,
condvar: Option<Arc<Condvar>>,
}

struct CpuWorkerData {
next: CpuJobIds,
jobs_to_enqueue: AsyncQueue<Job>,
new_jobs: Arc<Mutex<VecDeque<Job>>>,
have_new_jobs: Rc<OwnedFd>,
completed_jobs_remote: Arc<Mutex<VecDeque<CpuJobId>>>,
completed_jobs_remote: Arc<Mutex<CompletedJobsExchange>>,
completed_jobs_local: RefCell<VecDeque<CpuJobId>>,
have_completed_jobs: Rc<OwnedFd>,
pending_jobs: CopyHashMap<CpuJobId, Rc<PendingJobData>>,
ring: Rc<IoUring>,
_stop: OwnedFd,
pending_job_data_cache: Stack<Rc<PendingJobData>>,
sync_wake_condvar: Arc<Condvar>,
}

linear_ids!(CpuJobIds, CpuJobId, u64);
@@ -172,12 +179,16 @@ impl Drop for PendingJob {
self.job_data.state.set(PendingJobState::Abandoned);
data.jobs_to_enqueue.push(Job::Cancel { id });
data.do_equeue_jobs();
let mut buf = 0u64;
while data.pending_jobs.contains(&id) {
if let Err(e) = uapi::read(data.have_completed_jobs.raw(), &mut buf) {
panic!("Could not wait for job completions: {}", ErrorFmt(e));
}
loop {
data.dispatch_completions();
if !data.pending_jobs.contains(&id) {
break;
}
let mut remote = data.completed_jobs_remote.lock();
while remote.queue.is_empty() {
remote.condvar = Some(data.sync_wake_condvar.clone());
data.sync_wake_condvar.wait(&mut remote);
}
}
}
PendingJobState::Abandoned => {}
Expand All @@ -204,7 +215,7 @@ impl CpuWorkerData {

fn dispatch_completions(&self) {
let completions = &mut *self.completed_jobs_local.borrow_mut();
mem::swap(completions, &mut *self.completed_jobs_remote.lock());
mem::swap(completions, &mut self.completed_jobs_remote.lock().queue);
while let Some(id) = completions.pop_front() {
let job_data = self.pending_jobs.remove(&id).unwrap();
let job = job_data.job.take().unwrap();
@@ -242,7 +253,7 @@ impl CpuWorkerData {
impl CpuWorker {
pub fn new(ring: &Rc<IoUring>, eng: &Rc<AsyncEngine>) -> Result<Self, CpuWorkerError> {
let new_jobs: Arc<Mutex<VecDeque<Job>>> = Default::default();
let completed_jobs: Arc<Mutex<VecDeque<CpuJobId>>> = Default::default();
let completed_jobs: Arc<Mutex<CompletedJobsExchange>> = Default::default();
let (stop_read, stop_write) =
uapi::pipe2(c::O_CLOEXEC).map_err(|e| CpuWorkerError::Pipe(e.into()))?;
let have_new_jobs =
@@ -281,6 +292,7 @@ impl CpuWorker {
ring: ring.clone(),
_stop: stop_read,
pending_job_data_cache: Default::default(),
sync_wake_condvar: Arc::new(Condvar::new()),
});
Ok(Self {
_completions_listener: eng.spawn(
@@ -313,7 +325,7 @@

fn work(
new_jobs: Arc<Mutex<VecDeque<Job>>>,
completed_jobs: Arc<Mutex<VecDeque<CpuJobId>>>,
completed_jobs: Arc<Mutex<CompletedJobsExchange>>,
stop: OwnedFd,
have_new_jobs: OwnedFd,
have_completed_jobs: OwnedFd,
@@ -343,7 +355,7 @@ fn work(
struct Worker {
eng: Rc<AsyncEngine>,
ring: Rc<IoUring>,
completed_jobs: Arc<Mutex<VecDeque<CpuJobId>>>,
completed_jobs: Arc<Mutex<CompletedJobsExchange>>,
have_completed_jobs: OwnedFd,
async_jobs: CopyHashMap<CpuJobId, AsyncJob>,
stopped: Cell<bool>,
@@ -428,7 +440,14 @@ impl Worker {
}

fn send_completion(&self, id: CpuJobId) {
self.completed_jobs.lock().push_back(id);
let cv = {
let mut exchange = self.completed_jobs.lock();
exchange.queue.push_back(id);
exchange.condvar.take()
};
if let Some(cv) = cv {
cv.notify_all();
}
if let Err(e) = uapi::eventfd_write(self.have_completed_jobs.raw(), 1) {
panic!("Could not signal job completion: {}", ErrorFmt(e));
}
1 change: 1 addition & 0 deletions src/state.rs
@@ -301,6 +301,7 @@ pub struct ConnectorData {
pub drm_dev: Option<Rc<DrmDevData>>,
pub async_event: Rc<AsyncEvent>,
pub damaged: Cell<bool>,
pub needs_vblank_emulation: Cell<bool>,
}

pub struct OutputData {
1 change: 1 addition & 0 deletions src/tasks/connector.rs
@@ -32,6 +32,7 @@ pub fn handle(state: &Rc<State>, connector: &Rc<dyn Connector>) {
drm_dev: drm_dev.clone(),
async_event: Rc::new(AsyncEvent::default()),
damaged: Cell::new(false),
needs_vblank_emulation: Cell::new(false),
});
if let Some(dev) = drm_dev {
dev.connectors.set(id, data.clone());
10 changes: 10 additions & 0 deletions src/tree/output.rs
@@ -136,6 +136,16 @@ impl OutputNode {
for listener in self.vblank_event.iter() {
listener.after_vblank();
}
if self.global.connector.needs_vblank_emulation.get() {
if self.vblank_event.has_listeners() {
self.global.connector.damage();
} else {
let connector = self.global.connector.clone();
self.vblank_event.on_attach(Box::new(move || {
connector.damage();
}));
}
}
}

pub fn presented(&self, tv_sec: u64, tv_nsec: u32, refresh: u32, seq: u64, flags: u32) {
8 changes: 8 additions & 0 deletions src/utils/event_listener.rs
@@ -30,6 +30,14 @@ impl<T: ?Sized> EventSource<T> {
iter: self.listeners.iter(),
}
}

pub fn has_listeners(&self) -> bool {
self.listeners.is_not_empty()
}

pub fn on_attach(&self, f: Box<dyn FnOnce()>) {
self.on_attach.set(Some(f));
}
}

pub struct EventSourceIter<T: ?Sized> {