From 689ff7faf9673a38641711a97f38c7c8a558eae5 Mon Sep 17 00:00:00 2001 From: "Han, Longfei" Date: Tue, 21 Nov 2023 15:33:14 +0800 Subject: [PATCH] [Script] align open code btw branches align open code btw branches --- .../linux/common/os/xe/include/dma-buf.h | 132 +- .../linux/common/os/xe/include/xe_drm.h | 1538 ++++++++--------- 2 files changed, 835 insertions(+), 835 deletions(-) diff --git a/media_softlet/linux/common/os/xe/include/dma-buf.h b/media_softlet/linux/common/os/xe/include/dma-buf.h index fbc97afe7ea..5a6fda66d9a 100644 --- a/media_softlet/linux/common/os/xe/include/dma-buf.h +++ b/media_softlet/linux/common/os/xe/include/dma-buf.h @@ -52,27 +52,27 @@ * other synchronization primitive outside the scope of the DMA buffer API. */ struct dma_buf_sync { - /** - * @flags: Set of access flags - * - * DMA_BUF_SYNC_START: - * Indicates the start of a map access session. - * - * DMA_BUF_SYNC_END: - * Indicates the end of a map access session. - * - * DMA_BUF_SYNC_READ: - * Indicates that the mapped DMA buffer will be read by the - * client via the CPU map. - * - * DMA_BUF_SYNC_WRITE: - * Indicates that the mapped DMA buffer will be written by the - * client via the CPU map. - * - * DMA_BUF_SYNC_RW: - * An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE. - */ - __u64 flags; + /** + * @flags: Set of access flags + * + * DMA_BUF_SYNC_START: + * Indicates the start of a map access session. + * + * DMA_BUF_SYNC_END: + * Indicates the end of a map access session. + * + * DMA_BUF_SYNC_READ: + * Indicates that the mapped DMA buffer will be read by the + * client via the CPU map. + * + * DMA_BUF_SYNC_WRITE: + * Indicates that the mapped DMA buffer will be written by the + * client via the CPU map. + * + * DMA_BUF_SYNC_RW: + * An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE. + */ + __u64 flags; }; #define DMA_BUF_SYNC_READ (1 << 0) @@ -81,9 +81,9 @@ struct dma_buf_sync { #define DMA_BUF_SYNC_START (0 << 2) #define DMA_BUF_SYNC_END (1 << 2) #define DMA_BUF_SYNC_VALID_FLAGS_MASK \ - (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END) + (DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END) -#define DMA_BUF_NAME_LEN 32 +#define DMA_BUF_NAME_LEN 32 /** * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf @@ -116,25 +116,25 @@ struct dma_buf_sync { * submits work between steps 1 and 3 above. */ struct dma_buf_export_sync_file { - /** - * @flags: Read/write flags - * - * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both. - * - * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set, - * the returned sync file waits on any writers of the dma-buf to - * complete. Waiting on the returned sync file is equivalent to - * poll() with POLLIN. - * - * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on - * any users of the dma-buf (read or write) to complete. Waiting - * on the returned sync file is equivalent to poll() with POLLOUT. - * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this - * is equivalent to just DMA_BUF_SYNC_WRITE. - */ - __u32 flags; - /** @fd: Returned sync file descriptor */ - __s32 fd; + /** + * @flags: Read/write flags + * + * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both. + * + * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set, + * the returned sync file waits on any writers of the dma-buf to + * complete. Waiting on the returned sync file is equivalent to + * poll() with POLLIN. + * + * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on + * any users of the dma-buf (read or write) to complete. 
Waiting + * on the returned sync file is equivalent to poll() with POLLOUT. + * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this + * is equivalent to just DMA_BUF_SYNC_WRITE. + */ + __u32 flags; + /** @fd: Returned sync file descriptor */ + __s32 fd; }; /** @@ -148,35 +148,35 @@ struct dma_buf_export_sync_file { * drivers/video. */ struct dma_buf_import_sync_file { - /** - * @flags: Read/write flags - * - * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both. - * - * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set, - * this inserts the sync_file as a read-only fence. Any subsequent - * implicitly synchronized writes to this dma-buf will wait on this - * fence but reads will not. - * - * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a - * write fence. All subsequent implicitly synchronized access to - * this dma-buf will wait on this fence. - */ - __u32 flags; - /** @fd: Sync file descriptor */ - __s32 fd; + /** + * @flags: Read/write flags + * + * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both. + * + * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set, + * this inserts the sync_file as a read-only fence. Any subsequent + * implicitly synchronized writes to this dma-buf will wait on this + * fence but reads will not. + * + * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a + * write fence. All subsequent implicitly synchronized access to + * this dma-buf will wait on this fence. + */ + __u32 flags; + /** @fd: Sync file descriptor */ + __s32 fd; }; -#define DMA_BUF_BASE 'b' -#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) +#define DMA_BUF_BASE 'b' +#define DMA_BUF_IOCTL_SYNC _IOW(DMA_BUF_BASE, 0, struct dma_buf_sync) /* 32/64bitness of this uapi was botched in android, there's no difference * between them in actual uapi, they're just different numbers. */ -#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *) -#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32) -#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64) -#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE _IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file) -#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE _IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file) +#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 1, const char *) +#define DMA_BUF_SET_NAME_A _IOW(DMA_BUF_BASE, 1, __u32) +#define DMA_BUF_SET_NAME_B _IOW(DMA_BUF_BASE, 1, __u64) +#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE _IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file) +#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE _IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file) #endif diff --git a/media_softlet/linux/common/os/xe/include/xe_drm.h b/media_softlet/linux/common/os/xe/include/xe_drm.h index 8f63bfd6a1f..cda0a762331 100644 --- a/media_softlet/linux/common/os/xe/include/xe_drm.h +++ b/media_softlet/linux/common/os/xe/include/xe_drm.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: MIT */ /* - * Copyright (c) 2023 Intel Corporation + * Copyright © 2023 Intel Corporation */ #ifndef _UAPI_XE_DRM_H_ @@ -45,18 +45,18 @@ extern "C" { * * .. 
code-block:: C * - * struct xe_user_extension ext3 { - * .next_extension = 0, // end - * .name = ..., - * }; - * struct xe_user_extension ext2 { - * .next_extension = (uintptr_t)&ext3, - * .name = ..., - * }; - * struct xe_user_extension ext1 { - * .next_extension = (uintptr_t)&ext2, - * .name = ..., - * }; + * struct xe_user_extension ext3 { + * .next_extension = 0, // end + * .name = ..., + * }; + * struct xe_user_extension ext2 { + * .next_extension = (uintptr_t)&ext3, + * .name = ..., + * }; + * struct xe_user_extension ext1 { + * .next_extension = (uintptr_t)&ext2, + * .name = ..., + * }; * * Typically the struct xe_user_extension would be embedded in some uAPI * struct, and in this case we would feed it the head of the chain(i.e ext1), @@ -64,30 +64,30 @@ extern "C" { * */ struct xe_user_extension { - /** - * @next_extension: - * - * Pointer to the next struct xe_user_extension, or zero if the end. - */ - __u64 next_extension; - - /** - * @name: Name of the extension. - * - * Note that the name here is just some integer. - * - * Also note that the name space for this is not global for the whole - * driver, but rather its scope/meaning is limited to the specific piece - * of uAPI which has embedded the struct xe_user_extension. - */ - __u32 name; - - /** - * @pad: MBZ - * - * All undefined bits must be zero. - */ - __u32 pad; + /** + * @next_extension: + * + * Pointer to the next struct xe_user_extension, or zero if the end. + */ + __u64 next_extension; + + /** + * @name: Name of the extension. + * + * Note that the name here is just some integer. + * + * Also note that the name space for this is not global for the whole + * driver, but rather its scope/meaning is limited to the specific piece + * of uAPI which has embedded the struct xe_user_extension. + */ + __u32 name; + + /** + * @pad: MBZ + * + * All undefined bits must be zero. + */ + __u32 pad; }; /* @@ -97,67 +97,67 @@ struct xe_user_extension { * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset * against DRM_COMMAND_BASE and should be between [0x0, 0x60). 
*/ -#define DRM_XE_DEVICE_QUERY 0x00 -#define DRM_XE_GEM_CREATE 0x01 -#define DRM_XE_GEM_MMAP_OFFSET 0x02 -#define DRM_XE_VM_CREATE 0x03 -#define DRM_XE_VM_DESTROY 0x04 -#define DRM_XE_VM_BIND 0x05 -#define DRM_XE_EXEC_QUEUE_CREATE 0x06 -#define DRM_XE_EXEC_QUEUE_DESTROY 0x07 -#define DRM_XE_EXEC 0x08 -#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x09 -#define DRM_XE_WAIT_USER_FENCE 0x0a -#define DRM_XE_VM_MADVISE 0x0b -#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x0c +#define DRM_XE_DEVICE_QUERY 0x00 +#define DRM_XE_GEM_CREATE 0x01 +#define DRM_XE_GEM_MMAP_OFFSET 0x02 +#define DRM_XE_VM_CREATE 0x03 +#define DRM_XE_VM_DESTROY 0x04 +#define DRM_XE_VM_BIND 0x05 +#define DRM_XE_EXEC_QUEUE_CREATE 0x06 +#define DRM_XE_EXEC_QUEUE_DESTROY 0x07 +#define DRM_XE_EXEC 0x08 +#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x09 +#define DRM_XE_WAIT_USER_FENCE 0x0a +#define DRM_XE_VM_MADVISE 0x0b +#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x0c /* Must be kept compact -- no holes */ -#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query) -#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create) -#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset) -#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create) -#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy) -#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind) -#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create) -#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property) -#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy) -#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec) -#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property) -#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence) -#define DRM_IOCTL_XE_VM_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_MADVISE, struct drm_xe_vm_madvise) +#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query) +#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create) +#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset) +#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create) +#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy) +#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind) +#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create) +#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property) +#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy) +#define DRM_IOCTL_XE_EXEC 
DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec) +#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property) +#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence) +#define DRM_IOCTL_XE_VM_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_MADVISE, struct drm_xe_vm_madvise) /** struct drm_xe_engine_class_instance - instance of an engine class */ struct drm_xe_engine_class_instance { -#define DRM_XE_ENGINE_CLASS_RENDER 0 -#define DRM_XE_ENGINE_CLASS_COPY 1 -#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2 -#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3 -#define DRM_XE_ENGINE_CLASS_COMPUTE 4 - /* - * Kernel only classes (not actual hardware engine class). Used for - * creating ordered queues of VM bind operations. - */ -#define DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC 5 -#define DRM_XE_ENGINE_CLASS_VM_BIND_SYNC 6 - __u16 engine_class; - - __u16 engine_instance; - __u16 gt_id; - __u16 rsvd; +#define DRM_XE_ENGINE_CLASS_RENDER 0 +#define DRM_XE_ENGINE_CLASS_COPY 1 +#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2 +#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3 +#define DRM_XE_ENGINE_CLASS_COMPUTE 4 + /* + * Kernel only classes (not actual hardware engine class). Used for + * creating ordered queues of VM bind operations. + */ +#define DRM_XE_ENGINE_CLASS_VM_BIND_ASYNC 5 +#define DRM_XE_ENGINE_CLASS_VM_BIND_SYNC 6 + __u16 engine_class; + + __u16 engine_instance; + __u16 gt_id; + __u16 rsvd; }; /** * enum drm_xe_memory_class - Supported memory classes. */ enum drm_xe_memory_class { - /** @XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */ - XE_MEM_REGION_CLASS_SYSMEM = 0, - /** - * @XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this - * represents the memory that is local to the device, which we - * call VRAM. Not valid on integrated platforms. - */ - XE_MEM_REGION_CLASS_VRAM + /** @XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */ + XE_MEM_REGION_CLASS_SYSMEM = 0, + /** + * @XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this + * represents the memory that is local to the device, which we + * call VRAM. Not valid on integrated platforms. + */ + XE_MEM_REGION_CLASS_VRAM }; /** @@ -165,76 +165,76 @@ enum drm_xe_memory_class { * the driver. */ struct drm_xe_query_mem_region { - /** - * @mem_class: The memory class describing this region. - * - * See enum drm_xe_memory_class for supported values. - */ - __u16 mem_class; - /** - * @instance: The instance for this region. - * - * The @mem_class and @instance taken together will always give - * a unique pair. - */ - __u16 instance; - /** @pad: MBZ */ - __u32 pad; - /** - * @min_page_size: Min page-size in bytes for this region. - * - * When the kernel allocates memory for this region, the - * underlying pages will be at least @min_page_size in size. - * - * Important note: When userspace allocates a GTT address which - * can point to memory allocated from this region, it must also - * respect this minimum alignment. This is enforced by the - * kernel. - */ - __u32 min_page_size; - /** - * @total_size: The usable size in bytes for this region. - */ - __u64 total_size; - /** - * @used: Estimate of the memory used in bytes for this region. - * - * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable - * accounting. Without this the value here will always equal - * zero. - */ - __u64 used; - /** - * @cpu_visible_size: How much of this region can be CPU - * accessed, in bytes. 
- * - * This will always be <= @total_size, and the remainder (if - * any) will not be CPU accessible. If the CPU accessible part - * is smaller than @total_size then this is referred to as a - * small BAR system. - * - * On systems without small BAR (full BAR), the probed_size will - * always equal the @total_size, since all of it will be CPU - * accessible. - * - * Note this is only tracked for XE_MEM_REGION_CLASS_VRAM - * regions (for other types the value here will always equal - * zero). - */ - __u64 cpu_visible_size; - /** - * @cpu_visible_used: Estimate of CPU visible memory used, in - * bytes. - * - * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable - * accounting. Without this the value here will always equal - * zero. Note this is only currently tracked for - * XE_MEM_REGION_CLASS_VRAM regions (for other types the value - * here will always be zero). - */ - __u64 cpu_visible_used; - /** @reserved: MBZ */ - __u64 reserved[6]; + /** + * @mem_class: The memory class describing this region. + * + * See enum drm_xe_memory_class for supported values. + */ + __u16 mem_class; + /** + * @instance: The instance for this region. + * + * The @mem_class and @instance taken together will always give + * a unique pair. + */ + __u16 instance; + /** @pad: MBZ */ + __u32 pad; + /** + * @min_page_size: Min page-size in bytes for this region. + * + * When the kernel allocates memory for this region, the + * underlying pages will be at least @min_page_size in size. + * + * Important note: When userspace allocates a GTT address which + * can point to memory allocated from this region, it must also + * respect this minimum alignment. This is enforced by the + * kernel. + */ + __u32 min_page_size; + /** + * @total_size: The usable size in bytes for this region. + */ + __u64 total_size; + /** + * @used: Estimate of the memory used in bytes for this region. + * + * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable + * accounting. Without this the value here will always equal + * zero. + */ + __u64 used; + /** + * @cpu_visible_size: How much of this region can be CPU + * accessed, in bytes. + * + * This will always be <= @total_size, and the remainder (if + * any) will not be CPU accessible. If the CPU accessible part + * is smaller than @total_size then this is referred to as a + * small BAR system. + * + * On systems without small BAR (full BAR), the probed_size will + * always equal the @total_size, since all of it will be CPU + * accessible. + * + * Note this is only tracked for XE_MEM_REGION_CLASS_VRAM + * regions (for other types the value here will always equal + * zero). + */ + __u64 cpu_visible_size; + /** + * @cpu_visible_used: Estimate of CPU visible memory used, in + * bytes. + * + * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable + * accounting. Without this the value here will always equal + * zero. Note this is only currently tracked for + * XE_MEM_REGION_CLASS_VRAM regions (for other types the value + * here will always be zero). + */ + __u64 cpu_visible_used; + /** @reserved: MBZ */ + __u64 reserved[6]; }; /** @@ -251,44 +251,44 @@ struct drm_xe_query_mem_region { * streamer cycle count was captured. */ struct drm_xe_query_engine_cycles { - /** - * @eci: This is input by the user and is the engine for which command - * streamer cycles is queried. - */ - struct drm_xe_engine_class_instance eci; - - /** - * @clockid: This is input by the user and is the reference clock id for - * CPU timestamp. For definition, see clock_gettime(2) and - * perf_event_open(2). 
Supported clock ids are CLOCK_MONOTONIC, - * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI. - */ - __s32 clockid; - - /** @width: Width of the engine cycle counter in bits. */ - __u32 width; - - /** - * @engine_cycles: Engine cycles as read from its register - * at 0x358 offset. - */ - __u64 engine_cycles; - - /** @engine_frequency: Frequency of the engine cycles in Hz. */ - __u64 engine_frequency; - - /** - * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before - * reading the engine_cycles register using the reference clockid set by the - * user. - */ - __u64 cpu_timestamp; - - /** - * @cpu_delta: Time delta in ns captured around reading the lower dword - * of the engine_cycles register. - */ - __u64 cpu_delta; + /** + * @eci: This is input by the user and is the engine for which command + * streamer cycles is queried. + */ + struct drm_xe_engine_class_instance eci; + + /** + * @clockid: This is input by the user and is the reference clock id for + * CPU timestamp. For definition, see clock_gettime(2) and + * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC, + * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI. + */ + __s32 clockid; + + /** @width: Width of the engine cycle counter in bits. */ + __u32 width; + + /** + * @engine_cycles: Engine cycles as read from its register + * at 0x358 offset. + */ + __u64 engine_cycles; + + /** @engine_frequency: Frequency of the engine cycles in Hz. */ + __u64 engine_frequency; + + /** + * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before + * reading the engine_cycles register using the reference clockid set by the + * user. + */ + __u64 cpu_timestamp; + + /** + * @cpu_delta: Time delta in ns captured around reading the lower dword + * of the engine_cycles register. + */ + __u64 cpu_delta; }; /** @@ -299,12 +299,12 @@ struct drm_xe_query_engine_cycles { * struct drm_xe_query_mem_usage in .data. */ struct drm_xe_query_mem_usage { - /** @num_regions: number of memory regions returned in @regions */ - __u32 num_regions; - /** @pad: MBZ */ - __u32 pad; - /** @regions: The returned regions for this device */ - struct drm_xe_query_mem_region regions[]; + /** @num_regions: number of memory regions returned in @regions */ + __u32 num_regions; + /** @pad: MBZ */ + __u32 pad; + /** @regions: The returned regions for this device */ + struct drm_xe_query_mem_region regions[]; }; /** @@ -315,23 +315,23 @@ struct drm_xe_query_mem_usage { * struct drm_xe_query_config in .data. 
*/ struct drm_xe_query_config { - /** @num_params: number of parameters returned in info */ - __u32 num_params; - - /** @pad: MBZ */ - __u32 pad; - -#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0 -#define XE_QUERY_CONFIG_FLAGS 1 - #define XE_QUERY_CONFIG_FLAGS_HAS_VRAM (0x1 << 0) -#define XE_QUERY_CONFIG_MIN_ALIGNMENT 2 -#define XE_QUERY_CONFIG_VA_BITS 3 -#define XE_QUERY_CONFIG_GT_COUNT 4 -#define XE_QUERY_CONFIG_MEM_REGION_COUNT 5 -#define XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 6 -#define XE_QUERY_CONFIG_NUM_PARAM (XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1) - /** @info: array of elements containing the config info */ - __u64 info[]; + /** @num_params: number of parameters returned in info */ + __u32 num_params; + + /** @pad: MBZ */ + __u32 pad; + +#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0 +#define XE_QUERY_CONFIG_FLAGS 1 + #define XE_QUERY_CONFIG_FLAGS_HAS_VRAM (0x1 << 0) +#define XE_QUERY_CONFIG_MIN_ALIGNMENT 2 +#define XE_QUERY_CONFIG_VA_BITS 3 +#define XE_QUERY_CONFIG_GT_COUNT 4 +#define XE_QUERY_CONFIG_MEM_REGION_COUNT 5 +#define XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 6 +#define XE_QUERY_CONFIG_NUM_PARAM (XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY + 1) + /** @info: array of elements containing the config info */ + __u64 info[]; }; /** @@ -343,34 +343,34 @@ struct drm_xe_query_config { * implementing graphics and/or media operations. */ struct drm_xe_query_gt { -#define XE_QUERY_GT_TYPE_MAIN 0 -#define XE_QUERY_GT_TYPE_REMOTE 1 -#define XE_QUERY_GT_TYPE_MEDIA 2 - /** @type: GT type: Main, Remote, or Media */ - __u16 type; - /** @gt_id: Unique ID of this GT within the PCI Device */ - __u16 gt_id; - /** @clock_freq: A clock frequency for timestamp */ - __u32 clock_freq; - /** - * @native_mem_regions: Bit mask of instances from - * drm_xe_query_mem_usage that lives on the same GPU/Tile and have - * direct access. - */ - __u64 native_mem_regions; - /** - * @slow_mem_regions: Bit mask of instances from - * drm_xe_query_mem_usage that this GT can indirectly access, although - * they live on a different GPU/Tile. - */ - __u64 slow_mem_regions; - /** - * @inaccessible_mem_regions: Bit mask of instances from - * drm_xe_query_mem_usage that is not accessible by this GT at all. - */ - __u64 inaccessible_mem_regions; - /** @reserved: Reserved */ - __u64 reserved[8]; +#define XE_QUERY_GT_TYPE_MAIN 0 +#define XE_QUERY_GT_TYPE_REMOTE 1 +#define XE_QUERY_GT_TYPE_MEDIA 2 + /** @type: GT type: Main, Remote, or Media */ + __u16 type; + /** @gt_id: Unique ID of this GT within the PCI Device */ + __u16 gt_id; + /** @clock_freq: A clock frequency for timestamp */ + __u32 clock_freq; + /** + * @native_mem_regions: Bit mask of instances from + * drm_xe_query_mem_usage that lives on the same GPU/Tile and have + * direct access. + */ + __u64 native_mem_regions; + /** + * @slow_mem_regions: Bit mask of instances from + * drm_xe_query_mem_usage that this GT can indirectly access, although + * they live on a different GPU/Tile. + */ + __u64 slow_mem_regions; + /** + * @inaccessible_mem_regions: Bit mask of instances from + * drm_xe_query_mem_usage that is not accessible by this GT at all. + */ + __u64 inaccessible_mem_regions; + /** @reserved: Reserved */ + __u64 reserved[8]; }; /** @@ -381,12 +381,12 @@ struct drm_xe_query_gt { * drm_xe_query_gt_list in .data. 
*/ struct drm_xe_query_gt_list { - /** @num_gt: number of GT items returned in gt_list */ - __u32 num_gt; - /** @pad: MBZ */ - __u32 pad; - /** @gt_list: The GT list returned for this device */ - struct drm_xe_query_gt gt_list[]; + /** @num_gt: number of GT items returned in gt_list */ + __u32 num_gt; + /** @pad: MBZ */ + __u32 pad; + /** @gt_list: The GT list returned for this device */ + struct drm_xe_query_gt gt_list[]; }; /** @@ -400,41 +400,41 @@ struct drm_xe_query_gt_list { * struct drm_xe_query_topology_mask in .data. */ struct drm_xe_query_topology_mask { - /** @gt_id: GT ID the mask is associated with */ - __u16 gt_id; - - /* - * To query the mask of Dual Sub Slices (DSS) available for geometry - * operations. For example a query response containing the following - * in mask: - * DSS_GEOMETRY ff ff ff ff 00 00 00 00 - * means 32 DSS are available for geometry. - */ -#define XE_TOPO_DSS_GEOMETRY (1 << 0) - /* - * To query the mask of Dual Sub Slices (DSS) available for compute - * operations. For example a query response containing the following - * in mask: - * DSS_COMPUTE ff ff ff ff 00 00 00 00 - * means 32 DSS are available for compute. - */ -#define XE_TOPO_DSS_COMPUTE (1 << 1) - /* - * To query the mask of Execution Units (EU) available per Dual Sub - * Slices (DSS). For example a query response containing the following - * in mask: - * EU_PER_DSS ff ff 00 00 00 00 00 00 - * means each DSS has 16 EU. - */ -#define XE_TOPO_EU_PER_DSS (1 << 2) - /** @type: type of mask */ - __u16 type; - - /** @num_bytes: number of bytes in requested mask */ - __u32 num_bytes; - - /** @mask: little-endian mask of @num_bytes */ - __u8 mask[]; + /** @gt_id: GT ID the mask is associated with */ + __u16 gt_id; + + /* + * To query the mask of Dual Sub Slices (DSS) available for geometry + * operations. For example a query response containing the following + * in mask: + * DSS_GEOMETRY ff ff ff ff 00 00 00 00 + * means 32 DSS are available for geometry. + */ +#define XE_TOPO_DSS_GEOMETRY (1 << 0) + /* + * To query the mask of Dual Sub Slices (DSS) available for compute + * operations. For example a query response containing the following + * in mask: + * DSS_COMPUTE ff ff ff ff 00 00 00 00 + * means 32 DSS are available for compute. + */ +#define XE_TOPO_DSS_COMPUTE (1 << 1) + /* + * To query the mask of Execution Units (EU) available per Dual Sub + * Slices (DSS). For example a query response containing the following + * in mask: + * EU_PER_DSS ff ff 00 00 00 00 00 00 + * means each DSS has 16 EU. + */ +#define XE_TOPO_EU_PER_DSS (1 << 2) + /** @type: type of mask */ + __u16 type; + + /** @num_bytes: number of bytes in requested mask */ + __u32 num_bytes; + + /** @mask: little-endian mask of @num_bytes */ + __u8 mask[]; }; /** @@ -449,66 +449,66 @@ struct drm_xe_query_topology_mask { * * .. code-block:: C * - * struct drm_xe_engine_class_instance *hwe; - * struct drm_xe_device_query query = { - * .extensions = 0, - * .query = DRM_XE_DEVICE_QUERY_ENGINES, - * .size = 0, - * .data = 0, - * }; - * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); - * hwe = malloc(query.size); - * query.data = (uintptr_t)hwe; - * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); - * int num_engines = query.size / sizeof(*hwe); - * for (int i = 0; i < num_engines; i++) { - * printf("Engine %d: %s\n", i, - * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_RENDER ? "RENDER": - * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_COPY ? "COPY": - * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? 
"VIDEO_DECODE": - * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE": - * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE": - * "UNKNOWN"); - * } - * free(hwe); + * struct drm_xe_engine_class_instance *hwe; + * struct drm_xe_device_query query = { + * .extensions = 0, + * .query = DRM_XE_DEVICE_QUERY_ENGINES, + * .size = 0, + * .data = 0, + * }; + * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); + * hwe = malloc(query.size); + * query.data = (uintptr_t)hwe; + * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query); + * int num_engines = query.size / sizeof(*hwe); + * for (int i = 0; i < num_engines; i++) { + * printf("Engine %d: %s\n", i, + * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_RENDER ? "RENDER": + * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_COPY ? "COPY": + * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE": + * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE": + * hwe[i].engine_class == DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE": + * "UNKNOWN"); + * } + * free(hwe); */ struct drm_xe_device_query { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - -#define DRM_XE_DEVICE_QUERY_ENGINES 0 -#define DRM_XE_DEVICE_QUERY_MEM_USAGE 1 -#define DRM_XE_DEVICE_QUERY_CONFIG 2 -#define DRM_XE_DEVICE_QUERY_GT_LIST 3 -#define DRM_XE_DEVICE_QUERY_HWCONFIG 4 -#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5 -#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6 - /** @query: The type of data to query */ - __u32 query; - - /** @size: Size of the queried data */ - __u32 size; - - /** @data: Queried data is placed here */ - __u64 data; - - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + +#define DRM_XE_DEVICE_QUERY_ENGINES 0 +#define DRM_XE_DEVICE_QUERY_MEM_USAGE 1 +#define DRM_XE_DEVICE_QUERY_CONFIG 2 +#define DRM_XE_DEVICE_QUERY_GT_LIST 3 +#define DRM_XE_DEVICE_QUERY_HWCONFIG 4 +#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5 +#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6 + /** @query: The type of data to query */ + __u32 query; + + /** @size: Size of the queried data */ + __u32 size; + + /** @data: Queried data is placed here */ + __u64 data; + + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_gem_create { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** - * @size: Requested size for the object - * - * The (page-aligned) allocated size for the object will be returned. - */ - __u64 size; - -#define XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24) -#define XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25) + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** + * @size: Requested size for the object + * + * The (page-aligned) allocated size for the object will be returned. + */ + __u64 size; + +#define XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24) +#define XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25) /* * When using VRAM as a possible placement, ensure that the corresponding VRAM * allocation will always use the CPU accessible part of VRAM. This is important @@ -524,211 +524,211 @@ struct drm_xe_gem_create { * display surfaces, therefore the kernel requires setting this flag for such * objects, otherwise an error is thrown on small-bar systems. 
*/ -#define XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26) - /** - * @flags: Flags, currently a mask of memory instances of where BO can - * be placed - */ - __u32 flags; - - /** - * @vm_id: Attached VM, if any - * - * If a VM is specified, this BO must: - * - * 1. Only ever be bound to that VM. - * 2. Cannot be exported as a PRIME fd. - */ - __u32 vm_id; - - /** - * @handle: Returned handle for the object. - * - * Object handles are nonzero. - */ - __u32 handle; - - /** @pad: MBZ */ - __u32 pad; - - /** @reserved: Reserved */ - __u64 reserved[2]; +#define XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26) + /** + * @flags: Flags, currently a mask of memory instances of where BO can + * be placed + */ + __u32 flags; + + /** + * @vm_id: Attached VM, if any + * + * If a VM is specified, this BO must: + * + * 1. Only ever be bound to that VM. + * 2. Cannot be exported as a PRIME fd. + */ + __u32 vm_id; + + /** + * @handle: Returned handle for the object. + * + * Object handles are nonzero. + */ + __u32 handle; + + /** @pad: MBZ */ + __u32 pad; + + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_gem_mmap_offset { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; - /** @handle: Handle for the object being mapped. */ - __u32 handle; + /** @handle: Handle for the object being mapped. */ + __u32 handle; - /** @flags: Must be zero */ - __u32 flags; + /** @flags: Must be zero */ + __u32 flags; - /** @offset: The fake offset to use for subsequent mmap call */ - __u64 offset; + /** @offset: The fake offset to use for subsequent mmap call */ + __u64 offset; - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @reserved: Reserved */ + __u64 reserved[2]; }; /** struct drm_xe_ext_set_property - XE set property extension */ struct drm_xe_ext_set_property { - /** @base: base user extension */ - struct xe_user_extension base; + /** @base: base user extension */ + struct xe_user_extension base; - /** @property: property to set */ - __u32 property; + /** @property: property to set */ + __u32 property; - /** @pad: MBZ */ - __u32 pad; + /** @pad: MBZ */ + __u32 pad; - /** @value: property value */ - __u64 value; + /** @value: property value */ + __u64 value; - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_vm_create { -#define XE_VM_EXTENSION_SET_PROPERTY 0 - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - -#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0) -#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1) -#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2) -#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3) - /** @flags: Flags */ - __u32 flags; - - /** @vm_id: Returned VM ID */ - __u32 vm_id; - - /** @reserved: Reserved */ - __u64 reserved[2]; +#define XE_VM_EXTENSION_SET_PROPERTY 0 + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + +#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0) +#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1) +#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2) +#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3) + /** @flags: Flags */ + __u32 flags; + + /** @vm_id: Returned VM ID */ + __u32 vm_id; + + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_vm_destroy { - /** @vm_id: VM ID */ - __u32 vm_id; + /** @vm_id: VM ID */ + __u32 vm_id; - /** @pad: MBZ */ - __u32 pad; + /** @pad: MBZ */ + __u32 
pad; - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_vm_bind_op { - /** - * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP - */ - __u32 obj; - - /** @pad: MBZ */ - __u32 pad; - - union { - /** - * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE, - * ignored for unbind - */ - __u64 obj_offset; - - /** @userptr: user pointer to bind on */ - __u64 userptr; - }; - - /** - * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL - */ - __u64 range; - - /** @addr: Address to operate on, MBZ for UNMAP_ALL */ - __u64 addr; - - /** - * @tile_mask: Mask for which tiles to create binds for, 0 == All tiles, - * only applies to creating new VMAs - */ - __u64 tile_mask; - -#define XE_VM_BIND_OP_MAP 0x0 -#define XE_VM_BIND_OP_UNMAP 0x1 -#define XE_VM_BIND_OP_MAP_USERPTR 0x2 -#define XE_VM_BIND_OP_UNMAP_ALL 0x3 -#define XE_VM_BIND_OP_PREFETCH 0x4 - /** @op: Bind operation to perform */ - __u32 op; - -#define XE_VM_BIND_FLAG_READONLY (0x1 << 0) -#define XE_VM_BIND_FLAG_ASYNC (0x1 << 1) - /* - * Valid on a faulting VM only, do the MAP operation immediately rather - * than deferring the MAP to the page fault handler. - */ -#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 2) - /* - * When the NULL flag is set, the page tables are setup with a special - * bit which indicates writes are dropped and all reads return zero. In - * the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP - * operations, the BO handle MBZ, and the BO offset MBZ. This flag is - * intended to implement VK sparse bindings. - */ -#define XE_VM_BIND_FLAG_NULL (0x1 << 3) - /** @flags: Bind flags */ - __u32 flags; - - /** @mem_region: Memory region to prefetch VMA to, instance not a mask */ - __u32 region; - - /** @reserved: Reserved */ - __u64 reserved[2]; + /** + * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP + */ + __u32 obj; + + /** @pad: MBZ */ + __u32 pad; + + union { + /** + * @obj_offset: Offset into the object, MBZ for CLEAR_RANGE, + * ignored for unbind + */ + __u64 obj_offset; + + /** @userptr: user pointer to bind on */ + __u64 userptr; + }; + + /** + * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL + */ + __u64 range; + + /** @addr: Address to operate on, MBZ for UNMAP_ALL */ + __u64 addr; + + /** + * @tile_mask: Mask for which tiles to create binds for, 0 == All tiles, + * only applies to creating new VMAs + */ + __u64 tile_mask; + +#define XE_VM_BIND_OP_MAP 0x0 +#define XE_VM_BIND_OP_UNMAP 0x1 +#define XE_VM_BIND_OP_MAP_USERPTR 0x2 +#define XE_VM_BIND_OP_UNMAP_ALL 0x3 +#define XE_VM_BIND_OP_PREFETCH 0x4 + /** @op: Bind operation to perform */ + __u32 op; + +#define XE_VM_BIND_FLAG_READONLY (0x1 << 0) +#define XE_VM_BIND_FLAG_ASYNC (0x1 << 1) + /* + * Valid on a faulting VM only, do the MAP operation immediately rather + * than deferring the MAP to the page fault handler. + */ +#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 2) + /* + * When the NULL flag is set, the page tables are setup with a special + * bit which indicates writes are dropped and all reads return zero. In + * the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP + * operations, the BO handle MBZ, and the BO offset MBZ. This flag is + * intended to implement VK sparse bindings. 
+ */ +#define XE_VM_BIND_FLAG_NULL (0x1 << 3) + /** @flags: Bind flags */ + __u32 flags; + + /** @mem_region: Memory region to prefetch VMA to, instance not a mask */ + __u32 region; + + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_vm_bind { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; - /** @vm_id: The ID of the VM to bind to */ - __u32 vm_id; + /** @vm_id: The ID of the VM to bind to */ + __u32 vm_id; - /** - * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND - * and exec queue must have same vm_id. If zero, the default VM bind engine - * is used. - */ - __u32 exec_queue_id; + /** + * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND + * and exec queue must have same vm_id. If zero, the default VM bind engine + * is used. + */ + __u32 exec_queue_id; - /** @num_binds: number of binds in this IOCTL */ - __u32 num_binds; + /** @num_binds: number of binds in this IOCTL */ + __u32 num_binds; - /** @pad: MBZ */ - __u32 pad; + /** @pad: MBZ */ + __u32 pad; - union { - /** @bind: used if num_binds == 1 */ - struct drm_xe_vm_bind_op bind; + union { + /** @bind: used if num_binds == 1 */ + struct drm_xe_vm_bind_op bind; - /** - * @vector_of_binds: userptr to array of struct - * drm_xe_vm_bind_op if num_binds > 1 - */ - __u64 vector_of_binds; - }; + /** + * @vector_of_binds: userptr to array of struct + * drm_xe_vm_bind_op if num_binds > 1 + */ + __u64 vector_of_binds; + }; - /** @num_syncs: amount of syncs to wait on */ - __u32 num_syncs; + /** @num_syncs: amount of syncs to wait on */ + __u32 num_syncs; - /** @pad2: MBZ */ - __u32 pad2; + /** @pad2: MBZ */ + __u32 pad2; - /** @syncs: pointer to struct drm_xe_sync array */ - __u64 syncs; + /** @syncs: pointer to struct drm_xe_sync array */ + __u64 syncs; - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @reserved: Reserved */ + __u64 reserved[2]; }; /** @@ -737,156 +737,156 @@ struct drm_xe_vm_bind { * Same namespace for extensions as drm_xe_exec_queue_create */ struct drm_xe_exec_queue_set_property { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @exec_queue_id: Exec queue ID */ - __u32 exec_queue_id; - -#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0 -#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 -#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2 -#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3 -#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4 -#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5 -#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6 -#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7 - /** @property: property to set */ - __u32 property; - - /** @value: property value */ - __u64 value; - - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @exec_queue_id: Exec queue ID */ + __u32 exec_queue_id; + +#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0 +#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1 +#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2 +#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3 +#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4 +#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5 +#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6 +#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7 + /** @property: property to set */ + __u32 property; + + /** @value: property value */ + 
__u64 value; + + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_exec_queue_create { #define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0 - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; - /** @width: submission width (number BB per exec) for this exec queue */ - __u16 width; + /** @width: submission width (number BB per exec) for this exec queue */ + __u16 width; - /** @num_placements: number of valid placements for this exec queue */ - __u16 num_placements; + /** @num_placements: number of valid placements for this exec queue */ + __u16 num_placements; - /** @vm_id: VM to use for this exec queue */ - __u32 vm_id; + /** @vm_id: VM to use for this exec queue */ + __u32 vm_id; - /** @flags: MBZ */ - __u32 flags; + /** @flags: MBZ */ + __u32 flags; - /** @exec_queue_id: Returned exec queue ID */ - __u32 exec_queue_id; + /** @exec_queue_id: Returned exec queue ID */ + __u32 exec_queue_id; - /** - * @instances: user pointer to a 2-d array of struct - * drm_xe_engine_class_instance - * - * length = width (i) * num_placements (j) - * index = j + i * width - */ - __u64 instances; + /** + * @instances: user pointer to a 2-d array of struct + * drm_xe_engine_class_instance + * + * length = width (i) * num_placements (j) + * index = j + i * width + */ + __u64 instances; - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_exec_queue_get_property { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; - /** @exec_queue_id: Exec queue ID */ - __u32 exec_queue_id; + /** @exec_queue_id: Exec queue ID */ + __u32 exec_queue_id; -#define XE_EXEC_QUEUE_GET_PROPERTY_BAN 0 - /** @property: property to get */ - __u32 property; +#define XE_EXEC_QUEUE_GET_PROPERTY_BAN 0 + /** @property: property to get */ + __u32 property; - /** @value: property value */ - __u64 value; + /** @value: property value */ + __u64 value; - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_exec_queue_destroy { - /** @exec_queue_id: Exec queue ID */ - __u32 exec_queue_id; + /** @exec_queue_id: Exec queue ID */ + __u32 exec_queue_id; - /** @pad: MBZ */ - __u32 pad; + /** @pad: MBZ */ + __u32 pad; - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_sync { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - -#define DRM_XE_SYNC_SYNCOBJ 0x0 -#define DRM_XE_SYNC_TIMELINE_SYNCOBJ 0x1 -#define DRM_XE_SYNC_DMA_BUF 0x2 -#define DRM_XE_SYNC_USER_FENCE 0x3 -#define DRM_XE_SYNC_SIGNAL 0x10 - __u32 flags; - - /** @pad: MBZ */ - __u32 pad; - - union { - __u32 handle; - - /** - * @addr: Address of user fence. When sync passed in via exec - * IOCTL this a GPU address in the VM. When sync passed in via - * VM bind IOCTL this is a user pointer. In either case, it is - * the users responsibility that this address is present and - * mapped when the user fence is signalled. Must be qword - * aligned. 
- */ - __u64 addr; - }; - - __u64 timeline_value; - - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + +#define DRM_XE_SYNC_SYNCOBJ 0x0 +#define DRM_XE_SYNC_TIMELINE_SYNCOBJ 0x1 +#define DRM_XE_SYNC_DMA_BUF 0x2 +#define DRM_XE_SYNC_USER_FENCE 0x3 +#define DRM_XE_SYNC_SIGNAL 0x10 + __u32 flags; + + /** @pad: MBZ */ + __u32 pad; + + union { + __u32 handle; + + /** + * @addr: Address of user fence. When sync passed in via exec + * IOCTL this a GPU address in the VM. When sync passed in via + * VM bind IOCTL this is a user pointer. In either case, it is + * the users responsibility that this address is present and + * mapped when the user fence is signalled. Must be qword + * aligned. + */ + __u64 addr; + }; + + __u64 timeline_value; + + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_exec { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; - /** @exec_queue_id: Exec queue ID for the batch buffer */ - __u32 exec_queue_id; + /** @exec_queue_id: Exec queue ID for the batch buffer */ + __u32 exec_queue_id; - /** @num_syncs: Amount of struct drm_xe_sync in array. */ - __u32 num_syncs; + /** @num_syncs: Amount of struct drm_xe_sync in array. */ + __u32 num_syncs; - /** @syncs: Pointer to struct drm_xe_sync array. */ - __u64 syncs; + /** @syncs: Pointer to struct drm_xe_sync array. */ + __u64 syncs; - /** - * @address: address of batch buffer if num_batch_buffer == 1 or an - * array of batch buffer addresses - */ - __u64 address; + /** + * @address: address of batch buffer if num_batch_buffer == 1 or an + * array of batch buffer addresses + */ + __u64 address; - /** - * @num_batch_buffer: number of batch buffer in this exec, must match - * the width of the engine - */ - __u16 num_batch_buffer; + /** + * @num_batch_buffer: number of batch buffer in this exec, must match + * the width of the engine + */ + __u16 num_batch_buffer; - /** @pad: MBZ */ - __u16 pad[3]; + /** @pad: MBZ */ + __u16 pad[3]; - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @reserved: Reserved */ + __u64 reserved[2]; }; /** @@ -895,143 +895,143 @@ struct drm_xe_exec { * Wait on user fence, XE will wake-up on every HW engine interrupt in the * instances list and check if user fence is complete:: * - * (*addr & MASK) OP (VALUE & MASK) + * (*addr & MASK) OP (VALUE & MASK) * * Returns to user on user fence completion or timeout. */ struct drm_xe_wait_user_fence { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** - * @addr: user pointer address to wait on, must qword aligned - */ - __u64 addr; - -#define DRM_XE_UFENCE_WAIT_EQ 0 -#define DRM_XE_UFENCE_WAIT_NEQ 1 -#define DRM_XE_UFENCE_WAIT_GT 2 -#define DRM_XE_UFENCE_WAIT_GTE 3 -#define DRM_XE_UFENCE_WAIT_LT 4 -#define DRM_XE_UFENCE_WAIT_LTE 5 - /** @op: wait operation (type of comparison) */ - __u16 op; - -#define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. 
Wait on VM bind */ -#define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1) - /** @flags: wait flags */ - __u16 flags; - - /** @pad: MBZ */ - __u32 pad; - - /** @value: compare value */ - __u64 value; - -#define DRM_XE_UFENCE_WAIT_U8 0xffu -#define DRM_XE_UFENCE_WAIT_U16 0xffffu -#define DRM_XE_UFENCE_WAIT_U32 0xffffffffu -#define DRM_XE_UFENCE_WAIT_U64 0xffffffffffffffffu - /** @mask: comparison mask */ - __u64 mask; - /** - * @timeout: how long to wait before bailing, value in nanoseconds. - * Without DRM_XE_UFENCE_WAIT_ABSTIME flag set (relative timeout) - * it contains timeout expressed in nanoseconds to wait (fence will - * expire at now() + timeout). - * When DRM_XE_UFENCE_WAIT_ABSTIME flat is set (absolute timeout) wait - * will end at timeout (uses system MONOTONIC_CLOCK). - * Passing negative timeout leads to neverending wait. - * - * On relative timeout this value is updated with timeout left - * (for restarting the call in case of signal delivery). - * On absolute timeout this value stays intact (restarted call still - * expire at the same point of time). - */ - __s64 timeout; - - /** - * @num_engines: number of engine instances to wait on, must be zero - * when DRM_XE_UFENCE_WAIT_SOFT_OP set - */ - __u64 num_engines; - - /** - * @instances: user pointer to array of drm_xe_engine_class_instance to - * wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP set - */ - __u64 instances; - - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** + * @addr: user pointer address to wait on, must qword aligned + */ + __u64 addr; + +#define DRM_XE_UFENCE_WAIT_EQ 0 +#define DRM_XE_UFENCE_WAIT_NEQ 1 +#define DRM_XE_UFENCE_WAIT_GT 2 +#define DRM_XE_UFENCE_WAIT_GTE 3 +#define DRM_XE_UFENCE_WAIT_LT 4 +#define DRM_XE_UFENCE_WAIT_LTE 5 + /** @op: wait operation (type of comparison) */ + __u16 op; + +#define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */ +#define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1) + /** @flags: wait flags */ + __u16 flags; + + /** @pad: MBZ */ + __u32 pad; + + /** @value: compare value */ + __u64 value; + +#define DRM_XE_UFENCE_WAIT_U8 0xffu +#define DRM_XE_UFENCE_WAIT_U16 0xffffu +#define DRM_XE_UFENCE_WAIT_U32 0xffffffffu +#define DRM_XE_UFENCE_WAIT_U64 0xffffffffffffffffu + /** @mask: comparison mask */ + __u64 mask; + /** + * @timeout: how long to wait before bailing, value in nanoseconds. + * Without DRM_XE_UFENCE_WAIT_ABSTIME flag set (relative timeout) + * it contains timeout expressed in nanoseconds to wait (fence will + * expire at now() + timeout). + * When DRM_XE_UFENCE_WAIT_ABSTIME flat is set (absolute timeout) wait + * will end at timeout (uses system MONOTONIC_CLOCK). + * Passing negative timeout leads to neverending wait. + * + * On relative timeout this value is updated with timeout left + * (for restarting the call in case of signal delivery). + * On absolute timeout this value stays intact (restarted call still + * expire at the same point of time). 
+ */ + __s64 timeout; + + /** + * @num_engines: number of engine instances to wait on, must be zero + * when DRM_XE_UFENCE_WAIT_SOFT_OP set + */ + __u64 num_engines; + + /** + * @instances: user pointer to array of drm_xe_engine_class_instance to + * wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP set + */ + __u64 instances; + + /** @reserved: Reserved */ + __u64 reserved[2]; }; struct drm_xe_vm_madvise { - /** @extensions: Pointer to the first extension struct, if any */ - __u64 extensions; - - /** @vm_id: The ID VM in which the VMA exists */ - __u32 vm_id; - - /** @pad: MBZ */ - __u32 pad; - - /** @range: Number of bytes in the VMA */ - __u64 range; - - /** @addr: Address of the VMA to operation on */ - __u64 addr; - - /* - * Setting the preferred location will trigger a migrate of the VMA - * backing store to new location if the backing store is already - * allocated. - * - * For DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS usage, see enum - * drm_xe_memory_class. - */ -#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS 0 -#define DRM_XE_VM_MADVISE_PREFERRED_GT 1 - /* - * In this case lower 32 bits are mem class, upper 32 are GT. - * Combination provides a single IOCTL plus migrate VMA to preferred - * location. - */ -#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS_GT 2 - /* - * The CPU will do atomic memory operations to this VMA. Must be set on - * some devices for atomics to behave correctly. - */ -#define DRM_XE_VM_MADVISE_CPU_ATOMIC 3 - /* - * The device will do atomic memory operations to this VMA. Must be set - * on some devices for atomics to behave correctly. - */ -#define DRM_XE_VM_MADVISE_DEVICE_ATOMIC 4 - /* - * Priority WRT to eviction (moving from preferred memory location due - * to memory pressure). The lower the priority, the more likely to be - * evicted. - */ -#define DRM_XE_VM_MADVISE_PRIORITY 5 -#define DRM_XE_VMA_PRIORITY_LOW 0 - /* Default */ -#define DRM_XE_VMA_PRIORITY_NORMAL 1 - /* Must be user with elevated privileges */ -#define DRM_XE_VMA_PRIORITY_HIGH 2 - /* Pin the VMA in memory, must be user with elevated privileges */ -#define DRM_XE_VM_MADVISE_PIN 6 - /** @property: property to set */ - __u32 property; - - /** @pad2: MBZ */ - __u32 pad2; - - /** @value: property value */ - __u64 value; - - /** @reserved: Reserved */ - __u64 reserved[2]; + /** @extensions: Pointer to the first extension struct, if any */ + __u64 extensions; + + /** @vm_id: The ID VM in which the VMA exists */ + __u32 vm_id; + + /** @pad: MBZ */ + __u32 pad; + + /** @range: Number of bytes in the VMA */ + __u64 range; + + /** @addr: Address of the VMA to operation on */ + __u64 addr; + + /* + * Setting the preferred location will trigger a migrate of the VMA + * backing store to new location if the backing store is already + * allocated. + * + * For DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS usage, see enum + * drm_xe_memory_class. + */ +#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS 0 +#define DRM_XE_VM_MADVISE_PREFERRED_GT 1 + /* + * In this case lower 32 bits are mem class, upper 32 are GT. + * Combination provides a single IOCTL plus migrate VMA to preferred + * location. + */ +#define DRM_XE_VM_MADVISE_PREFERRED_MEM_CLASS_GT 2 + /* + * The CPU will do atomic memory operations to this VMA. Must be set on + * some devices for atomics to behave correctly. + */ +#define DRM_XE_VM_MADVISE_CPU_ATOMIC 3 + /* + * The device will do atomic memory operations to this VMA. Must be set + * on some devices for atomics to behave correctly. 
+ */ +#define DRM_XE_VM_MADVISE_DEVICE_ATOMIC 4 + /* + * Priority WRT to eviction (moving from preferred memory location due + * to memory pressure). The lower the priority, the more likely to be + * evicted. + */ +#define DRM_XE_VM_MADVISE_PRIORITY 5 +#define DRM_XE_VMA_PRIORITY_LOW 0 + /* Default */ +#define DRM_XE_VMA_PRIORITY_NORMAL 1 + /* Must be user with elevated privileges */ +#define DRM_XE_VMA_PRIORITY_HIGH 2 + /* Pin the VMA in memory, must be user with elevated privileges */ +#define DRM_XE_VM_MADVISE_PIN 6 + /** @property: property to set */ + __u32 property; + + /** @pad2: MBZ */ + __u32 pad2; + + /** @value: property value */ + __u64 value; + + /** @reserved: Reserved */ + __u64 reserved[2]; }; /** @@ -1045,19 +1045,19 @@ struct drm_xe_vm_madvise { * * .. code-block:: C * - * struct perf_event_attr attr; - * long long count; - * int cpu = 0; - * int fd; + * struct perf_event_attr attr; + * long long count; + * int cpu = 0; + * int fd; * - * memset(&attr, 0, sizeof(struct perf_event_attr)); - * attr.type = type; // eg: /sys/bus/event_source/devices/xe_0000_56_00.0/type - * attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED; - * attr.use_clockid = 1; - * attr.clockid = CLOCK_MONOTONIC; - * attr.config = XE_PMU_INTERRUPTS(0); + * memset(&attr, 0, sizeof(struct perf_event_attr)); + * attr.type = type; // eg: /sys/bus/event_source/devices/xe_0000_56_00.0/type + * attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED; + * attr.use_clockid = 1; + * attr.clockid = CLOCK_MONOTONIC; + * attr.config = XE_PMU_INTERRUPTS(0); * - * fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0); + * fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0); */ /* @@ -1066,13 +1066,13 @@ struct drm_xe_vm_madvise { #define __XE_PMU_GT_SHIFT (56) #define ___XE_PMU_OTHER(gt, x) \ - (((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT)) + (((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT)) -#define XE_PMU_INTERRUPTS(gt) ___XE_PMU_OTHER(gt, 0) -#define XE_PMU_RENDER_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 1) -#define XE_PMU_COPY_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 2) -#define XE_PMU_MEDIA_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 3) -#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 4) +#define XE_PMU_INTERRUPTS(gt) ___XE_PMU_OTHER(gt, 0) +#define XE_PMU_RENDER_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 1) +#define XE_PMU_COPY_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 2) +#define XE_PMU_MEDIA_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 3) +#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 4) #if defined(__cplusplus) }
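
As context for the dma-buf sync API carried in the first header above, a minimal usage sketch of bracketing CPU access with DMA_BUF_IOCTL_SYNC. This is not part of the patch; `dmabuf_fd` and `size` are assumed to come from elsewhere (e.g. a PRIME export), and the include path assumes the dma-buf.h shipped by this change:

.. code-block:: C

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include "dma-buf.h"

    /* Fill a dma-buf from the CPU, with a SYNC_START/SYNC_END bracket
     * so the exporter can flush/invalidate caches around the access. */
    static int cpu_fill(int dmabuf_fd, size_t size, uint8_t byte)
    {
            struct dma_buf_sync sync = { 0 };
            void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_SHARED, dmabuf_fd, 0);

            if (map == MAP_FAILED)
                    return -1;

            /* Start a CPU write session. */
            sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
            if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) == 0) {
                    memset(map, byte, size);
                    /* End the session so devices observe coherent data. */
                    sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
                    ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
            }
            munmap(map, size);
            return 0;
    }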
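Likewise, a sketch of the create path against this (pre-upstream) snapshot of xe_drm.h. Here `fd` is assumed to be an open Xe render node, and `sysmem_instance` the @instance of a XE_MEM_REGION_CLASS_SYSMEM region previously reported by DRM_XE_DEVICE_QUERY_MEM_USAGE; per this header, drm_xe_gem_create.flags is currently a mask of memory instances where the BO may be placed:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "xe_drm.h"

    /* Create a VM, then a 1 MiB buffer object private to that VM. */
    static int create_vm_and_bo(int fd, uint32_t sysmem_instance,
                                uint32_t *vm_id, uint32_t *handle)
    {
            struct drm_xe_vm_create vm = { .flags = 0 };
            struct drm_xe_gem_create gem = {
                    .size = 1 << 20,                /* page aligned */
                    .flags = 1u << sysmem_instance, /* placement mask */
            };

            if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &vm))
                    return -1;
            gem.vm_id = vm.vm_id; /* BO may only ever bind to this VM */
            if (ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &gem)) {
                    struct drm_xe_vm_destroy destroy = { .vm_id = vm.vm_id };
                    ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy);
                    return -1;
            }
            *vm_id = vm.vm_id;
            *handle = gem.handle;
            return 0;
    }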
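Finally, a sketch of waiting on a user fence with the predicate the header documents, (*addr & MASK) OP (VALUE & MASK). The fence is assumed to have been attached to a VM bind via a DRM_XE_SYNC_USER_FENCE sync entry, so DRM_XE_UFENCE_WAIT_SOFT_OP applies and, as the header requires, num_engines is zero and instances is NULL:

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "xe_drm.h"

    /* Block (up to 1 s, relative timeout) until *fence_addr == value. */
    static int wait_bind_fence(int fd, uint64_t fence_addr, uint64_t value)
    {
            struct drm_xe_wait_user_fence wait = {
                    .addr = fence_addr,      /* qword-aligned user pointer */
                    .op = DRM_XE_UFENCE_WAIT_EQ,
                    .flags = DRM_XE_UFENCE_WAIT_SOFT_OP,
                    .value = value,
                    .mask = DRM_XE_UFENCE_WAIT_U64,
                    .timeout = 1000000000LL, /* ns */
                    .num_engines = 0,        /* required with SOFT_OP */
                    .instances = 0,          /* must be NULL with SOFT_OP */
            };

            return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
    }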