diff --git a/apps/from_numpy/main.py b/apps/from_numpy/main.py
index 8ea4721..2dfe514 100644
--- a/apps/from_numpy/main.py
+++ b/apps/from_numpy/main.py
@@ -6,7 +6,7 @@
 libmain = ctypes.cdll.LoadLibrary("./libmain.so")
 
 
-class DLContext(ctypes.Structure):
+class DLDevice(ctypes.Structure):
     _fields_ = [("device_type", ctypes.c_int),
                 ("device_id", ctypes.c_int)]
 
@@ -26,7 +26,7 @@ class DLDataType(ctypes.Structure):
 
 class DLTensor(ctypes.Structure):
     _fields_ = [("data", ctypes.c_void_p),
-                ("ctx", DLContext),
+                ("device", DLDevice),
                 ("ndim", ctypes.c_int),
                 ("dtype", DLDataType),
                 ("shape", ctypes.POINTER(ctypes.c_int64)),
@@ -74,7 +74,7 @@ def make_dl_tensor(array):
     # You may check array.flags here, e.g. array.flags['C_CONTIGUOUS']
     dl_tensor = DLTensor()
     dl_tensor.data = array.ctypes.data_as(ctypes.c_void_p)
-    dl_tensor.ctx = DLContext(1, 0)
+    dl_tensor.device = DLDevice(1, 0)
     dl_tensor.ndim = array.ndim
     dl_tensor.dtype = DLDataType.TYPE_MAP[str(array.dtype)]
     # For 0-dim ndarrays, strides and shape will be NULL
diff --git a/apps/from_numpy/numpy_dlpack.c b/apps/from_numpy/numpy_dlpack.c
index f4309c8..59642a3 100644
--- a/apps/from_numpy/numpy_dlpack.c
+++ b/apps/from_numpy/numpy_dlpack.c
@@ -9,9 +9,9 @@ void display(DLManagedTensor a) {
   int i;
   int ndim = a.dl_tensor.ndim;
   printf("data = %p\n", a.dl_tensor.data);
-  printf("ctx = (device_type = %d, device_id = %d)\n",
-         (int) a.dl_tensor.ctx.device_type,
-         (int) a.dl_tensor.ctx.device_id);
+  printf("device = (device_type = %d, device_id = %d)\n",
+         (int) a.dl_tensor.device.device_type,
+         (int) a.dl_tensor.device.device_id);
   printf("dtype = (code = %d, bits = %d, lanes = %d)\n",
          (int) a.dl_tensor.dtype.code,
          (int) a.dl_tensor.dtype.bits,
diff --git a/contrib/dlpack/dlpackcpp.h b/contrib/dlpack/dlpackcpp.h
index 3f12399..44c7d59 100644
--- a/contrib/dlpack/dlpackcpp.h
+++ b/contrib/dlpack/dlpackcpp.h
@@ -26,8 +26,8 @@ class DLTContainer {
     handle_.dtype.code = kDLFloat;
     handle_.dtype.bits = 32U;
     handle_.dtype.lanes = 1U;
-    handle_.ctx.device_type = kDLCPU;
-    handle_.ctx.device_id = 0;
+    handle_.device.device_type = kDLCPU;
+    handle_.device.device_id = 0;
     handle_.shape = nullptr;
     handle_.strides = nullptr;
     handle_.byte_offset = 0;
diff --git a/include/dlpack/dlpack.h b/include/dlpack/dlpack.h
index dc21461..5f1b452 100644
--- a/include/dlpack/dlpack.h
+++ b/include/dlpack/dlpack.h
@@ -33,7 +33,7 @@
 extern "C" {
 #endif
 /*!
- * \brief The device type in DLContext.
+ * \brief The device type in DLDevice.
  */
 typedef enum {
   /*! \brief CPU device */
@@ -64,14 +64,19 @@
 } DLDeviceType;
 
 /*!
- * \brief A Device context for Tensor and operator.
+ * \brief A Device for Tensor and operator.
  */
 typedef struct {
   /*! \brief The device type used in the device. */
   DLDeviceType device_type;
   /*! \brief The device index */
   int device_id;
-} DLContext;
+} DLDevice;
+
+/*!
+ * \brief This is an alias for DLDevice. Notice that this will be removed in the next release.
+ */
+typedef DLDevice DLContext;
 /*!
  * \brief The type code options DLDataType.
  */
@@ -144,8 +149,8 @@ typedef struct {
    * \endcode
    */
   void* data;
-  /*! \brief The device context of the tensor */
-  DLContext ctx;
+  /*! \brief The device of the tensor */
+  DLDevice device;
   /*! \brief Number of dimensions */
   int ndim;
   /*! \brief The data type of the pointer*/
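
Not part of the patch, but for context: a minimal sketch of what downstream C code looks like after this rename, assuming dlpack.h from this repository is on the include path. The struct, field, and enum names (DLTensor, DLDevice, kDLCPU, kDLFloat) come from the hunks above; the buffer, shape, and main function are illustrative only. Code that still spells the struct DLContext keeps compiling through the typedef DLDevice DLContext alias, but should migrate to DLDevice before the alias is removed.

/*
 * Illustrative sketch: fill a DLTensor describing a small CPU float buffer,
 * using the renamed `device` field (previously `ctx`).
 */
#include <stdint.h>
#include <stdio.h>
#include <dlpack/dlpack.h>

int main(void) {
  float buf[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  int64_t shape[1] = {4};

  DLTensor t;
  t.data = buf;
  t.device.device_type = kDLCPU;  /* previously: t.ctx.device_type */
  t.device.device_id = 0;         /* previously: t.ctx.device_id   */
  t.ndim = 1;
  t.dtype.code = kDLFloat;
  t.dtype.bits = 32U;
  t.dtype.lanes = 1U;
  t.shape = shape;
  t.strides = NULL;               /* NULL strides: compact row-major layout */
  t.byte_offset = 0;

  printf("device = (device_type = %d, device_id = %d)\n",
         (int) t.device.device_type, (int) t.device.device_id);
  return 0;
}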