static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
+ struct exynos_drm_file_private *file_private;
+
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ file_private = kzalloc(sizeof(*file_private), GFP_KERNEL);
+ if (!file_private) {
+ DRM_ERROR("failed to allocate exynos_drm_file_private\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&file_private->gem_cpu_acquire_list);
+
+ file->driver_priv = file_private;
+
return exynos_drm_subdrv_open(dev, file);
}
static void exynos_drm_preclose(struct drm_device *dev,
struct drm_file *file)
{
+ struct exynos_drm_file_private *file_private = file->driver_priv;
+ struct exynos_drm_gem_obj_node *cur, *d;
+
DRM_DEBUG_DRIVER("%s\n", __FILE__);
+ mutex_lock(&dev->struct_mutex);
+ /* release kds resource sets for outstanding GEM object acquires */
+ list_for_each_entry_safe(cur, d,
+ &file_private->gem_cpu_acquire_list, list) {
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ BUG_ON(cur->exynos_gem_obj->resource_set == NULL);
+ kds_resource_set_release(&cur->exynos_gem_obj->resource_set);
+#endif
+ drm_gem_object_unreference(&cur->exynos_gem_obj->base);
+ kfree(cur);
+ }
+ mutex_unlock(&dev->struct_mutex);
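+ /* the list has been drained above; reset it to a valid empty state */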
+ INIT_LIST_HEAD(&file_private->gem_cpu_acquire_list);
+
exynos_drm_subdrv_close(dev, file);
}
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_VIDI_CONNECTION,
vidi_connection_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CPU_ACQUIRE,
+ exynos_drm_gem_cpu_acquire_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CPU_RELEASE,
+ exynos_drm_gem_cpu_release_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
};
static const struct file_operations exynos_drm_driver_fops = {
#endif
};
+/*
+ * Exynos drm_file private structure.
+ *
+ * @gem_cpu_acquire_list: list of GEM objects this file holds CPU acquires on
+ */
+struct exynos_drm_file_private {
+ struct list_head gem_cpu_acquire_list;
+};
+
/*
* Exynos drm sub driver structure.
*
#include "drmP.h"
#include "drm.h"
+#include <linux/completion.h>
+#include <linux/kds.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ if (exynos_gem_obj->resource_set != NULL) {
+ /* kds_resource_set_release NULLs the pointer */
+ kds_resource_set_release(&exynos_gem_obj->resource_set);
+ }
+#endif
+
if ((exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) &&
exynos_gem_obj->buffer->pages)
exynos_drm_gem_put_pages(obj);
return 0;
}
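+/*
+ * KDS completion callback: param1 is the completion passed to
+ * kds_async_waitall() in the acquire ioctl below; completing it wakes
+ * the interruptible wait once all requested resources are granted.
+ */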
+static void cpu_acquire_kds_cb_fn(void *param1, void *param2)
+{
+ struct completion *completion = param1;
+ complete(completion);
+}
+
+int exynos_drm_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_gem_cpu_acquire *args = data;
+ struct exynos_drm_file_private *file_priv = file->driver_priv;
+ struct drm_gem_object *obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ struct kds_resource *kds;
+ struct kds_resource_set *rset;
+ unsigned long exclusive;
+ struct kds_callback callback;
+ DECLARE_COMPLETION_ONSTACK(completion);
+#endif
+ struct exynos_drm_gem_obj_node *gem_node;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (!(dev->driver->driver_features & DRIVER_GEM)) {
+ DRM_ERROR("does not support GEM.\n");
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ if (exynos_gem_obj->base.export_dma_buf == NULL) {
+ /* If there is no dmabuf present, there is no cross-process/
+ * cross-device sharing and sync is unnecessary.
+ */
+ ret = 0;
+ goto unref_obj;
+ }
+
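+ /* map DRM_EXYNOS_GEM_CPU_ACQUIRE_EXCLUSIVE onto KDS's exclusive
+ * access bit; all other acquires wait as shared */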
+ exclusive = 0;
+ if ((args->flags & DRM_EXYNOS_GEM_CPU_ACQUIRE_EXCLUSIVE) != 0)
+ exclusive = 1;
+ kds = &exynos_gem_obj->base.export_dma_buf->kds;
+ kds_callback_init(&callback, 1, &cpu_acquire_kds_cb_fn);
+ ret = kds_async_waitall(&rset, KDS_FLAG_LOCKED_WAIT, &callback,
+ &completion, NULL, 1, &exclusive, &kds);
+ if (ret) {
+ /* rset is only valid when kds_async_waitall() succeeds */
+ kds_callback_term(&callback);
+ goto unref_obj;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ ret = wait_for_completion_interruptible(&completion);
+ kds_callback_term(&callback);
+
+ mutex_lock(&dev->struct_mutex);
+ if (ret)
+ goto release_rset;
+#endif
+
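+ /* record the acquire in the per-file list so exynos_drm_preclose()
+ * can release it if the client exits without calling RELEASE */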
+ gem_node = kzalloc(sizeof(*gem_node), GFP_KERNEL);
+ if (!gem_node) {
+ DRM_ERROR("failed to allocate eyxnos_drm_gem_obj_node.\n");
+ ret = -ENOMEM;
+ goto release_rset;
+ }
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ exynos_gem_obj->resource_set = rset;
+#endif
+
+ gem_node->exynos_gem_obj = exynos_gem_obj;
+ list_add(&gem_node->list, &file_priv->gem_cpu_acquire_list);
+ mutex_unlock(&dev->struct_mutex);
+ return 0;
+
+release_rset:
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ kds_resource_set_release_sync(&rset);
+#endif
+
+unref_obj:
+ drm_gem_object_unreference(obj);
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int exynos_drm_gem_cpu_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_exynos_gem_cpu_acquire *args = data;
+ struct exynos_drm_file_private *file_priv = file->driver_priv;
+ struct drm_gem_object *obj;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ struct list_head *cur;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ mutex_lock(&dev->struct_mutex);
+
+ if (!(dev->driver->driver_features & DRIVER_GEM)) {
+ DRM_ERROR("does not support GEM.\n");
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ if (exynos_gem_obj->base.export_dma_buf == NULL) {
+ /* If there is no dmabuf present, there is no cross-process/
+ * cross-device sharing and sync is unnecessary.
+ */
+ ret = 0;
+ goto unref_obj;
+ }
+#endif
+
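+ /* find the per-file list node recorded by the matching ACQUIRE */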
+ list_for_each(cur, &file_priv->gem_cpu_acquire_list) {
+ struct exynos_drm_gem_obj_node *node = list_entry(
+ cur, struct exynos_drm_gem_obj_node, list);
+ if (node->exynos_gem_obj == exynos_gem_obj)
+ break;
+ }
+ if (cur == &file_priv->gem_cpu_acquire_list) {
+ DRM_ERROR("gem object not acquired for current process.\n");
+ ret = -EINVAL;
+ goto unref_obj;
+ }
+
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ /* kds_resource_set_release NULLs the pointer */
+ BUG_ON(exynos_gem_obj->resource_set == NULL);
+ kds_resource_set_release(&exynos_gem_obj->resource_set);
+#endif
+
+ list_del(cur);
+ kfree(list_entry(cur, struct exynos_drm_gem_obj_node, list));
+ /* unreference for the reference held since cpu_acquire_ioctl */
+ drm_gem_object_unreference(obj);
+ ret = 0;
+
+unref_obj:
+ /* unreference for the reference from drm_gem_object_lookup() */
+ drm_gem_object_unreference(obj);
+
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
* by user request or at framebuffer creation.
* continuous memory region allocated by user request
* or at framebuffer creation.
+ * @resource_set: the KDS resource set held by the currently outstanding CPU
+ * acquire (if any).
* @flags: indicate memory type to allocated buffer and cache attribute.
*
* P.S. this object would be transfered to user as kms_bo.handle so
struct exynos_drm_gem_obj {
struct drm_gem_object base;
struct exynos_drm_gem_buf *buffer;
+#ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
+ struct kds_resource_set *resource_set;
+#endif
unsigned int flags;
};
+/*
+ * Exynos drm gem object list node structure.
+ *
+ * @list: list link.
+ * @exynos_gem_obj: the exynos_drm_gem_obj this node refers to.
+ */
+struct exynos_drm_gem_obj_node {
+ struct list_head list;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+};
+
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
/* destroy a buffer with gem object */
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+/* acquire gem object for CPU access. */
+int exynos_drm_gem_cpu_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+/* release gem object after CPU access. */
+int exynos_drm_gem_cpu_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
uint64_t mapped;
};
+struct drm_exynos_plane_set_zpos {
+ __u32 plane_id;
+ __s32 zpos;
+};
+
/**
* A structure for user connection request of virtual display.
*
uint64_t edid;
};
-struct drm_exynos_plane_set_zpos {
- __u32 plane_id;
- __s32 zpos;
+/* acquire type definitions. */
+enum drm_exynos_gem_cpu_acquire_type {
+ DRM_EXYNOS_GEM_CPU_ACQUIRE_SHARED = 0x0,
+ DRM_EXYNOS_GEM_CPU_ACQUIRE_EXCLUSIVE = 0x1,
+};
+
+/**
+ * A structure for acquiring buffer for CPU access.
+ *
+ * @handle: a handle to gem object created.
+ * @flags: acquire type from enum drm_exynos_gem_cpu_acquire_type.
+ */
+struct drm_exynos_gem_cpu_acquire {
+ unsigned int handle;
+ unsigned int flags;
+};
+
+/**
+ * A structure for releasing buffer after CPU access.
+ *
+ * @handle: a handle to gem object created.
+ */
+struct drm_exynos_gem_cpu_release {
+ unsigned int handle;
};
/* memory type definitions. */
-enum e_drm_exynos_gem_mem_type {
+enum drm_exynos_gem_mem_type {
/* Physically Non-Continuous memory. */
EXYNOS_BO_NONCONTIG = 1 << 0,
EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG
/* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
#define DRM_EXYNOS_PLANE_SET_ZPOS 0x06
#define DRM_EXYNOS_VIDI_CONNECTION 0x07
+#define DRM_EXYNOS_GEM_CPU_ACQUIRE 0x08
+#define DRM_EXYNOS_GEM_CPU_RELEASE 0x09
#define DRM_IOCTL_EXYNOS_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
#define DRM_IOCTL_EXYNOS_VIDI_CONNECTION DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_VIDI_CONNECTION, struct drm_exynos_vidi_connection)
+#define DRM_IOCTL_EXYNOS_GEM_CPU_ACQUIRE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_GEM_CPU_ACQUIRE, struct drm_exynos_gem_cpu_acquire)
+#define DRM_IOCTL_EXYNOS_GEM_CPU_RELEASE DRM_IOWR(DRM_COMMAND_BASE + \
+ DRM_EXYNOS_GEM_CPU_RELEASE, struct drm_exynos_gem_cpu_release)
+
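+/*
+ * Illustrative userspace flow (not part of this header): "fd" is an
+ * open DRM device file descriptor and "handle" a GEM handle the
+ * caller already owns.
+ *
+ *	struct drm_exynos_gem_cpu_acquire acq = {
+ *		.handle = handle,
+ *		.flags = DRM_EXYNOS_GEM_CPU_ACQUIRE_EXCLUSIVE,
+ *	};
+ *	struct drm_exynos_gem_cpu_release rel = { .handle = handle };
+ *
+ *	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CPU_ACQUIRE, &acq) == 0) {
+ *		... CPU reads and writes the buffer here ...
+ *		ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CPU_RELEASE, &rel);
+ *	}
+ */
+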
#ifdef __KERNEL__
/**