{
struct eb_objects *eb;
- eb = malloc(sizeof(*eb), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ eb = malloc(sizeof(*eb),
+ DRM_I915_GEM, M_WAITOK | M_ZERO);
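/*
 * hashinit() sizes the bucket array to a power of two and returns the
 * matching index mask in eb->hashmask; the lookup below indexes the
 * buckets with (handle & eb->hashmask).
 */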
eb->buckets = hashinit(size, DRM_I915_GEM, &eb->hashmask);
- return (eb);
+ return eb;
}
static void
LIST_FOREACH(obj, &eb->buckets[handle & eb->hashmask], exec_node) {
if (obj->exec_handle == handle)
- return (obj);
+ return obj;
}
- return (NULL);
+
+ return NULL;
}
static void
/* We can't wait for rendering with pagefaults disabled */
if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0)
- return (-EFAULT);
+ return -EFAULT;
reloc->delta += target_offset;
if (use_cpu_reloc(obj)) {
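/*
 * CPU relocation path: map the backing page through a transient
 * sf_buf KVA mapping, patch in reloc->delta (already adjusted by
 * target_offset above), then drop the mapping. SFB_NOWAIT means the
 * mapping can fail, hence the -ENOMEM return below.
 */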
sf = sf_buf_alloc(obj->pages[OFF_TO_IDX(reloc->offset)],
SFB_NOWAIT);
if (sf == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
vaddr = (void *)sf_buf_kva(sf);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
sf_buf_free(sf);
i915_gem_retire_requests(dev);
ret = 0;
- pflags = vm_fault_disable_pagefaults();
/* This is the fast path and we cannot handle a pagefault whilst
* holding the device lock lest the user pass in the relocations
* contained within a mmaped bo. For in such a case, the page
* fault handler would call i915_gem_fault() and we would try to
* acquire the device lock again. Obviously this is bad.
*/
-
+ pflags = vm_fault_disable_pagefaults();
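/*
 * pflags records the previous TDP_NOFAULTING state; it is expected to
 * be restored with vm_fault_enable_pagefaults(pflags) once the
 * relocation loop below has finished.
 */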
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
struct drm_i915_gem_object *obj;
struct list_head ordered_objects;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
- int ret, retry;
+ int retry;
+ int ret;
dev_priv = ring->dev->dev_private;
INIT_LIST_HEAD(&ordered_objects);
return -EINVAL;
length = exec[i].relocation_count *
- sizeof(struct drm_i915_gem_relocation_entry);
+ sizeof(struct drm_i915_gem_relocation_entry);
if (length == 0) {
(*map)[i] = NULL;
continue;
}
+
/*
* Since both start and end of the relocation region
* may not be aligned on the page boundary, be
if ((*maplen)[i] == -1) {
free(ma, DRM_I915_GEM);
(*map)[i] = NULL;
- return (-EFAULT);
+ return -EFAULT;
}
}
char *mkva;
uint64_t po_r, po_w;
uint32_t cmd;
-
+
po_r = batch_obj->base.dev->agp->base + batch_obj->gtt_offset +
batch_start_offset + batch_len;
if (batch_len > 0)
int i915_fix_mi_batchbuffer_end = 0;
- static int
+static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
- vm_page_t **relocs_ma;
- int *relocs_len;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
int ret, mode, i;
+ vm_page_t **relocs_ma;
+ int *relocs_len;
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
if (args->batch_len == 0)
return (0);
- ret = validate_exec_list(exec, args->buffer_count, &relocs_ma,
- &relocs_len);
- if (ret != 0)
- goto pre_struct_lock_err;
+ ret = validate_exec_list(exec, args->buffer_count,
+ &relocs_ma, &relocs_len);
+ if (ret)
+ goto pre_mutex_err;
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
ret = -EPERM;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
break;
case I915_EXEC_BLT:
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
ret = -EPERM;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4) {
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (INTEL_INFO(dev)->gen > 5 &&
mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
/* The HW changed the meaning on this bit on gen6 */
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->rings[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
- cliprects = malloc( sizeof(*cliprects) * args->num_cliprects,
- DRM_I915_GEM, M_WAITOK | M_ZERO);
+ cliprects = malloc(args->num_cliprects * sizeof(*cliprects),
+ DRM_I915_GEM, M_WAITOK | M_ZERO);
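/*
 * copyin() returns 0 or a positive errno, so the result is negated here
 * to match the negative-errno convention used throughout this driver.
 */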
ret = -copyin((void *)(uintptr_t)args->cliprects_ptr, cliprects,
sizeof(*cliprects) * args->num_cliprects);
if (ret != 0)
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
if (dev_priv->mm.suspended) {
DRM_UNLOCK(dev);
ret = -EBUSY;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
eb = eb_create(args->buffer_count);
if (eb == NULL) {
DRM_UNLOCK(dev);
ret = -ENOMEM;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
/* Look up object handles */
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
- goto err;
+ goto err;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
args->batch_start_offset, args->batch_len);
}
- CTR4(KTR_DRM, "ring_dispatch %s %d exec %x %x", ring->name, seqno,
- exec_start, exec_len);
-
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
goto err;
}
+ CTR4(KTR_DRM, "ring_dispatch %s %d exec %x %x", ring->name, seqno,
+ exec_start, exec_len);
+
i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
i915_gem_execbuffer_retire_commands(dev, file, ring);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
+
DRM_UNLOCK(dev);
-pre_struct_lock_err:
+pre_mutex_err:
for (i = 0; i < args->buffer_count; i++) {
if (relocs_ma[i] != NULL) {
vm_page_unhold_pages(relocs_ma[i], relocs_len[i]);
args->buffer_count, ret);
free(exec_list, DRM_I915_GEM);
free(exec2_list, DRM_I915_GEM);
- return (ret);
+ return ret;
}
for (i = 0; i < args->buffer_count; i++) {
}
/* XXXKIB user-controllable malloc size */
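/*
 * args->buffer_count comes straight from the ioctl arguments, so the
 * allocation size below can be driven arbitrarily large by userspace;
 * that is what the XXXKIB marker flags.
 */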
- exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count,
- DRM_I915_GEM, M_WAITOK);
+ exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count,
+ DRM_I915_GEM, M_WAITOK);
ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
dev_priv->mem_freq = 1600;
break;
default:
- DRM_DEBUG("unknown memory frequency 0x%02x\n",
+ DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
ddrpll & 0xff);
dev_priv->mem_freq = 0;
break;
dev_priv->fsb_freq = 6400;
break;
default:
- DRM_DEBUG("unknown fsb frequency 0x%04x\n",
+ DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
csipll & 0x3ff);
dev_priv->fsb_freq = 0;
break;
* zero and give the hw a chance to gather more samples.
*/
if (diff1 <= 10)
- return (dev_priv->chipset_power);
+ return dev_priv->chipset_power;
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
dev_priv->last_time1 = now;
dev_priv->chipset_power = ret;
- return (ret);
+ return ret;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
+static void ibx_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+}
+
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
_3D_CHICKEN2_WM_READ_PIPELINED);
}
+static void cpt_init_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int pipe;
+
+ /*
+ * On Ibex Peak and Cougar Point, we need to disable clock
+ * gating for the panel power sequencer or it will fail to
+ * start up when no ports are active.
+ */
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
+ I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
+ DPLS_EDP_PPS_FIX_DIS);
+ /* Without this, mode sets may fail silently on FDI */
+ for_each_pipe(pipe)
+ I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
+}
+
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
-static void ibx_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
-}
-
-static void cpt_init_clock_gating(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int pipe;
-
- /*
- * On Ibex Peak and Cougar Point, we need to disable clock
- * gating for the panel power sequencer or it will fail to
- * start up when no ports are active.
- */
- I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
- I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
- DPLS_EDP_PPS_FIX_DIS);
- /* Without this, mode sets may fail silently on FDI */
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
-}
-
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int (*init)(struct intel_ring_buffer *ring);
void (*write_tail)(struct intel_ring_buffer *ring,
- uint32_t value);
+ u32 value);
int (*flush)(struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains);
+ u32 invalidate_domains,
+ u32 flush_domains);
int (*add_request)(struct intel_ring_buffer *ring,
uint32_t *seqno);
uint32_t (*get_seqno)(struct intel_ring_buffer *ring);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
- uint32_t offset, uint32_t length);
+ u32 offset, u32 length);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_ring_buffer *ring);
return 1 << ring->id;
}
-static inline uint32_t
+static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
struct intel_ring_buffer *other)
{
{
/* Ensure that the compiler doesn't optimize away the load. */
__compiler_membar();
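/*
 * atomic_load_acq_32() performs an acquire-ordered 32-bit read, so later
 * loads are not reordered ahead of reading the hardware-written status
 * page slot.
 */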
- return (atomic_load_acq_32(ring->status_page.page_addr + reg));
+ return atomic_load_acq_32(ring->status_page.page_addr + reg);
}
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
void i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno);
/* DRI warts */
-int intel_render_ring_init_dri(struct drm_device *dev, uint64_t start,
- uint32_t size);
+int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
#endif /* _INTEL_RINGBUFFER_H_ */