author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-23 11:48:48 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-23 11:48:48 -0700
commit     1d6da87a3241deb13d073c4125d19ed0e5a0c62c
tree       42b7a9842618dad2afe7db9709cc6217ced03120 /drivers/gpu/drm/i915/i915_gem_execbuffer.c
parent     1f40c49570eb01436786a9b5845c4469a9a1f362
parent     a39ed680bddb1ead592e22ed812c7e47286bfc03
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
"Here's the main drm pull request for 4.7, it's been a busy one, and
I've been a bit more distracted in real life this merge window. Lots
more ARM drivers, not sure if it'll ever end. I think I've at least
one more coming the next merge window.
But changes are all over the place: support for AMD Polaris GPUs is in
here, some missing GM108 support for nouveau (found in some Lenovos),
and a bunch of MST and Skylake fixes.
I've also noticed a few fixes from Arnd in my inbox that I'll try to
get in ASAP, but I didn't think they should hold this up.
New drivers:
- Hisilicon kirin display driver
- Mediatek MT8173 display driver
- ARC PGU - bitstreamer on Synopsys ARC SDP boards
- Allwinner A13 initial RGB output driver
- Analogix driver for DisplayPort IP found in exynos and rockchip
DRM Core:
- UAPI headers fixes and C++ safety
- DRM connector reference counting
- DisplayID mode parsing for Dell 5K monitors
- Removal of struct_mutex from drivers
- Connector registration cleanups
- MST robustness fixes
- MAINTAINERS updates
- Lockless GEM object freeing
- Generic fbdev deferred IO support
panel:
- Support for a bunch of new panels
i915:
- VBT refactoring
- PLL computation cleanups
- DSI support for BXT
- Color manager support
- More atomic patches
- GEM improvements
- GuC fw loading fixes
- DP detection fixes
- SKL GPU hang fixes
- Lots of BXT fixes
radeon/amdgpu:
- Initial Polaris support
- GPUVM/Scheduler/Clock/Power improvements
- ASYNC pageflip support
- New mesa feature support
nouveau:
- GM108 support
- Power sensor support improvements
- GR init + ucode fixes
- Use GPU provided topology information
vmwgfx:
- Add host messaging support
gma500:
- Some cleanups and fixes
atmel:
- Bridge support
- Async atomic commit support
fsl-dcu:
- Timing controller for LCD support
- Pixel clock polarity support
rcar-du:
- Misc fixes
exynos:
- Pipeline clock support
- Exynos 5433 SoC support
- HW trigger mode support
- Export HDMI_PHY clock
- DECON5433 fixes
- Use generic prime functions
- Use DMA mapping APIs
rockchip:
- Lots of little fixes
vc4:
- Render node support
- Gamma ramp support
- DPI output support
msm:
- Mostly cleanups and fixes
- Conversion to generic struct fence
etnaviv:
- Fix for prime buffer handling
- Allow hangcheck to be coalesced with other wakeups
tegra:
- Gamma table size fix"
* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (1050 commits)
drm/edid: add displayid detailed 1 timings to the modelist. (v1.1)
drm/edid: move displayid validation to its own function.
drm/displayid: Iterate over all DisplayID blocks
drm/edid: move displayid tiled block parsing into separate function.
drm: Nuke ->vblank_disable_allowed
drm/vmwgfx: Report vmwgfx version to vmware.log
drm/vmwgfx: Add VMWare host messaging capability
drm/vmwgfx: Kill some lockdep warnings
drm/nouveau/gr/gf100-: fix race condition in fecs/gpccs ucode
drm/nouveau/core: recognise GM108 chipsets
drm/nouveau/gr/gm107-: fix touching non-existent ppcs in attrib cb setup
drm/nouveau/gr/gk104-: share implementation of ppc exception init
drm/nouveau/gr/gk104-: move rop_active_fbps init to nonctx
drm/nouveau/bios/pll: check BIT table version before trying to parse it
drm/nouveau/bios/pll: prevent oops when limits table can't be parsed
drm/nouveau/volt/gk104: round up in gk104_volt_set
drm/nouveau/fb/gm200: setup mmu debug buffer registers at init()
drm/nouveau/fb/gk20a,gm20b: setup mmu debug buffer registers at init()
drm/nouveau/fb/gf100-: allocate mmu debug buffers
drm/nouveau/fb: allow chipset-specific actions for oneinit()
...
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 159
1 file changed, 79 insertions(+), 80 deletions(-)
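
The diff below is almost entirely mechanical: local struct intel_engine_cs variables are renamed from "ring" to "engine", open-coded dev->dev_private dereferences become to_i915(dev), and global-GTT accesses move from dev_priv->gtt to the dev_priv->ggtt embedded struct. A minimal stand-alone sketch of the accessor idiom being adopted; the struct definitions here are illustrative stand-ins, not the real i915 headers (the real ones live in i915_drv.h and i915_gem_gtt.h in the 4.7-era tree):

/* Stand-in types, heavily simplified for illustration. */
struct i915_ggtt {
	void *mappable;			/* an io_mapping in the real driver */
};

struct drm_device {
	void *dev_private;
};

struct drm_i915_private {
	struct i915_ggtt ggtt;		/* was: struct i915_gtt gtt */
};

/* At this point in history, to_i915() is essentially a typed wrapper
 * around dev->dev_private, so call sites stop open-coding the cast. */
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

With these in place, a call site reads ggtt->mappable where it used to read dev_priv->gtt.mappable, which is exactly what the first hunks of the diff do.
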
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d4d7c88ab5954..33df74d98269c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -313,7 +313,8 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		   uint64_t target_offset)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint64_t delta = relocation_target(reloc, target_offset);
 	uint64_t offset;
 	void __iomem *reloc_page;
@@ -330,7 +331,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	/* Map the page containing the relocation we're going to perform.  */
 	offset = i915_gem_obj_ggtt_offset(obj);
 	offset += reloc->offset;
-	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
 					      offset & PAGE_MASK);
 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
 
@@ -340,7 +341,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		if (offset_in_page(offset) == 0) {
 			io_mapping_unmap_atomic(reloc_page);
 			reloc_page =
-				io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+				io_mapping_map_atomic_wc(ggtt->mappable,
 							 offset);
 		}
 
@@ -597,7 +598,7 @@ static bool only_mappable_for_reloc(unsigned int flags)
 
 static int
 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
-				struct intel_engine_cs *ring,
+				struct intel_engine_cs *engine,
 				bool *need_reloc)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
@@ -711,7 +712,7 @@ eb_vma_misplaced(struct i915_vma *vma)
 }
 
 static int
-i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
+i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 			    struct list_head *vmas,
 			    struct intel_context *ctx,
 			    bool *need_relocs)
@@ -721,10 +722,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 	struct i915_address_space *vm;
 	struct list_head ordered_vmas;
 	struct list_head pinned_vmas;
-	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4;
 	int retry;
 
-	i915_gem_retire_requests_ring(ring);
+	i915_gem_retire_requests_ring(engine);
 
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
@@ -786,7 +787,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			if (eb_vma_misplaced(vma))
 				ret = i915_vma_unbind(vma);
 			else
-				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+				ret = i915_gem_execbuffer_reserve_vma(vma,
+								      engine,
+								      need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -796,7 +799,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
 			if (drm_mm_node_allocated(&vma->node))
 				continue;
 
-			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
+			ret = i915_gem_execbuffer_reserve_vma(vma, engine,
+							      need_relocs);
 			if (ret)
 				goto err;
 		}
@@ -819,7 +823,7 @@ static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 				  struct drm_i915_gem_execbuffer2 *args,
 				  struct drm_file *file,
-				  struct intel_engine_cs *ring,
+				  struct intel_engine_cs *engine,
 				  struct eb_vmas *eb,
 				  struct drm_i915_gem_exec_object2 *exec,
 				  struct intel_context *ctx)
@@ -908,7 +912,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 		goto err;
 
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+					  &need_relocs);
 	if (ret)
 		goto err;
 
@@ -936,7 +941,7 @@ static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_ring_flag(req->ring);
+	const unsigned other_rings = ~intel_engine_flag(req->engine);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -946,7 +951,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->ring, &req);
+			ret = i915_gem_object_sync(obj, req->engine, &req);
 			if (ret)
 				return ret;
 		}
@@ -958,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	}
 
 	if (flush_chipset)
-		i915_gem_chipset_flush(req->ring->dev);
+		i915_gem_chipset_flush(req->engine->dev);
 
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
@@ -1060,12 +1065,12 @@ validate_exec_list(struct drm_device *dev,
 
 static struct intel_context *
 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
-			  struct intel_engine_cs *ring, const u32 ctx_id)
+			  struct intel_engine_cs *engine, const u32 ctx_id)
 {
 	struct intel_context *ctx = NULL;
 	struct i915_ctx_hang_stats *hs;
 
-	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
+	if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
 		return ERR_PTR(-EINVAL);
 
 	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
@@ -1078,8 +1083,8 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 		return ERR_PTR(-EIO);
 	}
 
-	if (i915.enable_execlists && !ctx->engine[ring->id].state) {
-		int ret = intel_lr_context_deferred_alloc(ctx, ring);
+	if (i915.enable_execlists && !ctx->engine[engine->id].state) {
+		int ret = intel_lr_context_deferred_alloc(ctx, engine);
 		if (ret) {
 			DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
 			return ERR_PTR(ret);
@@ -1093,7 +1098,7 @@ void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
+	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
@@ -1120,7 +1125,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
 			i915_gem_request_assign(&obj->last_fenced_req, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = to_i915(ring->dev);
+				struct drm_i915_private *dev_priv = to_i915(engine->dev);
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
 					       &dev_priv->mm.fence_list);
 			}
@@ -1130,11 +1135,11 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 	}
 }
 
-void
+static void
i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
 	/* Unconditionally force add_request to emit a full flush. */
-	params->ring->gpu_caches_dirty = true;
+	params->engine->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
 	__i915_add_request(params->request, params->batch_obj, true);
@@ -1144,11 +1149,11 @@ static int
 i915_reset_gen7_sol_offsets(struct drm_device *dev,
 			    struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i;
 
-	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
+	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -1158,18 +1163,18 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 		return ret;
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit(engine, 0);
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(engine);
 
 	return 0;
 }
 
 static struct drm_i915_gem_object*
-i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
 			  struct eb_vmas *eb,
 			  struct drm_i915_gem_object *batch_obj,
@@ -1181,12 +1186,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
 	struct i915_vma *vma;
 	int ret;
 
-	shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
+	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
 						   PAGE_ALIGN(batch_len));
 	if (IS_ERR(shadow_batch_obj))
 		return shadow_batch_obj;
 
-	ret = i915_parse_cmds(ring,
+	ret = i915_parse_cmds(engine,
 			      batch_obj,
 			      shadow_batch_obj,
 			      batch_start_offset,
@@ -1227,7 +1232,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct list_head *vmas)
 {
 	struct drm_device *dev = params->dev;
-	struct intel_engine_cs *ring = params->ring;
+	struct intel_engine_cs *engine = params->engine;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u64 exec_start, exec_len;
 	int instp_mode;
@@ -1242,8 +1247,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;
 
-	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
-	     "%s didn't clear reload\n", ring->name);
+	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
+	     "%s didn't clear reload\n", engine->name);
 
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	instp_mask = I915_EXEC_CONSTANTS_MASK;
@@ -1251,7 +1256,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
+		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -1278,17 +1283,17 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	if (ring == &dev_priv->ring[RCS] &&
+	if (engine == &dev_priv->engine[RCS] &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(engine, INSTPM);
+		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
+		intel_ring_advance(engine);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
@@ -1306,7 +1311,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (exec_len == 0)
 		exec_len = params->batch_obj->base.size;
 
-	ret = ring->dispatch_execbuffer(params->request,
+	ret = engine->dispatch_execbuffer(params->request,
 					exec_start, exec_len,
 					params->dispatch_flags);
 	if (ret)
@@ -1315,7 +1320,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, params->request);
-	i915_gem_execbuffer_retire_commands(params);
 
 	return 0;
 }
@@ -1363,7 +1367,7 @@ eb_get_batch(struct eb_vmas *eb)
 
 #define I915_USER_RINGS (4)
 
-static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
 	[I915_EXEC_DEFAULT]	= RCS,
 	[I915_EXEC_RENDER]	= RCS,
 	[I915_EXEC_BLT]		= BCS,
@@ -1406,12 +1410,12 @@ eb_select_ring(struct drm_i915_private *dev_priv,
 			return -EINVAL;
 		}
 
-		*ring = &dev_priv->ring[_VCS(bsd_idx)];
+		*ring = &dev_priv->engine[_VCS(bsd_idx)];
 	} else {
-		*ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+		*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
 	}
 
-	if (!intel_ring_initialized(*ring)) {
+	if (!intel_engine_initialized(*ring)) {
 		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
 		return -EINVAL;
 	}
@@ -1425,12 +1429,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_i915_gem_execbuffer2 *args,
 		       struct drm_i915_gem_exec_object2 *exec)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
-	struct intel_engine_cs *ring;
+	struct intel_engine_cs *engine;
 	struct intel_context *ctx;
 	struct i915_address_space *vm;
 	struct i915_execbuffer_params params_master; /* XXX: will be removed later */
@@ -1457,7 +1462,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (args->flags & I915_EXEC_IS_PINNED)
 		dispatch_flags |= I915_DISPATCH_PINNED;
 
-	ret = eb_select_ring(dev_priv, file, args, &ring);
+	ret = eb_select_ring(dev_priv, file, args, &engine);
 	if (ret)
 		return ret;
 
@@ -1471,9 +1476,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
 			return -EINVAL;
 		}
-		if (ring->id != RCS) {
+		if (engine->id != RCS) {
 			DRM_DEBUG("RS is not available on %s\n",
-				 ring->name);
+				 engine->name);
 			return -EINVAL;
 		}
 
@@ -1486,7 +1491,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto pre_mutex_err;
 
-	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
+	ctx = i915_gem_validate_context(dev, file, engine, ctx_id);
 	if (IS_ERR(ctx)) {
 		mutex_unlock(&dev->struct_mutex);
 		ret = PTR_ERR(ctx);
@@ -1498,7 +1503,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ctx->ppgtt)
 		vm = &ctx->ppgtt->base;
 	else
-		vm = &dev_priv->gtt.base;
+		vm = &ggtt->base;
 
 	memset(&params_master, 0x00, sizeof(params_master));
 
@@ -1520,7 +1525,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
-	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
+	ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx,
+					  &need_relocs);
 	if (ret)
 		goto err;
 
@@ -1529,7 +1535,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	ret = i915_gem_execbuffer_relocate(eb);
 	if (ret) {
 		if (ret == -EFAULT) {
-			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
+			ret = i915_gem_execbuffer_relocate_slow(dev, args, file,
+								engine,
 								eb, exec, ctx);
 			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 		}
@@ -1545,16 +1552,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	params->args_batch_start_offset = args->batch_start_offset;
-	if (i915_needs_cmd_parser(ring) && args->batch_len) {
+	if (i915_needs_cmd_parser(engine) && args->batch_len) {
 		struct drm_i915_gem_object *parsed_batch_obj;
 
-		parsed_batch_obj = i915_gem_execbuffer_parse(ring,
-						      &shadow_exec_entry,
-						      eb,
-						      batch_obj,
-						      args->batch_start_offset,
-						      args->batch_len,
-						      file->is_master);
+		parsed_batch_obj = i915_gem_execbuffer_parse(engine,
+							     &shadow_exec_entry,
+							     eb,
+							     batch_obj,
+							     args->batch_start_offset,
+							     args->batch_len,
+							     file->is_master);
 		if (IS_ERR(parsed_batch_obj)) {
 			ret = PTR_ERR(parsed_batch_obj);
 			goto err;
@@ -1606,7 +1613,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	req = i915_gem_request_alloc(ring, ctx);
+	req = i915_gem_request_alloc(engine, ctx);
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
 		goto err_batch_unpin;
@@ -1614,7 +1621,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	ret = i915_gem_request_add_to_client(req, file);
 	if (ret)
-		goto err_batch_unpin;
+		goto err_request;
 
 	/*
 	 * Save assorted stuff away to pass through to *_submission().
@@ -1624,13 +1631,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 */
 	params->dev = dev;
 	params->file = file;
-	params->ring = ring;
+	params->engine = engine;
 	params->dispatch_flags = dispatch_flags;
 	params->batch_obj = batch_obj;
 	params->ctx = ctx;
 	params->request = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+err_request:
+	i915_gem_execbuffer_retire_commands(params);
 
 err_batch_unpin:
 	/*
@@ -1647,14 +1656,6 @@ err:
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);
 
-	/*
-	 * If the request was created but not successfully submitted then it
-	 * must be freed again. If it was submitted then it is being tracked
-	 * on the active request list and no clean up is required here.
-	 */
-	if (ret && !IS_ERR_OR_NULL(req))
-		i915_gem_request_cancel(req);
-
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
@@ -1773,11 +1774,9 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
-			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
-	if (exec2_list == NULL)
-		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
-					   args->buffer_count);
+	exec2_list = drm_malloc_gfp(args->buffer_count,
+				    sizeof(*exec2_list),
+				    GFP_TEMPORARY);
 	if (exec2_list == NULL) {
 		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
 			  args->buffer_count);
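
The last hunk replaces an open-coded two-step allocation (kmalloc with __GFP_NOWARN | __GFP_NORETRY, then a drm_malloc_ab() fallback) with a single drm_malloc_gfp() call, which performs the overflow check and the kmalloc-then-vmalloc fallback internally. A user-space sketch of the pattern being consolidated, with alloc_exec_list() as a hypothetical stand-in for the kernel helper:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for drm_malloc_gfp(): overflow-checked array
 * allocation. The kernel helper additionally tries kmalloc first and
 * falls back to vmalloc for large arrays; plain malloc stands in for
 * both here. */
static void *alloc_exec_list(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;	/* nmemb * size would overflow */
	return malloc(nmemb * size);
}

int main(void)
{
	size_t buffer_count = 16;	/* plays the role of args->buffer_count */
	void *exec2_list = alloc_exec_list(buffer_count, 32);

	if (exec2_list == NULL) {
		fprintf(stderr, "Failed to allocate exec list for %zu buffers\n",
			buffer_count);
		return 1;
	}
	free(exec2_list);
	return 0;
}

The other behavioural change worth noting is in the error path: i915_gem_execbuffer_retire_commands() becomes static, its call moves out of the submission path, and the explicit i915_gem_request_cancel() in the err: block is dropped in favour of the new err_request: label, so a successfully allocated request is always flushed and added rather than cancelled.
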