case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
                value = 1;
                break;
+       case I915_PARAM_HAS_SECURE_BATCHES:
+               value = capable(CAP_SYS_ADMIN);
+               break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
 
        u32 exec_start, exec_len;
        u32 seqno;
        u32 mask;
+       u32 flags;
        int ret, mode, i;
 
        if (!i915_gem_check_execbuffer(args)) {
        if (ret)
                return ret;
 
+       flags = 0;
+       if (args->flags & I915_EXEC_SECURE) {
+               if (!file->is_master || !capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               flags |= I915_DISPATCH_SECURE;
+       }
+
        switch (args->flags & I915_EXEC_RING_MASK) {
        case I915_EXEC_DEFAULT:
        case I915_EXEC_RENDER:
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
+       /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
+        * batch" bit. Hence we need to pin secure batches into the global gtt.
+        * hsw should have this fixed, but let's be paranoid and do it
+        * unconditionally for now. */
+       if ((flags & I915_DISPATCH_SECURE) && !batch_obj->has_global_gtt_mapping)
+               i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
+
        ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
        if (ret)
                goto err;
                        goto err;
        }
 
-       trace_i915_gem_ring_dispatch(ring, seqno);
+       trace_i915_gem_ring_dispatch(ring, seqno, flags);
 
        exec_start = batch_obj->gtt_offset + args->batch_start_offset;
        exec_len = args->batch_len;
                                goto err;
 
                        ret = ring->dispatch_execbuffer(ring,
-                                                       exec_start, exec_len);
+                                                       exec_start, exec_len,
+                                                       flags);
                        if (ret)
                                goto err;
                }
        } else {
-               ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+               ret = ring->dispatch_execbuffer(ring,
+                                               exec_start, exec_len,
+                                               flags);
                if (ret)
                        goto err;
        }
 
 #define   MI_INVALIDATE_TLB    (1<<18)
 #define   MI_INVALIDATE_BSD    (1<<7)
 #define MI_BATCH_BUFFER                MI_INSTR(0x30, 1)
-#define   MI_BATCH_NON_SECURE  (1)
-#define   MI_BATCH_NON_SECURE_I965 (1<<8)
+#define   MI_BATCH_NON_SECURE          (1)
+/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
+#define   MI_BATCH_NON_SECURE_I965     (1<<8)
+#define   MI_BATCH_PPGTT_HSW           (1<<8)
+#define   MI_BATCH_NON_SECURE_HSW      (1<<13)
 #define MI_BATCH_BUFFER_START  MI_INSTR(0x31, 0)
 #define   MI_BATCH_GTT             (2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_SEMAPHORE_MBOX      MI_INSTR(0x16, 1) /* gen6+ */
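
(Illustration only, not part of the patch.) The comment on MI_BATCH_NON_SECURE_I965 above is the crux of this series: on snb/ivb/vlv bit 8 means both "non-secure batch" and, with ppgtt enabled, "batch in ppgtt", which is why the execbuffer path pins secure batches into the global GTT, while hsw gains a dedicated ppgtt-select bit (bit 8) and moves the non-secure bit to bit 13. The hypothetical helper below condenses the per-generation dispatch_execbuffer() implementations further down into a single function to show how I915_DISPATCH_SECURE maps onto these bits; the name batch_start_dword and the gen/is_haswell parameters are invented for this sketch, the real driver picks one of the per-generation callbacks at ring init instead.

static u32 batch_start_dword(int gen, bool is_haswell, unsigned flags)
{
	bool secure = flags & I915_DISPATCH_SECURE;

	if (is_haswell) {
		/* bit 8 is a pure ppgtt select, non-secure moved to bit 13 */
		return MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
		       (secure ? 0 : MI_BATCH_NON_SECURE_HSW);
	}

	if (gen >= 6) {
		/* bit 8 doubles as "batch in ppgtt", hence the execbuffer
		 * path binds secure batches into the global GTT */
		return MI_BATCH_BUFFER_START |
		       (secure ? 0 : MI_BATCH_NON_SECURE_I965);
	}

	if (gen >= 4) {
		return MI_BATCH_BUFFER_START | MI_BATCH_GTT |
		       (secure ? 0 : MI_BATCH_NON_SECURE_I965);
	}

	/* gen2/3 carry the non-secure bit (bit 0) in the batch address dword
	 * instead (and gen2 uses the MI_BATCH_BUFFER opcode altogether) */
	return MI_BATCH_BUFFER_START | MI_BATCH_GTT;
}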
 
 );
 
 TRACE_EVENT(i915_gem_ring_dispatch,
-           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno),
-           TP_ARGS(ring, seqno),
+           TP_PROTO(struct intel_ring_buffer *ring, u32 seqno, u32 flags),
+           TP_ARGS(ring, seqno, flags),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u32, ring)
                             __field(u32, seqno)
+                            __field(u32, flags)
                             ),
 
            TP_fast_assign(
                           __entry->dev = ring->dev->primary->index;
                           __entry->ring = ring->id;
                           __entry->seqno = seqno;
+                          __entry->flags = flags;
                           i915_trace_irq_get(ring, seqno);
                           ),
 
-           TP_printk("dev=%u, ring=%u, seqno=%u",
-                     __entry->dev, __entry->ring, __entry->seqno)
+           TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
+                     __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
 );
 
 TRACE_EVENT(i915_gem_ring_flush,
 
 }
 
 static int
-i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
+i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                        u32 offset, u32 length,
+                        unsigned flags)
 {
        int ret;
 
        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        MI_BATCH_GTT |
-                       MI_BATCH_NON_SECURE_I965);
+                       (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
 
 
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                               u32 offset, u32 len)
+                               u32 offset, u32 len,
+                               unsigned flags)
 {
        int ret;
 
                return ret;
 
        intel_ring_emit(ring, MI_BATCH_BUFFER);
-       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
        intel_ring_emit(ring, offset + len - 8);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
 
 static int
 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                               u32 offset, u32 len)
+                        u32 offset, u32 len,
+                        unsigned flags)
 {
        int ret;
 
                return ret;
 
        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-       intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
        intel_ring_advance(ring);
 
        return 0;
        return 0;
 }
 
+static int
+hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+                             u32 offset, u32 len,
+                             unsigned flags)
+{
+       int ret;
+
+       ret = intel_ring_begin(ring, 2);
+       if (ret)
+               return ret;
+
+       intel_ring_emit(ring,
+                       MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
+                       (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
+       /* bit0-7 is the length on GEN6+ */
+       intel_ring_emit(ring, offset);
+       intel_ring_advance(ring);
+
+       return 0;
+}
+
 static int
 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
-                             u32 offset, u32 len)
+                             u32 offset, u32 len,
+                             unsigned flags)
 {
        int ret;
 
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+       intel_ring_emit(ring,
+                       MI_BATCH_BUFFER_START |
+                       (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);
                ring->irq_enable_mask = I915_USER_INTERRUPT;
        }
        ring->write_tail = ring_write_tail;
-       if (INTEL_INFO(dev)->gen >= 6)
+       if (IS_HASWELL(dev))
+               ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+       else if (INTEL_INFO(dev)->gen >= 6)
                ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
        else if (INTEL_INFO(dev)->gen >= 4)
                ring->dispatch_execbuffer = i965_dispatch_execbuffer;
 
        u32             (*get_seqno)(struct intel_ring_buffer *ring,
                                     bool lazy_coherency);
        int             (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
-                                              u32 offset, u32 length);
+                                              u32 offset, u32 length,
+                                              unsigned flags);
+#define I915_DISPATCH_SECURE 0x1
        void            (*cleanup)(struct intel_ring_buffer *ring);
        int             (*sync_to)(struct intel_ring_buffer *ring,
                                   struct intel_ring_buffer *to,
 
 #define I915_PARAM_HAS_SEMAPHORES       20
 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH         21
 #define I915_PARAM_RSVD_FOR_FUTURE_USE  22
+#define I915_PARAM_HAS_SECURE_BATCHES   23
 
 typedef struct drm_i915_getparam {
        int param;
 /** Resets the SO write offset registers for transform feedback on gen7. */
 #define I915_EXEC_GEN7_SOL_RESET       (1<<8)
 
+/** Request a privileged ("secure") batch buffer. Note: only available to
+ * DRM_ROOT_ONLY | DRM_MASTER processes.
+ */
+#define I915_EXEC_SECURE               (1<<9)
+
 #define I915_EXEC_CONTEXT_ID_MASK      (0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
        (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
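
For completeness, a minimal userspace sketch (again, not part of the patch): probe for the new capability with I915_PARAM_HAS_SECURE_BATCHES, which the kernel reports as 0 unless the caller has CAP_SYS_ADMIN, then set I915_EXEC_SECURE in the execbuffer2 flags. It assumes the updated i915_drm.h from this patch is the header being included; the function name submit_secure_batch and the fd/batch_handle/batch_len parameters are hypothetical (an open DRM-master device node and an already-filled GEM batch object), and error handling is mostly elided.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int submit_secure_batch(int fd, uint32_t batch_handle, uint32_t batch_len)
{
	struct drm_i915_getparam gp;
	struct drm_i915_gem_exec_object2 obj;
	struct drm_i915_gem_execbuffer2 execbuf;
	int has_secure = 0;

	/* I915_PARAM_HAS_SECURE_BATCHES returns capable(CAP_SYS_ADMIN) */
	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_HAS_SECURE_BATCHES;
	gp.value = &has_secure;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) || !has_secure)
		return -1;	/* kernel too old, or caller lacks CAP_SYS_ADMIN */

	memset(&obj, 0, sizeof(obj));
	obj.handle = batch_handle;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)&obj;
	execbuf.buffer_count = 1;
	execbuf.batch_len = batch_len;
	/* the fd must also be the current DRM master, or this returns -EPERM */
	execbuf.flags = I915_EXEC_RENDER | I915_EXEC_SECURE;

	return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}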