if (client->vm)
                atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
        list_add(&nv_engctx(engctx)->head, &engine->contexts);
+       nv_engctx(engctx)->addr = ~0ULL;
        spin_unlock_irqrestore(&engine->lock, save);
        return 0;
 }
 
        mutex_unlock(&nv_subdev(priv)->mutex);
 }
 
+/* Record the owning channel's id as this engine context's address.
+ * On NV04-class PFIFO the context is identified by channel id rather
+ * than by a GPU object address; later attach paths consume
+ * nv_engctx()->addr when programming the context (e.g. the nv_wr32 /
+ * nv_wo32 writes in the nv40-style context_attach hunk above).
+ * Always succeeds, hence the unconditional 0 return.
+ * NOTE(review): assumes `parent` is always a fifo channel object so
+ * nouveau_fifo_chan(parent) is valid — confirm against callers.
+ */
+int
+nv04_fifo_context_attach(struct nouveau_object *parent,
+                        struct nouveau_object *object)
+{
+       nv_engctx(object)->addr = nouveau_fifo_chan(parent)->chid;
+       return 0;
+}
+
 static int
 nv04_fifo_chan_ctor(struct nouveau_object *parent,
                    struct nouveau_object *engine,
 
        nv_parent(chan)->object_attach = nv04_fifo_object_attach;
        nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+       nv_parent(chan)->context_attach = nv04_fifo_context_attach;
        chan->ramfc = chan->base.chid * 32;
 
        nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
 
 
        nv_parent(chan)->object_attach = nv04_fifo_object_attach;
        nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+       nv_parent(chan)->context_attach = nv04_fifo_context_attach;
        chan->ramfc = chan->base.chid * 32;
 
        nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
 
 
        nv_parent(chan)->object_attach = nv04_fifo_object_attach;
        nv_parent(chan)->object_detach = nv04_fifo_object_detach;
+       nv_parent(chan)->context_attach = nv04_fifo_context_attach;
        chan->ramfc = chan->base.chid * 64;
 
        nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->offset);
 
        }
 
        spin_lock_irqsave(&priv->base.lock, flags);
+       nv_engctx(engctx)->addr = nv_gpuobj(engctx)->addr >> 4;
        nv_mask(priv, 0x002500, 0x00000001, 0x00000000);
 
        if ((nv_rd32(priv, 0x003204) & priv->base.max) == chan->base.chid)
-               nv_wr32(priv, reg, nv_gpuobj(engctx)->addr >> 4);
-       nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_gpuobj(engctx)->addr >> 4);
+               nv_wr32(priv, reg, nv_engctx(engctx)->addr);
+       nv_wo32(priv->ramfc, chan->ramfc + ctx, nv_engctx(engctx)->addr);
 
        nv_mask(priv, 0x002500, 0x00000001, 0x00000001);
        spin_unlock_irqrestore(&priv->base.lock, flags);
 
                return -EINVAL;
        }
 
+       nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
        nv_wo32(base->eng, addr + 0x00, 0x00190000);
        nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
        nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
 
                return -EINVAL;
        }
 
+       nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
        nv_wo32(base->eng, addr + 0x00, 0x00190000);
        nv_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
        nv_wo32(base->eng, addr + 0x08, lower_32_bits(start));
 
                                            NV_MEM_ACCESS_RW, &ectx->vma);
                if (ret)
                        return ret;
+
+               nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
        }
 
        nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
 
                                            NV_MEM_ACCESS_RW, &ectx->vma);
                if (ret)
                        return ret;
+
+               nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
        }
 
        nv_wo32(base, addr + 0x00, lower_32_bits(ectx->vma.offset) | 4);
 
        struct nouveau_gpuobj base;
        struct nouveau_vma vma;
        struct list_head head;
+       u64 addr;
 };
 
 static inline struct nouveau_engctx *
 
 extern struct nouveau_oclass nve0_fifo_oclass;
 
 void nv04_fifo_intr(struct nouveau_subdev *);
+int  nv04_fifo_context_attach(struct nouveau_object *, struct nouveau_object *);
 
 #endif