@@ -50,6 +50,49 @@ static void nouveau_gem_prime_kunmap(struct dma_buf *buf, unsigned long page,
{
}

+/* Offset of the BO in the mmap offset space, in pages rather than bytes. */
+static inline u64 nouveau_bo_mmap_offset(struct nouveau_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node) >> PAGE_SHIFT;
+}
+
+static int nouveau_gem_prime_mmap(struct dma_buf *buf,
+ struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = buf->priv;
+ struct nouveau_bo *bo = nouveau_gem_object(obj);
+ int ret;
+
+ /* Reject mappings larger than the underlying object. */
+ if (obj->size < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
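+ /* Redirect the mapping to the BO's node in the mmap offset space. */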
+ vma->vm_pgoff += nouveau_bo_mmap_offset(bo);
+
+ /*
+ * Offsets handed out by the VMA offset manager start at
+ * DRM_FILE_PAGE_OFFSET; anything below is bogus, and a dma-buf
+ * file cannot be handled by the legacy mmap path anyway.
+ */
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
+ return -EINVAL;
+
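+ /*
+ * Whitelist the dma-buf file on the BO's VMA node so the driver's
+ * verify_access() check in ttm_bo_mmap() passes; revoke again once
+ * the mapping has been set up.
+ */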
+ ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_mmap(vma->vm_file, vma, bo->bo.bdev);
+ drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
+
+ return ret;
+}
+
static void *nouveau_gem_prime_vmap(struct dma_buf *buf)
{
struct nouveau_bo *bo = nouveau_gem_object(buf->priv);
@@ -79,6 +122,7 @@ static const struct dma_buf_ops nouveau_gem_prime_dmabuf_ops = {
.unmap_atomic = nouveau_gem_prime_kunmap_atomic,
.map = nouveau_gem_prime_kmap,
.unmap = nouveau_gem_prime_kunmap,
+ .mmap = nouveau_gem_prime_mmap,
.vmap = nouveau_gem_prime_vmap,
.vunmap = nouveau_gem_prime_vunmap,
};
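
For context, the new .mmap callback is what lets an importer (or userspace
holding the exported fd) call mmap() on the dma-buf directly. A minimal
sketch of that caller side, assuming the fd was exported for a nouveau BO
(map_dmabuf, dmabuf_fd and size are illustrative names, not part of this
patch):

    #include <err.h>
    #include <stddef.h>
    #include <sys/mman.h>

    /* Illustrative only: map a dma-buf into the caller's address space. */
    static void *map_dmabuf(int dmabuf_fd, size_t size)
    {
            /* Offset 0 into the dma-buf; the new .mmap callback rebases
             * it onto the BO's fake offset before handing off to TTM. */
            void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                             MAP_SHARED, dmabuf_fd, 0);

            if (ptr == MAP_FAILED)
                    err(1, "mmap(dma-buf)");
            return ptr;
    }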