 #include <linux/highmem.h>
 #include <linux/seq_file.h>
 #include <linux/magic.h>
+#include <linux/fcntl.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 {
        struct inode *inode = dentry->d_inode;
+       struct shmem_inode_info *info = SHMEM_I(inode);
        int error;
 
        error = inode_change_ok(inode, attr);
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;
 
+               /* protected by i_mutex */
+               if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
+                   (newsize > oldsize && (info->seals & F_SEAL_GROW)))
+                       return -EPERM;
+
                if (newsize != oldsize) {
                        error = shmem_reacct_size(SHMEM_I(inode)->flags,
                                        oldsize, newsize);
                info = SHMEM_I(inode);
                memset(info, 0, (char *)inode - (char *)info);
                spin_lock_init(&info->lock);
+               info->seals = F_SEAL_SEAL;      /* refuse further seals by default */
                info->flags = flags & VM_NORESERVE;
                INIT_LIST_HEAD(&info->swaplist);
                simple_xattrs_init(&info->xattrs);
                        struct page **pagep, void **fsdata)
 {
        struct inode *inode = mapping->host;
+       struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+
+       /* i_mutex is held by caller */
+       if (unlikely(info->seals)) {
+               if (info->seals & F_SEAL_WRITE)
+                       return -EPERM;
+               if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
+                       return -EPERM;
+       }
+
        return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
 }
 
        return offset;
 }
 
+static int shmem_wait_for_pins(struct address_space *mapping)
+{
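+       /* no waiting for page pins is implemented yet; report success */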
+       return 0;
+}
+
+#define F_ALL_SEALS (F_SEAL_SEAL | \
+                    F_SEAL_SHRINK | \
+                    F_SEAL_GROW | \
+                    F_SEAL_WRITE)
+
+int shmem_add_seals(struct file *file, unsigned int seals)
+{
+       struct inode *inode = file_inode(file);
+       struct shmem_inode_info *info = SHMEM_I(inode);
+       int error;
+
+       /*
+        * SEALING
+        * Sealing allows multiple parties to share a shmem-file but restrict
+        * access to a specific subset of file operations. Seals can only be
+        * added, but never removed. This way, mutually untrusted parties can
+        * share common memory regions with a well-defined policy. A malicious
+        * peer can thus never perform unwanted operations on a shared object.
+        *
+        * Seals are only supported on special shmem-files and always affect
+        * the whole underlying inode. Once a seal is set, it may prevent some
+        * kinds of access to the file. Currently, the following seals are
+        * defined:
+        *   SEAL_SEAL: Prevent further seals from being set on this file
+        *   SEAL_SHRINK: Prevent the file from shrinking
+        *   SEAL_GROW: Prevent the file from growing
+        *   SEAL_WRITE: Prevent write access to the file
+        *
+        * As we don't require any trust relationship between two parties, we
+        * must prevent seals from being removed. Therefore, sealing a file
+        * only adds a given set of seals to the file, it never touches
+        * existing seals. Furthermore, the "setting seals"-operation can be
+        * sealed itself, which basically prevents any further seal from being
+        * added.
+        *
+        * Semantics of sealing are only defined on volatile files. Only
+        * anonymous shmem files support sealing. More importantly, seals are
+        * never written to disk. Therefore, there's no plan to support it on
+        * other file types.
+        */
+
+       if (file->f_op != &shmem_file_operations)
+               return -EINVAL;
+       if (!(file->f_mode & FMODE_WRITE))
+               return -EPERM;
+       if (seals & ~(unsigned int)F_ALL_SEALS)
+               return -EINVAL;
+
+       mutex_lock(&inode->i_mutex);
+
+       if (info->seals & F_SEAL_SEAL) {
+               error = -EPERM;
+               goto unlock;
+       }
+
+       if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
+               /* fail if writable mappings exist; otherwise block new ones */
+               error = mapping_deny_writable(file->f_mapping);
+               if (error)
+                       goto unlock;
+
+               error = shmem_wait_for_pins(file->f_mapping);
+               if (error) {
+                       mapping_allow_writable(file->f_mapping);
+                       goto unlock;
+               }
+       }
+
+       info->seals |= seals;
+       error = 0;
+
+unlock:
+       mutex_unlock(&inode->i_mutex);
+       return error;
+}
+EXPORT_SYMBOL_GPL(shmem_add_seals);
+
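For illustration only, not part of this patch: once fcntl() forwards the new commands to shmem_fcntl() below, sealing is driven from userspace roughly as in the following sketch. It assumes a descriptor that already refers to an anonymous shmem file, for instance one obtained from memfd_create(); depending on the libc, the F_ADD_SEALS/F_GET_SEALS and F_SEAL_* constants may have to come from <linux/fcntl.h> instead of <fcntl.h>.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

/* Sketch: forbid any future size changes on fd and dump the seal set. */
static int freeze_size(int fd)
{
        /* seals are only ever added; whatever is already set is kept */
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW) < 0) {
                perror("F_ADD_SEALS");
                return -1;
        }

        /* read back the complete seal set now active on the inode */
        int seals = fcntl(fd, F_GET_SEALS);

        if (seals < 0) {
                perror("F_GET_SEALS");
                return -1;
        }

        printf("active seals: %#x\n", seals);
        return 0;
}

A second party holding the same descriptor sees the same seal set, since seals live on the inode rather than on the open file description.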
+int shmem_get_seals(struct file *file)
+{
+       if (file->f_op != &shmem_file_operations)
+               return -EINVAL;
+
+       return SHMEM_I(file_inode(file))->seals;
+}
+EXPORT_SYMBOL_GPL(shmem_get_seals);
+
+long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       long error;
+
+       switch (cmd) {
+       case F_ADD_SEALS:
+               /* disallow upper 32bit */
+               if (arg > UINT_MAX)
+                       return -EINVAL;
+
+               error = shmem_add_seals(file, arg);
+               break;
+       case F_GET_SEALS:
+               error = shmem_get_seals(file);
+               break;
+       default:
+               error = -EINVAL;
+               break;
+       }
+
+       return error;
+}
+
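shmem_fcntl() is only the backend half; the generic fcntl() path (do_fcntl() in fs/fcntl.c, not shown in this diff) still has to forward F_ADD_SEALS and F_GET_SEALS to it. A minimal sketch of that forwarding, using a hypothetical helper name, might look like this:

/* Hypothetical sketch only -- the real hook-up lives in fs/fcntl.c. */
static long forward_seal_cmds(struct file *filp, unsigned int cmd,
                              unsigned long arg)
{
        switch (cmd) {
        case F_ADD_SEALS:
        case F_GET_SEALS:
                /* shmem_fcntl() rejects non-shmem files with -EINVAL */
                return shmem_fcntl(filp, cmd, arg);
        default:
                return -EINVAL;
        }
}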
 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                                                         loff_t len)
 {
        struct inode *inode = file_inode(file);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+       struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_falloc shmem_falloc;
        pgoff_t start, index, end;
        int error;
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
                DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
 
+               /* protected by i_mutex */
+               if (info->seals & F_SEAL_WRITE) {
+                       error = -EPERM;
+                       goto out;
+               }
+
                shmem_falloc.waitq = &shmem_falloc_waitq;
                shmem_falloc.start = unmap_start >> PAGE_SHIFT;
                shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
        if (error)
                goto out;
 
+       if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
+               error = -EPERM;
+               goto out;
+       }
+
        start = offset >> PAGE_CACHE_SHIFT;
        end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        /* Try to avoid a swapstorm if len is impossible to satisfy */