afs: [DON'T MERGE] Implement trivial content crypto for testing purposes

Implement trivial content encryption for afs, for testing purposes only.
When the "fscrypt" mount option is given, regular-file content is
encrypted with AES-CBC using a key trivially derived (SHA-256) from the
cell name and the vnode's FID.  This provides no real confidentiality;
it exists solely to exercise the netfs content-crypto paths
(->encrypt_block/->decrypt_block) with 64KiB crypto blocks.
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 42131fe..e040a94 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -141,6 +141,12 @@ int afs_open(struct inode *inode, struct file *file)
 	if (ret < 0)
 		goto error_af;
 
+	if (test_bit(NETFS_ICTX_ENCRYPTED, &vnode->netfs.flags) && !vnode->content_ci) {
+		ret = afs_open_crypto(vnode);
+		if (ret < 0)
+			goto error_af;
+	}
+
 	if (file->f_mode & FMODE_WRITE) {
 		ret = afs_cache_wb_key(vnode, af);
 		if (ret < 0)
@@ -387,8 +393,14 @@ static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
 
 	if (file)
 		rreq->netfs_priv = key_get(afs_file_key(file));
-	rreq->rsize = 256 * 1024;
-	rreq->wsize = 256 * 1024 * 1024;
+	if (test_bit(NETFS_RREQ_CONTENT_ENCRYPTION, &rreq->flags) &&
+	    S_ISREG(rreq->inode->i_mode)) {
+		rreq->rsize = 64 * 1024;
+		rreq->wsize = 64 * 1024;
+	} else {
+		rreq->rsize = 256 * 1024;
+		rreq->wsize = 256 * 1024 * 1024;
+	}
 
 	switch (rreq->origin) {
 	case NETFS_READ_SINGLE:
@@ -460,6 +472,8 @@ const struct netfs_request_ops afs_req_ops = {
 	.estimate_write		= afs_estimate_write,
 	.issue_write		= afs_issue_write,
 	.retry_request		= afs_retry_request,
+	.encrypt_block		= afs_encrypt_block,
+	.decrypt_block		= afs_decrypt_block,
 };
 
 static void afs_add_open_mmap(struct afs_vnode *vnode)
diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
index c0dbbc6..9f1925b 100644
--- a/fs/afs/fs_operation.c
+++ b/fs/afs/fs_operation.c
@@ -58,7 +58,7 @@ struct afs_io_locker {
 /*
  * Unlock the I/O lock on a vnode.
  */
-static void afs_unlock_for_io(struct afs_vnode *vnode)
+void afs_unlock_for_io(struct afs_vnode *vnode)
 {
 	struct afs_io_locker *locker;
 
@@ -80,7 +80,7 @@ static void afs_unlock_for_io(struct afs_vnode *vnode)
  * Lock the I/O lock on a vnode uninterruptibly.  We can't use an ordinary
  * mutex as lockdep will complain if we unlock it in the wrong thread.
  */
-static void afs_lock_for_io(struct afs_vnode *vnode)
+void afs_lock_for_io(struct afs_vnode *vnode)
 {
 	struct afs_io_locker myself = { .task = current, };
 
@@ -107,7 +107,7 @@ static void afs_lock_for_io(struct afs_vnode *vnode)
  * Lock the I/O lock on a vnode interruptibly.  We can't use an ordinary mutex
  * as lockdep will complain if we unlock it in the wrong thread.
  */
-static int afs_lock_for_io_interruptible(struct afs_vnode *vnode)
+int afs_lock_for_io_interruptible(struct afs_vnode *vnode)
 {
 	struct afs_io_locker myself = { .task = current, };
 	int ret = 0;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 94e3442..d71d82f 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -22,6 +22,7 @@
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/iversion.h>
+#include <crypto/skcipher.h>
 #include "internal.h"
 #include "afs_fs.h"
 
@@ -144,7 +145,12 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
  */
 static void afs_set_netfs_context(struct afs_vnode *vnode)
 {
+	struct afs_super_info *as = AFS_FS_S(vnode->netfs.inode.i_sb);
+	struct netfs_inode *ictx = &vnode->netfs;
+
 	netfs_inode_init(&vnode->netfs, &afs_req_ops, true);
+	if (vnode->status.type == AFS_FTYPE_FILE && as->fscrypt)
+		__set_bit(NETFS_ICTX_ENCRYPTED, &ictx->flags);
 }
 
 /*
@@ -763,6 +769,9 @@ void afs_evict_inode(struct inode *inode)
 	truncate_inode_pages_final(&inode->i_data);
 	bvecq_put(vnode->directory);
 
+	if (vnode->content_ci)
+		crypto_free_skcipher(vnode->content_ci);
+
 	afs_set_cache_aux(vnode, &aux);
 	netfs_clear_inode_writeback(inode, &aux);
 	clear_inode(inode);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index a60df93..a8544ea 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -53,6 +53,7 @@ struct afs_fs_context {
 	bool			autocell;	/* T if set auto mount operation */
 	bool			dyn_root;	/* T if dynamic root */
 	bool			no_cell;	/* T if the source is "none" (for dynroot) */
+	bool			fscrypt;	/* T if content encryption is engaged */
 	enum afs_flock_mode	flock_mode;	/* Partial file-locking emulation mode */
 	afs_voltype_t		type;		/* type of volume requested */
 	unsigned int		volnamesz;	/* size of volume name */
@@ -252,6 +253,7 @@ struct afs_super_info {
 	struct afs_volume	*volume;	/* volume record */
 	enum afs_flock_mode	flock_mode:8;	/* File locking emulation mode */
 	bool			dyn_root;	/* True if dynamic root */
+	bool			fscrypt;	/* T if content encryption is engaged */
 };
 
 static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
@@ -695,6 +697,7 @@ struct afs_vnode {
 	struct rw_semaphore	validate_lock;	/* lock for validating this vnode */
 	struct rw_semaphore	rmdir_lock;	/* Lock for rmdir vs sillyrename */
 	struct key		*silly_key;	/* Silly rename key */
+	struct crypto_skcipher	*content_ci;	/* Content crypto cipher */
 	spinlock_t		wb_lock;	/* lock for wb_keys */
 	spinlock_t		lock;		/* waitqueue/flags lock */
 	unsigned long		flags;
@@ -1206,6 +1209,9 @@ extern void afs_fs_store_acl(struct afs_operation *);
 /*
  * fs_operation.c
  */
+void afs_unlock_for_io(struct afs_vnode *vnode);
+void afs_lock_for_io(struct afs_vnode *vnode);
+int afs_lock_for_io_interruptible(struct afs_vnode *vnode);
 extern struct afs_operation *afs_alloc_operation(struct key *, struct afs_volume *);
 extern int afs_put_operation(struct afs_operation *);
 extern bool afs_begin_vnode_operation(struct afs_operation *);
@@ -1690,6 +1696,11 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
 extern void afs_prune_wb_keys(struct afs_vnode *);
+int afs_open_crypto(struct afs_vnode *vnode);
+void afs_encrypt_block(struct netfs_io_subrequest *subreq, gfp_t gfp);
+int afs_decrypt_block(struct netfs_io_request *rreq, loff_t pos, size_t len,
+		      struct scatterlist *source_sg, unsigned int n_source,
+		      struct scatterlist *dest_sg, unsigned int n_dest);
 
 /*
  * xattr.c
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 942f3e9..939e971 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -71,6 +71,7 @@ enum afs_param {
 	Opt_autocell,
 	Opt_dyn,
 	Opt_flock,
+	Opt_fscrypt,
 	Opt_source,
 };
 
@@ -86,6 +87,7 @@ static const struct fs_parameter_spec afs_fs_parameters[] = {
 	fsparam_flag  ("autocell",	Opt_autocell),
 	fsparam_flag  ("dyn",		Opt_dyn),
 	fsparam_enum  ("flock",		Opt_flock, afs_param_flock),
+	fsparam_flag  ("fscrypt",	Opt_fscrypt),
 	fsparam_string("source",	Opt_source),
 	{}
 };
@@ -194,6 +196,8 @@ static int afs_show_options(struct seq_file *m, struct dentry *root)
 
 	if (as->dyn_root)
 		seq_puts(m, ",dyn");
+	if (as->fscrypt)
+		seq_puts(m, ",fscrypt");
 	switch (as->flock_mode) {
 	case afs_flock_mode_unset:	break;
 	case afs_flock_mode_local:	p = "local";	break;
@@ -341,6 +345,10 @@ static int afs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 		ctx->flock_mode = result.uint_32;
 		break;
 
+	case Opt_fscrypt:
+		ctx->fscrypt = true;
+		break;
+
 	default:
 		return -EINVAL;
 	}
@@ -512,6 +520,7 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
 			as->cell = afs_use_cell(ctx->cell, afs_cell_trace_use_sbi);
 			as->volume = afs_get_volume(ctx->volume,
 						    afs_volume_trace_get_alloc_sbi);
+			as->fscrypt = ctx->fscrypt;
 		}
 	}
 	return as;
@@ -687,6 +696,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
 	vnode->permit_cache	= NULL;
 	vnode->directory	= NULL;
 	vnode->directory_size	= 0;
+	vnode->content_ci	= NULL;
 
 	vnode->flags		= 1 << AFS_VNODE_UNSET;
 	vnode->lock_state	= AFS_VNODE_LOCK_NONE;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 1f6045b..0ebed86 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -12,6 +12,8 @@
 #include <linux/writeback.h>
 #include <linux/pagevec.h>
 #include <linux/netfs.h>
+#include <crypto/skcipher.h>
+#include <crypto/sha2.h>
 #include <trace/events/netfs.h>
 #include "internal.h"
 
@@ -93,6 +95,9 @@ int afs_estimate_write(struct netfs_io_request *wreq,
 	unsigned long long limit = ULLONG_MAX - stream->issue_from;
 	unsigned long long max_len = 256 * 1024 * 1024;
 
+	if (test_bit(NETFS_RREQ_CONTENT_ENCRYPTION, &wreq->flags))
+		max_len = 64 * 1024;
+
 	//if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
 	//	max_len = 512 * 1024;
 
@@ -324,3 +329,180 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
 		afs_put_wb_key(wbk);
 	}
 }
+
+/* Debug helper: hex-dump the first bytes of each scatterlist element.
+ * All call sites are currently commented out, hence __maybe_unused.
+ */
+static __maybe_unused void netfs_dump_sg(const char *prefix, struct scatterlist *sg, unsigned int n_sg)
+{
+	unsigned int i;
+
+	for (i = 0; i < n_sg; i++) {
+		void *p = kmap_local_page(sg_page(sg));
+		unsigned int l = min_t(unsigned int, sg->length, 16);
+
+		printk(KERN_DEBUG "%s[%x] %016lx %04x %04x %*phN\n",
+		       prefix, i, sg->page_link, sg->offset, sg->length,
+		       l, p + sg->offset);
+		kunmap_local(p);
+		sg++;
+	}
+}
+
+/*
+ * Create a keyed symmetric cipher for content crypto ops; the key is
+ * trivially derived from the cell name and the vnode's FID (testing only).
+ */
+int afs_open_crypto(struct afs_vnode *vnode)
+{
+	struct crypto_skcipher *ci;
+	struct sha256_ctx sha;
+	int ret = 0;
+	u8 key[SHA256_DIGEST_SIZE];
+
+	afs_lock_for_io(vnode);
+	if (vnode->content_ci)
+		goto out; /* Lost a race with another opener; keep theirs. */
+
+	ci = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+	if (IS_ERR(ci)) {
+		ret = PTR_ERR(ci);
+		pr_err("Can't allocate cipher: %d\n", ret);
+		goto out;
+	}
+
+	/* The IV buffer is seeded from a 64-bit file position; reject oddities. */
+	if (crypto_skcipher_ivsize(ci) > 16 ||
+	    crypto_skcipher_blocksize(ci) > 16) {
+		pr_err("iv wrong size: %u\n", crypto_skcipher_ivsize(ci));
+		ret = -EINVAL;
+		goto error_ci;
+	}
+
+	sha256_init(&sha);
+	sha256_update(&sha, vnode->volume->cell->name, vnode->volume->cell->name_len);
+	sha256_update(&sha, (u8 *)&vnode->fid, sizeof(vnode->fid));
+	sha256_final(&sha, key);
+
+	crypto_skcipher_set_flags(ci, CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
+	ret = crypto_skcipher_setkey(ci, key, sizeof(key));
+	memzero_explicit(key, sizeof(key)); /* Don't leave key material on the stack. */
+	if (ret < 0) {
+		pr_err("Setkey failed: %d\n", ret);
+		goto error_ci;
+	}
+	vnode->content_ci = ci;
+out:
+	afs_unlock_for_io(vnode);
+	return ret;
+
+error_ci:
+	crypto_free_skcipher(ci);
+	goto out;
+}
+
+/*
+ * Encrypt part of a write for fscrypt.  Completion is always signalled
+ * through netfs_crypto_req_done(), possibly asynchronously.
+ */
+void afs_encrypt_block(struct netfs_io_subrequest *subreq, gfp_t gfp)
+{
+	struct skcipher_request *req;
+	struct crypto_skcipher *ci = AFS_FS_I(subreq->rreq->inode)->content_ci;
+	size_t len = subreq->len, reqsize, ivsize;
+	u8 *iv;
+	int ret = -ENOMEM;
+
+	//netfs_dump_sg("SRC", &subreq->src_sg, 1);
+
+	reqsize = round_up(sizeof(struct skcipher_request) +
+			   crypto_skcipher_reqsize(ci),
+			   CRYPTO_MINALIGN);
+	ivsize = crypto_skcipher_ivsize(ci);
+	req = kzalloc(reqsize + ivsize, gfp);
+	if (!req)
+		goto error;
+
+	/* IV is the 64-bit file position of the block; rest stays zeroed. */
+	iv = (void *)req + reqsize;
+	*(__be64 *)iv = cpu_to_be64(subreq->start);
+
+	skcipher_request_set_tfm(req, ci);
+	subreq->crypto_req = req;
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      netfs_crypto_req_done, subreq);
+	skcipher_request_set_crypt(req, &subreq->src_sg, &subreq->dst_sg, len, iv);
+	ret = crypto_skcipher_encrypt(req);
+	switch (ret) {
+	case 0:
+		/* Synchronous completion; callback not invoked. */
+		//netfs_dump_sg("DST", &subreq->dst_sg, 1);
+		netfs_crypto_req_done(subreq, 0);
+		break;
+
+	case -EINPROGRESS:
+		/* The request is in progress; the callback will be/may already
+		 * have been invoked.
+		 */
+		break;
+	case -EBUSY:
+		/* The request was backlogged (queued for later processing);
+		 * the callback will be invoked when it completes, and may
+		 * already have been by the time we see this error.
+		 */
+		break;
+	default:
+		pr_err("R=%x[%x] Encrypt failed: %d\n",
+		       subreq->rreq->debug_id, subreq->debug_index, ret);
+		goto error_req;
+	}
+
+	/* subreq and req now belong to the crypto layer */
+	return;
+
+error_req:
+	subreq->crypto_req = NULL; /* Freed below; don't leave it dangling. */
+	skcipher_request_free(req);
+error:
+	netfs_crypto_req_done(subreq, ret);
+}
+
+/*
+ * Decrypt part of a read for fscrypt.  The caller reserved an extra
+ * scatterlist element before each of source_sg and dest_sg for our purposes,
+ * should we need them.  Runs synchronously; returns 0 or a negative error.
+ */
+int afs_decrypt_block(struct netfs_io_request *rreq, loff_t pos, size_t len,
+		      struct scatterlist *source_sg, unsigned int n_source,
+		      struct scatterlist *dest_sg, unsigned int n_dest)
+{
+	struct skcipher_request *req;
+	struct crypto_skcipher *ci = AFS_FS_I(rreq->inode)->content_ci;
+	size_t reqsize, ivsize;
+	u8 *iv;
+	int ret = -ENOMEM;
+	DECLARE_CRYPTO_WAIT(wait);
+
+	_enter("%llx,%zx", pos, len);
+
+	reqsize = round_up(sizeof(struct skcipher_request) +
+			   crypto_skcipher_reqsize(ci),
+			   CRYPTO_MINALIGN);
+	ivsize = crypto_skcipher_ivsize(ci);
+	req = kzalloc(reqsize + ivsize, GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	/* IV is the 64-bit file position of the block; rest stays zeroed. */
+	iv = (void *)req + reqsize;
+	*(__be64 *)iv = cpu_to_be64(pos);
+
+	skcipher_request_set_tfm(req, ci);
+	skcipher_request_set_callback(
+		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
+		crypto_req_done, &wait);
+	skcipher_request_set_crypt(req, source_sg, dest_sg, len, iv);
+
+	ret = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
+	if (ret < 0)
+		pr_err("Decrypt failed: %d\n", ret);
+
+	skcipher_request_free(req);
+	_leave(" = %d", ret);
+	return ret;
+}