libceph: Simplify messenger_v1

Simplify the messenger_v1 I/O transport.  The ceph_msg_data_type enum is no
longer necessary now that every data item is described by an iov_iter, so
remove it and just use the iterator.  On top of that, the msg_data iterator
routines can be simplified a bit (the per-type switches collapse to direct
calls) and the transmission loop now only needs the sendmsg-from-iterator
branch.  Also widen bvecq_len from u32 to size_t to match the len parameter
of ceph_msg_data_add_bvecq().

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Ilya Dryomov <idryomov@gmail.com>
cc: Xiubo Li <xiubli@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: ceph-devel@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 517e927..bba0c6b 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -116,18 +116,11 @@ struct ceph_messenger {
 	spinlock_t global_seq_lock;
 };
 
-enum ceph_msg_data_type {
-	CEPH_MSG_DATA_NONE,	/* message contains no data payload */
-	CEPH_MSG_DATA_BVECQ,	/* data source/destination is a bvecq */
-	CEPH_MSG_DATA_ITER,	/* data source/destination is an iov_iter */
-};
-
 struct ceph_msg_data {
-	enum ceph_msg_data_type	type;
-	struct iov_iter		iter;
 	bool			release_bvecq;
+	struct iov_iter		iter;
 	struct bvecq		*bvecq;
-	u32			bvecq_len;
+	size_t			bvecq_len;
 };
 
 struct ceph_msg_data_cursor {
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b2fe695..c82cf1d 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -778,18 +778,7 @@ static bool ceph_msg_data_iter_advance(struct ceph_msg_data_cursor *cursor,
  */
 static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
 {
-	size_t length = cursor->total_resid;
-
-	switch (cursor->data->type) {
-	case CEPH_MSG_DATA_BVECQ:
-	case CEPH_MSG_DATA_ITER:
-		ceph_msg_data_iter_cursor_init(cursor, length);
-		break;
-	case CEPH_MSG_DATA_NONE:
-	default:
-		/* BUG(); */
-		break;
-	}
+	ceph_msg_data_iter_cursor_init(cursor, cursor->total_resid);
 	cursor->need_crc = true;
 }
 
@@ -817,17 +806,7 @@ struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
 {
 	struct page *page;
 
-	switch (cursor->data->type) {
-	case CEPH_MSG_DATA_BVECQ:
-	case CEPH_MSG_DATA_ITER:
-		page = ceph_msg_data_iter_next(cursor, page_offset, length);
-		break;
-	case CEPH_MSG_DATA_NONE:
-	default:
-		page = NULL;
-		break;
-	}
-
+	page = ceph_msg_data_iter_next(cursor, page_offset, length);
 	BUG_ON(!page);
 	BUG_ON(*page_offset + *length > PAGE_SIZE);
 	BUG_ON(!*length);
@@ -845,16 +824,7 @@ void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes)
 	bool new_piece;
 
 	BUG_ON(bytes > cursor->resid);
-	switch (cursor->data->type) {
-	case CEPH_MSG_DATA_BVECQ:
-	case CEPH_MSG_DATA_ITER:
-		new_piece = ceph_msg_data_iter_advance(cursor, bytes);
-		break;
-	case CEPH_MSG_DATA_NONE:
-	default:
-		BUG();
-		break;
-	}
+	new_piece = ceph_msg_data_iter_advance(cursor, bytes);
 	cursor->total_resid -= bytes;
 
 	if (!cursor->resid && cursor->total_resid) {
@@ -1588,7 +1558,6 @@ void ceph_msg_data_add_bvecq(struct ceph_msg *msg, struct bvecq *bvecq, size_t l
 	BUG_ON(!len);
 
 	data = ceph_msg_data_add(msg);
-	data->type	= CEPH_MSG_DATA_BVECQ;
 	data->bvecq	= bvecq_get(bvecq);
 	data->bvecq_len	= len;
 	msg->data_length += len;
@@ -1600,12 +1569,9 @@ EXPORT_SYMBOL(ceph_msg_data_add_bvecq);
 void ceph_msg_data_add_iter(struct ceph_msg *msg,
 			    struct iov_iter *iter)
 {
-	struct ceph_msg_data *data;
+	struct ceph_msg_data *data = ceph_msg_data_add(msg);
 
-	data = ceph_msg_data_add(msg);
-	data->type = CEPH_MSG_DATA_ITER;
 	data->iter = *iter;
-
 	msg->data_length += iov_iter_count(&data->iter);
 }
 
diff --git a/net/ceph/messenger_v1.c b/net/ceph/messenger_v1.c
index 84c0b74..1348427 100644
--- a/net/ceph/messenger_v1.c
+++ b/net/ceph/messenger_v1.c
@@ -521,36 +521,16 @@ static int write_partial_message_data(struct ceph_connection *con,
 			continue;
 		}
 
-		if (cursor->data->type == CEPH_MSG_DATA_BVECQ ||
-		    cursor->data->type == CEPH_MSG_DATA_ITER) {
-			ret = ceph_tcp_sock_sendmsg(con->sock, &cursor->iov_iter,
-						    MSG_MORE);
-			if (ret <= 0) {
-				if (do_datacrc)
-					msg->footer.data_crc = cpu_to_le32(crc);
+		ret = ceph_tcp_sock_sendmsg(con->sock, &cursor->iov_iter,
+					    MSG_MORE);
+		if (ret <= 0) {
+			if (do_datacrc)
+				msg->footer.data_crc = cpu_to_le32(crc);
 
-				return ret;
-			}
-			if (do_datacrc && cursor->need_crc)
-				ceph_calc_crc(&cursor->crc_iter, ret, &crc);
-		} else {
-			struct page *page;
-			size_t page_offset;
-			size_t length;
-
-			page = ceph_msg_data_next(cursor, &page_offset, &length);
-			ret = ceph_tcp_sendpage(con->sock, page, page_offset,
-						length, MSG_MORE);
-			if (ret <= 0) {
-				if (do_datacrc)
-					msg->footer.data_crc = cpu_to_le32(crc);
-
-				return ret;
-			}
-			if (do_datacrc && cursor->need_crc)
-				crc = ceph_crc32c_page(crc, page, page_offset,
-						       length);
+			return ret;
 		}
+		if (do_datacrc && cursor->need_crc)
+			ceph_calc_crc(&cursor->crc_iter, ret, &crc);
 		ceph_msg_data_advance(cursor, (size_t)ret);
 	}
 
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index de8f968..7eff8b2 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -4479,7 +4479,6 @@ static void handle_watch_notify(struct ceph_osd_client *osdc,
 
 			if (data) {
 				if (lreq->preply) {
-					WARN_ON(data->type != CEPH_MSG_DATA_BVECQ);
 					*lreq->preply = data->bvecq;
 					*lreq->preply_len = data->bvecq_len;
 					data->bvecq = NULL;