Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap: (31 commits)
  ARM: OMAP: Fix export.h or module.h includes
  ARM: OMAP: omap_device: Include linux/export.h
  ARM: OMAP2: Fix H4 matrix keyboard warning
  ARM: OMAP1: Remove unused omap-alsa.h
  ARM: OMAP1: Fix warnings about enabling 32 KiHz timer
  ARM: OMAP2+: timer: Remove omap_device_pm_latency
  ARM: OMAP2+: clock data: Remove redundant timer clkdev
  ARM: OMAP: Devkit8000: Remove double omap_mux_init_gpio
  ARM: OMAP: usb: musb: OMAP: Delete unused function
  MAINTAINERS: Update linux-omap git repository
  ARM: OMAP: change get_context_loss_count ret value to int
  ARM: OMAP4: hsmmc: configure SDMMC1_DR0 properly
  ARM: OMAP4: hsmmc: Fix Pbias configuration on regulator OFF
  ARM: OMAP3: hwmod: fix variant registration and remove SmartReflex from common list
  ARM: OMAP: I2C: Fix omap_register_i2c_bus() return value on success
  ARM: OMAP: dmtimer: Include linux/module.h
  ARM: OMAP2+: l3-noc: Include linux/module.h
  ARM: OMAP2+: devices: Fixes for McPDM
  ARM: OMAP: Fix errors and warnings when building for one board
  ARM: OMAP3: PM: restrict erratum i443 handling to OMAP3430 only
  ...
diff --git a/Documentation/ABI/stable/sysfs-acpi-pmprofile b/Documentation/ABI/stable/sysfs-acpi-pmprofile
new file mode 100644
index 0000000..964c7a8
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-acpi-pmprofile
@@ -0,0 +1,22 @@
+What: 		/sys/firmware/acpi/pm_profile
+Date:		03-Nov-2011
+KernelVersion:	v3.2
+Contact:	linux-acpi@vger.kernel.org
+Description: 	The ACPI pm_profile sysfs interface exports the platform
+		power management (and performance) expectations as provided
+		by the BIOS. The integer value is passed through unmodified,
+		exactly as retrieved from the FADT ACPI table.
+Values:         For possible values see ACPI specification:
+		5.2.9 Fixed ACPI Description Table (FADT)
+		Field: Preferred_PM_Profile
+
+		Currently these values are defined by spec:
+		0 Unspecified
+		1 Desktop
+		2 Mobile
+		3 Workstation
+		4 Enterprise Server
+		5 SOHO Server
+		6 Appliance PC
+		7 Performance Server
+		>7 Reserved
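
For reference, a userspace consumer can read this attribute with ordinary file I/O. A minimal sketch (the profile-name table below merely mirrors the list above and is illustrative, not part of the interface):

	#include <stdio.h>

	/* Sketch: read /sys/firmware/acpi/pm_profile and map the FADT
	 * Preferred_PM_Profile value to the names documented above. */
	int main(void)
	{
		static const char *names[] = {
			"Unspecified", "Desktop", "Mobile", "Workstation",
			"Enterprise Server", "SOHO Server", "Appliance PC",
			"Performance Server",
		};
		FILE *f = fopen("/sys/firmware/acpi/pm_profile", "r");
		unsigned int profile;

		if (!f || fscanf(f, "%u", &profile) != 1) {
			perror("pm_profile");
			return 1;
		}
		fclose(f);
		printf("pm_profile %u (%s)\n", profile,
		       profile < 8 ? names[profile] : "Reserved");
		return 0;
	}
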
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 17910e2..0c674be 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -572,7 +572,7 @@
 			</para>
 			<para>
 				The simplest way to activate the FLASH based bad block table support 
-				is to set the option NAND_USE_FLASH_BBT in the option field of
+				is to set the option NAND_BBT_USE_FLASH in the bbt_options field of
 				the nand chip structure before calling nand_scan(). For AG-AND
 				chips this is done by default.
 				This activates the default FLASH based bad block table functionality 
@@ -773,20 +773,6 @@
 				done according to the default builtin scheme.
 			</para>
 		</sect2>
-		<sect2 id="User_space_placement_selection">
-			<title>User space placement selection</title>
-		<para>
-			All non ecc functions like mtd->read and mtd->write use an internal 
-			structure, which can be set by an ioctl. This structure is preset 
-			to the autoplacement default.
-	     		<programlisting>
-	ioctl (fd, MEMSETOOBSEL, oobsel);
-	     		</programlisting>
-			oobsel is a pointer to a user supplied structure of type
-			nand_oobconfig. The contents of this structure must match the 
-			criteria of the filesystem, which will be used. See an example in utils/nandwrite.c.
-		</para>
-		</sect2>
 	</sect1>	
 	<sect1 id="Spare_area_autoplacement_default">
 		<title>Spare area autoplacement default schemes</title>
@@ -1158,9 +1144,6 @@
 		These constants are defined in nand.h. They are OR'ed together to describe
 		the functionality.
      		<programlisting>
-/* Use a flash based bad block table. This option is parsed by the
- * default bad block table function (nand_default_bbt). */
-#define NAND_USE_FLASH_BBT	0x00010000
 /* The hw ecc generator provides a syndrome instead of an ecc value on read.
  * This can only work if we have the ecc bytes directly behind the
  * data bytes. Applies for DOC and AG-AND Renesas HW Reed Solomon generators */
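
The DocBook change above tracks the kernel-wide rename from NAND_USE_FLASH_BBT (in nand_chip.options) to NAND_BBT_USE_FLASH (in the new nand_chip.bbt_options field). A minimal sketch of a driver opting in, with the function name and soft-ECC choice purely illustrative:

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/nand.h>

	/* Sketch: enable the flash-based bad block table before nand_scan(),
	 * using the renamed bbt_options field rather than options. */
	static int example_nand_attach(struct mtd_info *mtd, struct nand_chip *chip)
	{
		chip->ecc.mode = NAND_ECC_SOFT;
		chip->bbt_options = NAND_BBT_USE_FLASH;

		/* nand_scan() identifies the chip and builds/reads the BBT */
		return nand_scan(mtd, 1);
	}
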
diff --git a/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt b/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
new file mode 100644
index 0000000..ef66ddd
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/atmel-dataflash.txt
@@ -0,0 +1,14 @@
+* Atmel Data Flash
+
+Required properties:
+- compatible : "atmel,<model>", "atmel,<series>", "atmel,dataflash".
+
+Example:
+
+flash@1 {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	compatible = "atmel,at45db321d", "atmel,at45", "atmel,dataflash";
+	spi-max-frequency = <25000000>;
+	reg = <1>;
+};
diff --git a/MAINTAINERS b/MAINTAINERS
index 9fb6bd9..4808256 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1032,6 +1032,7 @@
 F:	arch/arm/include/asm/hardware/iomd.h
 F:	arch/arm/include/asm/hardware/memc.h
 F:	arch/arm/mach-rpc/
+F:	drivers/net/ethernet/8390/etherh.c
 F:	drivers/net/ethernet/i825xx/ether1*
 F:	drivers/net/ethernet/seeq/ether3*
 F:	drivers/scsi/arm/
@@ -5470,7 +5471,7 @@
 F:	drivers/net/ethernet/rdc/r6040.c
 
 RDS - RELIABLE DATAGRAM SOCKETS
-M:	Andy Grover <andy.grover@oracle.com>
+M:	Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com>
 L:	rds-devel@oss.oracle.com (moderated for non-subscribers)
 S:	Supported
 F:	net/rds/
diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c
index 0487ea1..4282d96 100644
--- a/arch/arm/mach-at91/board-afeb-9260v1.c
+++ b/arch/arm/mach-at91/board-afeb-9260v1.c
@@ -130,19 +130,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(afeb9260_nand_partition);
-	return afeb9260_nand_partition;
-}
-
 static struct atmel_nand_data __initdata afeb9260_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 	.rdy_pin	= AT91_PIN_PC13,
 	.enable_pin	= AT91_PIN_PC14,
-	.partition_info	= nand_partitions,
 	.bus_width_16	= 0,
+	.parts		= afeb9260_nand_partition,
+	.num_parts	= ARRAY_SIZE(afeb9260_nand_partition),
 };
 
 
diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c
index 747b2ea..f90cfb3 100644
--- a/arch/arm/mach-at91/board-cam60.c
+++ b/arch/arm/mach-at91/board-cam60.c
@@ -132,19 +132,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(cam60_nand_partition);
-	return cam60_nand_partition;
-}
-
 static struct atmel_nand_data __initdata cam60_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 	// .det_pin	= ... not there
 	.rdy_pin	= AT91_PIN_PA9,
 	.enable_pin	= AT91_PIN_PA7,
-	.partition_info	= nand_partitions,
+	.parts		= cam60_nand_partition,
+	.num_parts	= ARRAY_SIZE(cam60_nand_partition),
 };
 
 static struct sam9_smc_config __initdata cam60_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c
index 0626703..5dffd3b 100644
--- a/arch/arm/mach-at91/board-cap9adk.c
+++ b/arch/arm/mach-at91/board-cap9adk.c
@@ -169,19 +169,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(cap9adk_nand_partitions);
-	return cap9adk_nand_partitions;
-}
-
 static struct atmel_nand_data __initdata cap9adk_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 //	.det_pin	= ... not connected
 //	.rdy_pin	= ... not connected
 	.enable_pin	= AT91_PIN_PD15,
-	.partition_info	= nand_partitions,
+	.parts		= cap9adk_nand_partitions,
+	.num_parts	= ARRAY_SIZE(cap9adk_nand_partitions),
 };
 
 static struct sam9_smc_config __initdata cap9adk_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-kb9202.c b/arch/arm/mach-at91/board-kb9202.c
index 15a3f1a..e61351f 100644
--- a/arch/arm/mach-at91/board-kb9202.c
+++ b/arch/arm/mach-at91/board-kb9202.c
@@ -97,19 +97,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(kb9202_nand_partition);
-	return kb9202_nand_partition;
-}
-
 static struct atmel_nand_data __initdata kb9202_nand_data = {
 	.ale		= 22,
 	.cle		= 21,
 	// .det_pin	= ... not there
 	.rdy_pin	= AT91_PIN_PC29,
 	.enable_pin	= AT91_PIN_PC28,
-	.partition_info	= nand_partitions,
+	.parts		= kb9202_nand_partition,
+	.num_parts	= ARRAY_SIZE(kb9202_nand_partition),
 };
 
 static void __init kb9202_board_init(void)
diff --git a/arch/arm/mach-at91/board-neocore926.c b/arch/arm/mach-at91/board-neocore926.c
index 6094496..ef816c1 100644
--- a/arch/arm/mach-at91/board-neocore926.c
+++ b/arch/arm/mach-at91/board-neocore926.c
@@ -182,19 +182,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(neocore926_nand_partition);
-	return neocore926_nand_partition;
-}
-
 static struct atmel_nand_data __initdata neocore926_nand_data = {
 	.ale			= 21,
 	.cle			= 22,
 	.rdy_pin		= AT91_PIN_PB19,
 	.rdy_pin_active_low	= 1,
 	.enable_pin		= AT91_PIN_PD15,
-	.partition_info		= nand_partitions,
+	.parts			= neocore926_nand_partition,
+	.num_parts		= ARRAY_SIZE(neocore926_nand_partition),
 };
 
 static struct sam9_smc_config __initdata neocore926_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-qil-a9260.c b/arch/arm/mach-at91/board-qil-a9260.c
index 938cc39..07421bd 100644
--- a/arch/arm/mach-at91/board-qil-a9260.c
+++ b/arch/arm/mach-at91/board-qil-a9260.c
@@ -130,19 +130,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(ek_nand_partition);
-	return ek_nand_partition;
-}
-
 static struct atmel_nand_data __initdata ek_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 //	.det_pin	= ... not connected
 	.rdy_pin	= AT91_PIN_PC13,
 	.enable_pin	= AT91_PIN_PC14,
-	.partition_info	= nand_partitions,
+	.parts		= ek_nand_partition,
+	.num_parts	= ARRAY_SIZE(ek_nand_partition),
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-rm9200dk.c b/arch/arm/mach-at91/board-rm9200dk.c
index b4ac30e..80a8c9c 100644
--- a/arch/arm/mach-at91/board-rm9200dk.c
+++ b/arch/arm/mach-at91/board-rm9200dk.c
@@ -138,19 +138,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(dk_nand_partition);
-	return dk_nand_partition;
-}
-
 static struct atmel_nand_data __initdata dk_nand_data = {
 	.ale		= 22,
 	.cle		= 21,
 	.det_pin	= AT91_PIN_PB1,
 	.rdy_pin	= AT91_PIN_PC2,
 	// .enable_pin	= ... not there
-	.partition_info	= nand_partitions,
+	.parts		= dk_nand_partition,
+	.num_parts	= ARRAY_SIZE(dk_nand_partition),
 };
 
 #define DK_FLASH_BASE	AT91_CHIPSELECT_0
diff --git a/arch/arm/mach-at91/board-sam9-l9260.c b/arch/arm/mach-at91/board-sam9-l9260.c
index 2a21e79..072d53a 100644
--- a/arch/arm/mach-at91/board-sam9-l9260.c
+++ b/arch/arm/mach-at91/board-sam9-l9260.c
@@ -131,19 +131,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(ek_nand_partition);
-	return ek_nand_partition;
-}
-
 static struct atmel_nand_data __initdata ek_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 //	.det_pin	= ... not connected
 	.rdy_pin	= AT91_PIN_PC13,
 	.enable_pin	= AT91_PIN_PC14,
-	.partition_info	= nand_partitions,
+	.parts		= ek_nand_partition,
+	.num_parts	= ARRAY_SIZE(ek_nand_partition),
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index 89c8b57..4f10181 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -173,19 +173,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(ek_nand_partition);
-	return ek_nand_partition;
-}
-
 static struct atmel_nand_data __initdata ek_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 //	.det_pin	= ... not connected
 	.rdy_pin	= AT91_PIN_PC13,
 	.enable_pin	= AT91_PIN_PC14,
-	.partition_info	= nand_partitions,
+	.parts		= ek_nand_partition,
+	.num_parts	= ARRAY_SIZE(ek_nand_partition),
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index 3741f43..b005b73 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -179,19 +179,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(ek_nand_partition);
-	return ek_nand_partition;
-}
-
 static struct atmel_nand_data __initdata ek_nand_data = {
 	.ale		= 22,
 	.cle		= 21,
 //	.det_pin	= ... not connected
 	.rdy_pin	= AT91_PIN_PC15,
 	.enable_pin	= AT91_PIN_PC14,
-	.partition_info	= nand_partitions,
+	.parts		= ek_nand_partition,
+	.num_parts	= ARRAY_SIZE(ek_nand_partition),
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index a580dd4..bccdcf2 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -180,19 +180,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(ek_nand_partition);
-	return ek_nand_partition;
-}
-
 static struct atmel_nand_data __initdata ek_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 //	.det_pin	= ... not connected
 	.rdy_pin	= AT91_PIN_PA22,
 	.enable_pin	= AT91_PIN_PD15,
-	.partition_info	= nand_partitions,
+	.parts		= ek_nand_partition,
+	.num_parts	= ARRAY_SIZE(ek_nand_partition),
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index 8d77c2f..64fc75c 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -157,19 +157,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(ek_nand_partition);
-	return ek_nand_partition;
-}
-
 /* det_pin is not connected */
 static struct atmel_nand_data __initdata ek_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 	.rdy_pin	= AT91_PIN_PC13,
 	.enable_pin	= AT91_PIN_PC14,
-	.partition_info	= nand_partitions,
+	.parts		= ek_nand_partition,
+	.num_parts	= ARRAY_SIZE(ek_nand_partition),
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 2d6203a..92de912 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -137,19 +137,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(ek_nand_partition);
-	return ek_nand_partition;
-}
-
 /* det_pin is not connected */
 static struct atmel_nand_data __initdata ek_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 	.rdy_pin	= AT91_PIN_PC8,
 	.enable_pin	= AT91_PIN_PC14,
-	.partition_info	= nand_partitions,
+	.parts		= ek_nand_partition,
+	.num_parts	= ARRAY_SIZE(ek_nand_partition),
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index 39a28ef..b2b7482 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -88,19 +88,14 @@
 	},
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(ek_nand_partition);
-	return ek_nand_partition;
-}
-
 static struct atmel_nand_data __initdata ek_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 //	.det_pin	= ... not connected
 	.rdy_pin	= AT91_PIN_PD17,
 	.enable_pin	= AT91_PIN_PB6,
-	.partition_info	= nand_partitions,
+	.parts		= ek_nand_partition,
+	.num_parts	= ARRAY_SIZE(ek_nand_partition),
 };
 
 static struct sam9_smc_config __initdata ek_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-snapper9260.c b/arch/arm/mach-at91/board-snapper9260.c
index c73d25e..0df01c6 100644
--- a/arch/arm/mach-at91/board-snapper9260.c
+++ b/arch/arm/mach-at91/board-snapper9260.c
@@ -97,18 +97,12 @@
 	},
 };
 
-static struct mtd_partition * __init
-snapper9260_nand_partition_info(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(snapper9260_nand_partitions);
-	return snapper9260_nand_partitions;
-}
-
 static struct atmel_nand_data __initdata snapper9260_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 	.rdy_pin	= AT91_PIN_PC13,
-	.partition_info	= snapper9260_nand_partition_info,
+	.parts		= snapper9260_nand_partitions,
+	.num_parts	= ARRAY_SIZE(snapper9260_nand_partitions),
 	.bus_width_16	= 0,
 };
 
diff --git a/arch/arm/mach-at91/board-usb-a926x.c b/arch/arm/mach-at91/board-usb-a926x.c
index 5852d3d..0a20bab 100644
--- a/arch/arm/mach-at91/board-usb-a926x.c
+++ b/arch/arm/mach-at91/board-usb-a926x.c
@@ -190,19 +190,14 @@
 	}
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(ek_nand_partition);
-	return ek_nand_partition;
-}
-
 static struct atmel_nand_data __initdata ek_nand_data = {
 	.ale		= 21,
 	.cle		= 22,
 //	.det_pin	= ... not connected
 	.rdy_pin	= AT91_PIN_PA22,
 	.enable_pin	= AT91_PIN_PD15,
-	.partition_info	= nand_partitions,
+	.parts		= ek_nand_partition,
+	.num_parts	= ARRAY_SIZE(ek_nand_partition),
 };
 
 static struct sam9_smc_config __initdata usb_a9260_nand_smc_config = {
diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
index 3c288b3..649b052 100644
--- a/arch/arm/mach-at91/board-yl-9200.c
+++ b/arch/arm/mach-at91/board-yl-9200.c
@@ -172,19 +172,14 @@
 	}
 };
 
-static struct mtd_partition * __init nand_partitions(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(yl9200_nand_partition);
-	return yl9200_nand_partition;
-}
-
 static struct atmel_nand_data __initdata yl9200_nand_data = {
 	.ale		= 6,
 	.cle		= 7,
 	// .det_pin	= ... not connected
 	.rdy_pin	= AT91_PIN_PC14,	/* R/!B (Sheet10) */
 	.enable_pin	= AT91_PIN_PC15,	/* !CE  (Sheet10) */
-	.partition_info	= nand_partitions,
+	.parts		= yl9200_nand_partition,
+	.num_parts	= ARRAY_SIZE(yl9200_nand_partition),
 };
 
 /*
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
index f474272..a851e6c9 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/arch/arm/mach-at91/cpuidle.c
@@ -34,7 +34,8 @@
 
 /* Actual code that puts the SoC in different idle states */
 static int at91_enter_idle(struct cpuidle_device *dev,
-			       struct cpuidle_state *state)
+			struct cpuidle_driver *drv,
+			       int index)
 {
 	struct timeval before, after;
 	int idle_time;
@@ -42,10 +43,10 @@
 
 	local_irq_disable();
 	do_gettimeofday(&before);
-	if (state == &dev->states[0])
+	if (index == 0)
 		/* Wait for interrupt state */
 		cpu_do_idle();
-	else if (state == &dev->states[1]) {
+	else if (index == 1) {
 		asm("b 1f; .align 5; 1:");
 		asm("mcr p15, 0, r0, c7, c10, 4");	/* drain write buffer */
 		saved_lpr = sdram_selfrefresh_enable();
@@ -56,34 +57,38 @@
 	local_irq_enable();
 	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
 			(after.tv_usec - before.tv_usec);
-	return idle_time;
+
+	dev->last_residency = idle_time;
+	return index;
 }
 
 /* Initialize CPU idle by registering the idle states */
 static int at91_init_cpuidle(void)
 {
 	struct cpuidle_device *device;
-
-	cpuidle_register_driver(&at91_idle_driver);
+	struct cpuidle_driver *driver = &at91_idle_driver;
 
 	device = &per_cpu(at91_cpuidle_device, smp_processor_id());
 	device->state_count = AT91_MAX_STATES;
+	driver->state_count = AT91_MAX_STATES;
 
 	/* Wait for interrupt state */
-	device->states[0].enter = at91_enter_idle;
-	device->states[0].exit_latency = 1;
-	device->states[0].target_residency = 10000;
-	device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[0].name, "WFI");
-	strcpy(device->states[0].desc, "Wait for interrupt");
+	driver->states[0].enter = at91_enter_idle;
+	driver->states[0].exit_latency = 1;
+	driver->states[0].target_residency = 10000;
+	driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[0].name, "WFI");
+	strcpy(driver->states[0].desc, "Wait for interrupt");
 
 	/* Wait for interrupt and RAM self refresh state */
-	device->states[1].enter = at91_enter_idle;
-	device->states[1].exit_latency = 10;
-	device->states[1].target_residency = 10000;
-	device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[1].name, "RAM_SR");
-	strcpy(device->states[1].desc, "WFI and RAM Self Refresh");
+	driver->states[1].enter = at91_enter_idle;
+	driver->states[1].exit_latency = 10;
+	driver->states[1].target_residency = 10000;
+	driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[1].name, "RAM_SR");
+	strcpy(driver->states[1].desc, "WFI and RAM Self Refresh");
+
+	cpuidle_register_driver(&at91_idle_driver);
 
 	if (cpuidle_register_device(device)) {
 		printk(KERN_ERR "at91_init_cpuidle: Failed registering\n");
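
The at91 conversion above follows the cpuidle API pattern repeated throughout this merge: idle states move from the per-CPU cpuidle_device into the cpuidle_driver, and enter() callbacks receive a state index instead of a cpuidle_state pointer, store the measured time in dev->last_residency, and return the index. A condensed sketch of the new callback shape (the deeper-idle hook is hypothetical):

	/* Sketch of the new cpuidle enter() contract: take an index,
	 * record the residency, return the index actually entered. */
	static int example_enter_idle(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv,
				      int index)
	{
		struct timeval before, after;

		local_irq_disable();
		do_gettimeofday(&before);

		if (index == 0)
			cpu_do_idle();			/* plain WFI */
		else
			example_deeper_idle();		/* hypothetical deeper state */

		do_gettimeofday(&after);
		local_irq_enable();

		dev->last_residency =
			(after.tv_sec - before.tv_sec) * USEC_PER_SEC +
			(after.tv_usec - before.tv_usec);
		return index;
	}
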
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index d07767f..eac92e9 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -117,7 +117,8 @@
 	u8		ale;		/* address line number connected to ALE */
 	u8		cle;		/* address line number connected to CLE */
 	u8		bus_width_16;	/* buswidth is 16 bit */
-	struct mtd_partition* (*partition_info)(int, int*);
+	struct mtd_partition *parts;
+	unsigned int	num_parts;
 };
 extern void __init at91_add_device_nand(struct atmel_nand_data *data);
 
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 26d94c0..11c3db9 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -377,7 +377,7 @@
 	.nr_parts	= ARRAY_SIZE(da830_evm_nand_partitions),
 	.ecc_mode	= NAND_ECC_HW,
 	.ecc_bits	= 4,
-	.options	= NAND_USE_FLASH_BBT,
+	.bbt_options	= NAND_BBT_USE_FLASH,
 	.bbt_td		= &da830_evm_nand_bbt_main_descr,
 	.bbt_md		= &da830_evm_nand_bbt_mirror_descr,
 	.timing         = &da830_evm_nandflash_timing,
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index ec21663..1d7d249 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -256,7 +256,7 @@
 	.nr_parts	= ARRAY_SIZE(da850_evm_nandflash_partition),
 	.ecc_mode	= NAND_ECC_HW,
 	.ecc_bits	= 4,
-	.options	= NAND_USE_FLASH_BBT,
+	.bbt_options	= NAND_BBT_USE_FLASH,
 	.timing		= &da850_evm_nandflash_timing,
 };
 
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 6556628..4e0e707 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -77,7 +77,7 @@
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW,
-	.options		= NAND_USE_FLASH_BBT,
+	.bbt_options		= NAND_BBT_USE_FLASH,
 	.ecc_bits		= 4,
 };
 
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index b307470..ff2d241 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -74,7 +74,7 @@
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW_SYNDROME,
-	.options		= NAND_USE_FLASH_BBT,
+	.bbt_options		= NAND_BBT_USE_FLASH,
 };
 
 static struct resource davinci_nand_resources[] = {
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 04c43ab..1918ae7 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -139,7 +139,7 @@
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW,
-	.options		= NAND_USE_FLASH_BBT,
+	.bbt_options		= NAND_BBT_USE_FLASH,
 	.ecc_bits		= 4,
 };
 
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 28fafa78..0cf8abf 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -151,7 +151,7 @@
 	.parts		= davinci_evm_nandflash_partition,
 	.nr_parts	= ARRAY_SIZE(davinci_evm_nandflash_partition),
 	.ecc_mode	= NAND_ECC_HW,
-	.options	= NAND_USE_FLASH_BBT,
+	.bbt_options	= NAND_BBT_USE_FLASH,
 	.timing		= &davinci_evm_nandflash_timing,
 };
 
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 6efc84cc..3cfff55 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -396,7 +396,8 @@
 	.parts		= mityomapl138_nandflash_partition,
 	.nr_parts	= ARRAY_SIZE(mityomapl138_nandflash_partition),
 	.ecc_mode	= NAND_ECC_HW,
-	.options	= NAND_USE_FLASH_BBT | NAND_BUSWIDTH_16,
+	.bbt_options	= NAND_BBT_USE_FLASH,
+	.options	= NAND_BUSWIDTH_16,
 	.ecc_bits	= 1, /* 4 bit mode is not supported with 16 bit NAND */
 };
 
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 38d6f64..e5f231a 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -87,7 +87,7 @@
 	.parts		= davinci_ntosd2_nandflash_partition,
 	.nr_parts	= ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
 	.ecc_mode	= NAND_ECC_HW,
-	.options	= NAND_USE_FLASH_BBT,
+	.bbt_options	= NAND_BBT_USE_FLASH,
 };
 
 static struct resource davinci_ntosd2_nandflash_resource[] = {
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index 90ee7b5..f69e40a 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -144,7 +144,7 @@
 	.parts		= nand_partitions,
 	.nr_parts	= ARRAY_SIZE(nand_partitions),
 	.ecc_mode	= NAND_ECC_HW,
-	.options	= NAND_USE_FLASH_BBT,
+	.bbt_options	= NAND_BBT_USE_FLASH,
 	.ecc_bits	= 1,
 };
 
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 60d2f48..a30c7c5 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -79,9 +79,11 @@
 
 /* Actual code that puts the SoC in different idle states */
 static int davinci_enter_idle(struct cpuidle_device *dev,
-						struct cpuidle_state *state)
+				struct cpuidle_driver *drv,
+						int index)
 {
-	struct davinci_ops *ops = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
 	struct timeval before, after;
 	int idle_time;
 
@@ -99,13 +101,17 @@
 	local_irq_enable();
 	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
 			(after.tv_usec - before.tv_usec);
-	return idle_time;
+
+	dev->last_residency = idle_time;
+
+	return index;
 }
 
 static int __init davinci_cpuidle_probe(struct platform_device *pdev)
 {
 	int ret;
 	struct cpuidle_device *device;
+	struct cpuidle_driver *driver = &davinci_idle_driver;
 	struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
 
 	device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
@@ -117,33 +123,34 @@
 
 	ddr2_reg_base = pdata->ddr2_ctlr_base;
 
+	/* Wait for interrupt state */
+	driver->states[0].enter = davinci_enter_idle;
+	driver->states[0].exit_latency = 1;
+	driver->states[0].target_residency = 10000;
+	driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[0].name, "WFI");
+	strcpy(driver->states[0].desc, "Wait for interrupt");
+
+	/* Wait for interrupt and DDR self refresh state */
+	driver->states[1].enter = davinci_enter_idle;
+	driver->states[1].exit_latency = 10;
+	driver->states[1].target_residency = 10000;
+	driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[1].name, "DDR SR");
+	strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
+	if (pdata->ddr2_pdown)
+		davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
+	cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
+
+	device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+	driver->state_count = DAVINCI_CPUIDLE_MAX_STATES;
+
 	ret = cpuidle_register_driver(&davinci_idle_driver);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register driver\n");
 		return ret;
 	}
 
-	/* Wait for interrupt state */
-	device->states[0].enter = davinci_enter_idle;
-	device->states[0].exit_latency = 1;
-	device->states[0].target_residency = 10000;
-	device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[0].name, "WFI");
-	strcpy(device->states[0].desc, "Wait for interrupt");
-
-	/* Wait for interrupt and DDR self refresh state */
-	device->states[1].enter = davinci_enter_idle;
-	device->states[1].exit_latency = 10;
-	device->states[1].target_residency = 10000;
-	device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[1].name, "DDR SR");
-	strcpy(device->states[1].desc, "WFI and DDR Self Refresh");
-	if (pdata->ddr2_pdown)
-		davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
-	cpuidle_set_statedata(&device->states[1], &davinci_states[1]);
-
-	device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
-
 	ret = cpuidle_register_device(device);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register device\n");
diff --git a/arch/arm/mach-davinci/include/mach/nand.h b/arch/arm/mach-davinci/include/mach/nand.h
index 0251510..1cf555a 100644
--- a/arch/arm/mach-davinci/include/mach/nand.h
+++ b/arch/arm/mach-davinci/include/mach/nand.h
@@ -74,8 +74,10 @@
 	nand_ecc_modes_t	ecc_mode;
 	u8			ecc_bits;
 
-	/* e.g. NAND_BUSWIDTH_16 or NAND_USE_FLASH_BBT */
+	/* e.g. NAND_BUSWIDTH_16 */
 	unsigned		options;
+	/* e.g. NAND_BBT_USE_FLASH */
+	unsigned		bbt_options;
 
 	/* Main and mirror bbt descriptor overrides */
 	struct nand_bbt_descr	*bbt_td;
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 1ade3c3..8b2f143 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -116,8 +116,9 @@
 		.mask_flags	= MTD_WRITEABLE,	/* force read-only */
 	}, {
 		.name		= "Linux",
-		.offset		= MTDPART_OFS_APPEND,
-		.size		= 0,			/* filled in later */
+		.offset		= MTDPART_OFS_RETAIN,
+		.size		= TS72XX_REDBOOT_PART_SIZE,
+				/* leave this much room for the last partition */
 	}, {
 		.name		= "RedBoot",
 		.offset		= MTDPART_OFS_APPEND,
@@ -126,28 +127,14 @@
 	},
 };
 
-static void ts72xx_nand_set_parts(uint64_t size,
-				  struct platform_nand_chip *chip)
-{
-	/* Factory TS-72xx boards only come with 32MiB or 128MiB NAND options */
-	if (size == SZ_32M || size == SZ_128M) {
-		/* Set the "Linux" partition size */
-		ts72xx_nand_parts[1].size = size - TS72XX_REDBOOT_PART_SIZE;
-
-		chip->partitions = ts72xx_nand_parts;
-		chip->nr_partitions = ARRAY_SIZE(ts72xx_nand_parts);
-	} else {
-		pr_warning("Unknown nand disk size:%lluMiB\n", size >> 20);
-	}
-}
-
 static struct platform_nand_data ts72xx_nand_data = {
 	.chip = {
 		.nr_chips	= 1,
 		.chip_offset	= 0,
 		.chip_delay	= 15,
 		.part_probe_types = ts72xx_nand_part_probes,
-		.set_parts	= ts72xx_nand_set_parts,
+		.partitions	= ts72xx_nand_parts,
+		.nr_partitions	= ARRAY_SIZE(ts72xx_nand_parts),
 	},
 	.ctrl = {
 		.cmd_ctrl	= ts72xx_nand_hwcontrol,
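
The "Linux" partition above relies on the new MTDPART_OFS_RETAIN semantics: the partition is appended after the previous one and takes all remaining space except .size bytes, which are retained for the partitions that follow. That is what lets the static table replace the old set_parts() callback for both the 32 MiB and 128 MiB chip options. A hedged, generic sketch:

	#include <linux/mtd/partitions.h>

	/* Sketch: with MTDPART_OFS_RETAIN, .size is the amount of flash to
	 * leave for later partitions, so one table fits any chip size. */
	static struct mtd_partition example_parts[] = {
		{
			.name	= "kernel",
			.offset	= 0,
			.size	= SZ_4M,
		}, {
			.name	= "rootfs",
			.offset	= MTDPART_OFS_RETAIN,	/* append, but... */
			.size	= SZ_1M,		/* ...retain 1 MiB */
		}, {
			.name	= "config",
			.offset	= MTDPART_OFS_APPEND,
			.size	= MTDPART_SIZ_FULL,	/* the retained 1 MiB */
		},
	};
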
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index bf7e96f..35f6502 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -16,7 +16,8 @@
 #include <asm/proc-fns.h>
 
 static int exynos4_enter_idle(struct cpuidle_device *dev,
-			      struct cpuidle_state *state);
+			struct cpuidle_driver *drv,
+			      int index);
 
 static struct cpuidle_state exynos4_cpuidle_set[] = {
 	[0] = {
@@ -37,7 +38,8 @@
 };
 
 static int exynos4_enter_idle(struct cpuidle_device *dev,
-			      struct cpuidle_state *state)
+				struct cpuidle_driver *drv,
+			      int index)
 {
 	struct timeval before, after;
 	int idle_time;
@@ -52,29 +54,31 @@
 	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
 		    (after.tv_usec - before.tv_usec);
 
-	return idle_time;
+	dev->last_residency = idle_time;
+	return index;
 }
 
 static int __init exynos4_init_cpuidle(void)
 {
 	int i, max_cpuidle_state, cpu_id;
 	struct cpuidle_device *device;
+	struct cpuidle_driver *drv = &exynos4_idle_driver;
 
+	/* Setup cpuidle driver */
+	drv->state_count = (sizeof(exynos4_cpuidle_set) /
+				       sizeof(struct cpuidle_state));
+	max_cpuidle_state = drv->state_count;
+	for (i = 0; i < max_cpuidle_state; i++) {
+		memcpy(&drv->states[i], &exynos4_cpuidle_set[i],
+				sizeof(struct cpuidle_state));
+	}
 	cpuidle_register_driver(&exynos4_idle_driver);
 
 	for_each_cpu(cpu_id, cpu_online_mask) {
 		device = &per_cpu(exynos4_cpuidle_device, cpu_id);
 		device->cpu = cpu_id;
 
-		device->state_count = (sizeof(exynos4_cpuidle_set) /
-					       sizeof(struct cpuidle_state));
-
-		max_cpuidle_state = device->state_count;
-
-		for (i = 0; i < max_cpuidle_state; i++) {
-			memcpy(&device->states[i], &exynos4_cpuidle_set[i],
-					sizeof(struct cpuidle_state));
-		}
+		device->state_count = drv->state_count;
 
 		if (cpuidle_register_device(device)) {
 			printk(KERN_ERR "CPUidle register device failed\n");
diff --git a/arch/arm/mach-kirkwood/cpuidle.c b/arch/arm/mach-kirkwood/cpuidle.c
index 864e569..7088180 100644
--- a/arch/arm/mach-kirkwood/cpuidle.c
+++ b/arch/arm/mach-kirkwood/cpuidle.c
@@ -33,17 +33,18 @@
 
 /* Actual code that puts the SoC in different idle states */
 static int kirkwood_enter_idle(struct cpuidle_device *dev,
-			       struct cpuidle_state *state)
+				struct cpuidle_driver *drv,
+			       int index)
 {
 	struct timeval before, after;
 	int idle_time;
 
 	local_irq_disable();
 	do_gettimeofday(&before);
-	if (state == &dev->states[0])
+	if (index == 0)
 		/* Wait for interrupt state */
 		cpu_do_idle();
-	else if (state == &dev->states[1]) {
+	else if (index == 1) {
 		/*
 		 * Following write will put DDR in self refresh.
 		 * Note that we have 256 cycles before DDR puts it
@@ -58,35 +59,40 @@
 	local_irq_enable();
 	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
 			(after.tv_usec - before.tv_usec);
-	return idle_time;
+
+	/* Update last residency */
+	dev->last_residency = idle_time;
+
+	return index;
 }
 
 /* Initialize CPU idle by registering the idle states */
 static int kirkwood_init_cpuidle(void)
 {
 	struct cpuidle_device *device;
-
-	cpuidle_register_driver(&kirkwood_idle_driver);
+	struct cpuidle_driver *driver = &kirkwood_idle_driver;
 
 	device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
 	device->state_count = KIRKWOOD_MAX_STATES;
+	driver->state_count = KIRKWOOD_MAX_STATES;
 
 	/* Wait for interrupt state */
-	device->states[0].enter = kirkwood_enter_idle;
-	device->states[0].exit_latency = 1;
-	device->states[0].target_residency = 10000;
-	device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[0].name, "WFI");
-	strcpy(device->states[0].desc, "Wait for interrupt");
+	driver->states[0].enter = kirkwood_enter_idle;
+	driver->states[0].exit_latency = 1;
+	driver->states[0].target_residency = 10000;
+	driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[0].name, "WFI");
+	strcpy(driver->states[0].desc, "Wait for interrupt");
 
 	/* Wait for interrupt and DDR self refresh state */
-	device->states[1].enter = kirkwood_enter_idle;
-	device->states[1].exit_latency = 10;
-	device->states[1].target_residency = 10000;
-	device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
-	strcpy(device->states[1].name, "DDR SR");
-	strcpy(device->states[1].desc, "WFI and DDR Self Refresh");
+	driver->states[1].enter = kirkwood_enter_idle;
+	driver->states[1].exit_latency = 10;
+	driver->states[1].target_residency = 10000;
+	driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
+	strcpy(driver->states[1].name, "DDR SR");
+	strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
 
+	cpuidle_register_driver(&kirkwood_idle_driver);
 	if (cpuidle_register_device(device)) {
 		printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n");
 		return -EIO;
diff --git a/arch/arm/mach-mmp/aspenite.c b/arch/arm/mach-mmp/aspenite.c
index 06b5ad7..7a60bbb 100644
--- a/arch/arm/mach-mmp/aspenite.c
+++ b/arch/arm/mach-mmp/aspenite.c
@@ -167,8 +167,9 @@
 
 static struct pxa3xx_nand_platform_data aspenite_nand_info = {
 	.enable_arbiter	= 1,
-	.parts		= aspenite_nand_partitions,
-	.nr_parts	= ARRAY_SIZE(aspenite_nand_partitions),
+	.num_cs = 1,
+	.parts[0]	= aspenite_nand_partitions,
+	.nr_parts[0]	= ARRAY_SIZE(aspenite_nand_partitions),
 };
 
 static struct i2c_board_info aspenite_i2c_info[] __initdata = {
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 4bf6e6e..1fe35c2 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -88,17 +88,21 @@
 /**
  * omap3_enter_idle - Programs OMAP3 to enter the specified state
  * @dev: cpuidle device
- * @state: The target state to be programmed
+ * @drv: cpuidle driver
+ * @index: the index of state to be entered
  *
  * Called from the CPUidle framework to program the device to the
  * specified target state selected by the governor.
  */
 static int omap3_enter_idle(struct cpuidle_device *dev,
-			struct cpuidle_state *state)
+				struct cpuidle_driver *drv,
+				int index)
 {
-	struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
+	struct omap3_idle_statedata *cx =
+			cpuidle_get_statedata(&dev->states_usage[index]);
 	struct timespec ts_preidle, ts_postidle, ts_idle;
 	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
+	int idle_time;
 
 	/* Used to keep track of the total time in idle */
 	getnstimeofday(&ts_preidle);
@@ -113,7 +117,7 @@
 		goto return_sleep_time;
 
 	/* Deny idle for C1 */
-	if (state == &dev->states[0]) {
+	if (index == 0) {
 		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
 		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
 	}
@@ -122,7 +126,7 @@
 	omap_sram_idle();
 
 	/* Re-allow idle for C1 */
-	if (state == &dev->states[0]) {
+	if (index == 0) {
 		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
 		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
 	}
@@ -134,28 +138,38 @@
 	local_irq_enable();
 	local_fiq_enable();
 
-	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
+	idle_time = ts_idle.tv_nsec / NSEC_PER_USEC +
+			ts_idle.tv_sec * USEC_PER_SEC;
+
+	/* Update cpuidle counters */
+	dev->last_residency = idle_time;
+
+	return index;
 }
 
 /**
  * next_valid_state - Find next valid C-state
  * @dev: cpuidle device
- * @state: Currently selected C-state
+ * @drv: cpuidle driver
+ * @index: Index of currently selected c-state
  *
- * If the current state is valid, it is returned back to the caller.
- * Else, this function searches for a lower c-state which is still
- * valid.
+ * If the state corresponding to index is valid, index is returned to
+ * the caller. Otherwise, this function searches for a lower c-state which
+ * is still valid (as defined in omap3_power_states[]) and returns its index.
  *
  * A state is valid if the 'valid' field is enabled and
  * if it satisfies the enable_off_mode condition.
  */
-static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
-					      struct cpuidle_state *curr)
+static int next_valid_state(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv,
+				int index)
 {
-	struct cpuidle_state *next = NULL;
-	struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr);
+	struct cpuidle_state_usage *curr_usage = &dev->states_usage[index];
+	struct cpuidle_state *curr = &drv->states[index];
+	struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr_usage);
 	u32 mpu_deepest_state = PWRDM_POWER_RET;
 	u32 core_deepest_state = PWRDM_POWER_RET;
+	int next_index = -1;
 
 	if (enable_off_mode) {
 		mpu_deepest_state = PWRDM_POWER_OFF;
@@ -172,20 +186,20 @@
 	if ((cx->valid) &&
 	    (cx->mpu_state >= mpu_deepest_state) &&
 	    (cx->core_state >= core_deepest_state)) {
-		return curr;
+		return index;
 	} else {
 		int idx = OMAP3_NUM_STATES - 1;
 
 		/* Reach the current state starting at highest C-state */
 		for (; idx >= 0; idx--) {
-			if (&dev->states[idx] == curr) {
-				next = &dev->states[idx];
+			if (&drv->states[idx] == curr) {
+				next_index = idx;
 				break;
 			}
 		}
 
 		/* Should never hit this condition */
-		WARN_ON(next == NULL);
+		WARN_ON(next_index == -1);
 
 		/*
 		 * Drop to next valid state.
@@ -193,41 +207,44 @@
 		 */
 		idx--;
 		for (; idx >= 0; idx--) {
-			cx = cpuidle_get_statedata(&dev->states[idx]);
+			cx = cpuidle_get_statedata(&dev->states_usage[idx]);
 			if ((cx->valid) &&
 			    (cx->mpu_state >= mpu_deepest_state) &&
 			    (cx->core_state >= core_deepest_state)) {
-				next = &dev->states[idx];
+				next_index = idx;
 				break;
 			}
 		}
 		/*
 		 * C1 is always valid.
-		 * So, no need to check for 'next==NULL' outside this loop.
+		 * So, no need to check for 'next_index == -1' outside
+		 * this loop.
 		 */
 	}
 
-	return next;
+	return next_index;
 }
 
 /**
  * omap3_enter_idle_bm - Checks for any bus activity
  * @dev: cpuidle device
- * @state: The target state to be programmed
+ * @drv: cpuidle driver
+ * @index: array index of target state to be programmed
  *
  * This function checks for any pending activity and then programs
  * the device to the specified or a safer state.
  */
 static int omap3_enter_idle_bm(struct cpuidle_device *dev,
-			       struct cpuidle_state *state)
+				struct cpuidle_driver *drv,
+			       int index)
 {
-	struct cpuidle_state *new_state;
+	int new_state_idx;
 	u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
 	struct omap3_idle_statedata *cx;
 	int ret;
 
 	if (!omap3_can_sleep()) {
-		new_state = dev->safe_state;
+		new_state_idx = drv->safe_state_index;
 		goto select_state;
 	}
 
@@ -237,7 +254,7 @@
 	 */
 	cam_state = pwrdm_read_pwrst(cam_pd);
 	if (cam_state == PWRDM_POWER_ON) {
-		new_state = dev->safe_state;
+		new_state_idx = drv->safe_state_index;
 		goto select_state;
 	}
 
@@ -253,7 +270,7 @@
 	 * Prevent PER off if CORE is not in retention or off as this
 	 * would disable PER wakeups completely.
 	 */
-	cx = cpuidle_get_statedata(state);
+	cx = cpuidle_get_statedata(&dev->states_usage[index]);
 	core_next_state = cx->core_state;
 	per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
 	if ((per_next_state == PWRDM_POWER_OFF) &&
@@ -264,11 +281,10 @@
 	if (per_next_state != per_saved_state)
 		pwrdm_set_next_pwrst(per_pd, per_next_state);
 
-	new_state = next_valid_state(dev, state);
+	new_state_idx = next_valid_state(dev, drv, index);
 
 select_state:
-	dev->last_state = new_state;
-	ret = omap3_enter_idle(dev, new_state);
+	ret = omap3_enter_idle(dev, drv, new_state_idx);
 
 	/* Restore original PER state if it was modified */
 	if (per_next_state != per_saved_state)
@@ -301,22 +317,31 @@
 	.owner = 	THIS_MODULE,
 };
 
-/* Helper to fill the C-state common data and register the driver_data */
-static inline struct omap3_idle_statedata *_fill_cstate(
-					struct cpuidle_device *dev,
+/* Helper to fill the C-state common data */
+static inline void _fill_cstate(struct cpuidle_driver *drv,
 					int idx, const char *descr)
 {
-	struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
-	struct cpuidle_state *state = &dev->states[idx];
+	struct cpuidle_state *state = &drv->states[idx];
 
 	state->exit_latency	= cpuidle_params_table[idx].exit_latency;
 	state->target_residency	= cpuidle_params_table[idx].target_residency;
 	state->flags		= CPUIDLE_FLAG_TIME_VALID;
 	state->enter		= omap3_enter_idle_bm;
-	cx->valid		= cpuidle_params_table[idx].valid;
 	sprintf(state->name, "C%d", idx + 1);
 	strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
-	cpuidle_set_statedata(state, cx);
+
+}
+
+/* Helper to register the driver_data */
+static inline struct omap3_idle_statedata *_fill_cstate_usage(
+					struct cpuidle_device *dev,
+					int idx)
+{
+	struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];
+
+	cx->valid		= cpuidle_params_table[idx].valid;
+	cpuidle_set_statedata(state_usage, cx);
 
 	return cx;
 }
@@ -330,6 +355,7 @@
 int __init omap3_idle_init(void)
 {
 	struct cpuidle_device *dev;
+	struct cpuidle_driver *drv = &omap3_idle_driver;
 	struct omap3_idle_statedata *cx;
 
 	mpu_pd = pwrdm_lookup("mpu_pwrdm");
@@ -337,44 +363,52 @@
 	per_pd = pwrdm_lookup("per_pwrdm");
 	cam_pd = pwrdm_lookup("cam_pwrdm");
 
-	cpuidle_register_driver(&omap3_idle_driver);
+
+	drv->safe_state_index = -1;
 	dev = &per_cpu(omap3_idle_dev, smp_processor_id());
 
 	/* C1 . MPU WFI + Core active */
-	cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
-	(&dev->states[0])->enter = omap3_enter_idle;
-	dev->safe_state = &dev->states[0];
+	_fill_cstate(drv, 0, "MPU ON + CORE ON");
+	(&drv->states[0])->enter = omap3_enter_idle;
+	drv->safe_state_index = 0;
+	cx = _fill_cstate_usage(dev, 0);
 	cx->valid = 1;	/* C1 is always valid */
 	cx->mpu_state = PWRDM_POWER_ON;
 	cx->core_state = PWRDM_POWER_ON;
 
 	/* C2 . MPU WFI + Core inactive */
-	cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
+	_fill_cstate(drv, 1, "MPU ON + CORE ON");
+	cx = _fill_cstate_usage(dev, 1);
 	cx->mpu_state = PWRDM_POWER_ON;
 	cx->core_state = PWRDM_POWER_ON;
 
 	/* C3 . MPU CSWR + Core inactive */
-	cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
+	_fill_cstate(drv, 2, "MPU RET + CORE ON");
+	cx = _fill_cstate_usage(dev, 2);
 	cx->mpu_state = PWRDM_POWER_RET;
 	cx->core_state = PWRDM_POWER_ON;
 
 	/* C4 . MPU OFF + Core inactive */
-	cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
+	_fill_cstate(drv, 3, "MPU OFF + CORE ON");
+	cx = _fill_cstate_usage(dev, 3);
 	cx->mpu_state = PWRDM_POWER_OFF;
 	cx->core_state = PWRDM_POWER_ON;
 
 	/* C5 . MPU RET + Core RET */
-	cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
+	_fill_cstate(drv, 4, "MPU RET + CORE RET");
+	cx = _fill_cstate_usage(dev, 4);
 	cx->mpu_state = PWRDM_POWER_RET;
 	cx->core_state = PWRDM_POWER_RET;
 
 	/* C6 . MPU OFF + Core RET */
-	cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
+	_fill_cstate(drv, 5, "MPU OFF + CORE RET");
+	cx = _fill_cstate_usage(dev, 5);
 	cx->mpu_state = PWRDM_POWER_OFF;
 	cx->core_state = PWRDM_POWER_RET;
 
 	/* C7 . MPU OFF + Core OFF */
-	cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
+	_fill_cstate(drv, 6, "MPU OFF + CORE OFF");
+	cx = _fill_cstate_usage(dev, 6);
 	/*
 	 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
 	 * enable OFF mode in a stable form for previous revisions.
@@ -388,6 +422,9 @@
 	cx->mpu_state = PWRDM_POWER_OFF;
 	cx->core_state = PWRDM_POWER_OFF;
 
+	drv->state_count = OMAP3_NUM_STATES;
+	cpuidle_register_driver(&omap3_idle_driver);
+
 	dev->state_count = OMAP3_NUM_STATES;
 	if (cpuidle_register_device(dev)) {
 		printk(KERN_ERR "%s: CPUidle register device failed\n",
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 6c75cd3..b35e200 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -275,7 +275,7 @@
 		.partitions		= ts78xx_ts_nand_parts,
 		.nr_partitions		= ARRAY_SIZE(ts78xx_ts_nand_parts),
 		.chip_delay		= 15,
-		.options		= NAND_USE_FLASH_BBT,
+		.bbt_options		= NAND_BBT_USE_FLASH,
 	},
 	.ctrl	= {
 		/*
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index 3a7387f..e096bba 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -424,8 +424,9 @@
 static struct pxa3xx_nand_platform_data cm_x300_nand_info = {
 	.enable_arbiter	= 1,
 	.keep_config	= 1,
-	.parts		= cm_x300_nand_partitions,
-	.nr_parts	= ARRAY_SIZE(cm_x300_nand_partitions),
+	.num_cs		= 1,
+	.parts[0]	= cm_x300_nand_partitions,
+	.nr_parts[0]	= ARRAY_SIZE(cm_x300_nand_partitions),
 };
 
 static void __init cm_x300_init_nand(void)
diff --git a/arch/arm/mach-pxa/colibri-pxa3xx.c b/arch/arm/mach-pxa/colibri-pxa3xx.c
index 3f9be41..2b8ca0d 100644
--- a/arch/arm/mach-pxa/colibri-pxa3xx.c
+++ b/arch/arm/mach-pxa/colibri-pxa3xx.c
@@ -139,8 +139,9 @@
 static struct pxa3xx_nand_platform_data colibri_nand_info = {
 	.enable_arbiter	= 1,
 	.keep_config	= 1,
-	.parts		= colibri_nand_partitions,
-	.nr_parts	= ARRAY_SIZE(colibri_nand_partitions),
+	.num_cs		= 1,
+	.parts[0]	= colibri_nand_partitions,
+	.nr_parts[0]	= ARRAY_SIZE(colibri_nand_partitions),
 };
 
 void __init colibri_pxa3xx_init_nand(void)
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 0037e57..7b324ec 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -325,8 +325,9 @@
 
 static struct pxa3xx_nand_platform_data littleton_nand_info = {
 	.enable_arbiter	= 1,
-	.parts		= littleton_nand_partitions,
-	.nr_parts	= ARRAY_SIZE(littleton_nand_partitions),
+	.num_cs		= 1,
+	.parts[0]	= littleton_nand_partitions,
+	.nr_parts[0]	= ARRAY_SIZE(littleton_nand_partitions),
 };
 
 static void __init littleton_init_nand(void)
diff --git a/arch/arm/mach-pxa/mxm8x10.c b/arch/arm/mach-pxa/mxm8x10.c
index b5a8fd3..90928d6 100644
--- a/arch/arm/mach-pxa/mxm8x10.c
+++ b/arch/arm/mach-pxa/mxm8x10.c
@@ -389,10 +389,11 @@
 };
 
 static struct pxa3xx_nand_platform_data mxm_8x10_nand_info = {
-	.enable_arbiter = 1,
-	.keep_config = 1,
-	.parts = mxm_8x10_nand_partitions,
-	.nr_parts = ARRAY_SIZE(mxm_8x10_nand_partitions)
+	.enable_arbiter	= 1,
+	.keep_config	= 1,
+	.num_cs		= 1,
+	.parts[0]	= mxm_8x10_nand_partitions,
+	.nr_parts[0]	= ARRAY_SIZE(mxm_8x10_nand_partitions)
 };
 
 static void __init mxm_8x10_nand_init(void)
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index 6810cdd..f0c05f4d 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -346,8 +346,9 @@
 static struct pxa3xx_nand_platform_data raumfeld_nand_info = {
 	.enable_arbiter	= 1,
 	.keep_config	= 1,
-	.parts		= raumfeld_nand_partitions,
-	.nr_parts	= ARRAY_SIZE(raumfeld_nand_partitions),
+	.num_cs		= 1,
+	.parts[0]	= raumfeld_nand_partitions,
+	.nr_parts[0]	= ARRAY_SIZE(raumfeld_nand_partitions),
 };
 
 /**
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 31d4968..6c39c33 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -366,8 +366,9 @@
 
 static struct pxa3xx_nand_platform_data zylonite_nand_info = {
 	.enable_arbiter	= 1,
-	.parts		= zylonite_nand_partitions,
-	.nr_parts	= ARRAY_SIZE(zylonite_nand_partitions),
+	.num_cs		= 1,
+	.parts[0]	= zylonite_nand_partitions,
+	.nr_parts[0]	= ARRAY_SIZE(zylonite_nand_partitions),
 };
 
 static void __init zylonite_init_nand(void)
diff --git a/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h b/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
index 442301f..c42f39f 100644
--- a/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
+++ b/arch/arm/plat-pxa/include/plat/pxa3xx_nand.h
@@ -41,6 +41,19 @@
 	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
 };
 
+/*
+ * The current pxa3xx_nand controller has two chip selects, and
+ * both of them are usable.
+ *
+ * Note:
+ * When using both chip selects, do not enable the keep-configuration
+ * feature, because each chip select may be wired to a different
+ * NAND chip. Differing page sizes and timing requirements make a
+ * single kept configuration impossible.
+ */
+
+/* The maximum number of chip selects currently supported */
+#define NUM_CHIP_SELECT		(2)
 struct pxa3xx_nand_platform_data {
 
 	/* the data flash bus is shared between the Static Memory
@@ -52,8 +65,11 @@
 	/* allow platform code to keep OBM/bootloader defined NFC config */
 	int	keep_config;
 
-	const struct mtd_partition		*parts;
-	unsigned int				nr_parts;
+	/* indicate how many chip selects will be used */
+	int	num_cs;
+
+	const struct mtd_partition		*parts[NUM_CHIP_SELECT];
+	unsigned int				nr_parts[NUM_CHIP_SELECT];
 
 	const struct pxa3xx_nand_flash * 	flash;
 	size_t					num_flash;
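
Every board converted in this merge uses a single chip select (num_cs = 1, slot 0 only). A board actually wiring both chip selects would fill both array slots and, per the comment above, leave keep_config off. A hypothetical sketch (the two partition tables are assumed to be defined elsewhere):

	/* Hypothetical two-chip-select setup; keep_config must stay off
	 * when the chip selects carry different NAND parts. */
	static struct pxa3xx_nand_platform_data example_nand_info = {
		.enable_arbiter	= 1,
		.keep_config	= 0,
		.num_cs		= 2,
		.parts[0]	= cs0_nand_partitions,
		.nr_parts[0]	= ARRAY_SIZE(cs0_nand_partitions),
		.parts[1]	= cs1_nand_partitions,
		.nr_parts[1]	= ARRAY_SIZE(cs1_nand_partitions),
	};
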
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
index 6e3d9ab..93a994a 100644
--- a/arch/arm/plat-samsung/dma-ops.c
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -14,6 +14,7 @@
 #include <linux/errno.h>
 #include <linux/amba/pl330.h>
 #include <linux/scatterlist.h>
+#include <linux/export.h>
 
 #include <mach/dma.h>
 
diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c
index 582333c..7814949 100644
--- a/arch/arm/plat-samsung/s3c-dma-ops.c
+++ b/arch/arm/plat-samsung/s3c-dma-ops.c
@@ -14,6 +14,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/export.h>
 
 #include <mach/dma.h>
 
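
The two Samsung hunks above only add linux/export.h: since the module.h disintegration in this cycle, files that use EXPORT_SYMBOL*() without needing full module machinery are expected to include it explicitly instead of inheriting it through other headers. A minimal sketch with a hypothetical helper:

	#include <linux/export.h>	/* for EXPORT_SYMBOL_GPL() */

	/* Hypothetical exported helper: the EXPORT_SYMBOL_GPL() below is
	 * what makes the explicit linux/export.h include necessary. */
	int example_dma_helper(void)
	{
		return 0;
	}
	EXPORT_SYMBOL_GPL(example_dma_helper);
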
diff --git a/arch/avr32/boards/atngw100/setup.c b/arch/avr32/boards/atngw100/setup.c
index fafed4c..1f17bde 100644
--- a/arch/avr32/boards/atngw100/setup.c
+++ b/arch/avr32/boards/atngw100/setup.c
@@ -90,11 +90,6 @@
 	},
 };
 
-static struct mtd_partition *nand_part_info(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(nand_partitions);
-	return nand_partitions;
-}
 
 static struct atmel_nand_data atngw100mkii_nand_data __initdata = {
 	.cle		= 21,
@@ -102,7 +97,8 @@
 	.rdy_pin	= GPIO_PIN_PB(28),
 	.enable_pin	= GPIO_PIN_PE(23),
 	.bus_width_16	= true,
-	.partition_info	= nand_part_info,
+	.parts		= nand_partitions,
+	.num_parts	= ARRAY_SIZE(nand_partitions),
 };
 #endif
 
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 6ce30fb..4643ff5 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -90,18 +90,13 @@
 	},
 };
 
-static struct mtd_partition *nand_part_info(int size, int *num_partitions)
-{
-	*num_partitions = ARRAY_SIZE(nand_partitions);
-	return nand_partitions;
-}
-
 static struct atmel_nand_data atstk1006_nand_data __initdata = {
 	.cle		= 21,
 	.ale		= 22,
 	.rdy_pin	= GPIO_PIN_PB(30),
 	.enable_pin	= GPIO_PIN_PB(29),
-	.partition_info	= nand_part_info,
+	.parts		= nand_partitions,
+	.num_parts	= ARRAY_SIZE(nand_partitions),
 };
 #endif
 
diff --git a/arch/avr32/mach-at32ap/include/mach/board.h b/arch/avr32/mach-at32ap/include/mach/board.h
index 679458d..5d7ffca 100644
--- a/arch/avr32/mach-at32ap/include/mach/board.h
+++ b/arch/avr32/mach-at32ap/include/mach/board.h
@@ -128,7 +128,8 @@
 	u8	ale;		/* address line number connected to ALE */
 	u8	cle;		/* address line number connected to CLE */
 	u8	bus_width_16;	/* buswidth is 16 bit */
-	struct mtd_partition *(*partition_info)(int size, int *num_partitions);
+	struct mtd_partition *parts;
+	unsigned int	num_parts;
 };
 struct platform_device *
 at32_add_device_nand(unsigned int id, struct atmel_nand_data *data);
diff --git a/arch/cris/arch-v32/drivers/mach-a3/nandflash.c b/arch/cris/arch-v32/drivers/mach-a3/nandflash.c
index f58f2c1..7fb5212 100644
--- a/arch/cris/arch-v32/drivers/mach-a3/nandflash.c
+++ b/arch/cris/arch-v32/drivers/mach-a3/nandflash.c
@@ -163,7 +163,7 @@
 	this->ecc.mode = NAND_ECC_SOFT;
 
 	/* Enable the following for a flash based bad block table */
-	/* this->options = NAND_USE_FLASH_BBT; */
+	/* this->bbt_options = NAND_BBT_USE_FLASH; */
 
 	/* Scan to find existence of the device */
 	if (nand_scan(crisv32_mtd, 1)) {
diff --git a/arch/cris/arch-v32/drivers/mach-fs/nandflash.c b/arch/cris/arch-v32/drivers/mach-fs/nandflash.c
index d5b0cc9..e032384 100644
--- a/arch/cris/arch-v32/drivers/mach-fs/nandflash.c
+++ b/arch/cris/arch-v32/drivers/mach-fs/nandflash.c
@@ -154,7 +154,7 @@
 	this->ecc.mode = NAND_ECC_SOFT;
 
 	/* Enable the following for a flash based bad block table */
-	/* this->options = NAND_USE_FLASH_BBT; */
+	/* this->bbt_options = NAND_BBT_USE_FLASH; */
 
 	/* Scan to find existence of the device */
 	if (nand_scan(crisv32_mtd, 1)) {
diff --git a/arch/ia64/include/asm/xen/interface.h b/arch/ia64/include/asm/xen/interface.h
index e951e74..1d2427d 100644
--- a/arch/ia64/include/asm/xen/interface.h
+++ b/arch/ia64/include/asm/xen/interface.h
@@ -76,6 +76,7 @@
 DEFINE_GUEST_HANDLE(int);
 DEFINE_GUEST_HANDLE(long);
 DEFINE_GUEST_HANDLE(void);
+DEFINE_GUEST_HANDLE(uint64_t);
 
 typedef unsigned long xen_pfn_t;
 DEFINE_GUEST_HANDLE(xen_pfn_t);
diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
index 44cc06b..b987ab2 100644
--- a/arch/s390/kernel/jump_label.c
+++ b/arch/s390/kernel/jump_label.c
@@ -18,26 +18,15 @@
 } __packed;
 
 struct insn_args {
-	unsigned long *target;
-	struct insn *insn;
-	ssize_t size;
+	struct jump_entry *entry;
+	enum jump_label_type type;
 };
 
-static int __arch_jump_label_transform(void *data)
+static void __jump_label_transform(struct jump_entry *entry,
+				   enum jump_label_type type)
 {
-	struct insn_args *args = data;
-	int rc;
-
-	rc = probe_kernel_write(args->target, args->insn, args->size);
-	WARN_ON_ONCE(rc < 0);
-	return 0;
-}
-
-void arch_jump_label_transform(struct jump_entry *entry,
-			       enum jump_label_type type)
-{
-	struct insn_args args;
 	struct insn insn;
+	int rc;
 
 	if (type == JUMP_LABEL_ENABLE) {
 		/* brcl 15,offset */
@@ -49,11 +38,33 @@
 		insn.offset = 0;
 	}
 
-	args.target = (void *) entry->code;
-	args.insn = &insn;
-	args.size = JUMP_LABEL_NOP_SIZE;
+	rc = probe_kernel_write((void *)entry->code, &insn, JUMP_LABEL_NOP_SIZE);
+	WARN_ON_ONCE(rc < 0);
+}
 
-	stop_machine(__arch_jump_label_transform, &args, NULL);
+static int __sm_arch_jump_label_transform(void *data)
+{
+	struct insn_args *args = data;
+
+	__jump_label_transform(args->entry, args->type);
+	return 0;
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+			       enum jump_label_type type)
+{
+	struct insn_args args;
+
+	args.entry = entry;
+	args.type = type;
+
+	stop_machine(__sm_arch_jump_label_transform, &args, NULL);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+				      enum jump_label_type type)
+{
+	__jump_label_transform(entry, type);
 }
 
 #endif
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 7d98f90..1cc257c 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -26,11 +26,12 @@
 };
 
 static int cpuidle_sleep_enter(struct cpuidle_device *dev,
-			       struct cpuidle_state *state)
+				struct cpuidle_driver *drv,
+				int index)
 {
 	unsigned long allowed_mode = arch_hwblk_sleep_mode();
 	ktime_t before, after;
-	int requested_state = state - &dev->states[0];
+	int requested_state = index;
 	int allowed_state;
 	int k;
 
@@ -47,11 +48,13 @@
 	 */
 	k = min_t(int, allowed_state, requested_state);
 
-	dev->last_state = &dev->states[k];
 	before = ktime_get();
 	sh_mobile_call_standby(cpuidle_mode[k]);
 	after = ktime_get();
-	return ktime_to_ns(ktime_sub(after, before)) >> 10;
+
+	dev->last_residency = (int)(ktime_to_ns(ktime_sub(after, before)) >> 10);
+
+	return k;
 }
 
 static struct cpuidle_device cpuidle_dev;
@@ -63,19 +66,19 @@
 void sh_mobile_setup_cpuidle(void)
 {
 	struct cpuidle_device *dev = &cpuidle_dev;
+	struct cpuidle_driver *drv = &cpuidle_driver;
 	struct cpuidle_state *state;
 	int i;
 
-	cpuidle_register_driver(&cpuidle_driver);
 
 	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
-		dev->states[i].name[0] = '\0';
-		dev->states[i].desc[0] = '\0';
+		drv->states[i].name[0] = '\0';
+		drv->states[i].desc[0] = '\0';
 	}
 
 	i = CPUIDLE_DRIVER_STATE_START;
 
-	state = &dev->states[i++];
+	state = &drv->states[i++];
 	snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
 	strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
 	state->exit_latency = 1;
@@ -85,10 +88,10 @@
 	state->flags |= CPUIDLE_FLAG_TIME_VALID;
 	state->enter = cpuidle_sleep_enter;
 
-	dev->safe_state = state;
+	drv->safe_state_index = i - 1;
 
 	if (sh_mobile_sleep_supported & SUSP_SH_SF) {
-		state = &dev->states[i++];
+		state = &drv->states[i++];
 		snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
 		strncpy(state->desc, "SuperH Sleep Mode [SF]",
 			CPUIDLE_DESC_LEN);
@@ -101,7 +104,7 @@
 	}
 
 	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
-		state = &dev->states[i++];
+		state = &drv->states[i++];
 		snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
 		strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
 			CPUIDLE_DESC_LEN);
@@ -113,7 +116,10 @@
 		state->enter = cpuidle_sleep_enter;
 	}
 
+	drv->state_count = i;
 	dev->state_count = i;
 
+	cpuidle_register_driver(&cpuidle_driver);
+
 	cpuidle_register_device(dev);
 }
diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
index ea2dafc..971fd43 100644
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -36,12 +36,4 @@
 	put_online_cpus();
 }
 
-void arch_jump_label_text_poke_early(jump_label_t addr)
-{
-	u32 *insn_p = (u32 *) (unsigned long) addr;
-
-	*insn_p = 0x01000000;
-	flushi(insn_p);
-}
-
 #endif
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 417777d..5728852 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -47,6 +47,7 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/sched.h>
 #include <xen/interface/physdev.h>
+#include <xen/interface/platform.h>
 
 /*
  * The hypercall asms have to meet several constraints:
@@ -301,6 +302,13 @@
 }
 
 static inline int
+HYPERVISOR_dom0_op(struct xen_platform_op *platform_op)
+{
+	platform_op->interface_version = XENPF_INTERFACE_VERSION;
+	return _hypercall1(int, dom0_op, platform_op);
+}
+
+static inline int
 HYPERVISOR_set_debugreg(int reg, unsigned long value)
 {
 	return _hypercall2(int, set_debugreg, reg, value);
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index 5d4922a..a1f2db5 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -55,6 +55,7 @@
 DEFINE_GUEST_HANDLE(int);
 DEFINE_GUEST_HANDLE(long);
 DEFINE_GUEST_HANDLE(void);
+DEFINE_GUEST_HANDLE(uint64_t);
 #endif
 
 #ifndef HYPERVISOR_VIRT_START
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index cacdd46..ea9d5f2f 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -24,8 +24,9 @@
 	} __attribute__((packed));
 };
 
-void arch_jump_label_transform(struct jump_entry *entry,
-			       enum jump_label_type type)
+static void __jump_label_transform(struct jump_entry *entry,
+				   enum jump_label_type type,
+				   void *(*poker)(void *, const void *, size_t))
 {
 	union jump_code_union code;
 
@@ -35,17 +36,24 @@
 				(entry->code + JUMP_LABEL_NOP_SIZE);
 	} else
 		memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
+
+	(*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+			       enum jump_label_type type)
+{
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	text_poke_smp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
+	__jump_label_transform(entry, type, text_poke_smp);
 	mutex_unlock(&text_mutex);
 	put_online_cpus();
 }
 
-void __init_or_module arch_jump_label_text_poke_early(jump_label_t addr)
+void arch_jump_label_transform_static(struct jump_entry *entry,
+				      enum jump_label_type type)
 {
-	text_poke_early((void *)addr, ideal_nops[NOP_ATOMIC5],
-			JUMP_LABEL_NOP_SIZE);
+	__jump_label_transform(entry, type, text_poke_early);
 }
 
 #endif
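The same refactoring pattern, one patching body parameterized by a "poker" function with a full SMP-safe entry point and a cheap variant for code that is not yet live, can be shown in a self-contained userspace analogy (illustrative only, not kernel code; the bytes are x86 NOP5/JMP rel32 placeholders):

	#include <stdio.h>
	#include <string.h>

	/* "early" poker: plain memcpy, no cross-CPU synchronization needed
	 * because nothing can be executing the patched site yet */
	static void *poke_early(void *dst, const void *src, size_t n)
	{
		return memcpy(dst, src, n);
	}

	/* shared body: chooses the bytes, delegates the write to the poker */
	static void patch_site(unsigned char *site, int enable,
			       void *(*poker)(void *, const void *, size_t))
	{
		static const unsigned char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };
		unsigned char jmp5[5] = { 0xe9, 0x00, 0x00, 0x00, 0x00 };

		(*poker)(site, enable ? jmp5 : nop5, sizeof(nop5));
	}

	int main(void)
	{
		unsigned char site[5] = { 0 };

		patch_site(site, 1, poke_early);	/* "static" transform */
		printf("site[0] = %#x\n", site[0]);	/* prints 0xe9 */
		return 0;
	}

An SMP-safe variant supplies a different poker while the shared body stays identical; per the hunks above, the kernel uses text_poke_smp() on x86 and a stop_machine()-serialized probe_kernel_write() on s390.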
diff --git a/arch/x86/platform/mrst/pmu.c b/arch/x86/platform/mrst/pmu.c
index 9281da7..c0ac06d 100644
--- a/arch/x86/platform/mrst/pmu.c
+++ b/arch/x86/platform/mrst/pmu.c
@@ -70,7 +70,7 @@
 /* 24 */ { 0x4110, 0 },			/* Lincroft */
 };
 
-/* n.b. We ignore PCI-id 0x815 in LSS9 b/c MeeGo has no driver for it */
+/* n.b. We ignore PCI-id 0x815 in LSS9 b/c Linux has no driver for it */
 static u16 mrst_lss9_pci_ids[] = {0x080a, 0x0814, 0};
 static u16 mrst_lss10_pci_ids[] = {0x0800, 0x0801, 0x0802, 0x0803,
 					0x0804, 0x0805, 0x080f, 0};
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 163b467..0296a95 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -201,8 +201,22 @@
 
 static int xen_set_wallclock(unsigned long now)
 {
+	struct xen_platform_op op;
+	int rc;
+
 	/* do nothing for domU */
-	return -1;
+	if (!xen_initial_domain())
+		return -1;
+
+	op.cmd = XENPF_settime;
+	op.u.settime.secs = now;
+	op.u.settime.nsecs = 0;
+	op.u.settime.system_time = xen_clocksource_read();
+
+	rc = HYPERVISOR_dom0_op(&op);
+	WARN(rc != 0, "XENPF_settime failed: now=%lu\n", now);
+
+	return rc;
 }
 
 static struct clocksource xen_clocksource __read_mostly = {
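Net effect: only the initial domain now pushes wall-clock updates down to the hypervisor via XENPF_settime; an unprivileged domU keeps returning -1 as before, so the generic code treats the wallclock as read-only there.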
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index 55accb7..cc70f3f 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -269,16 +269,17 @@
 
 	status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
 					ACPI_BITMASK_ALL_FIXED_STATUS);
-	if (ACPI_FAILURE(status)) {
-		goto unlock_and_exit;
-	}
+
+	acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+
+	if (ACPI_FAILURE(status))
+		goto exit;
 
 	/* Clear the GPE Bits in all GPE registers in all GPE blocks */
 
 	status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block, NULL);
 
-      unlock_and_exit:
-	acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+exit:
 	return_ACPI_STATUS(status);
 }
 
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 04ae1c8..cfc0cc1 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -76,7 +76,7 @@
 {
 	struct acpi_iomap *map;
 
-	map = __acpi_find_iomap(paddr, size);
+	map = __acpi_find_iomap(paddr, size/8);
 	if (map)
 		return map->vaddr + (paddr - map->paddr);
 	else
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 437ddbf..9ecec98 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -911,10 +911,7 @@
 	}
 #endif
 
-	status =
-	    acpi_enable_subsystem(~
-				  (ACPI_NO_HARDWARE_INIT |
-				   ACPI_NO_ACPI_ENABLE));
+	status = acpi_enable_subsystem(~ACPI_NO_ACPI_ENABLE);
 	if (ACPI_FAILURE(status)) {
 		printk(KERN_ERR PREFIX "Unable to enable ACPI\n");
 		goto error0;
@@ -935,8 +932,7 @@
 
 	acpi_os_initialize1();
 
-	status =
-	    acpi_enable_subsystem(ACPI_NO_HARDWARE_INIT | ACPI_NO_ACPI_ENABLE);
+	status = acpi_enable_subsystem(ACPI_NO_ACPI_ENABLE);
 	if (ACPI_FAILURE(status)) {
 		printk(KERN_ERR PREFIX
 		       "Unable to start the ACPI Interpreter\n");
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index a4e0f1b..9d7bc9f 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -426,7 +426,7 @@
 
 	if (action == CPU_ONLINE && pr) {
 		acpi_processor_ppc_has_changed(pr, 0);
-		acpi_processor_cst_has_changed(pr);
+		acpi_processor_hotplug(pr);
 		acpi_processor_reevaluate_tstate(pr, action);
 		acpi_processor_tstate_has_changed(pr);
 	}
@@ -503,8 +503,7 @@
 	acpi_processor_get_throttling_info(pr);
 	acpi_processor_get_limit_info(pr);
 
-
-	if (cpuidle_get_driver() == &acpi_idle_driver)
+	if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
 		acpi_processor_power_init(pr, device);
 
 	pr->cdev = thermal_cooling_device_register("Processor", device,
@@ -800,17 +799,9 @@
 
 	memset(&errata, 0, sizeof(errata));
 
-	if (!cpuidle_register_driver(&acpi_idle_driver)) {
-		printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
-			acpi_idle_driver.name);
-	} else {
-		printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
-			cpuidle_get_driver()->name);
-	}
-
 	result = acpi_bus_register_driver(&acpi_processor_driver);
 	if (result < 0)
-		goto out_cpuidle;
+		return result;
 
 	acpi_processor_install_hotplug_notify();
 
@@ -821,11 +812,6 @@
 	acpi_processor_throttling_init();
 
 	return 0;
-
-out_cpuidle:
-	cpuidle_unregister_driver(&acpi_idle_driver);
-
-	return result;
 }
 
 static void __exit acpi_processor_exit(void)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 9b88f98..73b2909 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -741,22 +741,25 @@
 /**
  * acpi_idle_enter_c1 - enters an ACPI C1 state-type
  * @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver containing cpuidle state info
+ * @index: index of target state
  *
  * This is equivalent to the HALT instruction.
  */
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
-			      struct cpuidle_state *state)
+		struct cpuidle_driver *drv, int index)
 {
 	ktime_t  kt1, kt2;
 	s64 idle_time;
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
+		return -EINVAL;
 
 	local_irq_disable();
 
@@ -764,7 +767,7 @@
 	if (acpi_idle_suspend) {
 		local_irq_enable();
 		cpu_relax();
-		return 0;
+		return -EINVAL;
 	}
 
 	lapic_timer_state_broadcast(pr, cx, 1);
@@ -773,37 +776,47 @@
 	kt2 = ktime_get_real();
 	idle_time =  ktime_to_us(ktime_sub(kt2, kt1));
 
+	/* Update device last_residency */
+	dev->last_residency = (int)idle_time;
+
 	local_irq_enable();
 	cx->usage++;
 	lapic_timer_state_broadcast(pr, cx, 0);
 
-	return idle_time;
+	return index;
 }
 
 /**
  * acpi_idle_enter_simple - enters an ACPI state without BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver with cpuidle state information
+ * @index: the index of suggested state
  */
 static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-				  struct cpuidle_state *state)
+		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
 	ktime_t  kt1, kt2;
 	s64 idle_time_ns;
 	s64 idle_time;
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
-
-	if (acpi_idle_suspend)
-		return(acpi_idle_enter_c1(dev, state));
+		return -EINVAL;
 
 	local_irq_disable();
 
+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EINVAL;
+	}
+
+
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
@@ -815,7 +828,7 @@
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -837,6 +850,9 @@
 	idle_time = idle_time_ns;
 	do_div(idle_time, NSEC_PER_USEC);
 
+	/* Update device last_residency */
+	dev->last_residency = (int)idle_time;
+
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -848,7 +864,7 @@
 
 	lapic_timer_state_broadcast(pr, cx, 0);
 	cx->time += idle_time;
-	return idle_time;
+	return index;
 }
 
 static int c3_cpu_count;
@@ -857,37 +873,43 @@
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @drv: cpuidle driver containing state data
+ * @index: the index of suggested state
  *
  * If BM is detected, the deepest non-C3 idle state is entered instead.
  */
 static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-			      struct cpuidle_state *state)
+		struct cpuidle_driver *drv, int index)
 {
 	struct acpi_processor *pr;
-	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
 	ktime_t  kt1, kt2;
 	s64 idle_time_ns;
 	s64 idle_time;
 
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
+		return -EINVAL;
 
-	if (acpi_idle_suspend)
-		return(acpi_idle_enter_c1(dev, state));
+
+	if (acpi_idle_suspend) {
+		cpu_relax();
+		return -EINVAL;
+	}
 
 	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-		if (dev->safe_state) {
-			dev->last_state = dev->safe_state;
-			return dev->safe_state->enter(dev, dev->safe_state);
+		if (drv->safe_state_index >= 0) {
+			return drv->states[drv->safe_state_index].enter(dev,
+						drv, drv->safe_state_index);
 		} else {
 			local_irq_disable();
 			acpi_safe_halt();
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -904,7 +926,7 @@
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -954,6 +976,9 @@
 	idle_time = idle_time_ns;
 	do_div(idle_time, NSEC_PER_USEC);
 
+	/* Update device last_residency */
+	dev->last_residency = (int)idle_time;
+
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -965,7 +990,7 @@
 
 	lapic_timer_state_broadcast(pr, cx, 0);
 	cx->time += idle_time;
-	return idle_time;
+	return index;
 }
 
 struct cpuidle_driver acpi_idle_driver = {
@@ -974,14 +999,16 @@
 };
 
 /**
- * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
+ * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE
+ * device, i.e. per-CPU data
+ *
  * @pr: the ACPI processor
  */
-static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
+static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr)
 {
 	int i, count = CPUIDLE_DRIVER_STATE_START;
 	struct acpi_processor_cx *cx;
-	struct cpuidle_state *state;
+	struct cpuidle_state_usage *state_usage;
 	struct cpuidle_device *dev = &pr->power.dev;
 
 	if (!pr->flags.power_setup_done)
@@ -992,17 +1019,13 @@
 	}
 
 	dev->cpu = pr->id;
-	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
-		dev->states[i].name[0] = '\0';
-		dev->states[i].desc[0] = '\0';
-	}
 
 	if (max_cstate == 0)
 		max_cstate = 1;
 
 	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
 		cx = &pr->power.states[i];
-		state = &dev->states[count];
+		state_usage = &dev->states_usage[count];
 
 		if (!cx->valid)
 			continue;
@@ -1013,8 +1036,64 @@
 		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
 			continue;
 #endif
-		cpuidle_set_statedata(state, cx);
 
+		cpuidle_set_statedata(state_usage, cx);
+
+		count++;
+		if (count == CPUIDLE_STATE_MAX)
+			break;
+	}
+
+	dev->state_count = count;
+
+	if (!count)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
+ * global state data, i.e. idle routines
+ *
+ * @pr: the ACPI processor
+ */
+static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
+{
+	int i, count = CPUIDLE_DRIVER_STATE_START;
+	struct acpi_processor_cx *cx;
+	struct cpuidle_state *state;
+	struct cpuidle_driver *drv = &acpi_idle_driver;
+
+	if (!pr->flags.power_setup_done)
+		return -EINVAL;
+
+	if (pr->flags.power == 0)
+		return -EINVAL;
+
+	drv->safe_state_index = -1;
+	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
+		drv->states[i].name[0] = '\0';
+		drv->states[i].desc[0] = '\0';
+	}
+
+	if (max_cstate == 0)
+		max_cstate = 1;
+
+	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
+		cx = &pr->power.states[i];
+
+		if (!cx->valid)
+			continue;
+
+#ifdef CONFIG_HOTPLUG_CPU
+		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
+		    !pr->flags.has_cst &&
+		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
+			continue;
+#endif
+
+		state = &drv->states[count];
 		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
 		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
 		state->exit_latency = cx->latency;
@@ -1027,13 +1106,13 @@
 				state->flags |= CPUIDLE_FLAG_TIME_VALID;
 
 			state->enter = acpi_idle_enter_c1;
-			dev->safe_state = state;
+			drv->safe_state_index = count;
 			break;
 
 			case ACPI_STATE_C2:
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
-			dev->safe_state = state;
+			drv->safe_state_index = count;
 			break;
 
 			case ACPI_STATE_C3:
@@ -1049,7 +1128,7 @@
 			break;
 	}
 
-	dev->state_count = count;
+	drv->state_count = count;
 
 	if (!count)
 		return -EINVAL;
@@ -1057,7 +1136,7 @@
 	return 0;
 }
 
-int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+int acpi_processor_hotplug(struct acpi_processor *pr)
 {
 	int ret = 0;
 
@@ -1078,7 +1157,7 @@
 	cpuidle_disable_device(&pr->power.dev);
 	acpi_processor_get_power_info(pr);
 	if (pr->flags.power) {
-		acpi_processor_setup_cpuidle(pr);
+		acpi_processor_setup_cpuidle_cx(pr);
 		ret = cpuidle_enable_device(&pr->power.dev);
 	}
 	cpuidle_resume_and_unlock();
@@ -1086,10 +1165,72 @@
 	return ret;
 }
 
+int acpi_processor_cst_has_changed(struct acpi_processor *pr)
+{
+	int cpu;
+	struct acpi_processor *_pr;
+
+	if (disabled_by_idle_boot_param())
+		return 0;
+
+	if (!pr)
+		return -EINVAL;
+
+	if (nocst)
+		return -ENODEV;
+
+	if (!pr->flags.power_setup_done)
+		return -ENODEV;
+
+	/*
+	 * FIXME:  Design the ACPI notification to make it once per
+	 * system instead of once per CPU.  This condition is a hack
+	 * to ensure the code that updates C-states runs only once.
+	 */
+
+	if (smp_processor_id() == 0 &&
+			cpuidle_get_driver() == &acpi_idle_driver) {
+
+		cpuidle_pause_and_lock();
+		/* Protect against cpu-hotplug */
+		get_online_cpus();
+
+		/* Disable all cpuidle devices */
+		for_each_online_cpu(cpu) {
+			_pr = per_cpu(processors, cpu);
+			if (!_pr || !_pr->flags.power_setup_done)
+				continue;
+			cpuidle_disable_device(&_pr->power.dev);
+		}
+
+		/* Populate Updated C-state information */
+		acpi_processor_setup_cpuidle_states(pr);
+
+		/* Enable all cpuidle devices */
+		for_each_online_cpu(cpu) {
+			_pr = per_cpu(processors, cpu);
+			if (!_pr || !_pr->flags.power_setup_done)
+				continue;
+			acpi_processor_get_power_info(_pr);
+			if (_pr->flags.power) {
+				acpi_processor_setup_cpuidle_cx(_pr);
+				cpuidle_enable_device(&_pr->power.dev);
+			}
+		}
+		put_online_cpus();
+		cpuidle_resume_and_unlock();
+	}
+
+	return 0;
+}
+
+static int acpi_processor_registered;
+
 int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
 			      struct acpi_device *device)
 {
 	acpi_status status = 0;
+	int retval;
 	static int first_run;
 
 	if (disabled_by_idle_boot_param())
@@ -1126,9 +1267,26 @@
 	 * platforms that only support C1.
 	 */
 	if (pr->flags.power) {
-		acpi_processor_setup_cpuidle(pr);
-		if (cpuidle_register_device(&pr->power.dev))
-			return -EIO;
+		/* Register acpi_idle_driver if not already registered */
+		if (!acpi_processor_registered) {
+			acpi_processor_setup_cpuidle_states(pr);
+			retval = cpuidle_register_driver(&acpi_idle_driver);
+			if (retval)
+				return retval;
+			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
+					acpi_idle_driver.name);
+		}
+		/* Register per-cpu cpuidle_device. Cpuidle driver
+		 * must already be registered before registering device
+		 */
+		acpi_processor_setup_cpuidle_cx(pr);
+		retval = cpuidle_register_device(&pr->power.dev);
+		if (retval) {
+			if (acpi_processor_registered == 0)
+				cpuidle_unregister_driver(&acpi_idle_driver);
+			return retval;
+		}
+		acpi_processor_registered++;
 	}
 	return 0;
 }
@@ -1139,8 +1297,13 @@
 	if (disabled_by_idle_boot_param())
 		return 0;
 
-	cpuidle_unregister_device(&pr->power.dev);
-	pr->flags.power_setup_done = 0;
+	if (pr->flags.power) {
+		cpuidle_unregister_device(&pr->power.dev);
+		acpi_processor_registered--;
+		if (acpi_processor_registered == 0)
+			cpuidle_unregister_driver(&acpi_idle_driver);
+	}
 
+	pr->flags.power_setup_done = 0;
 	return 0;
 }
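The ordering inside the new acpi_processor_cst_has_changed() above is deliberate: every per-CPU cpuidle device is disabled before the shared state table in the driver is rewritten, and only afterwards is each device set up again and re-enabled, so no CPU can select a C-state whose definition is being changed underneath it.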
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 449c556..8ab80ba 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1062,13 +1062,12 @@
 	if (!id)
 		return;
 
-	id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL);
+	id->id = kstrdup(dev_id, GFP_KERNEL);
 	if (!id->id) {
 		kfree(id);
 		return;
 	}
 
-	strcpy(id->id, dev_id);
 	list_add_tail(&id->list, &device->pnp.ids);
 }
 
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index c538d0e..9f66181 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -706,11 +706,23 @@
 	return;
 }
 
+static ssize_t
+acpi_show_profile(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
+}
+
+static const struct device_attribute pm_profile_attr =
+	__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
+
 int __init acpi_sysfs_init(void)
 {
 	int result;
 
 	result = acpi_tables_sysfs_init();
-
+	if (result)
+		return result;
+	result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);
 	return result;
 }
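Two behavioural changes ride along here: acpi_sysfs_init() now propagates a failure from acpi_tables_sysfs_init() instead of silently ignoring it, and only on success does it go on to register the read-only pm_profile attribute.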
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index db7cb81..106beb1 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -105,7 +105,7 @@
 
 	pipe = usb_sndctrlpipe(udev, 0);
 
-	send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+	send_buf = kmalloc(BULK_SIZE, GFP_KERNEL);
 	if (!send_buf) {
 		BT_ERR("Can't allocate memory chunk for firmware");
 		return -ENOMEM;
@@ -176,7 +176,7 @@
 
 	count = firmware->size;
 
-	send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+	send_buf = kmalloc(BULK_SIZE, GFP_KERNEL);
 	if (!send_buf) {
 		BT_ERR("Can't allocate memory chunk for firmware");
 		return -ENOMEM;
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 8b1b643..54952ab 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -24,6 +24,7 @@
 
 #include <linux/module.h>
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -65,6 +66,7 @@
 	unsigned long		state;
 
 	struct work_struct	work;
+	atomic_t		shutdown;
 
 	struct urb		*urb;
 	unsigned char		*buffer;
@@ -97,6 +99,7 @@
 
 		data->state = BCM203X_SELECT_MEMORY;
 
+		/* use workqueue to have a small delay */
 		schedule_work(&data->work);
 		break;
 
@@ -155,7 +158,10 @@
 	struct bcm203x_data *data =
 		container_of(work, struct bcm203x_data, work);
 
-	if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0)
+	if (atomic_read(&data->shutdown))
+		return;
+
+	if (usb_submit_urb(data->urb, GFP_KERNEL) < 0)
 		BT_ERR("Can't submit URB");
 }
 
@@ -243,6 +249,7 @@
 
 	usb_set_intfdata(intf, data);
 
+	/* use workqueue to have a small delay */
 	schedule_work(&data->work);
 
 	return 0;
@@ -254,6 +261,9 @@
 
 	BT_DBG("intf %p", intf);
 
+	atomic_inc(&data->shutdown);
+	cancel_work_sync(&data->work);
+
 	usb_kill_urb(data->urb);
 
 	usb_set_intfdata(intf, NULL);
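This closes a disconnect race: bcm203x_disconnect() first marks the device as shutting down and flushes the work item, and bcm203x_work() checks the flag before resubmitting, so a work item scheduled from the completion handler can no longer resubmit the URB after disconnect has begun.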
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 005919a..61b5914 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -568,22 +568,23 @@
 
 	BT_INFO("BlueFRITZ! USB loading firmware");
 
+	buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_KERNEL);
+	if (!buf) {
+		BT_ERR("Can't allocate memory chunk for firmware");
+		return -ENOMEM;
+	}
+
 	pipe = usb_sndctrlpipe(data->udev, 0);
 
 	if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION,
 				0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) {
 		BT_ERR("Can't change to loading configuration");
+		kfree(buf);
 		return -EBUSY;
 	}
 
 	data->udev->toggle[0] = data->udev->toggle[1] = 0;
 
-	buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_ATOMIC);
-	if (!buf) {
-		BT_ERR("Can't allocate memory chunk for firmware");
-		return -ENOMEM;
-	}
-
 	pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep);
 
 	while (count) {
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index becd6d9..06ce268 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -62,8 +62,9 @@
 int cpuidle_idle_call(void)
 {
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_driver *drv = cpuidle_get_driver();
 	struct cpuidle_state *target_state;
-	int next_state;
+	int next_state, entered_state;
 
 	if (off)
 		return -ENODEV;
@@ -84,45 +85,36 @@
 	hrtimer_peek_ahead_timers();
 #endif
 
-	/*
-	 * Call the device's prepare function before calling the
-	 * governor's select function.  ->prepare gives the device's
-	 * cpuidle driver a chance to update any dynamic information
-	 * of its cpuidle states for the current idle period, e.g.
-	 * state availability, latencies, residencies, etc.
-	 */
-	if (dev->prepare)
-		dev->prepare(dev);
-
 	/* ask the governor for the next state */
-	next_state = cpuidle_curr_governor->select(dev);
+	next_state = cpuidle_curr_governor->select(drv, dev);
 	if (need_resched()) {
 		local_irq_enable();
 		return 0;
 	}
 
-	target_state = &dev->states[next_state];
-
-	/* enter the state and update stats */
-	dev->last_state = target_state;
+	target_state = &drv->states[next_state];
 
 	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
 	trace_cpu_idle(next_state, dev->cpu);
 
-	dev->last_residency = target_state->enter(dev, target_state);
+	entered_state = target_state->enter(dev, drv, next_state);
 
 	trace_power_end(dev->cpu);
 	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
 
-	if (dev->last_state)
-		target_state = dev->last_state;
-
-	target_state->time += (unsigned long long)dev->last_residency;
-	target_state->usage++;
+	if (entered_state >= 0) {
+		/*
+		 * Update cpuidle counters.  This could move into each
+		 * driver's enter routine, but would duplicate the code.
+		 */
+		dev->states_usage[entered_state].time +=
+				(unsigned long long)dev->last_residency;
+		dev->states_usage[entered_state].usage++;
+	}
 
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
-		cpuidle_curr_governor->reflect(dev);
+		cpuidle_curr_governor->reflect(dev, entered_state);
 
 	return 0;
 }
@@ -173,11 +165,11 @@
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
 
 #ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+static int poll_idle(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
 {
 	ktime_t	t1, t2;
 	s64 diff;
-	int ret;
 
 	t1 = ktime_get();
 	local_irq_enable();
@@ -189,15 +181,14 @@
 	if (diff > INT_MAX)
 		diff = INT_MAX;
 
-	ret = (int) diff;
-	return ret;
+	dev->last_residency = (int) diff;
+
+	return index;
 }
 
-static void poll_idle_init(struct cpuidle_device *dev)
+static void poll_idle_init(struct cpuidle_driver *drv)
 {
-	struct cpuidle_state *state = &dev->states[0];
-
-	cpuidle_set_statedata(state, NULL);
+	struct cpuidle_state *state = &drv->states[0];
 
 	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
 	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
@@ -208,7 +199,7 @@
 	state->enter = poll_idle;
 }
 #else
-static void poll_idle_init(struct cpuidle_device *dev) {}
+static void poll_idle_init(struct cpuidle_driver *drv) {}
 #endif /* CONFIG_ARCH_HAS_CPU_RELAX */
 
 /**
@@ -235,21 +226,20 @@
 			return ret;
 	}
 
-	poll_idle_init(dev);
+	poll_idle_init(cpuidle_get_driver());
 
 	if ((ret = cpuidle_add_state_sysfs(dev)))
 		return ret;
 
 	if (cpuidle_curr_governor->enable &&
-	    (ret = cpuidle_curr_governor->enable(dev)))
+	    (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
 		goto fail_sysfs;
 
 	for (i = 0; i < dev->state_count; i++) {
-		dev->states[i].usage = 0;
-		dev->states[i].time = 0;
+		dev->states_usage[i].usage = 0;
+		dev->states_usage[i].time = 0;
 	}
 	dev->last_residency = 0;
-	dev->last_state = NULL;
 
 	smp_wmb();
 
@@ -283,7 +273,7 @@
 	dev->enabled = 0;
 
 	if (cpuidle_curr_governor->disable)
-		cpuidle_curr_governor->disable(dev);
+		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);
 
 	cpuidle_remove_state_sysfs(dev);
 	enabled_devices--;
@@ -311,26 +301,6 @@
 
 	init_completion(&dev->kobj_unregister);
 
-	/*
-	 * cpuidle driver should set the dev->power_specified bit
-	 * before registering the device if the driver provides
-	 * power_usage numbers.
-	 *
-	 * For those devices whose ->power_specified is not set,
-	 * we fill in power_usage with decreasing values as the
-	 * cpuidle code has an implicit assumption that state Cn
-	 * uses less power than C(n-1).
-	 *
-	 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
-	 * an power value of -1.  So we use -2, -3, etc, for other
-	 * c-states.
-	 */
-	if (!dev->power_specified) {
-		int i;
-		for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
-			dev->states[i].power_usage = -1 - i;
-	}
-
 	per_cpu(cpuidle_devices, dev->cpu) = dev;
 	list_add(&dev->device_list, &cpuidle_detected_devices);
 	if ((ret = cpuidle_add_sysfs(sys_dev))) {
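The conversions above all follow the same reworked ->enter contract: the callback now receives the driver and a state index, records the measured residency in dev->last_residency itself, and returns the index of the state actually entered (or a negative errno). A hedged sketch, modeled on the sh and acpi_idle conversions in this series (the hardware-entry step is elided):

	static int my_idle_enter(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
	{
		ktime_t before, after;

		before = ktime_get();
		/* ... enter the hardware state described by drv->states[index] ... */
		after = ktime_get();

		/* the driver, not the core, now accounts the residency */
		dev->last_residency = (int)ktime_to_us(ktime_sub(after, before));

		return index;	/* state actually entered; <0 on failure */
	}

The core then charges dev->states_usage[entered_state] only when the return value is non-negative, which is why the error paths above switch from returning 0 to returning -EINVAL.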
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 3f7e3ce..284d7af 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -17,6 +17,30 @@
 static struct cpuidle_driver *cpuidle_curr_driver;
 DEFINE_SPINLOCK(cpuidle_driver_lock);
 
+static void __cpuidle_register_driver(struct cpuidle_driver *drv)
+{
+	int i;
+	/*
+	 * cpuidle driver should set the drv->power_specified bit
+	 * before registering if the driver provides
+	 * power_usage numbers.
+	 *
+	 * If power_specified is not set,
+	 * we fill in power_usage with decreasing values as the
+	 * cpuidle code has an implicit assumption that state Cn
+	 * uses less power than C(n-1).
+	 *
+	 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
+ * a power value of -1.  So we use -2, -3, etc., for other
+	 * c-states.
+	 */
+	if (!drv->power_specified) {
+		for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
+			drv->states[i].power_usage = -1 - i;
+	}
+}
+
+
 /**
  * cpuidle_register_driver - registers a driver
  * @drv: the driver
@@ -34,6 +58,7 @@
 		spin_unlock(&cpuidle_driver_lock);
 		return -EBUSY;
 	}
+	__cpuidle_register_driver(drv);
 	cpuidle_curr_driver = drv;
 	spin_unlock(&cpuidle_driver_lock);
 
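Worked example: with CPUIDLE_DRIVER_STATE_START == 1 (as when a polling C0 state occupies slot 0) and a three-state driver that leaves power_specified unset, the loop above assigns states[1].power_usage = -2 and states[2].power_usage = -3, preserving the implicit ordering that Cn draws less power than C(n-1). Moving this fixup from device registration to driver registration means it now runs once per driver rather than once per CPU.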
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 3b8fce2..b6a09ea 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -60,9 +60,11 @@
 
 /**
  * ladder_select_state - selects the next state to enter
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int ladder_select_state(struct cpuidle_device *dev)
+static int ladder_select_state(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
 	struct ladder_device_state *last_state;
@@ -77,15 +79,17 @@
 
 	last_state = &ldev->states[last_idx];
 
-	if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
-		last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
+	if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) {
+		last_residency = cpuidle_get_last_residency(dev) -
+					 drv->states[last_idx].exit_latency;
+	}
 	else
 		last_residency = last_state->threshold.promotion_time + 1;
 
 	/* consider promotion */
-	if (last_idx < dev->state_count - 1 &&
+	if (last_idx < drv->state_count - 1 &&
 	    last_residency > last_state->threshold.promotion_time &&
-	    dev->states[last_idx + 1].exit_latency <= latency_req) {
+	    drv->states[last_idx + 1].exit_latency <= latency_req) {
 		last_state->stats.promotion_count++;
 		last_state->stats.demotion_count = 0;
 		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -96,11 +100,11 @@
 
 	/* consider demotion */
 	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
-	    dev->states[last_idx].exit_latency > latency_req) {
+	    drv->states[last_idx].exit_latency > latency_req) {
 		int i;
 
 		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
-			if (dev->states[i].exit_latency <= latency_req)
+			if (drv->states[i].exit_latency <= latency_req)
 				break;
 		}
 		ladder_do_selection(ldev, last_idx, i);
@@ -123,9 +127,11 @@
 
 /**
  * ladder_enable_device - setup for the governor
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int ladder_enable_device(struct cpuidle_device *dev)
+static int ladder_enable_device(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	int i;
 	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
@@ -134,8 +140,8 @@
 
 	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 
-	for (i = 0; i < dev->state_count; i++) {
-		state = &dev->states[i];
+	for (i = 0; i < drv->state_count; i++) {
+		state = &drv->states[i];
 		lstate = &ldev->states[i];
 
 		lstate->stats.promotion_count = 0;
@@ -144,7 +150,7 @@
 		lstate->threshold.promotion_count = PROMOTION_COUNT;
 		lstate->threshold.demotion_count = DEMOTION_COUNT;
 
-		if (i < dev->state_count - 1)
+		if (i < drv->state_count - 1)
 			lstate->threshold.promotion_time = state->exit_latency;
 		if (i > 0)
 			lstate->threshold.demotion_time = state->exit_latency;
@@ -153,11 +159,24 @@
 	return 0;
 }
 
+/**
+ * ladder_reflect - update the correct last_state_idx
+ * @dev: the CPU
+ * @index: the index of actual state entered
+ */
+static void ladder_reflect(struct cpuidle_device *dev, int index)
+{
+	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
+	if (index > 0)
+		ldev->last_state_idx = index;
+}
+
 static struct cpuidle_governor ladder_governor = {
 	.name =		"ladder",
 	.rating =	10,
 	.enable =	ladder_enable_device,
 	.select =	ladder_select_state,
+	.reflect =	ladder_reflect,
 	.owner =	THIS_MODULE,
 };
 
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 0027524..ad09526 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -183,7 +183,7 @@
 
 static DEFINE_PER_CPU(struct menu_device, menu_devices);
 
-static void menu_update(struct cpuidle_device *dev);
+static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
 
 /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
 static u64 div_round64(u64 dividend, u32 divisor)
@@ -229,9 +229,10 @@
 
 /**
  * menu_select - selects the next idle state to enter
+ * @drv: cpuidle driver containing state data
  * @dev: the CPU
  */
-static int menu_select(struct cpuidle_device *dev)
+static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
@@ -241,7 +242,7 @@
 	struct timespec t;
 
 	if (data->needs_update) {
-		menu_update(dev);
+		menu_update(drv, dev);
 		data->needs_update = 0;
 	}
 
@@ -286,11 +287,9 @@
 	 * Find the idle state with the lowest power while satisfying
 	 * our constraints.
 	 */
-	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
-		struct cpuidle_state *s = &dev->states[i];
+	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+		struct cpuidle_state *s = &drv->states[i];
 
-		if (s->flags & CPUIDLE_FLAG_IGNORE)
-			continue;
 		if (s->target_residency > data->predicted_us)
 			continue;
 		if (s->exit_latency > latency_req)
@@ -311,26 +310,30 @@
 /**
  * menu_reflect - records that data structures need update
  * @dev: the CPU
+ * @index: the index of actual entered state
  *
  * NOTE: it's important to be fast here because this operation will add to
  *       the overall exit latency.
  */
-static void menu_reflect(struct cpuidle_device *dev)
+static void menu_reflect(struct cpuidle_device *dev, int index)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
-	data->needs_update = 1;
+	data->last_state_idx = index;
+	if (index >= 0)
+		data->needs_update = 1;
 }
 
 /**
  * menu_update - attempts to guess what happened after entry
+ * @drv: cpuidle driver containing state data
  * @dev: the CPU
  */
-static void menu_update(struct cpuidle_device *dev)
+static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int last_idx = data->last_state_idx;
 	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
-	struct cpuidle_state *target = &dev->states[last_idx];
+	struct cpuidle_state *target = &drv->states[last_idx];
 	unsigned int measured_us;
 	u64 new_factor;
 
@@ -384,9 +387,11 @@
 
 /**
  * menu_enable_device - scans a CPU's states and does setup
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int menu_enable_device(struct cpuidle_device *dev)
+static int menu_enable_device(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
 
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index be7917ec..1e756e1 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -216,7 +216,8 @@
 
 struct cpuidle_state_attr {
 	struct attribute attr;
-	ssize_t (*show)(struct cpuidle_state *, char *);
+	ssize_t (*show)(struct cpuidle_state *,
+					struct cpuidle_state_usage *, char *);
 	ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
 };
 
@@ -224,19 +225,22 @@
 static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
 
 #define define_show_state_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+			 struct cpuidle_state_usage *state_usage, char *buf) \
 { \
 	return sprintf(buf, "%u\n", state->_name);\
 }
 
 #define define_show_state_ull_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+			struct cpuidle_state_usage *state_usage, char *buf) \
 { \
-	return sprintf(buf, "%llu\n", state->_name);\
+	return sprintf(buf, "%llu\n", state_usage->_name);\
 }
 
 #define define_show_state_str_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+			struct cpuidle_state_usage *state_usage, char *buf) \
 { \
 	if (state->_name[0] == '\0')\
 		return sprintf(buf, "<null>\n");\
@@ -269,16 +273,18 @@
 
 #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
 #define kobj_to_state(k) (kobj_to_state_obj(k)->state)
+#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
 #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
 static ssize_t cpuidle_state_show(struct kobject * kobj,
 	struct attribute * attr ,char * buf)
 {
 	int ret = -EIO;
 	struct cpuidle_state *state = kobj_to_state(kobj);
+	struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
 	struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);
 
 	if (cattr->show)
-		ret = cattr->show(state, buf);
+		ret = cattr->show(state, state_usage, buf);
 
 	return ret;
 }
@@ -316,13 +322,15 @@
 {
 	int i, ret = -ENOMEM;
 	struct cpuidle_state_kobj *kobj;
+	struct cpuidle_driver *drv = cpuidle_get_driver();
 
 	/* state statistics */
 	for (i = 0; i < device->state_count; i++) {
 		kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
 		if (!kobj)
 			goto error_state;
-		kobj->state = &device->states[i];
+		kobj->state = &drv->states[i];
+		kobj->state_usage = &device->states_usage[i];
 		init_completion(&kobj->kobj_unregister);
 
 		ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 711d965..9a2e2a1 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -163,6 +163,7 @@
 	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
 	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
 	{ DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
+	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0 },
 };
 
 static struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -171,6 +172,7 @@
 	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
 	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
 	{ DRM_MODE_ENCODER_TVDAC, "TV" },
+	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
 };
 
 char *drm_get_encoder_name(struct drm_encoder *encoder)
@@ -464,8 +466,10 @@
 	list_add_tail(&connector->head, &dev->mode_config.connector_list);
 	dev->mode_config.num_connector++;
 
-	drm_connector_attach_property(connector,
-				      dev->mode_config.edid_property, 0);
+	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+		drm_connector_attach_property(connector,
+					      dev->mode_config.edid_property,
+					      0);
 
 	drm_connector_attach_property(connector,
 				      dev->mode_config.dpms_property, 0);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 9f363e0..cf8b4bc 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,7 +70,7 @@
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
 	r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
 	evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
-	radeon_trace_points.o ni.o cayman_blit_shaders.o
+	radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a515b2a..87921c8 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -558,7 +558,7 @@
 				bpc = connector->display_info.bpc;
 			encoder_mode = atombios_get_encoder_mode(encoder);
 			if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
-			    radeon_encoder_is_dp_bridge(encoder)) {
+			    (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
 				if (connector) {
 					struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 					struct radeon_connector_atom_dig *dig_connector =
@@ -638,44 +638,29 @@
 				if (ss_enabled && ss->percentage)
 					args.v3.sInput.ucDispPllConfig |=
 						DISPPLL_CONFIG_SS_ENABLE;
-				if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) ||
-				    radeon_encoder_is_dp_bridge(encoder)) {
+				if (ENCODER_MODE_IS_DP(encoder_mode)) {
+					args.v3.sInput.ucDispPllConfig |=
+						DISPPLL_CONFIG_COHERENT_MODE;
+					/* 16200 or 27000 */
+					args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
+				} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 					struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-					if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+					if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
+						/* deep color support */
+						args.v3.sInput.usPixelClock =
+							cpu_to_le16((mode->clock * bpc / 8) / 10);
+					if (dig->coherent_mode)
 						args.v3.sInput.ucDispPllConfig |=
 							DISPPLL_CONFIG_COHERENT_MODE;
-						/* 16200 or 27000 */
-						args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
-					} else {
-						if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
-							/* deep color support */
-							args.v3.sInput.usPixelClock =
-								cpu_to_le16((mode->clock * bpc / 8) / 10);
-						}
-						if (dig->coherent_mode)
-							args.v3.sInput.ucDispPllConfig |=
-								DISPPLL_CONFIG_COHERENT_MODE;
-						if (mode->clock > 165000)
-							args.v3.sInput.ucDispPllConfig |=
-								DISPPLL_CONFIG_DUAL_LINK;
-					}
-				} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-					if (encoder_mode == ATOM_ENCODER_MODE_DP) {
+					if (mode->clock > 165000)
 						args.v3.sInput.ucDispPllConfig |=
-							DISPPLL_CONFIG_COHERENT_MODE;
-						/* 16200 or 27000 */
-						args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
-					} else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
-						if (mode->clock > 165000)
-							args.v3.sInput.ucDispPllConfig |=
-								DISPPLL_CONFIG_DUAL_LINK;
-					}
+							DISPPLL_CONFIG_DUAL_LINK;
 				}
-				if (radeon_encoder_is_dp_bridge(encoder)) {
-					struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
-					struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
-					args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
-				} else
+				if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+				    ENCODER_OBJECT_ID_NONE)
+					args.v3.sInput.ucExtTransmitterID =
+						radeon_encoder_get_dp_bridge_encoder_id(encoder);
+				else
 					args.v3.sInput.ucExtTransmitterID = 0;
 
 				atom_execute_table(rdev->mode_info.atom_context,
@@ -945,6 +930,7 @@
 		bpc = connector->display_info.bpc;
 
 		switch (encoder_mode) {
+		case ATOM_ENCODER_MODE_DP_MST:
 		case ATOM_ENCODER_MODE_DP:
 			/* DP/eDP */
 			dp_clock = dig_connector->dp_clock / 10;
@@ -1450,7 +1436,7 @@
 				 * PPLL/DCPLL programming and only program the DP DTO for the
 				 * crtc virtual pixel clock.
 				 */
-				if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
+				if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) {
 					if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
 						return ATOM_PPLL_INVALID;
 				}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 79e8ebc..a0de485 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -482,7 +482,8 @@
 	int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
 	int lane_num, max_pix_clock;
 
-	if (radeon_connector_encoder_is_dp_bridge(connector))
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG)
 		return 270000;
 
 	lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
@@ -553,17 +554,32 @@
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
 
 	if (!ASIC_IS_DCE4(rdev))
 		return;
 
-	if (radeon_connector_encoder_is_dp_bridge(connector))
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG)
 		panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+	else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+		 ENCODER_OBJECT_ID_TRAVIS)
+		panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+	else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+		u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
+		if (tmp & 1)
+			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+	}
 
 	atombios_dig_encoder_setup(encoder,
 				   ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
 				   panel_mode);
+
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+	    (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+		radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+	}
 }
 
 void radeon_dp_set_link_config(struct drm_connector *connector,
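Panel-mode selection thus becomes bridge-aware: a NUTMEG DP bridge forces internal DP1 mode, a TRAVIS bridge internal DP2 mode, and for eDP panels the sink's DPCD is consulted directly: if bit 0 of DP_EDP_CONFIGURATION_CAP is set, internal DP2 mode is chosen and the choice is written back through DP_EDP_CONFIGURATION_SET.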
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
new file mode 100644
index 0000000..39c04c1
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -0,0 +1,2369 @@
+/*
+ * Copyright 2007-11 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+#include "atom.h"
+
+extern int atom_debug;
+
+/* evil but including atombios.h is much worse */
+bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
+				struct drm_display_mode *mode);
+
+
+static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		radeon_connector = to_radeon_connector(connector);
+		if (radeon_encoder->devices & radeon_connector->devices)
+			return connector;
+	}
+	return NULL;
+}
+
+static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
+				   struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	/* set the active encoder to connector routing */
+	radeon_encoder_set_active_device(encoder);
+	drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+	/* hw bug */
+	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
+	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
+		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
+
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
+		radeon_panel_mode_fixup(encoder, adjusted_mode);
+
+	/* get the native mode for TV */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
+		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
+		if (tv_dac) {
+			if (tv_dac->tv_std == TV_STD_NTSC ||
+			    tv_dac->tv_std == TV_STD_NTSC_J ||
+			    tv_dac->tv_std == TV_STD_PAL_M)
+				radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
+			else
+				radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
+		}
+	}
+
+	if (ASIC_IS_DCE3(rdev) &&
+	    ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	     (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
+		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+		radeon_dp_set_link_config(connector, mode);
+	}
+
+	return true;
+}
+
+static void
+atombios_dac_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DAC_ENCODER_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
+		break;
+	}
+
+	args.ucAction = action;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
+		args.ucDacStandard = ATOM_DAC1_PS2;
+	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		args.ucDacStandard = ATOM_DAC1_CV;
+	else {
+		switch (dac_info->tv_std) {
+		case TV_STD_PAL:
+		case TV_STD_PAL_M:
+		case TV_STD_SCART_PAL:
+		case TV_STD_SECAM:
+		case TV_STD_PAL_CN:
+			args.ucDacStandard = ATOM_DAC1_PAL;
+			break;
+		case TV_STD_NTSC:
+		case TV_STD_NTSC_J:
+		case TV_STD_PAL_60:
+		default:
+			args.ucDacStandard = ATOM_DAC1_NTSC;
+			break;
+		}
+	}
+	args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+static void
+atombios_tv_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	TV_ENCODER_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
+
+	memset(&args, 0, sizeof(args));
+
+	index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
+
+	args.sTVEncoder.ucAction = action;
+
+	if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
+	else {
+		switch (dac_info->tv_std) {
+		case TV_STD_NTSC:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+			break;
+		case TV_STD_PAL:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
+			break;
+		case TV_STD_PAL_M:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
+			break;
+		case TV_STD_PAL_60:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
+			break;
+		case TV_STD_NTSC_J:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
+			break;
+		case TV_STD_SCART_PAL:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
+			break;
+		case TV_STD_SECAM:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
+			break;
+		case TV_STD_PAL_CN:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
+			break;
+		default:
+			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
+			break;
+		}
+	}
+
+	args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+union dvo_encoder_control {
+	ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
+	DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
+};
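+
+/* AtomBIOS command tables are versioned; unions like this let one parameter
+ * buffer be filled with whichever layout matches the table revision reported
+ * by atom_parse_cmd_header().
+ */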
+
+void
+atombios_dvo_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	union dvo_encoder_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
+	uint8_t frev, crev;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			/* R4xx, R5xx */
+			args.ext_tmds.sXTmdsEncoder.ucEnable = action;
+
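+			/* single-link TMDS tops out at 165 MHz; above that, drive both links */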
+			if (radeon_encoder->pixel_clock > 165000)
+				args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+
+			args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			break;
+		case 2:
+			/* RS600/690/740 */
+			args.dvo.sDVOEncoder.ucAction = action;
+			args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			/* DFP1, CRT1, TV1 depending on the type of port */
+			args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
+
+			if (radeon_encoder->pixel_clock > 165000)
+				args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
+			break;
+		case 3:
+			/* R6xx */
+			args.dvo_v3.ucAction = action;
+			args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.dvo_v3.ucDVOConfig = 0; /* XXX */
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+union lvds_encoder_control {
+	LVDS_ENCODER_CONTROL_PS_ALLOCATION    v1;
+	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
+};
+
+void
+atombios_digital_setup(struct drm_encoder *encoder, int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	union lvds_encoder_control args;
+	int index = 0;
+	int hdmi_detected = 0;
+	uint8_t frev, crev;
+
+	if (!dig)
+		return;
+
+	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+		hdmi_detected = 1;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
+		break;
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+	case 2:
+		switch (crev) {
+		case 1:
+			args.v1.ucMisc = 0;
+			args.v1.ucAction = action;
+			if (hdmi_detected)
+				args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+					args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			} else {
+				if (dig->linkb)
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+				if (radeon_encoder->pixel_clock > 165000)
+					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				/*if (pScrn->rgbBits == 8) */
+				args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
+			}
+			break;
+		case 2:
+		case 3:
+			args.v2.ucMisc = 0;
+			args.v2.ucAction = action;
+			if (crev == 3) {
+				if (dig->coherent_mode)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
+			}
+			if (hdmi_detected)
+				args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
+			args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v2.ucTruncate = 0;
+			args.v2.ucSpatial = 0;
+			args.v2.ucTemporal = 0;
+			args.v2.ucFRC = 0;
+			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+				if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
+					args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
+					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+						args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
+				}
+				if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
+					args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
+					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
+						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
+					if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
+						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
+				}
+			} else {
+				if (dig->linkb)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
+				if (radeon_encoder->pixel_clock > 165000)
+					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+int
+atombios_get_encoder_mode(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_connector *connector;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+
+	/* dp bridges are always DP */
+	if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
+		return ATOM_ENCODER_MODE_DP;
+
+	/* DVO is always DVO */
+	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
+	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
+		return ATOM_ENCODER_MODE_DVO;
+
+	connector = radeon_get_connector_for_encoder(encoder);
+	/* if we don't have an active device yet, just use one of
+	 * the connectors tied to the encoder.
+	 */
+	if (!connector)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	radeon_connector = to_radeon_connector(connector);
+
+	switch (connector->connector_type) {
+	case DRM_MODE_CONNECTOR_DVII:
+	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
+		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+			/* fix me */
+			if (ASIC_IS_DCE4(rdev))
+				return ATOM_ENCODER_MODE_DVI;
+			else
+				return ATOM_ENCODER_MODE_HDMI;
+		} else if (radeon_connector->use_digital)
+			return ATOM_ENCODER_MODE_DVI;
+		else
+			return ATOM_ENCODER_MODE_CRT;
+		break;
+	case DRM_MODE_CONNECTOR_DVID:
+	case DRM_MODE_CONNECTOR_HDMIA:
+	default:
+		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+			/* fix me */
+			if (ASIC_IS_DCE4(rdev))
+				return ATOM_ENCODER_MODE_DVI;
+			else
+				return ATOM_ENCODER_MODE_HDMI;
+		} else
+			return ATOM_ENCODER_MODE_DVI;
+		break;
+	case DRM_MODE_CONNECTOR_LVDS:
+		return ATOM_ENCODER_MODE_LVDS;
+		break;
+	case DRM_MODE_CONNECTOR_DisplayPort:
+		dig_connector = radeon_connector->con_priv;
+		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+			return ATOM_ENCODER_MODE_DP;
+		else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
+			/* fix me */
+			if (ASIC_IS_DCE4(rdev))
+				return ATOM_ENCODER_MODE_DVI;
+			else
+				return ATOM_ENCODER_MODE_HDMI;
+		} else
+			return ATOM_ENCODER_MODE_DVI;
+		break;
+	case DRM_MODE_CONNECTOR_eDP:
+		return ATOM_ENCODER_MODE_DP;
+	case DRM_MODE_CONNECTOR_DVIA:
+	case DRM_MODE_CONNECTOR_VGA:
+		return ATOM_ENCODER_MODE_CRT;
+		break;
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_9PinDIN:
+		/* fix me */
+		return ATOM_ENCODER_MODE_TV;
+		/*return ATOM_ENCODER_MODE_CV;*/
+		break;
+	}
+}
+
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * DCE 4.0/5.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
+ * DCE 4.1
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA   links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link  B   -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link  A   -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link  B+A -> TMDS/HDMI
+ */
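+
+/* The hardcoded DCE 4.0/5.0 mapping above reduces to
+ * dig encoder index = 2 * uniphy index + (link B ? 1 : 0),
+ * which is what radeon_atom_pick_dig_encoder() computes below.
+ */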
+
+union dig_encoder_control {
+	DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
+};
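+
+/* The leading fields (usPixelClock, the config byte, ucAction, ucEncoderMode)
+ * sit at the same offsets in each revision, so the v1 accesses mixed into the
+ * v3/v4 cases below alias the same bytes in the union.
+ */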
+
+void
+atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	union dig_encoder_control args;
+	int index = 0;
+	uint8_t frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int hpd_id = RADEON_HPD_NONE;
+	int bpc = 8;
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		hpd_id = radeon_connector->hpd.hpd;
+		bpc = connector->display_info.bpc;
+	}
+
+	/* no dig encoder assigned */
+	if (dig->dig_encoder == -1)
+		return;
+
+	memset(&args, 0, sizeof(args));
+
+	if (ASIC_IS_DCE4(rdev))
+		index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+	else {
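+		/* pre-DCE4 parts only have two DIG encoder blocks */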
+		if (dig->dig_encoder)
+			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			args.v1.ucAction = action;
+			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v3.ucPanelMode = panel_mode;
+			else
+				args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+				args.v1.ucLaneNum = dp_lane_count;
+			else if (radeon_encoder->pixel_clock > 165000)
+				args.v1.ucLaneNum = 8;
+			else
+				args.v1.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
+				break;
+			}
+			if (dig->linkb)
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+			else
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+			break;
+		case 2:
+		case 3:
+			args.v3.ucAction = action;
+			args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v3.ucPanelMode = panel_mode;
+			else
+				args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+				args.v3.ucLaneNum = dp_lane_count;
+			else if (radeon_encoder->pixel_clock > 165000)
+				args.v3.ucLaneNum = 8;
+			else
+				args.v3.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
+				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+			args.v3.acConfig.ucDigSel = dig->dig_encoder;
+			switch (bpc) {
+			case 0:
+				args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
+				break;
+			case 6:
+				args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+				break;
+			case 8:
+			default:
+				args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+				break;
+			case 10:
+				args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+				break;
+			case 12:
+				args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+				break;
+			case 16:
+				args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+				break;
+			}
+			break;
+		case 4:
+			args.v4.ucAction = action;
+			args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
+				args.v4.ucPanelMode = panel_mode;
+			else
+				args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
+				args.v4.ucLaneNum = dp_lane_count;
+			else if (radeon_encoder->pixel_clock > 165000)
+				args.v4.ucLaneNum = 8;
+			else
+				args.v4.ucLaneNum = 4;
+
+			if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
+				else if (dp_clock == 540000)
+					args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
+			}
+			args.v4.acConfig.ucDigSel = dig->dig_encoder;
+			switch (bpc) {
+			case 0:
+				args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
+				break;
+			case 6:
+				args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+				break;
+			case 8:
+			default:
+				args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+				break;
+			case 10:
+				args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+				break;
+			case 12:
+				args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+				break;
+			case 16:
+				args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+				break;
+			}
+			if (hpd_id == RADEON_HPD_NONE)
+				args.v4.ucHPD_ID = 0;
+			else
+				args.v4.ucHPD_ID = hpd_id + 1;
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+}
+
+union dig_transmitter_control {
+	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
+	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
+};
+
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+	struct drm_connector *connector;
+	union dig_transmitter_control args;
+	int index = 0;
+	uint8_t frev, crev;
+	bool is_dp = false;
+	int pll_id = 0;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	int igp_lane_info = 0;
+	int dig_encoder = dig->dig_encoder;
+
+	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+		connector = radeon_get_connector_for_encoder_init(encoder);
+		/* only needed to avoid bailing in the encoder check below;
+		 * the encoder isn't actually used for init
+		 */
+		dig_encoder = 0;
+	} else
+		connector = radeon_get_connector_for_encoder(encoder);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+		igp_lane_info = dig_connector->igp_lane_info;
+	}
+
+	if (encoder->crtc) {
+		struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+		pll_id = radeon_crtc->pll_id;
+	}
+
+	/* no dig encoder assigned */
+	if (dig_encoder == -1)
+		return;
+
+	if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)))
+		is_dp = true;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
+		break;
+	}
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+			args.v1.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v1.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v1.asMode.ucLaneSel = lane_num;
+				args.v1.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v1.usPixelClock =
+						cpu_to_le16(dp_clock / 10);
+				else if (radeon_encoder->pixel_clock > 165000)
+					args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
+
+			if (dig_encoder)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+
+			if ((rdev->flags & RADEON_IS_IGP) &&
+			    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+				if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
+					if (igp_lane_info & 0x1)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+					else if (igp_lane_info & 0x2)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+					else if (igp_lane_info & 0x4)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+					else if (igp_lane_info & 0x8)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+				} else {
+					if (igp_lane_info & 0x3)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+					else if (igp_lane_info & 0xc)
+						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
+				}
+			}
+
+			if (dig->linkb)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+			if (is_dp)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+				if (radeon_encoder->pixel_clock > 165000)
+					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+			}
+			break;
+		case 2:
+			args.v2.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v2.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v2.asMode.ucLaneSel = lane_num;
+				args.v2.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v2.usPixelClock =
+						cpu_to_le16(dp_clock / 10);
+				else if (radeon_encoder->pixel_clock > 165000)
+					args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			args.v2.acConfig.ucEncoderSel = dig_encoder;
+			if (dig->linkb)
+				args.v2.acConfig.ucLinkSel = 1;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v2.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v2.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v2.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp) {
+				args.v2.acConfig.fCoherentMode = 1;
+				args.v2.acConfig.fDPConnector = 1;
+			} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v2.acConfig.fCoherentMode = 1;
+				if (radeon_encoder->pixel_clock > 165000)
+					args.v2.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 3:
+			args.v3.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v3.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v3.asMode.ucLaneSel = lane_num;
+				args.v3.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v3.usPixelClock =
+						cpu_to_le16(dp_clock / 10);
+				else if (radeon_encoder->pixel_clock > 165000)
+					args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			if (is_dp)
+				args.v3.ucLaneNum = dp_lane_count;
+			else if (radeon_encoder->pixel_clock > 165000)
+				args.v3.ucLaneNum = 8;
+			else
+				args.v3.ucLaneNum = 4;
+
+			if (dig->linkb)
+				args.v3.acConfig.ucLinkSel = 1;
+			if (dig_encoder & 1)
+				args.v3.acConfig.ucEncoderSel = 1;
+
+			/* Select the PLL for the PHY
+			 * DP PHY should be clocked from external src if there is
+			 * one.
+			 */
+			/* On DCE4, if there is an external clock, it generates the DP ref clock */
+			if (is_dp && rdev->clock.dp_extclk)
+				args.v3.acConfig.ucRefClkSource = 2; /* external src */
+			else
+				args.v3.acConfig.ucRefClkSource = pll_id;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v3.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v3.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v3.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp)
+				args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v3.acConfig.fCoherentMode = 1;
+				if (radeon_encoder->pixel_clock > 165000)
+					args.v3.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		case 4:
+			args.v4.ucAction = action;
+			if (action == ATOM_TRANSMITTER_ACTION_INIT) {
+				args.v4.usInitInfo = cpu_to_le16(connector_object_id);
+			} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+				args.v4.asMode.ucLaneSel = lane_num;
+				args.v4.asMode.ucLaneSet = lane_set;
+			} else {
+				if (is_dp)
+					args.v4.usPixelClock =
+						cpu_to_le16(dp_clock / 10);
+				else if (radeon_encoder->pixel_clock > 165000)
+					args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
+				else
+					args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			}
+
+			if (is_dp)
+				args.v4.ucLaneNum = dp_lane_count;
+			else if (radeon_encoder->pixel_clock > 165000)
+				args.v4.ucLaneNum = 8;
+			else
+				args.v4.ucLaneNum = 4;
+
+			if (dig->linkb)
+				args.v4.acConfig.ucLinkSel = 1;
+			if (dig_encoder & 1)
+				args.v4.acConfig.ucEncoderSel = 1;
+
+			/* Select the PLL for the PHY
+			 * DP PHY should be clocked from external src if there is
+			 * one.
+			 */
+			/* On DCE5 DCPLL usually generates the DP ref clock */
+			if (is_dp) {
+				if (rdev->clock.dp_extclk)
+					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
+				else
+					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
+			} else
+				args.v4.acConfig.ucRefClkSource = pll_id;
+
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				args.v4.acConfig.ucTransmitterSel = 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				args.v4.acConfig.ucTransmitterSel = 1;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				args.v4.acConfig.ucTransmitterSel = 2;
+				break;
+			}
+
+			if (is_dp)
+				args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
+			else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+				if (dig->coherent_mode)
+					args.v4.acConfig.fCoherentMode = 1;
+				if (radeon_encoder->pixel_clock > 165000)
+					args.v4.acConfig.fDualLinkConnector = 1;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+		break;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+bool
+atombios_set_edp_panel_power(struct drm_connector *connector, int action)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	union dig_transmitter_control args;
+	int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
+	uint8_t frev, crev;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+		goto done;
+
+	if (!ASIC_IS_DCE4(rdev))
+		goto done;
+
+	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
+	    (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
+		goto done;
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		goto done;
+
+	memset(&args, 0, sizeof(args));
+
+	args.v1.ucAction = action;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* wait up to 300 ms for the panel to power up (HPD goes high) */
+	if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
+		int i;
+
+		for (i = 0; i < 300; i++) {
+			if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+				return true;
+			mdelay(1);
+		}
+		return false;
+	}
+done:
+	return true;
+}
+
+union external_encoder_control {
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
+	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
+};
+
+static void
+atombios_external_encoder_setup(struct drm_encoder *encoder,
+				struct drm_encoder *ext_encoder,
+				int action)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
+	union external_encoder_control args;
+	struct drm_connector *connector;
+	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
+	u8 frev, crev;
+	int dp_clock = 0;
+	int dp_lane_count = 0;
+	int connector_object_id = 0;
+	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+	int bpc = 8;
+
+	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+		connector = radeon_get_connector_for_encoder_init(encoder);
+	else
+		connector = radeon_get_connector_for_encoder(encoder);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		struct radeon_connector_atom_dig *dig_connector =
+			radeon_connector->con_priv;
+
+		dp_clock = dig_connector->dp_clock;
+		dp_lane_count = dig_connector->dp_lane_count;
+		connector_object_id =
+			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+		bpc = connector->display_info.bpc;
+	}
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		/* no params on frev 1 */
+		break;
+	case 2:
+		switch (crev) {
+		case 1:
+		case 2:
+			args.v1.sDigEncoder.ucAction = action;
+			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
+			} else if (radeon_encoder->pixel_clock > 165000)
+				args.v1.sDigEncoder.ucLaneNum = 8;
+			else
+				args.v1.sDigEncoder.ucLaneNum = 4;
+			break;
+		case 3:
+			args.v3.sExtEncoder.ucAction = action;
+			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
+				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
+			else
+				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+			if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
+				if (dp_clock == 270000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
+				else if (dp_clock == 540000)
+					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
+				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
+			} else if (radeon_encoder->pixel_clock > 165000)
+				args.v3.sExtEncoder.ucLaneNum = 8;
+			else
+				args.v3.sExtEncoder.ucLaneNum = 4;
+			switch (ext_enum) {
+			case GRAPH_OBJECT_ENUM_ID1:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
+				break;
+			case GRAPH_OBJECT_ENUM_ID2:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
+				break;
+			case GRAPH_OBJECT_ENUM_ID3:
+				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
+				break;
+			}
+			switch (bpc) {
+			case 0:
+				args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
+				break;
+			case 6:
+				args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
+				break;
+			case 8:
+			default:
+				args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+				break;
+			case 10:
+				args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
+				break;
+			case 12:
+				args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
+				break;
+			case 16:
+				args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
+				break;
+			}
+			break;
+		default:
+			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+			return;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+		return;
+	}
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void
+atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	ENABLE_YUV_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
+	uint32_t temp, reg;
+
+	memset(&args, 0, sizeof(args));
+
+	if (rdev->family >= CHIP_R600)
+		reg = R600_BIOS_3_SCRATCH;
+	else
+		reg = RADEON_BIOS_3_SCRATCH;
+
+	/* XXX: fix up scratch reg handling */
+	temp = RREG32(reg);
+	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+		WREG32(reg, (ATOM_S3_TV1_ACTIVE |
+			     (radeon_crtc->crtc_id << 18)));
+	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+		WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
+	else
+		WREG32(reg, 0);
+
+	if (enable)
+		args.ucEnable = ATOM_ENABLE;
+	args.ucCRTC = radeon_crtc->crtc_id;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	WREG32(reg, temp);
+}
+
+static void
+radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
+	int index = 0;
+
+	memset(&args, 0, sizeof(args));
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
+		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
+		else
+			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
+		break;
+	default:
+		return;
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		args.ucAction = ATOM_ENABLE;
+		/* workaround for DVOOutputControl on some RS690 systems */
+		if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
+			u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+			WREG32(RADEON_BIOS_3_SCRATCH, reg);
+		} else
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			args.ucAction = ATOM_LCD_BLON;
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		}
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		args.ucAction = ATOM_DISABLE;
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			args.ucAction = ATOM_LCD_BLOFF;
+			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+		}
+		break;
+	}
+}
+
+static void
+radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+	struct radeon_connector *radeon_connector = NULL;
+	struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
+
+	if (connector) {
+		radeon_connector = to_radeon_connector(connector);
+		radeon_dig_connector = radeon_connector->con_priv;
+	}
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		/* some early dce3.2 boards have a bug in their transmitter control table */
+		if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+		else
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_ON);
+				radeon_dig_connector->edp_on = true;
+			}
+			if (ASIC_IS_DCE4(rdev))
+				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+			radeon_dp_link_train(encoder, connector);
+			if (ASIC_IS_DCE4(rdev))
+				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
+		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+		if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+			if (ASIC_IS_DCE4(rdev))
+				atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
+			if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+				atombios_set_edp_panel_power(connector,
+							     ATOM_TRANSMITTER_ACTION_POWER_OFF);
+				radeon_dig_connector->edp_on = false;
+			}
+		}
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
+		break;
+	}
+}
+
+static void
+radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
+			     struct drm_encoder *ext_encoder,
+			     int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	default:
+		if (ASIC_IS_DCE41(rdev)) {
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
+		} else
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		if (ASIC_IS_DCE41(rdev)) {
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
+		} else
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
+		break;
+	}
+}
+
+static void
+radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+		  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
+		  radeon_encoder->active_device);
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		radeon_atom_encoder_dpms_dig(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		if (ASIC_IS_DCE5(rdev)) {
+			switch (mode) {
+			case DRM_MODE_DPMS_ON:
+				atombios_dvo_setup(encoder, ATOM_ENABLE);
+				break;
+			case DRM_MODE_DPMS_STANDBY:
+			case DRM_MODE_DPMS_SUSPEND:
+			case DRM_MODE_DPMS_OFF:
+				atombios_dvo_setup(encoder, ATOM_DISABLE);
+				break;
+			}
+		} else if (ASIC_IS_DCE3(rdev))
+			radeon_atom_encoder_dpms_dig(encoder, mode);
+		else
+			radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+		if (ASIC_IS_DCE5(rdev)) {
+			switch (mode) {
+			case DRM_MODE_DPMS_ON:
+				atombios_dac_setup(encoder, ATOM_ENABLE);
+				break;
+			case DRM_MODE_DPMS_STANDBY:
+			case DRM_MODE_DPMS_SUSPEND:
+			case DRM_MODE_DPMS_OFF:
+				atombios_dac_setup(encoder, ATOM_DISABLE);
+				break;
+			}
+		} else
+			radeon_atom_encoder_dpms_avivo(encoder, mode);
+		break;
+	default:
+		return;
+	}
+
+	if (ext_encoder)
+		radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
+
+	radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+}
+
+union crtc_source_param {
+	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
+	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
+};
+
+static void
+atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	union crtc_source_param args;
+	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
+	uint8_t frev, crev;
+	struct radeon_encoder_atom_dig *dig;
+
+	memset(&args, 0, sizeof(args));
+
+	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+		return;
+
+	switch (frev) {
+	case 1:
+		switch (crev) {
+		case 1:
+		default:
+			if (ASIC_IS_AVIVO(rdev))
+				args.v1.ucCRTC = radeon_crtc->crtc_id;
+			else {
+				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
+					args.v1.ucCRTC = radeon_crtc->crtc_id;
+				} else {
+					args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
+				}
+			}
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+				args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+			case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+				if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
+					args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+			case ENCODER_OBJECT_ID_INTERNAL_DDI:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
+				else
+					args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
+				break;
+			}
+			break;
+		case 2:
+			args.v2.ucCRTC = radeon_crtc->crtc_id;
+			if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
+				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+				if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
+				else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
+					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
+				else
+					args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+			} else
+				args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				dig = radeon_encoder->enc_priv;
+				switch (dig->dig_encoder) {
+				case 0:
+					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+					break;
+				case 1:
+					args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+					break;
+				case 2:
+					args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+					break;
+				case 3:
+					args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+					break;
+				case 4:
+					args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+					break;
+				case 5:
+					args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+					break;
+				}
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
+					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
+				else
+					args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
+				break;
+			}
+			break;
+		}
+		break;
+	default:
+		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
+		return;
+	}
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	/* update scratch regs with new routing */
+	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
+}
+
+static void
+atombios_apply_encoder_quirks(struct drm_encoder *encoder,
+			      struct drm_display_mode *mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+
+	/* Funky macbooks */
+	if ((dev->pdev->device == 0x71C5) &&
+	    (dev->pdev->subsystem_vendor == 0x106b) &&
+	    (dev->pdev->subsystem_device == 0x0080)) {
+		if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
+			uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
+
+			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
+			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
+
+			WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
+		}
+	}
+
+	/* setting up the scaler clears this on some chips, so reprogram it */
+	if (ASIC_IS_AVIVO(rdev) &&
+	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
+		if (ASIC_IS_DCE4(rdev)) {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       EVERGREEN_INTERLEAVE_EN);
+			else
+				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		} else {
+			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
+				       AVIVO_D1MODE_INTERLEAVE_EN);
+			else
+				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+		}
+	}
+}
+
+static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *test_encoder;
+	struct radeon_encoder_atom_dig *dig;
+	uint32_t dig_enc_in_use = 0;
+
+	/* DCE4/5 */
+	if (ASIC_IS_DCE4(rdev)) {
+		dig = radeon_encoder->enc_priv;
+		if (ASIC_IS_DCE41(rdev)) {
+			/* ontario follows DCE4 */
+			if (rdev->family == CHIP_PALM) {
+				if (dig->linkb)
+					return 1;
+				else
+					return 0;
+			} else
+				/* llano follows DCE3.2 */
+				return radeon_crtc->crtc_id;
+		} else {
+			switch (radeon_encoder->encoder_id) {
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+				if (dig->linkb)
+					return 1;
+				else
+					return 0;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+				if (dig->linkb)
+					return 3;
+				else
+					return 2;
+				break;
+			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+				if (dig->linkb)
+					return 5;
+				else
+					return 4;
+				break;
+			}
+		}
+	}
+
+	/* on DCE3.2, an encoder can drive any block, so just use the crtc id */
+	if (ASIC_IS_DCE32(rdev)) {
+		return radeon_crtc->crtc_id;
+	}
+
+	/* on DCE3 - LVTMA can only be driven by DIGB */
+	list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_test_encoder;
+
+		if (encoder == test_encoder)
+			continue;
+
+		if (!radeon_encoder_is_digital(test_encoder))
+			continue;
+
+		radeon_test_encoder = to_radeon_encoder(test_encoder);
+		dig = radeon_test_encoder->enc_priv;
+
+		if (dig->dig_encoder >= 0)
+			dig_enc_in_use |= (1 << dig->dig_encoder);
+	}
+
+	if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
+		if (dig_enc_in_use & 0x2)
+			DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
+		return 1;
+	}
+	if (!(dig_enc_in_use & 1))
+		return 0;
+	return 1;
+}
+
+/* This only needs to be called once at startup */
+void
+radeon_atom_encoder_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+		struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+		switch (radeon_encoder->encoder_id) {
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+			break;
+		default:
+			break;
+		}
+
+		if (ext_encoder && ASIC_IS_DCE41(rdev))
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
+	}
+}
+
+static void
+radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
+			     struct drm_display_mode *mode,
+			     struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+	radeon_encoder->pixel_clock = adjusted_mode->clock;
+
+	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
+		if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
+			atombios_yuv_setup(encoder, true);
+		else
+			atombios_yuv_setup(encoder, false);
+	}
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		if (ASIC_IS_DCE4(rdev)) {
+			/* disable the transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+			/* setup and enable the encoder */
+			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+
+			/* enable the transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+		} else {
+			/* disable the encoder and transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+
+			/* setup and enable the encoder and transmitter */
+			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+		}
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		atombios_dvo_setup(encoder, ATOM_ENABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		atombios_dac_setup(encoder, ATOM_ENABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
+			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+				atombios_tv_setup(encoder, ATOM_ENABLE);
+			else
+				atombios_tv_setup(encoder, ATOM_DISABLE);
+		}
+		break;
+	}
+
+	if (ext_encoder) {
+		if (ASIC_IS_DCE41(rdev))
+			atombios_external_encoder_setup(encoder, ext_encoder,
+							EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
+		else
+			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
+	}
+
+	atombios_apply_encoder_quirks(encoder, adjusted_mode);
+
+	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+		r600_hdmi_enable(encoder);
+		r600_hdmi_setmode(encoder, adjusted_mode);
+	}
+}
+
+static bool
+atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
+				       ATOM_DEVICE_CV_SUPPORT |
+				       ATOM_DEVICE_CRT_SUPPORT)) {
+		DAC_LOAD_DETECTION_PS_ALLOCATION args;
+		int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
+		uint8_t frev, crev;
+
+		memset(&args, 0, sizeof(args));
+
+		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+			return false;
+
+		args.sDacload.ucMisc = 0;
+
+		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
+		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
+			args.sDacload.ucDacType = ATOM_DAC_A;
+		else
+			args.sDacload.ucDacType = ATOM_DAC_B;
+
+		if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
+		else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
+		else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
+			if (crev >= 3)
+				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+		} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
+			if (crev >= 3)
+				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
+		}
+
+		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+		return true;
+	} else
+		return false;
+}
+
+static enum drm_connector_status
+radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	uint32_t bios_0_scratch;
+
+	if (!atombios_dac_load_detect(encoder, connector)) {
+		DRM_DEBUG_KMS("detect returned false\n");
+		return connector_status_unknown;
+	}
+
+	if (rdev->family >= CHIP_R600)
+		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+	else
+		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
+
+	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+			return connector_status_connected; /* CTV */
+		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+			return connector_status_connected; /* STV */
+	}
+	return connector_status_disconnected;
+}
+
+static enum drm_connector_status
+radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+	u32 bios_0_scratch;
+
+	if (!ASIC_IS_DCE4(rdev))
+		return connector_status_unknown;
+
+	if (!ext_encoder)
+		return connector_status_unknown;
+
+	if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
+		return connector_status_unknown;
+
+	/* load detect on the dp bridge */
+	atombios_external_encoder_setup(encoder, ext_encoder,
+					EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
+
+	bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
+
+	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
+		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
+			return connector_status_connected;
+	}
+	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
+		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
+			return connector_status_connected; /* CTV */
+		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
+			return connector_status_connected; /* STV */
+	}
+	return connector_status_disconnected;
+}
+
+void
+radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
+{
+	struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
+
+	if (ext_encoder)
+		/* ddc_setup on the dp bridge */
+		atombios_external_encoder_setup(encoder, ext_encoder,
+						EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
+
+}
+
+static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
+	if ((radeon_encoder->active_device &
+	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
+	    (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
+	     ENCODER_OBJECT_ID_NONE)) {
+		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+		if (dig)
+			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+	}
+
+	radeon_atom_output_lock(encoder, true);
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+		/* select the clock/data port if it uses a router */
+		if (radeon_connector->router.cd_valid)
+			radeon_router_select_cd_port(radeon_connector);
+
+		/* turn eDP panel on for mode set */
+		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+			atombios_set_edp_panel_power(connector,
+						     ATOM_TRANSMITTER_ACTION_POWER_ON);
+	}
+
+	/* this is needed for the pll/ss setup to work correctly in some cases */
+	atombios_set_encoder_crtc_source(encoder);
+}
+
+static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
+{
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+	radeon_atom_output_lock(encoder, false);
+}
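
For orientation: the driver never calls these prepare/commit hooks itself; the drm_crtc_helper mode-set path does, bracketing the CRTC hooks. That is why prepare() above forces DPMS off and locks the output while commit() turns it back on and unlocks. A rough, illustrative sketch of the helper's call order (not driver code):

/*
 * drm_crtc_helper_set_mode(), roughly:
 *
 *	encoder_funcs->prepare(encoder);	// DPMS off + output lock
 *	crtc_funcs->prepare(crtc);
 *	crtc_funcs->mode_set(crtc, mode, adjusted_mode, x, y, old_fb);
 *	encoder_funcs->mode_set(encoder, mode, adjusted_mode);
 *	crtc_funcs->commit(crtc);
 *	encoder_funcs->commit(encoder);		// DPMS on + output unlock
 */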
+
+static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
+
+	/* check for pre-DCE3 cards with shared encoders;
+	 * can't really use the links individually, so don't disable
+	 * the encoder if it's in use by another connector
+	 */
+	if (!ASIC_IS_DCE3(rdev)) {
+		struct drm_encoder *other_encoder;
+		struct radeon_encoder *other_radeon_encoder;
+
+		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+			other_radeon_encoder = to_radeon_encoder(other_encoder);
+			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+			    drm_helper_encoder_in_use(other_encoder))
+				goto disable_done;
+		}
+	}
+
+	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+		if (ASIC_IS_DCE4(rdev))
+			/* disable the transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+		else {
+			/* disable the encoder and transmitter */
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
+		}
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+		atombios_dvo_setup(encoder, ATOM_DISABLE);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		atombios_dac_setup(encoder, ATOM_DISABLE);
+		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
+			atombios_tv_setup(encoder, ATOM_DISABLE);
+		break;
+	}
+
+disable_done:
+	if (radeon_encoder_is_digital(encoder)) {
+		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+			r600_hdmi_disable(encoder);
+		dig = radeon_encoder->enc_priv;
+		dig->dig_encoder = -1;
+	}
+	radeon_encoder->active_device = 0;
+}
+
+/* these are handled by the primary encoders */
+static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
+{
+
+}
+
+static void radeon_atom_ext_commit(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_mode_set(struct drm_encoder *encoder,
+			 struct drm_display_mode *mode,
+			 struct drm_display_mode *adjusted_mode)
+{
+
+}
+
+static void radeon_atom_ext_disable(struct drm_encoder *encoder)
+{
+
+}
+
+static void
+radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
+{
+
+}
+
+static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
+				       struct drm_display_mode *mode,
+				       struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
+	.dpms = radeon_atom_ext_dpms,
+	.mode_fixup = radeon_atom_ext_mode_fixup,
+	.prepare = radeon_atom_ext_prepare,
+	.mode_set = radeon_atom_ext_mode_set,
+	.commit = radeon_atom_ext_commit,
+	.disable = radeon_atom_ext_disable,
+	/* no detect for TMDS/LVDS yet */
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
+	.dpms = radeon_atom_encoder_dpms,
+	.mode_fixup = radeon_atom_mode_fixup,
+	.prepare = radeon_atom_encoder_prepare,
+	.mode_set = radeon_atom_encoder_mode_set,
+	.commit = radeon_atom_encoder_commit,
+	.disable = radeon_atom_encoder_disable,
+	.detect = radeon_atom_dig_detect,
+};
+
+static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
+	.dpms = radeon_atom_encoder_dpms,
+	.mode_fixup = radeon_atom_mode_fixup,
+	.prepare = radeon_atom_encoder_prepare,
+	.mode_set = radeon_atom_encoder_mode_set,
+	.commit = radeon_atom_encoder_commit,
+	.detect = radeon_atom_dac_detect,
+};
+
+void radeon_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	kfree(radeon_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(radeon_encoder);
+}
+
+static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
+	.destroy = radeon_enc_destroy,
+};
+
+struct radeon_encoder_atom_dac *
+radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
+{
+	struct drm_device *dev = radeon_encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
+
+	if (!dac)
+		return NULL;
+
+	dac->tv_std = radeon_atombios_get_tv_info(rdev);
+	return dac;
+}
+
+struct radeon_encoder_atom_dig *
+radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
+{
+	int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
+	struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
+
+	if (!dig)
+		return NULL;
+
+	/* coherent mode by default */
+	dig->coherent_mode = true;
+	dig->dig_encoder = -1;
+
+	if (encoder_enum == 2)
+		dig->linkb = true;
+	else
+		dig->linkb = false;
+
+	return dig;
+}
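
The linkb flag above comes from the enum id packed into encoder_enum. A standalone sketch of the decode, assuming the usual ObjectID.h layout (object id in the low byte, enum id in bits 8-10); the sample value is made up:

#include <stdio.h>
#include <stdint.h>

#define OBJECT_ID_MASK	0x00ff	/* assumed, per ObjectID.h */
#define OBJECT_ID_SHIFT	0
#define ENUM_ID_MASK	0x0700
#define ENUM_ID_SHIFT	8

int main(void)
{
	uint32_t encoder_enum = 0x0215;	/* hypothetical sample */
	uint32_t object_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
	uint32_t enum_id = (encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;

	/* enum id 1 = link A, 2 = link B on dual-link DIG encoders */
	printf("object id 0x%02x, enum id %u, linkb = %d\n",
	       object_id, enum_id, enum_id == 2);
	return 0;
}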
+
+void
+radeon_add_atom_encoder(struct drm_device *dev,
+			uint32_t encoder_enum,
+			uint32_t supported_device,
+			u16 caps)
+{
+	struct radeon_device *rdev = dev->dev_private;
+	struct drm_encoder *encoder;
+	struct radeon_encoder *radeon_encoder;
+
+	/* see if we already added it */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		radeon_encoder = to_radeon_encoder(encoder);
+		if (radeon_encoder->encoder_enum == encoder_enum) {
+			radeon_encoder->devices |= supported_device;
+			return;
+		}
+	}
+
+	/* add a new one */
+	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
+	if (!radeon_encoder)
+		return;
+
+	encoder = &radeon_encoder->base;
+	switch (rdev->num_crtc) {
+	case 1:
+		encoder->possible_crtcs = 0x1;
+		break;
+	case 2:
+	default:
+		encoder->possible_crtcs = 0x3;
+		break;
+	case 4:
+		encoder->possible_crtcs = 0xf;
+		break;
+	case 6:
+		encoder->possible_crtcs = 0x3f;
+		break;
+	}
+
+	radeon_encoder->enc_priv = NULL;
+
+	radeon_encoder->encoder_enum = encoder_enum;
+	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
+	radeon_encoder->devices = supported_device;
+	radeon_encoder->rmx_type = RMX_OFF;
+	radeon_encoder->underscan_type = UNDERSCAN_OFF;
+	radeon_encoder->is_ext_encoder = false;
+	radeon_encoder->caps = caps;
+
+	switch (radeon_encoder->encoder_id) {
+	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
+	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			radeon_encoder->rmx_type = RMX_FULL;
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		} else {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		}
+		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
+		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
+		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
+		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
+		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
+	case ENCODER_OBJECT_ID_INTERNAL_DDI:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+			radeon_encoder->rmx_type = RMX_FULL;
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
+		} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		} else {
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+		}
+		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
+		break;
+	case ENCODER_OBJECT_ID_SI170B:
+	case ENCODER_OBJECT_ID_CH7303:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
+	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
+	case ENCODER_OBJECT_ID_TITFP513:
+	case ENCODER_OBJECT_ID_VT1623:
+	case ENCODER_OBJECT_ID_HDMI_SI1930:
+	case ENCODER_OBJECT_ID_TRAVIS:
+	case ENCODER_OBJECT_ID_NUTMEG:
+		/* these are handled by the primary encoders */
+		radeon_encoder->is_ext_encoder = true;
+		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
+		else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
+		else
+			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
+		drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
+		break;
+	}
+}
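
The possible_crtcs switch above builds a one-bit-per-CRTC mask, i.e. (1 << num_crtc) - 1 for each supported CRTC count. A minimal standalone check of that equivalence:

#include <stdio.h>

int main(void)
{
	int n;

	/* 1 -> 0x1, 2 -> 0x3, 4 -> 0xf, 6 -> 0x3f, matching the switch */
	for (n = 1; n <= 6; n++)
		printf("%d crtcs -> possible_crtcs 0x%x\n", n, (1u << n) - 1);
	return 0;
}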
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index ed406e8..e4c384b 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -353,6 +353,7 @@
 		default:
 			break;
 		}
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 	}
 	if (rdev->irq.installed)
 		evergreen_irq_set(rdev);
@@ -893,7 +894,7 @@
 	u32 tmp;
 	int r;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
@@ -945,7 +946,6 @@
 void evergreen_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int r;
 
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
@@ -965,14 +965,7 @@
 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
-	if (rdev->gart.table.vram.robj) {
-		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->gart.table.vram.robj);
-			radeon_bo_unpin(rdev->gart.table.vram.robj);
-			radeon_bo_unreserve(rdev->gart.table.vram.robj);
-		}
-	}
+	radeon_gart_table_vram_unpin(rdev);
 }
 
 void evergreen_pcie_gart_fini(struct radeon_device *rdev)
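
The open-coded reserve/kunmap/unpin/unreserve sequence removed above (and again in ni.c, r300.c and r600.c below) is folded into the new radeon_gart_table_vram_unpin() helper. Its body is not part of this excerpt; presumably it mirrors the removed sequence along these lines:

void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL)
		return;
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
	}
}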
@@ -3031,6 +3024,10 @@
 		}
 	}
 
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
 	evergreen_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		evergreen_agp_enable(rdev);
@@ -3235,6 +3232,7 @@
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	evergreen_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index dcf11bb..914e5af 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -94,6 +94,15 @@
 	else
 		cp_coher_size = ((size + 255) >> 8);
 
+	if (rdev->family >= CHIP_CAYMAN) {
+		/* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
+		 * to the RB directly. For IBs, the CP programs this as part of the
+		 * surface_sync packet.
+		 */
+		radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+		radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+		radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
+	}
 	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
 	radeon_ring_write(rdev, sync_type);
 	radeon_ring_write(rdev, cp_coher_size);
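
The 0x85e8 write above relies on the SET_CONFIG_REG addressing convention: the packet body carries DWORD offsets from the config-register aperture, not byte addresses. Assuming the evergreen base value of 0x8000 for PACKET3_SET_CONFIG_REG_START, the offset works out like this:

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x85e8;	/* CP_COHER_CNTL2 byte address */
	unsigned int base = 0x8000;	/* assumed SET_CONFIG_REG aperture base */

	/* prints 0x17a, the DWORD offset emitted on the ring */
	printf("DW offset = 0x%x\n", (reg - base) >> 2);
	return 0;
}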
@@ -174,7 +183,7 @@
 static void
 set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
-		 u64 gpu_addr)
+		 u64 gpu_addr, u32 size)
 {
 	u32 sq_tex_resource_word0, sq_tex_resource_word1;
 	u32 sq_tex_resource_word4, sq_tex_resource_word7;
@@ -196,6 +205,9 @@
 	sq_tex_resource_word7 = format |
 		S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
 
+	cp_set_surface_sync(rdev,
+			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
+
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, sq_tex_resource_word0);
@@ -613,11 +625,13 @@
 	rdev->r600_blit.primitives.set_default_state = set_default_state;
 
 	rdev->r600_blit.ring_size_common = 55; /* shaders + def state */
-	rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
 	rdev->r600_blit.ring_size_common += 5; /* done copy */
-	rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
 
 	rdev->r600_blit.ring_size_per_loop = 74;
+	if (rdev->family >= CHIP_CAYMAN)
+		rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
 
 	rdev->r600_blit.max_dim = 16384;
 
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index fdb93f88..0e57998 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -262,8 +262,11 @@
 		WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
 
 		/* wait for training to complete */
-		while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
-			udelay(10);
+		for (i = 0; i < rdev->usec_timeout; i++) {
+			if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
+				break;
+			udelay(1);
+		}
 
 		if (running)
 			WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
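
The hunk above replaces an unbounded busy-wait with a poll bounded by rdev->usec_timeout, so a MEM_FALL_OUT_CMD bit that never rises can no longer hang the CPU. The same pattern generalizes to any status bit; a kernel-style sketch with a hypothetical helper name:

/* Sketch only: poll reg until mask is set, bounded by usec_timeout. */
static int poll_reg_sketch(struct radeon_device *rdev, u32 reg, u32 mask)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(reg) & mask)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}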
@@ -933,7 +936,7 @@
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
@@ -978,8 +981,6 @@
 
 void cayman_pcie_gart_disable(struct radeon_device *rdev)
 {
-	int r;
-
 	/* Disable all tables */
 	WREG32(VM_CONTEXT0_CNTL, 0);
 	WREG32(VM_CONTEXT1_CNTL, 0);
@@ -995,14 +996,7 @@
 	WREG32(VM_L2_CNTL2, 0);
 	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
 	       L2_CACHE_BIGK_FRAGMENT_SIZE(6));
-	if (rdev->gart.table.vram.robj) {
-		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->gart.table.vram.robj);
-			radeon_bo_unpin(rdev->gart.table.vram.robj);
-			radeon_bo_unreserve(rdev->gart.table.vram.robj);
-		}
-	}
+	radeon_gart_table_vram_unpin(rdev);
 }
 
 void cayman_pcie_gart_fini(struct radeon_device *rdev)
@@ -1362,6 +1356,10 @@
 		return r;
 	}
 
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
 	evergreen_mc_program(rdev);
 	r = cayman_pcie_gart_enable(rdev);
 	if (r)
@@ -1557,6 +1555,7 @@
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	cayman_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index cbf49f4..ad158ea 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -537,6 +537,7 @@
 		default:
 			break;
 		}
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 	}
 	if (rdev->irq.installed)
 		r100_irq_set(rdev);
@@ -577,7 +578,7 @@
 {
 	int r;
 
-	if (rdev->gart.table.ram.ptr) {
+	if (rdev->gart.ptr) {
 		WARN(1, "R100 PCI GART already initialized\n");
 		return 0;
 	}
@@ -636,10 +637,12 @@
 
 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
+	u32 *gtt = rdev->gart.ptr;
+
 	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
 		return -EINVAL;
 	}
-	rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
+	gtt[i] = cpu_to_le32(lower_32_bits(addr));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 33f2b68..400b26d 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -74,7 +74,7 @@
 
 int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
-	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+	void __iomem *ptr = rdev->gart.ptr;
 
 	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
 		return -EINVAL;
@@ -93,7 +93,7 @@
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj) {
+	if (rdev->gart.robj) {
 		WARN(1, "RV370 PCIE GART already initialized\n");
 		return 0;
 	}
@@ -116,7 +116,7 @@
 	uint32_t tmp;
 	int r;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
@@ -154,7 +154,6 @@
 void rv370_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int r;
 
 	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
 	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
@@ -163,14 +162,7 @@
 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
-	if (rdev->gart.table.vram.robj) {
-		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->gart.table.vram.robj);
-			radeon_bo_unpin(rdev->gart.table.vram.robj);
-			radeon_bo_unreserve(rdev->gart.table.vram.robj);
-		}
-	}
+	radeon_gart_table_vram_unpin(rdev);
 }
 
 void rv370_pcie_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 4e777c1..19afc43 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -763,13 +763,14 @@
 	struct drm_device *dev = rdev->ddev;
 	struct drm_connector *connector;
 
-	if (ASIC_IS_DCE3(rdev)) {
-		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
-		if (ASIC_IS_DCE32(rdev))
-			tmp |= DC_HPDx_EN;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (ASIC_IS_DCE3(rdev)) {
+			u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
+			if (ASIC_IS_DCE32(rdev))
+				tmp |= DC_HPDx_EN;
+
 			switch (radeon_connector->hpd.hpd) {
 			case RADEON_HPD_1:
 				WREG32(DC_HPD1_CONTROL, tmp);
@@ -799,10 +800,7 @@
 			default:
 				break;
 			}
-		}
-	} else {
-		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		} else {
 			switch (radeon_connector->hpd.hpd) {
 			case RADEON_HPD_1:
 				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
@@ -820,6 +818,7 @@
 				break;
 			}
 		}
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 	}
 	if (rdev->irq.installed)
 		r600_irq_set(rdev);
@@ -897,7 +896,7 @@
 	/* flush hdp cache so updates hit vram */
 	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
 	    !(rdev->flags & RADEON_IS_AGP)) {
-		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+		void __iomem *ptr = (void *)rdev->gart.ptr;
 		u32 tmp;
 
 		/* r7xx hw bug.  write to HDP_DEBUG1 followed by fb read
@@ -932,7 +931,7 @@
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj) {
+	if (rdev->gart.robj) {
 		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
@@ -949,7 +948,7 @@
 	u32 tmp;
 	int r, i;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
@@ -1004,7 +1003,7 @@
 void r600_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i, r;
+	int i;
 
 	/* Disable all tables */
 	for (i = 0; i < 7; i++)
@@ -1031,14 +1030,7 @@
 	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
-	if (rdev->gart.table.vram.robj) {
-		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->gart.table.vram.robj);
-			radeon_bo_unpin(rdev->gart.table.vram.robj);
-			radeon_bo_unreserve(rdev->gart.table.vram.robj);
-		}
-	}
+	radeon_gart_table_vram_unpin(rdev);
 }
 
 void r600_pcie_gart_fini(struct radeon_device *rdev)
@@ -1138,7 +1130,7 @@
 		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
 		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
 	}
-	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
 	WREG32(MC_VM_FB_LOCATION, tmp);
@@ -1277,6 +1269,53 @@
 	return 0;
 }
 
+int r600_vram_scratch_init(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
+				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
+				     &rdev->vram_scratch.robj);
+		if (r) {
+			return r;
+		}
+	}
+
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->vram_scratch.robj,
+			  RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
+	if (r) {
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+		return r;
+	}
+	r = radeon_bo_kmap(rdev->vram_scratch.robj,
+				(void **)&rdev->vram_scratch.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+	radeon_bo_unreserve(rdev->vram_scratch.robj);
+
+	return r;
+}
+
+void r600_vram_scratch_fini(struct radeon_device *rdev)
+{
+	int r;
+
+	if (rdev->vram_scratch.robj == NULL) {
+		return;
+	}
+	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->vram_scratch.robj);
+		radeon_bo_unpin(rdev->vram_scratch.robj);
+		radeon_bo_unreserve(rdev->vram_scratch.robj);
+	}
+	radeon_bo_unref(&rdev->vram_scratch.robj);
+}
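
These helpers give the memory controller a harmless VRAM page to direct stray accesses at: MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, previously hard-coded to 0, is pointed at the scratch page in the r600_mc_program() hunk below. Condensing the call sites added by this patch:

r = r600_vram_scratch_init(rdev);	/* early in *_startup() */
if (r)
	return r;
/* ... */
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
       rdev->vram_scratch.gpu_addr >> 12);
/* ... */
r600_vram_scratch_fini(rdev);		/* in *_fini(), after GART teardown */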
+
+/* We don't check that the GPU really needs a reset; we simply do the
+ * reset. It's up to the caller to determine if the GPU needs one. We
+ * might add a helper function to check that.
@@ -2332,6 +2371,14 @@
 	if (rdev->wb.use_event) {
 		u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
 			(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+		/* flush read cache over gart */
+		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+					PACKET3_VC_ACTION_ENA |
+					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(rdev, 0xFFFFFFFF);
+		radeon_ring_write(rdev, 0);
+		radeon_ring_write(rdev, 10); /* poll interval */
 		/* EVENT_WRITE_EOP - flush caches, send int */
 		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
 		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
@@ -2340,6 +2387,14 @@
 		radeon_ring_write(rdev, fence->seq);
 		radeon_ring_write(rdev, 0);
 	} else {
+		/* flush read cache over gart */
+		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
+		radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+					PACKET3_VC_ACTION_ENA |
+					PACKET3_SH_ACTION_ENA);
+		radeon_ring_write(rdev, 0xFFFFFFFF);
+		radeon_ring_write(rdev, 0);
+		radeon_ring_write(rdev, 10); /* poll interval */
 		radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
 		radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
 		/* wait for 3D idle clean */
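
Each SURFACE_SYNC flush added here costs five DWs on the ring (header plus four data words), which the ring_size_common bumps in the blit code below reserve headroom for. A standalone decode of the header, assuming the r600d.h PACKET3 layout and the 0x43 SURFACE_SYNC opcode:

#include <stdio.h>
#include <stdint.h>

#define PACKET_TYPE3		3u
#define PACKET3(op, n)		((PACKET_TYPE3 << 30) |		\
				 (((op) & 0xff) << 8) |		\
				 (((n) & 0x3fff) << 16))
#define PACKET3_SURFACE_SYNC	0x43	/* assumed, per r600d.h */

int main(void)
{
	uint32_t hdr = PACKET3(PACKET3_SURFACE_SYNC, 3);

	/* n = 3 means four data words follow the header: sync flags,
	 * coherency size, base address and poll interval. */
	printf("header 0x%08x, packet size %u DWs\n", hdr, 3u + 2u);
	return 0;
}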
@@ -2421,6 +2476,10 @@
 		}
 	}
 
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
 	r600_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		r600_agp_enable(rdev);
@@ -2641,6 +2700,7 @@
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	r600_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index c4cf130..e09d281 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -201,7 +201,7 @@
 static void
 set_tex_resource(struct radeon_device *rdev,
 		 int format, int w, int h, int pitch,
-		 u64 gpu_addr)
+		 u64 gpu_addr, u32 size)
 {
 	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
 
@@ -222,6 +222,9 @@
 		S_038010_DST_SEL_Z(SQ_SEL_Z) |
 		S_038010_DST_SEL_W(SQ_SEL_W);
 
+	cp_set_surface_sync(rdev,
+			    PACKET3_TC_ACTION_ENA, size, gpu_addr);
+
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
 	radeon_ring_write(rdev, 0);
 	radeon_ring_write(rdev, sq_tex_resource_word0);
@@ -500,9 +503,9 @@
 	rdev->r600_blit.primitives.set_default_state = set_default_state;
 
 	rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
-	rdev->r600_blit.ring_size_common += 10; /* fence emit for VB IB */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
 	rdev->r600_blit.ring_size_common += 5; /* done copy */
-	rdev->r600_blit.ring_size_common += 10; /* fence emit for done copy */
+	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
 
 	rdev->r600_blit.ring_size_per_loop = 76;
 	/* set_render_target emits 2 extra dwords on rv6xx */
@@ -760,10 +763,7 @@
 		vb[11] = i2f(h);
 
 		rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
-							    w, h, w, src_gpu_addr);
-		rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
-							       PACKET3_TC_ACTION_ENA,
-							       size_in_bytes, src_gpu_addr);
+							    w, h, w, src_gpu_addr, size_in_bytes);
 		rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
 							     w, h, dst_gpu_addr);
 		rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index e3170c7..b316b30 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -93,6 +93,7 @@
 extern int radeon_disp_priority;
 extern int radeon_hw_i2c;
 extern int radeon_pcie_gen2;
+extern int radeon_msi;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -306,30 +307,17 @@
  */
 struct radeon_mc;
 
-struct radeon_gart_table_ram {
-	volatile uint32_t		*ptr;
-};
-
-struct radeon_gart_table_vram {
-	struct radeon_bo		*robj;
-	volatile uint32_t		*ptr;
-};
-
-union radeon_gart_table {
-	struct radeon_gart_table_ram	ram;
-	struct radeon_gart_table_vram	vram;
-};
-
 #define RADEON_GPU_PAGE_SIZE 4096
 #define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
 #define RADEON_GPU_PAGE_SHIFT 12
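
These constants split a GPU address into a page index and an intra-page offset; a quick standalone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12

int main(void)
{
	uint64_t addr = 0x12345678;	/* arbitrary GPU address */

	/* prints page 0x12345, offset 0x678 */
	printf("page 0x%llx, offset 0x%llx\n",
	       (unsigned long long)(addr >> RADEON_GPU_PAGE_SHIFT),
	       (unsigned long long)(addr & RADEON_GPU_PAGE_MASK));
	return 0;
}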
 
 struct radeon_gart {
 	dma_addr_t			table_addr;
+	struct radeon_bo		*robj;
+	void				*ptr;
 	unsigned			num_gpu_pages;
 	unsigned			num_cpu_pages;
 	unsigned			table_size;
-	union radeon_gart_table		table;
 	struct page			**pages;
 	dma_addr_t			*pages_addr;
 	bool				*ttm_alloced;
@@ -340,6 +328,8 @@
 void radeon_gart_table_ram_free(struct radeon_device *rdev);
 int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
 void radeon_gart_table_vram_free(struct radeon_device *rdev);
+int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
 int radeon_gart_init(struct radeon_device *rdev);
 void radeon_gart_fini(struct radeon_device *rdev);
 void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
@@ -347,6 +337,7 @@
 int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 		     int pages, struct page **pagelist,
 		     dma_addr_t *dma_addr);
+void radeon_gart_restore(struct radeon_device *rdev);
 
 
 /*
@@ -437,25 +428,26 @@
 	struct evergreen_irq_stat_regs evergreen;
 };
 
+#define RADEON_MAX_HPD_PINS 6
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_HDMI_BLOCKS 2
+
 struct radeon_irq {
 	bool		installed;
 	bool		sw_int;
-	/* FIXME: use a define max crtc rather than hardcode it */
-	bool		crtc_vblank_int[6];
-	bool		pflip[6];
+	bool		crtc_vblank_int[RADEON_MAX_CRTCS];
+	bool		pflip[RADEON_MAX_CRTCS];
 	wait_queue_head_t	vblank_queue;
-	/* FIXME: use defines for max hpd/dacs */
-	bool            hpd[6];
+	bool            hpd[RADEON_MAX_HPD_PINS];
 	bool            gui_idle;
 	bool            gui_idle_acked;
 	wait_queue_head_t	idle_queue;
-	/* FIXME: use defines for max HDMI blocks */
-	bool		hdmi[2];
+	bool		hdmi[RADEON_MAX_HDMI_BLOCKS];
 	spinlock_t sw_lock;
 	int sw_refcount;
 	union radeon_irq_stat_regs stat_regs;
-	spinlock_t pflip_lock[6];
-	int pflip_refcount[6];
+	spinlock_t pflip_lock[RADEON_MAX_CRTCS];
+	int pflip_refcount[RADEON_MAX_CRTCS];
 };
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
@@ -533,7 +525,7 @@
 	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
 	void (*set_tex_resource)(struct radeon_device *rdev,
 				 int format, int w, int h, int pitch,
-				 u64 gpu_addr);
+				 u64 gpu_addr, u32 size);
 	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
 			     int x2, int y2);
 	void (*draw_auto)(struct radeon_device *rdev);
@@ -1143,10 +1135,11 @@
 int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *filp);
 
-/* VRAM scratch page for HDP bug */
-struct r700_vram_scratch {
+/* VRAM scratch page for HDP bug, default vram page */
+struct r600_vram_scratch {
 	struct radeon_bo		*robj;
 	volatile uint32_t		*ptr;
+	u64				gpu_addr;
 };
 
 /*
@@ -1218,7 +1211,7 @@
 	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
 	const struct firmware *mc_fw;	/* NI MC firmware */
 	struct r600_blit r600_blit;
-	struct r700_vram_scratch vram_scratch;
+	struct r600_vram_scratch vram_scratch;
 	int msi_enabled; /* msi enabled */
 	struct r600_ih ih; /* r6/700 interrupt ring */
 	struct work_struct hotplug_work;
@@ -1442,8 +1435,6 @@
 /* AGP */
 extern int radeon_gpu_reset(struct radeon_device *rdev);
 extern void radeon_agp_disable(struct radeon_device *rdev);
-extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
-extern void radeon_gart_restore(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
 extern void radeon_modeset_fini(struct radeon_device *rdev);
 extern bool radeon_card_posted(struct radeon_device *rdev);
@@ -1467,6 +1458,12 @@
 extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
 /*
+ * R600 vram scratch functions
+ */
+int r600_vram_scratch_init(struct radeon_device *rdev);
+void r600_vram_scratch_fini(struct radeon_device *rdev);
+
+/*
  * r600 functions used by radeon_encoder.c
  */
 extern void r600_hdmi_enable(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index dec6cbe..e7cb3ab 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -44,8 +44,6 @@
 radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
 			     struct drm_connector *drm_connector);
 
-bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
-
 void radeon_connector_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
@@ -432,55 +430,6 @@
 	return 0;
 }
 
-/*
- * Some integrated ATI Radeon chipset implementations (e. g.
- * Asus M2A-VM HDMI) may indicate the availability of a DDC,
- * even when there's no monitor connected. For these connectors
- * following DDC probe extension will be applied: check also for the
- * availability of EDID with at least a correct EDID header. Only then,
- * DDC is assumed to be available. This prevents drm_get_edid() and
- * drm_edid_block_valid() from periodically dumping data and kernel
- * errors into the logs and onto the terminal.
- */
-static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
-				     uint32_t supported_device,
-				     int connector_type)
-{
-	/* Asus M2A-VM HDMI board sends data to i2c bus even,
-	 * if HDMI add-on card is not plugged in or HDMI is disabled in
-	 * BIOS. Valid DDC can only be assumed, if also a valid EDID header
-	 * can be retrieved via i2c bus during DDC probe */
-	if ((dev->pdev->device == 0x791e) &&
-	    (dev->pdev->subsystem_vendor == 0x1043) &&
-	    (dev->pdev->subsystem_device == 0x826d)) {
-		if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
-		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
-			return true;
-	}
-	/* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus
-	 * for a DVI connector that is not implemented */
-	if ((dev->pdev->device == 0x796e) &&
-	    (dev->pdev->subsystem_vendor == 0x1019) &&
-	    (dev->pdev->subsystem_device == 0x2615)) {
-		if ((connector_type == DRM_MODE_CONNECTOR_DVID) &&
-		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
-			return true;
-	}
-	/* TOSHIBA Satellite L300D with ATI Mobility Radeon x1100
-	 * (RS690M) sends data to i2c bus for a HDMI connector that
-	 * is not implemented */
-	if ((dev->pdev->device == 0x791f) &&
-	    (dev->pdev->subsystem_vendor == 0x1179) &&
-	    (dev->pdev->subsystem_device == 0xff68)) {
-		if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
-		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
-			return true;
-	}
-
-	/* Default: no EDID header probe required for DDC probing */
-	return false;
-}
-
 static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
 					  struct drm_connector *connector)
 {
@@ -721,8 +670,7 @@
 		ret = connector_status_disconnected;
 
 	if (radeon_connector->ddc_bus)
-		dret = radeon_ddc_probe(radeon_connector,
-					radeon_connector->requires_extended_probe);
+		dret = radeon_ddc_probe(radeon_connector);
 	if (dret) {
 		radeon_connector->detected_by_load = false;
 		if (radeon_connector->edid) {
@@ -764,7 +712,7 @@
 		if (radeon_connector->dac_load_detect && encoder) {
 			encoder_funcs = encoder->helper_private;
 			ret = encoder_funcs->detect(encoder, connector);
-			if (ret == connector_status_connected)
+			if (ret != connector_status_disconnected)
 				radeon_connector->detected_by_load = true;
 		}
 	}
@@ -904,8 +852,7 @@
 	bool dret = false;
 
 	if (radeon_connector->ddc_bus)
-		dret = radeon_ddc_probe(radeon_connector,
-					radeon_connector->requires_extended_probe);
+		dret = radeon_ddc_probe(radeon_connector);
 	if (dret) {
 		radeon_connector->detected_by_load = false;
 		if (radeon_connector->edid) {
@@ -1005,8 +952,9 @@
 					ret = encoder_funcs->detect(encoder, connector);
 					if (ret == connector_status_connected) {
 						radeon_connector->use_digital = false;
-						radeon_connector->detected_by_load = true;
 					}
+					if (ret != connector_status_disconnected)
+						radeon_connector->detected_by_load = true;
 				}
 				break;
 			}
@@ -1203,7 +1151,8 @@
 		}
 	} else {
 		/* need to setup ddc on the bridge */
-		if (radeon_connector_encoder_is_dp_bridge(connector)) {
+		if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+			ENCODER_OBJECT_ID_NONE) {
 			if (encoder)
 				radeon_atom_ext_encoder_setup_ddc(encoder);
 		}
@@ -1213,13 +1162,12 @@
 	return ret;
 }
 
-bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector)
+u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
 {
 	struct drm_mode_object *obj;
 	struct drm_encoder *encoder;
 	struct radeon_encoder *radeon_encoder;
 	int i;
-	bool found = false;
 
 	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
 		if (connector->encoder_ids[i] == 0)
@@ -1235,14 +1183,13 @@
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_TRAVIS:
 		case ENCODER_OBJECT_ID_NUTMEG:
-			found = true;
-			break;
+			return radeon_encoder->encoder_id;
 		default:
 			break;
 		}
 	}
 
-	return found;
+	return ENCODER_OBJECT_ID_NONE;
 }
 
 bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
@@ -1319,7 +1266,8 @@
 		if (!radeon_dig_connector->edp_on)
 			atombios_set_edp_panel_power(connector,
 						     ATOM_TRANSMITTER_ACTION_POWER_OFF);
-	} else if (radeon_connector_encoder_is_dp_bridge(connector)) {
+	} else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+		   ENCODER_OBJECT_ID_NONE) {
 		/* DP bridges are always DP */
 		radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
 		/* get the DPCD from the bridge */
@@ -1328,8 +1276,7 @@
 		if (encoder) {
 			/* setup ddc on the bridge */
 			radeon_atom_ext_encoder_setup_ddc(encoder);
-			if (radeon_ddc_probe(radeon_connector,
-					     radeon_connector->requires_extended_probe)) /* try DDC */
+			if (radeon_ddc_probe(radeon_connector)) /* try DDC */
 				ret = connector_status_connected;
 			else if (radeon_connector->dac_load_detect) { /* try load detection */
 				struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
@@ -1347,8 +1294,7 @@
 				if (radeon_dp_getdpcd(radeon_connector))
 					ret = connector_status_connected;
 			} else {
-				if (radeon_ddc_probe(radeon_connector,
-						     radeon_connector->requires_extended_probe))
+				if (radeon_ddc_probe(radeon_connector))
 					ret = connector_status_connected;
 			}
 		}
@@ -1493,9 +1439,7 @@
 	radeon_connector->shared_ddc = shared_ddc;
 	radeon_connector->connector_object_id = connector_object_id;
 	radeon_connector->hpd = *hpd;
-	radeon_connector->requires_extended_probe =
-		radeon_connector_needs_extended_probe(rdev, supported_device,
-							connector_type);
+
 	radeon_connector->router = *router;
 	if (router->ddc_valid || router->cd_valid) {
 		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
@@ -1842,9 +1786,7 @@
 	radeon_connector->devices = supported_device;
 	radeon_connector->connector_object_id = connector_object_id;
 	radeon_connector->hpd = *hpd;
-	radeon_connector->requires_extended_probe =
-		radeon_connector_needs_extended_probe(rdev, supported_device,
-							connector_type);
+
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6adb3e5..a22d6e6 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -33,8 +33,6 @@
 #include "drm_crtc_helper.h"
 #include "drm_edid.h"
 
-static int radeon_ddc_dump(struct drm_connector *connector);
-
 static void avivo_crtc_load_lut(struct drm_crtc *crtc)
 {
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -669,7 +667,6 @@
 static bool radeon_setup_enc_conn(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
-	struct drm_connector *drm_connector;
 	bool ret = false;
 
 	if (rdev->bios) {
@@ -689,8 +686,6 @@
 	if (ret) {
 		radeon_setup_encoder_clones(dev);
 		radeon_print_display_setup(dev);
-		list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
-			radeon_ddc_dump(drm_connector);
 	}
 
 	return ret;
@@ -708,7 +703,8 @@
 
 	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
-	    radeon_connector_encoder_is_dp_bridge(&radeon_connector->base)) {
+	    (radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
+	     ENCODER_OBJECT_ID_NONE)) {
 		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
 		if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
@@ -743,34 +739,6 @@
 	return 0;
 }
 
-static int radeon_ddc_dump(struct drm_connector *connector)
-{
-	struct edid *edid;
-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-	int ret = 0;
-
-	/* on hw with routers, select right port */
-	if (radeon_connector->router.ddc_valid)
-		radeon_router_select_ddc_port(radeon_connector);
-
-	if (!radeon_connector->ddc_bus)
-		return -1;
-	edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
-	/* Log EDID retrieval status here. In particular with regard to
-	 * connectors with requires_extended_probe flag set, that will prevent
-	 * function radeon_dvi_detect() to fetch EDID on this connector,
-	 * as long as there is no valid EDID header found */
-	if (edid) {
-		DRM_INFO("Radeon display connector %s: Found valid EDID",
-				drm_get_connector_name(connector));
-		kfree(edid);
-	} else {
-		DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID",
-				drm_get_connector_name(connector));
-	}
-	return ret;
-}
-
 /* avivo */
 static void avivo_get_fb_div(struct radeon_pll *pll,
 			     u32 target_clock,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9699338..a0b35e9 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -119,6 +119,7 @@
 int radeon_disp_priority = 0;
 int radeon_hw_i2c = 0;
 int radeon_pcie_gen2 = 0;
+int radeon_msi = -1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -165,6 +166,9 @@
 MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (1 = enable)");
 module_param_named(pcie_gen2, radeon_pcie_gen2, int, 0444);
 
+MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(msi, radeon_msi, int, 0444);
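
The new parameter is tri-state: 1 and 0 force MSIs on or off, while the default -1 defers to per-chip quirk logic in the IRQ setup code, which is not part of this excerpt. A hypothetical shape of such a consumer — the helper name and the RV380 cutoff are illustrative assumptions, not the commit's code:

static bool radeon_msi_ok_sketch(struct radeon_device *rdev)
{
	if (radeon_msi == 1)
		return true;	/* forced on */
	if (radeon_msi == 0)
		return false;	/* forced off */

	/* -1: auto - apply per-chip/per-board quirks here */
	return rdev->family >= CHIP_RV380;
}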
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index eb3f6dc..06e413e 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -29,12 +29,6 @@
 #include "radeon.h"
 #include "atom.h"
 
-extern int atom_debug;
-
-/* evil but including atombios.h is much worse */
-bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
-				struct drm_display_mode *mode);
-
 static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
@@ -156,27 +150,6 @@
 	return ret;
 }
 
-static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
-{
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
-	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-	case ENCODER_OBJECT_ID_INTERNAL_DDI:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-		return true;
-	default:
-		return false;
-	}
-}
-
 void
 radeon_link_encoder_connector(struct drm_device *dev)
 {
@@ -229,23 +202,7 @@
 	return NULL;
 }
 
-static struct drm_connector *
-radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_connector *connector;
-	struct radeon_connector *radeon_connector;
-
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		radeon_connector = to_radeon_connector(connector);
-		if (radeon_encoder->devices & radeon_connector->devices)
-			return connector;
-	}
-	return NULL;
-}
-
-struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
+struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -266,9 +223,9 @@
 	return NULL;
 }
 
-bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder)
+u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
 {
-	struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder);
+	struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
 
 	if (other_encoder) {
 		struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
@@ -332,2105 +289,3 @@
 
 }
 
-static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
-				   struct drm_display_mode *mode,
-				   struct drm_display_mode *adjusted_mode)
-{
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-
-	/* set the active encoder to connector routing */
-	radeon_encoder_set_active_device(encoder);
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-	/* hw bug */
-	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
-	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
-		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
-
-	/* get the native mode for LVDS */
-	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
-		radeon_panel_mode_fixup(encoder, adjusted_mode);
-
-	/* get the native mode for TV */
-	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
-		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
-		if (tv_dac) {
-			if (tv_dac->tv_std == TV_STD_NTSC ||
-			    tv_dac->tv_std == TV_STD_NTSC_J ||
-			    tv_dac->tv_std == TV_STD_PAL_M)
-				radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
-			else
-				radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
-		}
-	}
-
-	if (ASIC_IS_DCE3(rdev) &&
-	    ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
-	     radeon_encoder_is_dp_bridge(encoder))) {
-		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-		radeon_dp_set_link_config(connector, mode);
-	}
-
-	return true;
-}
-
-static void
-atombios_dac_setup(struct drm_encoder *encoder, int action)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	DAC_ENCODER_CONTROL_PS_ALLOCATION args;
-	int index = 0;
-	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
-
-	memset(&args, 0, sizeof(args));
-
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-		index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-		index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
-		break;
-	}
-
-	args.ucAction = action;
-
-	if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
-		args.ucDacStandard = ATOM_DAC1_PS2;
-	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-		args.ucDacStandard = ATOM_DAC1_CV;
-	else {
-		switch (dac_info->tv_std) {
-		case TV_STD_PAL:
-		case TV_STD_PAL_M:
-		case TV_STD_SCART_PAL:
-		case TV_STD_SECAM:
-		case TV_STD_PAL_CN:
-			args.ucDacStandard = ATOM_DAC1_PAL;
-			break;
-		case TV_STD_NTSC:
-		case TV_STD_NTSC_J:
-		case TV_STD_PAL_60:
-		default:
-			args.ucDacStandard = ATOM_DAC1_NTSC;
-			break;
-		}
-	}
-	args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-
-	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-}
-
-static void
-atombios_tv_setup(struct drm_encoder *encoder, int action)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	TV_ENCODER_CONTROL_PS_ALLOCATION args;
-	int index = 0;
-	struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
-
-	memset(&args, 0, sizeof(args));
-
-	index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
-
-	args.sTVEncoder.ucAction = action;
-
-	if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-		args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
-	else {
-		switch (dac_info->tv_std) {
-		case TV_STD_NTSC:
-			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
-			break;
-		case TV_STD_PAL:
-			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
-			break;
-		case TV_STD_PAL_M:
-			args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
-			break;
-		case TV_STD_PAL_60:
-			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
-			break;
-		case TV_STD_NTSC_J:
-			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
-			break;
-		case TV_STD_SCART_PAL:
-			args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
-			break;
-		case TV_STD_SECAM:
-			args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
-			break;
-		case TV_STD_PAL_CN:
-			args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
-			break;
-		default:
-			args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
-			break;
-		}
-	}
-
-	args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-
-	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-}
-
-union dvo_encoder_control {
-	ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
-	DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
-	DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
-};
-
-void
-atombios_dvo_setup(struct drm_encoder *encoder, int action)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	union dvo_encoder_control args;
-	int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
-
-	memset(&args, 0, sizeof(args));
-
-	if (ASIC_IS_DCE3(rdev)) {
-		/* DCE3+ */
-		args.dvo_v3.ucAction = action;
-		args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-		args.dvo_v3.ucDVOConfig = 0; /* XXX */
-	} else if (ASIC_IS_DCE2(rdev)) {
-		/* DCE2 (pre-DCE3 R6xx, RS600/690/740 */
-		args.dvo.sDVOEncoder.ucAction = action;
-		args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-		/* DFP1, CRT1, TV1 depending on the type of port */
-		args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
-
-		if (radeon_encoder->pixel_clock > 165000)
-			args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
-	} else {
-		/* R4xx, R5xx */
-		args.ext_tmds.sXTmdsEncoder.ucEnable = action;
-
-		if (radeon_encoder->pixel_clock > 165000)
-			args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
-
-		/*if (pScrn->rgbBits == 8)*/
-		args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
-	}
-
-	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-union lvds_encoder_control {
-	LVDS_ENCODER_CONTROL_PS_ALLOCATION    v1;
-	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
-};
-
-void
-atombios_digital_setup(struct drm_encoder *encoder, int action)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	union lvds_encoder_control args;
-	int index = 0;
-	int hdmi_detected = 0;
-	uint8_t frev, crev;
-
-	if (!dig)
-		return;
-
-	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
-		hdmi_detected = 1;
-
-	memset(&args, 0, sizeof(args));
-
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
-		index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-		index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-			index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
-		else
-			index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
-		break;
-	}
-
-	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
-		return;
-
-	switch (frev) {
-	case 1:
-	case 2:
-		switch (crev) {
-		case 1:
-			args.v1.ucMisc = 0;
-			args.v1.ucAction = action;
-			if (hdmi_detected)
-				args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
-			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
-					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
-				if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
-					args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
-			} else {
-				if (dig->linkb)
-					args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
-				if (radeon_encoder->pixel_clock > 165000)
-					args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
-				/*if (pScrn->rgbBits == 8) */
-				args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
-			}
-			break;
-		case 2:
-		case 3:
-			args.v2.ucMisc = 0;
-			args.v2.ucAction = action;
-			if (crev == 3) {
-				if (dig->coherent_mode)
-					args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
-			}
-			if (hdmi_detected)
-				args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
-			args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-			args.v2.ucTruncate = 0;
-			args.v2.ucSpatial = 0;
-			args.v2.ucTemporal = 0;
-			args.v2.ucFRC = 0;
-			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-				if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
-					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
-				if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
-					args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
-					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
-						args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
-				}
-				if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
-					args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
-					if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
-						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
-					if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
-						args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
-				}
-			} else {
-				if (dig->linkb)
-					args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
-				if (radeon_encoder->pixel_clock > 165000)
-					args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
-			}
-			break;
-		default:
-			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-			break;
-		}
-		break;
-	default:
-		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
-		break;
-	}
-
-	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
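-/* Pick the ATOM encoder mode (DP, HDMI, DVI, LVDS, CRT or TV) from the
- * connector type and the monitor's EDID.  Audio-capable monitors only
- * get HDMI mode on pre-DCE4 parts; DCE4 falls back to DVI for now.
- */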
-int
-atombios_get_encoder_mode(struct drm_encoder *encoder)
-{
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct drm_connector *connector;
-	struct radeon_connector *radeon_connector;
-	struct radeon_connector_atom_dig *dig_connector;
-
-	/* dp bridges are always DP */
-	if (radeon_encoder_is_dp_bridge(encoder))
-		return ATOM_ENCODER_MODE_DP;
-
-	/* DVO is always DVO */
-	if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
-	    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
-		return ATOM_ENCODER_MODE_DVO;
-
-	connector = radeon_get_connector_for_encoder(encoder);
-	/* if we don't have an active device yet, just use one of
-	 * the connectors tied to the encoder.
-	 */
-	if (!connector)
-		connector = radeon_get_connector_for_encoder_init(encoder);
-	radeon_connector = to_radeon_connector(connector);
-
-	switch (connector->connector_type) {
-	case DRM_MODE_CONNECTOR_DVII:
-	case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
-			/* fix me */
-			if (ASIC_IS_DCE4(rdev))
-				return ATOM_ENCODER_MODE_DVI;
-			else
-				return ATOM_ENCODER_MODE_HDMI;
-		} else if (radeon_connector->use_digital)
-			return ATOM_ENCODER_MODE_DVI;
-		else
-			return ATOM_ENCODER_MODE_CRT;
-		break;
-	case DRM_MODE_CONNECTOR_DVID:
-	case DRM_MODE_CONNECTOR_HDMIA:
-	default:
-		if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
-			/* fix me */
-			if (ASIC_IS_DCE4(rdev))
-				return ATOM_ENCODER_MODE_DVI;
-			else
-				return ATOM_ENCODER_MODE_HDMI;
-		} else
-			return ATOM_ENCODER_MODE_DVI;
-		break;
-	case DRM_MODE_CONNECTOR_LVDS:
-		return ATOM_ENCODER_MODE_LVDS;
-		break;
-	case DRM_MODE_CONNECTOR_DisplayPort:
-		dig_connector = radeon_connector->con_priv;
-		if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-		    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
-			return ATOM_ENCODER_MODE_DP;
-		else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
-			/* fix me */
-			if (ASIC_IS_DCE4(rdev))
-				return ATOM_ENCODER_MODE_DVI;
-			else
-				return ATOM_ENCODER_MODE_HDMI;
-		} else
-			return ATOM_ENCODER_MODE_DVI;
-		break;
-	case DRM_MODE_CONNECTOR_eDP:
-		return ATOM_ENCODER_MODE_DP;
-	case DRM_MODE_CONNECTOR_DVIA:
-	case DRM_MODE_CONNECTOR_VGA:
-		return ATOM_ENCODER_MODE_CRT;
-		break;
-	case DRM_MODE_CONNECTOR_Composite:
-	case DRM_MODE_CONNECTOR_SVIDEO:
-	case DRM_MODE_CONNECTOR_9PinDIN:
-		/* fix me */
-		return ATOM_ENCODER_MODE_TV;
-		/*return ATOM_ENCODER_MODE_CV;*/
-		break;
-	}
-}
-
-/*
- * DIG Encoder/Transmitter Setup
- *
- * DCE 3.0/3.1
- * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
- * Supports up to 3 digital outputs
- * - 2 DIG encoder blocks.
- * DIG1 can drive UNIPHY link A or link B
- * DIG2 can drive UNIPHY link B or LVTMA
- *
- * DCE 3.2
- * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
- * Supports up to 5 digital outputs
- * - 2 DIG encoder blocks.
- * DIG1/2 can drive UNIPHY0/1/2 link A or link B
- *
- * DCE 4.0/5.0
- * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
- * Supports up to 6 digital outputs
- * - 6 DIG encoder blocks.
- * - DIG to PHY mapping is hardcoded
- * DIG1 drives UNIPHY0 link A, A+B
- * DIG2 drives UNIPHY0 link B
- * DIG3 drives UNIPHY1 link A, A+B
- * DIG4 drives UNIPHY1 link B
- * DIG5 drives UNIPHY2 link A, A+B
- * DIG6 drives UNIPHY2 link B
- *
- * DCE 4.1
- * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
- * Supports up to 6 digital outputs
- * - 2 DIG encoder blocks.
- * DIG1/2 can drive UNIPHY0/1/2 link A or link B
- *
- * Routing
- * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
- * Examples:
- * crtc0 -> dig2 -> LVTMA   links A+B -> TMDS/HDMI
- * crtc1 -> dig1 -> UNIPHY0 link  B   -> DP
- * crtc0 -> dig1 -> UNIPHY2 link  A   -> LVDS
- * crtc1 -> dig2 -> UNIPHY1 link  B+A -> TMDS/HDMI
- */
-
-union dig_encoder_control {
-	DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
-	DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
-	DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
-	DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
-};
-
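-/* Set up a DIG encoder block.  DCE4+ uses a single DIGxEncoderControl
- * table (the block is selected through dig->dig_encoder); older parts
- * have separate DIG1/DIG2 tables.  Lane count, DP link rate and panel
- * bpc are filled in according to the table revision.
- */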
-void
-atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-	union dig_encoder_control args;
-	int index = 0;
-	uint8_t frev, crev;
-	int dp_clock = 0;
-	int dp_lane_count = 0;
-	int hpd_id = RADEON_HPD_NONE;
-	int bpc = 8;
-
-	if (connector) {
-		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-		struct radeon_connector_atom_dig *dig_connector =
-			radeon_connector->con_priv;
-
-		dp_clock = dig_connector->dp_clock;
-		dp_lane_count = dig_connector->dp_lane_count;
-		hpd_id = radeon_connector->hpd.hpd;
-		bpc = connector->display_info.bpc;
-	}
-
-	/* no dig encoder assigned */
-	if (dig->dig_encoder == -1)
-		return;
-
-	memset(&args, 0, sizeof(args));
-
-	if (ASIC_IS_DCE4(rdev))
-		index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
-	else {
-		if (dig->dig_encoder)
-			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
-		else
-			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
-	}
-
-	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
-		return;
-
-	args.v1.ucAction = action;
-	args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-	if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
-		args.v3.ucPanelMode = panel_mode;
-	else
-		args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
-	if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
-	    (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
-		args.v1.ucLaneNum = dp_lane_count;
-	else if (radeon_encoder->pixel_clock > 165000)
-		args.v1.ucLaneNum = 8;
-	else
-		args.v1.ucLaneNum = 4;
-
-	if (ASIC_IS_DCE5(rdev)) {
-		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
-		    (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
-			if (dp_clock == 270000)
-				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
-			else if (dp_clock == 540000)
-				args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
-		}
-		args.v4.acConfig.ucDigSel = dig->dig_encoder;
-		switch (bpc) {
-		case 0:
-			args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
-			break;
-		case 6:
-			args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
-			break;
-		case 8:
-		default:
-			args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
-			break;
-		case 10:
-			args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
-			break;
-		case 12:
-			args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
-			break;
-		case 16:
-			args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
-			break;
-		}
-		if (hpd_id == RADEON_HPD_NONE)
-			args.v4.ucHPD_ID = 0;
-		else
-			args.v4.ucHPD_ID = hpd_id + 1;
-	} else if (ASIC_IS_DCE4(rdev)) {
-		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
-			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
-		args.v3.acConfig.ucDigSel = dig->dig_encoder;
-		switch (bpc) {
-		case 0:
-			args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
-			break;
-		case 6:
-			args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
-			break;
-		case 8:
-		default:
-			args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
-			break;
-		case 10:
-			args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
-			break;
-		case 12:
-			args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
-			break;
-		case 16:
-			args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
-			break;
-		}
-	} else {
-		if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
-			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
-		switch (radeon_encoder->encoder_id) {
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-			args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-			args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
-			break;
-		}
-		if (dig->linkb)
-			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
-		else
-			args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
-	}
-
-	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
-union dig_transmitter_control {
-	DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
-	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
-	DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
-	DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
-};
-
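-/* Set up a DIG transmitter (UNIPHY or LVTMA).  Covers INIT, the
- * voltage swing/pre-emphasis settings used during DP link training, and
- * output enable/disable; the config layout is v1 on pre-DCE3.2, v2 on
- * DCE3.2 and v3/v4 on DCE4/DCE5.
- */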
-void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-	struct drm_connector *connector;
-	union dig_transmitter_control args;
-	int index = 0;
-	uint8_t frev, crev;
-	bool is_dp = false;
-	int pll_id = 0;
-	int dp_clock = 0;
-	int dp_lane_count = 0;
-	int connector_object_id = 0;
-	int igp_lane_info = 0;
-	int dig_encoder = dig->dig_encoder;
-
-	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
-		connector = radeon_get_connector_for_encoder_init(encoder);
-		/* just needed to avoid bailing in the encoder check.  the encoder
-		 * isn't used for init
-		 */
-		dig_encoder = 0;
-	} else
-		connector = radeon_get_connector_for_encoder(encoder);
-
-	if (connector) {
-		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-		struct radeon_connector_atom_dig *dig_connector =
-			radeon_connector->con_priv;
-
-		dp_clock = dig_connector->dp_clock;
-		dp_lane_count = dig_connector->dp_lane_count;
-		connector_object_id =
-			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
-		igp_lane_info = dig_connector->igp_lane_info;
-	}
-
-	/* no dig encoder assigned */
-	if (dig_encoder == -1)
-		return;
-
-	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
-		is_dp = true;
-
-	memset(&args, 0, sizeof(args));
-
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-		index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-		index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
-		break;
-	}
-
-	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
-		return;
-
-	args.v1.ucAction = action;
-	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
-		args.v1.usInitInfo = cpu_to_le16(connector_object_id);
-	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
-		args.v1.asMode.ucLaneSel = lane_num;
-		args.v1.asMode.ucLaneSet = lane_set;
-	} else {
-		if (is_dp)
-			args.v1.usPixelClock =
-				cpu_to_le16(dp_clock / 10);
-		else if (radeon_encoder->pixel_clock > 165000)
-			args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
-		else
-			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-	}
-	if (ASIC_IS_DCE4(rdev)) {
-		if (is_dp)
-			args.v3.ucLaneNum = dp_lane_count;
-		else if (radeon_encoder->pixel_clock > 165000)
-			args.v3.ucLaneNum = 8;
-		else
-			args.v3.ucLaneNum = 4;
-
-		if (dig->linkb)
-			args.v3.acConfig.ucLinkSel = 1;
-		if (dig_encoder & 1)
-			args.v3.acConfig.ucEncoderSel = 1;
-
-		/* Select the PLL for the PHY
-		 * DP PHY should be clocked from external src if there is
-		 * one.
-		 */
-		if (encoder->crtc) {
-			struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
-			pll_id = radeon_crtc->pll_id;
-		}
-
-		if (ASIC_IS_DCE5(rdev)) {
-			/* On DCE5 DCPLL usually generates the DP ref clock */
-			if (is_dp) {
-				if (rdev->clock.dp_extclk)
-					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
-				else
-					args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
-			} else
-				args.v4.acConfig.ucRefClkSource = pll_id;
-		} else {
-			/* On DCE4, if there is an external clock, it generates the DP ref clock */
-			if (is_dp && rdev->clock.dp_extclk)
-				args.v3.acConfig.ucRefClkSource = 2; /* external src */
-			else
-				args.v3.acConfig.ucRefClkSource = pll_id;
-		}
-
-		switch (radeon_encoder->encoder_id) {
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			args.v3.acConfig.ucTransmitterSel = 0;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-			args.v3.acConfig.ucTransmitterSel = 1;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-			args.v3.acConfig.ucTransmitterSel = 2;
-			break;
-		}
-
-		if (is_dp)
-			args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
-		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
-			if (dig->coherent_mode)
-				args.v3.acConfig.fCoherentMode = 1;
-			if (radeon_encoder->pixel_clock > 165000)
-				args.v3.acConfig.fDualLinkConnector = 1;
-		}
-	} else if (ASIC_IS_DCE32(rdev)) {
-		args.v2.acConfig.ucEncoderSel = dig_encoder;
-		if (dig->linkb)
-			args.v2.acConfig.ucLinkSel = 1;
-
-		switch (radeon_encoder->encoder_id) {
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			args.v2.acConfig.ucTransmitterSel = 0;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-			args.v2.acConfig.ucTransmitterSel = 1;
-			break;
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-			args.v2.acConfig.ucTransmitterSel = 2;
-			break;
-		}
-
-		if (is_dp) {
-			args.v2.acConfig.fCoherentMode = 1;
-			args.v2.acConfig.fDPConnector = 1;
-		} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
-			if (dig->coherent_mode)
-				args.v2.acConfig.fCoherentMode = 1;
-			if (radeon_encoder->pixel_clock > 165000)
-				args.v2.acConfig.fDualLinkConnector = 1;
-		}
-	} else {
-		args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
-
-		if (dig_encoder)
-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
-		else
-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
-
-		if ((rdev->flags & RADEON_IS_IGP) &&
-		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
-			if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
-				if (igp_lane_info & 0x1)
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-				else if (igp_lane_info & 0x2)
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
-				else if (igp_lane_info & 0x4)
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
-				else if (igp_lane_info & 0x8)
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
-			} else {
-				if (igp_lane_info & 0x3)
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
-				else if (igp_lane_info & 0xc)
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
-			}
-		}
-
-		if (dig->linkb)
-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
-		else
-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
-
-		if (is_dp)
-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
-		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
-			if (dig->coherent_mode)
-				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
-			if (radeon_encoder->pixel_clock > 165000)
-				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
-		}
-	}
-
-	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
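-/* Switch eDP panel power through the UNIPHY transmitter table (DCE4+
- * only).  After powering on, poll HPD for up to 300 ms so the panel has
- * time to come up; returns false if it never asserts.
- */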
-bool
-atombios_set_edp_panel_power(struct drm_connector *connector, int action)
-{
-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-	struct drm_device *dev = radeon_connector->base.dev;
-	struct radeon_device *rdev = dev->dev_private;
-	union dig_transmitter_control args;
-	int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
-	uint8_t frev, crev;
-
-	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
-		goto done;
-
-	if (!ASIC_IS_DCE4(rdev))
-		goto done;
-
-	if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
-	    (action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
-		goto done;
-
-	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
-		goto done;
-
-	memset(&args, 0, sizeof(args));
-
-	args.v1.ucAction = action;
-
-	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-	/* wait for the panel to power up */
-	if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
-		int i;
-
-		for (i = 0; i < 300; i++) {
-			if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
-				return true;
-			mdelay(1);
-		}
-		return false;
-	}
-done:
-	return true;
-}
-
-union external_encoder_control {
-	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
-	EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
-};
-
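-/* Program an external encoder such as a DP bridge chip.  frev 1 takes
- * no parameters, frev 2 crev 1/2 uses DIG-style args, and crev 3 adds
- * encoder selection, the 5.4 GHz DP link rate and panel bpc.
- */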
-static void
-atombios_external_encoder_setup(struct drm_encoder *encoder,
-				struct drm_encoder *ext_encoder,
-				int action)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
-	union external_encoder_control args;
-	struct drm_connector *connector;
-	int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
-	u8 frev, crev;
-	int dp_clock = 0;
-	int dp_lane_count = 0;
-	int connector_object_id = 0;
-	u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
-	int bpc = 8;
-
-	if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
-		connector = radeon_get_connector_for_encoder_init(encoder);
-	else
-		connector = radeon_get_connector_for_encoder(encoder);
-
-	if (connector) {
-		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-		struct radeon_connector_atom_dig *dig_connector =
-			radeon_connector->con_priv;
-
-		dp_clock = dig_connector->dp_clock;
-		dp_lane_count = dig_connector->dp_lane_count;
-		connector_object_id =
-			(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
-		bpc = connector->display_info.bpc;
-	}
-
-	memset(&args, 0, sizeof(args));
-
-	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
-		return;
-
-	switch (frev) {
-	case 1:
-		/* no params on frev 1 */
-		break;
-	case 2:
-		switch (crev) {
-		case 1:
-		case 2:
-			args.v1.sDigEncoder.ucAction = action;
-			args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-			args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
-			if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
-				if (dp_clock == 270000)
-					args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
-				args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
-			} else if (radeon_encoder->pixel_clock > 165000)
-				args.v1.sDigEncoder.ucLaneNum = 8;
-			else
-				args.v1.sDigEncoder.ucLaneNum = 4;
-			break;
-		case 3:
-			args.v3.sExtEncoder.ucAction = action;
-			if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
-				args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
-			else
-				args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
-			args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
-			if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
-				if (dp_clock == 270000)
-					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
-				else if (dp_clock == 540000)
-					args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
-				args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
-			} else if (radeon_encoder->pixel_clock > 165000)
-				args.v3.sExtEncoder.ucLaneNum = 8;
-			else
-				args.v3.sExtEncoder.ucLaneNum = 4;
-			switch (ext_enum) {
-			case GRAPH_OBJECT_ENUM_ID1:
-				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
-				break;
-			case GRAPH_OBJECT_ENUM_ID2:
-				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
-				break;
-			case GRAPH_OBJECT_ENUM_ID3:
-				args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
-				break;
-			}
-			switch (bpc) {
-			case 0:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
-				break;
-			case 6:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
-				break;
-			case 8:
-			default:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
-				break;
-			case 10:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
-				break;
-			case 12:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
-				break;
-			case 16:
-				args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
-				break;
-			}
-			break;
-		default:
-			DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
-			return;
-		}
-		break;
-	default:
-		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
-		return;
-	}
-	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-}
-
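-/* Enable or disable YUV output for TV/CV.  The BIOS scratch register is
- * temporarily rewritten so the EnableYUV table operates on the right
- * CRTC, then restored afterwards.
- */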
-static void
-atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
-	ENABLE_YUV_PS_ALLOCATION args;
-	int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
-	uint32_t temp, reg;
-
-	memset(&args, 0, sizeof(args));
-
-	if (rdev->family >= CHIP_R600)
-		reg = R600_BIOS_3_SCRATCH;
-	else
-		reg = RADEON_BIOS_3_SCRATCH;
-
-	/* XXX: fix up scratch reg handling */
-	temp = RREG32(reg);
-	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
-		WREG32(reg, (ATOM_S3_TV1_ACTIVE |
-			     (radeon_crtc->crtc_id << 18)));
-	else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-		WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
-	else
-		WREG32(reg, 0);
-
-	if (enable)
-		args.ucEnable = ATOM_ENABLE;
-	args.ucCRTC = radeon_crtc->crtc_id;
-
-	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-	WREG32(reg, temp);
-}
-
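-/* DPMS entry point.  DIG encoders are driven through the transmitter
- * and encoder tables (including eDP panel power and DP link training on
- * power-up); DACs, DVO and the legacy encoders use their per-device
- * output control tables.  Any attached external encoder is switched
- * last.
- */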
-static void
-radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
-	DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
-	int index = 0;
-	bool is_dig = false;
-	bool is_dce5_dac = false;
-	bool is_dce5_dvo = false;
-
-	memset(&args, 0, sizeof(args));
-
-	DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
-		  radeon_encoder->encoder_id, mode, radeon_encoder->devices,
-		  radeon_encoder->active_device);
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-		index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-		is_dig = true;
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
-	case ENCODER_OBJECT_ID_INTERNAL_DDI:
-		index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-		if (ASIC_IS_DCE5(rdev))
-			is_dce5_dvo = true;
-		else if (ASIC_IS_DCE3(rdev))
-			is_dig = true;
-		else
-			index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
-		index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-			index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
-		else
-			index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-		if (ASIC_IS_DCE5(rdev))
-			is_dce5_dac = true;
-		else {
-			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
-				index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
-			else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-				index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
-			else
-				index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
-		}
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-		if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
-			index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
-		else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-			index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
-		else
-			index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
-		break;
-	}
-
-	if (is_dig) {
-		switch (mode) {
-		case DRM_MODE_DPMS_ON:
-			/* some early dce3.2 boards have a bug in their transmitter control table */
-			if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
-				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-			else
-				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
-			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
-				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
-				if (connector &&
-				    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
-					struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-					struct radeon_connector_atom_dig *radeon_dig_connector =
-						radeon_connector->con_priv;
-					atombios_set_edp_panel_power(connector,
-								     ATOM_TRANSMITTER_ACTION_POWER_ON);
-					radeon_dig_connector->edp_on = true;
-				}
-				if (ASIC_IS_DCE4(rdev))
-					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
-				radeon_dp_link_train(encoder, connector);
-				if (ASIC_IS_DCE4(rdev))
-					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
-			}
-			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
-			break;
-		case DRM_MODE_DPMS_STANDBY:
-		case DRM_MODE_DPMS_SUSPEND:
-		case DRM_MODE_DPMS_OFF:
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
-			if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
-				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
-				if (ASIC_IS_DCE4(rdev))
-					atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
-				if (connector &&
-				    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
-					struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-					struct radeon_connector_atom_dig *radeon_dig_connector =
-						radeon_connector->con_priv;
-					atombios_set_edp_panel_power(connector,
-								     ATOM_TRANSMITTER_ACTION_POWER_OFF);
-					radeon_dig_connector->edp_on = false;
-				}
-			}
-			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-				atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
-			break;
-		}
-	} else if (is_dce5_dac) {
-		switch (mode) {
-		case DRM_MODE_DPMS_ON:
-			atombios_dac_setup(encoder, ATOM_ENABLE);
-			break;
-		case DRM_MODE_DPMS_STANDBY:
-		case DRM_MODE_DPMS_SUSPEND:
-		case DRM_MODE_DPMS_OFF:
-			atombios_dac_setup(encoder, ATOM_DISABLE);
-			break;
-		}
-	} else if (is_dce5_dvo) {
-		switch (mode) {
-		case DRM_MODE_DPMS_ON:
-			atombios_dvo_setup(encoder, ATOM_ENABLE);
-			break;
-		case DRM_MODE_DPMS_STANDBY:
-		case DRM_MODE_DPMS_SUSPEND:
-		case DRM_MODE_DPMS_OFF:
-			atombios_dvo_setup(encoder, ATOM_DISABLE);
-			break;
-		}
-	} else {
-		switch (mode) {
-		case DRM_MODE_DPMS_ON:
-			args.ucAction = ATOM_ENABLE;
-			/* workaround for DVOOutputControl on some RS690 systems */
-			if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
-				u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
-				WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
-				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-				WREG32(RADEON_BIOS_3_SCRATCH, reg);
-			} else
-				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-				args.ucAction = ATOM_LCD_BLON;
-				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-			}
-			break;
-		case DRM_MODE_DPMS_STANDBY:
-		case DRM_MODE_DPMS_SUSPEND:
-		case DRM_MODE_DPMS_OFF:
-			args.ucAction = ATOM_DISABLE;
-			atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-			if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-				args.ucAction = ATOM_LCD_BLOFF;
-				atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-			}
-			break;
-		}
-	}
-
-	if (ext_encoder) {
-		switch (mode) {
-		case DRM_MODE_DPMS_ON:
-		default:
-			if (ASIC_IS_DCE41(rdev)) {
-				atombios_external_encoder_setup(encoder, ext_encoder,
-								EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
-				atombios_external_encoder_setup(encoder, ext_encoder,
-								EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
-			} else
-				atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
-			break;
-		case DRM_MODE_DPMS_STANDBY:
-		case DRM_MODE_DPMS_SUSPEND:
-		case DRM_MODE_DPMS_OFF:
-			if (ASIC_IS_DCE41(rdev)) {
-				atombios_external_encoder_setup(encoder, ext_encoder,
-								EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
-				atombios_external_encoder_setup(encoder, ext_encoder,
-								EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
-			} else
-				atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
-			break;
-		}
-	}
-
-	radeon_atombios_encoder_dpms_scratch_regs(encoder, mode == DRM_MODE_DPMS_ON);
-}
-
-union crtc_source_param {
-	SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
-	SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
-};
-
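-/* Route the encoder to its CRTC via the SelectCRTC_Source table.  The
- * v1 layout takes a device index, the v2 layout takes the DIG encoder
- * id plus the encode mode.
- */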
-static void
-atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
-	union crtc_source_param args;
-	int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
-	uint8_t frev, crev;
-	struct radeon_encoder_atom_dig *dig;
-
-	memset(&args, 0, sizeof(args));
-
-	if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
-		return;
-
-	switch (frev) {
-	case 1:
-		switch (crev) {
-		case 1:
-		default:
-			if (ASIC_IS_AVIVO(rdev))
-				args.v1.ucCRTC = radeon_crtc->crtc_id;
-			else {
-				if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
-					args.v1.ucCRTC = radeon_crtc->crtc_id;
-				} else {
-					args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
-				}
-			}
-			switch (radeon_encoder->encoder_id) {
-			case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
-			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-				args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
-				break;
-			case ENCODER_OBJECT_ID_INTERNAL_LVDS:
-			case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-				if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
-					args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
-				else
-					args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
-				break;
-			case ENCODER_OBJECT_ID_INTERNAL_DVO1:
-			case ENCODER_OBJECT_ID_INTERNAL_DDI:
-			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-				args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
-				break;
-			case ENCODER_OBJECT_ID_INTERNAL_DAC1:
-			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
-					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
-				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
-				else
-					args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
-				break;
-			case ENCODER_OBJECT_ID_INTERNAL_DAC2:
-			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
-					args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
-				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-					args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
-				else
-					args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
-				break;
-			}
-			break;
-		case 2:
-			args.v2.ucCRTC = radeon_crtc->crtc_id;
-			if (radeon_encoder_is_dp_bridge(encoder)) {
-				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
-				if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
-					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
-				else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
-					args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
-				else
-					args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
-			} else
-				args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
-			switch (radeon_encoder->encoder_id) {
-			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-				dig = radeon_encoder->enc_priv;
-				switch (dig->dig_encoder) {
-				case 0:
-					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
-					break;
-				case 1:
-					args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
-					break;
-				case 2:
-					args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
-					break;
-				case 3:
-					args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
-					break;
-				case 4:
-					args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
-					break;
-				case 5:
-					args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
-					break;
-				}
-				break;
-			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
-				break;
-			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
-					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
-				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
-				else
-					args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
-				break;
-			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-				if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
-					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
-				else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
-					args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
-				else
-					args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
-				break;
-			}
-			break;
-		}
-		break;
-	default:
-		DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
-		return;
-	}
-
-	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-	/* update scratch regs with new routing */
-	radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
-}
-
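-/* Board- and chip-specific fixups applied after a mode set: disable
- * LVDS truncation and spatial dithering on a specific MacBook panel,
- * and reprogram the interlaced interleave bit, which the scaler setup
- * can clear.
- */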
-static void
-atombios_apply_encoder_quirks(struct drm_encoder *encoder,
-			      struct drm_display_mode *mode)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
-
-	/* Funky macbooks */
-	if ((dev->pdev->device == 0x71C5) &&
-	    (dev->pdev->subsystem_vendor == 0x106b) &&
-	    (dev->pdev->subsystem_device == 0x0080)) {
-		if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
-			uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
-
-			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
-			lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
-
-			WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
-		}
-	}
-
-	/* set scaler clears this on some chips */
-	if (ASIC_IS_AVIVO(rdev) &&
-	    (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
-		if (ASIC_IS_DCE4(rdev)) {
-			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
-				       EVERGREEN_INTERLEAVE_EN);
-			else
-				WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-		} else {
-			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
-				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
-				       AVIVO_D1MODE_INTERLEAVE_EN);
-			else
-				WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
-		}
-	}
-}
-
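-/* Pick which DIG encoder block should drive this encoder.  DCE4 has a
- * fixed PHY-to-DIG mapping, DCE3.2 (and DCE4.1 llano) can use any block
- * so the crtc id is used, and on DCE3.0 LVTMA must be driven by DIG2.
- */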
-static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_encoder *test_encoder;
-	struct radeon_encoder_atom_dig *dig;
-	uint32_t dig_enc_in_use = 0;
-
-	/* DCE4/5 */
-	if (ASIC_IS_DCE4(rdev)) {
-		dig = radeon_encoder->enc_priv;
-		if (ASIC_IS_DCE41(rdev)) {
-			/* ontario follows DCE4 */
-			if (rdev->family == CHIP_PALM) {
-				if (dig->linkb)
-					return 1;
-				else
-					return 0;
-			} else
-				/* llano follows DCE3.2 */
-				return radeon_crtc->crtc_id;
-		} else {
-			switch (radeon_encoder->encoder_id) {
-			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-				if (dig->linkb)
-					return 1;
-				else
-					return 0;
-				break;
-			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-				if (dig->linkb)
-					return 3;
-				else
-					return 2;
-				break;
-			case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-				if (dig->linkb)
-					return 5;
-				else
-					return 4;
-				break;
-			}
-		}
-	}
-
-	/* on DCE32 an encoder can drive any block, so just use the crtc id */
-	if (ASIC_IS_DCE32(rdev)) {
-		return radeon_crtc->crtc_id;
-	}
-
-	/* on DCE3 - LVTMA can only be driven by DIGB */
-	list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
-		struct radeon_encoder *radeon_test_encoder;
-
-		if (encoder == test_encoder)
-			continue;
-
-		if (!radeon_encoder_is_digital(test_encoder))
-			continue;
-
-		radeon_test_encoder = to_radeon_encoder(test_encoder);
-		dig = radeon_test_encoder->enc_priv;
-
-		if (dig->dig_encoder >= 0)
-			dig_enc_in_use |= (1 << dig->dig_encoder);
-	}
-
-	if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
-		if (dig_enc_in_use & 0x2)
-			DRM_ERROR("LVDS requires digital encoder 2 but it is in use - stealing\n");
-		return 1;
-	}
-	if (!(dig_enc_in_use & 1))
-		return 0;
-	return 1;
-}
-
-/* This only needs to be called once at startup */
-void
-radeon_atom_encoder_init(struct radeon_device *rdev)
-{
-	struct drm_device *dev = rdev->ddev;
-	struct drm_encoder *encoder;
-
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-		struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
-
-		switch (radeon_encoder->encoder_id) {
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
-			break;
-		default:
-			break;
-		}
-
-		if (ext_encoder && ASIC_IS_DCE41(rdev))
-			atombios_external_encoder_setup(encoder, ext_encoder,
-							EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
-	}
-}
-
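-/* Mode-set helper: latch the adjusted pixel clock, set up YUV for TV/CV
- * on AVIVO parts, run the encoder-specific enable sequence (the
- * encoder/transmitter ordering differs between DCE4+ and older chips),
- * then apply quirks and enable HDMI audio where applicable.
- */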
-static void
-radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
-			     struct drm_display_mode *mode,
-			     struct drm_display_mode *adjusted_mode)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
-
-	radeon_encoder->pixel_clock = adjusted_mode->clock;
-
-	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
-		if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
-			atombios_yuv_setup(encoder, true);
-		else
-			atombios_yuv_setup(encoder, false);
-	}
-
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
-	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-		if (ASIC_IS_DCE4(rdev)) {
-			/* disable the transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-			/* setup and enable the encoder */
-			atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
-
-			/* enable the transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-		} else {
-			/* disable the encoder and transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
-
-			/* setup and enable the encoder and transmitter */
-			atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
-		}
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DDI:
-	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-		atombios_dvo_setup(encoder, ATOM_ENABLE);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-		atombios_dac_setup(encoder, ATOM_ENABLE);
-		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
-			if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
-				atombios_tv_setup(encoder, ATOM_ENABLE);
-			else
-				atombios_tv_setup(encoder, ATOM_DISABLE);
-		}
-		break;
-	}
-
-	if (ext_encoder) {
-		if (ASIC_IS_DCE41(rdev))
-			atombios_external_encoder_setup(encoder, ext_encoder,
-							EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
-		else
-			atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
-	}
-
-	atombios_apply_encoder_quirks(encoder, adjusted_mode);
-
-	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
-		r600_hdmi_enable(encoder);
-		r600_hdmi_setmode(encoder, adjusted_mode);
-	}
-}
-
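-/* Kick off DAC load detection via the DAC_LoadDetection table for
- * CRT/TV/CV devices; the result is read back from the BIOS scratch
- * registers by the detect callbacks below.
- */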
-static bool
-atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
-	if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
-				       ATOM_DEVICE_CV_SUPPORT |
-				       ATOM_DEVICE_CRT_SUPPORT)) {
-		DAC_LOAD_DETECTION_PS_ALLOCATION args;
-		int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
-		uint8_t frev, crev;
-
-		memset(&args, 0, sizeof(args));
-
-		if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
-			return false;
-
-		args.sDacload.ucMisc = 0;
-
-		if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
-		    (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
-			args.sDacload.ucDacType = ATOM_DAC_A;
-		else
-			args.sDacload.ucDacType = ATOM_DAC_B;
-
-		if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
-			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
-		else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
-			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
-		else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
-			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
-			if (crev >= 3)
-				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
-		} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
-			args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
-			if (crev >= 3)
-				args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
-		}
-
-		atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
-
-		return true;
-	} else
-		return false;
-}
-
-static enum drm_connector_status
-radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-	uint32_t bios_0_scratch;
-
-	if (!atombios_dac_load_detect(encoder, connector)) {
-		DRM_DEBUG_KMS("detect returned false\n");
-		return connector_status_unknown;
-	}
-
-	if (rdev->family >= CHIP_R600)
-		bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
-	else
-		bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
-
-	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
-	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
-		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
-			return connector_status_connected;
-	}
-	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
-		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
-			return connector_status_connected;
-	}
-	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
-		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
-			return connector_status_connected;
-	}
-	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
-		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
-			return connector_status_connected; /* CTV */
-		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
-			return connector_status_connected; /* STV */
-	}
-	return connector_status_disconnected;
-}
-
-static enum drm_connector_status
-radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-	struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
-	u32 bios_0_scratch;
-
-	if (!ASIC_IS_DCE4(rdev))
-		return connector_status_unknown;
-
-	if (!ext_encoder)
-		return connector_status_unknown;
-
-	if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
-		return connector_status_unknown;
-
-	/* load detect on the dp bridge */
-	atombios_external_encoder_setup(encoder, ext_encoder,
-					EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
-
-	bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
-
-	DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
-	if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
-		if (bios_0_scratch & ATOM_S0_CRT1_MASK)
-			return connector_status_connected;
-	}
-	if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
-		if (bios_0_scratch & ATOM_S0_CRT2_MASK)
-			return connector_status_connected;
-	}
-	if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
-		if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
-			return connector_status_connected;
-	}
-	if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
-		if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
-			return connector_status_connected; /* CTV */
-		else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
-			return connector_status_connected; /* STV */
-	}
-	return connector_status_disconnected;
-}
-
-void
-radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
-{
-	struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
-
-	if (ext_encoder)
-		/* ddc_setup on the dp bridge */
-		atombios_external_encoder_setup(encoder, ext_encoder,
-						EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
-}
-
-static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
-{
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-
-	if ((radeon_encoder->active_device &
-	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
-	    radeon_encoder_is_dp_bridge(encoder)) {
-		struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-		if (dig)
-			dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
-	}
-
-	radeon_atom_output_lock(encoder, true);
-	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
-	if (connector) {
-		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-
-		/* select the clock/data port if it uses a router */
-		if (radeon_connector->router.cd_valid)
-			radeon_router_select_cd_port(radeon_connector);
-
-		/* turn eDP panel on for mode set */
-		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-			atombios_set_edp_panel_power(connector,
-						     ATOM_TRANSMITTER_ACTION_POWER_ON);
-	}
-
-	/* this is needed for the pll/ss setup to work correctly in some cases */
-	atombios_set_encoder_crtc_source(encoder);
-}
-
-static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
-{
-	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-	radeon_atom_output_lock(encoder, false);
-}
-
-static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
-{
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct radeon_encoder_atom_dig *dig;
-
-	/* check for pre-DCE3 cards with shared encoders;
-	 * can't really use the links individually, so don't disable
-	 * the encoder if it's in use by another connector
-	 */
-	if (!ASIC_IS_DCE3(rdev)) {
-		struct drm_encoder *other_encoder;
-		struct radeon_encoder *other_radeon_encoder;
-
-		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
-			other_radeon_encoder = to_radeon_encoder(other_encoder);
-			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
-			    drm_helper_encoder_in_use(other_encoder))
-				goto disable_done;
-		}
-	}
-
-	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
-	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-		if (ASIC_IS_DCE4(rdev))
-			/* disable the transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-		else {
-			/* disable the encoder and transmitter */
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
-			atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
-		}
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DDI:
-	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-		atombios_dvo_setup(encoder, ATOM_DISABLE);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-		atombios_dac_setup(encoder, ATOM_DISABLE);
-		if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
-			atombios_tv_setup(encoder, ATOM_DISABLE);
-		break;
-	}
-
-disable_done:
-	if (radeon_encoder_is_digital(encoder)) {
-		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
-			r600_hdmi_disable(encoder);
-		dig = radeon_encoder->enc_priv;
-		dig->dig_encoder = -1;
-	}
-	radeon_encoder->active_device = 0;
-}
-
-/* these are handled by the primary encoders */
-static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void radeon_atom_ext_commit(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-radeon_atom_ext_mode_set(struct drm_encoder *encoder,
-			 struct drm_display_mode *mode,
-			 struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void radeon_atom_ext_disable(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
-				       struct drm_display_mode *mode,
-				       struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
-static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
-	.dpms = radeon_atom_ext_dpms,
-	.mode_fixup = radeon_atom_ext_mode_fixup,
-	.prepare = radeon_atom_ext_prepare,
-	.mode_set = radeon_atom_ext_mode_set,
-	.commit = radeon_atom_ext_commit,
-	.disable = radeon_atom_ext_disable,
-	/* no detect for TMDS/LVDS yet */
-};
-
-static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
-	.dpms = radeon_atom_encoder_dpms,
-	.mode_fixup = radeon_atom_mode_fixup,
-	.prepare = radeon_atom_encoder_prepare,
-	.mode_set = radeon_atom_encoder_mode_set,
-	.commit = radeon_atom_encoder_commit,
-	.disable = radeon_atom_encoder_disable,
-	.detect = radeon_atom_dig_detect,
-};
-
-static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
-	.dpms = radeon_atom_encoder_dpms,
-	.mode_fixup = radeon_atom_mode_fixup,
-	.prepare = radeon_atom_encoder_prepare,
-	.mode_set = radeon_atom_encoder_mode_set,
-	.commit = radeon_atom_encoder_commit,
-	.detect = radeon_atom_dac_detect,
-};
-
-void radeon_enc_destroy(struct drm_encoder *encoder)
-{
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	kfree(radeon_encoder->enc_priv);
-	drm_encoder_cleanup(encoder);
-	kfree(radeon_encoder);
-}
-
-static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
-	.destroy = radeon_enc_destroy,
-};
-
-struct radeon_encoder_atom_dac *
-radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
-{
-	struct drm_device *dev = radeon_encoder->base.dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
-
-	if (!dac)
-		return NULL;
-
-	dac->tv_std = radeon_atombios_get_tv_info(rdev);
-	return dac;
-}
-
-struct radeon_encoder_atom_dig *
-radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
-{
-	int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
-	struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
-
-	if (!dig)
-		return NULL;
-
-	/* coherent mode by default */
-	dig->coherent_mode = true;
-	dig->dig_encoder = -1;
-
-	if (encoder_enum == 2)
-		dig->linkb = true;
-	else
-		dig->linkb = false;
-
-	return dig;
-}
-
-void
-radeon_add_atom_encoder(struct drm_device *dev,
-			uint32_t encoder_enum,
-			uint32_t supported_device,
-			u16 caps)
-{
-	struct radeon_device *rdev = dev->dev_private;
-	struct drm_encoder *encoder;
-	struct radeon_encoder *radeon_encoder;
-
-	/* see if we already added it */
-	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		radeon_encoder = to_radeon_encoder(encoder);
-		if (radeon_encoder->encoder_enum == encoder_enum) {
-			radeon_encoder->devices |= supported_device;
-			return;
-		}
-
-	}
-
-	/* add a new one */
-	radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
-	if (!radeon_encoder)
-		return;
-
-	encoder = &radeon_encoder->base;
-	switch (rdev->num_crtc) {
-	case 1:
-		encoder->possible_crtcs = 0x1;
-		break;
-	case 2:
-	default:
-		encoder->possible_crtcs = 0x3;
-		break;
-	case 4:
-		encoder->possible_crtcs = 0xf;
-		break;
-	case 6:
-		encoder->possible_crtcs = 0x3f;
-		break;
-	}
-
-	radeon_encoder->enc_priv = NULL;
-
-	radeon_encoder->encoder_enum = encoder_enum;
-	radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
-	radeon_encoder->devices = supported_device;
-	radeon_encoder->rmx_type = RMX_OFF;
-	radeon_encoder->underscan_type = UNDERSCAN_OFF;
-	radeon_encoder->is_ext_encoder = false;
-	radeon_encoder->caps = caps;
-
-	switch (radeon_encoder->encoder_id) {
-	case ENCODER_OBJECT_ID_INTERNAL_LVDS:
-	case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
-	case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
-		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-			radeon_encoder->rmx_type = RMX_FULL;
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
-			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
-		} else {
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
-			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
-		}
-		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DAC1:
-		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
-		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
-		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DAC2:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
-		drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
-		radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
-		drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
-		break;
-	case ENCODER_OBJECT_ID_INTERNAL_DVO1:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
-	case ENCODER_OBJECT_ID_INTERNAL_DDI:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
-	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
-		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
-			radeon_encoder->rmx_type = RMX_FULL;
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
-			radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
-		} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
-			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
-		} else {
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
-			radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
-		}
-		drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
-		break;
-	case ENCODER_OBJECT_ID_SI170B:
-	case ENCODER_OBJECT_ID_CH7303:
-	case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
-	case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
-	case ENCODER_OBJECT_ID_TITFP513:
-	case ENCODER_OBJECT_ID_VT1623:
-	case ENCODER_OBJECT_ID_HDMI_SI1930:
-	case ENCODER_OBJECT_ID_TRAVIS:
-	case ENCODER_OBJECT_ID_NUTMEG:
-		/* these are handled by the primary encoders */
-		radeon_encoder->is_ext_encoder = true;
-		if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
-		else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
-		else
-			drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
-		drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
-		break;
-	}
-}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index fdc3a9a..ba7ab79 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -49,27 +49,27 @@
 			      rdev->gart.table_size >> PAGE_SHIFT);
 	}
 #endif
-	rdev->gart.table.ram.ptr = ptr;
-	memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
+	rdev->gart.ptr = ptr;
+	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
 	return 0;
 }
 
 void radeon_gart_table_ram_free(struct radeon_device *rdev)
 {
-	if (rdev->gart.table.ram.ptr == NULL) {
+	if (rdev->gart.ptr == NULL) {
 		return;
 	}
 #ifdef CONFIG_X86
 	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
 	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
-		set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
+		set_memory_wb((unsigned long)rdev->gart.ptr,
 			      rdev->gart.table_size >> PAGE_SHIFT);
 	}
 #endif
 	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
-			    (void *)rdev->gart.table.ram.ptr,
+			    (void *)rdev->gart.ptr,
 			    rdev->gart.table_addr);
-	rdev->gart.table.ram.ptr = NULL;
+	rdev->gart.ptr = NULL;
 	rdev->gart.table_addr = 0;
 }
 
@@ -77,10 +77,10 @@
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		r = radeon_bo_create(rdev, rdev->gart.table_size,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     &rdev->gart.table.vram.robj);
+				     &rdev->gart.robj);
 		if (r) {
 			return r;
 		}
@@ -93,38 +93,46 @@
 	uint64_t gpu_addr;
 	int r;
 
-	r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+	r = radeon_bo_reserve(rdev->gart.robj, false);
 	if (unlikely(r != 0))
 		return r;
-	r = radeon_bo_pin(rdev->gart.table.vram.robj,
+	r = radeon_bo_pin(rdev->gart.robj,
 				RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 	if (r) {
-		radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		radeon_bo_unreserve(rdev->gart.robj);
 		return r;
 	}
-	r = radeon_bo_kmap(rdev->gart.table.vram.robj,
-				(void **)&rdev->gart.table.vram.ptr);
+	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
 	if (r)
-		radeon_bo_unpin(rdev->gart.table.vram.robj);
-	radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		radeon_bo_unpin(rdev->gart.robj);
+	radeon_bo_unreserve(rdev->gart.robj);
 	rdev->gart.table_addr = gpu_addr;
 	return r;
 }
 
-void radeon_gart_table_vram_free(struct radeon_device *rdev)
+void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		return;
 	}
-	r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+	r = radeon_bo_reserve(rdev->gart.robj, false);
 	if (likely(r == 0)) {
-		radeon_bo_kunmap(rdev->gart.table.vram.robj);
-		radeon_bo_unpin(rdev->gart.table.vram.robj);
-		radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		radeon_bo_kunmap(rdev->gart.robj);
+		radeon_bo_unpin(rdev->gart.robj);
+		radeon_bo_unreserve(rdev->gart.robj);
+		rdev->gart.ptr = NULL;
 	}
-	radeon_bo_unref(&rdev->gart.table.vram.robj);
+}
+
+void radeon_gart_table_vram_free(struct radeon_device *rdev)
+{
+	if (rdev->gart.robj == NULL) {
+		return;
+	}
+	radeon_gart_table_vram_unpin(rdev);
+	radeon_bo_unref(&rdev->gart.robj);
 }
 
 
@@ -151,12 +159,14 @@
 		if (rdev->gart.pages[p]) {
 			if (!rdev->gart.ttm_alloced[p])
 				pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
-				       		PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+						PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
 			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			page_base = rdev->gart.pages_addr[p];
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-				radeon_gart_set_page(rdev, t, page_base);
+				if (rdev->gart.ptr) {
+					radeon_gart_set_page(rdev, t, page_base);
+				}
 				page_base += RADEON_GPU_PAGE_SIZE;
 			}
 		}
@@ -199,10 +209,12 @@
 			}
 		}
 		rdev->gart.pages[p] = pagelist[i];
-		page_base = rdev->gart.pages_addr[p];
-		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-			radeon_gart_set_page(rdev, t, page_base);
-			page_base += RADEON_GPU_PAGE_SIZE;
+		if (rdev->gart.ptr) {
+			page_base = rdev->gart.pages_addr[p];
+			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+				radeon_gart_set_page(rdev, t, page_base);
+				page_base += RADEON_GPU_PAGE_SIZE;
+			}
 		}
 	}
 	mb();
@@ -215,6 +227,9 @@
 	int i, j, t;
 	u64 page_base;
 
+	if (!rdev->gart.ptr) {
+		return;
+	}
 	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
 		page_base = rdev->gart.pages_addr[i];
 		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index e6d110c..7bb1b07 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -34,7 +34,7 @@
  * radeon_ddc_probe
  *
  */
-bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe)
+bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
 {
 	u8 out = 0x0;
 	u8 buf[8];
@@ -49,15 +49,11 @@
 		{
 			.addr = 0x50,
 			.flags = I2C_M_RD,
-			.len = 1,
+			.len = 8,
 			.buf = buf,
 		}
 	};
 
-	/* Read 8 bytes from i2c for extended probe of EDID header */
-	if (requires_extended_probe)
-		msgs[1].len = 8;
-
 	/* on hw with routers, select right port */
 	if (radeon_connector->router.ddc_valid)
 		radeon_router_select_ddc_port(radeon_connector);
@@ -66,17 +62,15 @@
 	if (ret != 2)
 		/* Couldn't find an accessible DDC on this connector */
 		return false;
-	if (requires_extended_probe) {
-		/* Probe also for valid EDID header
-		 * EDID header starts with:
-		 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
-		 * Only the first 6 bytes must be valid as
-		 * drm_edid_block_valid() can fix the last 2 bytes */
-		if (drm_edid_header_is_valid(buf) < 6) {
-			/* Couldn't find an accessible EDID on this
-			 * connector */
-			return false;
-		}
+	/* Probe also for valid EDID header
+	 * EDID header starts with:
+	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
+	 * Only the first 6 bytes must be valid as
+	 * drm_edid_block_valid() can fix the last 2 bytes */
+	if (drm_edid_header_is_valid(buf) < 6) {
+		/* Couldn't find an accessible EDID on this
+		 * connector */
+		return false;
 	}
 	return true;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9ec830c..8f86aeb 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -67,10 +67,10 @@
 	/* Disable *all* interrupts */
 	rdev->irq.sw_int = false;
 	rdev->irq.gui_idle = false;
-	for (i = 0; i < rdev->num_crtc; i++)
-		rdev->irq.crtc_vblank_int[i] = false;
-	for (i = 0; i < 6; i++) {
+	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
 		rdev->irq.hpd[i] = false;
+	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+		rdev->irq.crtc_vblank_int[i] = false;
 		rdev->irq.pflip[i] = false;
 	}
 	radeon_irq_set(rdev);
@@ -99,15 +99,55 @@
 	/* Disable *all* interrupts */
 	rdev->irq.sw_int = false;
 	rdev->irq.gui_idle = false;
-	for (i = 0; i < rdev->num_crtc; i++)
-		rdev->irq.crtc_vblank_int[i] = false;
-	for (i = 0; i < 6; i++) {
+	for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
 		rdev->irq.hpd[i] = false;
+	for (i = 0; i < RADEON_MAX_CRTCS; i++) {
+		rdev->irq.crtc_vblank_int[i] = false;
 		rdev->irq.pflip[i] = false;
 	}
 	radeon_irq_set(rdev);
 }
 
+static bool radeon_msi_ok(struct radeon_device *rdev)
+{
+	/* RV370/RV380 was first asic with MSI support */
+	if (rdev->family < CHIP_RV380)
+		return false;
+
+	/* MSIs don't work on AGP */
+	if (rdev->flags & RADEON_IS_AGP)
+		return false;
+
+	/* force MSI on */
+	if (radeon_msi == 1)
+		return true;
+	else if (radeon_msi == 0)
+		return false;
+
+	/* Quirks */
+	/* HP RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x103c) &&
+	    (rdev->pdev->subsystem_device == 0x30c2))
+		return true;
+
+	/* Dell RS690 only seems to work with MSIs. */
+	if ((rdev->pdev->device == 0x791f) &&
+	    (rdev->pdev->subsystem_vendor == 0x1028) &&
+	    (rdev->pdev->subsystem_device == 0x01fd))
+		return true;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		/* APUs work fine with MSIs */
+		if (rdev->family >= CHIP_PALM)
+			return true;
+		/* lots of IGPs have problems with MSIs */
+		return false;
+	}
+
+	return true;
+}
+
 int radeon_irq_kms_init(struct radeon_device *rdev)
 {
 	int i;
@@ -124,12 +164,8 @@
 	}
 	/* enable msi */
 	rdev->msi_enabled = 0;
-	/* MSIs don't seem to work reliably on all IGP
-	 * chips.  Disable MSI on them for now.
-	 */
-	if ((rdev->family >= CHIP_RV380) &&
-	    ((!(rdev->flags & RADEON_IS_IGP)) || (rdev->family >= CHIP_PALM)) &&
-	    (!(rdev->flags & RADEON_IS_AGP))) {
+
+	if (radeon_msi_ok(rdev)) {
 		int ret = pci_enable_msi(rdev->pdev);
 		if (!ret) {
 			rdev->msi_enabled = 1;
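
radeon_msi_ok() above keys off the radeon_msi module parameter. For context, the
tri-state option follows the usual kernel convention (a sketch; the actual
declaration lives in radeon_drv.c):

	static int radeon_msi = -1;	/* -1 = auto (quirks apply), 0 = off, 1 = on */
	module_param_named(msi, radeon_msi, int, 0444);
	MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");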
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ed0178f..2c2e75e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -438,9 +438,6 @@
 	struct radeon_i2c_chan *ddc_bus;
 	/* some systems have an hdmi and vga port with a shared ddc line */
 	bool shared_ddc;
-	/* for some Radeon chip families we apply an additional EDID header
-	   check as part of the DDC probe */
-	bool requires_extended_probe;
 	bool use_digital;
 	/* we need to mind the EDID between detect
 	   and get modes due to analog/digital/tvencoder */
@@ -459,6 +456,8 @@
 	struct drm_gem_object *obj;
 };
 
+#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
+				((em) == ATOM_ENCODER_MODE_DP_MST))
 
 extern enum radeon_tv_std
 radeon_combios_get_tv_info(struct radeon_device *rdev);
@@ -468,8 +467,8 @@
 extern struct drm_connector *
 radeon_get_connector_for_encoder(struct drm_encoder *encoder);
 
-extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder);
-extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
+extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
+extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
 extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
 extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
 
@@ -489,7 +488,7 @@
 					   int action, uint8_t lane_num,
 					   uint8_t lane_set);
 extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
-extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
+extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
 extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 				u8 write_byte, u8 *read_byte);
 
@@ -519,8 +518,7 @@
 				u8 val);
 extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
 extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
-extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector,
-			bool requires_extended_probe);
+extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
 extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 89a6e1e..06b90c8 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -77,7 +77,7 @@
 {
 	int r;
 
-	if (rdev->gart.table.ram.ptr) {
+	if (rdev->gart.ptr) {
 		WARN(1, "RS400 GART already initialized\n");
 		return 0;
 	}
@@ -212,6 +212,7 @@
 int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
 	uint32_t entry;
+	u32 *gtt = rdev->gart.ptr;
 
 	if (i < 0 || i > rdev->gart.num_gpu_pages) {
 		return -EINVAL;
@@ -221,7 +222,7 @@
 		((upper_32_bits(addr) & 0xff) << 4) |
 		RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
 	entry = cpu_to_le32(entry);
-	rdev->gart.table.ram.ptr[i] = entry;
+	gtt[i] = entry;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9320dd6..481b99e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -287,6 +287,7 @@
 		default:
 			break;
 		}
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 	}
 	if (rdev->irq.installed)
 		rs600_irq_set(rdev);
@@ -413,7 +414,7 @@
 {
 	int r;
 
-	if (rdev->gart.table.vram.robj) {
+	if (rdev->gart.robj) {
 		WARN(1, "RS600 GART already initialized\n");
 		return 0;
 	}
@@ -431,7 +432,7 @@
 	u32 tmp;
 	int r, i;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
@@ -494,20 +495,12 @@
 void rs600_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int r;
 
 	/* FIXME: disable out of gart access */
 	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
 	tmp = RREG32_MC(R_000009_MC_CNTL1);
 	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
-	if (rdev->gart.table.vram.robj) {
-		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
-		if (r == 0) {
-			radeon_bo_kunmap(rdev->gart.table.vram.robj);
-			radeon_bo_unpin(rdev->gart.table.vram.robj);
-			radeon_bo_unreserve(rdev->gart.table.vram.robj);
-		}
-	}
+	radeon_gart_table_vram_unpin(rdev);
 }
 
 void rs600_gart_fini(struct radeon_device *rdev)
@@ -525,7 +518,7 @@
 
 int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
-	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+	void __iomem *ptr = (void *)rdev->gart.ptr;
 
 	if (i < 0 || i > rdev->gart.num_gpu_pages) {
 		return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 87cc1fe..a983f41 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -124,7 +124,7 @@
 	u32 tmp;
 	int r, i;
 
-	if (rdev->gart.table.vram.robj == NULL) {
+	if (rdev->gart.robj == NULL) {
 		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
 		return -EINVAL;
 	}
@@ -171,7 +171,7 @@
 void rv770_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i, r;
+	int i;
 
 	/* Disable all tables */
 	for (i = 0; i < 7; i++)
@@ -191,14 +191,7 @@
 	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
-	if (rdev->gart.table.vram.robj) {
-		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rdev->gart.table.vram.robj);
-			radeon_bo_unpin(rdev->gart.table.vram.robj);
-			radeon_bo_unreserve(rdev->gart.table.vram.robj);
-		}
-	}
+	radeon_gart_table_vram_unpin(rdev);
 }
 
 void rv770_pcie_gart_fini(struct radeon_device *rdev)
@@ -282,7 +275,7 @@
 		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
 			rdev->mc.vram_end >> 12);
 	}
-	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
 	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
 	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
 	WREG32(MC_VM_FB_LOCATION, tmp);
@@ -959,54 +952,6 @@
 
 }
 
-static int rv770_vram_scratch_init(struct radeon_device *rdev)
-{
-	int r;
-	u64 gpu_addr;
-
-	if (rdev->vram_scratch.robj == NULL) {
-		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
-				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     &rdev->vram_scratch.robj);
-		if (r) {
-			return r;
-		}
-	}
-
-	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
-	if (unlikely(r != 0))
-		return r;
-	r = radeon_bo_pin(rdev->vram_scratch.robj,
-			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
-	if (r) {
-		radeon_bo_unreserve(rdev->vram_scratch.robj);
-		return r;
-	}
-	r = radeon_bo_kmap(rdev->vram_scratch.robj,
-				(void **)&rdev->vram_scratch.ptr);
-	if (r)
-		radeon_bo_unpin(rdev->vram_scratch.robj);
-	radeon_bo_unreserve(rdev->vram_scratch.robj);
-
-	return r;
-}
-
-static void rv770_vram_scratch_fini(struct radeon_device *rdev)
-{
-	int r;
-
-	if (rdev->vram_scratch.robj == NULL) {
-		return;
-	}
-	r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
-	if (likely(r == 0)) {
-		radeon_bo_kunmap(rdev->vram_scratch.robj);
-		radeon_bo_unpin(rdev->vram_scratch.robj);
-		radeon_bo_unreserve(rdev->vram_scratch.robj);
-	}
-	radeon_bo_unref(&rdev->vram_scratch.robj);
-}
-
 void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 {
 	u64 size_bf, size_af;
@@ -1106,6 +1051,10 @@
 		}
 	}
 
+	r = r600_vram_scratch_init(rdev);
+	if (r)
+		return r;
+
 	rv770_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		rv770_agp_enable(rdev);
@@ -1114,9 +1063,7 @@
 		if (r)
 			return r;
 	}
-	r = rv770_vram_scratch_init(rdev);
-	if (r)
-		return r;
+
 	rv770_gpu_init(rdev);
 	r = r600_blit_init(rdev);
 	if (r) {
@@ -1316,7 +1263,7 @@
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	rv770_pcie_gart_fini(rdev);
-	rv770_vram_scratch_fini(rdev);
+	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1805b8c..dff8fc7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -104,6 +104,9 @@
 #define DRM_IOCTL_VMW_PRESENT_READBACK				\
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
 		 struct drm_vmw_present_readback_arg)
+#define DRM_IOCTL_VMW_UPDATE_LAYOUT				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,	\
+		 struct drm_vmw_update_layout_arg)
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -166,6 +169,9 @@
 	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
 		      vmw_present_readback_ioctl,
 		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
+		      vmw_kms_update_layout_ioctl,
+		      DRM_MASTER | DRM_UNLOCKED),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 30589d0..8cca91a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,9 +40,9 @@
 #include "ttm/ttm_module.h"
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20111008"
+#define VMWGFX_DRIVER_DATE "20111025"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 2
+#define VMWGFX_DRIVER_MINOR 3
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -633,6 +633,8 @@
 		     struct drm_vmw_fence_rep __user *user_fence_rep,
 		     struct drm_vmw_rect *clips,
 		     uint32_t num_clips);
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
 
 /**
  * Overlay control - vmwgfx_overlay.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8b14dfd..03daefa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -111,6 +111,7 @@
 		if (!ret) {
 			if (!surface->snooper.image) {
 				DRM_ERROR("surface not suitable for cursor\n");
+				vmw_surface_unreference(&surface);
 				return -EINVAL;
 			}
 		} else {
@@ -176,7 +177,9 @@
 		return 0;
 	}
 
-	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);
+	vmw_cursor_update_position(dev_priv, true,
+				   du->cursor_x + du->hotspot_x,
+				   du->cursor_y + du->hotspot_y);
 
 	return 0;
 }
@@ -191,7 +194,8 @@
 	du->cursor_y = y + crtc->y;
 
 	vmw_cursor_update_position(dev_priv, shown,
-				   du->cursor_x, du->cursor_y);
+				   du->cursor_x + du->hotspot_x,
+				   du->cursor_y + du->hotspot_y);
 
 	return 0;
 }
@@ -212,7 +216,7 @@
 		SVGA3dCmdHeader header;
 		SVGA3dCmdSurfaceDMA dma;
 	} *cmd;
-	int ret;
+	int i, ret;
 
 	cmd = container_of(header, struct vmw_dma_cmd, header);
 
@@ -234,16 +238,19 @@
 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
 			sizeof(SVGA3dCopyBox);
 
-	if (cmd->dma.guest.pitch != (64 * 4) ||
-	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
+	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
 	    box->x != 0    || box->y != 0    || box->z != 0    ||
 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
-	    box->w != 64   || box->h != 64   || box->d != 1    ||
-	    box_count != 1) {
+	    box->d != 1    || box_count != 1) {
 		/* TODO handle none page aligned offsets */
-		/* TODO handle partial uploads and pitch != 256 */
-		/* TODO handle more then one copy (size != 64) */
-		DRM_ERROR("lazy programmer, can't handle weird stuff\n");
+		/* TODO handle dst & src != 0 */
+		/* TODO handle more than one copy */
+		DRM_ERROR("Can't snoop DMA request for cursor!\n");
+		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
+			  box->srcx, box->srcy, box->srcz,
+			  box->x, box->y, box->z,
+			  box->w, box->h, box->d, box_count,
+			  cmd->dma.guest.ptr.offset);
 		return;
 	}
 
@@ -262,7 +269,16 @@
 
 	virtual = ttm_kmap_obj_virtual(&map, &dummy);
 
-	memcpy(srf->snooper.image, virtual, 64*64*4);
+	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
+		memcpy(srf->snooper.image, virtual, 64*64*4);
+	} else {
+		/* snooper.image is a u32 pointer, so + i * 64 advances one 64-pixel row. */
+		for (i = 0; i < box->h; i++)
+			memcpy(srf->snooper.image + i * 64,
+			       virtual + i * cmd->dma.guest.pitch,
+			       box->w * 4);
+	}
+
 	srf->snooper.age++;
 
 	/* we can't call this function from this function since execbuf has
@@ -994,7 +1010,7 @@
 	required_size = mode_cmd->pitch * mode_cmd->height;
 	if (unlikely(required_size > (u64) dev_priv->vram_size)) {
 		DRM_ERROR("VRAM size is too small for requested mode.\n");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	/*
@@ -1517,6 +1533,8 @@
 			du->pref_width = rects[du->unit].w;
 			du->pref_height = rects[du->unit].h;
 			du->pref_active = true;
+			du->gui_x = rects[du->unit].x;
+			du->gui_y = rects[du->unit].y;
 		} else {
 			du->pref_width = 800;
 			du->pref_height = 600;
@@ -1572,12 +1590,14 @@
 	uint32_t num_displays;
 	struct drm_device *dev = connector->dev;
 	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct vmw_display_unit *du = vmw_connector_to_du(connector);
 
 	mutex_lock(&dev_priv->hw_mutex);
 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
 	mutex_unlock(&dev_priv->hw_mutex);
 
-	return ((vmw_connector_to_du(connector)->unit < num_displays) ?
+	return ((vmw_connector_to_du(connector)->unit < num_displays &&
+		 du->pref_active) ?
 		connector_status_connected : connector_status_disconnected);
 }
 
@@ -1658,6 +1678,28 @@
 	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
 };
 
+/**
+ * vmw_guess_mode_timing - Provide fake timings for a
+ * 60Hz vrefresh mode.
+ *
+ * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
+ * members filled in.
+ */
+static void vmw_guess_mode_timing(struct drm_display_mode *mode)
+{
+	mode->hsync_start = mode->hdisplay + 50;
+	mode->hsync_end = mode->hsync_start + 50;
+	mode->htotal = mode->hsync_end + 50;
+
+	mode->vsync_start = mode->vdisplay + 50;
+	mode->vsync_end = mode->vsync_start + 50;
+	mode->vtotal = mode->vsync_end + 50;
+
+	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
+	mode->vrefresh = drm_mode_vrefresh(mode);
+}
+
+
 int vmw_du_connector_fill_modes(struct drm_connector *connector,
 				uint32_t max_width, uint32_t max_height)
 {
@@ -1680,18 +1722,23 @@
 			return 0;
 		mode->hdisplay = du->pref_width;
 		mode->vdisplay = du->pref_height;
-		mode->vrefresh = drm_mode_vrefresh(mode);
+		vmw_guess_mode_timing(mode);
+
 		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
 					       mode->vdisplay)) {
 			drm_mode_probed_add(connector, mode);
-
-			if (du->pref_mode) {
-				list_del_init(&du->pref_mode->head);
-				drm_mode_destroy(dev, du->pref_mode);
-			}
-
-			du->pref_mode = mode;
+		} else {
+			drm_mode_destroy(dev, mode);
+			mode = NULL;
 		}
+
+		if (du->pref_mode) {
+			list_del_init(&du->pref_mode->head);
+			drm_mode_destroy(dev, du->pref_mode);
+		}
+
+		/* mode may be NULL here; this is intended */
+		du->pref_mode = mode;
 	}
 
 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
@@ -1712,6 +1759,10 @@
 		drm_mode_probed_add(connector, mode);
 	}
 
+	/* Move the preferred mode first to help apps pick the right mode. */
+	if (du->pref_mode)
+		list_move(&du->pref_mode->head, &connector->probed_modes);
+
 	drm_mode_connector_list_update(connector);
 
 	return 1;
@@ -1723,3 +1774,63 @@
 {
 	return 0;
 }
+
+
+int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_update_layout_arg *arg =
+		(struct drm_vmw_update_layout_arg *)data;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	void __user *user_rects;
+	struct drm_vmw_rect *rects;
+	unsigned rects_size;
+	int ret;
+	int i;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (!arg->num_outputs) {
+		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
+		vmw_du_update_layout(dev_priv, 1, &def_rect);
+		goto out_unlock;
+	}
+
+	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
+	rects = kzalloc(rects_size, GFP_KERNEL);
+	if (unlikely(!rects)) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	user_rects = (void __user *)(unsigned long)arg->rects;
+	ret = copy_from_user(rects, user_rects, rects_size);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed to get rects.\n");
+		ret = -EFAULT;
+		goto out_free;
+	}
+
+	for (i = 0; i < arg->num_outputs; ++i) {
+		if (rects[i].x < 0 ||
+		    rects[i].y < 0 ||
+		    rects[i].x + rects[i].w > mode_config->max_width ||
+		    rects[i].y + rects[i].h > mode_config->max_height) {
+			DRM_ERROR("Invalid GUI layout.\n");
+			ret = -EINVAL;
+			goto out_free;
+		}
+	}
+
+	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
+
+out_free:
+	kfree(rects);
+out_unlock:
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
+}
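
From user space the new ioctl is driven roughly like this (a sketch using libdrm's
drmCommandWrite(); the drm_vmw_update_layout_arg field layout is assumed from the
kernel header):

	#include <stdio.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	/* fd is an open vmwgfx DRM master file descriptor. */
	static int set_single_output(int fd)
	{
		struct drm_vmw_rect rect = { .x = 0, .y = 0, .w = 1024, .h = 768 };
		struct drm_vmw_update_layout_arg arg = {
			.num_outputs = 1,
			.rects = (unsigned long)&rect,
		};

		return drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT,
				       &arg, sizeof(arg));
	}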
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index db0b901..af8e6e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -96,6 +96,13 @@
 	unsigned pref_height;
 	bool pref_active;
 	struct drm_display_mode *pref_mode;
+
+	/*
+	 * Gui positioning
+	 */
+	int gui_x;
+	int gui_y;
+	bool is_implicit;
 };
 
 #define vmw_crtc_to_du(x) \
@@ -126,8 +133,7 @@
 int vmw_du_connector_set_property(struct drm_connector *connector,
 				  struct drm_property *property,
 				  uint64_t val);
-int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
-			 struct drm_vmw_rect *rects);
+
 
 /*
  * Legacy display unit functions - vmwgfx_ldu.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 92f56bc..90c5e39 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -337,13 +337,14 @@
 	ldu->base.pref_width = 800;
 	ldu->base.pref_height = 600;
 	ldu->base.pref_mode = NULL;
+	ldu->base.is_implicit = true;
 
 	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
-			   DRM_MODE_CONNECTOR_LVDS);
+			   DRM_MODE_CONNECTOR_VIRTUAL);
 	connector->status = vmw_du_connector_detect(connector, true);
 
 	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
-			 DRM_MODE_ENCODER_LVDS);
+			 DRM_MODE_ENCODER_VIRTUAL);
 	drm_mode_connector_attach_encoder(connector, encoder);
 	encoder->possible_crtcs = (1 << unit);
 	encoder->possible_clones = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 477b2a9..4defdcf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -36,12 +36,9 @@
 	container_of(x, struct vmw_screen_object_unit, base.connector)
 
 struct vmw_screen_object_display {
-	struct list_head active;
+	unsigned num_implicit;
 
-	unsigned num_active;
-	unsigned last_num_active;
-
-	struct vmw_framebuffer *fb;
+	struct vmw_framebuffer *implicit_fb;
 };
 
 /**
@@ -54,13 +51,11 @@
 	struct vmw_dma_buffer *buffer; /**< Backing store buffer */
 
 	bool defined;
-
-	struct list_head active;
+	bool active_implicit;
 };
 
 static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
 {
-	list_del_init(&sou->active);
 	vmw_display_unit_cleanup(&sou->base);
 	kfree(sou);
 }
@@ -75,58 +70,31 @@
 	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
 }
 
-static int vmw_sou_del_active(struct vmw_private *vmw_priv,
+static void vmw_sou_del_active(struct vmw_private *vmw_priv,
 			      struct vmw_screen_object_unit *sou)
 {
 	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
-	if (list_empty(&sou->active))
-		return 0;
 
-	/* Must init otherwise list_empty(&sou->active) will not work. */
-	list_del_init(&sou->active);
-	if (--(ld->num_active) == 0) {
-		BUG_ON(!ld->fb);
-		if (ld->fb->unpin)
-			ld->fb->unpin(ld->fb);
-		ld->fb = NULL;
+	if (sou->active_implicit) {
+		if (--(ld->num_implicit) == 0)
+			ld->implicit_fb = NULL;
+		sou->active_implicit = false;
 	}
-
-	return 0;
 }
 
-static int vmw_sou_add_active(struct vmw_private *vmw_priv,
+static void vmw_sou_add_active(struct vmw_private *vmw_priv,
 			      struct vmw_screen_object_unit *sou,
 			      struct vmw_framebuffer *vfb)
 {
 	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
-	struct vmw_screen_object_unit *entry;
-	struct list_head *at;
 
-	BUG_ON(!ld->num_active && ld->fb);
-	if (vfb != ld->fb) {
-		if (ld->fb && ld->fb->unpin)
-			ld->fb->unpin(ld->fb);
-		if (vfb->pin)
-			vfb->pin(vfb);
-		ld->fb = vfb;
+	BUG_ON(!ld->num_implicit && ld->implicit_fb);
+
+	if (!sou->active_implicit && sou->base.is_implicit) {
+		ld->implicit_fb = vfb;
+		sou->active_implicit = true;
+		ld->num_implicit++;
 	}
-
-	if (!list_empty(&sou->active))
-		return 0;
-
-	at = &ld->active;
-	list_for_each_entry(entry, &ld->active, active) {
-		if (entry->base.unit > sou->base.unit)
-			break;
-
-		at = &entry->active;
-	}
-
-	list_add(&sou->active, at);
-
-	ld->num_active++;
-
-	return 0;
 }
 
 /**
@@ -164,8 +132,13 @@
 		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
 	cmd->obj.size.width = mode->hdisplay;
 	cmd->obj.size.height = mode->vdisplay;
-	cmd->obj.root.x = x;
-	cmd->obj.root.y = y;
+	if (sou->base.is_implicit) {
+		cmd->obj.root.x = x;
+		cmd->obj.root.y = y;
+	} else {
+		cmd->obj.root.x = sou->base.gui_x;
+		cmd->obj.root.y = sou->base.gui_y;
+	}
 
 	/* Ok to assume that buffer is pinned in vram */
 	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
@@ -312,10 +285,11 @@
 	}
 
 	/* sou only supports one fb active at the time */
-	if (dev_priv->sou_priv->fb && vfb &&
-	    !(dev_priv->sou_priv->num_active == 1 &&
-	      !list_empty(&sou->active)) &&
-	    dev_priv->sou_priv->fb != vfb) {
+	if (sou->base.is_implicit &&
+	    dev_priv->sou_priv->implicit_fb && vfb &&
+	    !(dev_priv->sou_priv->num_implicit == 1 &&
+	      sou->active_implicit) &&
+	    dev_priv->sou_priv->implicit_fb != vfb) {
 		DRM_ERROR("Multiple framebuffers not supported\n");
 		return -EINVAL;
 	}
@@ -471,19 +445,20 @@
 	encoder = &sou->base.encoder;
 	connector = &sou->base.connector;
 
-	INIT_LIST_HEAD(&sou->active);
+	sou->active_implicit = false;
 
 	sou->base.pref_active = (unit == 0);
 	sou->base.pref_width = 800;
 	sou->base.pref_height = 600;
 	sou->base.pref_mode = NULL;
+	sou->base.is_implicit = true;
 
 	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
-			   DRM_MODE_CONNECTOR_LVDS);
+			   DRM_MODE_CONNECTOR_VIRTUAL);
 	connector->status = vmw_du_connector_detect(connector, true);
 
 	drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
-			 DRM_MODE_ENCODER_LVDS);
+			 DRM_MODE_ENCODER_VIRTUAL);
 	drm_mode_connector_attach_encoder(connector, encoder);
 	encoder->possible_crtcs = (1 << unit);
 	encoder->possible_clones = 0;
@@ -520,10 +495,8 @@
 	if (unlikely(!dev_priv->sou_priv))
 		goto err_no_mem;
 
-	INIT_LIST_HEAD(&dev_priv->sou_priv->active);
-	dev_priv->sou_priv->num_active = 0;
-	dev_priv->sou_priv->last_num_active = 0;
-	dev_priv->sou_priv->fb = NULL;
+	dev_priv->sou_priv->num_implicit = 0;
+	dev_priv->sou_priv->implicit_fb = NULL;
 
 	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
 	if (unlikely(ret != 0))
@@ -558,9 +531,6 @@
 
 	drm_vblank_cleanup(dev);
 
-	if (!list_empty(&dev_priv->sou_priv->active))
-		DRM_ERROR("Still have active outputs when unloading driver");
-
 	kfree(dev_priv->sou_priv);
 
 	return 0;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 18767f8..5d2f8e1 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -82,7 +82,8 @@
 static unsigned int lapic_timer_reliable_states = (1 << 1);	 /* Default to only C1 */
 
 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
-static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
+static int intel_idle(struct cpuidle_device *dev,
+			struct cpuidle_driver *drv, int index);
 
 static struct cpuidle_state *cpuidle_state_table;
 
@@ -110,7 +111,6 @@
 	{ /* MWAIT C1 */
 		.name = "C1-NHM",
 		.desc = "MWAIT 0x00",
-		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 3,
 		.target_residency = 6,
@@ -118,7 +118,6 @@
 	{ /* MWAIT C2 */
 		.name = "C3-NHM",
 		.desc = "MWAIT 0x10",
-		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 20,
 		.target_residency = 80,
@@ -126,7 +125,6 @@
 	{ /* MWAIT C3 */
 		.name = "C6-NHM",
 		.desc = "MWAIT 0x20",
-		.driver_data = (void *) 0x20,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 200,
 		.target_residency = 800,
@@ -138,7 +136,6 @@
 	{ /* MWAIT C1 */
 		.name = "C1-SNB",
 		.desc = "MWAIT 0x00",
-		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 1,
 		.target_residency = 1,
@@ -146,7 +143,6 @@
 	{ /* MWAIT C2 */
 		.name = "C3-SNB",
 		.desc = "MWAIT 0x10",
-		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 80,
 		.target_residency = 211,
@@ -154,7 +150,6 @@
 	{ /* MWAIT C3 */
 		.name = "C6-SNB",
 		.desc = "MWAIT 0x20",
-		.driver_data = (void *) 0x20,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 104,
 		.target_residency = 345,
@@ -162,7 +157,6 @@
 	{ /* MWAIT C4 */
 		.name = "C7-SNB",
 		.desc = "MWAIT 0x30",
-		.driver_data = (void *) 0x30,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 109,
 		.target_residency = 345,
@@ -174,7 +168,6 @@
 	{ /* MWAIT C1 */
 		.name = "C1-ATM",
 		.desc = "MWAIT 0x00",
-		.driver_data = (void *) 0x00,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 1,
 		.target_residency = 4,
@@ -182,7 +175,6 @@
 	{ /* MWAIT C2 */
 		.name = "C2-ATM",
 		.desc = "MWAIT 0x10",
-		.driver_data = (void *) 0x10,
 		.flags = CPUIDLE_FLAG_TIME_VALID,
 		.exit_latency = 20,
 		.target_residency = 80,
@@ -191,7 +183,6 @@
 	{ /* MWAIT C4 */
 		.name = "C4-ATM",
 		.desc = "MWAIT 0x30",
-		.driver_data = (void *) 0x30,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 100,
 		.target_residency = 400,
@@ -200,23 +191,55 @@
 	{ /* MWAIT C6 */
 		.name = "C6-ATM",
 		.desc = "MWAIT 0x52",
-		.driver_data = (void *) 0x52,
 		.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
 		.exit_latency = 140,
 		.target_residency = 560,
 		.enter = &intel_idle },
 };
 
+static int get_driver_data(int cstate)
+{
+	int driver_data;
+	switch (cstate) {
+
+	case 1:	/* MWAIT C1 */
+		driver_data = 0x00;
+		break;
+	case 2:	/* MWAIT C2 */
+		driver_data = 0x10;
+		break;
+	case 3:	/* MWAIT C3 */
+		driver_data = 0x20;
+		break;
+	case 4:	/* MWAIT C4 */
+		driver_data = 0x30;
+		break;
+	case 5:	/* MWAIT C5 */
+		driver_data = 0x40;
+		break;
+	case 6:	/* MWAIT C6 */
+		driver_data = 0x52;
+		break;
+	default:
+		driver_data = 0x00;
+	}
+	return driver_data;
+}
+
 /**
  * intel_idle
  * @dev: cpuidle_device
- * @state: cpuidle state
+ * @drv: cpuidle driver
+ * @index: index of cpuidle state
  *
  */
-static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
+static int intel_idle(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
 {
 	unsigned long ecx = 1; /* break on interrupt flag */
-	unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
+	struct cpuidle_state *state = &drv->states[index];
+	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
+	unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
 	unsigned int cstate;
 	ktime_t kt_before, kt_after;
 	s64 usec_delta;
@@ -257,7 +280,10 @@
 	if (!(lapic_timer_reliable_states & (1 << (cstate))))
 		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 
-	return usec_delta;
+	/* Update cpuidle counters */
+	dev->last_residency = (int)usec_delta;
+
+	return index;
 }
 
 static void __setup_broadcast_timer(void *arg)
@@ -398,6 +424,60 @@
 	return;
 }
 /*
+ * intel_idle_cpuidle_driver_init()
+ * allocate, initialize cpuidle_states
+ */
+static int intel_idle_cpuidle_driver_init(void)
+{
+	int cstate;
+	struct cpuidle_driver *drv = &intel_idle_driver;
+
+	drv->state_count = 1;
+
+	for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+		int num_substates;
+
+		if (cstate > max_cstate) {
+			printk(PREFIX "max_cstate %d reached\n",
+				max_cstate);
+			break;
+		}
+
+		/* does the state exist in CPUID.MWAIT? */
+		num_substates = (mwait_substates >> ((cstate) * 4))
+					& MWAIT_SUBSTATE_MASK;
+		if (num_substates == 0)
+			continue;
+		/* is the state not enabled? */
+		if (cpuidle_state_table[cstate].enter == NULL) {
+			/* does the driver not know about the state? */
+			if (*cpuidle_state_table[cstate].name == '\0')
+				pr_debug(PREFIX "unaware of model 0x%x"
+					" MWAIT %d please"
+					" contact lenb@kernel.org",
+				boot_cpu_data.x86_model, cstate);
+			continue;
+		}
+
+		if ((cstate > 2) &&
+			!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+			mark_tsc_unstable("TSC halts in idle"
+					" states deeper than C2");
+
+		drv->states[drv->state_count] =	/* structure copy */
+			cpuidle_state_table[cstate];
+
+		drv->state_count += 1;
+	}
+
+	if (auto_demotion_disable_flags)
+		smp_call_function(auto_demotion_disable, NULL, 1);
+
+	return 0;
+}
+
+
+/*
  * intel_idle_cpuidle_devices_init()
  * allocate, initialize, register cpuidle_devices
  */
@@ -431,22 +511,11 @@
 				continue;
 			/* is the state not enabled? */
 			if (cpuidle_state_table[cstate].enter == NULL) {
-				/* does the driver not know about the state? */
-				if (*cpuidle_state_table[cstate].name == '\0')
-					pr_debug(PREFIX "unaware of model 0x%x"
-						" MWAIT %d please"
-						" contact lenb@kernel.org",
-					boot_cpu_data.x86_model, cstate);
 				continue;
 			}
 
-			if ((cstate > 2) &&
-				!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
-				mark_tsc_unstable("TSC halts in idle"
-					" states deeper than C2");
-
-			dev->states[dev->state_count] =	/* structure copy */
-				cpuidle_state_table[cstate];
+			dev->states_usage[dev->state_count].driver_data =
+				(void *)get_driver_data(cstate);
 
 			dev->state_count += 1;
 		}
@@ -459,8 +528,6 @@
 			return -EIO;
 		}
 	}
-	if (auto_demotion_disable_flags)
-		smp_call_function(auto_demotion_disable, NULL, 1);
 
 	return 0;
 }
@@ -478,6 +545,7 @@
 	if (retval)
 		return retval;
 
+	intel_idle_cpuidle_driver_init();
 	retval = cpuidle_register_driver(&intel_idle_driver);
 	if (retval) {
 		printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
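
The intel_idle conversion above follows the cpuidle API rework in 3.2: the ->enter
hook now takes (dev, drv, index), reports the measured residency through
dev->last_residency, and returns the index of the state actually entered. The new
shape, as a minimal sketch with hypothetical names:

	static int my_idle_enter(struct cpuidle_device *dev,
				 struct cpuidle_driver *drv, int index)
	{
		ktime_t kt_before, kt_after;

		kt_before = ktime_get_real();

		/* ... enter the hardware state described by drv->states[index] ... */

		kt_after = ktime_get_real();

		/* Residency is reported via the device, not the return value. */
		dev->last_residency =
			(int)ktime_to_us(ktime_sub(kt_after, kt_before));

		return index;	/* state actually entered */
	}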
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cb24666..0a6806f 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -14,6 +14,7 @@
 #include <linux/vmalloc.h>
 #include <linux/version.h>
 #include <linux/shrinker.h>
+#include <linux/module.h>
 
 #define DM_MSG_PREFIX "bufio"
 
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 65fd85e..023fbc2 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -8,7 +8,7 @@
 #include "dm-btree-internal.h"
 #include "dm-transaction-manager.h"
 
-#include <linux/module.h>
+#include <linux/export.h>
 
 /*
  * Removing an entry from a btree
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index e0638be..bd1e7ff 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -8,7 +8,7 @@
 #include "dm-space-map.h"
 #include "dm-transaction-manager.h"
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "btree"
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
index bb44a93..50ed53b 100644
--- a/drivers/md/persistent-data/dm-space-map-checker.c
+++ b/drivers/md/persistent-data/dm-space-map-checker.c
@@ -7,6 +7,7 @@
 #include "dm-space-map-checker.h"
 
 #include <linux/device-mapper.h>
+#include <linux/export.h>
 
 #ifdef CONFIG_DM_DEBUG_SPACE_MAPS
 
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index aeff785..fc469ba 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -12,7 +12,7 @@
 
 #include <linux/list.h>
 #include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/device-mapper.h>
 
 #define DM_MSG_PREFIX "space map disk"
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 728e89a..6f8d387 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -10,7 +10,7 @@
 #include "dm-space-map-metadata.h"
 #include "dm-persistent-data-internal.h"
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/device-mapper.h>
 
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 66b616e..318a869 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -12,27 +12,17 @@
 
 if MTD
 
-config MTD_DEBUG
-	bool "Debugging"
-	help
-	  This turns on low-level debugging for the entire MTD sub-system.
-	  Normally, you should say 'N'.
-
-config MTD_DEBUG_VERBOSE
-	int "Debugging verbosity (0 = quiet, 3 = noisy)"
-	depends on MTD_DEBUG
-	default "0"
-	help
-	  Determines the verbosity level of the MTD debugging messages.
-
 config MTD_TESTS
-	tristate "MTD tests support"
+	tristate "MTD tests support (DANGEROUS)"
 	depends on m
 	help
 	  This option includes various MTD tests into compilation. The tests
 	  should normally be compiled as kernel modules. The modules perform
 	  various checks and verifications when loaded.
 
+	  WARNING: some of the tests will ERASE the entire MTD device which
+	  they test. Do not use these tests unless you really know what you are doing.
+
 config MTD_REDBOOT_PARTS
 	tristate "RedBoot partition table parsing"
 	---help---
@@ -137,7 +127,8 @@
 	  'physmap' map driver (CONFIG_MTD_PHYSMAP) does this, for example.
 
 config MTD_OF_PARTS
-	def_bool y
+	tristate "OpenFirmware partitioning information support"
+	default y
 	depends on OF
 	help
 	  This provides a partition parsing function which derives
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 39664c4..9aaac3a 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -5,8 +5,8 @@
 # Core functionality.
 obj-$(CONFIG_MTD)		+= mtd.o
 mtd-y				:= mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o
-mtd-$(CONFIG_MTD_OF_PARTS)	+= ofpart.o
 
+obj-$(CONFIG_MTD_OF_PARTS)	+= ofpart.o
 obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
 obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
 obj-$(CONFIG_MTD_AFS_PARTS)	+= afs.o
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 302372c..89a02f6 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -162,8 +162,8 @@
 }
 
 static int parse_afs_partitions(struct mtd_info *mtd,
-                         struct mtd_partition **pparts,
-                         unsigned long origin)
+				struct mtd_partition **pparts,
+				struct mtd_part_parser_data *data)
 {
 	struct mtd_partition *parts;
 	u_int mask, off, idx, sz;
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 95949b9..f40ea45 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -47,7 +47,7 @@
 
 static int create_mtd_partitions(struct mtd_info *master,
 				 struct mtd_partition **pparts,
-				 unsigned long origin)
+				 struct mtd_part_parser_data *data)
 {
 	struct ar7_bin_rec header;
 	unsigned int offset;
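
Both parser conversions above swap the old 'unsigned long origin' argument for
'struct mtd_part_parser_data *'. A skeleton parser against the new prototype
(hypothetical names; error handling trimmed):

	static int myfmt_parse_partitions(struct mtd_info *master,
					  struct mtd_partition **pparts,
					  struct mtd_part_parser_data *data)
	{
		struct mtd_partition *parts;

		/* The caller owns and frees the array on success. */
		parts = kzalloc(sizeof(*parts), GFP_KERNEL);
		if (!parts)
			return -ENOMEM;

		/* Expose the whole device as a single partition. */
		parts[0].name = "whole-flash";
		parts[0].offset = 0;
		parts[0].size = master->size;

		*pparts = parts;
		return 1;	/* number of partitions found */
	}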
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 23175ed..8d70895 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -145,8 +145,7 @@
 	if (((major << 8) | minor) < 0x3131) {
 		/* CFI version 1.0 => don't trust bootloc */
 
-		DEBUG(MTD_DEBUG_LEVEL1,
-			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
+		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
 			map->name, cfi->mfr, cfi->id);
 
 		/* AFAICS all 29LV400 with a bottom boot block have a device ID
@@ -166,8 +165,7 @@
 			 * the 8-bit device ID.
 			 */
 			(cfi->mfr == CFI_MFR_MACRONIX)) {
-			DEBUG(MTD_DEBUG_LEVEL1,
-				"%s: Macronix MX29LV400C with bottom boot block"
+			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
 				" detected\n", map->name);
 			extp->TopBottom = 2;	/* bottom boot */
 		} else
@@ -178,8 +176,7 @@
 			extp->TopBottom = 2;	/* bottom boot */
 		}
 
-		DEBUG(MTD_DEBUG_LEVEL1,
-			"%s: AMD CFI PRI V%c.%c has no boot block field;"
+		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
 			" deduced %s from Device ID\n", map->name, major, minor,
 			extp->TopBottom == 2 ? "bottom" : "top");
 	}
@@ -191,7 +188,7 @@
 	struct map_info *map = mtd->priv;
 	struct cfi_private *cfi = map->fldrv_priv;
 	if (cfi->cfiq->BufWriteTimeoutTyp) {
-		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n" );
+		pr_debug("Using buffer write method\n" );
 		mtd->write = cfi_amdstd_write_buffers;
 	}
 }
@@ -443,8 +440,8 @@
 	mtd->writesize = 1;
 	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): write buffer size %d\n",
-		__func__, mtd->writebufsize);
+	pr_debug("MTD %s(): write buffer size %d\n", __func__,
+			mtd->writebufsize);
 
 	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
 
@@ -1163,7 +1160,7 @@
 		return ret;
 	}
 
-	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
 	       __func__, adr, datum.x[0] );
 
 	/*
@@ -1174,7 +1171,7 @@
 	 */
 	oldd = map_read(map, adr);
 	if (map_word_equal(map, oldd, datum)) {
-		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
+		pr_debug("MTD %s(): NOP\n",
 		       __func__);
 		goto op_done;
 	}
@@ -1400,7 +1397,7 @@
 
 	datum = map_word_load(map, buf);
 
-	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
+	pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
 	       __func__, adr, datum.x[0] );
 
 	XIP_INVAL_CACHED_RANGE(map, adr, len);
@@ -1587,7 +1584,7 @@
 		return ret;
 	}
 
-	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
+	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
 	       __func__, chip->start );
 
 	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
@@ -1675,7 +1672,7 @@
 		return ret;
 	}
 
-	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
+	pr_debug("MTD %s(): ERASE 0x%.8lx\n",
 	       __func__, adr );
 
 	XIP_INVAL_CACHED_RANGE(map, adr, len);
@@ -1801,8 +1798,7 @@
 		goto out_unlock;
 	chip->state = FL_LOCKING;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
-	      __func__, adr, len);
+	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
 
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
 			 cfi->device_type, NULL);
@@ -1837,8 +1833,7 @@
 		goto out_unlock;
 	chip->state = FL_UNLOCKING;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
-	      __func__, adr, len);
+	pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
 
 	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
 			 cfi->device_type, NULL);
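
The DEBUG(MTD_DEBUG_LEVEL*) to pr_debug() conversion here (and in the files below)
is what makes the MTD_DEBUG/MTD_DEBUG_VERBOSE options removed from Kconfig above
redundant: pr_debug() is compiled out unless the file defines DEBUG, and with
CONFIG_DYNAMIC_DEBUG it can be toggled per call site at run time. A sketch of
forcing the messages on for one file:

	/* DEBUG must be defined before the printk machinery is included. */
	#define DEBUG

	#include <linux/printk.h>

	static void write_path_example(void)
	{
		/* Always emitted once DEBUG is defined for this file. */
		pr_debug("MTD example: taking the buffer write path\n");
	}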
diff --git a/drivers/mtd/chips/fwh_lock.h b/drivers/mtd/chips/fwh_lock.h
index 5e3cc80..89c6595 100644
--- a/drivers/mtd/chips/fwh_lock.h
+++ b/drivers/mtd/chips/fwh_lock.h
@@ -34,8 +34,7 @@
 
 	/* Refuse the operation if the we cannot look behind the chip */
 	if (chip->start < 0x400000) {
-		DEBUG( MTD_DEBUG_LEVEL3,
-			"MTD %s(): chip->start: %lx wanted >= 0x400000\n",
+		pr_debug( "MTD %s(): chip->start: %lx wanted >= 0x400000\n",
 			__func__, chip->start );
 		return -EIO;
 	}
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index ea832ea..c443f52 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1914,11 +1914,10 @@
 	 * (oh and incidentaly the jedec spec - 3.5.3.3) the reset
 	 * sequence is *supposed* to be 0xaa at 0x5555, 0x55 at
 	 * 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
-	 * as they will ignore the writes and dont care what address
+	 * as they will ignore the writes and don't care what address
 	 * the F0 is written to */
 	if (cfi->addr_unlock1) {
-		DEBUG( MTD_DEBUG_LEVEL3,
-		       "reset unlock called %x %x \n",
+		pr_debug( "reset unlock called %x %x \n",
 		       cfi->addr_unlock1,cfi->addr_unlock2);
 		cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
 		cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -1941,7 +1940,7 @@
 	uint8_t uaddr;
 
 	if (!(jedec_table[index].devtypes & cfi->device_type)) {
-		DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
+		pr_debug("Rejecting potential %s with incompatible %d-bit device type\n",
 		      jedec_table[index].name, 4 * (1<<cfi->device_type));
 		return 0;
 	}
@@ -2021,7 +2020,7 @@
 		 * there aren't.
 		 */
 		if (finfo->dev_id > 0xff) {
-			DEBUG( MTD_DEBUG_LEVEL3, "%s(): ID is not 8bit\n",
+			pr_debug("%s(): ID is not 8bit\n",
 			       __func__);
 			goto match_done;
 		}
@@ -2045,12 +2044,10 @@
 	}
 
 	/* the part size must fit in the memory window */
-	DEBUG( MTD_DEBUG_LEVEL3,
-	       "MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
+	pr_debug("MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
 	       __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
 	if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
-		DEBUG( MTD_DEBUG_LEVEL3,
-		       "MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
+		pr_debug("MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
 		       __func__, finfo->mfr_id, finfo->dev_id,
 		       1 << finfo->dev_size );
 		goto match_done;
@@ -2061,13 +2058,12 @@
 
 	uaddr = finfo->uaddr;
 
-	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
+	pr_debug("MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
 	       __func__, cfi->addr_unlock1, cfi->addr_unlock2 );
 	if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
 	     && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
 		  unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
-		DEBUG( MTD_DEBUG_LEVEL3,
-			"MTD %s(): 0x%.4x 0x%.4x did not match\n",
+		pr_debug("MTD %s(): 0x%.4x 0x%.4x did not match\n",
 			__func__,
 			unlock_addrs[uaddr].addr1,
 			unlock_addrs[uaddr].addr2);
@@ -2083,15 +2079,13 @@
 	 * FIXME - write a driver that takes all of the chip info as
 	 * module parameters, doesn't probe but forces a load.
 	 */
-	DEBUG( MTD_DEBUG_LEVEL3,
-	       "MTD %s(): check ID's disappear when not in ID mode\n",
+	pr_debug("MTD %s(): check ID's disappear when not in ID mode\n",
 	       __func__ );
 	jedec_reset( base, map, cfi );
 	mfr = jedec_read_mfr( map, base, cfi );
 	id = jedec_read_id( map, base, cfi );
 	if ( mfr == cfi->mfr && id == cfi->id ) {
-		DEBUG( MTD_DEBUG_LEVEL3,
-		       "MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
+		pr_debug("MTD %s(): ID 0x%.2x:0x%.2x did not change after reset:\n"
 		       "You might need to manually specify JEDEC parameters.\n",
 			__func__, cfi->mfr, cfi->id );
 		goto match_done;
@@ -2104,7 +2098,7 @@
 	 * Put the device back in ID mode - only need to do this if we
 	 * were truly frobbing a real device.
 	 */
-	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
+	pr_debug("MTD %s(): return to ID mode\n", __func__ );
 	if (cfi->addr_unlock1) {
 		cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
 		cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
@@ -2167,13 +2161,11 @@
 
 		cfi->mfr = jedec_read_mfr(map, base, cfi);
 		cfi->id = jedec_read_id(map, base, cfi);
-		DEBUG(MTD_DEBUG_LEVEL3,
-		      "Search for id:(%02x %02x) interleave(%d) type(%d)\n",
+		pr_debug("Search for id:(%02x %02x) interleave(%d) type(%d)\n",
 			cfi->mfr, cfi->id, cfi_interleave(cfi), cfi->device_type);
 		for (i = 0; i < ARRAY_SIZE(jedec_table); i++) {
 			if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) {
-				DEBUG( MTD_DEBUG_LEVEL3,
-				       "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
+				pr_debug("MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
 				       __func__, cfi->mfr, cfi->id,
 				       cfi->addr_unlock1, cfi->addr_unlock2 );
 				if (!cfi_jedec_setup(map, cfi, i))
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 8cf667d..ddf9ec6 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -189,10 +189,7 @@
 			     extra_mem_size;
 		parts = kzalloc(alloc_size, GFP_KERNEL);
 		if (!parts)
-		{
-			printk(KERN_ERR ERRP "out of memory\n");
 			return NULL;
-		}
 		extra_mem = (unsigned char *)(parts + *num_parts);
 	}
 	/* enter this partition (offset will be calculated later if it is zero at this point) */
@@ -317,8 +314,8 @@
  * the first one in the chain if a NULL mtd_id is passed in.
  */
 static int parse_cmdline_partitions(struct mtd_info *master,
-                             struct mtd_partition **pparts,
-                             unsigned long origin)
+				    struct mtd_partition **pparts,
+				    struct mtd_part_parser_data *data)
 {
 	unsigned long offset;
 	int i;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 35081ce..283d887 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -249,6 +249,16 @@
 	  under "NAND Flash Device Drivers" (currently that driver does not
 	  support all Millennium Plus devices).
 
+config MTD_DOCG3
+	tristate "M-Systems Disk-On-Chip G3"
+	---help---
+	  This provides an MTD device driver for the M-Systems DiskOnChip
+	  G3 devices.
+
+	  The driver provides access to G3 DiskOnChip devices, distributed
+	  by M-Systems and now Sandisk. The support is very experimental,
+	  and does not provide any write operations.
+
 config MTD_DOCPROBE
 	tristate
 	select MTD_DOCECC
@@ -268,8 +278,7 @@
 config MTD_DOCPROBE_ADDRESS
 	hex "Physical address of DiskOnChip" if MTD_DOCPROBE_ADVANCED
 	depends on MTD_DOCPROBE
-	default "0x0000" if MTD_DOCPROBE_ADVANCED
-	default "0" if !MTD_DOCPROBE_ADVANCED
+	default "0x0"
 	---help---
 	  By default, the probe for DiskOnChip devices will look for a
 	  DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index f3226b1..56c7cd4 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -5,6 +5,7 @@
 obj-$(CONFIG_MTD_DOC2000)	+= doc2000.o
 obj-$(CONFIG_MTD_DOC2001)	+= doc2001.o
 obj-$(CONFIG_MTD_DOC2001PLUS)	+= doc2001plus.o
+obj-$(CONFIG_MTD_DOCG3)		+= docg3.o
 obj-$(CONFIG_MTD_DOCPROBE)	+= docprobe.o
 obj-$(CONFIG_MTD_DOCECC)	+= docecc.o
 obj-$(CONFIG_MTD_SLRAM)		+= slram.o
@@ -17,3 +18,5 @@
 obj-$(CONFIG_MTD_DATAFLASH)	+= mtd_dataflash.o
 obj-$(CONFIG_MTD_M25P80)	+= m25p80.o
 obj-$(CONFIG_MTD_SST25L)	+= sst25l.o
+
+CFLAGS_docg3.o			+= -I$(src)
\ No newline at end of file
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index f7fbf60..e9fad91 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -82,8 +82,7 @@
 	void __iomem *docptr = doc->virtadr;
 	unsigned long timeo = jiffies + (HZ * 10);
 
-	DEBUG(MTD_DEBUG_LEVEL3,
-	      "_DoC_WaitReady called for out-of-line wait\n");
+	pr_debug("_DoC_WaitReady called for out-of-line wait\n");
 
 	/* Out-of-line routine to wait for chip response */
 	while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
@@ -92,7 +91,7 @@
 		DoC_Delay(doc, 2);
 
 		if (time_after(jiffies, timeo)) {
-			DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
+			pr_debug("_DoC_WaitReady timed out.\n");
 			return -EIO;
 		}
 		udelay(1);
@@ -323,8 +322,7 @@
 
 	/* Reset the chip */
 	if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) {
-		DEBUG(MTD_DEBUG_LEVEL2,
-		      "DoC_Command (reset) for %d,%d returned true\n",
+		pr_debug("DoC_Command (reset) for %d,%d returned true\n",
 		      floor, chip);
 		return 0;
 	}
@@ -332,8 +330,7 @@
 
 	/* Read the NAND chip ID: 1. Send ReadID command */
 	if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) {
-		DEBUG(MTD_DEBUG_LEVEL2,
-		      "DoC_Command (ReadID) for %d,%d returned true\n",
+		pr_debug("DoC_Command (ReadID) for %d,%d returned true\n",
 		      floor, chip);
 		return 0;
 	}
@@ -699,7 +696,7 @@
 #ifdef ECC_DEBUG
 			printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from);
 #endif
-			/* Read the ECC syndrom through the DiskOnChip ECC
+			/* Read the ECC syndrome through the DiskOnChip ECC
 			   logic.  These syndrome will be all ZERO when there
 			   is no error */
 			for (i = 0; i < 6; i++) {
@@ -930,7 +927,7 @@
 	uint8_t *buf = ops->oobbuf;
 	size_t len = ops->len;
 
-	BUG_ON(ops->mode != MTD_OOB_PLACE);
+	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
 
 	ofs += ops->ooboffs;
 
@@ -1094,7 +1091,7 @@
 	struct DiskOnChip *this = mtd->priv;
 	int ret;
 
-	BUG_ON(ops->mode != MTD_OOB_PLACE);
+	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
 
 	mutex_lock(&this->lock);
 	ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len,
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index 241192f..a3f7a27 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -55,15 +55,14 @@
 {
 	unsigned short c = 0xffff;
 
-	DEBUG(MTD_DEBUG_LEVEL3,
-	      "_DoC_WaitReady called for out-of-line wait\n");
+	pr_debug("_DoC_WaitReady called for out-of-line wait\n");
 
 	/* Out-of-line routine to wait for chip response */
 	while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c)
 		;
 
 	if (c == 0)
-		DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
+		pr_debug("_DoC_WaitReady timed out.\n");
 
 	return (c == 0);
 }
@@ -464,7 +463,7 @@
 #ifdef ECC_DEBUG
 		printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
 #endif
-		/* Read the ECC syndrom through the DiskOnChip ECC logic.
+		/* Read the ECC syndrome through the DiskOnChip ECC logic.
 		   These syndrome will be all ZERO when there is no error */
 		for (i = 0; i < 6; i++) {
 			syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i);
@@ -632,7 +631,7 @@
 	uint8_t *buf = ops->oobbuf;
 	size_t len = ops->len;
 
-	BUG_ON(ops->mode != MTD_OOB_PLACE);
+	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
 
 	ofs += ops->ooboffs;
 
@@ -690,7 +689,7 @@
 	uint8_t *buf = ops->oobbuf;
 	size_t len = ops->len;
 
-	BUG_ON(ops->mode != MTD_OOB_PLACE);
+	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
 
 	ofs += ops->ooboffs;
 
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 09ae0ad..99351bc 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -61,15 +61,14 @@
 {
 	unsigned int c = 0xffff;
 
-	DEBUG(MTD_DEBUG_LEVEL3,
-	      "_DoC_WaitReady called for out-of-line wait\n");
+	pr_debug("_DoC_WaitReady called for out-of-line wait\n");
 
 	/* Out-of-line routine to wait for chip response */
 	while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c)
 		;
 
 	if (c == 0)
-		DEBUG(MTD_DEBUG_LEVEL2, "_DoC_WaitReady timed out.\n");
+		pr_debug("_DoC_WaitReady timed out.\n");
 
 	return (c == 0);
 }
@@ -655,7 +654,7 @@
 #ifdef ECC_DEBUG
 		printk("DiskOnChip ECC Error: Read at %lx\n", (long)from);
 #endif
-		/* Read the ECC syndrom through the DiskOnChip ECC logic.
+		/* Read the ECC syndrome through the DiskOnChip ECC logic.
 		   These syndrome will be all ZERO when there is no error */
 		for (i = 0; i < 6; i++)
 			syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i);
@@ -835,7 +834,7 @@
 	uint8_t *buf = ops->oobbuf;
 	size_t len = ops->len;
 
-	BUG_ON(ops->mode != MTD_OOB_PLACE);
+	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
 
 	ofs += ops->ooboffs;
 
@@ -920,7 +919,7 @@
 	uint8_t *buf = ops->oobbuf;
 	size_t len = ops->len;
 
-	BUG_ON(ops->mode != MTD_OOB_PLACE);
+	BUG_ON(ops->mode != MTD_OPS_PLACE_OOB);
 
 	ofs += ops->ooboffs;
 
diff --git a/drivers/mtd/devices/docecc.c b/drivers/mtd/devices/docecc.c
index 37ef29a..4a1c39b 100644
--- a/drivers/mtd/devices/docecc.c
+++ b/drivers/mtd/devices/docecc.c
@@ -2,7 +2,7 @@
  * ECC algorithm for M-systems disk on chip. We use the excellent Reed
  * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the
  * GNU GPL License. The rest is simply to convert the disk on chip
- * syndrom into a standard syndom.
+ * syndrome into a standard syndrome.
  *
  * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
  * Copyright (C) 2000 Netgem S.A.
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
new file mode 100644
index 0000000..bdcf5df
--- /dev/null
+++ b/drivers/mtd/devices/docg3.c
@@ -0,0 +1,1114 @@
+/*
+ * Handles the M-Systems DiskOnChip G3 chip
+ *
+ * Copyright (C) 2011 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#define CREATE_TRACE_POINTS
+#include "docg3.h"
+
+/*
+ * This driver handles the DiskOnChip G3 flash memory.
+ *
+ * As no specification is available from M-Systems/Sandisk, this driver lacks
+ * several functions available on the chip, such as:
+ *  - block erase
+ *  - page write
+ *  - IPL write
+ *  - ECC fixing (lack of BCH algorithm understanding)
+ *  - powerdown / powerup
+ *
+ * The bus data width (8 bits versus 16 bits) is not handled (if_cfg flag), and
+ * the driver assumes a 16-bit data bus.
+ *
+ * DocG3 relies on 2 ECC algorithms, which are handled in hardware:
+ *  - a 1-byte Hamming code stored in the OOB for each page
+ *  - a 7-byte BCH code stored in the OOB for each page
+ * The BCH part is only used for checking, no correction is available as
+ * some information is missing. What is known is that:
+ *  - BCH is in GF(2^14)
+ *  - BCH is over data of 520 bytes (512 page + 7 page_info bytes
+ *                                   + 1 hamming byte)
+ *  - BCH can correct up to 4 bits (t = 4)
+ *  - BCH syndromes are calculated in hardware, and checked in hardware as well
+ *
+ */
+
+static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
+{
+	u8 val = readb(docg3->base + reg);
+
+	trace_docg3_io(0, 8, reg, (int)val);
+	return val;
+}
+
+static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
+{
+	u16 val = readw(docg3->base + reg);
+
+	trace_docg3_io(0, 16, reg, (int)val);
+	return val;
+}
+
+static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
+{
+	writeb(val, docg3->base + reg);
+	trace_docg3_io(1, 8, reg, val);
+}
+
+static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
+{
+	writew(val, docg3->base + reg);
+	trace_docg3_io(1, 16, reg, val);
+}
+
+static inline void doc_flash_command(struct docg3 *docg3, u8 cmd)
+{
+	doc_writeb(docg3, cmd, DOC_FLASHCOMMAND);
+}
+
+static inline void doc_flash_sequence(struct docg3 *docg3, u8 seq)
+{
+	doc_writeb(docg3, seq, DOC_FLASHSEQUENCE);
+}
+
+static inline void doc_flash_address(struct docg3 *docg3, u8 addr)
+{
+	doc_writeb(docg3, addr, DOC_FLASHADDRESS);
+}
+
+static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int doc_register_readb(struct docg3 *docg3, int reg)
+{
+	u8 val;
+
+	doc_writew(docg3, reg, DOC_READADDRESS);
+	val = doc_readb(docg3, reg);
+	doc_vdbg("Read register %04x : %02x\n", reg, val);
+	return val;
+}
+
+static int doc_register_readw(struct docg3 *docg3, int reg)
+{
+	u16 val;
+
+	doc_writew(docg3, reg, DOC_READADDRESS);
+	val = doc_readw(docg3, reg);
+	doc_vdbg("Read register %04x : %04x\n", reg, val);
+	return val;
+}
+
+/**
+ * doc_delay - delay docg3 operations
+ * @docg3: the device
+ * @nbNOPs: the number of NOPs to issue
+ *
+ * As no specification is available, the right timings between chip commands are
+ * unknown. The only available piece of information is the observed NOPs on a
+ * working docg3 chip.
+ * Therefore, doc_delay relies on a busy loop of NOPs, instead of the scheduler-
+ * friendlier msleep() or the blocking mdelay().
+ */
+static void doc_delay(struct docg3 *docg3, int nbNOPs)
+{
+	int i;
+
+	doc_dbg("NOP x %d\n", nbNOPs);
+	for (i = 0; i < nbNOPs; i++)
+		doc_writeb(docg3, 0, DOC_NOP);
+}
+
+static int is_prot_seq_error(struct docg3 *docg3)
+{
+	int ctrl;
+
+	ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+	return ctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR);
+}
+
+static int doc_is_ready(struct docg3 *docg3)
+{
+	int ctrl;
+
+	ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+	return ctrl & DOC_CTRL_FLASHREADY;
+}
+
+static int doc_wait_ready(struct docg3 *docg3)
+{
+	int maxWaitCycles = 100;
+
+	do {
+		doc_delay(docg3, 4);
+		cpu_relax();
+	} while (!doc_is_ready(docg3) && maxWaitCycles--);
+	doc_delay(docg3, 2);
+	if (maxWaitCycles > 0)
+		return 0;
+	else
+		return -EIO;
+}
+
+static int doc_reset_seq(struct docg3 *docg3)
+{
+	int ret;
+
+	doc_writeb(docg3, 0x10, DOC_FLASHCONTROL);
+	doc_flash_sequence(docg3, DOC_SEQ_RESET);
+	doc_flash_command(docg3, DOC_CMD_RESET);
+	doc_delay(docg3, 2);
+	ret = doc_wait_ready(docg3);
+
+	doc_dbg("doc_reset_seq() -> isReady=%s\n", ret ? "false" : "true");
+	return ret;
+}
+
+/**
+ * doc_read_data_area - Read data from data area
+ * @docg3: the device
+ * @buf: the buffer to fill in
+ * @len: the length to read
+ * @first: first time read, DOC_READADDRESS should be set
+ *
+ * Reads bytes from flash data. Handles even-byte bulk and single-byte reads.
+ */
+static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
+			       int first)
+{
+	int i, cdr, len4;
+	u16 data16, *dst16;
+	u8 data8, *dst8;
+
+	doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
+	cdr = len & 0x3;
+	len4 = len - cdr;
+
+	if (first)
+		doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+	dst16 = buf;
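+	/* Bulk part: i counts bytes, each 16-bit read consumes two of them */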
+	for (i = 0; i < len4; i += 2) {
+		data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
+		*dst16 = data16;
+		dst16++;
+	}
+
+	if (cdr) {
+		doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+			   DOC_READADDRESS);
+		doc_delay(docg3, 1);
+		dst8 = (u8 *)dst16;
+		for (i = 0; i < cdr; i++) {
+			data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
+			*dst8 = data8;
+			dst8++;
+		}
+	}
+}
+
+/**
+ * doc_set_reliable_mode - Sets the flash to reliable data mode
+ * @docg3: the device
+ *
+ * The reliable data mode is a bit slower than the fast mode, but fewer errors
+ * occur.  Entering the reliable mode cannot be done without entering the fast
+ * mode first.
+ */
+static void doc_set_reliable_mode(struct docg3 *docg3)
+{
+	doc_dbg("doc_set_reliable_mode()\n");
+	doc_flash_sequence(docg3, DOC_SEQ_SET_MODE);
+	doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+	doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+	doc_delay(docg3, 2);
+}
+
+/**
+ * doc_set_asic_mode - Set the ASIC mode
+ * @docg3: the device
+ * @mode: the mode
+ *
+ * The ASIC can work in 3 modes:
+ *  - RESET: all registers are zeroed
+ *  - NORMAL: receives and handles commands
+ *  - POWERDOWN: minimal power use, flash parts shut off
+ */
+static void doc_set_asic_mode(struct docg3 *docg3, u8 mode)
+{
+	int i;
+
+	for (i = 0; i < 12; i++)
+		doc_readb(docg3, DOC_IOSPACE_IPL);
+
+	mode |= DOC_ASICMODE_MDWREN;
+	doc_dbg("doc_set_asic_mode(%02x)\n", mode);
+	doc_writeb(docg3, mode, DOC_ASICMODE);
+	doc_writeb(docg3, ~mode, DOC_ASICMODECONFIRM);
+	doc_delay(docg3, 1);
+}
+
+/**
+ * doc_set_device_id - Sets the device id for cascaded G3 chips
+ * @docg3: the device
+ * @id: the chip to select (amongst 0, 1, 2, 3)
+ *
+ * There can be 4 cascaded G3 chips. This function selects the one that will
+ * be the active one.
+ */
+static void doc_set_device_id(struct docg3 *docg3, int id)
+{
+	u8 ctrl;
+
+	doc_dbg("doc_set_device_id(%d)\n", id);
+	doc_writeb(docg3, id, DOC_DEVICESELECT);
+	ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+
+	ctrl &= ~DOC_CTRL_VIOLATION;
+	ctrl |= DOC_CTRL_CE;
+	doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+}
+
+/**
+ * doc_set_extra_page_mode - Change flash page layout
+ * @docg3: the device
+ *
+ * Normally, the flash page is split into the data (512 bytes) and the out of
+ * band data (16 bytes). For each, 4 more bytes can be accessed, where the wear
+ * leveling counters are stored.  To access this last area of 4 bytes, a special
+ * mode must be selected in the flash ASIC.
+ *
+ * Returns 0 if no error occurred, -EIO otherwise.
+ */
+static int doc_set_extra_page_mode(struct docg3 *docg3)
+{
+	int fctrl;
+
+	doc_dbg("doc_set_extra_page_mode()\n");
+	doc_flash_sequence(docg3, DOC_SEQ_PAGE_SIZE_532);
+	doc_flash_command(docg3, DOC_CMD_PAGE_SIZE_532);
+	doc_delay(docg3, 2);
+
+	fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+	if (fctrl & (DOC_CTRL_PROTECTION_ERROR | DOC_CTRL_SEQUENCE_ERROR))
+		return -EIO;
+	else
+		return 0;
+}
+
+/**
+ * doc_read_seek - Set both flash planes to the specified block and page for reading
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @wear: if true, read will occur on the 4 extra bytes of the wear area
+ * @ofs: offset in page to read
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
+			 int wear, int ofs)
+{
+	int sector, ret = 0;
+
+	doc_dbg("doc_seek(blocks=(%d,%d), page=%d, ofs=%d, wear=%d)\n",
+		block0, block1, page, ofs, wear);
+
+	if (!wear && (ofs < 2 * DOC_LAYOUT_PAGE_SIZE)) {
+		doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+		doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+		doc_delay(docg3, 2);
+	} else {
+		doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+		doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+		doc_delay(docg3, 2);
+	}
+
+	doc_set_reliable_mode(docg3);
+	if (wear)
+		ret = doc_set_extra_page_mode(docg3);
+	if (ret)
+		goto out;
+
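+	/* Build the [bbbb bPpp pppp] sector address for each plane */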
+	sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+	doc_flash_sequence(docg3, DOC_SEQ_READ);
+	doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+	doc_delay(docg3, 1);
+	doc_flash_address(docg3, sector & 0xff);
+	doc_flash_address(docg3, (sector >> 8) & 0xff);
+	doc_flash_address(docg3, (sector >> 16) & 0xff);
+	doc_delay(docg3, 1);
+
+	sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+	doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+	doc_delay(docg3, 1);
+	doc_flash_address(docg3, sector & 0xff);
+	doc_flash_address(docg3, (sector >> 8) & 0xff);
+	doc_flash_address(docg3, (sector >> 16) & 0xff);
+	doc_delay(docg3, 2);
+
+out:
+	return ret;
+}
+
+/**
+ * doc_read_page_ecc_init - Initialize hardware ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * The function initializes the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH syndromes (on 7 bytes).
+ *
+ * Returns 0 on success, -EIO on error
+ */
+static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
+{
+	doc_writew(docg3, DOC_ECCCONF0_READ_MODE
+		   | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+		   | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+		   DOC_ECCCONF0);
+	doc_delay(docg3, 4);
+	doc_register_readb(docg3, DOC_FLASHCONTROL);
+	return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_read_page_prepare - Prepares reading data from a flash page
+ * @docg3: the device
+ * @block0: the first plane block index on flash memory
+ * @block1: the second plane block index on flash memory
+ * @page: the page index in the block
+ * @offset: the offset in the page (must be a multiple of 4)
+ *
+ * Prepares the page to be read in the flash memory:
+ *   - tell ASIC to map the flash pages
+ *   - tell ASIC to be in read mode
+ *
+ * After a call to this method, a call to doc_read_page_finish is mandatory,
+ * to end the read cycle of the flash.
+ *
+ * Read data from a flash page. The length to be read must be between 0 and
+ * (page_size + oob_size + wear_size), i.e. 532, and a multiple of 4 (because
+ * reading the extra bytes is not implemented).
+ *
+ * As pages are grouped by 2 (in 2 planes), reading from a page must be done
+ * in two steps:
+ *  - one read of 512 bytes at offset 0
+ *  - one read of 512 bytes at offset 512 + 16
+ *
+ * Returns 0 if successful, -EIO if a read error occurred.
+ */
+static int doc_read_page_prepare(struct docg3 *docg3, int block0, int block1,
+				 int page, int offset)
+{
+	int wear_area = 0, ret = 0;
+
+	doc_dbg("doc_read_page_prepare(blocks=(%d,%d), page=%d, ofsInPage=%d)\n",
+		block0, block1, page, offset);
+	if (offset >= DOC_LAYOUT_WEAR_OFFSET)
+		wear_area = 1;
+	if (!wear_area && offset > (DOC_LAYOUT_PAGE_OOB_SIZE * 2))
+		return -EINVAL;
+
+	doc_set_device_id(docg3, docg3->device_id);
+	ret = doc_reset_seq(docg3);
+	if (ret)
+		goto err;
+
+	/* Program the flash address block and page */
+	ret = doc_read_seek(docg3, block0, block1, page, wear_area, offset);
+	if (ret)
+		goto err;
+
+	doc_flash_command(docg3, DOC_CMD_READ_ALL_PLANES);
+	doc_delay(docg3, 2);
+	doc_wait_ready(docg3);
+
+	doc_flash_command(docg3, DOC_CMD_SET_ADDR_READ);
+	doc_delay(docg3, 1);
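+	/* Fold OOB offsets back into page range; addresses are in 4-byte units */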
+	if (offset >= DOC_LAYOUT_PAGE_SIZE * 2)
+		offset -= 2 * DOC_LAYOUT_PAGE_SIZE;
+	doc_flash_address(docg3, offset >> 2);
+	doc_delay(docg3, 1);
+	doc_wait_ready(docg3);
+
+	doc_flash_command(docg3, DOC_CMD_READ_FLASH);
+
+	return 0;
+err:
+	doc_writeb(docg3, 0, DOC_DATAEND);
+	doc_delay(docg3, 2);
+	return -EIO;
+}
+
+/**
+ * doc_read_page_getbytes - Reads bytes from a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be read (must be a multiple of 4)
+ * @buf: the buffer to be filled in
+ * @first: 1 if first time read, DOC_READADDRESS should be set
+ *
+ */
+static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
+				  int first)
+{
+	doc_read_data_area(docg3, buf, len, first);
+	doc_delay(docg3, 2);
+	return len;
+}
+
+/**
+ * doc_get_hw_bch_syndroms - Get the hardware-calculated BCH syndromes
+ * @docg3: the device
+ * @syns:  the array of 7 integers where the syndromes will be stored
+ */
+static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
+{
+	int i;
+
+	for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+		syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i));
+}
+
+/**
+ * doc_read_page_finish - Ends reading of a flash page
+ * @docg3: the device
+ *
+ * As a side effect, resets the chip selector to 0. This ensures that after each
+ * read operation, floor 0 is selected. Therefore, if the system halts, the
+ * reboot will boot on floor 0, where the IPL is.
+ */
+static void doc_read_page_finish(struct docg3 *docg3)
+{
+	doc_writeb(docg3, 0, DOC_DATAEND);
+	doc_delay(docg3, 2);
+	doc_set_device_id(docg3, 0);
+}
+
+/**
+ * calc_block_sector - Calculate blocks, pages and ofs.
+ *
+ * @from: offset in flash
+ * @block0: first plane block index calculated
+ * @block1: second plane block index calculated
+ * @page: page calculated
+ * @ofs: offset in page
+ */
+static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
+			      int *ofs)
+{
+	uint sector;
+
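+	/* Even 512-byte sectors are read at intra-page offset 0, odd ones at
+	 * offset DOC_LAYOUT_PAGE_OOB_SIZE (the planes are interleaved) */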
+	sector = from / DOC_LAYOUT_PAGE_SIZE;
+	*block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES)
+		* DOC_LAYOUT_NBPLANES;
+	*block1 = *block0 + 1;
+	*page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES);
+	*page /= DOC_LAYOUT_NBPLANES;
+	if (sector % 2)
+		*ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
+	else
+		*ofs = 0;
+}
+
+/**
+ * doc_read - Read bytes from flash
+ * @mtd: the device
+ * @from: the offset from first block and first page, in bytes, aligned on page
+ *        size
+ * @len: the number of bytes to read (must be a multiple of 4)
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
+ *
+ * Reads flash memory pages. This function does not read the OOB chunk, but only
+ * the page data.
+ *
+ * Returns 0 if the read was successful, or -EIO/-EINVAL if an error occurred
+ */
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+	     size_t *retlen, u_char *buf)
+{
+	struct docg3 *docg3 = mtd->priv;
+	int block0, block1, page, readlen, ret, ofs = 0;
+	int syn[DOC_ECC_BCH_SIZE], eccconf1;
+	u8 oob[DOC_LAYOUT_OOB_SIZE];
+
+	ret = -EINVAL;
+	doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf);
+	if (from % DOC_LAYOUT_PAGE_SIZE)
+		goto err;
+	if (len % 4)
+		goto err;
+	calc_block_sector(from, &block0, &block1, &page, &ofs);
+	if (block1 > docg3->max_block)
+		goto err;
+
+	*retlen = 0;
+	ret = 0;
+	readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+	while (!ret && len > 0) {
+		readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+		ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
+		if (ret < 0)
+			goto err;
+		ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES);
+		if (ret < 0)
+			goto err_in_read;
+		ret = doc_read_page_getbytes(docg3, readlen, buf, 1);
+		if (ret < readlen)
+			goto err_in_read;
+		ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
+					     oob, 0);
+		if (ret < DOC_LAYOUT_OOB_SIZE)
+			goto err_in_read;
+
+		*retlen += readlen;
+		buf += readlen;
+		len -= readlen;
+
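+		/* Toggle between the two interleaved planes; once both have
+		 * been read, advance to the next page pair */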
+		ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE;
+		if (ofs == 0)
+			page += 2;
+		if (page > DOC_ADDR_PAGE_MASK) {
+			page = 0;
+			block0 += 2;
+			block1 += 2;
+		}
+
+		/*
+		 * There should be a BCH bitstream fixing algorithm here ...
+		 * By now, a page read failure is triggered by BCH error
+		 */
+		doc_get_hw_bch_syndroms(docg3, syn);
+		eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+
+		doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+			 oob[0], oob[1], oob[2], oob[3], oob[4],
+			 oob[5], oob[6]);
+		doc_dbg("OOB - HAMMING: %02x\n", oob[7]);
+		doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+			 oob[8], oob[9], oob[10], oob[11], oob[12],
+			 oob[13], oob[14]);
+		doc_dbg("OOB - UNUSED: %02x\n", oob[15]);
+		doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
+		doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+			syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]);
+
+		ret = -EBADMSG;
+		if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) {
+			if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR)
+				goto err_in_read;
+			if (is_prot_seq_error(docg3))
+				goto err_in_read;
+		}
+		doc_read_page_finish(docg3);
+	}
+
+	return 0;
+err_in_read:
+	doc_read_page_finish(docg3);
+err:
+	return ret;
+}
+
+/**
+ * doc_read_oob - Read out of band bytes from flash
+ * @mtd: the device
+ * @from: the offset from first block and first page, in bytes, aligned on page
+ *        size
+ * @ops: the mtd oob structure
+ *
+ * Reads flash memory OOB area of pages.
+ *
+ * Returns 0 if the read was successful, or -EIO/-EINVAL if an error occurred
+ */
+static int doc_read_oob(struct mtd_info *mtd, loff_t from,
+			struct mtd_oob_ops *ops)
+{
+	struct docg3 *docg3 = mtd->priv;
+	int block0, block1, page, ofs, ret;
+	u8 *buf = ops->oobbuf;
+	size_t len = ops->ooblen;
+
+	doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len);
+	if (len != DOC_LAYOUT_OOB_SIZE)
+		return -EINVAL;
+
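+	/* Only MTD_OPS_PLACE_OOB honours the caller's OOB offset */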
+	switch (ops->mode) {
+	case MTD_OPS_PLACE_OOB:
+		buf += ops->ooboffs;
+		break;
+	default:
+		break;
+	}
+
+	calc_block_sector(from, &block0, &block1, &page, &ofs);
+	if (block1 > docg3->max_block)
+		return -EINVAL;
+
+	ret = doc_read_page_prepare(docg3, block0, block1, page,
+				    ofs + DOC_LAYOUT_PAGE_SIZE);
+	if (!ret)
+		ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE);
+	if (!ret)
+		ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
+					     buf, 1);
+	doc_read_page_finish(docg3);
+
+	if (ret > 0)
+		ops->oobretlen = ret;
+	else
+		ops->oobretlen = 0;
+	return (ret > 0) ? 0 : ret;
+}
+
+static int doc_reload_bbt(struct docg3 *docg3)
+{
+	int block = DOC_LAYOUT_BLOCK_BBT;
+	int ret = 0, nbpages, page;
+	u_char *buf = docg3->bbt;
+
+	nbpages = DIV_ROUND_UP(docg3->max_block + 1, 8 * DOC_LAYOUT_PAGE_SIZE);
+	for (page = 0; !ret && (page < nbpages); page++) {
+		ret = doc_read_page_prepare(docg3, block, block + 1,
+					    page + DOC_LAYOUT_PAGE_BBT, 0);
+		if (!ret)
+			ret = doc_read_page_ecc_init(docg3,
+						     DOC_LAYOUT_PAGE_SIZE);
+		if (!ret)
+			doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
+					       buf, 1);
+		buf += DOC_LAYOUT_PAGE_SIZE;
+	}
+	doc_read_page_finish(docg3);
+	return ret;
+}
+
+/**
+ * doc_block_isbad - Checks whether a block is good or not
+ * @mtd: the device
+ * @from: the offset to find the correct block
+ *
+ * Returns 1 if block is bad, 0 if block is good
+ */
+static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
+{
+	struct docg3 *docg3 = mtd->priv;
+	int block0, block1, page, ofs, is_good;
+
+	calc_block_sector(from, &block0, &block1, &page, &ofs);
+	doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
+		from, block0, block1, page, ofs);
+
+	if (block0 < DOC_LAYOUT_BLOCK_FIRST_DATA)
+		return 0;
+	if (block1 > docg3->max_block)
+		return -EINVAL;
+
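+	/* One bit per block in the BBT: 1 means the block is good */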
+	is_good = docg3->bbt[block0 >> 3] & (1 << (block0 & 0x7));
+	return !is_good;
+}
+
+/**
+ * doc_get_erase_count - Get block erase count
+ * @docg3: the device
+ * @from: the offset in which the block is.
+ *
+ * Get the number of times a block was erased. The number is the maximum of the
+ * erase counts of the first and second planes (which should normally be equal).
+ *
+ * Returns the number of erases, or -EINVAL or -EIO on error.
+ */
+static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
+{
+	u8 buf[DOC_LAYOUT_WEAR_SIZE];
+	int ret, plane1_erase_count, plane2_erase_count;
+	int block0, block1, page, ofs;
+
+	doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
+	if (from % DOC_LAYOUT_PAGE_SIZE)
+		return -EINVAL;
+	calc_block_sector(from, &block0, &block1, &page, &ofs);
+	if (block1 > docg3->max_block)
+		return -EINVAL;
+
+	ret = doc_reset_seq(docg3);
+	if (!ret)
+		ret = doc_read_page_prepare(docg3, block0, block1, page,
+					    ofs + DOC_LAYOUT_WEAR_OFFSET);
+	if (!ret)
+		ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
+					     buf, 1);
+	doc_read_page_finish(docg3);
+
+	if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
+		return -EIO;
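+	/* The erase counters are stored bit-inverted, spread over bytes
+	 * 1/4/5 (plane 1) and 3/6/7 (plane 2) of the wear area */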
+	plane1_erase_count = (u8)(~buf[1]) | ((u8)(~buf[4]) << 8)
+		| ((u8)(~buf[5]) << 16);
+	plane2_erase_count = (u8)(~buf[3]) | ((u8)(~buf[6]) << 8)
+		| ((u8)(~buf[7]) << 16);
+
+	return max(plane1_erase_count, plane2_erase_count);
+}
+
+/*
+ * Debugfs entries
+ */
+static int dbg_flashctrl_show(struct seq_file *s, void *p)
+{
+	struct docg3 *docg3 = (struct docg3 *)s->private;
+
+	int pos = 0;
+	u8 fctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+
+	pos += seq_printf(s,
+		 "FlashControl : 0x%02x (%s,CE# %s,%s,%s,flash %s)\n",
+		 fctrl,
+		 fctrl & DOC_CTRL_VIOLATION ? "protocol violation" : "-",
+		 fctrl & DOC_CTRL_CE ? "active" : "inactive",
+		 fctrl & DOC_CTRL_PROTECTION_ERROR ? "protection error" : "-",
+		 fctrl & DOC_CTRL_SEQUENCE_ERROR ? "sequence error" : "-",
+		 fctrl & DOC_CTRL_FLASHREADY ? "ready" : "not ready");
+	return pos;
+}
+DEBUGFS_RO_ATTR(flashcontrol, dbg_flashctrl_show);
+
+static int dbg_asicmode_show(struct seq_file *s, void *p)
+{
+	struct docg3 *docg3 = (struct docg3 *)s->private;
+
+	int pos = 0;
+	int pctrl = doc_register_readb(docg3, DOC_ASICMODE);
+	int mode = pctrl & 0x03;
+
+	pos += seq_printf(s,
+			 "%04x : RAM_WE=%d,RSTIN_RESET=%d,BDETCT_RESET=%d,WRITE_ENABLE=%d,POWERDOWN=%d,MODE=%d%d (",
+			 pctrl,
+			 pctrl & DOC_ASICMODE_RAM_WE ? 1 : 0,
+			 pctrl & DOC_ASICMODE_RSTIN_RESET ? 1 : 0,
+			 pctrl & DOC_ASICMODE_BDETCT_RESET ? 1 : 0,
+			 pctrl & DOC_ASICMODE_MDWREN ? 1 : 0,
+			 pctrl & DOC_ASICMODE_POWERDOWN ? 1 : 0,
+			 mode >> 1, mode & 0x1);
+
+	switch (mode) {
+	case DOC_ASICMODE_RESET:
+		pos += seq_printf(s, "reset");
+		break;
+	case DOC_ASICMODE_NORMAL:
+		pos += seq_printf(s, "normal");
+		break;
+	case DOC_ASICMODE_POWERDOWN:
+		pos += seq_printf(s, "powerdown");
+		break;
+	}
+	pos += seq_printf(s, ")\n");
+	return pos;
+}
+DEBUGFS_RO_ATTR(asic_mode, dbg_asicmode_show);
+
+static int dbg_device_id_show(struct seq_file *s, void *p)
+{
+	struct docg3 *docg3 = (struct docg3 *)s->private;
+	int pos = 0;
+	int id = doc_register_readb(docg3, DOC_DEVICESELECT);
+
+	pos += seq_printf(s, "DeviceId = %d\n", id);
+	return pos;
+}
+DEBUGFS_RO_ATTR(device_id, dbg_device_id_show);
+
+static int dbg_protection_show(struct seq_file *s, void *p)
+{
+	struct docg3 *docg3 = (struct docg3 *)s->private;
+	int pos = 0;
+	int protect = doc_register_readb(docg3, DOC_PROTECTION);
+	int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+	int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW);
+	int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH);
+	int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+	int dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW);
+	int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH);
+
+	pos += seq_printf(s, "Protection = 0x%02x (",
+			 protect);
+	if (protect & DOC_PROTECT_FOUNDRY_OTP_LOCK)
+		pos += seq_printf(s, "FOUNDRY_OTP_LOCK,");
+	if (protect & DOC_PROTECT_CUSTOMER_OTP_LOCK)
+		pos += seq_printf(s, "CUSTOMER_OTP_LOCK,");
+	if (protect & DOC_PROTECT_LOCK_INPUT)
+		pos += seq_printf(s, "LOCK_INPUT,");
+	if (protect & DOC_PROTECT_STICKY_LOCK)
+		pos += seq_printf(s, "STICKY_LOCK,");
+	if (protect & DOC_PROTECT_PROTECTION_ENABLED)
+		pos += seq_printf(s, "PROTECTION ON,");
+	if (protect & DOC_PROTECT_IPL_DOWNLOAD_LOCK)
+		pos += seq_printf(s, "IPL_DOWNLOAD_LOCK,");
+	if (protect & DOC_PROTECT_PROTECTION_ERROR)
+		pos += seq_printf(s, "PROTECT_ERR,");
+	else
+		pos += seq_printf(s, "NO_PROTECT_ERR");
+	pos += seq_printf(s, ")\n");
+
+	pos += seq_printf(s, "DPS0 = 0x%02x : "
+			 "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
+			 "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+			 dps0, dps0_low, dps0_high,
+			 !!(dps0 & DOC_DPS_OTP_PROTECTED),
+			 !!(dps0 & DOC_DPS_READ_PROTECTED),
+			 !!(dps0 & DOC_DPS_WRITE_PROTECTED),
+			 !!(dps0 & DOC_DPS_HW_LOCK_ENABLED),
+			 !!(dps0 & DOC_DPS_KEY_OK));
+	pos += seq_printf(s, "DPS1 = 0x%02x : "
+			 "Protected area [0x%x - 0x%x] : OTP=%d, READ=%d, "
+			 "WRITE=%d, HW_LOCK=%d, KEY_OK=%d\n",
+			 dps1, dps1_low, dps1_high,
+			 !!(dps1 & DOC_DPS_OTP_PROTECTED),
+			 !!(dps1 & DOC_DPS_READ_PROTECTED),
+			 !!(dps1 & DOC_DPS_WRITE_PROTECTED),
+			 !!(dps1 & DOC_DPS_HW_LOCK_ENABLED),
+			 !!(dps1 & DOC_DPS_KEY_OK));
+	return pos;
+}
+DEBUGFS_RO_ATTR(protection, dbg_protection_show);
+
+static int __init doc_dbg_register(struct docg3 *docg3)
+{
+	struct dentry *root, *entry;
+
+	root = debugfs_create_dir("docg3", NULL);
+	if (!root)
+		return -ENOMEM;
+
+	entry = debugfs_create_file("flashcontrol", S_IRUSR, root, docg3,
+				  &flashcontrol_fops);
+	if (entry)
+		entry = debugfs_create_file("asic_mode", S_IRUSR, root,
+					    docg3, &asic_mode_fops);
+	if (entry)
+		entry = debugfs_create_file("device_id", S_IRUSR, root,
+					    docg3, &device_id_fops);
+	if (entry)
+		entry = debugfs_create_file("protection", S_IRUSR, root,
+					    docg3, &protection_fops);
+	if (entry) {
+		docg3->debugfs_root = root;
+		return 0;
+	} else {
+		debugfs_remove_recursive(root);
+		return -ENOMEM;
+	}
+}
+
+static void __exit doc_dbg_unregister(struct docg3 *docg3)
+{
+	debugfs_remove_recursive(docg3->debugfs_root);
+}
+
+/**
+ * doc_set_driver_info - Fill the mtd_info structure and docg3 structure
+ * @chip_id: The chip ID of the supported chip
+ * @mtd: The structure to fill
+ */
+static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
+{
+	struct docg3 *docg3 = mtd->priv;
+	int cfg;
+
+	cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
+	docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
+
+	switch (chip_id) {
+	case DOC_CHIPID_G3:
+		mtd->name = "DiskOnChip G3";
+		docg3->max_block = 2047;
+		break;
+	}
+	mtd->type = MTD_NANDFLASH;
+	/*
+	 * Once write methods are added, the correct flags will be set.
+	 * mtd->flags = MTD_CAP_NANDFLASH;
+	 */
+	mtd->flags = MTD_CAP_ROM;
+	mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
+	mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
+	mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
+	mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
+	mtd->owner = THIS_MODULE;
+	mtd->erase = NULL;
+	mtd->point = NULL;
+	mtd->unpoint = NULL;
+	mtd->read = doc_read;
+	mtd->write = NULL;
+	mtd->read_oob = doc_read_oob;
+	mtd->write_oob = NULL;
+	mtd->sync = NULL;
+	mtd->block_isbad = doc_block_isbad;
+}
+
+/**
+ * doc_probe - Probe the IO space for a DiskOnChip G3 chip
+ * @pdev: platform device
+ *
+ * Probes for a G3 chip at the specified IO space in the platform data
+ * resources.
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int __init docg3_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct docg3 *docg3;
+	struct mtd_info *mtd;
+	struct resource *ress;
+	int ret, bbt_nbpages;
+	u16 chip_id, chip_id_inv;
+
+	ret = -ENOMEM;
+	docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
+	if (!docg3)
+		goto nomem1;
+	mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
+	if (!mtd)
+		goto nomem2;
+	mtd->priv = docg3;
+
+	ret = -ENXIO;
+	ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!ress) {
+		dev_err(dev, "No I/O memory resource defined\n");
+		goto noress;
+	}
+	docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE);
+	if (!docg3->base)
+		goto noress;
+
+	docg3->dev = &pdev->dev;
+	docg3->device_id = 0;
+	doc_set_device_id(docg3, docg3->device_id);
+	doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
+	doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
+
+	chip_id = doc_register_readw(docg3, DOC_CHIPID);
+	chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
+
+	ret = -ENODEV;
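+	/* A present chip mirrors the bit-inverted ID in DOC_CHIPID_INV */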
+	if (chip_id != (u16)(~chip_id_inv)) {
+		doc_info("No device found at IO addr %p\n",
+			 (void *)ress->start);
+		goto nochipfound;
+	}
+
+	switch (chip_id) {
+	case DOC_CHIPID_G3:
+		doc_info("Found a G3 DiskOnChip at addr %p\n",
+			 (void *)ress->start);
+		break;
+	default:
+		doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
+		goto nochipfound;
+	}
+
+	doc_set_driver_info(chip_id, mtd);
+	platform_set_drvdata(pdev, mtd);
+
+	ret = -ENOMEM;
+	bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
+				   8 * DOC_LAYOUT_PAGE_SIZE);
+	docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
+	if (!docg3->bbt)
+		goto nochipfound;
+	doc_reload_bbt(docg3);
+
+	ret = mtd_device_parse_register(mtd, part_probes,
+					NULL, NULL, 0);
+	if (ret)
+		goto register_error;
+
+	doc_dbg_register(docg3);
+	return 0;
+
+register_error:
+	kfree(docg3->bbt);
+nochipfound:
+	iounmap(docg3->base);
+noress:
+	kfree(mtd);
+nomem2:
+	kfree(docg3);
+nomem1:
+	return ret;
+}
+
+/**
+ * docg3_release - Release the driver
+ * @pdev: the platform device
+ *
+ * Returns 0
+ */
+static int __exit docg3_release(struct platform_device *pdev)
+{
+	struct mtd_info *mtd = platform_get_drvdata(pdev);
+	struct docg3 *docg3 = mtd->priv;
+
+	doc_dbg_unregister(docg3);
+	mtd_device_unregister(mtd);
+	iounmap(docg3->base);
+	kfree(docg3->bbt);
+	kfree(docg3);
+	kfree(mtd);
+	return 0;
+}
+
+static struct platform_driver g3_driver = {
+	.driver		= {
+		.name	= "docg3",
+		.owner	= THIS_MODULE,
+	},
+	.remove		= __exit_p(docg3_release),
+};
+
+static int __init docg3_init(void)
+{
+	return platform_driver_probe(&g3_driver, docg3_probe);
+}
+module_init(docg3_init);
+
+
+static void __exit docg3_exit(void)
+{
+	platform_driver_unregister(&g3_driver);
+}
+module_exit(docg3_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_DESCRIPTION("MTD driver for DiskOnChip G3");
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
new file mode 100644
index 0000000..0d407be
--- /dev/null
+++ b/drivers/mtd/devices/docg3.h
@@ -0,0 +1,297 @@
+/*
+ * Handles the M-Systems DiskOnChip G3 chip
+ *
+ * Copyright (C) 2011 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef _MTD_DOCG3_H
+#define _MTD_DOCG3_H
+
+/*
+ * Flash memory areas :
+ *   - 0x0000 .. 0x07ff : IPL
+ *   - 0x0800 .. 0x0fff : Data area
+ *   - 0x1000 .. 0x17ff : Registers
+ *   - 0x1800 .. 0x1fff : Unknown
+ */
+#define DOC_IOSPACE_IPL			0x0000
+#define DOC_IOSPACE_DATA		0x0800
+#define DOC_IOSPACE_SIZE		0x2000
+
+/*
+ * DOC G3 layout and addressing scheme
+ *   A page address for the block "b", plane "P" and page "p":
+ *   address = [bbbb bPpp pppp]
+ */
+
+#define DOC_ADDR_PAGE_MASK		0x3f
+#define DOC_ADDR_BLOCK_SHIFT		6
+#define DOC_LAYOUT_NBPLANES		2
+#define DOC_LAYOUT_PAGES_PER_BLOCK	64
+#define DOC_LAYOUT_PAGE_SIZE		512
+#define DOC_LAYOUT_OOB_SIZE		16
+#define DOC_LAYOUT_WEAR_SIZE		8
+#define DOC_LAYOUT_PAGE_OOB_SIZE				\
+	(DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_SIZE)
+#define DOC_LAYOUT_WEAR_OFFSET		(DOC_LAYOUT_PAGE_OOB_SIZE * 2)
+#define DOC_LAYOUT_BLOCK_SIZE					\
+	(DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
+#define DOC_ECC_BCH_SIZE		7
+#define DOC_ECC_BCH_COVERED_BYTES				\
+	(DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ +	\
+	 DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ)
+
+/*
+ * Blocks distribution
+ */
+#define DOC_LAYOUT_BLOCK_BBT		0
+#define DOC_LAYOUT_BLOCK_OTP		0
+#define DOC_LAYOUT_BLOCK_FIRST_DATA	6
+
+#define DOC_LAYOUT_PAGE_BBT		4
+
+/*
+ * Extra page OOB (16 bytes wide) layout
+ */
+#define DOC_LAYOUT_OOB_PAGEINFO_OFS	0
+#define DOC_LAYOUT_OOB_HAMMING_OFS	7
+#define DOC_LAYOUT_OOB_BCH_OFS		8
+#define DOC_LAYOUT_OOB_UNUSED_OFS	15
+#define DOC_LAYOUT_OOB_PAGEINFO_SZ	7
+#define DOC_LAYOUT_OOB_HAMMING_SZ	1
+#define DOC_LAYOUT_OOB_BCH_SZ		7
+#define DOC_LAYOUT_OOB_UNUSED_SZ	1
+
+
+#define DOC_CHIPID_G3			0x200
+#define DOC_ERASE_MARK			0xaa
+/*
+ * Flash registers
+ */
+#define DOC_CHIPID			0x1000
+#define DOC_TEST			0x1004
+#define DOC_BUSLOCK			0x1006
+#define DOC_ENDIANCONTROL		0x1008
+#define DOC_DEVICESELECT		0x100a
+#define DOC_ASICMODE			0x100c
+#define DOC_CONFIGURATION		0x100e
+#define DOC_INTERRUPTCONTROL		0x1010
+#define DOC_READADDRESS			0x101a
+#define DOC_DATAEND			0x101e
+#define DOC_INTERRUPTSTATUS		0x1020
+
+#define DOC_FLASHSEQUENCE		0x1032
+#define DOC_FLASHCOMMAND		0x1034
+#define DOC_FLASHADDRESS		0x1036
+#define DOC_FLASHCONTROL		0x1038
+#define DOC_NOP				0x103e
+
+#define DOC_ECCCONF0			0x1040
+#define DOC_ECCCONF1			0x1042
+#define DOC_ECCPRESET			0x1044
+#define DOC_HAMMINGPARITY		0x1046
+#define DOC_BCH_SYNDROM(idx)		(0x1048 + ((idx) << 1))
+
+#define DOC_PROTECTION			0x1056
+#define DOC_DPS0_ADDRLOW		0x1060
+#define DOC_DPS0_ADDRHIGH		0x1062
+#define DOC_DPS1_ADDRLOW		0x1064
+#define DOC_DPS1_ADDRHIGH		0x1066
+#define DOC_DPS0_STATUS			0x106c
+#define DOC_DPS1_STATUS			0x106e
+
+#define DOC_ASICMODECONFIRM		0x1072
+#define DOC_CHIPID_INV			0x1074
+
+/*
+ * Flash sequences
+ * A sequence is preset before one or more commands are input to the chip.
+ */
+#define DOC_SEQ_RESET			0x00
+#define DOC_SEQ_PAGE_SIZE_532		0x03
+#define DOC_SEQ_SET_MODE		0x09
+#define DOC_SEQ_READ			0x12
+#define DOC_SEQ_SET_PLANE1		0x0e
+#define DOC_SEQ_SET_PLANE2		0x10
+#define DOC_SEQ_PAGE_SETUP		0x1d
+
+/*
+ * Flash commands
+ */
+#define DOC_CMD_READ_PLANE1		0x00
+#define DOC_CMD_SET_ADDR_READ		0x05
+#define DOC_CMD_READ_ALL_PLANES		0x30
+#define DOC_CMD_READ_PLANE2		0x50
+#define DOC_CMD_READ_FLASH		0xe0
+#define DOC_CMD_PAGE_SIZE_532		0x3c
+
+#define DOC_CMD_PROG_BLOCK_ADDR		0x60
+#define DOC_CMD_PROG_CYCLE1		0x80
+#define DOC_CMD_PROG_CYCLE2		0x10
+#define DOC_CMD_ERASECYCLE2		0xd0
+
+#define DOC_CMD_RELIABLE_MODE		0x22
+#define DOC_CMD_FAST_MODE		0xa2
+
+#define DOC_CMD_RESET			0xff
+
+/*
+ * Flash register : DOC_FLASHCONTROL
+ */
+#define DOC_CTRL_VIOLATION		0x20
+#define DOC_CTRL_CE			0x10
+#define DOC_CTRL_UNKNOWN_BITS		0x08
+#define DOC_CTRL_PROTECTION_ERROR	0x04
+#define DOC_CTRL_SEQUENCE_ERROR		0x02
+#define DOC_CTRL_FLASHREADY		0x01
+
+/*
+ * Flash register : DOC_ASICMODE
+ */
+#define DOC_ASICMODE_RESET		0x00
+#define DOC_ASICMODE_NORMAL		0x01
+#define DOC_ASICMODE_POWERDOWN		0x02
+#define DOC_ASICMODE_MDWREN		0x04
+#define DOC_ASICMODE_BDETCT_RESET	0x08
+#define DOC_ASICMODE_RSTIN_RESET	0x10
+#define DOC_ASICMODE_RAM_WE		0x20
+
+/*
+ * Flash register : DOC_ECCCONF0
+ */
+#define DOC_ECCCONF0_READ_MODE		0x8000
+#define DOC_ECCCONF0_AUTO_ECC_ENABLE	0x4000
+#define DOC_ECCCONF0_HAMMING_ENABLE	0x1000
+#define DOC_ECCCONF0_BCH_ENABLE		0x0800
+#define DOC_ECCCONF0_DATA_BYTES_MASK	0x07ff
+
+/*
+ * Flash register : DOC_ECCCONF1
+ */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR	0x80
+#define DOC_ECCCONF1_UNKOWN1		0x40
+#define DOC_ECCCONF1_UNKOWN2		0x20
+#define DOC_ECCCONF1_UNKOWN3		0x10
+#define DOC_ECCCONF1_HAMMING_BITS_MASK	0x0f
+
+/*
+ * Flash register : DOC_PROTECTION
+ */
+#define DOC_PROTECT_FOUNDRY_OTP_LOCK	0x01
+#define DOC_PROTECT_CUSTOMER_OTP_LOCK	0x02
+#define DOC_PROTECT_LOCK_INPUT		0x04
+#define DOC_PROTECT_STICKY_LOCK		0x08
+#define DOC_PROTECT_PROTECTION_ENABLED	0x10
+#define DOC_PROTECT_IPL_DOWNLOAD_LOCK	0x20
+#define DOC_PROTECT_PROTECTION_ERROR	0x80
+
+/*
+ * Flash register : DOC_DPS0_STATUS and DOC_DPS1_STATUS
+ */
+#define DOC_DPS_OTP_PROTECTED		0x01
+#define DOC_DPS_READ_PROTECTED		0x02
+#define DOC_DPS_WRITE_PROTECTED		0x04
+#define DOC_DPS_HW_LOCK_ENABLED		0x08
+#define DOC_DPS_KEY_OK			0x80
+
+/*
+ * Flash register : DOC_CONFIGURATION
+ */
+#define DOC_CONF_IF_CFG			0x80
+#define DOC_CONF_MAX_ID_MASK		0x30
+#define DOC_CONF_VCCQ_3V		0x01
+
+/*
+ * Flash register : DOC_READADDRESS
+ */
+#define DOC_READADDR_INC		0x8000
+#define DOC_READADDR_ONE_BYTE		0x4000
+#define DOC_READADDR_ADDR_MASK		0x1fff
+
+/**
+ * struct docg3 - DiskOnChip driver private data
+ * @dev: the device currently under control
+ * @base: mapped IO space
+ * @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
+ * @if_cfg: if true, reads are on 16 bits, else reads are on 8 bits
+ * @max_block: the highest valid block index of the flash
+ * @bbt: bad block table cache
+ * @debugfs_root: debugfs root node
+ */
+struct docg3 {
+	struct device *dev;
+	void __iomem *base;
+	unsigned int device_id:4;
+	unsigned int if_cfg:1;
+	int max_block;
+	u8 *bbt;
+	struct dentry *debugfs_root;
+};
+
+#define doc_err(fmt, arg...) dev_err(docg3->dev, (fmt), ## arg)
+#define doc_info(fmt, arg...) dev_info(docg3->dev, (fmt), ## arg)
+#define doc_dbg(fmt, arg...) dev_dbg(docg3->dev, (fmt), ## arg)
+#define doc_vdbg(fmt, arg...) dev_vdbg(docg3->dev, (fmt), ## arg)
+
+#define DEBUGFS_RO_ATTR(name, show_fct) \
+	static int name##_open(struct inode *inode, struct file *file) \
+	{ return single_open(file, show_fct, inode->i_private); }      \
+	static const struct file_operations name##_fops = { \
+		.owner = THIS_MODULE, \
+		.open = name##_open, \
+		.llseek = seq_lseek, \
+		.read = seq_read, \
+		.release = single_release \
+	};
+#endif
+
+/*
+ * Trace events part
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM docg3
+
+#if !defined(_MTD_DOCG3_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define _MTD_DOCG3_TRACE
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(docg3_io,
+	    TP_PROTO(int op, int width, u16 reg, int val),
+	    TP_ARGS(op, width, reg, val),
+	    TP_STRUCT__entry(
+		    __field(int, op)
+		    __field(unsigned char, width)
+		    __field(u16, reg)
+		    __field(int, val)),
+	    TP_fast_assign(
+		    __entry->op = op;
+		    __entry->width = width;
+		    __entry->reg = reg;
+		    __entry->val = val;),
+	    TP_printk("docg3: %s%02d reg=%04x, val=%04x",
+		      __entry->op ? "write" : "read", __entry->width,
+		      __entry->reg, __entry->val)
+	);
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE docg3
+#include <trace/define_trace.h>
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index d374603..45116bb 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -50,11 +50,6 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/doc2000.h>
 
-/* Where to look for the devices? */
-#ifndef CONFIG_MTD_DOCPROBE_ADDRESS
-#define CONFIG_MTD_DOCPROBE_ADDRESS 0
-#endif
-
 
 static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS;
 module_param(doc_config_location, ulong, 0);
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 772a0ff..3a11ea62 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -34,9 +34,6 @@
 /* debugging */
 //#define LART_DEBUG
 
-/* partition support */
-#define HAVE_PARTITIONS
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -44,9 +41,7 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/mtd/mtd.h>
-#ifdef HAVE_PARTITIONS
 #include <linux/mtd/partitions.h>
-#endif
 
 #ifndef CONFIG_SA1100_LART
 #error This is for LART architecture only
@@ -598,7 +593,6 @@
 	}
 };
 
-#ifdef HAVE_PARTITIONS
 static struct mtd_partition lart_partitions[] = {
 	/* blob */
 	{
@@ -619,7 +613,7 @@
 		.size	= INITRD_LEN,		/* MTDPART_SIZ_FULL */
 	}
 };
-#endif
+#define NUM_PARTITIONS ARRAY_SIZE(lart_partitions)
 
 static int __init lart_flash_init (void)
 {
@@ -668,7 +662,6 @@
 			   result,mtd.eraseregions[result].erasesize,mtd.eraseregions[result].erasesize / 1024,
 			   result,mtd.eraseregions[result].numblocks);
 
-#ifdef HAVE_PARTITIONS
    printk ("\npartitions = %d\n", ARRAY_SIZE(lart_partitions));
 
    for (result = 0; result < ARRAY_SIZE(lart_partitions); result++)
@@ -681,25 +674,16 @@
 			 result,lart_partitions[result].offset,
 			 result,lart_partitions[result].size,lart_partitions[result].size / 1024);
 #endif
-#endif
 
-#ifndef HAVE_PARTITIONS
-   result = mtd_device_register(&mtd, NULL, 0);
-#else
    result = mtd_device_register(&mtd, lart_partitions,
                                 ARRAY_SIZE(lart_partitions));
-#endif
 
    return (result);
 }
 
 static void __exit lart_flash_exit (void)
 {
-#ifndef HAVE_PARTITIONS
    mtd_device_unregister(&mtd);
-#else
-   mtd_device_unregister(&mtd);
-#endif
 }
 
 module_init (lart_flash_init);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 35180e4..884904d 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -30,6 +30,7 @@
 #include <linux/mtd/cfi.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
+#include <linux/of_platform.h>
 
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
@@ -88,7 +89,6 @@
 	struct spi_device	*spi;
 	struct mutex		lock;
 	struct mtd_info		mtd;
-	unsigned		partitioned:1;
 	u16			page_size;
 	u16			addr_width;
 	u8			erase_opcode;
@@ -209,9 +209,8 @@
  */
 static int erase_chip(struct m25p *flash)
 {
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %lldKiB\n",
-	      dev_name(&flash->spi->dev), __func__,
-	      (long long)(flash->mtd.size >> 10));
+	pr_debug("%s: %s %lldKiB\n", dev_name(&flash->spi->dev), __func__,
+			(long long)(flash->mtd.size >> 10));
 
 	/* Wait until finished previous write command. */
 	if (wait_till_ready(flash))
@@ -250,9 +249,8 @@
  */
 static int erase_sector(struct m25p *flash, u32 offset)
 {
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: %s %dKiB at 0x%08x\n",
-			dev_name(&flash->spi->dev), __func__,
-			flash->mtd.erasesize / 1024, offset);
+	pr_debug("%s: %s %dKiB at 0x%08x\n", dev_name(&flash->spi->dev),
+			__func__, flash->mtd.erasesize / 1024, offset);
 
 	/* Wait until finished previous write command. */
 	if (wait_till_ready(flash))
@@ -286,9 +284,9 @@
 	u32 addr,len;
 	uint32_t rem;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%llx, len %lld\n",
-	      dev_name(&flash->spi->dev), __func__, "at",
-	      (long long)instr->addr, (long long)instr->len);
+	pr_debug("%s: %s at 0x%llx, len %lld\n", dev_name(&flash->spi->dev),
+			__func__, (long long)instr->addr,
+			(long long)instr->len);
 
 	/* sanity checks */
 	if (instr->addr + instr->len > flash->mtd.size)
@@ -348,9 +346,8 @@
 	struct spi_transfer t[2];
 	struct spi_message m;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
-			dev_name(&flash->spi->dev), __func__, "from",
-			(u32)from, len);
+	pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
+			__func__, (u32)from, len);
 
 	/* sanity checks */
 	if (!len)
@@ -417,9 +414,8 @@
 	struct spi_transfer t[2];
 	struct spi_message m;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
-			dev_name(&flash->spi->dev), __func__, "to",
-			(u32)to, len);
+	pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
+			__func__, (u32)to, len);
 
 	*retlen = 0;
 
@@ -510,9 +506,8 @@
 	size_t actual;
 	int cmd_sz, ret;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
-			dev_name(&flash->spi->dev), __func__, "to",
-			(u32)to, len);
+	pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev),
+			__func__, (u32)to, len);
 
 	*retlen = 0;
 
@@ -661,6 +656,7 @@
 	{ "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) },
 
 	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K) },
+	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) },
 	{ "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },
 
 	{ "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
@@ -671,6 +667,7 @@
 	/* EON -- en25xxx */
 	{ "en25f32", INFO(0x1c3116, 0, 64 * 1024,  64, SECT_4K) },
 	{ "en25p32", INFO(0x1c2016, 0, 64 * 1024,  64, 0) },
+	{ "en25q32b", INFO(0x1c3016, 0, 64 * 1024,  64, 0) },
 	{ "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
 
 	/* Intel/Numonyx -- xxxs33b */
@@ -788,8 +785,8 @@
 	 */
 	tmp = spi_write_then_read(spi, &code, 1, id, 5);
 	if (tmp < 0) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
-			dev_name(&spi->dev), tmp);
+		pr_debug("%s: error %d reading JEDEC ID\n",
+				dev_name(&spi->dev), tmp);
 		return ERR_PTR(tmp);
 	}
 	jedec = id[0];
@@ -825,8 +822,12 @@
 	struct m25p			*flash;
 	struct flash_info		*info;
 	unsigned			i;
-	struct mtd_partition		*parts = NULL;
-	int				nr_parts = 0;
+	struct mtd_part_parser_data	ppdata;
+
+#ifdef CONFIG_MTD_OF_PARTS
+	if (!of_device_is_available(spi->dev.of_node))
+		return -ENODEV;
+#endif
 
 	/* Platform data helps sort out which chip type we have, as
 	 * well as how this board partitions it.  If we don't have
@@ -928,6 +929,7 @@
 	if (info->flags & M25P_NO_ERASE)
 		flash->mtd.flags |= MTD_NO_ERASE;
 
+	ppdata.of_node = spi->dev.of_node;
 	flash->mtd.dev.parent = &spi->dev;
 	flash->page_size = info->page_size;
 
@@ -945,8 +947,7 @@
 	dev_info(&spi->dev, "%s (%lld Kbytes)\n", id->name,
 			(long long)flash->mtd.size >> 10);
 
-	DEBUG(MTD_DEBUG_LEVEL2,
-		"mtd .name = %s, .size = 0x%llx (%lldMiB) "
+	pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
 			".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
 		flash->mtd.name,
 		(long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
@@ -955,8 +956,7 @@
 
 	if (flash->mtd.numeraseregions)
 		for (i = 0; i < flash->mtd.numeraseregions; i++)
-			DEBUG(MTD_DEBUG_LEVEL2,
-				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
+			pr_debug("mtd.eraseregions[%d] = { .offset = 0x%llx, "
 				".erasesize = 0x%.8x (%uKiB), "
 				".numblocks = %d }\n",
 				i, (long long)flash->mtd.eraseregions[i].offset,
@@ -968,41 +968,9 @@
 	/* partitions should match sector boundaries; and it may be good to
 	 * use readonly partitions for writeprotected sectors (BP2..BP0).
 	 */
-	if (mtd_has_cmdlinepart()) {
-		static const char *part_probes[]
-			= { "cmdlinepart", NULL, };
-
-		nr_parts = parse_mtd_partitions(&flash->mtd,
-						part_probes, &parts, 0);
-	}
-
-	if (nr_parts <= 0 && data && data->parts) {
-		parts = data->parts;
-		nr_parts = data->nr_parts;
-	}
-
-#ifdef CONFIG_MTD_OF_PARTS
-	if (nr_parts <= 0 && spi->dev.of_node) {
-		nr_parts = of_mtd_parse_partitions(&spi->dev,
-						   spi->dev.of_node, &parts);
-	}
-#endif
-
-	if (nr_parts > 0) {
-		for (i = 0; i < nr_parts; i++) {
-			DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
-			      "{.name = %s, .offset = 0x%llx, "
-			      ".size = 0x%llx (%lldKiB) }\n",
-			      i, parts[i].name,
-			      (long long)parts[i].offset,
-			      (long long)parts[i].size,
-			      (long long)(parts[i].size >> 10));
-		}
-		flash->partitioned = 1;
-	}
-
-	return mtd_device_register(&flash->mtd, parts, nr_parts) == 1 ?
-		-ENODEV : 0;
+	return mtd_device_parse_register(&flash->mtd, NULL, &ppdata,
+			data ? data->parts : NULL,
+			data ? data->nr_parts : 0);
 }
 
 
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 13749d4..d75c7af 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -17,6 +17,8 @@
 #include <linux/mutex.h>
 #include <linux/err.h>
 #include <linux/math64.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
@@ -24,7 +26,6 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 
-
 /*
  * DataFlash is a kind of SPI flash.  Most AT45 chips have two buffers in
  * each chip, which may be used for double buffered I/O; but this driver
@@ -98,6 +99,16 @@
 	struct mtd_info		mtd;
 };
 
+#ifdef CONFIG_OF
+static const struct of_device_id dataflash_dt_ids[] = {
+	{ .compatible = "atmel,at45", },
+	{ .compatible = "atmel,dataflash", },
+	{ /* sentinel */ }
+};
+#else
+#define dataflash_dt_ids NULL
+#endif
+
 /* ......................................................................... */
 
 /*
@@ -122,7 +133,7 @@
 	for (;;) {
 		status = dataflash_status(spi);
 		if (status < 0) {
-			DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n",
+			pr_debug("%s: status %d?\n",
 					dev_name(&spi->dev), status);
 			status = 0;
 		}
@@ -149,7 +160,7 @@
 	uint8_t			*command;
 	uint32_t		rem;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%llx len 0x%llx\n",
+	pr_debug("%s: erase addr=0x%llx len 0x%llx\n",
 	      dev_name(&spi->dev), (long long)instr->addr,
 	      (long long)instr->len);
 
@@ -187,7 +198,7 @@
 		command[2] = (uint8_t)(pageaddr >> 8);
 		command[3] = 0;
 
-		DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n",
+		pr_debug("ERASE %s: (%x) %x %x %x [%i]\n",
 			do_block ? "block" : "page",
 			command[0], command[1], command[2], command[3],
 			pageaddr);
@@ -238,8 +249,8 @@
 	uint8_t			*command;
 	int			status;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
-		dev_name(&priv->spi->dev), (unsigned)from, (unsigned)(from + len));
+	pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev),
+			(unsigned)from, (unsigned)(from + len));
 
 	*retlen = 0;
 
@@ -255,7 +266,7 @@
 
 	command = priv->command;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n",
+	pr_debug("READ: (%x) %x %x %x\n",
 		command[0], command[1], command[2], command[3]);
 
 	spi_message_init(&msg);
@@ -287,7 +298,7 @@
 		*retlen = msg.actual_length - 8;
 		status = 0;
 	} else
-		DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n",
+		pr_debug("%s: read %x..%x --> %d\n",
 			dev_name(&priv->spi->dev),
 			(unsigned)from, (unsigned)(from + len),
 			status);
@@ -314,7 +325,7 @@
 	int			status = -EINVAL;
 	uint8_t			*command;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
+	pr_debug("%s: write 0x%x..0x%x\n",
 		dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len));
 
 	*retlen = 0;
@@ -340,7 +351,7 @@
 
 	mutex_lock(&priv->lock);
 	while (remaining > 0) {
-		DEBUG(MTD_DEBUG_LEVEL3, "write @ %i:%i len=%i\n",
+		pr_debug("write @ %i:%i len=%i\n",
 			pageaddr, offset, writelen);
 
 		/* REVISIT:
@@ -368,12 +379,12 @@
 			command[2] = (addr & 0x0000FF00) >> 8;
 			command[3] = 0;
 
-			DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n",
+			pr_debug("TRANSFER: (%x) %x %x %x\n",
 				command[0], command[1], command[2], command[3]);
 
 			status = spi_sync(spi, &msg);
 			if (status < 0)
-				DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
+				pr_debug("%s: xfer %u -> %d\n",
 					dev_name(&spi->dev), addr, status);
 
 			(void) dataflash_waitready(priv->spi);
@@ -386,7 +397,7 @@
 		command[2] = (addr & 0x0000FF00) >> 8;
 		command[3] = (addr & 0x000000FF);
 
-		DEBUG(MTD_DEBUG_LEVEL3, "PROGRAM: (%x) %x %x %x\n",
+		pr_debug("PROGRAM: (%x) %x %x %x\n",
 			command[0], command[1], command[2], command[3]);
 
 		x[1].tx_buf = writebuf;
@@ -395,7 +406,7 @@
 		status = spi_sync(spi, &msg);
 		spi_transfer_del(x + 1);
 		if (status < 0)
-			DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
+			pr_debug("%s: pgm %u/%u -> %d\n",
 				dev_name(&spi->dev), addr, writelen, status);
 
 		(void) dataflash_waitready(priv->spi);
@@ -410,12 +421,12 @@
 		command[2] = (addr & 0x0000FF00) >> 8;
 		command[3] = 0;
 
-		DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n",
+		pr_debug("COMPARE: (%x) %x %x %x\n",
 			command[0], command[1], command[2], command[3]);
 
 		status = spi_sync(spi, &msg);
 		if (status < 0)
-			DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
+			pr_debug("%s: compare %u -> %d\n",
 				dev_name(&spi->dev), addr, status);
 
 		status = dataflash_waitready(priv->spi);
@@ -634,11 +645,10 @@
 {
 	struct dataflash		*priv;
 	struct mtd_info			*device;
+	struct mtd_part_parser_data	ppdata;
 	struct flash_platform_data	*pdata = spi->dev.platform_data;
 	char				*otp_tag = "";
 	int				err = 0;
-	struct mtd_partition		*parts;
-	int				nr_parts = 0;
 
 	priv = kzalloc(sizeof *priv, GFP_KERNEL);
 	if (!priv)
@@ -677,28 +687,11 @@
 			pagesize, otp_tag);
 	dev_set_drvdata(&spi->dev, priv);
 
-	if (mtd_has_cmdlinepart()) {
-		static const char *part_probes[] = { "cmdlinepart", NULL, };
+	ppdata.of_node = spi->dev.of_node;
+	err = mtd_device_parse_register(device, NULL, &ppdata,
+			pdata ? pdata->parts : NULL,
+			pdata ? pdata->nr_parts : 0);
 
-		nr_parts = parse_mtd_partitions(device, part_probes, &parts,
-						0);
-	}
-
-	if (nr_parts <= 0 && pdata && pdata->parts) {
-		parts = pdata->parts;
-		nr_parts = pdata->nr_parts;
-	}
-
-	if (nr_parts > 0) {
-		priv->partitioned = 1;
-		err = mtd_device_register(device, parts, nr_parts);
-		goto out;
-	}
-
-	if (mtd_device_register(device, NULL, 0) == 1)
-		err = -ENODEV;
-
-out:
 	if (!err)
 		return 0;
 
@@ -787,7 +780,7 @@
 	 */
 	tmp = spi_write_then_read(spi, &code, 1, id, 3);
 	if (tmp < 0) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: error %d reading JEDEC ID\n",
+		pr_debug("%s: error %d reading JEDEC ID\n",
 			dev_name(&spi->dev), tmp);
 		return ERR_PTR(tmp);
 	}
@@ -804,7 +797,7 @@
 			tmp < ARRAY_SIZE(dataflash_data);
 			tmp++, info++) {
 		if (info->jedec_id == jedec) {
-			DEBUG(MTD_DEBUG_LEVEL1, "%s: OTP, sector protect%s\n",
+			pr_debug("%s: OTP, sector protect%s\n",
 				dev_name(&spi->dev),
 				(info->flags & SUP_POW2PS)
 					? ", binary pagesize" : ""
@@ -812,8 +805,7 @@
 			if (info->flags & SUP_POW2PS) {
 				status = dataflash_status(spi);
 				if (status < 0) {
-					DEBUG(MTD_DEBUG_LEVEL1,
-						"%s: status error %d\n",
+					pr_debug("%s: status error %d\n",
 						dev_name(&spi->dev), status);
 					return ERR_PTR(status);
 				}
@@ -878,7 +870,7 @@
 	 */
 	status = dataflash_status(spi);
 	if (status <= 0 || status == 0xff) {
-		DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n",
+		pr_debug("%s: status error %d\n",
 				dev_name(&spi->dev), status);
 		if (status == 0 || status == 0xff)
 			status = -ENODEV;
@@ -914,14 +906,14 @@
 		break;
 	/* obsolete AT45DB1282 not (yet?) supported */
 	default:
-		DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n",
-				dev_name(&spi->dev), status & 0x3c);
+		pr_debug("%s: unsupported device (%x)\n", dev_name(&spi->dev),
+				status & 0x3c);
 		status = -ENODEV;
 	}
 
 	if (status < 0)
-		DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n",
-				dev_name(&spi->dev), status);
+		pr_debug("%s: add_dataflash --> %d\n", dev_name(&spi->dev),
+				status);
 
 	return status;
 }
@@ -931,7 +923,7 @@
 	struct dataflash	*flash = dev_get_drvdata(&spi->dev);
 	int			status;
 
-	DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", dev_name(&spi->dev));
+	pr_debug("%s: remove\n", dev_name(&spi->dev));
 
 	status = mtd_device_unregister(&flash->mtd);
 	if (status == 0) {
@@ -946,6 +938,7 @@
 		.name		= "mtd_dataflash",
 		.bus		= &spi_bus_type,
 		.owner		= THIS_MODULE,
+		.of_match_table = dataflash_dt_ids,
 	},
 
 	.probe		= dataflash_probe,
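
The dataflash_dt_ids/of_match_table addition is the stock pattern for making
an SPI driver matchable from the device tree, with the #else branch keeping
the initializer valid when CONFIG_OF is off. The same idiom as a
self-contained sketch (the compatible string and names are hypothetical):

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/spi/spi.h>

    #ifdef CONFIG_OF
    static const struct of_device_id example_dt_ids[] = {
            { .compatible = "vendor,example-flash", },
            { /* sentinel */ }
    };
    #else
    #define example_dt_ids NULL     /* compiles away cleanly without OF */
    #endif

    static struct spi_driver example_driver = {
            .driver = {
                    .name           = "example-flash",
                    .owner          = THIS_MODULE,
                    .of_match_table = example_dt_ids,
            },
            /* .probe and .remove elided */
    };
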
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index 83e80c6..d38ef3b 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -52,8 +52,6 @@
 	struct spi_device	*spi;
 	struct mutex		lock;
 	struct mtd_info		mtd;
-
-	int 			partitioned;
 };
 
 struct flash_info {
@@ -381,8 +379,6 @@
 	struct sst25l_flash *flash;
 	struct flash_platform_data *data;
 	int ret, i;
-	struct mtd_partition *parts = NULL;
-	int nr_parts = 0;
 
 	flash_info = sst25l_match_device(spi);
 	if (!flash_info)
@@ -414,8 +410,7 @@
 	dev_info(&spi->dev, "%s (%lld KiB)\n", flash_info->name,
 		 (long long)flash->mtd.size >> 10);
 
-	DEBUG(MTD_DEBUG_LEVEL2,
-	      "mtd .name = %s, .size = 0x%llx (%lldMiB) "
+	pr_debug("mtd .name = %s, .size = 0x%llx (%lldMiB) "
 	      ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
 	      flash->mtd.name,
 	      (long long)flash->mtd.size, (long long)(flash->mtd.size >> 20),
@@ -423,37 +418,10 @@
 	      flash->mtd.numeraseregions);
 
 
-	if (mtd_has_cmdlinepart()) {
-		static const char *part_probes[] = {"cmdlinepart", NULL};
-
-		nr_parts = parse_mtd_partitions(&flash->mtd,
-						part_probes,
-						&parts, 0);
-	}
-
-	if (nr_parts <= 0 && data && data->parts) {
-		parts = data->parts;
-		nr_parts = data->nr_parts;
-	}
-
-	if (nr_parts > 0) {
-		for (i = 0; i < nr_parts; i++) {
-			DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
-			      "{.name = %s, .offset = 0x%llx, "
-			      ".size = 0x%llx (%lldKiB) }\n",
-			      i, parts[i].name,
-			      (long long)parts[i].offset,
-			      (long long)parts[i].size,
-			      (long long)(parts[i].size >> 10));
-		}
-
-		flash->partitioned = 1;
-		return mtd_device_register(&flash->mtd, parts,
-					   nr_parts);
-	}
-
-	ret = mtd_device_register(&flash->mtd, NULL, 0);
-	if (ret == 1) {
+	ret = mtd_device_parse_register(&flash->mtd, NULL, 0,
+			data ? data->parts : NULL,
+			data ? data->nr_parts : 0);
+	if (ret) {
 		kfree(flash);
 		dev_set_drvdata(&spi->dev, NULL);
 		return -ENODEV;
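
The DEBUG(MTD_DEBUG_LEVEL*) to pr_debug() substitution running through these
drivers trades the old compile-time verbosity levels for run-time selection.
A hedged sketch of what a converted call site buys, assuming
CONFIG_DYNAMIC_DEBUG is enabled:

    #include <linux/kernel.h>       /* pr_debug(), via printk.h */

    static void example_trace(unsigned int offset, size_t len)
    {
            /*
             * Compiled to nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG is
             * set; with dynamic debug, individual sites are enabled at run
             * time, e.g.:
             *   echo 'file ftl.c +p' > /sys/kernel/debug/dynamic_debug/control
             */
            pr_debug("erasing xfer unit at 0x%x, len %zu\n", offset, len);
    }
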
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index 037b399..c7382bb 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -339,7 +339,7 @@
     struct erase_info *erase;
 
     xfer = &part->XferInfo[xfernum];
-    DEBUG(1, "ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
+    pr_debug("ftl_cs: erasing xfer unit at 0x%x\n", xfer->Offset);
     xfer->state = XFER_ERASING;
 
     /* Is there a free erase slot? Always in MTD. */
@@ -415,7 +415,7 @@
     xfer = &part->XferInfo[i];
     xfer->state = XFER_FAILED;
 
-    DEBUG(1, "ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
+    pr_debug("ftl_cs: preparing xfer unit at 0x%x\n", xfer->Offset);
 
     /* Write the transfer unit header */
     header = part->header;
@@ -476,7 +476,7 @@
 
     eun = &part->EUNInfo[srcunit];
     xfer = &part->XferInfo[xferunit];
-    DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
+    pr_debug("ftl_cs: copying block 0x%x to 0x%x\n",
 	  eun->Offset, xfer->Offset);
 
 
@@ -598,7 +598,7 @@
     unit with the fewest erases, and usually pick the data unit with
     the most deleted blocks.  But with a small probability, pick the
     oldest data unit instead.  This means that we generally postpone
-    the next reclaimation as long as possible, but shuffle static
+    the next reclamation as long as possible, but shuffle static
     stuff around a bit for wear leveling.
 
 ======================================================================*/
@@ -609,8 +609,8 @@
     uint32_t best;
     int queued, ret;
 
-    DEBUG(0, "ftl_cs: reclaiming space...\n");
-    DEBUG(3, "NumTransferUnits == %x\n", part->header.NumTransferUnits);
+    pr_debug("ftl_cs: reclaiming space...\n");
+    pr_debug("NumTransferUnits == %x\n", part->header.NumTransferUnits);
     /* Pick the least erased transfer unit */
     best = 0xffffffff; xfer = 0xffff;
     do {
@@ -618,22 +618,22 @@
 	for (i = 0; i < part->header.NumTransferUnits; i++) {
 	    int n=0;
 	    if (part->XferInfo[i].state == XFER_UNKNOWN) {
-		DEBUG(3,"XferInfo[%d].state == XFER_UNKNOWN\n",i);
+		pr_debug("XferInfo[%d].state == XFER_UNKNOWN\n",i);
 		n=1;
 		erase_xfer(part, i);
 	    }
 	    if (part->XferInfo[i].state == XFER_ERASING) {
-		DEBUG(3,"XferInfo[%d].state == XFER_ERASING\n",i);
+		pr_debug("XferInfo[%d].state == XFER_ERASING\n",i);
 		n=1;
 		queued = 1;
 	    }
 	    else if (part->XferInfo[i].state == XFER_ERASED) {
-		DEBUG(3,"XferInfo[%d].state == XFER_ERASED\n",i);
+		pr_debug("XferInfo[%d].state == XFER_ERASED\n",i);
 		n=1;
 		prepare_xfer(part, i);
 	    }
 	    if (part->XferInfo[i].state == XFER_PREPARED) {
-		DEBUG(3,"XferInfo[%d].state == XFER_PREPARED\n",i);
+		pr_debug("XferInfo[%d].state == XFER_PREPARED\n",i);
 		n=1;
 		if (part->XferInfo[i].EraseCount <= best) {
 		    best = part->XferInfo[i].EraseCount;
@@ -641,12 +641,12 @@
 		}
 	    }
 		if (!n)
-		    DEBUG(3,"XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
+		    pr_debug("XferInfo[%d].state == %x\n",i, part->XferInfo[i].state);
 
 	}
 	if (xfer == 0xffff) {
 	    if (queued) {
-		DEBUG(1, "ftl_cs: waiting for transfer "
+		pr_debug("ftl_cs: waiting for transfer "
 		      "unit to be prepared...\n");
 		if (part->mbd.mtd->sync)
 			part->mbd.mtd->sync(part->mbd.mtd);
@@ -656,7 +656,7 @@
 		    printk(KERN_NOTICE "ftl_cs: reclaim failed: no "
 			   "suitable transfer units!\n");
 		else
-		    DEBUG(1, "ftl_cs: reclaim failed: no "
+		    pr_debug("ftl_cs: reclaim failed: no "
 			  "suitable transfer units!\n");
 
 		return -EIO;
@@ -666,7 +666,7 @@
 
     eun = 0;
     if ((jiffies % shuffle_freq) == 0) {
-	DEBUG(1, "ftl_cs: recycling freshest block...\n");
+	pr_debug("ftl_cs: recycling freshest block...\n");
 	best = 0xffffffff;
 	for (i = 0; i < part->DataUnits; i++)
 	    if (part->EUNInfo[i].EraseCount <= best) {
@@ -686,7 +686,7 @@
 		printk(KERN_NOTICE "ftl_cs: reclaim failed: "
 		       "no free blocks!\n");
 	    else
-		DEBUG(1,"ftl_cs: reclaim failed: "
+		pr_debug("ftl_cs: reclaim failed: "
 		       "no free blocks!\n");
 
 	    return -EIO;
@@ -771,7 +771,7 @@
 	printk(KERN_NOTICE "ftl_cs: bad free list!\n");
 	return 0;
     }
-    DEBUG(2, "ftl_cs: found free block at %d in %d\n", blk, eun);
+    pr_debug("ftl_cs: found free block at %d in %d\n", blk, eun);
     return blk;
 
 } /* find_free */
@@ -791,7 +791,7 @@
     int ret;
     size_t offset, retlen;
 
-    DEBUG(2, "ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
+    pr_debug("ftl_cs: ftl_read(0x%p, 0x%lx, %ld)\n",
 	  part, sector, nblocks);
     if (!(part->state & FTL_FORMATTED)) {
 	printk(KERN_NOTICE "ftl_cs: bad partition\n");
@@ -840,7 +840,7 @@
     int ret;
     size_t retlen, offset;
 
-    DEBUG(2, "ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
+    pr_debug("ftl_cs: set_bam_entry(0x%p, 0x%x, 0x%x)\n",
 	  part, log_addr, virt_addr);
     bsize = 1 << part->header.EraseUnitSize;
     eun = log_addr / bsize;
@@ -905,7 +905,7 @@
     int ret;
     size_t retlen, offset;
 
-    DEBUG(2, "ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
+    pr_debug("ftl_cs: ftl_write(0x%p, %ld, %ld)\n",
 	  part, sector, nblocks);
     if (!(part->state & FTL_FORMATTED)) {
 	printk(KERN_NOTICE "ftl_cs: bad partition\n");
@@ -1011,7 +1011,7 @@
 	partition_t *part = (void *)dev;
 	uint32_t bsize = 1 << part->header.EraseUnitSize;
 
-	DEBUG(1, "FTL erase sector %ld for %d sectors\n",
+	pr_debug("FTL erase sector %ld for %d sectors\n",
 	      sector, nr_sects);
 
 	while (nr_sects) {
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index d7592e6..dd034ef 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -63,14 +63,12 @@
 		return;
 	}
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: add_mtd for %s\n", mtd->name);
+	pr_debug("INFTL: add_mtd for %s\n", mtd->name);
 
 	inftl = kzalloc(sizeof(*inftl), GFP_KERNEL);
 
-	if (!inftl) {
-		printk(KERN_WARNING "INFTL: Out of memory for data structures\n");
+	if (!inftl)
 		return;
-	}
 
 	inftl->mbd.mtd = mtd;
 	inftl->mbd.devnum = -1;
@@ -133,7 +131,7 @@
 {
 	struct INFTLrecord *inftl = (void *)dev;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: remove_dev (i=%d)\n", dev->devnum);
+	pr_debug("INFTL: remove_dev (i=%d)\n", dev->devnum);
 
 	del_mtd_blktrans_dev(dev);
 
@@ -154,7 +152,7 @@
 	struct mtd_oob_ops ops;
 	int res;
 
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ops.ooboffs = offs & (mtd->writesize - 1);
 	ops.ooblen = len;
 	ops.oobbuf = buf;
@@ -174,7 +172,7 @@
 	struct mtd_oob_ops ops;
 	int res;
 
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ops.ooboffs = offs & (mtd->writesize - 1);
 	ops.ooblen = len;
 	ops.oobbuf = buf;
@@ -194,7 +192,7 @@
 	struct mtd_oob_ops ops;
 	int res;
 
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ops.ooboffs = offs;
 	ops.ooblen = mtd->oobsize;
 	ops.oobbuf = oob;
@@ -215,16 +213,16 @@
 	u16 pot = inftl->LastFreeEUN;
 	int silly = inftl->nb_blocks;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findfreeblock(inftl=%p,"
-		"desperate=%d)\n", inftl, desperate);
+	pr_debug("INFTL: INFTL_findfreeblock(inftl=%p,desperate=%d)\n",
+			inftl, desperate);
 
 	/*
 	 * Normally, we force a fold to happen before we run out of free
 	 * blocks completely.
 	 */
 	if (!desperate && inftl->numfreeEUNs < 2) {
-		DEBUG(MTD_DEBUG_LEVEL1, "INFTL: there are too few free "
-			"EUNs (%d)\n", inftl->numfreeEUNs);
+		pr_debug("INFTL: there are too few free EUNs (%d)\n",
+				inftl->numfreeEUNs);
 		return BLOCK_NIL;
 	}
 
@@ -259,8 +257,8 @@
 	struct inftl_oob oob;
 	size_t retlen;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
-		"pending=%d)\n", inftl, thisVUC, pendingblock);
+	pr_debug("INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,pending=%d)\n",
+			inftl, thisVUC, pendingblock);
 
 	memset(BlockMap, 0xff, sizeof(BlockMap));
 	memset(BlockDeleted, 0, sizeof(BlockDeleted));
@@ -323,8 +321,7 @@
 	 * Chain, and the Erase Unit into which we are supposed to be copying.
 	 * Go for it.
 	 */
-	DEBUG(MTD_DEBUG_LEVEL1, "INFTL: folding chain %d into unit %d\n",
-		thisVUC, targetEUN);
+	pr_debug("INFTL: folding chain %d into unit %d\n", thisVUC, targetEUN);
 
 	for (block = 0; block < inftl->EraseSize/SECTORSIZE ; block++) {
 		unsigned char movebuf[SECTORSIZE];
@@ -349,14 +346,13 @@
 		ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
 				(block * SECTORSIZE), SECTORSIZE, &retlen,
 				movebuf);
-		if (ret < 0 && ret != -EUCLEAN) {
+		if (ret < 0 && !mtd_is_bitflip(ret)) {
 			ret = mtd->read(mtd,
 					(inftl->EraseSize * BlockMap[block]) +
 					(block * SECTORSIZE), SECTORSIZE,
 					&retlen, movebuf);
 			if (ret != -EIO)
-				DEBUG(MTD_DEBUG_LEVEL1, "INFTL: error went "
-				      "away on retry?\n");
+				pr_debug("INFTL: error went away on retry?\n");
 		}
 		memset(&oob, 0xff, sizeof(struct inftl_oob));
 		oob.b.Status = oob.b.Status1 = SECTOR_USED;
@@ -372,8 +368,7 @@
 	 * is important, by doing oldest first if we crash/reboot then it
 	 * is relatively simple to clean up the mess).
 	 */
-	DEBUG(MTD_DEBUG_LEVEL1, "INFTL: want to erase virtual chain %d\n",
-		thisVUC);
+	pr_debug("INFTL: want to erase virtual chain %d\n", thisVUC);
 
 	for (;;) {
 		/* Find oldest unit in chain. */
@@ -421,7 +416,7 @@
 	u16 ChainLength = 0, thislen;
 	u16 chain, EUN;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_makefreeblock(inftl=%p,"
+	pr_debug("INFTL: INFTL_makefreeblock(inftl=%p,"
 		"pending=%d)\n", inftl, pendingblock);
 
 	for (chain = 0; chain < inftl->nb_blocks; chain++) {
@@ -484,8 +479,8 @@
 	size_t retlen;
 	int silly, silly2 = 3;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_findwriteunit(inftl=%p,"
-		"block=%d)\n", inftl, block);
+	pr_debug("INFTL: INFTL_findwriteunit(inftl=%p,block=%d)\n",
+			inftl, block);
 
 	do {
 		/*
@@ -501,8 +496,8 @@
 				       blockofs, 8, &retlen, (char *)&bci);
 
 			status = bci.Status | bci.Status1;
-			DEBUG(MTD_DEBUG_LEVEL3, "INFTL: status of block %d in "
-				"EUN %d is %x\n", block , writeEUN, status);
+			pr_debug("INFTL: status of block %d in EUN %d is %x\n",
+					block , writeEUN, status);
 
 			switch(status) {
 			case SECTOR_FREE:
@@ -555,9 +550,9 @@
 			 * Hopefully we free something, let's try again.
 			 * This time we are desperate...
 			 */
-			DEBUG(MTD_DEBUG_LEVEL1, "INFTL: using desperate==1 "
-				"to find free EUN to accommodate write to "
-				"VUC %d\n", thisVUC);
+			pr_debug("INFTL: using desperate==1 to find free EUN "
+					"to accommodate write to VUC %d\n",
+					thisVUC);
 			writeEUN = INFTL_findfreeblock(inftl, 1);
 			if (writeEUN == BLOCK_NIL) {
 				/*
@@ -647,7 +642,7 @@
 	struct inftl_bci bci;
 	size_t retlen;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_trydeletechain(inftl=%p,"
+	pr_debug("INFTL: INFTL_trydeletechain(inftl=%p,"
 		"thisVUC=%d)\n", inftl, thisVUC);
 
 	memset(BlockUsed, 0, sizeof(BlockUsed));
@@ -711,7 +706,7 @@
 	 * For each block in the chain free it and make it available
 	 * for future use. Erase from the oldest unit first.
 	 */
-	DEBUG(MTD_DEBUG_LEVEL1, "INFTL: deleting empty VUC %d\n", thisVUC);
+	pr_debug("INFTL: deleting empty VUC %d\n", thisVUC);
 
 	for (;;) {
 		u16 *prevEUN = &inftl->VUtable[thisVUC];
@@ -719,7 +714,7 @@
 
 		/* If the chain is all gone already, we're done */
 		if (thisEUN == BLOCK_NIL) {
-			DEBUG(MTD_DEBUG_LEVEL2, "INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
+			pr_debug("INFTL: Empty VUC %d for deletion was already absent\n", thisEUN);
 			return;
 		}
 
@@ -731,7 +726,7 @@
 			thisEUN = *prevEUN;
 		}
 
-		DEBUG(MTD_DEBUG_LEVEL3, "Deleting EUN %d from VUC %d\n",
+		pr_debug("Deleting EUN %d from VUC %d\n",
 		      thisEUN, thisVUC);
 
 		if (INFTL_formatblock(inftl, thisEUN) < 0) {
@@ -767,7 +762,7 @@
 	size_t retlen;
 	struct inftl_bci bci;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_deleteblock(inftl=%p,"
+	pr_debug("INFTL: INFTL_deleteblock(inftl=%p,"
 		"block=%d)\n", inftl, block);
 
 	while (thisEUN < inftl->nb_blocks) {
@@ -826,7 +821,7 @@
 	struct inftl_oob oob;
 	char *p, *pend;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_writeblock(inftl=%p,block=%ld,"
+	pr_debug("INFTL: inftl_writeblock(inftl=%p,block=%ld,"
 		"buffer=%p)\n", inftl, block, buffer);
 
 	/* Is block all zero? */
@@ -876,7 +871,7 @@
 	struct inftl_bci bci;
 	size_t retlen;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: inftl_readblock(inftl=%p,block=%ld,"
+	pr_debug("INFTL: inftl_readblock(inftl=%p,block=%ld,"
 		"buffer=%p)\n", inftl, block, buffer);
 
 	while (thisEUN < inftl->nb_blocks) {
@@ -922,7 +917,7 @@
 		int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
 
 		/* Handle corrected bit flips gracefully */
-		if (ret < 0 && ret != -EUCLEAN)
+		if (ret < 0 && !mtd_is_bitflip(ret))
 			return -EIO;
 	}
 	return 0;
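
Two API renames meet in this file: the OOB operation mode (MTD_OOB_PLACE
becomes MTD_OPS_PLACE_OOB) and the corrected-bitflip test (open-coded
ret != -EUCLEAN comparisons become mtd_is_bitflip()). A sketch of both in
one read helper, assuming the 3.2 definitions (mtd_is_bitflip() is a simple
err == -EUCLEAN inline):

    #include <linux/mtd/mtd.h>

    /* sketch: read a few OOB bytes, tolerating corrected bit-flips */
    static int example_read_oob(struct mtd_info *mtd, loff_t offs,
                                size_t len, uint8_t *buf)
    {
            struct mtd_oob_ops ops;
            int ret;

            ops.mode = MTD_OPS_PLACE_OOB;   /* renamed from MTD_OOB_PLACE */
            ops.ooboffs = offs & (mtd->writesize - 1);
            ops.ooblen = len;
            ops.oobbuf = buf;
            ops.datbuf = NULL;              /* OOB only, no main-area data */

            ret = mtd->read_oob(mtd, offs & ~(loff_t)(mtd->writesize - 1),
                                &ops);

            /* -EUCLEAN means bit-flips were corrected: data is still good */
            if (ret < 0 && !mtd_is_bitflip(ret))
                    return ret;             /* a real I/O error */
            return 0;
    }
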
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 104052e..2ff601f 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -53,7 +53,7 @@
 	struct INFTLPartition *ip;
 	size_t retlen;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
+	pr_debug("INFTL: find_boot_record(inftl=%p)\n", inftl);
 
         /*
 	 * Assume logical EraseSize == physical erasesize for starting the
@@ -139,24 +139,20 @@
 		mh->FormatFlags = le32_to_cpu(mh->FormatFlags);
 		mh->PercentUsed = le32_to_cpu(mh->PercentUsed);
 
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
-		if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
-			printk("INFTL: Media Header ->\n"
-				"    bootRecordID          = %s\n"
-				"    NoOfBootImageBlocks   = %d\n"
-				"    NoOfBinaryPartitions  = %d\n"
-				"    NoOfBDTLPartitions    = %d\n"
-				"    BlockMultiplerBits    = %d\n"
-				"    FormatFlgs            = %d\n"
-				"    OsakVersion           = 0x%x\n"
-				"    PercentUsed           = %d\n",
-				mh->bootRecordID, mh->NoOfBootImageBlocks,
-				mh->NoOfBinaryPartitions,
-				mh->NoOfBDTLPartitions,
-				mh->BlockMultiplierBits, mh->FormatFlags,
-				mh->OsakVersion, mh->PercentUsed);
-		}
-#endif
+		pr_debug("INFTL: Media Header ->\n"
+			 "    bootRecordID          = %s\n"
+			 "    NoOfBootImageBlocks   = %d\n"
+			 "    NoOfBinaryPartitions  = %d\n"
+			 "    NoOfBDTLPartitions    = %d\n"
+			 "    BlockMultiplerBits    = %d\n"
+			 "    FormatFlgs            = %d\n"
+			 "    OsakVersion           = 0x%x\n"
+			 "    PercentUsed           = %d\n",
+			 mh->bootRecordID, mh->NoOfBootImageBlocks,
+			 mh->NoOfBinaryPartitions,
+			 mh->NoOfBDTLPartitions,
+			 mh->BlockMultiplierBits, mh->FormatFlags,
+			 mh->OsakVersion, mh->PercentUsed);
 
 		if (mh->NoOfBDTLPartitions == 0) {
 			printk(KERN_WARNING "INFTL: Media Header sanity check "
@@ -200,19 +196,15 @@
 			ip->spareUnits = le32_to_cpu(ip->spareUnits);
 			ip->Reserved0 = le32_to_cpu(ip->Reserved0);
 
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
-			if (CONFIG_MTD_DEBUG_VERBOSE >= 2) {
-				printk("    PARTITION[%d] ->\n"
-					"        virtualUnits    = %d\n"
-					"        firstUnit       = %d\n"
-					"        lastUnit        = %d\n"
-					"        flags           = 0x%x\n"
-					"        spareUnits      = %d\n",
-					i, ip->virtualUnits, ip->firstUnit,
-					ip->lastUnit, ip->flags,
-					ip->spareUnits);
-			}
-#endif
+			pr_debug("    PARTITION[%d] ->\n"
+				 "        virtualUnits    = %d\n"
+				 "        firstUnit       = %d\n"
+				 "        lastUnit        = %d\n"
+				 "        flags           = 0x%x\n"
+				 "        spareUnits      = %d\n",
+				 i, ip->virtualUnits, ip->firstUnit,
+				 ip->lastUnit, ip->flags,
+				 ip->spareUnits);
 
 			if (ip->Reserved0 != ip->firstUnit) {
 				struct erase_info *instr = &inftl->instr;
@@ -375,7 +367,7 @@
  *
  * Return: 0 when succeed, -1 on error.
  *
- * ToDo: 1. Is it neceressary to check_free_sector after erasing ??
+ * ToDo: 1. Is it necessary to check_free_sector after erasing ??
  */
 int INFTL_formatblock(struct INFTLrecord *inftl, int block)
 {
@@ -385,8 +377,7 @@
 	struct mtd_info *mtd = inftl->mbd.mtd;
 	int physblock;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_formatblock(inftl=%p,"
-		"block=%d)\n", inftl, block);
+	pr_debug("INFTL: INFTL_formatblock(inftl=%p,block=%d)\n", inftl, block);
 
 	memset(instr, 0, sizeof(struct erase_info));
 
@@ -476,30 +467,30 @@
 {
 	int i;
 
-	printk("-------------------------------------------"
+	pr_debug("-------------------------------------------"
 		"----------------------------------\n");
 
-	printk("VUtable[%d] ->", s->nb_blocks);
+	pr_debug("VUtable[%d] ->", s->nb_blocks);
 	for (i = 0; i < s->nb_blocks; i++) {
 		if ((i % 8) == 0)
-			printk("\n%04x: ", i);
-		printk("%04x ", s->VUtable[i]);
+			pr_debug("\n%04x: ", i);
+		pr_debug("%04x ", s->VUtable[i]);
 	}
 
-	printk("\n-------------------------------------------"
+	pr_debug("\n-------------------------------------------"
 		"----------------------------------\n");
 
-	printk("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
+	pr_debug("PUtable[%d-%d=%d] ->", s->firstEUN, s->lastEUN, s->nb_blocks);
 	for (i = 0; i <= s->lastEUN; i++) {
 		if ((i % 8) == 0)
-			printk("\n%04x: ", i);
-		printk("%04x ", s->PUtable[i]);
+			pr_debug("\n%04x: ", i);
+		pr_debug("%04x ", s->PUtable[i]);
 	}
 
-	printk("\n-------------------------------------------"
+	pr_debug("\n-------------------------------------------"
 		"----------------------------------\n");
 
-	printk("INFTL ->\n"
+	pr_debug("INFTL ->\n"
 		"  EraseSize       = %d\n"
 		"  h/s/c           = %d/%d/%d\n"
 		"  numvunits       = %d\n"
@@ -513,7 +504,7 @@
 		s->numvunits, s->firstEUN, s->lastEUN, s->numfreeEUNs,
 		s->LastFreeEUN, s->nb_blocks, s->nb_boot_blocks);
 
-	printk("\n-------------------------------------------"
+	pr_debug("\n-------------------------------------------"
 		"----------------------------------\n");
 }
 
@@ -521,25 +512,25 @@
 {
 	int logical, block, i;
 
-	printk("-------------------------------------------"
+	pr_debug("-------------------------------------------"
 		"----------------------------------\n");
 
-	printk("INFTL Virtual Unit Chains:\n");
+	pr_debug("INFTL Virtual Unit Chains:\n");
 	for (logical = 0; logical < s->nb_blocks; logical++) {
 		block = s->VUtable[logical];
 		if (block > s->nb_blocks)
 			continue;
-		printk("  LOGICAL %d --> %d ", logical, block);
+		pr_debug("  LOGICAL %d --> %d ", logical, block);
 		for (i = 0; i < s->nb_blocks; i++) {
 			if (s->PUtable[block] == BLOCK_NIL)
 				break;
 			block = s->PUtable[block];
-			printk("%d ", block);
+			pr_debug("%d ", block);
 		}
-		printk("\n");
+		pr_debug("\n");
 	}
 
-	printk("-------------------------------------------"
+	pr_debug("-------------------------------------------"
 		"----------------------------------\n");
 }
 
@@ -555,7 +546,7 @@
 	int i;
 	u8 *ANACtable, ANAC;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_mount(inftl=%p)\n", s);
+	pr_debug("INFTL: INFTL_mount(inftl=%p)\n", s);
 
 	/* Search for INFTL MediaHeader and Spare INFTL Media Header */
 	if (find_boot_record(s) < 0) {
@@ -585,7 +576,7 @@
 	 * NOTEXPLORED state. Then at the end we will try to format it and
 	 * mark it as free.
 	 */
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 1, explore each unit\n");
+	pr_debug("INFTL: pass 1, explore each unit\n");
 	for (first_block = s->firstEUN; first_block <= s->lastEUN; first_block++) {
 		if (s->PUtable[first_block] != BLOCK_NOTEXPLORED)
 			continue;
@@ -717,17 +708,14 @@
 		logical_block = BLOCK_NIL;
 	}
 
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
-	if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
-		INFTL_dumptables(s);
-#endif
+	INFTL_dumptables(s);
 
 	/*
 	 * Second pass, check for infinite loops in chains. These are
 	 * possible because we don't update the previous pointers when
 	 * we fold chains. No big deal, just fix them up in PUtable.
 	 */
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 2, validate virtual chains\n");
+	pr_debug("INFTL: pass 2, validate virtual chains\n");
 	for (logical_block = 0; logical_block < s->numvunits; logical_block++) {
 		block = s->VUtable[logical_block];
 		last_block = BLOCK_NIL;
@@ -772,12 +760,8 @@
 		}
 	}
 
-#ifdef CONFIG_MTD_DEBUG_VERBOSE
-	if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
-		INFTL_dumptables(s);
-	if (CONFIG_MTD_DEBUG_VERBOSE >= 2)
-		INFTL_dumpVUchains(s);
-#endif
+	INFTL_dumptables(s);
+	INFTL_dumpVUchains(s);
 
 	/*
 	 * Third pass, format unreferenced blocks and init free block count.
@@ -785,7 +769,7 @@
 	s->numfreeEUNs = 0;
 	s->LastFreeEUN = BLOCK_NIL;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "INFTL: pass 3, format unused blocks\n");
+	pr_debug("INFTL: pass 3, format unused blocks\n");
 	for (block = s->firstEUN; block <= s->lastEUN; block++) {
 		if (s->PUtable[block] == BLOCK_NOTEXPLORED) {
 			printk("INFTL: unreferenced block %d, formatting it\n",
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index c0c328c..8e0c4bf9f 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -41,8 +41,6 @@
 	  are mapped on your particular target board. Refer to the
 	  memory map which should hopefully be in the documentation for
 	  your board.
-	  Ignore this option if you use run-time physmap configuration
-	  (i.e., run-time calling physmap_configure()).
 
 config MTD_PHYSMAP_LEN
 	hex "Physical length of flash mapping"
@@ -55,8 +53,6 @@
 	  than the total amount of flash present. Refer to the memory
 	  map which should hopefully be in the documentation for your
 	  board.
-	  Ignore this option if you use run-time physmap configuration
-	  (i.e., run-time calling physmap_configure()).
 
 config MTD_PHYSMAP_BANKWIDTH
 	int "Bank width in octets"
@@ -67,8 +63,6 @@
 	  in octets. For example, if you have a data bus width of 32
 	  bits, you would set the bus width octet value to 4. This is
 	  used internally by the CFI drivers.
-	  Ignore this option if you use run-time physmap configuration
-	  (i.e., run-time calling physmap_configure()).
 
 config MTD_PHYSMAP_OF
 	tristate "Flash device in physical memory map based on OF description"
@@ -260,7 +254,6 @@
 config MTD_LANTIQ
 	tristate "Lantiq SoC NOR support"
 	depends on LANTIQ
-	select MTD_PARTITIONS
 	help
 	  Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
 
@@ -339,10 +332,6 @@
 	  This enables access to the flash chips on the Hitachi SolutionEngine and
 	  similar boards. Say 'Y' if you are building a kernel for such a board.
 
-config MTD_ARM_INTEGRATOR
-	tristate "CFI Flash device mapped on ARM Integrator/P720T"
-	depends on ARM && MTD_CFI
-
 config MTD_CDB89712
 	tristate "Cirrus CDB89712 evaluation board mappings"
 	depends on MTD_CFI && ARCH_CDB89712
@@ -398,13 +387,6 @@
 	  This enables access to the NV-RAM on autronix autcpu12 board.
 	  If you have such a board, say 'Y'.
 
-config MTD_EDB7312
-	tristate "CFI Flash device mapped on EDB7312"
-	depends on ARCH_EDB7312 && MTD_CFI
-	help
-	  This enables access to the CFI Flash on the Cogent EDB7312 board.
-	  If you have such a board, say 'Y' here.
-
 config MTD_IMPA7
 	tristate "JEDEC Flash device mapped on impA7"
 	depends on ARM && MTD_JEDECPROBE
@@ -412,14 +394,6 @@
 	  This enables access to the NOR Flash on the impA7 board of
 	  implementa GmbH. If you have such a board, say 'Y' here.
 
-config MTD_CEIVA
-	tristate "JEDEC Flash device mapped on Ceiva/Polaroid PhotoMax Digital Picture Frame"
-	depends on MTD_JEDECPROBE && ARCH_CEIVA
-	help
-	  This enables access to the flash chips on the Ceiva/Polaroid
-	  PhotoMax Digital Picture Frame.
-	  If you have such a device, say 'Y'.
-
 config MTD_H720X
 	tristate "Hynix evaluation board mappings"
 	depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 )
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index cb48b11..45dcb8b 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -19,7 +19,6 @@
 obj-$(CONFIG_MTD_TSUNAMI)	+= tsunami_flash.o
 obj-$(CONFIG_MTD_PXA2XX)	+= pxa2xx-flash.o
 obj-$(CONFIG_MTD_MBX860)	+= mbx860.o
-obj-$(CONFIG_MTD_CEIVA)		+= ceiva.o
 obj-$(CONFIG_MTD_OCTAGON)	+= octagon-5066.o
 obj-$(CONFIG_MTD_PHYSMAP)	+= physmap.o
 obj-$(CONFIG_MTD_PHYSMAP_OF)	+= physmap_of.o
@@ -40,7 +39,6 @@
 obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
 obj-$(CONFIG_MTD_PCI)		+= pci.o
 obj-$(CONFIG_MTD_AUTCPU12)	+= autcpu12-nvram.o
-obj-$(CONFIG_MTD_EDB7312)	+= edb7312.o
 obj-$(CONFIG_MTD_IMPA7)		+= impa7.o
 obj-$(CONFIG_MTD_FORTUNET)	+= fortunet.o
 obj-$(CONFIG_MTD_UCLINUX)	+= uclinux.o
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 67815ee..6d6b2b5 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -41,7 +41,6 @@
 	uint32_t flash_ambctl0, flash_ambctl1;
 	uint32_t save_ambctl0, save_ambctl1;
 	unsigned long irq_flags;
-	struct mtd_partition *parts;
 };
 
 static void switch_to_flash(struct async_state *state)
@@ -165,18 +164,8 @@
 		return -ENXIO;
 	}
 
-	ret = parse_mtd_partitions(state->mtd, part_probe_types, &pdata->parts, 0);
-	if (ret > 0) {
-		pr_devinit(KERN_NOTICE DRIVER_NAME ": Using commandline partition definition\n");
-		mtd_device_register(state->mtd, pdata->parts, ret);
-		state->parts = pdata->parts;
-	} else if (pdata->nr_parts) {
-		pr_devinit(KERN_NOTICE DRIVER_NAME ": Using board partition definition\n");
-		mtd_device_register(state->mtd, pdata->parts, pdata->nr_parts);
-	} else {
-		pr_devinit(KERN_NOTICE DRIVER_NAME ": no partition info available, registering whole flash at once\n");
-		mtd_device_register(state->mtd, NULL, 0);
-	}
+	mtd_device_parse_register(state->mtd, part_probe_types, 0,
+			pdata->parts, pdata->nr_parts);
 
 	platform_set_drvdata(pdev, state);
 
@@ -188,7 +177,6 @@
 	struct async_state *state = platform_get_drvdata(pdev);
 	gpio_free(state->enet_flash_pin);
 	mtd_device_unregister(state->mtd);
-	kfree(state->parts);
 	map_destroy(state->mtd);
 	kfree(state);
 	return 0;
diff --git a/drivers/mtd/maps/ceiva.c b/drivers/mtd/maps/ceiva.c
deleted file mode 100644
index 06f9c98..0000000
--- a/drivers/mtd/maps/ceiva.c
+++ /dev/null
@@ -1,341 +0,0 @@
-/*
- * Ceiva flash memory driver.
- * Copyright (C) 2002 Rob Scott <rscott@mtrob.fdns.net>
- *
- * Note: this driver supports jedec compatible devices. Modification
- * for CFI compatible devices should be straight forward: change
- * jedec_probe to cfi_probe.
- *
- * Based on: sa1100-flash.c, which has the following copyright:
- * Flash memory access on SA11x0 based devices
- *
- * (C) 2000 Nicolas Pitre <nico@fluxnic.net>
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/concat.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/io.h>
-#include <asm/sizes.h>
-
-/*
- * This isn't complete yet, so...
- */
-#define CONFIG_MTD_CEIVA_STATICMAP
-
-#ifdef CONFIG_MTD_CEIVA_STATICMAP
-/*
- * See include/linux/mtd/partitions.h for definition of the mtd_partition
- * structure.
- *
- * Please note:
- *  1. The flash size given should be the largest flash size that can
- *     be accommodated.
- *
- *  2. The bus width must defined in clps_setup_flash.
- *
- * The MTD layer will detect flash chip aliasing and reduce the size of
- * the map accordingly.
- *
- */
-
-#ifdef CONFIG_ARCH_CEIVA
-/* Flash / Partition sizing */
-/* For the 28F8003, we use the block mapping to calcuate the sizes */
-#define MAX_SIZE_KiB                  (16 + 8 + 8 + 96 + (7*128))
-#define BOOT_PARTITION_SIZE_KiB       (16)
-#define PARAMS_PARTITION_SIZE_KiB     (8)
-#define KERNEL_PARTITION_SIZE_KiB     (4*128)
-/* Use both remaining portion of first flash, and all of second flash */
-#define ROOT_PARTITION_SIZE_KiB       (3*128) + (8*128)
-
-static struct mtd_partition ceiva_partitions[] = {
-	{
-		.name = "Ceiva BOOT partition",
-		.size   = BOOT_PARTITION_SIZE_KiB*1024,
-		.offset = 0,
-
-	},{
-		.name = "Ceiva parameters partition",
-		.size   = PARAMS_PARTITION_SIZE_KiB*1024,
-		.offset = (16 + 8) * 1024,
-	},{
-		.name = "Ceiva kernel partition",
-		.size = (KERNEL_PARTITION_SIZE_KiB)*1024,
-		.offset = 0x20000,
-
-	},{
-		.name = "Ceiva root filesystem partition",
-		.offset = MTDPART_OFS_APPEND,
-		.size = (ROOT_PARTITION_SIZE_KiB)*1024,
-	}
-};
-#endif
-
-static int __init clps_static_partitions(struct mtd_partition **parts)
-{
-	int nb_parts = 0;
-
-#ifdef CONFIG_ARCH_CEIVA
-	if (machine_is_ceiva()) {
-		*parts       = ceiva_partitions;
-		nb_parts     = ARRAY_SIZE(ceiva_partitions);
-	}
-#endif
-	return nb_parts;
-}
-#endif
-
-struct clps_info {
-	unsigned long base;
-	unsigned long size;
-	int width;
-	void *vbase;
-	struct map_info *map;
-	struct mtd_info *mtd;
-	struct resource *res;
-};
-
-#define NR_SUBMTD 4
-
-static struct clps_info info[NR_SUBMTD];
-
-static int __init clps_setup_mtd(struct clps_info *clps, int nr, struct mtd_info **rmtd)
-{
-	struct mtd_info *subdev[nr];
-	struct map_info *maps;
-	int i, found = 0, ret = 0;
-
-	/*
-	 * Allocate the map_info structs in one go.
-	 */
-	maps = kzalloc(sizeof(struct map_info) * nr, GFP_KERNEL);
-	if (!maps)
-		return -ENOMEM;
-	/*
-	 * Claim and then map the memory regions.
-	 */
-	for (i = 0; i < nr; i++) {
-		if (clps[i].base == (unsigned long)-1)
-			break;
-
-		clps[i].res = request_mem_region(clps[i].base, clps[i].size, "clps flash");
-		if (!clps[i].res) {
-			ret = -EBUSY;
-			break;
-		}
-
-		clps[i].map = maps + i;
-
-		clps[i].map->name = "clps flash";
-		clps[i].map->phys = clps[i].base;
-
-		clps[i].vbase = ioremap(clps[i].base, clps[i].size);
-		if (!clps[i].vbase) {
-			ret = -ENOMEM;
-			break;
-		}
-
-		clps[i].map->virt = (void __iomem *)clps[i].vbase;
-		clps[i].map->bankwidth = clps[i].width;
-		clps[i].map->size = clps[i].size;
-
-		simple_map_init(&clps[i].map);
-
-		clps[i].mtd = do_map_probe("jedec_probe", clps[i].map);
-		if (clps[i].mtd == NULL) {
-			ret = -ENXIO;
-			break;
-		}
-		clps[i].mtd->owner = THIS_MODULE;
-		subdev[i] = clps[i].mtd;
-
-		printk(KERN_INFO "clps flash: JEDEC device at 0x%08lx, %dMiB, "
-			"%d-bit\n", clps[i].base, clps[i].mtd->size >> 20,
-			clps[i].width * 8);
-		found += 1;
-	}
-
-	/*
-	 * ENXIO is special.  It means we didn't find a chip when
-	 * we probed.  We need to tear down the mapping, free the
-	 * resource and mark it as such.
-	 */
-	if (ret == -ENXIO) {
-		iounmap(clps[i].vbase);
-		clps[i].vbase = NULL;
-		release_resource(clps[i].res);
-		clps[i].res = NULL;
-	}
-
-	/*
-	 * If we found one device, don't bother with concat support.
-	 * If we found multiple devices, use concat if we have it
-	 * available, otherwise fail.
-	 */
-	if (ret == 0 || ret == -ENXIO) {
-		if (found == 1) {
-			*rmtd = subdev[0];
-			ret = 0;
-		} else if (found > 1) {
-			/*
-			 * We detected multiple devices.  Concatenate
-			 * them together.
-			 */
-			*rmtd = mtd_concat_create(subdev, found,
-						  "clps flash");
-			if (*rmtd == NULL)
-				ret = -ENXIO;
-		}
-	}
-
-	/*
-	 * If we failed, clean up.
-	 */
-	if (ret) {
-		do {
-			if (clps[i].mtd)
-				map_destroy(clps[i].mtd);
-			if (clps[i].vbase)
-				iounmap(clps[i].vbase);
-			if (clps[i].res)
-				release_resource(clps[i].res);
-		} while (i--);
-
-		kfree(maps);
-	}
-
-	return ret;
-}
-
-static void __exit clps_destroy_mtd(struct clps_info *clps, struct mtd_info *mtd)
-{
-	int i;
-
-	mtd_device_unregister(mtd);
-
-	if (mtd != clps[0].mtd)
-		mtd_concat_destroy(mtd);
-
-	for (i = NR_SUBMTD; i >= 0; i--) {
-		if (clps[i].mtd)
-			map_destroy(clps[i].mtd);
-		if (clps[i].vbase)
-			iounmap(clps[i].vbase);
-		if (clps[i].res)
-			release_resource(clps[i].res);
-	}
-	kfree(clps[0].map);
-}
-
-/*
- * We define the memory space, size, and width for the flash memory
- * space here.
- */
-
-static int __init clps_setup_flash(void)
-{
-	int nr = 0;
-
-#ifdef CONFIG_ARCH_CEIVA
-	if (machine_is_ceiva()) {
-		info[0].base = CS0_PHYS_BASE;
-		info[0].size = SZ_32M;
-		info[0].width = CEIVA_FLASH_WIDTH;
-		info[1].base = CS1_PHYS_BASE;
-		info[1].size = SZ_32M;
-		info[1].width = CEIVA_FLASH_WIDTH;
-		nr = 2;
-	}
-#endif
-	return nr;
-}
-
-static struct mtd_partition *parsed_parts;
-static const char *probes[] = { "cmdlinepart", "RedBoot", NULL };
-
-static void __init clps_locate_partitions(struct mtd_info *mtd)
-{
-	const char *part_type = NULL;
-	int nr_parts = 0;
-	do {
-		/*
-		 * Partition selection stuff.
-		 */
-		nr_parts = parse_mtd_partitions(mtd, probes, &parsed_parts, 0);
-		if (nr_parts > 0) {
-			part_type = "command line";
-			break;
-		}
-#ifdef CONFIG_MTD_CEIVA_STATICMAP
-		nr_parts = clps_static_partitions(&parsed_parts);
-		if (nr_parts > 0) {
-			part_type = "static";
-			break;
-		}
-		printk("found: %d partitions\n", nr_parts);
-#endif
-	} while (0);
-
-	if (nr_parts == 0) {
-		printk(KERN_NOTICE "clps flash: no partition info "
-			"available, registering whole flash\n");
-		mtd_device_register(mtd, NULL, 0);
-	} else {
-		printk(KERN_NOTICE "clps flash: using %s partition "
-			"definition\n", part_type);
-		mtd_device_register(mtd, parsed_parts, nr_parts);
-	}
-
-	/* Always succeeds. */
-}
-
-static void __exit clps_destroy_partitions(void)
-{
-	kfree(parsed_parts);
-}
-
-static struct mtd_info *mymtd;
-
-static int __init clps_mtd_init(void)
-{
-	int ret;
-	int nr;
-
-	nr = clps_setup_flash();
-	if (nr < 0)
-		return nr;
-
-	ret = clps_setup_mtd(info, nr, &mymtd);
-	if (ret)
-		return ret;
-
-	clps_locate_partitions(mymtd);
-
-	return 0;
-}
-
-static void __exit clps_mtd_cleanup(void)
-{
-	clps_destroy_mtd(info, mymtd);
-	clps_destroy_partitions();
-}
-
-module_init(clps_mtd_init);
-module_exit(clps_mtd_cleanup);
-
-MODULE_AUTHOR("Rob Scott");
-MODULE_DESCRIPTION("Cirrus Logic JEDEC map driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/dc21285.c b/drivers/mtd/maps/dc21285.c
index 7a9e198..f43b365 100644
--- a/drivers/mtd/maps/dc21285.c
+++ b/drivers/mtd/maps/dc21285.c
@@ -145,14 +145,10 @@
 
 
 /* Partition stuff */
-static struct mtd_partition *dc21285_parts;
 static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
 
 static int __init init_dc21285(void)
 {
-
-	int nrparts;
-
 	/* Determine bankwidth */
 	switch (*CSR_SA110_CNTL & (3<<14)) {
 		case SA110_CNTL_ROMWIDTH_8:
@@ -200,8 +196,7 @@
 
 	dc21285_mtd->owner = THIS_MODULE;
 
-	nrparts = parse_mtd_partitions(dc21285_mtd, probes, &dc21285_parts, 0);
-	mtd_device_register(dc21285_mtd, dc21285_parts, nrparts);
+	mtd_device_parse_register(dc21285_mtd, probes, 0, NULL, 0);
 
 	if(machine_is_ebsa285()) {
 		/*
@@ -224,8 +219,6 @@
 static void __exit cleanup_dc21285(void)
 {
 	mtd_device_unregister(dc21285_mtd);
-	if (dc21285_parts)
-		kfree(dc21285_parts);
 	map_destroy(dc21285_mtd);
 	iounmap(dc21285_map.virt);
 }
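
dc21285.c can drop both the saved dc21285_parts pointer and the kfree() at
cleanup because mtd_device_parse_register() duplicates and frees any parsed
partition array internally; the caller never owns it. Assuming that 3.2
behaviour, teardown shrinks to a sketch like this (names hypothetical):

    #include <linux/module.h>
    #include <linux/io.h>
    #include <linux/mtd/mtd.h>
    #include <linux/mtd/map.h>

    static struct mtd_info *example_mtd;    /* set up at probe time */
    static struct map_info example_map;

    static void __exit example_cleanup(void)
    {
            /* no kfree() of a parsed partition array: the core owned it */
            mtd_device_unregister(example_mtd);
            map_destroy(example_mtd);
            iounmap(example_map.virt);
    }
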
diff --git a/drivers/mtd/maps/edb7312.c b/drivers/mtd/maps/edb7312.c
deleted file mode 100644
index fe42a21..0000000
--- a/drivers/mtd/maps/edb7312.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Handle mapping of the NOR flash on Cogent EDB7312 boards
- *
- * Copyright 2002 SYSGO Real-Time Solutions GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#define WINDOW_ADDR 0x00000000      /* physical properties of flash */
-#define WINDOW_SIZE 0x01000000
-#define BUSWIDTH    2
-#define FLASH_BLOCKSIZE_MAIN	0x20000
-#define FLASH_NUMBLOCKS_MAIN	128
-/* can be "cfi_probe", "jedec_probe", "map_rom", NULL }; */
-#define PROBETYPES { "cfi_probe", NULL }
-
-#define MSG_PREFIX "EDB7312-NOR:"   /* prefix for our printk()'s */
-#define MTDID      "edb7312-nor"    /* for mtdparts= partitioning */
-
-static struct mtd_info *mymtd;
-
-struct map_info edb7312nor_map = {
-	.name = "NOR flash on EDB7312",
-	.size = WINDOW_SIZE,
-	.bankwidth = BUSWIDTH,
-	.phys = WINDOW_ADDR,
-};
-
-/*
- * MTD partitioning stuff
- */
-static struct mtd_partition static_partitions[3] =
-{
-	{
-		.name = "ARMboot",
-		.size = 0x40000,
-		.offset = 0
-	},
-	{
-		.name = "Kernel",
-		.size = 0x200000,
-		.offset = 0x40000
-	},
-	{
-		.name = "RootFS",
-		.size = 0xDC0000,
-		.offset = 0x240000
-	},
-};
-
-static const char *probes[] = { "RedBoot", "cmdlinepart", NULL };
-
-static int                   mtd_parts_nb = 0;
-static struct mtd_partition *mtd_parts    = 0;
-
-static int __init init_edb7312nor(void)
-{
-	static const char *rom_probe_types[] = PROBETYPES;
-	const char **type;
-	const char *part_type = 0;
-
-       	printk(KERN_NOTICE MSG_PREFIX "0x%08x at 0x%08x\n",
-	       WINDOW_SIZE, WINDOW_ADDR);
-	edb7312nor_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE);
-
-	if (!edb7312nor_map.virt) {
-		printk(MSG_PREFIX "failed to ioremap\n");
-		return -EIO;
-	}
-
-	simple_map_init(&edb7312nor_map);
-
-	mymtd = 0;
-	type = rom_probe_types;
-	for(; !mymtd && *type; type++) {
-		mymtd = do_map_probe(*type, &edb7312nor_map);
-	}
-	if (mymtd) {
-		mymtd->owner = THIS_MODULE;
-
-		mtd_parts_nb = parse_mtd_partitions(mymtd, probes, &mtd_parts, MTDID);
-		if (mtd_parts_nb > 0)
-			part_type = "detected";
-
-		if (mtd_parts_nb == 0) {
-			mtd_parts = static_partitions;
-			mtd_parts_nb = ARRAY_SIZE(static_partitions);
-			part_type = "static";
-		}
-
-		if (mtd_parts_nb == 0)
-			printk(KERN_NOTICE MSG_PREFIX "no partition info available\n");
-		else
-			printk(KERN_NOTICE MSG_PREFIX
-			       "using %s partition definition\n", part_type);
-		/* Register the whole device first. */
-		mtd_device_register(mymtd, NULL, 0);
-		mtd_device_register(mymtd, mtd_parts, mtd_parts_nb);
-		return 0;
-	}
-
-	iounmap((void *)edb7312nor_map.virt);
-	return -ENXIO;
-}
-
-static void __exit cleanup_edb7312nor(void)
-{
-	if (mymtd) {
-		mtd_device_unregister(mymtd);
-		map_destroy(mymtd);
-	}
-	if (edb7312nor_map.virt) {
-		iounmap((void *)edb7312nor_map.virt);
-		edb7312nor_map.virt = 0;
-	}
-}
-
-module_init(init_edb7312nor);
-module_exit(cleanup_edb7312nor);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
-MODULE_DESCRIPTION("Generic configurable MTD map driver");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 7568c5f..1ec66f0 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -187,7 +187,6 @@
  */
 static int __devinit gpio_flash_probe(struct platform_device *pdev)
 {
-	int nr_parts;
 	size_t i, arr_size;
 	struct physmap_flash_data *pdata;
 	struct resource *memory;
@@ -252,20 +251,9 @@
 		return -ENXIO;
 	}
 
-	nr_parts = parse_mtd_partitions(state->mtd, part_probe_types,
-					&pdata->parts, 0);
-	if (nr_parts > 0) {
-		pr_devinit(KERN_NOTICE PFX "Using commandline partition definition\n");
-		kfree(pdata->parts);
-	} else if (pdata->nr_parts) {
-		pr_devinit(KERN_NOTICE PFX "Using board partition definition\n");
-		nr_parts = pdata->nr_parts;
-	} else {
-		pr_devinit(KERN_NOTICE PFX "no partition info available, registering whole flash at once\n");
-		nr_parts = 0;
-	}
 
-	mtd_device_register(state->mtd, pdata->parts, nr_parts);
+	mtd_device_parse_register(state->mtd, part_probe_types, 0,
+			pdata->parts, pdata->nr_parts);
 
 	return 0;
 }
diff --git a/drivers/mtd/maps/h720x-flash.c b/drivers/mtd/maps/h720x-flash.c
index 7f03586..49c1418 100644
--- a/drivers/mtd/maps/h720x-flash.c
+++ b/drivers/mtd/maps/h720x-flash.c
@@ -58,18 +58,11 @@
 
 #define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions)
 
-static int                   nr_mtd_parts;
-static struct mtd_partition *mtd_parts;
-static const char *probes[] = { "cmdlinepart", NULL };
-
 /*
  * Initialize FLASH support
  */
 static int __init h720x_mtd_init(void)
 {
-
-	char	*part_type = NULL;
-
 	h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size);
 
 	if (!h720x_map.virt) {
@@ -92,16 +85,8 @@
 	if (mymtd) {
 		mymtd->owner = THIS_MODULE;
 
-		nr_mtd_parts = parse_mtd_partitions(mymtd, probes, &mtd_parts, 0);
-		if (nr_mtd_parts > 0)
-			part_type = "command line";
-		if (nr_mtd_parts <= 0) {
-			mtd_parts = h720x_partitions;
-			nr_mtd_parts = NUM_PARTITIONS;
-			part_type = "builtin";
-		}
-		printk(KERN_INFO "Using %s partition table\n", part_type);
-		mtd_device_register(mymtd, mtd_parts, nr_mtd_parts);
+		mtd_device_parse_register(mymtd, NULL, 0,
+				h720x_partitions, NUM_PARTITIONS);
 		return 0;
 	}
 
@@ -120,10 +105,6 @@
 		map_destroy(mymtd);
 	}
 
-	/* Free partition info, if commandline partition was used */
-	if (mtd_parts && (mtd_parts != h720x_partitions))
-		kfree (mtd_parts);
-
 	if (h720x_map.virt) {
 		iounmap((void *)h720x_map.virt);
 		h720x_map.virt = 0;
diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c
index 404a50c..f47aedb 100644
--- a/drivers/mtd/maps/impa7.c
+++ b/drivers/mtd/maps/impa7.c
@@ -49,7 +49,7 @@
 /*
  * MTD partitioning stuff
  */
-static struct mtd_partition static_partitions[] =
+static struct mtd_partition partitions[] =
 {
 	{
 		.name = "FileSystem",
@@ -58,16 +58,10 @@
 	},
 };
 
-static int mtd_parts_nb[NUM_FLASHBANKS];
-static struct mtd_partition *mtd_parts[NUM_FLASHBANKS];
-
-static const char *probes[] = { "cmdlinepart", NULL };
-
 static int __init init_impa7(void)
 {
 	static const char *rom_probe_types[] = PROBETYPES;
 	const char **type;
-	const char *part_type = 0;
 	int i;
 	static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = {
 	  { WINDOW_ADDR0, WINDOW_SIZE0 },
@@ -97,23 +91,9 @@
 		if (impa7_mtd[i]) {
 			impa7_mtd[i]->owner = THIS_MODULE;
 			devicesfound++;
-			mtd_parts_nb[i] = parse_mtd_partitions(impa7_mtd[i],
-							       probes,
-							       &mtd_parts[i],
-							       0);
-			if (mtd_parts_nb[i] > 0) {
-				part_type = "command line";
-			} else {
-				mtd_parts[i] = static_partitions;
-				mtd_parts_nb[i] = ARRAY_SIZE(static_partitions);
-				part_type = "static";
-			}
-
-			printk(KERN_NOTICE MSG_PREFIX
-			       "using %s partition definition\n",
-			       part_type);
-			mtd_device_register(impa7_mtd[i],
-					    mtd_parts[i], mtd_parts_nb[i]);
+			mtd_device_parse_register(impa7_mtd[i], NULL, 0,
+						  partitions,
+						  ARRAY_SIZE(partitions));
 		}
 		else
 			iounmap((void *)impa7_map[i].virt);
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index d2f47be..08c2396 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -44,7 +44,6 @@
 	void __iomem *csr_base;
 	struct map_info map;
 	struct mtd_info *info;
-	int nr_parts;
 	struct pci_dev *dev;
 };
 
@@ -71,13 +70,9 @@
 
 static int __devinit vr_nor_init_partitions(struct vr_nor_mtd *p)
 {
-	struct mtd_partition *parts;
-	static const char *part_probes[] = { "cmdlinepart", NULL };
-
 	/* register the flash bank */
 	/* partition the flash bank */
-	p->nr_parts = parse_mtd_partitions(p->info, part_probes, &parts, 0);
-	return mtd_device_register(p->info, parts, p->nr_parts);
+	return mtd_device_parse_register(p->info, NULL, 0, NULL, 0);
 }
 
 static void __devexit vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p)
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 1594a80..437fcd2 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -38,7 +38,6 @@
 struct ixp2000_flash_info {
 	struct		mtd_info *mtd;
 	struct		map_info map;
-	struct		mtd_partition *partitions;
 	struct		resource *res;
 };
 
@@ -125,8 +124,6 @@
 	if (info->map.map_priv_1)
 		iounmap((void *) info->map.map_priv_1);
 
-	kfree(info->partitions);
-
 	if (info->res) {
 		release_resource(info->res);
 		kfree(info->res);
@@ -229,13 +226,7 @@
 	}
 	info->mtd->owner = THIS_MODULE;
 
-	err = parse_mtd_partitions(info->mtd, probes, &info->partitions, 0);
-	if (err > 0) {
-		err = mtd_device_register(info->mtd, info->partitions, err);
-		if(err)
-			dev_err(&dev->dev, "Could not parse partitions\n");
-	}
-
+	err = mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
 	if (err)
 		goto Error;
 
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 155b219..3040901 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -145,7 +145,6 @@
 struct ixp4xx_flash_info {
 	struct mtd_info *mtd;
 	struct map_info map;
-	struct mtd_partition *partitions;
 	struct resource *res;
 };
 
@@ -168,8 +167,6 @@
 	if (info->map.virt)
 		iounmap(info->map.virt);
 
-	kfree(info->partitions);
-
 	if (info->res) {
 		release_resource(info->res);
 		kfree(info->res);
@@ -185,8 +182,6 @@
 {
 	struct flash_platform_data *plat = dev->dev.platform_data;
 	struct ixp4xx_flash_info *info;
-	const char *part_type = NULL;
-	int nr_parts = 0;
 	int err = -1;
 
 	if (!plat)
@@ -252,28 +247,12 @@
 	/* Use the fast version */
 	info->map.write = ixp4xx_write16;
 
-	nr_parts = parse_mtd_partitions(info->mtd, probes, &info->partitions,
-					dev->resource->start);
-	if (nr_parts > 0) {
-		part_type = "dynamic";
-	} else {
-		info->partitions = plat->parts;
-		nr_parts = plat->nr_parts;
-		part_type = "static";
-	}
-	if (nr_parts == 0)
-		printk(KERN_NOTICE "IXP4xx flash: no partition info "
-			"available, registering whole flash\n");
-	else
-		printk(KERN_NOTICE "IXP4xx flash: using %s partition "
-			"definition\n", part_type);
-
-	err = mtd_device_register(info->mtd, info->partitions, nr_parts);
-	if (err)
+	err = mtd_device_parse_register(info->mtd, probes, dev->resource->start,
+			plat->parts, plat->nr_parts);
+	if (err) {
 		printk(KERN_ERR "Could not parse partitions\n");
-
-	if (err)
 		goto Error;
+	}
 
 	return 0;
 
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 7e50896..4f10e27 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -107,16 +107,12 @@
 	spin_unlock_irqrestore(&ebu_lock, flags);
 }
 
-static const char const *part_probe_types[] = { "cmdlinepart", NULL };
-
 static int __init
 ltq_mtd_probe(struct platform_device *pdev)
 {
 	struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
 	struct ltq_mtd *ltq_mtd;
-	struct mtd_partition *parts;
 	struct resource *res;
-	int nr_parts = 0;
 	struct cfi_private *cfi;
 	int err;
 
@@ -172,17 +168,8 @@
 	cfi->addr_unlock1 ^= 1;
 	cfi->addr_unlock2 ^= 1;
 
-	nr_parts = parse_mtd_partitions(ltq_mtd->mtd,
-				part_probe_types, &parts, 0);
-	if (nr_parts > 0) {
-		dev_info(&pdev->dev,
-			"using %d partitions from cmdline", nr_parts);
-	} else {
-		nr_parts = ltq_mtd_data->nr_parts;
-		parts = ltq_mtd_data->parts;
-	}
-
-	err = mtd_device_register(ltq_mtd->mtd, parts, nr_parts);
+	err = mtd_device_parse_register(ltq_mtd->mtd, NULL, 0,
+			ltq_mtd_data->parts, ltq_mtd_data->nr_parts);
 	if (err) {
 		dev_err(&pdev->dev, "failed to add partitions\n");
 		goto err_destroy;
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 5936c46..119baa7 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -33,9 +33,6 @@
 	/* cache; could be found out of res */
 	unsigned long		win_mask;
 
-	int			nr_parts;
-	struct mtd_partition	*parts;
-
 	spinlock_t		lock;
 };
 
@@ -97,8 +94,6 @@
 
 static char *rom_probe_types[] = { "cfi_probe", NULL };
 
-static char *part_probe_types[] = { "cmdlinepart", NULL };
-
 static int latch_addr_flash_remove(struct platform_device *dev)
 {
 	struct latch_addr_flash_info *info;
@@ -112,8 +107,6 @@
 	latch_addr_data = dev->dev.platform_data;
 
 	if (info->mtd != NULL) {
-		if (info->nr_parts)
-			kfree(info->parts);
 		mtd_device_unregister(info->mtd);
 		map_destroy(info->mtd);
 	}
@@ -206,21 +199,8 @@
 	}
 	info->mtd->owner = THIS_MODULE;
 
-	err = parse_mtd_partitions(info->mtd, (const char **)part_probe_types,
-				   &info->parts, 0);
-	if (err > 0) {
-		mtd_device_register(info->mtd, info->parts, err);
-		return 0;
-	}
-	if (latch_addr_data->nr_parts) {
-		pr_notice("Using latch-addr-flash partition information\n");
-		mtd_device_register(info->mtd,
-				    latch_addr_data->parts,
-				    latch_addr_data->nr_parts);
-		return 0;
-	}
-
-	mtd_device_register(info->mtd, NULL, 0);
+	mtd_device_parse_register(info->mtd, NULL, 0,
+			latch_addr_data->parts, latch_addr_data->nr_parts);
 	return 0;
 
 iounmap:
diff --git a/drivers/mtd/maps/pcmciamtd.c b/drivers/mtd/maps/pcmciamtd.c
index bbe168b..e8e9fec 100644
--- a/drivers/mtd/maps/pcmciamtd.c
+++ b/drivers/mtd/maps/pcmciamtd.c
@@ -22,22 +22,6 @@
 #include <linux/mtd/map.h>
 #include <linux/mtd/mtd.h>
 
-#ifdef CONFIG_MTD_DEBUG
-static int debug = CONFIG_MTD_DEBUG_VERBOSE;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Set Debug Level 0=quiet, 5=noisy");
-#undef DEBUG
-#define DEBUG(n, format, arg...) \
-	if (n <= debug) {	 \
-		printk(KERN_DEBUG __FILE__ ":%s(): " format "\n", __func__ , ## arg); \
-	}
-
-#else
-#undef DEBUG
-#define DEBUG(n, arg...)
-static const int debug = 0;
-#endif
-
 #define info(format, arg...) printk(KERN_INFO "pcmciamtd: " format "\n" , ## arg)
 
 #define DRIVER_DESC	"PCMCIA Flash memory card driver"
@@ -105,13 +89,13 @@
 	int ret;
 
 	if (!pcmcia_dev_present(dev->p_dev)) {
-		DEBUG(1, "device removed");
+		pr_debug("device removed\n");
 		return 0;
 	}
 
 	offset = to & ~(dev->win_size-1);
 	if (offset != dev->offset) {
-		DEBUG(2, "Remapping window from 0x%8.8x to 0x%8.8x",
+		pr_debug("Remapping window from 0x%8.8x to 0x%8.8x\n",
 		      dev->offset, offset);
 		ret = pcmcia_map_mem_page(dev->p_dev, win, offset);
 		if (ret != 0)
@@ -132,7 +116,7 @@
 		return d;
 
 	d.x[0] = readb(addr);
-	DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx", ofs, addr, d.x[0]);
+	pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n", ofs, addr, d.x[0]);
 	return d;
 }
 
@@ -147,7 +131,7 @@
 		return d;
 
 	d.x[0] = readw(addr);
-	DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx", ofs, addr, d.x[0]);
+	pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n", ofs, addr, d.x[0]);
 	return d;
 }
 
@@ -157,7 +141,7 @@
 	struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
 	unsigned long win_size = dev->win_size;
 
-	DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
+	pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
 	while(len) {
 		int toread = win_size - (from & (win_size-1));
 		caddr_t addr;
@@ -169,7 +153,7 @@
 		if(!addr)
 			return;
 
-		DEBUG(4, "memcpy from %p to %p len = %d", addr, to, toread);
+		pr_debug("memcpy from %p to %p len = %d\n", addr, to, toread);
 		memcpy_fromio(to, addr, toread);
 		len -= toread;
 		to += toread;
@@ -185,7 +169,7 @@
 	if(!addr)
 		return;
 
-	DEBUG(3, "adr = 0x%08lx (%p)  data = 0x%02lx", adr, addr, d.x[0]);
+	pr_debug("adr = 0x%08lx (%p)  data = 0x%02lx\n", adr, addr, d.x[0]);
 	writeb(d.x[0], addr);
 }
 
@@ -196,7 +180,7 @@
 	if(!addr)
 		return;
 
-	DEBUG(3, "adr = 0x%08lx (%p)  data = 0x%04lx", adr, addr, d.x[0]);
+	pr_debug("adr = 0x%08lx (%p)  data = 0x%04lx\n", adr, addr, d.x[0]);
 	writew(d.x[0], addr);
 }
 
@@ -206,7 +190,7 @@
 	struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
 	unsigned long win_size = dev->win_size;
 
-	DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
+	pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
 	while(len) {
 		int towrite = win_size - (to & (win_size-1));
 		caddr_t addr;
@@ -218,7 +202,7 @@
 		if(!addr)
 			return;
 
-		DEBUG(4, "memcpy from %p to %p len = %d", from, addr, towrite);
+		pr_debug("memcpy from %p to %p len = %d\n", from, addr, towrite);
 		memcpy_toio(addr, from, towrite);
 		len -= towrite;
 		to += towrite;
@@ -240,7 +224,7 @@
 		return d;
 
 	d.x[0] = readb(win_base + ofs);
-	DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%02lx",
+	pr_debug("ofs = 0x%08lx (%p) data = 0x%02lx\n",
 	      ofs, win_base + ofs, d.x[0]);
 	return d;
 }
@@ -255,7 +239,7 @@
 		return d;
 
 	d.x[0] = readw(win_base + ofs);
-	DEBUG(3, "ofs = 0x%08lx (%p) data = 0x%04lx",
+	pr_debug("ofs = 0x%08lx (%p) data = 0x%04lx\n",
 	      ofs, win_base + ofs, d.x[0]);
 	return d;
 }
@@ -268,7 +252,7 @@
 	if(DEV_REMOVED(map))
 		return;
 
-	DEBUG(3, "to = %p from = %lu len = %zd", to, from, len);
+	pr_debug("to = %p from = %lu len = %zd\n", to, from, len);
 	memcpy_fromio(to, win_base + from, len);
 }
 
@@ -280,7 +264,7 @@
 	if(DEV_REMOVED(map))
 		return;
 
-	DEBUG(3, "adr = 0x%08lx (%p)  data = 0x%02lx",
+	pr_debug("adr = 0x%08lx (%p)  data = 0x%02lx\n",
 	      adr, win_base + adr, d.x[0]);
 	writeb(d.x[0], win_base + adr);
 }
@@ -293,7 +277,7 @@
 	if(DEV_REMOVED(map))
 		return;
 
-	DEBUG(3, "adr = 0x%08lx (%p)  data = 0x%04lx",
+	pr_debug("adr = 0x%08lx (%p)  data = 0x%04lx\n",
 	      adr, win_base + adr, d.x[0]);
 	writew(d.x[0], win_base + adr);
 }
@@ -306,7 +290,7 @@
 	if(DEV_REMOVED(map))
 		return;
 
-	DEBUG(3, "to = %lu from = %p len = %zd", to, from, len);
+	pr_debug("to = %lu from = %p len = %zd\n", to, from, len);
 	memcpy_toio(win_base + to, from, len);
 }
 
@@ -316,7 +300,7 @@
 	struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1;
 	struct pcmcia_device *link = dev->p_dev;
 
-	DEBUG(2, "dev = %p on = %d vpp = %d\n", dev, on, dev->vpp);
+	pr_debug("dev = %p on = %d vpp = %d\n\n", dev, on, dev->vpp);
 	pcmcia_fixup_vpp(link, on ? dev->vpp : 0);
 }
 
@@ -325,7 +309,7 @@
 {
 	struct pcmciamtd_dev *dev = link->priv;
 
-	DEBUG(3, "link = 0x%p", link);
+	pr_debug("link = 0x%p\n", link);
 
 	if (link->resource[2]->end) {
 		if(dev->win_base) {
@@ -337,7 +321,6 @@
 }
 
 
-#ifdef CONFIG_MTD_DEBUG
 static int pcmciamtd_cistpl_format(struct pcmcia_device *p_dev,
 				tuple_t *tuple,
 				void *priv_data)
@@ -347,7 +330,7 @@
 	if (!pcmcia_parse_tuple(tuple, &parse)) {
 		cistpl_format_t *t = &parse.format;
 		(void)t; /* Shut up, gcc */
-		DEBUG(2, "Format type: %u, Error Detection: %u, offset = %u, length =%u",
+		pr_debug("Format type: %u, Error Detection: %u, offset = %u, length =%u\n",
 			t->type, t->edc, t->offset, t->length);
 	}
 	return -ENOSPC;
@@ -363,12 +346,11 @@
 	if (!pcmcia_parse_tuple(tuple, &parse)) {
 		cistpl_jedec_t *t = &parse.jedec;
 		for (i = 0; i < t->nid; i++)
-			DEBUG(2, "JEDEC: 0x%02x 0x%02x",
+			pr_debug("JEDEC: 0x%02x 0x%02x\n",
 			      t->id[i].mfr, t->id[i].info);
 	}
 	return -ENOSPC;
 }
-#endif
 
 static int pcmciamtd_cistpl_device(struct pcmcia_device *p_dev,
 				tuple_t *tuple,
@@ -382,14 +364,14 @@
 	if (pcmcia_parse_tuple(tuple, &parse))
 		return -EINVAL;
 
-	DEBUG(2, "Common memory:");
+	pr_debug("Common memory:\n");
 	dev->pcmcia_map.size = t->dev[0].size;
 	/* from here on: DEBUG only */
 	for (i = 0; i < t->ndev; i++) {
-		DEBUG(2, "Region %d, type = %u", i, t->dev[i].type);
-		DEBUG(2, "Region %d, wp = %u", i, t->dev[i].wp);
-		DEBUG(2, "Region %d, speed = %u ns", i, t->dev[i].speed);
-		DEBUG(2, "Region %d, size = %u bytes", i, t->dev[i].size);
+		pr_debug("Region %d, type = %u\n", i, t->dev[i].type);
+		pr_debug("Region %d, wp = %u\n", i, t->dev[i].wp);
+		pr_debug("Region %d, speed = %u ns\n", i, t->dev[i].speed);
+		pr_debug("Region %d, size = %u bytes\n", i, t->dev[i].size);
 	}
 	return 0;
 }
@@ -409,12 +391,12 @@
 	dev->pcmcia_map.bankwidth = t->geo[0].buswidth;
 	/* from here on: DEBUG only */
 	for (i = 0; i < t->ngeo; i++) {
-		DEBUG(2, "region: %d bankwidth = %u", i, t->geo[i].buswidth);
-		DEBUG(2, "region: %d erase_block = %u", i, t->geo[i].erase_block);
-		DEBUG(2, "region: %d read_block = %u", i, t->geo[i].read_block);
-		DEBUG(2, "region: %d write_block = %u", i, t->geo[i].write_block);
-		DEBUG(2, "region: %d partition = %u", i, t->geo[i].partition);
-		DEBUG(2, "region: %d interleave = %u", i, t->geo[i].interleave);
+		pr_debug("region: %d bankwidth = %u\n", i, t->geo[i].buswidth);
+		pr_debug("region: %d erase_block = %u\n", i, t->geo[i].erase_block);
+		pr_debug("region: %d read_block = %u\n", i, t->geo[i].read_block);
+		pr_debug("region: %d write_block = %u\n", i, t->geo[i].write_block);
+		pr_debug("region: %d partition = %u\n", i, t->geo[i].partition);
+		pr_debug("region: %d interleave = %u\n", i, t->geo[i].interleave);
 	}
 	return 0;
 }
@@ -432,13 +414,11 @@
 			if (p_dev->prod_id[i])
 				strcat(dev->mtd_name, p_dev->prod_id[i]);
 		}
-		DEBUG(2, "Found name: %s", dev->mtd_name);
+		pr_debug("Found name: %s\n", dev->mtd_name);
 	}
 
-#ifdef CONFIG_MTD_DEBUG
 	pcmcia_loop_tuple(p_dev, CISTPL_FORMAT, pcmciamtd_cistpl_format, NULL);
 	pcmcia_loop_tuple(p_dev, CISTPL_JEDEC_C, pcmciamtd_cistpl_jedec, NULL);
-#endif
 	pcmcia_loop_tuple(p_dev, CISTPL_DEVICE, pcmciamtd_cistpl_device, dev);
 	pcmcia_loop_tuple(p_dev, CISTPL_DEVICE_GEO, pcmciamtd_cistpl_geo, dev);
 
@@ -450,12 +430,12 @@
 
 	if(force_size) {
 		dev->pcmcia_map.size = force_size << 20;
-		DEBUG(2, "size forced to %dM", force_size);
+		pr_debug("size forced to %dM\n", force_size);
 	}
 
 	if(bankwidth) {
 		dev->pcmcia_map.bankwidth = bankwidth;
-		DEBUG(2, "bankwidth forced to %d", bankwidth);
+		pr_debug("bankwidth forced to %d\n", bankwidth);
 	}
 
 	dev->pcmcia_map.name = dev->mtd_name;
@@ -464,7 +444,7 @@
 		*new_name = 1;
 	}
 
-	DEBUG(1, "Device: Size: %lu Width:%d Name: %s",
+	pr_debug("Device: Size: %lu Width:%d Name: %s\n",
 	      dev->pcmcia_map.size,
 	      dev->pcmcia_map.bankwidth << 3, dev->mtd_name);
 }
@@ -479,7 +459,7 @@
 	static char *probes[] = { "jedec_probe", "cfi_probe" };
 	int new_name = 0;
 
-	DEBUG(3, "link=0x%p", link);
+	pr_debug("link=0x%p\n", link);
 
 	card_settings(dev, link, &new_name);
 
@@ -512,11 +492,11 @@
 
 	do {
 		int ret;
-		DEBUG(2, "requesting window with size = %luKiB memspeed = %d",
+		pr_debug("requesting window with size = %luKiB memspeed = %d\n",
 			(unsigned long) resource_size(link->resource[2]) >> 10,
 			mem_speed);
 		ret = pcmcia_request_window(link, link->resource[2], mem_speed);
-		DEBUG(2, "ret = %d dev->win_size = %d", ret, dev->win_size);
+		pr_debug("ret = %d dev->win_size = %d\n", ret, dev->win_size);
 		if(ret) {
 			j++;
 			link->resource[2]->start = 0;
@@ -524,21 +504,21 @@
 					force_size << 20 : MAX_PCMCIA_ADDR;
 			link->resource[2]->end >>= j;
 		} else {
-			DEBUG(2, "Got window of size %luKiB", (unsigned long)
+			pr_debug("Got window of size %luKiB\n", (unsigned long)
 				resource_size(link->resource[2]) >> 10);
 			dev->win_size = resource_size(link->resource[2]);
 			break;
 		}
 	} while (link->resource[2]->end >= 0x1000);
 
-	DEBUG(2, "dev->win_size = %d", dev->win_size);
+	pr_debug("dev->win_size = %d\n", dev->win_size);
 
 	if(!dev->win_size) {
 		dev_err(&dev->p_dev->dev, "Cannot allocate memory window\n");
 		pcmciamtd_release(link);
 		return -ENODEV;
 	}
-	DEBUG(1, "Allocated a window of %dKiB", dev->win_size >> 10);
+	pr_debug("Allocated a window of %dKiB\n", dev->win_size >> 10);
 
 	/* Get write protect status */
 	dev->win_base = ioremap(link->resource[2]->start,
@@ -549,7 +529,7 @@
 		pcmciamtd_release(link);
 		return -ENODEV;
 	}
-	DEBUG(1, "mapped window dev = %p @ %pR, base = %p",
+	pr_debug("mapped window dev = %p @ %pR, base = %p\n",
 	      dev, link->resource[2], dev->win_base);
 
 	dev->offset = 0;
@@ -564,7 +544,7 @@
 	}
 
 	link->config_index = 0;
-	DEBUG(2, "Setting Configuration");
+	pr_debug("Setting Configuration\n");
 	ret = pcmcia_enable_device(link);
 	if (ret != 0) {
 		if (dev->win_base) {
@@ -580,17 +560,17 @@
 		mtd = do_map_probe("map_rom", &dev->pcmcia_map);
 	} else {
 		for(i = 0; i < ARRAY_SIZE(probes); i++) {
-			DEBUG(1, "Trying %s", probes[i]);
+			pr_debug("Trying %s\n", probes[i]);
 			mtd = do_map_probe(probes[i], &dev->pcmcia_map);
 			if(mtd)
 				break;
 
-			DEBUG(1, "FAILED: %s", probes[i]);
+			pr_debug("FAILED: %s\n", probes[i]);
 		}
 	}
 
 	if(!mtd) {
-		DEBUG(1, "Can not find an MTD");
+		pr_debug("Can not find an MTD\n");
 		pcmciamtd_release(link);
 		return -ENODEV;
 	}
@@ -617,7 +597,7 @@
 	/* If the memory found fits completely into the mapped PCMCIA window,
 	   use the faster non-remapping read/write functions */
 	if(mtd->size <= dev->win_size) {
-		DEBUG(1, "Using non remapping memory functions");
+		pr_debug("Using non remapping memory functions\n");
 		dev->pcmcia_map.map_priv_2 = (unsigned long)dev->win_base;
 		if (dev->pcmcia_map.bankwidth == 1) {
 			dev->pcmcia_map.read = pcmcia_read8;
@@ -645,7 +625,7 @@
 
 static int pcmciamtd_suspend(struct pcmcia_device *dev)
 {
-	DEBUG(2, "EVENT_PM_RESUME");
+	pr_debug("EVENT_PM_RESUME\n");
 
 	/* get_lock(link); */
 
@@ -654,7 +634,7 @@
 
 static int pcmciamtd_resume(struct pcmcia_device *dev)
 {
-	DEBUG(2, "EVENT_PM_SUSPEND");
+	pr_debug("EVENT_PM_SUSPEND\n");
 
 	/* free_lock(link); */
 
@@ -666,7 +646,7 @@
 {
 	struct pcmciamtd_dev *dev = link->priv;
 
-	DEBUG(3, "link=0x%p", link);
+	pr_debug("link=0x%p\n", link);
 
 	if(dev->mtd_info) {
 		mtd_device_unregister(dev->mtd_info);
@@ -686,7 +666,7 @@
 	/* Create new memory card device */
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev) return -ENOMEM;
-	DEBUG(1, "dev=0x%p", dev);
+	pr_debug("dev=0x%p\n", dev);
 
 	dev->p_dev = link;
 	link->priv = dev;
@@ -755,7 +735,7 @@
 
 static void __exit exit_pcmciamtd(void)
 {
-	DEBUG(1, DRIVER_DESC " unloading");
+	pr_debug(DRIVER_DESC " unloading\n");
 	pcmcia_unregister_driver(&pcmciamtd_driver);
 }
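
Beyond the registration cleanups, pcmciamtd drops its private DEBUG()
macro and debug= module parameter in favour of pr_debug(), which is
compiled out unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG selects it
at run time. A sketch of the usual companion idiom; the pr_fmt() define
is an illustration, not part of this patch (the driver keeps its info()
macro for prefixing):

/* Defining pr_fmt before any includes prefixes every pr_*() message. */
#define pr_fmt(fmt) "pcmciamtd: " fmt

#include <linux/kernel.h>

static void example_trace(unsigned long win_size)
{
	/* With dynamic debug, enable at run time via:
	 *   echo 'file pcmciamtd.c +p' > /sys/kernel/debug/dynamic_debug/control
	 */
	pr_debug("window size = %lu\n", win_size);
}
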
 
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index f64cee4..66e8200 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -27,8 +27,6 @@
 	struct mtd_info		*mtd[MAX_RESOURCES];
 	struct mtd_info		*cmtd;
 	struct map_info		map[MAX_RESOURCES];
-	int			nr_parts;
-	struct mtd_partition	*parts;
 };
 
 static int physmap_flash_remove(struct platform_device *dev)
@@ -46,8 +44,6 @@
 
 	if (info->cmtd) {
 		mtd_device_unregister(info->cmtd);
-		if (info->nr_parts)
-			kfree(info->parts);
 		if (info->cmtd != info->mtd[0])
 			mtd_concat_destroy(info->cmtd);
 	}
@@ -175,23 +171,8 @@
 	if (err)
 		goto err_out;
 
-	err = parse_mtd_partitions(info->cmtd, part_probe_types,
-				   &info->parts, 0);
-	if (err > 0) {
-		mtd_device_register(info->cmtd, info->parts, err);
-		info->nr_parts = err;
-		return 0;
-	}
-
-	if (physmap_data->nr_parts) {
-		printk(KERN_NOTICE "Using physmap partition information\n");
-		mtd_device_register(info->cmtd, physmap_data->parts,
-				    physmap_data->nr_parts);
-		return 0;
-	}
-
-	mtd_device_register(info->cmtd, NULL, 0);
-
+	mtd_device_parse_register(info->cmtd, part_probe_types, 0,
+				  physmap_data->parts, physmap_data->nr_parts);
 	return 0;
 
 err_out:
@@ -245,21 +226,6 @@
 	.num_resources	= 1,
 	.resource	= &physmap_flash_resource,
 };
-
-void physmap_configure(unsigned long addr, unsigned long size,
-		int bankwidth, void (*set_vpp)(struct map_info *, int))
-{
-	physmap_flash_resource.start = addr;
-	physmap_flash_resource.end = addr + size - 1;
-	physmap_flash_data.width = bankwidth;
-	physmap_flash_data.set_vpp = set_vpp;
-}
-
-void physmap_set_partitions(struct mtd_partition *parts, int num_parts)
-{
-	physmap_flash_data.nr_parts = num_parts;
-	physmap_flash_data.parts = parts;
-}
 #endif
 
 static int __init physmap_init(void)
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index d251d1d..7d65f9d 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -34,58 +34,10 @@
 
 struct of_flash {
 	struct mtd_info		*cmtd;
-	struct mtd_partition	*parts;
 	int list_size; /* number of elements in of_flash_list */
 	struct of_flash_list	list[0];
 };
 
-#define OF_FLASH_PARTS(info)	((info)->parts)
-static int parse_obsolete_partitions(struct platform_device *dev,
-				     struct of_flash *info,
-				     struct device_node *dp)
-{
-	int i, plen, nr_parts;
-	const struct {
-		__be32 offset, len;
-	} *part;
-	const char *names;
-
-	part = of_get_property(dp, "partitions", &plen);
-	if (!part)
-		return 0; /* No partitions found */
-
-	dev_warn(&dev->dev, "Device tree uses obsolete partition map binding\n");
-
-	nr_parts = plen / sizeof(part[0]);
-
-	info->parts = kzalloc(nr_parts * sizeof(*info->parts), GFP_KERNEL);
-	if (!info->parts)
-		return -ENOMEM;
-
-	names = of_get_property(dp, "partition-names", &plen);
-
-	for (i = 0; i < nr_parts; i++) {
-		info->parts[i].offset = be32_to_cpu(part->offset);
-		info->parts[i].size   = be32_to_cpu(part->len) & ~1;
-		if (be32_to_cpu(part->len) & 1) /* bit 0 set signifies read only partition */
-			info->parts[i].mask_flags = MTD_WRITEABLE;
-
-		if (names && (plen > 0)) {
-			int len = strlen(names) + 1;
-
-			info->parts[i].name = (char *)names;
-			plen -= len;
-			names += len;
-		} else {
-			info->parts[i].name = "unnamed";
-		}
-
-		part++;
-	}
-
-	return nr_parts;
-}
-
 static int of_flash_remove(struct platform_device *dev)
 {
 	struct of_flash *info;
@@ -101,11 +53,8 @@
 		mtd_concat_destroy(info->cmtd);
 	}
 
-	if (info->cmtd) {
-		if (OF_FLASH_PARTS(info))
-			kfree(OF_FLASH_PARTS(info));
+	if (info->cmtd)
 		mtd_device_unregister(info->cmtd);
-	}
 
 	for (i = 0; i < info->list_size; i++) {
 		if (info->list[i].mtd)
@@ -165,7 +114,8 @@
    specifies the list of partition probers to use. If none is given then the
    default is used. These take precedence over other device tree
    information. */
-static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", NULL };
+static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot",
+					"ofpart", "ofoldpart", NULL };
 static const char ** __devinit of_get_probes(struct device_node *dp)
 {
 	const char *cp;
@@ -218,6 +168,7 @@
 	int reg_tuple_size;
 	struct mtd_info **mtd_list = NULL;
 	resource_size_t res_size;
+	struct mtd_part_parser_data ppdata;
 
 	match = of_match_device(of_flash_match, &dev->dev);
 	if (!match)
@@ -331,29 +282,12 @@
 	if (err)
 		goto err_out;
 
+	ppdata.of_node = dp;
 	part_probe_types = of_get_probes(dp);
-	err = parse_mtd_partitions(info->cmtd, part_probe_types,
-				   &info->parts, 0);
-	if (err < 0) {
-		of_free_probes(part_probe_types);
-		goto err_out;
-	}
+	mtd_device_parse_register(info->cmtd, part_probe_types, &ppdata,
+			NULL, 0);
 	of_free_probes(part_probe_types);
 
-	if (err == 0) {
-		err = of_mtd_parse_partitions(&dev->dev, dp, &info->parts);
-		if (err < 0)
-			goto err_out;
-	}
-
-	if (err == 0) {
-		err = parse_obsolete_partitions(dev, info, dp);
-		if (err < 0)
-			goto err_out;
-	}
-
-	mtd_device_register(info->cmtd, info->parts, err);
-
 	kfree(mtd_list);
 
 	return 0;
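
The physmap_of change is the device-tree variant of the same cleanup:
instead of open-coding the obsolete "partitions"/"partition-names"
walk, the driver hands its flash node to the parser core through
struct mtd_part_parser_data, and the "ofpart"/"ofoldpart" parsers do
the rest. A sketch of that pattern, with illustrative names:

static int example_of_register(struct mtd_info *mtd, struct device_node *dp)
{
	static const char *probes[] = { "cmdlinepart", "RedBoot",
					"ofpart", "ofoldpart", NULL };
	struct mtd_part_parser_data ppdata = { .of_node = dp };

	/* The DT-aware parsers read ppdata.of_node; with no static
	 * fallback table, an unpartitioned device registers whole. */
	return mtd_device_parse_register(mtd, probes, &ppdata, NULL, 0);
}
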
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index 9ca1ecc..94f5534 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -44,8 +44,6 @@
 	struct device		*dev;
 	struct mtd_info		*mtd;
 	struct map_info		 map;
-	struct mtd_partition	*partitions;
-	bool			free_partitions;
 	struct resource		*area;
 	struct platdata_mtd_ram	*pdata;
 };
@@ -95,10 +93,6 @@
 
 	if (info->mtd) {
 		mtd_device_unregister(info->mtd);
-		if (info->partitions) {
-			if (info->free_partitions)
-				kfree(info->partitions);
-		}
 		map_destroy(info->mtd);
 	}
 
@@ -228,21 +222,8 @@
 	/* check to see if there are any available partitions, or whether
 	 * to add this device whole */
 
-	if (!pdata->nr_partitions) {
-		/* try to probe using the supplied probe type */
-		if (pdata->probes) {
-			err = parse_mtd_partitions(info->mtd, pdata->probes,
-					   &info->partitions, 0);
-			info->free_partitions = 1;
-			if (err > 0)
-				err = mtd_device_register(info->mtd,
-					info->partitions, err);
-		}
-	}
-	/* use the static mapping */
-	else
-		err = mtd_device_register(info->mtd, pdata->partitions,
-					  pdata->nr_partitions);
+	err = mtd_device_parse_register(info->mtd, pdata->probes, 0,
+			pdata->partitions, pdata->nr_partitions);
 	if (!err)
 		dev_info(&pdev->dev, "registered mtd device\n");
 
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 7ae137d..411a17d 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -41,8 +41,6 @@
 }
 
 struct pxa2xx_flash_info {
-	struct mtd_partition	*parts;
-	int			nr_parts;
 	struct mtd_info		*mtd;
 	struct map_info		map;
 };
@@ -55,9 +53,7 @@
 {
 	struct flash_platform_data *flash = pdev->dev.platform_data;
 	struct pxa2xx_flash_info *info;
-	struct mtd_partition *parts;
 	struct resource *res;
-	int ret = 0;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
@@ -71,8 +67,6 @@
 	info->map.bankwidth = flash->width;
 	info->map.phys = res->start;
 	info->map.size = resource_size(res);
-	info->parts = flash->parts;
-	info->nr_parts = flash->nr_parts;
 
 	info->map.virt = ioremap(info->map.phys, info->map.size);
 	if (!info->map.virt) {
@@ -104,18 +98,7 @@
 	}
 	info->mtd->owner = THIS_MODULE;
 
-	ret = parse_mtd_partitions(info->mtd, probes, &parts, 0);
-
-	if (ret > 0) {
-		info->nr_parts = ret;
-		info->parts = parts;
-	}
-
-	if (!info->nr_parts)
-		printk("Registering %s as whole device\n",
-		       info->map.name);
-
-	mtd_device_register(info->mtd, info->parts, info->nr_parts);
+	mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
 
 	platform_set_drvdata(pdev, info);
 	return 0;
@@ -133,7 +116,6 @@
 	iounmap(info->map.virt);
 	if (info->map.cached)
 		iounmap(info->map.cached);
-	kfree(info->parts);
 	kfree(info);
 	return 0;
 }
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 761fb45..0237f19 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -25,8 +25,6 @@
 struct rbtx4939_flash_info {
 	struct mtd_info *mtd;
 	struct map_info map;
-	int nr_parts;
-	struct mtd_partition *parts;
 };
 
 static int rbtx4939_flash_remove(struct platform_device *dev)
@@ -41,8 +39,6 @@
 	if (info->mtd) {
 		struct rbtx4939_flash_data *pdata = dev->dev.platform_data;
 
-		if (info->nr_parts)
-			kfree(info->parts);
 		mtd_device_unregister(info->mtd);
 		map_destroy(info->mtd);
 	}
@@ -50,7 +46,6 @@
 }
 
 static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", NULL };
-static const char *part_probe_types[] = { "cmdlinepart", NULL };
 
 static int rbtx4939_flash_probe(struct platform_device *dev)
 {
@@ -107,22 +102,11 @@
 	info->mtd->owner = THIS_MODULE;
 	if (err)
 		goto err_out;
+	err = mtd_device_parse_register(info->mtd, NULL, 0,
+			pdata->parts, pdata->nr_parts);
 
-	err = parse_mtd_partitions(info->mtd, part_probe_types,
-				&info->parts, 0);
-	if (err > 0) {
-		mtd_device_register(info->mtd, info->parts, err);
-		info->nr_parts = err;
-		return 0;
-	}
-
-	if (pdata->nr_parts) {
-		pr_notice("Using rbtx4939 partition information\n");
-		mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
-		return 0;
-	}
-
-	mtd_device_register(info->mtd, NULL, 0);
+	if (err)
+		goto err_out;
 	return 0;
 
 err_out:
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index a9b5e0e..fa9c0a9 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -131,10 +131,8 @@
 };
 
 struct sa_info {
-	struct mtd_partition	*parts;
 	struct mtd_info		*mtd;
 	int			num_subdev;
-	unsigned int		nr_parts;
 	struct sa_subdev_info	subdev[0];
 };
 
@@ -231,8 +229,6 @@
 			mtd_concat_destroy(info->mtd);
 	}
 
-	kfree(info->parts);
-
 	for (i = info->num_subdev - 1; i >= 0; i--)
 		sa1100_destroy_subdev(&info->subdev[i]);
 	kfree(info);
@@ -341,10 +337,8 @@
 static int __devinit sa1100_mtd_probe(struct platform_device *pdev)
 {
 	struct flash_platform_data *plat = pdev->dev.platform_data;
-	struct mtd_partition *parts;
-	const char *part_type = NULL;
 	struct sa_info *info;
-	int err, nr_parts = 0;
+	int err;
 
 	if (!plat)
 		return -ENODEV;
@@ -358,26 +352,8 @@
 	/*
 	 * Partition selection stuff.
 	 */
-	nr_parts = parse_mtd_partitions(info->mtd, part_probes, &parts, 0);
-	if (nr_parts > 0) {
-		info->parts = parts;
-		part_type = "dynamic";
-	} else {
-		parts = plat->parts;
-		nr_parts = plat->nr_parts;
-		part_type = "static";
-	}
-
-	if (nr_parts == 0)
-		printk(KERN_NOTICE "SA1100 flash: no partition info "
-			"available, registering whole flash\n");
-	else
-		printk(KERN_NOTICE "SA1100 flash: using %s partition "
-			"definition\n", part_type);
-
-	mtd_device_register(info->mtd, parts, nr_parts);
-
-	info->nr_parts = nr_parts;
+	mtd_device_parse_register(info->mtd, part_probes, 0,
+			plat->parts, plat->nr_parts);
 
 	platform_set_drvdata(pdev, info);
 	err = 0;
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index cbf6bad..496c407 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -19,8 +19,6 @@
 static struct mtd_info *flash_mtd;
 static struct mtd_info *eprom_mtd;
 
-static struct mtd_partition *parsed_parts;
-
 struct map_info soleng_eprom_map = {
 	.name = "Solution Engine EPROM",
 	.size = 0x400000,
@@ -51,12 +49,14 @@
 		.size = MTDPART_SIZ_FULL,
 	}
 };
+#define NUM_PARTITIONS ARRAY_SIZE(superh_se_partitions)
+#else
+#define superh_se_partitions NULL
+#define NUM_PARTITIONS 0
 #endif /* CONFIG_MTD_SUPERH_RESERVE */
 
 static int __init init_soleng_maps(void)
 {
-	int nr_parts = 0;
-
 	/* First probe at offset 0 */
 	soleng_flash_map.phys = 0;
 	soleng_flash_map.virt = (void __iomem *)P2SEGADDR(0);
@@ -92,21 +92,8 @@
 		mtd_device_register(eprom_mtd, NULL, 0);
 	}
 
-	nr_parts = parse_mtd_partitions(flash_mtd, probes, &parsed_parts, 0);
-
-#ifdef CONFIG_MTD_SUPERH_RESERVE
-	if (nr_parts <= 0) {
-		printk(KERN_NOTICE "Using configured partition at 0x%08x.\n",
-		       CONFIG_MTD_SUPERH_RESERVE);
-		parsed_parts = superh_se_partitions;
-		nr_parts = sizeof(superh_se_partitions)/sizeof(*parsed_parts);
-	}
-#endif /* CONFIG_MTD_SUPERH_RESERVE */
-
-	if (nr_parts > 0)
-		mtd_device_register(flash_mtd, parsed_parts, nr_parts);
-	else
-		mtd_device_register(flash_mtd, NULL, 0);
+	mtd_device_parse_register(flash_mtd, probes, 0,
+			superh_se_partitions, NUM_PARTITIONS);
 
 	return 0;
 }
@@ -118,10 +105,7 @@
 		map_destroy(eprom_mtd);
 	}
 
-	if (parsed_parts)
-		mtd_device_unregister(flash_mtd);
-	else
-		mtd_device_unregister(flash_mtd);
+	mtd_device_unregister(flash_mtd);
 	map_destroy(flash_mtd);
 }
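
The solutionengine conversion shows the compile-time trick that keeps
the registration call free of #ifdefs: when the config option is off,
the fallback table and its count degrade to NULL/0. A sketch of the
same pattern under an assumed CONFIG_EXAMPLE_STATIC_PARTS option
(example_mtd and probes are assumed to exist):

#ifdef CONFIG_EXAMPLE_STATIC_PARTS
static struct mtd_partition example_parts[] = {
	{ .name = "reserved", .offset = 0, .size = 0x100000 },
};
#define EXAMPLE_NUM_PARTS ARRAY_SIZE(example_parts)
#else
#define example_parts NULL
#define EXAMPLE_NUM_PARTS 0
#endif

static int __init example_init(void)
{
	/* One unconditional call; NULL/0 means "whole device" if the
	 * probed parsers also find nothing. */
	return mtd_device_parse_register(example_mtd, probes, 0,
					 example_parts, EXAMPLE_NUM_PARTS);
}
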
 
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 901ce96..aa7e0cb 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -20,7 +20,6 @@
 #include <asm/immap_cpm2.h>
 
 static struct mtd_info *sbcmtd[3];
-static struct mtd_partition *sbcmtd_parts[3];
 
 struct map_info sbc82xx_flash_map[3] = {
 	{.name = "Boot flash"},
@@ -101,6 +100,7 @@
 	for (i=0; i<3; i++) {
 		int8_t flashcs[3] = { 0, 6, 1 };
 		int nr_parts;
+		struct mtd_partition *defparts;
 
 		printk(KERN_NOTICE "PowerQUICC II %s (%ld MiB on CS%d",
 		       sbc82xx_flash_map[i].name,
@@ -113,7 +113,8 @@
 		}
 		printk(" at %08lx)\n",  sbc82xx_flash_map[i].phys);
 
-		sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys, sbc82xx_flash_map[i].size);
+		sbc82xx_flash_map[i].virt = ioremap(sbc82xx_flash_map[i].phys,
+						    sbc82xx_flash_map[i].size);
 
 		if (!sbc82xx_flash_map[i].virt) {
 			printk("Failed to ioremap\n");
@@ -129,24 +130,20 @@
 
 		sbcmtd[i]->owner = THIS_MODULE;
 
-		nr_parts = parse_mtd_partitions(sbcmtd[i], part_probes,
-						&sbcmtd_parts[i], 0);
-		if (nr_parts > 0) {
-			mtd_device_register(sbcmtd[i], sbcmtd_parts[i],
-					    nr_parts);
-			continue;
-		}
-
 		/* No partitioning detected. Use default */
 		if (i == 2) {
-			mtd_device_register(sbcmtd[i], NULL, 0);
+			defparts = NULL;
+			nr_parts = 0;
 		} else if (i == bigflash) {
-			mtd_device_register(sbcmtd[i], bigflash_parts,
-					    ARRAY_SIZE(bigflash_parts));
+			defparts = bigflash_parts;
+			nr_parts = ARRAY_SIZE(bigflash_parts);
 		} else {
-			mtd_device_register(sbcmtd[i], smallflash_parts,
-					    ARRAY_SIZE(smallflash_parts));
+			defparts = smallflash_parts;
+			nr_parts = ARRAY_SIZE(smallflash_parts);
 		}
+
+		mtd_device_parse_register(sbcmtd[i], part_probes, 0,
+					  defparts, nr_parts);
 	}
 	return 0;
 }
@@ -159,12 +156,8 @@
 		if (!sbcmtd[i])
 			continue;
 
-		if (i<2 || sbcmtd_parts[i])
-			mtd_device_unregister(sbcmtd[i]);
-		else
-			mtd_device_unregister(sbcmtd[i]);
+		mtd_device_unregister(sbcmtd[i]);
 
-		kfree(sbcmtd_parts[i]);
 		map_destroy(sbcmtd[i]);
 
 		iounmap((void *)sbc82xx_flash_map[i].virt);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ca38569..ed8b5e7 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -426,6 +426,8 @@
 	new->rq->queuedata = new;
 	blk_queue_logical_block_size(new->rq, tr->blksize);
 
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
+
 	if (tr->discard) {
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
 		new->rq->limits.max_discard_sectors = UINT_MAX;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 3326615..7c1dc90 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -44,7 +44,7 @@
 	enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
 };
 
-static struct mutex mtdblks_lock;
+static DEFINE_MUTEX(mtdblks_lock);
 
 /*
  * Cache stuff...
@@ -119,7 +119,7 @@
 	if (mtdblk->cache_state != STATE_DIRTY)
 		return 0;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
+	pr_debug("mtdblock: writing cached data for \"%s\" "
 			"at 0x%lx, size 0x%x\n", mtd->name,
 			mtdblk->cache_offset, mtdblk->cache_size);
 
@@ -148,7 +148,7 @@
 	size_t retlen;
 	int ret;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
+	pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
 		mtd->name, pos, len);
 
 	if (!sect_size)
@@ -218,7 +218,7 @@
 	size_t retlen;
 	int ret;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
+	pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
 			mtd->name, pos, len);
 
 	if (!sect_size)
@@ -283,7 +283,7 @@
 {
 	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
 
-	DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
+	pr_debug("mtdblock_open\n");
 
 	mutex_lock(&mtdblks_lock);
 	if (mtdblk->count) {
@@ -303,7 +303,7 @@
 
 	mutex_unlock(&mtdblks_lock);
 
-	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
+	pr_debug("ok\n");
 
 	return 0;
 }
@@ -312,7 +312,7 @@
 {
 	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);
 
-   	DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");
+	pr_debug("mtdblock_release\n");
 
 	mutex_lock(&mtdblks_lock);
 
@@ -329,7 +329,7 @@
 
 	mutex_unlock(&mtdblks_lock);
 
-	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
+	pr_debug("ok\n");
 
 	return 0;
 }
@@ -389,8 +389,6 @@
 
 static int __init init_mtdblock(void)
 {
-	mutex_init(&mtdblks_lock);
-
 	return register_mtd_blktrans(&mtdblock_tr);
 }
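
The mtdblock hunks also swap a runtime mutex_init() in the module init
path for a compile-time DEFINE_MUTEX(), so the lock is valid before any
code path can take it. The two styles, side by side (sketch):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_static_lock);	/* usable at load time */

static struct mutex example_runtime_lock;	/* must not be taken... */

static int __init example_locks_init(void)
{
	mutex_init(&example_runtime_lock);	/* ...until this has run */
	return 0;
}
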
 
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 61086ea..e7dc732 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -43,7 +43,7 @@
 
 /*
  * Data structure to hold the pointer to the mtd device as well
- * as mode information ofr various use cases.
+ * as mode information for various use cases.
  */
 struct mtd_file_info {
 	struct mtd_info *mtd;
@@ -86,7 +86,7 @@
 	struct mtd_file_info *mfi;
 	struct inode *mtd_ino;
 
-	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
+	pr_debug("MTD_open\n");
 
 	/* You can't open the RO devices RW */
 	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
@@ -151,7 +151,7 @@
 	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_info *mtd = mfi->mtd;
 
-	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
+	pr_debug("MTD_close\n");
 
 	/* Only sync if opened RW */
 	if ((file->f_mode & FMODE_WRITE) && mtd->sync)
@@ -195,7 +195,7 @@
 	size_t size = count;
 	char *kbuf;
 
-	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
+	pr_debug("MTD_read\n");
 
 	if (*ppos + count > mtd->size)
 		count = mtd->size - *ppos;
@@ -211,17 +211,17 @@
 		len = min_t(size_t, count, size);
 
 		switch (mfi->mode) {
-		case MTD_MODE_OTP_FACTORY:
+		case MTD_FILE_MODE_OTP_FACTORY:
 			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
 			break;
-		case MTD_MODE_OTP_USER:
+		case MTD_FILE_MODE_OTP_USER:
 			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
 			break;
-		case MTD_MODE_RAW:
+		case MTD_FILE_MODE_RAW:
 		{
 			struct mtd_oob_ops ops;
 
-			ops.mode = MTD_OOB_RAW;
+			ops.mode = MTD_OPS_RAW;
 			ops.datbuf = kbuf;
 			ops.oobbuf = NULL;
 			ops.len = len;
@@ -233,16 +233,16 @@
 		default:
 			ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
 		}
-		/* Nand returns -EBADMSG on ecc errors, but it returns
+		/* NAND returns -EBADMSG on ECC errors, but it returns
 		 * the data. For our userspace tools it is important
-		 * to dump areas with ecc errors !
+		 * to dump areas with ECC errors!
 		 * For kernel internal usage it also might return -EUCLEAN
 		 * to signal the caller that a bitflip has occurred and has
 		 * been corrected by the ECC algorithm.
 		 * Userspace software which accesses NAND this way
 		 * must be aware of the fact that it deals with NAND
 		 */
-		if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
+		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
 			*ppos += retlen;
 			if (copy_to_user(buf, kbuf, retlen)) {
 				kfree(kbuf);
@@ -278,7 +278,7 @@
 	int ret=0;
 	int len;
 
-	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
+	pr_debug("MTD_write\n");
 
 	if (*ppos == mtd->size)
 		return -ENOSPC;
@@ -302,10 +302,10 @@
 		}
 
 		switch (mfi->mode) {
-		case MTD_MODE_OTP_FACTORY:
+		case MTD_FILE_MODE_OTP_FACTORY:
 			ret = -EROFS;
 			break;
-		case MTD_MODE_OTP_USER:
+		case MTD_FILE_MODE_OTP_USER:
 			if (!mtd->write_user_prot_reg) {
 				ret = -EOPNOTSUPP;
 				break;
@@ -313,13 +313,14 @@
 			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
 			break;
 
-		case MTD_MODE_RAW:
+		case MTD_FILE_MODE_RAW:
 		{
 			struct mtd_oob_ops ops;
 
-			ops.mode = MTD_OOB_RAW;
+			ops.mode = MTD_OPS_RAW;
 			ops.datbuf = kbuf;
 			ops.oobbuf = NULL;
+			ops.ooboffs = 0;
 			ops.len = len;
 
 			ret = mtd->write_oob(mtd, *ppos, &ops);
@@ -367,13 +368,13 @@
 		if (!mtd->read_fact_prot_reg)
 			ret = -EOPNOTSUPP;
 		else
-			mfi->mode = MTD_MODE_OTP_FACTORY;
+			mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
 		break;
 	case MTD_OTP_USER:
 		if (!mtd->read_fact_prot_reg)
 			ret = -EOPNOTSUPP;
 		else
-			mfi->mode = MTD_MODE_OTP_USER;
+			mfi->mode = MTD_FILE_MODE_OTP_USER;
 		break;
 	default:
 		ret = -EINVAL;
@@ -390,6 +391,7 @@
 	uint64_t start, uint32_t length, void __user *ptr,
 	uint32_t __user *retp)
 {
+	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_oob_ops ops;
 	uint32_t retlen;
 	int ret = 0;
@@ -409,9 +411,10 @@
 		return ret;
 
 	ops.ooblen = length;
-	ops.ooboffs = start & (mtd->oobsize - 1);
+	ops.ooboffs = start & (mtd->writesize - 1);
 	ops.datbuf = NULL;
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+		MTD_OPS_PLACE_OOB;
 
 	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
 		return -EINVAL;
@@ -420,7 +423,7 @@
 	if (IS_ERR(ops.oobbuf))
 		return PTR_ERR(ops.oobbuf);
 
-	start &= ~((uint64_t)mtd->oobsize - 1);
+	start &= ~((uint64_t)mtd->writesize - 1);
 	ret = mtd->write_oob(mtd, start, &ops);
 
 	if (ops.oobretlen > 0xFFFFFFFFU)
@@ -433,9 +436,11 @@
 	return ret;
 }
 
-static int mtd_do_readoob(struct mtd_info *mtd, uint64_t start,
-	uint32_t length, void __user *ptr, uint32_t __user *retp)
+static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
+	uint64_t start, uint32_t length, void __user *ptr,
+	uint32_t __user *retp)
 {
+	struct mtd_file_info *mfi = file->private_data;
 	struct mtd_oob_ops ops;
 	int ret = 0;
 
@@ -451,9 +456,10 @@
 		return ret;
 
 	ops.ooblen = length;
-	ops.ooboffs = start & (mtd->oobsize - 1);
+	ops.ooboffs = start & (mtd->writesize - 1);
 	ops.datbuf = NULL;
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
+		MTD_OPS_PLACE_OOB;
 
 	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
 		return -EINVAL;
@@ -462,7 +468,7 @@
 	if (!ops.oobbuf)
 		return -ENOMEM;
 
-	start &= ~((uint64_t)mtd->oobsize - 1);
+	start &= ~((uint64_t)mtd->writesize - 1);
 	ret = mtd->read_oob(mtd, start, &ops);
 
 	if (put_user(ops.oobretlen, retp))
@@ -472,13 +478,29 @@
 		ret = -EFAULT;
 
 	kfree(ops.oobbuf);
+
+	/*
+	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
+	 * data. For our userspace tools it is important to dump areas
+	 * with ECC errors!
+	 * For kernel internal usage it also might return -EUCLEAN
+	 * to signal the caller that a bitflip has occurred and has
+	 * been corrected by the ECC algorithm.
+	 *
+	 * Note: currently the standard NAND function, nand_read_oob_std,
+	 * does not calculate ECC for the OOB area, so do not rely on
+	 * this behavior unless you have replaced it with your own.
+	 */
+	if (mtd_is_bitflip_or_eccerr(ret))
+		return 0;
+
 	return ret;
 }
 
 /*
  * Copies (and truncates, if necessary) data from the larger struct,
  * nand_ecclayout, to the smaller, deprecated layout struct,
- * nand_ecclayout_user. This is necessary only to suppport the deprecated
+ * nand_ecclayout_user. This is necessary only to support the deprecated
  * API ioctl ECCGETLAYOUT while allowing all new functionality to use
  * nand_ecclayout flexibly (i.e. the struct may change size in new
  * releases without requiring major rewrites).
@@ -544,6 +566,55 @@
 	}
 }
 
+static int mtd_write_ioctl(struct mtd_info *mtd,
+		struct mtd_write_req __user *argp)
+{
+	struct mtd_write_req req;
+	struct mtd_oob_ops ops;
+	void __user *usr_data, *usr_oob;
+	int ret;
+
+	if (copy_from_user(&req, argp, sizeof(req)) ||
+			!access_ok(VERIFY_READ, req.usr_data, req.len) ||
+			!access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
+		return -EFAULT;
+	if (!mtd->write_oob)
+		return -EOPNOTSUPP;
+
+	ops.mode = req.mode;
+	ops.len = (size_t)req.len;
+	ops.ooblen = (size_t)req.ooblen;
+	ops.ooboffs = 0;
+
+	usr_data = (void __user *)(uintptr_t)req.usr_data;
+	usr_oob = (void __user *)(uintptr_t)req.usr_oob;
+
+	if (req.usr_data) {
+		ops.datbuf = memdup_user(usr_data, ops.len);
+		if (IS_ERR(ops.datbuf))
+			return PTR_ERR(ops.datbuf);
+	} else {
+		ops.datbuf = NULL;
+	}
+
+	if (req.usr_oob) {
+		ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
+		if (IS_ERR(ops.oobbuf)) {
+			kfree(ops.datbuf);
+			return PTR_ERR(ops.oobbuf);
+		}
+	} else {
+		ops.oobbuf = NULL;
+	}
+
+	ret = mtd->write_oob(mtd, (loff_t)req.start, &ops);
+
+	kfree(ops.datbuf);
+	kfree(ops.oobbuf);
+
+	return ret;
+}
+
 static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
 {
 	struct mtd_file_info *mfi = file->private_data;
@@ -553,7 +624,7 @@
 	u_long size;
 	struct mtd_info_user info;
 
-	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
+	pr_debug("MTD_ioctl\n");
 
 	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
 	if (cmd & IOC_IN) {
@@ -601,8 +672,8 @@
 		info.erasesize	= mtd->erasesize;
 		info.writesize	= mtd->writesize;
 		info.oobsize	= mtd->oobsize;
-		/* The below fields are obsolete */
-		info.ecctype	= -1;
+		/* The below field is obsolete */
+		info.padding	= 0;
 		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
 			return -EFAULT;
 		break;
@@ -698,7 +769,7 @@
 		if (copy_from_user(&buf, argp, sizeof(buf)))
 			ret = -EFAULT;
 		else
-			ret = mtd_do_readoob(mtd, buf.start, buf.length,
+			ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
 				buf.ptr, &buf_user->start);
 		break;
 	}
@@ -725,12 +796,19 @@
 		if (copy_from_user(&buf, argp, sizeof(buf)))
 			ret = -EFAULT;
 		else
-			ret = mtd_do_readoob(mtd, buf.start, buf.length,
+			ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
 				(void __user *)(uintptr_t)buf.usr_ptr,
 				&buf_user->length);
 		break;
 	}
 
+	case MEMWRITE:
+	{
+		ret = mtd_write_ioctl(mtd,
+		      (struct mtd_write_req __user *)arg);
+		break;
+	}
+
 	case MEMLOCK:
 	{
 		struct erase_info_user einfo;
@@ -827,7 +905,7 @@
 		if (copy_from_user(&mode, argp, sizeof(int)))
 			return -EFAULT;
 
-		mfi->mode = MTD_MODE_NORMAL;
+		mfi->mode = MTD_FILE_MODE_NORMAL;
 
 		ret = otp_select_filemode(mfi, mode);
 
@@ -843,11 +921,11 @@
 			return -ENOMEM;
 		ret = -EOPNOTSUPP;
 		switch (mfi->mode) {
-		case MTD_MODE_OTP_FACTORY:
+		case MTD_FILE_MODE_OTP_FACTORY:
 			if (mtd->get_fact_prot_info)
 				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
 			break;
-		case MTD_MODE_OTP_USER:
+		case MTD_FILE_MODE_OTP_USER:
 			if (mtd->get_user_prot_info)
 				ret = mtd->get_user_prot_info(mtd, buf, 4096);
 			break;
@@ -871,7 +949,7 @@
 	{
 		struct otp_info oinfo;
 
-		if (mfi->mode != MTD_MODE_OTP_USER)
+		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
 			return -EINVAL;
 		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
 			return -EFAULT;
@@ -882,7 +960,7 @@
 	}
 #endif
 
-	/* This ioctl is being deprecated - it truncates the ecc layout */
+	/* This ioctl is being deprecated - it truncates the ECC layout */
 	case ECCGETLAYOUT:
 	{
 		struct nand_ecclayout_user *usrlay;
@@ -915,17 +993,17 @@
 		mfi->mode = 0;
 
 		switch(arg) {
-		case MTD_MODE_OTP_FACTORY:
-		case MTD_MODE_OTP_USER:
+		case MTD_FILE_MODE_OTP_FACTORY:
+		case MTD_FILE_MODE_OTP_USER:
 			ret = otp_select_filemode(mfi, arg);
 			break;
 
-		case MTD_MODE_RAW:
+		case MTD_FILE_MODE_RAW:
 			if (!mtd->read_oob || !mtd->write_oob)
 				return -EOPNOTSUPP;
 			mfi->mode = arg;
 
-		case MTD_MODE_NORMAL:
+		case MTD_FILE_MODE_NORMAL:
 			break;
 		default:
 			ret = -EINVAL;
@@ -1011,7 +1089,7 @@
 		if (copy_from_user(&buf, argp, sizeof(buf)))
 			ret = -EFAULT;
 		else
-			ret = mtd_do_readoob(mtd, buf.start,
+			ret = mtd_do_readoob(file, mtd, buf.start,
 				buf.length, compat_ptr(buf.ptr),
 				&buf_user->start);
 		break;
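
The new MEMWRITE ioctl lets user space write page data and OOB in a
single operation, with the OOB placement mode chosen per request rather
than per file handle. A sketch of the user-space side, assuming the
struct mtd_write_req/MEMWRITE definitions added to mtd-abi.h by this
series are visible via <mtd/mtd-user.h>:

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <mtd/mtd-user.h>

static int example_write_page(int fd, uint64_t offs,
			      const void *data, uint32_t len,
			      const void *oob, uint32_t ooblen)
{
	struct mtd_write_req req;

	memset(&req, 0, sizeof(req));
	req.start = offs;			/* flash offset of the page */
	req.len = len;
	req.ooblen = ooblen;
	req.usr_data = (uintptr_t)data;		/* pointers travel as u64 */
	req.usr_oob = (uintptr_t)oob;
	req.mode = MTD_OPS_PLACE_OOB;		/* or MTD_OPS_AUTO_OOB / MTD_OPS_RAW */

	return ioctl(fd, MEMWRITE, &req);	/* 0 on success, -1/errno on error */
}
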
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index e601672..6df4d4d 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -95,10 +95,10 @@
 
 		/* Save information about bitflips! */
 		if (unlikely(err)) {
-			if (err == -EBADMSG) {
+			if (mtd_is_eccerr(err)) {
 				mtd->ecc_stats.failed++;
 				ret = err;
-			} else if (err == -EUCLEAN) {
+			} else if (mtd_is_bitflip(err)) {
 				mtd->ecc_stats.corrected++;
 				/* Do not overwrite -EBADMSG !! */
 				if (!ret)
@@ -279,10 +279,10 @@
 
 		/* Save information about bitflips! */
 		if (unlikely(err)) {
-			if (err == -EBADMSG) {
+			if (mtd_is_eccerr(err)) {
 				mtd->ecc_stats.failed++;
 				ret = err;
-			} else if (err == -EUCLEAN) {
+			} else if (mtd_is_bitflip(err)) {
 				mtd->ecc_stats.corrected++;
 				/* Do not overwrite -EBADMSG !! */
 				if (!ret)
@@ -770,7 +770,7 @@
 
 	/*
 	 * Set up the new "super" device's MTD object structure, check for
-	 * incompatibilites between the subdevices.
+	 * incompatibilities between the subdevices.
 	 */
 	concat->mtd.type = subdev[0]->type;
 	concat->mtd.flags = subdev[0]->flags;
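
The mtd_is_bitflip()/mtd_is_eccerr() helpers used throughout these
hunks replace open-coded -EUCLEAN/-EBADMSG comparisons. Their semantics
(the real definitions live in include/linux/mtd/mtd.h) amount to:

static inline int mtd_is_bitflip(int err)	/* corrected by ECC */
{
	return err == -EUCLEAN;
}

static inline int mtd_is_eccerr(int err)	/* uncorrectable ECC error */
{
	return err == -EBADMSG;
}

static inline int mtd_is_bitflip_or_eccerr(int err)
{
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}
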
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c510aff..b01993e 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -362,7 +362,7 @@
 			      MTD_DEVT(i) + 1,
 			      NULL, "mtd%dro", i);
 
-	DEBUG(0, "mtd: Giving out device %d to %s\n", i, mtd->name);
+	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
 	/* No need to get a refcount on the module containing
 	   the notifier, since we hold the mtd_table_mutex */
 	list_for_each_entry(not, &mtd_notifiers, list)
@@ -429,27 +429,63 @@
 }
 
 /**
- * mtd_device_register - register an MTD device.
+ * mtd_device_parse_register - parse partitions and register an MTD device.
  *
- * @master: the MTD device to register
- * @parts: the partitions to register - only valid if nr_parts > 0
- * @nr_parts: the number of partitions in parts.  If zero then the full MTD
- *            device is registered
+ * @mtd: the MTD device to register
+ * @types: the list of MTD partition probes to try, see
+ *         'parse_mtd_partitions()' for more information
+ * @parser_data: MTD partition parser-specific data
+ * @parts: fallback partition information to register, if parsing fails;
+ *         only valid if %nr_parts > %0
+ * @nr_parts: the number of partitions in @parts; if zero then the full
+ *            MTD device is registered if no partition info is found
  *
- * Register an MTD device with the system and optionally, a number of
- * partitions.  If nr_parts is 0 then the whole device is registered, otherwise
- * only the partitions are registered.  To register both the full device *and*
- * the partitions, call mtd_device_register() twice, once with nr_parts == 0
- * and once equal to the number of partitions.
+ * This function aggregates MTD partition parsing (done by
+ * 'parse_mtd_partitions()') with MTD device and partition registration. It
+ * basically follows the most common pattern found in many MTD drivers:
+ *
+ * * It first tries to probe partitions on MTD device @mtd using parsers
+ *   specified in @types (if @types is %NULL, then the default list of parsers
+ *   is used, see 'parse_mtd_partitions()' for more information). If none are
+ *   found, this function tries to fall back to the information specified in
+ *   @parts/@nr_parts.
+ * * If any partitioning info was found, this function registers the found
+ *   partitions.
+ * * If no partitions were found this function just registers the MTD device
+ *   @mtd and exits.
+ *
+ * Returns zero in case of success and a negative error code in case of failure.
  */
-int mtd_device_register(struct mtd_info *master,
-			const struct mtd_partition *parts,
-			int nr_parts)
+int mtd_device_parse_register(struct mtd_info *mtd, const char **types,
+			      struct mtd_part_parser_data *parser_data,
+			      const struct mtd_partition *parts,
+			      int nr_parts)
 {
-	return parts ? add_mtd_partitions(master, parts, nr_parts) :
-		add_mtd_device(master);
+	int err;
+	struct mtd_partition *real_parts;
+
+	err = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
+	if (err <= 0 && nr_parts && parts) {
+		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
+				     GFP_KERNEL);
+		if (!real_parts)
+			err = -ENOMEM;
+		else
+			err = nr_parts;
+	}
+
+	if (err > 0) {
+		err = add_mtd_partitions(mtd, real_parts, err);
+		kfree(real_parts);
+	} else if (err == 0) {
+		err = add_mtd_device(mtd);
+		if (err == 1)
+			err = -ENODEV;
+	}
+
+	return err;
 }
-EXPORT_SYMBOL_GPL(mtd_device_register);
+EXPORT_SYMBOL_GPL(mtd_device_parse_register);
 
 /**
  * mtd_device_unregister - unregister an existing MTD device.
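
Note the ownership contract the new helper establishes: the partition
array handed to add_mtd_partitions() is duplicated (or kmemdup'ed from
the fallback table) and freed by the core, so callers no longer keep a
parsed-partition pointer around just to kfree() it at remove time. A
sketch of the resulting probe/remove pairing, with illustrative names:

static int example_probe(struct platform_device *pdev)
{
	int err;

	/* Returns 0 or a negative errno; the >0 partition count never
	 * escapes the helper. */
	err = mtd_device_parse_register(example_mtd, NULL, 0, NULL, 0);
	if (err)
		return err;
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* Tears down whatever was registered: partitions or whole device. */
	mtd_device_unregister(example_mtd);
	return 0;
}
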
diff --git a/drivers/mtd/mtdcore.h b/drivers/mtd/mtdcore.h
index 0ed6126..961a384 100644
--- a/drivers/mtd/mtdcore.h
+++ b/drivers/mtd/mtdcore.h
@@ -15,6 +15,9 @@
 extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *,
 			      int);
 extern int del_mtd_partitions(struct mtd_info *);
+extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
+				struct mtd_partition **pparts,
+				struct mtd_part_parser_data *data);
 
 #define mtd_for_each_device(mtd)			\
 	for ((mtd) = __mtd_next_device(0);		\
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index e3e40f4..1e2fa62 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -258,7 +258,7 @@
 		ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
 				&retlen, (u_char *) &count[0]);
 		if (retlen != MTDOOPS_HEADER_SIZE ||
-				(ret < 0 && ret != -EUCLEAN)) {
+				(ret < 0 && !mtd_is_bitflip(ret))) {
 			printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
 			       page * record_size, retlen,
 			       MTDOOPS_HEADER_SIZE, ret);
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 630be3e..a0bd2de 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -73,9 +73,9 @@
 	res = part->master->read(part->master, from + part->offset,
 				   len, retlen, buf);
 	if (unlikely(res)) {
-		if (res == -EUCLEAN)
+		if (mtd_is_bitflip(res))
 			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
-		if (res == -EBADMSG)
+		if (mtd_is_eccerr(res))
 			mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
 	}
 	return res;
@@ -130,7 +130,7 @@
 	if (ops->oobbuf) {
 		size_t len, pages;
 
-		if (ops->mode == MTD_OOB_AUTO)
+		if (ops->mode == MTD_OPS_AUTO_OOB)
 			len = mtd->oobavail;
 		else
 			len = mtd->oobsize;
@@ -142,9 +142,9 @@
 
 	res = part->master->read_oob(part->master, from + part->offset, ops);
 	if (unlikely(res)) {
-		if (res == -EUCLEAN)
+		if (mtd_is_bitflip(res))
 			mtd->ecc_stats.corrected++;
-		if (res == -EBADMSG)
+		if (mtd_is_eccerr(res))
 			mtd->ecc_stats.failed++;
 	}
 	return res;
@@ -479,6 +479,19 @@
 			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
 		}
 	}
+	if (slave->offset == MTDPART_OFS_RETAIN) {
+		slave->offset = cur_offset;
+		if (master->size - slave->offset >= slave->mtd.size) {
+			slave->mtd.size = master->size - slave->offset
+							- slave->mtd.size;
+		} else {
+			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
+				part->name, master->size - slave->offset,
+				slave->mtd.size);
+			/* register to preserve ordering */
+			goto out_register;
+		}
+	}
 	if (slave->mtd.size == MTDPART_SIZ_FULL)
 		slave->mtd.size = master->size - slave->offset;
 
@@ -693,6 +706,8 @@
 	return ret;
 }
 
+#define put_partition_parser(p) do { module_put((p)->owner); } while (0)
+
 int register_mtd_parser(struct mtd_part_parser *p)
 {
 	spin_lock(&part_parser_lock);
@@ -712,19 +727,51 @@
 }
 EXPORT_SYMBOL_GPL(deregister_mtd_parser);
 
+/*
+ * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
+ * are changing this array!
+ */
+static const char *default_mtd_part_types[] = {
+	"cmdlinepart",
+	"ofpart",
+	NULL
+};
+
+/**
+ * parse_mtd_partitions - parse MTD partitions
+ * @master: the master partition (describes whole MTD device)
+ * @types: names of partition parsers to try or %NULL
+ * @pparts: array of partitions found is returned here
+ * @data: MTD partition parser-specific data
+ *
+ * This function tries to find partitions on MTD device @master. It uses MTD
+ * partition parsers, specified in @types. However, if @types is %NULL, then
+ * the default list of parsers is used. The default list contains only the
+ * "cmdlinepart" and "ofpart" parsers ATM.
+ *
+ * This function may return:
+ * o a negative error code in case of failure
+ * o zero if no partitions were found
+ * o a positive number of found partitions, in which case on exit @pparts will
+ *   point to an array containing this number of &struct mtd_partition objects.
+ */
 int parse_mtd_partitions(struct mtd_info *master, const char **types,
-			 struct mtd_partition **pparts, unsigned long origin)
+			 struct mtd_partition **pparts,
+			 struct mtd_part_parser_data *data)
 {
 	struct mtd_part_parser *parser;
 	int ret = 0;
 
+	if (!types)
+		types = default_mtd_part_types;
+
 	for ( ; ret <= 0 && *types; types++) {
 		parser = get_partition_parser(*types);
 		if (!parser && !request_module("%s", *types))
 				parser = get_partition_parser(*types);
 		if (!parser)
 			continue;
-		ret = (*parser->parse_fn)(master, pparts, origin);
+		ret = (*parser->parse_fn)(master, pparts, data);
 		if (ret > 0) {
 			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
 			       ret, parser->name, master->name);
@@ -733,7 +780,6 @@
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(parse_mtd_partitions);
 
 int mtd_is_partition(struct mtd_info *mtd)
 {
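
With parse_mtd_partitions() no longer exported, partition parsers remain
the extension point, and their parse_fn hook now receives a
struct mtd_part_parser_data pointer instead of the old
'unsigned long origin'. A sketch of a parser against the new signature
("example" names are illustrative):

static int example_parse_fn(struct mtd_info *master,
			    struct mtd_partition **pparts,
			    struct mtd_part_parser_data *data)
{
	/* DT-aware parsers such as "ofpart" look at data->of_node here;
	 * data may be NULL when the driver has nothing to pass.
	 * Return <0 on error, 0 for "nothing found", or the number of
	 * partitions stored in *pparts (kmalloc'ed; freed by the core). */
	return 0;
}

static struct mtd_part_parser example_parser = {
	.owner = THIS_MODULE,
	.name = "example",
	.parse_fn = example_parse_fn,
};

/* register_mtd_parser(&example_parser) at module init; selected by
 * listing "example" in the 'types' array passed to
 * mtd_device_parse_register(). */
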
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 89f8e66..a90bfe7 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -27,12 +27,12 @@
 	struct mtd_info *mtd = _mtd;
 
 	if (sb->s_mtd == mtd) {
-		DEBUG(2, "MTDSB: Match on device %d (\"%s\")\n",
+		pr_debug("MTDSB: Match on device %d (\"%s\")\n",
 		      mtd->index, mtd->name);
 		return 1;
 	}
 
-	DEBUG(2, "MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
+	pr_debug("MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n",
 	      sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name);
 	return 0;
 }
@@ -71,7 +71,7 @@
 		goto already_mounted;
 
 	/* fresh new superblock */
-	DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n",
+	pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
 	      mtd->index, mtd->name);
 
 	sb->s_flags = flags;
@@ -88,7 +88,7 @@
 
 	/* new mountpoint for an already mounted superblock */
 already_mounted:
-	DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
+	pr_debug("MTDSB: Device %d (\"%s\") is already mounted\n",
 	      mtd->index, mtd->name);
 	put_mtd_device(mtd);
 	return dget(sb->s_root);
@@ -109,7 +109,7 @@
 
 	mtd = get_mtd_device(NULL, mtdnr);
 	if (IS_ERR(mtd)) {
-		DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
+		pr_debug("MTDSB: Device #%u doesn't appear to exist\n", mtdnr);
 		return ERR_CAST(mtd);
 	}
 
@@ -132,7 +132,7 @@
 	if (!dev_name)
 		return ERR_PTR(-EINVAL);
 
-	DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name);
+	pr_debug("MTDSB: dev_name \"%s\"\n", dev_name);
 
 	/* the preferred way of mounting in future; especially when
 	 * CONFIG_BLOCK=n - we specify the underlying MTD device by number or
@@ -143,7 +143,7 @@
 			struct mtd_info *mtd;
 
 			/* mount by MTD device name */
-			DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n",
+			pr_debug("MTDSB: mtd:%%s, name \"%s\"\n",
 			      dev_name + 4);
 
 			mtd = get_mtd_device_nm(dev_name + 4);
@@ -164,7 +164,7 @@
 			mtdnr = simple_strtoul(dev_name + 3, &endptr, 0);
 			if (!*endptr) {
 				/* It was a valid number */
-				DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n",
+				pr_debug("MTDSB: mtd%%d, mtdnr %d\n",
 				      mtdnr);
 				return mount_mtd_nr(fs_type, flags,
 						     dev_name, data,
@@ -180,10 +180,10 @@
 	bdev = lookup_bdev(dev_name);
 	if (IS_ERR(bdev)) {
 		ret = PTR_ERR(bdev);
-		DEBUG(1, "MTDSB: lookup_bdev() returned %d\n", ret);
+		pr_debug("MTDSB: lookup_bdev() returned %d\n", ret);
 		return ERR_PTR(ret);
 	}
-	DEBUG(1, "MTDSB: lookup_bdev() returned 0\n");
+	pr_debug("MTDSB: lookup_bdev() returned 0\n");
 
 	ret = -EINVAL;
 
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index fd78853..bd9590c 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -86,7 +86,7 @@
 	unsigned int flags;
 	unsigned int active_count;
 	unsigned int erase_count;
-	unsigned int pad;		/* speeds up pointer decremtnt */
+	unsigned int pad;		/* speeds up pointer decrement */
 };
 
 #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
@@ -314,7 +314,7 @@
 {
 	int ret = d->mtd->read_oob(d->mtd, from, ops);
 
-	if (ret == -EUCLEAN)
+	if (mtd_is_bitflip(ret))
 		return ret;
 
 	if (ret) {
@@ -350,11 +350,11 @@
 	ops.oobbuf = d->oob_buf;
 	ops.ooboffs = 0;
 	ops.datbuf = NULL;
-	ops.mode = MTD_OOB_AUTO;
+	ops.mode = MTD_OPS_AUTO_OOB;
 
 	ret = mtdswap_read_oob(d, offset, &ops);
 
-	if (ret && ret != -EUCLEAN)
+	if (ret && !mtd_is_bitflip(ret))
 		return ret;
 
 	data = (struct mtdswap_oobdata *)d->oob_buf;
@@ -363,7 +363,7 @@
 
 	if (le16_to_cpu(data->magic) == MTDSWAP_MAGIC_CLEAN) {
 		eb->erase_count = le32_to_cpu(data->count);
-		if (ret == -EUCLEAN)
+		if (mtd_is_bitflip(ret))
 			ret = MTDSWAP_SCANNED_BITFLIP;
 		else {
 			if (le16_to_cpu(data2->magic) == MTDSWAP_MAGIC_DIRTY)
@@ -389,7 +389,7 @@
 
 	ops.ooboffs = 0;
 	ops.oobbuf = (uint8_t *)&n;
-	ops.mode = MTD_OOB_AUTO;
+	ops.mode = MTD_OPS_AUTO_OOB;
 	ops.datbuf = NULL;
 
 	if (marker == MTDSWAP_TYPE_CLEAN) {
@@ -408,7 +408,7 @@
 	if (ret) {
 		dev_warn(d->dev, "Write OOB failed for block at %08llx "
 			"error %d\n", offset, ret);
-		if (ret == -EIO || ret == -EBADMSG)
+		if (ret == -EIO || mtd_is_eccerr(ret))
 			mtdswap_handle_write_error(d, eb);
 		return ret;
 	}
@@ -628,7 +628,7 @@
 			TREE_COUNT(d, CLEAN)--;
 
 			ret = mtdswap_write_marker(d, eb, MTDSWAP_TYPE_DIRTY);
-		} while (ret == -EIO || ret == -EBADMSG);
+		} while (ret == -EIO || mtd_is_eccerr(ret));
 
 		if (ret)
 			return ret;
@@ -678,7 +678,7 @@
 	ret = mtdswap_map_free_block(d, page, bp);
 	eb = d->eb_data + (*bp / d->pages_per_eblk);
 
-	if (ret == -EIO || ret == -EBADMSG) {
+	if (ret == -EIO || mtd_is_eccerr(ret)) {
 		d->curr_write = NULL;
 		eb->active_count--;
 		d->revmap[*bp] = PAGE_UNDEF;
@@ -690,7 +690,7 @@
 
 	writepos = (loff_t)*bp << PAGE_SHIFT;
 	ret =  mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
-	if (ret == -EIO || ret == -EBADMSG) {
+	if (ret == -EIO || mtd_is_eccerr(ret)) {
 		d->curr_write_pos--;
 		eb->active_count--;
 		d->revmap[*bp] = PAGE_UNDEF;
@@ -738,7 +738,7 @@
 retry:
 	ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
 
-	if (ret < 0 && ret != -EUCLEAN) {
+	if (ret < 0 && !mtd_is_bitflip(ret)) {
 		oldeb = d->eb_data + oldblock / d->pages_per_eblk;
 		oldeb->flags |= EBLOCK_READERR;
 
@@ -931,7 +931,7 @@
 	struct mtd_oob_ops ops;
 	int ret;
 
-	ops.mode = MTD_OOB_AUTO;
+	ops.mode = MTD_OPS_AUTO_OOB;
 	ops.len = mtd->writesize;
 	ops.ooblen = mtd->ecclayout->oobavail;
 	ops.ooboffs = 0;
@@ -1016,7 +1016,7 @@
 
 	if (ret == 0)
 		mtdswap_rb_add(d, eb, MTDSWAP_CLEAN);
-	else if (ret != -EIO && ret != -EBADMSG)
+	else if (ret != -EIO && !mtd_is_eccerr(ret))
 		mtdswap_rb_add(d, eb, MTDSWAP_DIRTY);
 
 	return 0;
@@ -1164,7 +1164,7 @@
 	ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
 
 	d->mtd_read_count++;
-	if (ret == -EUCLEAN) {
+	if (mtd_is_bitflip(ret)) {
 		eb->flags |= EBLOCK_BITFLIP;
 		mtdswap_rb_add(d, eb, MTDSWAP_BITFLIP);
 		ret = 0;
@@ -1374,11 +1374,10 @@
 		goto revmap_fail;
 
 	eblk_bytes = sizeof(struct swap_eb)*d->eblks;
-	d->eb_data = vmalloc(eblk_bytes);
+	d->eb_data = vzalloc(eblk_bytes);
 	if (!d->eb_data)
 		goto eb_data_fail;
 
-	memset(d->eb_data, 0, eblk_bytes);
 	for (i = 0; i < pages; i++)
 		d->page_data[i] = BLOCK_UNDEF;
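
The mtd_is_bitflip()/mtd_is_eccerr() conversions throughout this file rely on
two new helpers; to the best of my reading they are thin predicates along
these lines (a sketch of the intent, not a verbatim quote of mtd.h):

	static inline int mtd_is_bitflip(int err)
	{
		return err == -EUCLEAN;	/* data corrected, media decaying */
	}

	static inline int mtd_is_eccerr(int err)
	{
		return err == -EBADMSG;	/* uncorrectable ECC error */
	}
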
 
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index dbfa0f7..cce7b70 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -83,16 +83,9 @@
           scratch register here to enable this feature. On Intel Moorestown
           boards, the scratch register is at 0xFF108018.
 
-config MTD_NAND_EDB7312
-	tristate "Support for Cirrus Logic EBD7312 evaluation board"
-	depends on ARCH_EDB7312
-	help
-	  This enables the driver for the Cirrus Logic EBD7312 evaluation
-	  board to access the onboard NAND Flash.
-
 config MTD_NAND_H1900
 	tristate "iPAQ H1900 flash"
-	depends on ARCH_PXA
+	depends on ARCH_PXA && BROKEN
 	help
 	  This enables the driver for the iPAQ h1900 flash.
 
@@ -116,10 +109,11 @@
 	  Support for NAND flash on Amstrad E3 (Delta).
 
 config MTD_NAND_OMAP2
-	tristate "NAND Flash device on OMAP2 and OMAP3"
-	depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3)
+	tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
+	depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4)
 	help
-          Support for NAND flash on Texas Instruments OMAP2 and OMAP3 platforms.
+          Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
+	  platforms.
 
 config MTD_NAND_IDS
 	tristate
@@ -423,6 +417,19 @@
 	  The simulator may simulate various NAND flash chips for the
 	  MTD nand layer.
 
+config MTD_NAND_GPMI_NAND
+	bool "GPMI NAND Flash Controller driver"
+	depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
+	select MTD_PARTITIONS
+	select MTD_CMDLINE_PARTS
+	help
+	  Enables NAND Flash support for IMX23 or IMX28.
+	  The GPMI controller is very powerful; with the help of the BCH
+	  module, it can do hardware ECC. The GPMI supports several NAND
+	  flash chips at the same time. The GPMI may conflict with other
+	  blocks, such as the SD card, so pay attention when you enable
+	  the GPMI.
+
 config MTD_NAND_PLATFORM
 	tristate "Support for generic platform NAND driver"
 	help
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 5745d83..618f4ba 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -13,7 +13,6 @@
 obj-$(CONFIG_MTD_NAND_AMS_DELTA)	+= ams-delta.o
 obj-$(CONFIG_MTD_NAND_AUTCPU12)		+= autcpu12.o
 obj-$(CONFIG_MTD_NAND_DENALI)		+= denali.o
-obj-$(CONFIG_MTD_NAND_EDB7312)		+= edb7312.o
 obj-$(CONFIG_MTD_NAND_AU1550)		+= au1550nd.o
 obj-$(CONFIG_MTD_NAND_BF5XX)		+= bf5xx_nand.o
 obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB)	+= ppchameleonevb.o
@@ -49,5 +48,6 @@
 obj-$(CONFIG_MTD_NAND_MPC5121_NFC)	+= mpc5121_nfc.o
 obj-$(CONFIG_MTD_NAND_RICOH)		+= r852.o
 obj-$(CONFIG_MTD_NAND_JZ4740)		+= jz4740_nand.o
+obj-$(CONFIG_MTD_NAND_GPMI_NAND)	+= gpmi-nand/
 
 nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 55da20c..23e5d77 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -161,37 +161,6 @@
                 !!host->board->rdy_pin_active_low;
 }
 
-/*
- * Minimal-overhead PIO for data access.
- */
-static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
-{
-	struct nand_chip	*nand_chip = mtd->priv;
-
-	__raw_readsb(nand_chip->IO_ADDR_R, buf, len);
-}
-
-static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
-{
-	struct nand_chip	*nand_chip = mtd->priv;
-
-	__raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
-}
-
-static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
-{
-	struct nand_chip	*nand_chip = mtd->priv;
-
-	__raw_writesb(nand_chip->IO_ADDR_W, buf, len);
-}
-
-static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
-{
-	struct nand_chip	*nand_chip = mtd->priv;
-
-	__raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
-}
-
 static void dma_complete_func(void *completion)
 {
 	complete(completion);
@@ -266,33 +235,27 @@
 static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
 {
 	struct nand_chip *chip = mtd->priv;
-	struct atmel_nand_host *host = chip->priv;
 
 	if (use_dma && len > mtd->oobsize)
 		/* only use DMA for bigger than oob size: better performance */
 		if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
 			return;
 
-	if (host->board->bus_width_16)
-		atmel_read_buf16(mtd, buf, len);
-	else
-		atmel_read_buf8(mtd, buf, len);
+	/* if no DMA operation possible, use PIO */
+	memcpy_fromio(buf, chip->IO_ADDR_R, len);
 }
 
 static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
 {
 	struct nand_chip *chip = mtd->priv;
-	struct atmel_nand_host *host = chip->priv;
 
 	if (use_dma && len > mtd->oobsize)
 		/* only use DMA for bigger than oob size: better performance */
 		if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
 			return;
 
-	if (host->board->bus_width_16)
-		atmel_write_buf16(mtd, buf, len);
-	else
-		atmel_write_buf8(mtd, buf, len);
+	/* if no DMA operation possible, use PIO */
+	memcpy_toio(chip->IO_ADDR_W, buf, len);
 }
 
 /*
@@ -481,10 +444,6 @@
 	}
 }
 
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
-
 /*
  * Probe for the NAND device.
  */
@@ -496,8 +455,6 @@
 	struct resource *regs;
 	struct resource *mem;
 	int res;
-	struct mtd_partition *partitions = NULL;
-	int num_partitions = 0;
 
 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!mem) {
@@ -583,7 +540,7 @@
 
 	if (on_flash_bbt) {
 		printk(KERN_INFO "atmel_nand: Use On Flash BBT\n");
-		nand_chip->options |= NAND_USE_FLASH_BBT;
+		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
 	}
 
 	if (!cpu_has_dma())
@@ -594,7 +551,7 @@
 
 		dma_cap_zero(mask);
 		dma_cap_set(DMA_MEMCPY, mask);
-		host->dma_chan = dma_request_channel(mask, 0, NULL);
+		host->dma_chan = dma_request_channel(mask, NULL, NULL);
 		if (!host->dma_chan) {
 			dev_err(host->dev, "Failed to request DMA channel\n");
 			use_dma = 0;
@@ -655,27 +612,12 @@
 		goto err_scan_tail;
 	}
 
-#ifdef CONFIG_MTD_CMDLINE_PARTS
 	mtd->name = "atmel_nand";
-	num_partitions = parse_mtd_partitions(mtd, part_probes,
-					      &partitions, 0);
-#endif
-	if (num_partitions <= 0 && host->board->partition_info)
-		partitions = host->board->partition_info(mtd->size,
-							 &num_partitions);
-
-	if ((!partitions) || (num_partitions == 0)) {
-		printk(KERN_ERR "atmel_nand: No partitions defined, or unsupported device.\n");
-		res = -ENXIO;
-		goto err_no_partitions;
-	}
-
-	res = mtd_device_register(mtd, partitions, num_partitions);
+	res = mtd_device_parse_register(mtd, NULL, 0,
+			host->board->parts, host->board->num_parts);
 	if (!res)
 		return res;
 
-err_no_partitions:
-	nand_release(mtd);
 err_scan_tail:
 err_scan_ident:
 err_no_card:
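
The same conversion recurs in the drivers below: the open-coded
parse_mtd_partitions() plus mtd_device_register() sequence collapses into a
single mtd_device_parse_register() call. A hedged sketch of the idiom
("mtd" and "static_parts" are illustrative names):

	/* try the default parsers first, fall back to a static table */
	ret = mtd_device_parse_register(mtd, NULL, NULL,
					static_parts, ARRAY_SIZE(static_parts));
	if (ret)
		goto err_scan_tail;
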
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index fa5736b..7dd3700 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -52,7 +52,7 @@
  * au_read_byte -  read one byte from the chip
  * @mtd:	MTD device structure
  *
- *  read function for 8bit buswith
+ * read function for 8bit buswidth
  */
 static u_char au_read_byte(struct mtd_info *mtd)
 {
@@ -67,7 +67,7 @@
  * @mtd:	MTD device structure
  * @byte:	pointer to data byte to write
  *
- *  write function for 8it buswith
+ * write function for 8bit buswidth
  */
 static void au_write_byte(struct mtd_info *mtd, u_char byte)
 {
@@ -77,11 +77,10 @@
 }
 
 /**
- * au_read_byte16 -  read one byte endianess aware from the chip
+ * au_read_byte16 -  read one byte endianness aware from the chip
  * @mtd:	MTD device structure
  *
- *  read function for 16bit buswith with
- * endianess conversion
+ * read function for 16bit buswidth with endianness conversion
  */
 static u_char au_read_byte16(struct mtd_info *mtd)
 {
@@ -92,12 +91,11 @@
 }
 
 /**
- * au_write_byte16 -  write one byte endianess aware to the chip
+ * au_write_byte16 -  write one byte endianness aware to the chip
  * @mtd:	MTD device structure
  * @byte:	pointer to data byte to write
  *
- *  write function for 16bit buswith with
- * endianess conversion
+ * write function for 16bit buswidth with endianness conversion
  */
 static void au_write_byte16(struct mtd_info *mtd, u_char byte)
 {
@@ -110,8 +108,7 @@
  * au_read_word -  read one word from the chip
  * @mtd:	MTD device structure
  *
- *  read function for 16bit buswith without
- * endianess conversion
+ * read function for 16bit buswidth without endianness conversion
  */
 static u16 au_read_word(struct mtd_info *mtd)
 {
@@ -127,7 +124,7 @@
  * @buf:	data buffer
  * @len:	number of bytes to write
  *
- *  write function for 8bit buswith
+ * write function for 8bit buswidth
  */
 static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
 {
@@ -146,7 +143,7 @@
  * @buf:	buffer to store date
  * @len:	number of bytes to read
  *
- *  read function for 8bit buswith
+ * read function for 8bit buswidth
  */
 static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
 {
@@ -165,7 +162,7 @@
  * @buf:	buffer containing the data to compare
  * @len:	number of bytes to compare
  *
- *  verify function for 8bit buswith
+ * verify function for 8bit buswidth
  */
 static int au_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
 {
@@ -187,7 +184,7 @@
  * @buf:	data buffer
  * @len:	number of bytes to write
  *
- *  write function for 16bit buswith
+ * write function for 16bit buswidth
  */
 static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
 {
@@ -209,7 +206,7 @@
  * @buf:	buffer to store date
  * @len:	number of bytes to read
  *
- *  read function for 16bit buswith
+ * read function for 16bit buswidth
  */
 static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
 {
@@ -230,7 +227,7 @@
  * @buf:	buffer containing the data to compare
  * @len:	number of bytes to compare
  *
- *  verify function for 16bit buswith
+ * verify function for 16bit buswidth
  */
 static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
 {
diff --git a/drivers/mtd/nand/autcpu12.c b/drivers/mtd/nand/autcpu12.c
index eddc9a2..2e42ec2 100644
--- a/drivers/mtd/nand/autcpu12.c
+++ b/drivers/mtd/nand/autcpu12.c
@@ -172,9 +172,9 @@
 
 	/* Enable the following for a flash based bad block table */
 	/*
-	   this->options = NAND_USE_FLASH_BBT;
+	   this->bbt_options = NAND_BBT_USE_FLASH;
 	 */
-	this->options = NAND_USE_FLASH_BBT;
+	this->bbt_options = NAND_BBT_USE_FLASH;
 
 	/* Scan to find existence of the device */
 	if (nand_scan(autcpu12_mtd, 1)) {
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 8c569e4..46b58d6 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -52,8 +52,6 @@
 static const __devinitconst char gBanner[] = KERN_INFO \
 	"BCM UMI MTD NAND Driver: 1.00\n";
 
-const char *part_probes[] = { "cmdlinepart", NULL };
-
 #if NAND_ECC_BCH
 static uint8_t scan_ff_pattern[] = { 0xff };
 
@@ -376,16 +374,18 @@
 
 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-	if (!r)
-		return -ENXIO;
+	if (!r) {
+		err = -ENXIO;
+		goto out_free;
+	}
 
 	/* map physical address */
 	bcm_umi_io_base = ioremap(r->start, resource_size(r));
 
 	if (!bcm_umi_io_base) {
 		printk(KERN_ERR "ioremap to access BCM UMI NAND chip failed\n");
-		kfree(board_mtd);
-		return -EIO;
+		err = -EIO;
+		goto out_free;
 	}
 
 	/* Get pointer to private data */
@@ -401,9 +401,8 @@
 	/* Initialize the NAND hardware.  */
 	if (bcm_umi_nand_inithw() < 0) {
 		printk(KERN_ERR "BCM UMI NAND chip could not be initialized\n");
-		iounmap(bcm_umi_io_base);
-		kfree(board_mtd);
-		return -EIO;
+		err = -EIO;
+		goto out_unmap;
 	}
 
 	/* Set address of NAND IO lines */
@@ -436,7 +435,7 @@
 #if USE_DMA
 	err = nand_dma_init();
 	if (err != 0)
-		return err;
+		goto out_unmap;
 #endif
 
 	/* Figure out the size of the device that we have.
@@ -447,9 +446,7 @@
 	err = nand_scan_ident(board_mtd, 1, NULL);
 	if (err) {
 		printk(KERN_ERR "nand_scan failed: %d\n", err);
-		iounmap(bcm_umi_io_base);
-		kfree(board_mtd);
-		return err;
+		goto out_unmap;
 	}
 
 	/* Now that we know the nand size, we can setup the ECC layout */
@@ -468,13 +465,14 @@
 		{
 			printk(KERN_ERR "NAND - Unrecognized pagesize: %d\n",
 					 board_mtd->writesize);
-			return -EINVAL;
+			err = -EINVAL;
+			goto out_unmap;
 		}
 	}
 
 #if NAND_ECC_BCH
 	if (board_mtd->writesize > 512) {
-		if (this->options & NAND_USE_FLASH_BBT)
+		if (this->bbt_options & NAND_BBT_USE_FLASH)
 			largepage_bbt.options = NAND_BBT_SCAN2NDPAGE;
 		this->badblock_pattern = &largepage_bbt;
 	}
@@ -485,33 +483,20 @@
 	err = nand_scan_tail(board_mtd);
 	if (err) {
 		printk(KERN_ERR "nand_scan failed: %d\n", err);
-		iounmap(bcm_umi_io_base);
-		kfree(board_mtd);
-		return err;
+		goto out_unmap;
 	}
 
 	/* Register the partitions */
-	{
-		int nr_partitions;
-		struct mtd_partition *partition_info;
-
-		board_mtd->name = "bcm_umi-nand";
-		nr_partitions =
-		    parse_mtd_partitions(board_mtd, part_probes,
-					 &partition_info, 0);
-
-		if (nr_partitions <= 0) {
-			printk(KERN_ERR "BCM UMI NAND: Too few partitions - %d\n",
-			       nr_partitions);
-			iounmap(bcm_umi_io_base);
-			kfree(board_mtd);
-			return -EIO;
-		}
-		mtd_device_register(board_mtd, partition_info, nr_partitions);
-	}
+	board_mtd->name = "bcm_umi-nand";
+	mtd_device_parse_register(board_mtd, NULL, 0, NULL, 0);
 
 	/* Return happy */
 	return 0;
+out_unmap:
+	iounmap(bcm_umi_io_base);
+out_free:
+	kfree(board_mtd);
+	return err;
 }
 
 static int bcm_umi_nand_remove(struct platform_device *pdev)
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 7c8df83..72d3f23 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -58,7 +58,6 @@
 
 struct cafe_priv {
 	struct nand_chip nand;
-	struct mtd_partition *parts;
 	struct pci_dev *pdev;
 	void __iomem *mmio;
 	struct rs_control *rs;
@@ -372,7 +371,7 @@
 	return 1;
 }
 /**
- * cafe_nand_read_page_syndrome - {REPLACABLE] hardware ecc syndrom based page read
+ * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
  * @mtd:	mtd info structure
  * @chip:	nand chip info structure
  * @buf:	buffer to store read data
@@ -631,8 +630,6 @@
 	struct cafe_priv *cafe;
 	uint32_t ctrl;
 	int err = 0;
-	struct mtd_partition *parts;
-	int nr_parts;
 
 	/* Very old versions shared the same PCI ident for all three
 	   functions on the chip. Verify the class too... */
@@ -687,7 +684,8 @@
 	cafe->nand.chip_delay = 0;
 
 	/* Enable the following for a flash based bad block table */
-	cafe->nand.options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
+	cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
+	cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS;
 
 	if (skipbbt) {
 		cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -800,18 +798,9 @@
 
 	pci_set_drvdata(pdev, mtd);
 
-	/* We register the whole device first, separate from the partitions */
-	mtd_device_register(mtd, NULL, 0);
-
-#ifdef CONFIG_MTD_CMDLINE_PARTS
 	mtd->name = "cafe_nand";
-#endif
-	nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
-	if (nr_parts > 0) {
-		cafe->parts = parts;
-		dev_info(&cafe->pdev->dev, "%d partitions found\n", nr_parts);
-		mtd_device_register(mtd, parts, nr_parts);
-	}
+	mtd_device_parse_register(mtd, part_probes, 0, NULL, 0);
+
 	goto out;
 
  out_irq:
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
index be33b0f..737ef9a 100644
--- a/drivers/mtd/nand/cmx270_nand.c
+++ b/drivers/mtd/nand/cmx270_nand.c
@@ -51,8 +51,6 @@
 };
 #define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
 
-const char *part_probes[] = { "cmdlinepart", NULL };
-
 static u_char cmx270_read_byte(struct mtd_info *mtd)
 {
 	struct nand_chip *this = mtd->priv;
@@ -152,9 +150,6 @@
 static int __init cmx270_init(void)
 {
 	struct nand_chip *this;
-	const char *part_type;
-	struct mtd_partition *mtd_parts;
-	int mtd_parts_nb = 0;
 	int ret;
 
 	if (!(machine_is_armcore() && cpu_is_pxa27x()))
@@ -223,23 +218,9 @@
 		goto err_scan;
 	}
 
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-	mtd_parts_nb = parse_mtd_partitions(cmx270_nand_mtd, part_probes,
-					    &mtd_parts, 0);
-	if (mtd_parts_nb > 0)
-		part_type = "command line";
-	else
-		mtd_parts_nb = 0;
-#endif
-	if (!mtd_parts_nb) {
-		mtd_parts = partition_info;
-		mtd_parts_nb = NUM_PARTITIONS;
-		part_type = "static";
-	}
-
 	/* Register the partitions */
-	pr_notice("Using %s partition definition\n", part_type);
-	ret = mtd_device_register(cmx270_nand_mtd, mtd_parts, mtd_parts_nb);
+	ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, 0,
+					partition_info, NUM_PARTITIONS);
 	if (ret)
 		goto err_scan;
 
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index f59ad1f..414afa7 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -239,7 +239,8 @@
 	this->ecc.correct  = nand_correct_data;
 
 	/* Enable the following for a flash based bad block table */
-	this->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
+	this->bbt_options = NAND_BBT_USE_FLASH;
+	this->options = NAND_NO_AUTOINCR;
 
 	/* Scan to find existence of the device */
 	if (nand_scan(new_mtd, 1)) {
@@ -277,15 +278,11 @@
 	return 0;
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static int __init cs553x_init(void)
 {
 	int err = -ENXIO;
 	int i;
 	uint64_t val;
-	int mtd_parts_nb = 0;
-	struct mtd_partition *mtd_parts = NULL;
 
 	/* If the CPU isn't a Geode GX or LX, abort */
 	if (!is_geode())
@@ -315,13 +312,9 @@
 	   do mtdconcat etc. if we want to. */
 	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
 		if (cs553x_mtd[i]) {
-
 			/* If any devices registered, return success. Else the last error. */
-			mtd_parts_nb = parse_mtd_partitions(cs553x_mtd[i], part_probes, &mtd_parts, 0);
-			if (mtd_parts_nb > 0)
-				printk(KERN_NOTICE "Using command line partition definition\n");
-			mtd_device_register(cs553x_mtd[i], mtd_parts,
-					    mtd_parts_nb);
+			mtd_device_parse_register(cs553x_mtd[i], NULL, 0,
+						  NULL, 0);
 			err = 0;
 		}
 	}
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index 1f34951..c153e1f 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -57,7 +57,6 @@
 
 	struct device		*dev;
 	struct clk		*clk;
-	bool			partitioned;
 
 	bool			is_readmode;
 
@@ -530,8 +529,6 @@
 	int				ret;
 	uint32_t			val;
 	nand_ecc_modes_t		ecc_mode;
-	struct mtd_partition		*mtd_parts = NULL;
-	int				mtd_parts_nb = 0;
 
 	/* insist on board-specific configuration */
 	if (!pdata)
@@ -581,7 +578,9 @@
 	info->chip.chip_delay	= 0;
 	info->chip.select_chip	= nand_davinci_select_chip;
 
-	/* options such as NAND_USE_FLASH_BBT or 16-bit widths */
+	/* options such as NAND_BBT_USE_FLASH */
+	info->chip.bbt_options	= pdata->bbt_options;
+	/* options such as 16-bit widths */
 	info->chip.options	= pdata->options;
 	info->chip.bbt_td	= pdata->bbt_td;
 	info->chip.bbt_md	= pdata->bbt_md;
@@ -751,33 +750,8 @@
 	if (ret < 0)
 		goto err_scan;
 
-	if (mtd_has_cmdlinepart()) {
-		static const char *probes[] __initconst = {
-			"cmdlinepart", NULL
-		};
-
-		mtd_parts_nb = parse_mtd_partitions(&info->mtd, probes,
-						    &mtd_parts, 0);
-	}
-
-	if (mtd_parts_nb <= 0) {
-		mtd_parts = pdata->parts;
-		mtd_parts_nb = pdata->nr_parts;
-	}
-
-	/* Register any partitions */
-	if (mtd_parts_nb > 0) {
-		ret = mtd_device_register(&info->mtd, mtd_parts,
-					  mtd_parts_nb);
-		if (ret == 0)
-			info->partitioned = true;
-	}
-
-	/* If there's no partition info, just package the whole chip
-	 * as a single MTD device.
-	 */
-	if (!info->partitioned)
-		ret = mtd_device_register(&info->mtd, NULL, 0) ? -ENODEV : 0;
+	ret = mtd_device_parse_register(&info->mtd, NULL, 0,
+			pdata->parts, pdata->nr_parts);
 
 	if (ret < 0)
 		goto err_scan;
@@ -816,9 +790,6 @@
 static int __exit nand_davinci_remove(struct platform_device *pdev)
 {
 	struct davinci_nand_info *info = platform_get_drvdata(pdev);
-	int status;
-
-	status = mtd_device_unregister(&info->mtd);
 
 	spin_lock_irq(&davinci_nand_lock);
 	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index d527621..3984d48 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -1346,6 +1346,7 @@
 	 * */
 	denali->bbtskipbytes = ioread32(denali->flash_reg +
 						SPARE_AREA_SKIP_BYTES);
+	detect_max_banks(denali);
 	denali_nand_reset(denali);
 	iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
 	iowrite32(CHIP_EN_DONT_CARE__FLAG,
@@ -1356,7 +1357,6 @@
 	/* Should set value for these registers when init */
 	iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
 	iowrite32(1, denali->flash_reg + ECC_ENABLE);
-	detect_max_banks(denali);
 	denali_nand_timing_set(denali);
 	denali_irq_init(denali);
 }
@@ -1577,7 +1577,8 @@
 	denali->nand.bbt_md = &bbt_mirror_descr;
 
 	/* skip the scan for now until we have OOB read and write support */
-	denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
+	denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
+	denali->nand.options |= NAND_SKIP_BBTSCAN;
 	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
 
 	/* Denali Controller only support 15bit and 8bit ECC in MRST,
@@ -1676,7 +1677,6 @@
 	struct denali_nand_info *denali = pci_get_drvdata(dev);
 
 	nand_release(&denali->mtd);
-	mtd_device_unregister(&denali->mtd);
 
 	denali_irq_cleanup(dev->irq, denali);
 
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index e1b84cb..5780dba 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -133,7 +133,7 @@
 
 /*
  * The HW decoder in the DoC ASIC's provides us a error syndrome,
- * which we must convert to a standard syndrom usable by the generic
+ * which we must convert to a standard syndrome usable by the generic
  * Reed-Solomon library code.
  *
  * Fabrice Bellard figured this out in the old docecc code. I added
@@ -154,7 +154,7 @@
 	ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
 	parity = ecc[1];
 
-	/* Initialize the syndrom buffer */
+	/* Initialize the syndrome buffer */
 	for (i = 0; i < NROOTS; i++)
 		s[i] = ds[0];
 	/*
@@ -1032,7 +1032,7 @@
 		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
 	else
 		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
-	if (no_ecc_failures && (ret == -EBADMSG)) {
+	if (no_ecc_failures && mtd_is_eccerr(ret)) {
 		printk(KERN_ERR "suppressing ECC failure\n");
 		ret = 0;
 	}
@@ -1653,7 +1653,7 @@
 	nand->ecc.mode		= NAND_ECC_HW_SYNDROME;
 	nand->ecc.size		= 512;
 	nand->ecc.bytes		= 6;
-	nand->options		= NAND_USE_FLASH_BBT;
+	nand->bbt_options	= NAND_BBT_USE_FLASH;
 
 	doc->physadr		= physadr;
 	doc->virtadr		= virtadr;
diff --git a/drivers/mtd/nand/edb7312.c b/drivers/mtd/nand/edb7312.c
deleted file mode 100644
index 8400d0f..0000000
--- a/drivers/mtd/nand/edb7312.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*
- *  drivers/mtd/nand/edb7312.c
- *
- *  Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- *
- *  Derived from drivers/mtd/nand/autcpu12.c
- *       Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Overview:
- *   This is a device driver for the NAND flash device found on the
- *   CLEP7312 board which utilizes the Toshiba TC58V64AFT part. This is
- *   a 64Mibit (8MiB x 8 bits) NAND flash device.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <asm/io.h>
-#include <mach/hardware.h>	/* for CLPS7111_VIRT_BASE */
-#include <asm/sizes.h>
-#include <asm/hardware/clps7111.h>
-
-/*
- * MTD structure for EDB7312 board
- */
-static struct mtd_info *ep7312_mtd = NULL;
-
-/*
- * Values specific to the EDB7312 board (used with EP7312 processor)
- */
-#define EP7312_FIO_PBASE 0x10000000	/* Phys address of flash */
-#define EP7312_PXDR	0x0001	/*
-				 * IO offset to Port B data register
-				 * where the CLE, ALE and NCE pins
-				 * are wired to.
-				 */
-#define EP7312_PXDDR	0x0041	/*
-				 * IO offset to Port B data direction
-				 * register so we can control the IO
-				 * lines.
-				 */
-
-/*
- * Module stuff
- */
-
-static unsigned long ep7312_fio_pbase = EP7312_FIO_PBASE;
-static void __iomem *ep7312_pxdr = (void __iomem *)EP7312_PXDR;
-static void __iomem *ep7312_pxddr = (void __iomem *)EP7312_PXDDR;
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info[] = {
-	{.name = "EP7312 Nand Flash",
-	 .offset = 0,
-	 .size = 8 * 1024 * 1024}
-};
-
-#define NUM_PARTITIONS 1
-
-/*
- *	hardware specific access to control-lines
- *
- *	NAND_NCE: bit 0 -> bit 6 (bit 7 = 1)
- *	NAND_CLE: bit 1 -> bit 4
- *	NAND_ALE: bit 2 -> bit 5
- */
-static void ep7312_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
-	struct nand_chip *chip = mtd->priv;
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		unsigned char bits = 0x80;
-
-		bits |= (ctrl & (NAND_CLE | NAND_ALE)) << 3;
-		bits |= (ctrl & NAND_NCE) ? 0x00 : 0x40;
-
-		clps_writeb((clps_readb(ep7312_pxdr)  & 0xF0) | bits,
-			    ep7312_pxdr);
-	}
-	if (cmd != NAND_CMD_NONE)
-		writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- *	read device ready pin
- */
-static int ep7312_device_ready(struct mtd_info *mtd)
-{
-	return 1;
-}
-
-const char *part_probes[] = { "cmdlinepart", NULL };
-
-/*
- * Main initialization routine
- */
-static int __init ep7312_init(void)
-{
-	struct nand_chip *this;
-	const char *part_type = 0;
-	int mtd_parts_nb = 0;
-	struct mtd_partition *mtd_parts = 0;
-	void __iomem *ep7312_fio_base;
-
-	/* Allocate memory for MTD device structure and private data */
-	ep7312_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
-	if (!ep7312_mtd) {
-		printk("Unable to allocate EDB7312 NAND MTD device structure.\n");
-		return -ENOMEM;
-	}
-
-	/* map physical address */
-	ep7312_fio_base = ioremap(ep7312_fio_pbase, SZ_1K);
-	if (!ep7312_fio_base) {
-		printk("ioremap EDB7312 NAND flash failed\n");
-		kfree(ep7312_mtd);
-		return -EIO;
-	}
-
-	/* Get pointer to private data */
-	this = (struct nand_chip *)(&ep7312_mtd[1]);
-
-	/* Initialize structures */
-	memset(ep7312_mtd, 0, sizeof(struct mtd_info));
-	memset(this, 0, sizeof(struct nand_chip));
-
-	/* Link the private data with the MTD structure */
-	ep7312_mtd->priv = this;
-	ep7312_mtd->owner = THIS_MODULE;
-
-	/*
-	 * Set GPIO Port B control register so that the pins are configured
-	 * to be outputs for controlling the NAND flash.
-	 */
-	clps_writeb(0xf0, ep7312_pxddr);
-
-	/* insert callbacks */
-	this->IO_ADDR_R = ep7312_fio_base;
-	this->IO_ADDR_W = ep7312_fio_base;
-	this->cmd_ctrl = ep7312_hwcontrol;
-	this->dev_ready = ep7312_device_ready;
-	/* 15 us command delay time */
-	this->chip_delay = 15;
-
-	/* Scan to find existence of the device */
-	if (nand_scan(ep7312_mtd, 1)) {
-		iounmap((void *)ep7312_fio_base);
-		kfree(ep7312_mtd);
-		return -ENXIO;
-	}
-	ep7312_mtd->name = "edb7312-nand";
-	mtd_parts_nb = parse_mtd_partitions(ep7312_mtd, part_probes, &mtd_parts, 0);
-	if (mtd_parts_nb > 0)
-		part_type = "command line";
-	else
-		mtd_parts_nb = 0;
-	if (mtd_parts_nb == 0) {
-		mtd_parts = partition_info;
-		mtd_parts_nb = NUM_PARTITIONS;
-		part_type = "static";
-	}
-
-	/* Register the partitions */
-	printk(KERN_NOTICE "Using %s partition definition\n", part_type);
-	mtd_device_register(ep7312_mtd, mtd_parts, mtd_parts_nb);
-
-	/* Return happy */
-	return 0;
-}
-
-module_init(ep7312_init);
-
-/*
- * Clean up routine
- */
-static void __exit ep7312_cleanup(void)
-{
-	struct nand_chip *this = (struct nand_chip *)&ep7312_mtd[1];
-
-	/* Release resources, unregister device */
-	nand_release(ap7312_mtd);
-
-	/* Release io resource */
-	iounmap(this->IO_ADDR_R);
-
-	/* Free the MTD device structure */
-	kfree(ep7312_mtd);
-}
-
-module_exit(ep7312_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
-MODULE_DESCRIPTION("MTD map driver for Cogent EDB7312 board");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 33d8aad..eedd8ee 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -75,7 +75,6 @@
 	unsigned int use_mdr;    /* Non zero if the MDR is to be set      */
 	unsigned int oob;        /* Non zero if operating on OOB data     */
 	unsigned int counter;	 /* counter for the initializations	  */
-	char *oob_poi;           /* Place to write ECC after read back    */
 };
 
 /* These map to the positions used by the FCM hardware ECC generator */
@@ -244,6 +243,25 @@
 		return -EIO;
 	}
 
+	if (chip->ecc.mode != NAND_ECC_HW)
+		return 0;
+
+	if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
+		uint32_t lteccr = in_be32(&lbc->lteccr);
+		/*
+		 * If the command was a full page read and the ELBC
+		 * has the LTECCR register, then bits 12-15 (ppc order) of
+		 * LTECCR indicate which 512-byte sub-pages had fixed errors.
+		 * Bits 28-31 are uncorrectable errors, marked elsewhere.
+		 * For small page NAND only 1 bit is used.
+		 * If the ELBC doesn't have the LTECCR register, it reads 0.
+		 */
+		if (lteccr & 0x000F000F)
+			out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
+		if (lteccr & 0x000F0000)
+			mtd->ecc_stats.corrected++;
+	}
+
 	return 0;
 }
 
@@ -435,7 +453,6 @@
 
 	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
 	case NAND_CMD_PAGEPROG: {
-		int full_page;
 		dev_vdbg(priv->dev,
 		         "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
 			 "writing %d bytes.\n", elbc_fcm_ctrl->index);
@@ -445,34 +462,12 @@
 		 * write so the HW generates the ECC.
 		 */
 		if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
-		    elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize) {
+		    elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
 			out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
-			full_page = 0;
-		} else {
+		else
 			out_be32(&lbc->fbcr, 0);
-			full_page = 1;
-		}
 
 		fsl_elbc_run_command(mtd);
-
-		/* Read back the page in order to fill in the ECC for the
-		 * caller.  Is this really needed?
-		 */
-		if (full_page && elbc_fcm_ctrl->oob_poi) {
-			out_be32(&lbc->fbcr, 3);
-			set_addr(mtd, 6, page_addr, 1);
-
-			elbc_fcm_ctrl->read_bytes = mtd->writesize + 9;
-
-			fsl_elbc_do_read(chip, 1);
-			fsl_elbc_run_command(mtd);
-
-			memcpy_fromio(elbc_fcm_ctrl->oob_poi + 6,
-				&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], 3);
-			elbc_fcm_ctrl->index += 3;
-		}
-
-		elbc_fcm_ctrl->oob_poi = NULL;
 		return;
 	}
 
@@ -752,13 +747,8 @@
                                 struct nand_chip *chip,
                                 const uint8_t *buf)
 {
-	struct fsl_elbc_mtd *priv = chip->priv;
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
-
 	fsl_elbc_write_buf(mtd, buf, mtd->writesize);
 	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	elbc_fcm_ctrl->oob_poi = chip->oob_poi;
 }
 
 static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
@@ -791,8 +781,8 @@
 	chip->bbt_md = &bbt_mirror_descr;
 
 	/* set up nand options */
-	chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR |
-			NAND_USE_FLASH_BBT;
+	chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR;
+	chip->bbt_options = NAND_BBT_USE_FLASH;
 
 	chip->controller = &elbc_fcm_ctrl->controller;
 	chip->priv = priv;
@@ -829,7 +819,6 @@
 
 	elbc_fcm_ctrl->chips[priv->bank] = NULL;
 	kfree(priv);
-	kfree(elbc_fcm_ctrl);
 	return 0;
 }
 
@@ -842,13 +831,14 @@
 	struct resource res;
 	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
 	static const char *part_probe_types[]
-		= { "cmdlinepart", "RedBoot", NULL };
-	struct mtd_partition *parts;
+		= { "cmdlinepart", "RedBoot", "ofpart", NULL };
 	int ret;
 	int bank;
 	struct device *dev;
 	struct device_node *node = pdev->dev.of_node;
+	struct mtd_part_parser_data ppdata;
 
+	ppdata.of_node = pdev->dev.of_node;
 	if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
 		return -ENODEV;
 	lbc = fsl_lbc_ctrl_dev->regs;
@@ -934,17 +924,8 @@
 
 	/* First look for RedBoot table or partitions on the command
 	 * line, these take precedence over device tree information */
-	ret = parse_mtd_partitions(&priv->mtd, part_probe_types, &parts, 0);
-	if (ret < 0)
-		goto err;
-
-	if (ret == 0) {
-		ret = of_mtd_parse_partitions(priv->dev, node, &parts);
-		if (ret < 0)
-			goto err;
-	}
-
-	mtd_device_register(&priv->mtd, parts, ret);
+	mtd_device_parse_register(&priv->mtd, part_probe_types, &ppdata,
+				  NULL, 0);
 
 	printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
 	       (unsigned long long)res.start, priv->bank);
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 23752fd..b4f3cc9 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -158,7 +158,7 @@
 {
 	int ret;
 	struct device_node *flash_np;
-	static const char *part_types[] = { "cmdlinepart", NULL, };
+	struct mtd_part_parser_data ppdata;
 
 	fun->chip.IO_ADDR_R = fun->io_base;
 	fun->chip.IO_ADDR_W = fun->io_base;
@@ -192,18 +192,12 @@
 	if (ret)
 		goto err;
 
-	ret = parse_mtd_partitions(&fun->mtd, part_types, &fun->parts, 0);
-
-#ifdef CONFIG_MTD_OF_PARTS
-	if (ret == 0) {
-		ret = of_mtd_parse_partitions(fun->dev, flash_np, &fun->parts);
-		if (ret < 0)
-			goto err;
-	}
-#endif
-	ret = mtd_device_register(&fun->mtd, fun->parts, ret);
+	ppdata.of_node = flash_np;
+	ret = mtd_device_parse_register(&fun->mtd, NULL, &ppdata, NULL, 0);
 err:
 	of_node_put(flash_np);
+	if (ret)
+		kfree(fun->mtd.name);
 	return ret;
 }
 
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index e9b275a..e53b760 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -146,7 +146,7 @@
 	{
 		.name = "Root File System",
 		.offset = 0x460000,
-		.size = 0,
+		.size = MTDPART_SIZ_FULL,
 	},
 };
 
@@ -173,13 +173,10 @@
 	{
 		.name = "Root File System",
 		.offset = 0x800000,
-		.size = 0,
+		.size = MTDPART_SIZ_FULL,
 	},
 };
 
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
 
 /**
  * struct fsmc_nand_data - structure for FSMC NAND device state
@@ -187,8 +184,6 @@
  * @pid:		Part ID on the AMBA PrimeCell format
  * @mtd:		MTD info for a NAND flash.
  * @nand:		Chip related info for a NAND flash.
- * @partitions:		Partition info for a NAND Flash.
- * @nr_partitions:	Total number of partition of a NAND flash.
  *
  * @ecc_place:		ECC placing locations in oobfree type format.
  * @bank:		Bank number for probed device.
@@ -203,8 +198,6 @@
 	u32			pid;
 	struct mtd_info		mtd;
 	struct nand_chip	nand;
-	struct mtd_partition	*partitions;
-	unsigned int		nr_partitions;
 
 	struct fsmc_eccplace	*ecc_place;
 	unsigned int		bank;
@@ -716,65 +709,17 @@
 	 * platform data,
 	 * default partition information present in driver.
 	 */
-#ifdef CONFIG_MTD_CMDLINE_PARTS
 	/*
-	 * Check if partition info passed via command line
+	 * Check for partition info passed
 	 */
 	host->mtd.name = "nand";
-	host->nr_partitions = parse_mtd_partitions(&host->mtd, part_probes,
-			&host->partitions, 0);
-	if (host->nr_partitions <= 0) {
-#endif
-		/*
-		 * Check if partition info passed via command line
-		 */
-		if (pdata->partitions) {
-			host->partitions = pdata->partitions;
-			host->nr_partitions = pdata->nr_partitions;
-		} else {
-			struct mtd_partition *partition;
-			int i;
-
-			/* Select the default partitions info */
-			switch (host->mtd.size) {
-			case 0x01000000:
-			case 0x02000000:
-			case 0x04000000:
-				host->partitions = partition_info_16KB_blk;
-				host->nr_partitions =
-					sizeof(partition_info_16KB_blk) /
-					sizeof(struct mtd_partition);
-				break;
-			case 0x08000000:
-			case 0x10000000:
-			case 0x20000000:
-			case 0x40000000:
-				host->partitions = partition_info_128KB_blk;
-				host->nr_partitions =
-					sizeof(partition_info_128KB_blk) /
-					sizeof(struct mtd_partition);
-				break;
-			default:
-				ret = -ENXIO;
-				pr_err("Unsupported NAND size\n");
-				goto err_probe;
-			}
-
-			partition = host->partitions;
-			for (i = 0; i < host->nr_partitions; i++, partition++) {
-				if (partition->size == 0) {
-					partition->size = host->mtd.size -
-						partition->offset;
-					break;
-				}
-			}
-		}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-	}
-#endif
-
-	ret = mtd_device_register(&host->mtd, host->partitions,
-				  host->nr_partitions);
+	ret = mtd_device_parse_register(&host->mtd, NULL, 0,
+			host->mtd.size <= 0x04000000 ?
+				partition_info_16KB_blk :
+				partition_info_128KB_blk,
+			host->mtd.size <= 0x04000000 ?
+				ARRAY_SIZE(partition_info_16KB_blk) :
+				ARRAY_SIZE(partition_info_128KB_blk));
 	if (ret)
 		goto err_probe;
 
@@ -822,7 +767,7 @@
 	platform_set_drvdata(pdev, NULL);
 
 	if (host) {
-		mtd_device_unregister(&host->mtd);
+		nand_release(&host->mtd);
 		clk_disable(host->clk);
 		clk_put(host->clk);
 
diff --git a/drivers/mtd/nand/gpmi-nand/Makefile b/drivers/mtd/nand/gpmi-nand/Makefile
new file mode 100644
index 0000000..3a46248
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
+gpmi_nand-objs += gpmi-nand.o
+gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
new file mode 100644
index 0000000..4effb8c
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -0,0 +1,84 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_BCH_REGS_H
+#define __GPMI_NAND_BCH_REGS_H
+
+#define HW_BCH_CTRL				0x00000000
+#define HW_BCH_CTRL_SET				0x00000004
+#define HW_BCH_CTRL_CLR				0x00000008
+#define HW_BCH_CTRL_TOG				0x0000000c
+
+#define BM_BCH_CTRL_COMPLETE_IRQ_EN		(1 << 8)
+#define BM_BCH_CTRL_COMPLETE_IRQ		(1 << 0)
+
+#define HW_BCH_STATUS0				0x00000010
+#define HW_BCH_MODE				0x00000020
+#define HW_BCH_ENCODEPTR			0x00000030
+#define HW_BCH_DATAPTR				0x00000040
+#define HW_BCH_METAPTR				0x00000050
+#define HW_BCH_LAYOUTSELECT			0x00000070
+
+#define HW_BCH_FLASH0LAYOUT0			0x00000080
+
+#define BP_BCH_FLASH0LAYOUT0_NBLOCKS		24
+#define BM_BCH_FLASH0LAYOUT0_NBLOCKS	(0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
+#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v)		\
+	(((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
+
+#define BP_BCH_FLASH0LAYOUT0_META_SIZE		16
+#define BM_BCH_FLASH0LAYOUT0_META_SIZE	(0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v)	\
+	(((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
+					 & BM_BCH_FLASH0LAYOUT0_META_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT0_ECC0		12
+#define BM_BCH_FLASH0LAYOUT0_ECC0	(0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v)		\
+	(((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0)
+
+#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE		0
+#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE		\
+			(0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v)	\
+	(((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\
+					 & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+
+#define HW_BCH_FLASH0LAYOUT1			0x00000090
+
+#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE		16
+#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE		\
+			(0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v)	\
+	(((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
+					 & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT1_ECCN		12
+#define BM_BCH_FLASH0LAYOUT1_ECCN	(0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v)		\
+	(((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN)
+
+#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE		0
+#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE		\
+			(0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v)	\
+	(((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
+					 & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#endif
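
These definitions follow the usual Freescale BP_/BM_/BF_ convention: BP_* is a
field's bit position, BM_* its mask, and BF_*(v) shifts a value into the field
and masks off any overflow. A quick sanity check against the 4-bit ECC0 field
at bit 12:

	BF_BCH_FLASH0LAYOUT0_ECC0(0x9)
		== ((0x9 << 12) & (0xf << 12))
		== 0x9000
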
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
new file mode 100644
index 0000000..de4db760
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -0,0 +1,1057 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/mtd/gpmi-nand.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <mach/mxs.h>
+
+#include "gpmi-nand.h"
+#include "gpmi-regs.h"
+#include "bch-regs.h"
+
+struct timing_threshod timing_default_threshold = {
+	.max_data_setup_cycles       = (BM_GPMI_TIMING0_DATA_SETUP >>
+						BP_GPMI_TIMING0_DATA_SETUP),
+	.internal_data_setup_in_ns   = 0,
+	.max_sample_delay_factor     = (BM_GPMI_CTRL1_RDN_DELAY >>
+						BP_GPMI_CTRL1_RDN_DELAY),
+	.max_dll_clock_period_in_ns  = 32,
+	.max_dll_delay_in_ns         = 16,
+};
+
+/*
+ * Clear the bit and poll it cleared.  This is usually called with
+ * a reset address and mask being either SFTRST(bit 31) or CLKGATE
+ * (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+	int timeout = 0x400;
+
+	/* clear the bit */
+	__mxs_clrl(mask, addr);
+
+	/*
+	 * SFTRST needs 3 GPMI clocks to settle; the reference manual
+	 * recommends waiting 1us.
+	 */
+	udelay(1);
+
+	/* poll the bit becoming clear */
+	while ((readl(addr) & mask) && --timeout)
+		/* nothing */;
+
+	return !timeout;
+}
+
+#define MODULE_CLKGATE		(1 << 30)
+#define MODULE_SFTRST		(1 << 31)
+/*
+ * The current mxs_reset_block() will do two things:
+ *  [1] enable the module.
+ *  [2] reset the module.
+ *
+ * In most cases this is fine, but there is a hardware bug in the BCH block:
+ * if you soft-reset the BCH block, it becomes unusable until the next hard
+ * reset. This happens in NAND boot mode, where the on-chip ROM initializes
+ * the BCH block itself; if the driver then resets the BCH again, the BCH
+ * stops working and you will see a DMA timeout.
+ *
+ * To avoid this bug, we add a new parameter, `just_enable`, to
+ * mxs_reset_block() and rewrite it here.
+ */
+int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+{
+	int ret;
+	int timeout = 0x400;
+
+	/* clear and poll SFTRST */
+	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+	if (unlikely(ret))
+		goto error;
+
+	/* clear CLKGATE */
+	__mxs_clrl(MODULE_CLKGATE, reset_addr);
+
+	if (!just_enable) {
+		/* set SFTRST to reset the block */
+		__mxs_setl(MODULE_SFTRST, reset_addr);
+		udelay(1);
+
+		/* poll CLKGATE becoming set */
+		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
+			/* nothing */;
+		if (unlikely(!timeout))
+			goto error;
+	}
+
+	/* clear and poll SFTRST */
+	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+	if (unlikely(ret))
+		goto error;
+
+	/* clear and poll CLKGATE */
+	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
+	if (unlikely(ret))
+		goto error;
+
+	return 0;
+
+error:
+	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+	return -ETIMEDOUT;
+}
+
+int gpmi_init(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	int ret;
+
+	ret = clk_enable(r->clock);
+	if (ret)
+		goto err_out;
+	ret = gpmi_reset_block(r->gpmi_regs, false);
+	if (ret)
+		goto err_out;
+
+	/* Choose NAND mode. */
+	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+	/* Set the IRQ polarity. */
+	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
+				r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/* Disable Write-Protection. */
+	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/* Select BCH ECC. */
+	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	clk_disable(r->clock);
+	return 0;
+err_out:
+	return ret;
+}
+
+/* This function is useful for debugging; it is called only when a bug occurs. */
+void gpmi_dump_info(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	struct bch_geometry *geo = &this->bch_geometry;
+	u32 reg;
+	int i;
+
+	pr_err("Show GPMI registers :\n");
+	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
+		reg = readl(r->gpmi_regs + i * 0x10);
+		pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+	}
+
+	/* start to print out the BCH info */
+	pr_err("BCH Geometry :\n");
+	pr_err("GF length              : %u\n", geo->gf_len);
+	pr_err("ECC Strength           : %u\n", geo->ecc_strength);
+	pr_err("Page Size in Bytes     : %u\n", geo->page_size);
+	pr_err("Metadata Size in Bytes : %u\n", geo->metadata_size);
+	pr_err("ECC Chunk Size in Bytes: %u\n", geo->ecc_chunk_size);
+	pr_err("ECC Chunk Count        : %u\n", geo->ecc_chunk_count);
+	pr_err("Payload Size in Bytes  : %u\n", geo->payload_size);
+	pr_err("Auxiliary Size in Bytes: %u\n", geo->auxiliary_size);
+	pr_err("Auxiliary Status Offset: %u\n", geo->auxiliary_status_offset);
+	pr_err("Block Mark Byte Offset : %u\n", geo->block_mark_byte_offset);
+	pr_err("Block Mark Bit Offset  : %u\n", geo->block_mark_bit_offset);
+}
+
+/* Configures the geometry for BCH.  */
+int bch_set_geometry(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	struct bch_geometry *bch_geo = &this->bch_geometry;
+	unsigned int block_count;
+	unsigned int block_size;
+	unsigned int metadata_size;
+	unsigned int ecc_strength;
+	unsigned int page_size;
+	int ret;
+
+	if (common_nfc_set_geometry(this))
+		return !0;
+
+	block_count   = bch_geo->ecc_chunk_count - 1;
+	block_size    = bch_geo->ecc_chunk_size;
+	metadata_size = bch_geo->metadata_size;
+	ecc_strength  = bch_geo->ecc_strength >> 1;
+	page_size     = bch_geo->page_size;
+
+	ret = clk_enable(r->clock);
+	if (ret)
+		goto err_out;
+
+	ret = gpmi_reset_block(r->bch_regs, true);
+	if (ret)
+		goto err_out;
+
+	/* Configure layout 0. */
+	writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
+			| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
+			| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength)
+			| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size),
+			r->bch_regs + HW_BCH_FLASH0LAYOUT0);
+
+	writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
+			| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength)
+			| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size),
+			r->bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+	/* Set *all* chip selects to use layout 0. */
+	writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
+
+	/* Enable interrupts. */
+	writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+				r->bch_regs + HW_BCH_CTRL_SET);
+
+	clk_disable(r->clock);
+	return 0;
+err_out:
+	return ret;
+}
+
+/* Converts time in nanoseconds to cycles. */
+static unsigned int ns_to_cycles(unsigned int time,
+			unsigned int period, unsigned int min)
+{
+	unsigned int k;
+
+	k = (time + period - 1) / period;
+	return max(k, min);
+}
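+
+/*
+ * Worked example (illustrative numbers): ns_to_cycles(25, 10, 1) computes
+ * k = (25 + 10 - 1) / 10 = 3, i.e. a round-up division; the max() with
+ * `min` only matters when the requested time is under one clock period.
+ */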
+
+/* Apply timing to current hardware conditions. */
+static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
+					struct gpmi_nfc_hardware_timing *hw)
+{
+	struct gpmi_nand_platform_data *pdata = this->pdata;
+	struct timing_threshod *nfc = &timing_default_threshold;
+	struct nand_chip *nand = &this->nand;
+	struct nand_timing target = this->timing;
+	bool improved_timing_is_available;
+	unsigned long clock_frequency_in_hz;
+	unsigned int clock_period_in_ns;
+	bool dll_use_half_periods;
+	unsigned int dll_delay_shift;
+	unsigned int max_sample_delay_in_ns;
+	unsigned int address_setup_in_cycles;
+	unsigned int data_setup_in_ns;
+	unsigned int data_setup_in_cycles;
+	unsigned int data_hold_in_cycles;
+	int ideal_sample_delay_in_ns;
+	unsigned int sample_delay_factor;
+	int tEYE;
+	unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns;
+	unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns;
+
+	/*
+	 * If there are multiple chips, we need to relax the timings to allow
+	 * for signal distortion due to higher capacitance.
+	 */
+	if (nand->numchips > 2) {
+		target.data_setup_in_ns    += 10;
+		target.data_hold_in_ns     += 10;
+		target.address_setup_in_ns += 10;
+	} else if (nand->numchips > 1) {
+		target.data_setup_in_ns    += 5;
+		target.data_hold_in_ns     += 5;
+		target.address_setup_in_ns += 5;
+	}
+
+	/* Check if improved timing information is available. */
+	improved_timing_is_available =
+		(target.tREA_in_ns  >= 0) &&
+		(target.tRLOH_in_ns >= 0) &&
+		(target.tRHOH_in_ns >= 0);
+
+	/* Inspect the clock. */
+	clock_frequency_in_hz = nfc->clock_frequency_in_hz;
+	clock_period_in_ns    = 1000000000 / clock_frequency_in_hz;
+
+	/*
+	 * The NFC quantizes setup and hold parameters in terms of clock cycles.
+	 * Here, we quantize the setup and hold timing parameters to the
+	 * next-highest clock period to make sure we apply at least the
+	 * specified times.
+	 *
+	 * For data setup and data hold, the hardware interprets a value of zero
+	 * as the largest possible delay. This is not what's intended by a zero
+	 * in the input parameter, so we impose a minimum of one cycle.
+	 */
+	data_setup_in_cycles    = ns_to_cycles(target.data_setup_in_ns,
+							clock_period_in_ns, 1);
+	data_hold_in_cycles     = ns_to_cycles(target.data_hold_in_ns,
+							clock_period_in_ns, 1);
+	address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
+							clock_period_in_ns, 0);
+
+	/*
+	 * The clock's period affects the sample delay in a number of ways:
+	 *
+	 * (1) The NFC HAL tells us the maximum clock period the sample delay
+	 *     DLL can tolerate. If the clock period is greater than half that
+	 *     maximum, we must configure the DLL to be driven by half periods.
+	 *
+	 * (2) We need to convert from an ideal sample delay, in ns, to a
+	 *     "sample delay factor," which the NFC uses. This factor depends on
+	 *     whether we're driving the DLL with full or half periods.
+	 *     Paraphrasing the reference manual:
+	 *
+	 *         AD = SDF x 0.125 x RP
+	 *
+	 * where:
+	 *
+	 *     AD   is the applied delay, in ns.
+	 *     SDF  is the sample delay factor, which is dimensionless.
+	 *     RP   is the reference period, in ns, which is a full clock period
+	 *          if the DLL is being driven by full periods, or half that if
+	 *          the DLL is being driven by half periods.
+	 *
+	 * Let's re-arrange this in a way that's more useful to us:
+	 *
+	 *                        8
+	 *         SDF  =  AD x ----
+	 *                       RP
+	 *
+	 * The reference period is either the clock period or half that, so this
+	 * is:
+	 *
+	 *                        8       AD x DDF
+	 *         SDF  =  AD x -----  =  --------
+	 *                      f x P        P
+	 *
+	 * where:
+	 *
+	 *       f  is 1 or 1/2, depending on how we're driving the DLL.
+	 *       P  is the clock period.
+	 *     DDF  is the DLL Delay Factor, a dimensionless value that
+	 *          incorporates all the constants in the conversion.
+	 *
+	 * DDF will be either 8 or 16, both of which are powers of two. We can
+	 * reduce the cost of this conversion by using bit shifts instead of
+	 * multiplication or division. Thus:
+	 *
+	 *                 AD << DDS
+	 *         SDF  =  ---------
+	 *                     P
+	 *
+	 *     or
+	 *
+	 *         AD  =  (SDF >> DDS) x P
+	 *
+	 * where:
+	 *
+	 *     DDS  is the DLL Delay Shift, the logarithm to base 2 of the DDF.
+	 */
+	if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
+		dll_use_half_periods = true;
+		dll_delay_shift      = 3 + 1;
+	} else {
+		dll_use_half_periods = false;
+		dll_delay_shift      = 3;
+	}
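+
+	/*
+	 * A concrete example (illustrative numbers only): with a ~41 ns
+	 * clock period and full-period drive (DDS == 3), an ideal delay
+	 * of 10 ns maps to SDF = (10 << 3) / 41, which rounds up to 2;
+	 * the hardware then applies AD = (2 * 41) >> 3 = ~10 ns.
+	 */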
+
+	/*
+	 * Compute the maximum sample delay the NFC allows, under current
+	 * conditions. If the clock is running too slowly, no sample delay is
+	 * possible.
+	 */
+	if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
+		max_sample_delay_in_ns = 0;
+	else {
+		/*
+		 * Compute the delay implied by the largest sample delay factor
+		 * the NFC allows.
+		 */
+		max_sample_delay_in_ns =
+			(nfc->max_sample_delay_factor * clock_period_in_ns) >>
+								dll_delay_shift;
+
+		/*
+		 * Check if the implied sample delay is larger than what the
+		 * NFC actually allows.
+		 */
+		if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
+			max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
+	}
+
+	/*
+	 * Check if improved timing information is available. If not, we have to
+	 * use a less-sophisticated algorithm.
+	 */
+	if (!improved_timing_is_available) {
+		/*
+		 * Fold the read setup time required by the NFC into the ideal
+		 * sample delay.
+		 */
+		ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
+						nfc->internal_data_setup_in_ns;
+
+		/*
+		 * The ideal sample delay may be greater than the maximum
+		 * allowed by the NFC. If so, we can trade off sample delay time
+		 * for more data setup time.
+		 *
+		 * In each iteration of the following loop, we add a cycle to
+		 * the data setup time and subtract a corresponding amount from
+		 * the sample delay until we've satisfied the constraints or
+		 * can't do any better.
+		 */
+		while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+			data_setup_in_cycles++;
+			ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+			if (ideal_sample_delay_in_ns < 0)
+				ideal_sample_delay_in_ns = 0;
+
+		}
+
+		/*
+		 * Compute the sample delay factor that corresponds most closely
+		 * to the ideal sample delay. If the result is too large for the
+		 * NFC, use the maximum value.
+		 *
+		 * Notice that we use the ns_to_cycles function to compute the
+		 * sample delay factor. We do this because the form of the
+		 * computation is the same as that for calculating cycles.
+		 */
+		sample_delay_factor =
+			ns_to_cycles(
+				ideal_sample_delay_in_ns << dll_delay_shift,
+							clock_period_in_ns, 0);
+
+		if (sample_delay_factor > nfc->max_sample_delay_factor)
+			sample_delay_factor = nfc->max_sample_delay_factor;
+
+		/* Skip to the part where we return our results. */
+		goto return_results;
+	}
+
+	/*
+	 * If control arrives here, we have more detailed timing information,
+	 * so we can use a better algorithm.
+	 */
+
+	/*
+	 * Fold the read setup time required by the NFC into the maximum
+	 * propagation delay.
+	 */
+	max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;
+
+	/*
+	 * Earlier, we computed the number of clock cycles required to satisfy
+	 * the data setup time. Now, we need to know the actual nanoseconds.
+	 */
+	data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;
+
+	/*
+	 * Compute tEYE, the width of the data eye when reading from the NAND
+	 * Flash. The eye width is fundamentally determined by the data setup
+	 * time, perturbed by propagation delays and some characteristics of the
+	 * NAND Flash device.
+	 *
+	 * start of the eye = max_prop_delay + tREA
+	 * end of the eye   = min_prop_delay + tRHOH + data_setup
+	 */
+	tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
+							(int)data_setup_in_ns;
+
+	tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;
+
+	/*
+	 * The eye must be open. If it's not, we can try to open it by
+	 * increasing its main forcer, the data setup time.
+	 *
+	 * In each iteration of the following loop, we increase the data setup
+	 * time by a single clock cycle. We do this until either the eye is
+	 * open or we run into NFC limits.
+	 */
+	while ((tEYE <= 0) &&
+			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+		/* Give a cycle to data setup. */
+		data_setup_in_cycles++;
+		/* Synchronize the data setup time with the cycles. */
+		data_setup_in_ns += clock_period_in_ns;
+		/* Adjust tEYE accordingly. */
+		tEYE += clock_period_in_ns;
+	}
+
+	/*
+	 * When control arrives here, the eye is open. The ideal time to sample
+	 * the data is in the center of the eye:
+	 *
+	 *     end of the eye + start of the eye
+	 *     ---------------------------------  -  data_setup
+	 *                    2
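+	 *
+	 * Substituting the eye boundaries computed above, this is
+	 *
+	 *     (max_prop + tREA) + (min_prop + tRHOH + data_setup)
+	 *     ---------------------------------------------------  -  data_setup
+	 *                              2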
+	 *
+	 * After some algebra, this simplifies to the code immediately below.
+	 */
+	ideal_sample_delay_in_ns =
+		((int)max_prop_delay_in_ns +
+			(int)target.tREA_in_ns +
+				(int)min_prop_delay_in_ns +
+					(int)target.tRHOH_in_ns -
+						(int)data_setup_in_ns) >> 1;
+
+	/*
+	 * The following figure illustrates some aspects of a NAND Flash read:
+	 *
+	 *
+	 *           __                   _____________________________________
+	 * RDN         \_________________/
+	 *
+	 *                                         <---- tEYE ----->
+	 *                                        /-----------------\
+	 * Read Data ----------------------------<                   >---------
+	 *                                        \-----------------/
+	 *             ^                 ^                 ^              ^
+	 *             |                 |                 |              |
+	 *             |<--Data Setup -->|<--Delay Time -->|              |
+	 *             |                 |                 |              |
+	 *             |                 |                                |
+	 *             |                 |<--   Quantized Delay Time   -->|
+	 *             |                 |                                |
+	 *
+	 *
+	 * We have some issues we must now address:
+	 *
+	 * (1) The *ideal* sample delay time must not be negative. If it is, we
+	 *     jam it to zero.
+	 *
+	 * (2) The *ideal* sample delay time must not be greater than that
+	 *     allowed by the NFC. If it is, we can increase the data setup
+	 *     time, which will reduce the delay between the end of the data
+	 *     setup and the center of the eye. It will also make the eye
+	 *     larger, which might help with the next issue...
+	 *
+	 * (3) The *quantized* sample delay time must not fall either before the
+	 *     eye opens or after it closes (the latter is the problem
+	 *     illustrated in the above figure).
+	 */
+
+	/* Jam a negative ideal sample delay to zero. */
+	if (ideal_sample_delay_in_ns < 0)
+		ideal_sample_delay_in_ns = 0;
+
+	/*
+	 * Extend the data setup as needed to reduce the ideal sample delay
+	 * below the maximum permitted by the NFC.
+	 */
+	while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+		/* Give a cycle to data setup. */
+		data_setup_in_cycles++;
+		/* Synchronize the data setup time with the cycles. */
+		data_setup_in_ns += clock_period_in_ns;
+		/* Adjust tEYE accordingly. */
+		tEYE += clock_period_in_ns;
+
+		/*
+		 * Decrease the ideal sample delay by one half cycle, to keep it
+		 * in the middle of the eye.
+		 */
+		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+		/* Jam a negative ideal sample delay to zero. */
+		if (ideal_sample_delay_in_ns < 0)
+			ideal_sample_delay_in_ns = 0;
+	}
+
+	/*
+	 * Compute the sample delay factor that corresponds to the ideal sample
+	 * delay. If the result is too large, then use the maximum allowed
+	 * value.
+	 *
+	 * Notice that we use the ns_to_cycles function to compute the sample
+	 * delay factor. We do this because the form of the computation is the
+	 * same as that for calculating cycles.
+	 */
+	sample_delay_factor =
+		ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
+							clock_period_in_ns, 0);
+
+	if (sample_delay_factor > nfc->max_sample_delay_factor)
+		sample_delay_factor = nfc->max_sample_delay_factor;
+
+	/*
+	 * These macros conveniently encapsulate a computation we'll use to
+	 * continuously evaluate whether or not the data sample delay is inside
+	 * the eye.
+	 */
+	#define IDEAL_DELAY  ((int) ideal_sample_delay_in_ns)
+
+	#define QUANTIZED_DELAY  \
+		((int) ((sample_delay_factor * clock_period_in_ns) >> \
+							dll_delay_shift))
+
+	#define DELAY_ERROR  (abs(QUANTIZED_DELAY - IDEAL_DELAY))
+
+	#define SAMPLE_IS_NOT_WITHIN_THE_EYE  (DELAY_ERROR > (tEYE >> 1))
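+
+	/*
+	 * In other words, the sample point lies within the eye exactly when
+	 * the quantized delay is within half the eye width of the ideal
+	 * (eye-center) delay.
+	 */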
+
+	/*
+	 * While the quantized sample time falls outside the eye, reduce the
+	 * sample delay or extend the data setup to move the sampling point back
+	 * toward the eye. Do not allow the number of data setup cycles to
+	 * exceed the maximum allowed by the NFC.
+	 */
+	while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
+			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+		/*
+		 * If control arrives here, the quantized sample delay falls
+		 * outside the eye. Check if it's before the eye opens, or after
+		 * the eye closes.
+		 */
+		if (QUANTIZED_DELAY > IDEAL_DELAY) {
+			/*
+			 * If control arrives here, the quantized sample delay
+			 * falls after the eye closes. Decrease the quantized
+			 * delay time and then go back to re-evaluate.
+			 */
+			if (sample_delay_factor != 0)
+				sample_delay_factor--;
+			continue;
+		}
+
+		/*
+		 * If control arrives here, the quantized sample delay falls
+		 * before the eye opens. Shift the sample point by increasing
+		 * data setup time. This will also make the eye larger.
+		 */
+
+		/* Give a cycle to data setup. */
+		data_setup_in_cycles++;
+		/* Synchronize the data setup time with the cycles. */
+		data_setup_in_ns += clock_period_in_ns;
+		/* Adjust tEYE accordingly. */
+		tEYE += clock_period_in_ns;
+
+		/*
+		 * Decrease the ideal sample delay by one half cycle, to keep it
+		 * in the middle of the eye.
+		 */
+		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+		/* ...and one less period for the delay time. */
+		ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+		/* Jam a negative ideal sample delay to zero. */
+		if (ideal_sample_delay_in_ns < 0)
+			ideal_sample_delay_in_ns = 0;
+
+		/*
+		 * We have a new ideal sample delay, so re-compute the quantized
+		 * delay.
+		 */
+		sample_delay_factor =
+			ns_to_cycles(
+				ideal_sample_delay_in_ns << dll_delay_shift,
+							clock_period_in_ns, 0);
+
+		if (sample_delay_factor > nfc->max_sample_delay_factor)
+			sample_delay_factor = nfc->max_sample_delay_factor;
+	}
+
+	/* Control arrives here when we're ready to return our results. */
+return_results:
+	hw->data_setup_in_cycles    = data_setup_in_cycles;
+	hw->data_hold_in_cycles     = data_hold_in_cycles;
+	hw->address_setup_in_cycles = address_setup_in_cycles;
+	hw->use_half_periods        = dll_use_half_periods;
+	hw->sample_delay_factor     = sample_delay_factor;
+
+	/* Return success. */
+	return 0;
+}
+
+/* Begin the I/O */
+void gpmi_begin(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	struct timing_threshod *nfc = &timing_default_threshold;
+	unsigned char  *gpmi_regs = r->gpmi_regs;
+	unsigned int   clock_period_in_ns;
+	uint32_t       reg;
+	unsigned int   dll_wait_time_in_us;
+	struct gpmi_nfc_hardware_timing  hw;
+	int ret;
+
+	/* Enable the clock. */
+	ret = clk_enable(r->clock);
+	if (ret) {
+		pr_err("Failed to enable the clock\n");
+		goto err_out;
+	}
+
+	/* set ready/busy timeout */
+	writel(0x500 << BP_GPMI_TIMING1_BUSY_TIMEOUT,
+		gpmi_regs + HW_GPMI_TIMING1);
+
+	/* Get the timing information we need. */
+	nfc->clock_frequency_in_hz = clk_get_rate(r->clock);
+	clock_period_in_ns = 1000000000 / nfc->clock_frequency_in_hz;
+
+	gpmi_nfc_compute_hardware_timing(this, &hw);
+
+	/* Set up all the simple timing parameters. */
+	reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
+		BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles)         |
+		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);
+
+	writel(reg, gpmi_regs + HW_GPMI_TIMING0);
+
+	/*
+	 * DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD.
+	 */
+	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+	/* Clear out the DLL control fields. */
+	writel(BM_GPMI_CTRL1_RDN_DELAY,   gpmi_regs + HW_GPMI_CTRL1_CLR);
+	writel(BM_GPMI_CTRL1_HALF_PERIOD, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+	/* If no sample delay is called for, return immediately. */
+	if (!hw.sample_delay_factor)
+		return;
+
+	/* Configure the HALF_PERIOD flag. */
+	if (hw.use_half_periods)
+		writel(BM_GPMI_CTRL1_HALF_PERIOD,
+						gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/* Set the delay factor. */
+	writel(BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor),
+						gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/* Enable the DLL. */
+	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/*
+	 * After we enable the GPMI DLL, we have to wait 64 clock cycles before
+	 * we can use the GPMI.
+	 *
+	 * Calculate the amount of time we need to wait, in microseconds.
+	 */
+	dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;
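+
+	/*
+	 * For example (illustrative): at a ~41 ns period this works out to
+	 * (41 * 64) / 1000 = 2 us. The clamp below guards against integer
+	 * division yielding zero at very fast clocks.
+	 */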
+
+	if (!dll_wait_time_in_us)
+		dll_wait_time_in_us = 1;
+
+	/* Wait for the DLL to settle. */
+	udelay(dll_wait_time_in_us);
+
+err_out:
+	return;
+}
+
+void gpmi_end(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	clk_disable(r->clock);
+}
+
+/* Clears a BCH interrupt. */
+void gpmi_clear_bch(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
+}
+
+/* Returns the Ready/Busy status of the given chip. */
+int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
+{
+	struct resources *r = &this->resources;
+	uint32_t mask = 0;
+	uint32_t reg = 0;
+
+	if (GPMI_IS_MX23(this)) {
+		mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
+		reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
+	} else if (GPMI_IS_MX28(this)) {
+		mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
+		reg = readl(r->gpmi_regs + HW_GPMI_STAT);
+	} else
+		pr_err("unknown arch.\n");
+	return reg & mask;
+}
+
+static inline void set_dma_type(struct gpmi_nand_data *this,
+					enum dma_ops_type type)
+{
+	this->last_dma_type = this->dma_type;
+	this->dma_type = type;
+}
+
+int gpmi_send_command(struct gpmi_nand_data *this)
+{
+	struct dma_chan *channel = get_dma_chan(this);
+	struct dma_async_tx_descriptor *desc;
+	struct scatterlist *sgl;
+	int chip = this->current_chip;
+	u32 pio[3];
+
+	/* [1] send out the PIO words */
+	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
+		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
+		| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
+	pio[1] = pio[2] = 0;
+	desc = channel->device->device_prep_slave_sg(channel,
+					(struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_NONE, 0);
+	if (!desc) {
+		pr_err("step 1 error\n");
+		return -1;
+	}
+
+	/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
+	sgl = &this->cmd_sgl;
+
+	sg_init_one(sgl, this->cmd_buffer, this->command_length);
+	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
+	desc = channel->device->device_prep_slave_sg(channel,
+					sgl, 1, DMA_TO_DEVICE, 1);
+	if (!desc) {
+		pr_err("step 2 error\n");
+		return -1;
+	}
+
+	/* [3] submit the DMA */
+	set_dma_type(this, DMA_FOR_COMMAND);
+	return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_data(struct gpmi_nand_data *this)
+{
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *channel = get_dma_chan(this);
+	int chip = this->current_chip;
+	uint32_t command_mode;
+	uint32_t address;
+	u32 pio[2];
+
+	/* [1] PIO */
+	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(address)
+		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+	pio[1] = 0;
+	desc = channel->device->device_prep_slave_sg(channel,
+					(struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_NONE, 0);
+	if (!desc) {
+		pr_err("step 1 error\n");
+		return -1;
+	}
+
+	/* [2] send DMA request */
+	prepare_data_dma(this, DMA_TO_DEVICE);
+	desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
+						1, DMA_TO_DEVICE, 1);
+	if (!desc) {
+		pr_err("step 2 error\n");
+		return -1;
+	}
+	/* [3] submit the DMA */
+	set_dma_type(this, DMA_FOR_WRITE_DATA);
+	return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_read_data(struct gpmi_nand_data *this)
+{
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *channel = get_dma_chan(this);
+	int chip = this->current_chip;
+	u32 pio[2];
+
+	/* [1] : send PIO */
+	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+	pio[1] = 0;
+	desc = channel->device->device_prep_slave_sg(channel,
+					(struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_NONE, 0);
+	if (!desc) {
+		pr_err("step 1 error\n");
+		return -1;
+	}
+
+	/* [2] : send DMA request */
+	prepare_data_dma(this, DMA_FROM_DEVICE);
+	desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
+						1, DMA_FROM_DEVICE, 1);
+	if (!desc) {
+		pr_err("step 2 error\n");
+		return -1;
+	}
+
+	/* [3] : submit the DMA */
+	set_dma_type(this, DMA_FOR_READ_DATA);
+	return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_page(struct gpmi_nand_data *this,
+			dma_addr_t payload, dma_addr_t auxiliary)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	uint32_t command_mode;
+	uint32_t address;
+	uint32_t ecc_command;
+	uint32_t buffer_mask;
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *channel = get_dma_chan(this);
+	int chip = this->current_chip;
+	u32 pio[6];
+
+	/* A DMA descriptor that does an ECC page read. */
+	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+	ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
+	buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
+				BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(address)
+		| BF_GPMI_CTRL0_XFER_COUNT(0);
+	pio[1] = 0;
+	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+	pio[3] = geo->page_size;
+	pio[4] = payload;
+	pio[5] = auxiliary;
+
+	desc = channel->device->device_prep_slave_sg(channel,
+					(struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_NONE, 0);
+	if (!desc) {
+		pr_err("step 2 error\n");
+		return -1;
+	}
+	set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
+	return start_dma_with_bch_irq(this, desc);
+}
+
+int gpmi_read_page(struct gpmi_nand_data *this,
+				dma_addr_t payload, dma_addr_t auxiliary)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	uint32_t command_mode;
+	uint32_t address;
+	uint32_t ecc_command;
+	uint32_t buffer_mask;
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *channel = get_dma_chan(this);
+	int chip = this->current_chip;
+	u32 pio[6];
+
+	/* [1] Wait for the chip to report ready. */
+	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(address)
+		| BF_GPMI_CTRL0_XFER_COUNT(0);
+	pio[1] = 0;
+	desc = channel->device->device_prep_slave_sg(channel,
+				(struct scatterlist *)pio, 2, DMA_NONE, 0);
+	if (!desc) {
+		pr_err("step 1 error\n");
+		return -1;
+	}
+
+	/* [2] Enable the BCH block and read. */
+	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
+	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+	ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
+	buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
+			| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(address)
+		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+
+	pio[1] = 0;
+	pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
+		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+	pio[3] = geo->page_size;
+	pio[4] = payload;
+	pio[5] = auxiliary;
+	desc = channel->device->device_prep_slave_sg(channel,
+					(struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_NONE, 1);
+	if (!desc) {
+		pr_err("step 2 error\n");
+		return -1;
+	}
+
+	/* [3] Disable the BCH block */
+	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(address)
+		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+	pio[1] = 0;
+	desc = channel->device->device_prep_slave_sg(channel,
+				(struct scatterlist *)pio, 2, DMA_NONE, 1);
+	if (!desc) {
+		pr_err("step 3 error\n");
+		return -1;
+	}
+
+	/* [4] submit the DMA */
+	set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
+	return start_dma_with_bch_irq(this, desc);
+}
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
new file mode 100644
index 0000000..071b634
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -0,0 +1,1619 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/gpmi-nand.h>
+#include <linux/mtd/partitions.h>
+
+#include "gpmi-nand.h"
+
+/* Add our own bbt descriptor. */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr gpmi_bbt_descr = {
+	.options	= 0,
+	.offs		= 0,
+	.len		= 1,
+	.pattern	= scan_ff_pattern
+};
+
+/* We will use all of the (page + OOB). */
+static struct nand_ecclayout gpmi_hw_ecclayout = {
+	.eccbytes = 0,
+	.eccpos = { 0, },
+	.oobfree = { {.offset = 0, .length = 0} }
+};
+
+static irqreturn_t bch_irq(int irq, void *cookie)
+{
+	struct gpmi_nand_data *this = cookie;
+
+	gpmi_clear_bch(this);
+	complete(&this->bch_done);
+	return IRQ_HANDLED;
+}
+
+/*
+ *  Calculate the ECC strength by hand:
+ *	E : the ECC strength.
+ *	G : the length of the Galois Field.
+ *	N : the number of ECC chunks per page.
+ *	O : the oobsize of the NAND chip.
+ *	M : the metadata size per page.
+ *
+ *	The formula is :
+ *		E * G * N
+ *	      ------------ <= (O - M)
+ *                  8
+ *
+ *      So, we get E by:
+ *                    (O - M) * 8
+ *              E <= -------------
+ *                       G * N
+ */
+static inline int get_ecc_strength(struct gpmi_nand_data *this)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	struct mtd_info	*mtd = &this->mtd;
+	int ecc_strength;
+
+	ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
+			/ (geo->gf_len * geo->ecc_chunk_count);
+
+	/* Round down to the nearest even number. */
+	return round_down(ecc_strength, 2);
+}
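+
+/*
+ * A worked example (illustrative geometry, not tied to a specific part):
+ * for a 2K page with a 64-byte OOB, M = 10, G = 13 and
+ * N = 2048 / 512 = 4, so E <= (64 - 10) * 8 / (13 * 4) = 8.3, which
+ * rounds down to an even ECC strength of 8.
+ */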
+
+int common_nfc_set_geometry(struct gpmi_nand_data *this)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	struct mtd_info *mtd = &this->mtd;
+	unsigned int metadata_size;
+	unsigned int status_size;
+	unsigned int block_mark_bit_offset;
+
+	/*
+	 * The size of the metadata can be changed, though we set it to 10
+	 * bytes now. But it can't be too large, because we have to save
+	 * enough space for BCH.
+	 */
+	geo->metadata_size = 10;
+
+	/* The default for the length of Galois Field. */
+	geo->gf_len = 13;
+
+	/* The default chunk size. There is no oobsize greater than 512. */
+	geo->ecc_chunk_size = 512;
+	while (geo->ecc_chunk_size < mtd->oobsize)
+		geo->ecc_chunk_size *= 2; /* keep C >= O */
+
+	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+	/* We use the same ECC strength for all chunks. */
+	geo->ecc_strength = get_ecc_strength(this);
+	if (!geo->ecc_strength) {
+		pr_err("Computed an invalid ECC strength.\n");
+		return -EINVAL;
+	}
+
+	geo->page_size = mtd->writesize + mtd->oobsize;
+	geo->payload_size = mtd->writesize;
+
+	/*
+	 * The auxiliary buffer contains the metadata and the ECC status. The
+	 * metadata is padded to the nearest 32-bit boundary. The ECC status
+	 * contains one byte for every ECC chunk, and is also padded to the
+	 * nearest 32-bit boundary.
+	 */
+	metadata_size = ALIGN(geo->metadata_size, 4);
+	status_size   = ALIGN(geo->ecc_chunk_count, 4);
+
+	geo->auxiliary_size = metadata_size + status_size;
+	geo->auxiliary_status_offset = metadata_size;
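+
+	/*
+	 * E.g. (illustrative): with 10 bytes of metadata and 4 ECC chunks,
+	 * the metadata pads to 12 bytes and the status to 4, so the
+	 * auxiliary buffer is 16 bytes with status starting at offset 12.
+	 */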
+
+	if (!this->swap_block_mark)
+		return 0;
+
+	/*
+	 * We need to compute the byte and bit offsets of
+	 * the physical block mark within the ECC-based view of the page.
+	 *
+	 * A NAND chip with a 2K page is shown below:
+	 *                                             (Block Mark)
+	 *                                                   |      |
+	 *                                                   |  D   |
+	 *                                                   |<---->|
+	 *                                                   V      V
+	 *    +---+----------+-+----------+-+----------+-+----------+-+
+	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
+	 *    +---+----------+-+----------+-+----------+-+----------+-+
+	 *
+	 * The position of the block mark moves forward in the ECC-based view
+	 * of the page, and the delta is:
+	 *
+	 *                   E * G * (N - 1)
+	 *             D = (---------------- + M)
+	 *                          8
+	 *
+	 * With the formula for computing the ECC strength, and the condition
+	 *       C >= O          (C is the ecc chunk size)
+	 *
+	 * it is easy to deduce the following result:
+	 *
+	 *         E * G       (O - M)      C - M         C - M
+	 *      ----------- <= ------- <=  --------  <  ---------
+	 *           8            N           N          (N - 1)
+	 *
+	 *  So, we get:
+	 *
+	 *                   E * G * (N - 1)
+	 *             D = (---------------- + M) < C
+	 *                          8
+	 *
+	 *  The above inequality means that the position of the block mark
+	 *  within the ECC-based view of the page still falls within a data
+	 *  chunk, and NOT within the ECC bits of the chunk.
+	 *
+	 *  Use the following to compute the bit position of the
+	 *  physical block mark within the ECC-based view of the page:
+	 *          (page_size - D) * 8
+	 *
+	 *  --Huang Shijie
+	 */
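+
+	/*
+	 * Continuing the illustrative geometry above (E = 8, G = 13, N = 4,
+	 * M = 10) on a 2K page: the bit offset is 2048 * 8 -
+	 * (8 * 13 * 3 + 10 * 8) = 16384 - 392 = 15992, i.e. byte 1999,
+	 * bit 0 of the ECC-based view.
+	 */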
+	block_mark_bit_offset = mtd->writesize * 8 -
+		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+				+ geo->metadata_size * 8);
+
+	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
+	return 0;
+}
+
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
+{
+	int chipnr = this->current_chip;
+
+	return this->dma_chans[chipnr];
+}
+
+/* Can we use the upper layer's buffer directly for DMA? */
+void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
+{
+	struct scatterlist *sgl = &this->data_sgl;
+	int ret;
+
+	this->direct_dma_map_ok = true;
+
+	/* first try to map the upper buffer directly */
+	sg_init_one(sgl, this->upper_buf, this->upper_len);
+	ret = dma_map_sg(this->dev, sgl, 1, dr);
+	if (ret == 0) {
+		/* We have to use our own DMA buffer. */
+		sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE);
+
+		if (dr == DMA_TO_DEVICE)
+			memcpy(this->data_buffer_dma, this->upper_buf,
+				this->upper_len);
+
+		ret = dma_map_sg(this->dev, sgl, 1, dr);
+		if (ret == 0)
+			pr_err("map failed.\n");
+
+		this->direct_dma_map_ok = false;
+	}
+}
+
+/* This will be called after the DMA operation is finished. */
+static void dma_irq_callback(void *param)
+{
+	struct gpmi_nand_data *this = param;
+	struct completion *dma_c = &this->dma_done;
+
+	complete(dma_c);
+
+	switch (this->dma_type) {
+	case DMA_FOR_COMMAND:
+		dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
+		break;
+
+	case DMA_FOR_READ_DATA:
+		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
+		if (this->direct_dma_map_ok == false)
+			memcpy(this->upper_buf, this->data_buffer_dma,
+				this->upper_len);
+		break;
+
+	case DMA_FOR_WRITE_DATA:
+		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
+		break;
+
+	case DMA_FOR_READ_ECC_PAGE:
+	case DMA_FOR_WRITE_ECC_PAGE:
+		/* We have to wait for the BCH interrupt to finish. */
+		break;
+
+	default:
+		pr_err("unknown DMA operation.\n");
+	}
+}
+
+int start_dma_without_bch_irq(struct gpmi_nand_data *this,
+				struct dma_async_tx_descriptor *desc)
+{
+	struct completion *dma_c = &this->dma_done;
+	int err;
+
+	init_completion(dma_c);
+
+	desc->callback		= dma_irq_callback;
+	desc->callback_param	= this;
+	dmaengine_submit(desc);
+
+	/* Wait for the interrupt from the DMA block. */
+	err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
+	if (!err) {
+		pr_err("DMA timeout, last DMA: %d\n", this->last_dma_type);
+		gpmi_dump_info(this);
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/*
+ * This function is used for BCH page reads and writes.
+ * It will wait for the BCH interrupt for as long as ONE second.
+ * In fact, we must wait for two interrupts:
+ *	[1] first the DMA interrupt, and
+ *	[2] then the BCH interrupt.
+ */
+int start_dma_with_bch_irq(struct gpmi_nand_data *this,
+			struct dma_async_tx_descriptor *desc)
+{
+	struct completion *bch_c = &this->bch_done;
+	int err;
+
+	/* Prepare to receive an interrupt from the BCH block. */
+	init_completion(bch_c);
+
+	/* Start the DMA; if it fails, don't wait for the BCH interrupt. */
+	err = start_dma_without_bch_irq(this, desc);
+	if (err)
+		return err;
+
+	/* Wait for the interrupt from the BCH block. */
+	err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
+	if (!err) {
+		pr_err("BCH timeout, last DMA: %d\n", this->last_dma_type);
+		gpmi_dump_info(this);
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+static int __devinit
+acquire_register_block(struct gpmi_nand_data *this, const char *res_name)
+{
+	struct platform_device *pdev = this->pdev;
+	struct resources *res = &this->resources;
+	struct resource *r;
+	void *p;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
+	if (!r) {
+		pr_err("Can't get resource for %s\n", res_name);
+		return -ENXIO;
+	}
+
+	p = ioremap(r->start, resource_size(r));
+	if (!p) {
+		pr_err("Can't remap %s\n", res_name);
+		return -ENOMEM;
+	}
+
+	if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
+		res->gpmi_regs = p;
+	else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
+		res->bch_regs = p;
+	else
+		pr_err("unknown resource name : %s\n", res_name);
+
+	return 0;
+}
+
+static void release_register_block(struct gpmi_nand_data *this)
+{
+	struct resources *res = &this->resources;
+	if (res->gpmi_regs)
+		iounmap(res->gpmi_regs);
+	if (res->bch_regs)
+		iounmap(res->bch_regs);
+	res->gpmi_regs = NULL;
+	res->bch_regs = NULL;
+}
+
+static int __devinit
+acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
+{
+	struct platform_device *pdev = this->pdev;
+	struct resources *res = &this->resources;
+	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
+	struct resource *r;
+	int err;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
+	if (!r) {
+		pr_err("Can't get resource for %s\n", res_name);
+		return -ENXIO;
+	}
+
+	err = request_irq(r->start, irq_h, 0, res_name, this);
+	if (err) {
+		pr_err("Can't claim %s\n", res_name);
+		return err;
+	}
+
+	res->bch_low_interrupt = r->start;
+	res->bch_high_interrupt = r->end;
+	return 0;
+}
+
+static void release_bch_irq(struct gpmi_nand_data *this)
+{
+	struct resources *res = &this->resources;
+	int i = res->bch_low_interrupt;
+
+	for (; i <= res->bch_high_interrupt; i++)
+		free_irq(i, this);
+}
+
+static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
+{
+	struct gpmi_nand_data *this = param;
+	struct resource *r = this->private;
+
+	if (!mxs_dma_is_apbh(chan))
+		return false;
+	/*
+	 * Only catch the GPMI DMA channels:
+	 *	for mx23 :	MX23_DMA_GPMI0 ~ MX23_DMA_GPMI3
+	 *		(These four channels share the same IRQ!)
+	 *
+	 *	for mx28 :	MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
+	 *		(These eight channels share the same IRQ!)
+	 */
+	if (r->start <= chan->chan_id && chan->chan_id <= r->end) {
+		chan->private = &this->dma_data;
+		return true;
+	}
+	return false;
+}
+
+static void release_dma_channels(struct gpmi_nand_data *this)
+{
+	unsigned int i;
+	for (i = 0; i < DMA_CHANS; i++)
+		if (this->dma_chans[i]) {
+			dma_release_channel(this->dma_chans[i]);
+			this->dma_chans[i] = NULL;
+		}
+}
+
+static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
+{
+	struct platform_device *pdev = this->pdev;
+	struct gpmi_nand_platform_data *pdata = this->pdata;
+	struct resources *res = &this->resources;
+	struct resource *r, *r_dma;
+	unsigned int i;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+					GPMI_NAND_DMA_CHANNELS_RES_NAME);
+	r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+					GPMI_NAND_DMA_INTERRUPT_RES_NAME);
+	if (!r || !r_dma) {
+		pr_err("Can't get resource for DMA\n");
+		return -ENXIO;
+	}
+
+	/* used in gpmi_dma_filter() */
+	this->private = r;
+
+	for (i = r->start; i <= r->end; i++) {
+		struct dma_chan *dma_chan;
+		dma_cap_mask_t mask;
+
+		if (i - r->start >= pdata->max_chip_count)
+			break;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		/* get the DMA interrupt */
+		if (r_dma->start == r_dma->end) {
+			/* only register the first. */
+			if (i == r->start)
+				this->dma_data.chan_irq = r_dma->start;
+			else
+				this->dma_data.chan_irq = NO_IRQ;
+		} else
+			this->dma_data.chan_irq = r_dma->start + (i - r->start);
+
+		dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
+		if (!dma_chan)
+			goto acquire_err;
+
+		/* fill the first empty item */
+		this->dma_chans[i - r->start] = dma_chan;
+	}
+
+	res->dma_low_channel = r->start;
+	res->dma_high_channel = i;
+	return 0;
+
+acquire_err:
+	pr_err("Can't acquire DMA channel %u\n", i);
+	release_dma_channels(this);
+	return -EINVAL;
+}
+
+static int __devinit acquire_resources(struct gpmi_nand_data *this)
+{
+	struct resources *res = &this->resources;
+	int ret;
+
+	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
+	if (ret)
+		goto exit_regs;
+
+	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
+	if (ret)
+		goto exit_regs;
+
+	ret = acquire_bch_irq(this, bch_irq);
+	if (ret)
+		goto exit_regs;
+
+	ret = acquire_dma_channels(this);
+	if (ret)
+		goto exit_dma_channels;
+
+	res->clock = clk_get(&this->pdev->dev, NULL);
+	if (IS_ERR(res->clock)) {
+		pr_err("cannot get the clock\n");
+		ret = -ENOENT;
+		goto exit_clock;
+	}
+	return 0;
+
+exit_clock:
+	release_dma_channels(this);
+exit_dma_channels:
+	release_bch_irq(this);
+exit_regs:
+	release_register_block(this);
+	return ret;
+}
+
+static void release_resources(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+
+	clk_put(r->clock);
+	release_register_block(this);
+	release_bch_irq(this);
+	release_dma_channels(this);
+}
+
+static int __devinit init_hardware(struct gpmi_nand_data *this)
+{
+	int ret;
+
+	/*
+	 * This structure contains the "safe" GPMI timing that should succeed
+	 * with any NAND Flash device
+	 * (although with less-than-optimal performance).
+	 */
+	struct nand_timing  safe_timing = {
+		.data_setup_in_ns        = 80,
+		.data_hold_in_ns         = 60,
+		.address_setup_in_ns     = 25,
+		.gpmi_sample_delay_in_ns =  6,
+		.tREA_in_ns              = -1,
+		.tRLOH_in_ns             = -1,
+		.tRHOH_in_ns             = -1,
+	};
+
+	/* Initialize the hardware. */
+	ret = gpmi_init(this);
+	if (ret)
+		return ret;
+
+	this->timing = safe_timing;
+	return 0;
+}
+
+static int read_page_prepare(struct gpmi_nand_data *this,
+			void *destination, unsigned length,
+			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+			void **use_virt, dma_addr_t *use_phys)
+{
+	struct device *dev = this->dev;
+
+	if (virt_addr_valid(destination)) {
+		dma_addr_t dest_phys;
+
+		dest_phys = dma_map_single(dev, destination,
+						length, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, dest_phys)) {
+			if (alt_size < length) {
+				pr_err("Alternate buffer is too small\n");
+				return -ENOMEM;
+			}
+			goto map_failed;
+		}
+		*use_virt = destination;
+		*use_phys = dest_phys;
+		this->direct_dma_map_ok = true;
+		return 0;
+	}
+
+map_failed:
+	*use_virt = alt_virt;
+	*use_phys = alt_phys;
+	this->direct_dma_map_ok = false;
+	return 0;
+}
+
+static inline void read_page_end(struct gpmi_nand_data *this,
+			void *destination, unsigned length,
+			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+			void *used_virt, dma_addr_t used_phys)
+{
+	if (this->direct_dma_map_ok)
+		dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
+}
+
+static inline void read_page_swap_end(struct gpmi_nand_data *this,
+			void *destination, unsigned length,
+			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+			void *used_virt, dma_addr_t used_phys)
+{
+	if (!this->direct_dma_map_ok)
+		memcpy(destination, alt_virt, length);
+}
+
+static int send_page_prepare(struct gpmi_nand_data *this,
+			const void *source, unsigned length,
+			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+			const void **use_virt, dma_addr_t *use_phys)
+{
+	struct device *dev = this->dev;
+
+	if (virt_addr_valid(source)) {
+		dma_addr_t source_phys;
+
+		source_phys = dma_map_single(dev, (void *)source, length,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, source_phys)) {
+			if (alt_size < length) {
+				pr_err("Alternate buffer is too small\n");
+				return -ENOMEM;
+			}
+			goto map_failed;
+		}
+		*use_virt = source;
+		*use_phys = source_phys;
+		return 0;
+	}
+map_failed:
+	/*
+	 * Copy the content of the source buffer into the alternate
+	 * buffer and set up the return values accordingly.
+	 */
+	memcpy(alt_virt, source, length);
+
+	*use_virt = alt_virt;
+	*use_phys = alt_phys;
+	return 0;
+}
+
+static void send_page_end(struct gpmi_nand_data *this,
+			const void *source, unsigned length,
+			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+			const void *used_virt, dma_addr_t used_phys)
+{
+	struct device *dev = this->dev;
+	if (used_virt == source)
+		dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
+}
+
+static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
+{
+	struct device *dev = this->dev;
+
+	if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
+		dma_free_coherent(dev, this->page_buffer_size,
+					this->page_buffer_virt,
+					this->page_buffer_phys);
+	kfree(this->cmd_buffer);
+	kfree(this->data_buffer_dma);
+
+	this->cmd_buffer	= NULL;
+	this->data_buffer_dma	= NULL;
+	this->page_buffer_virt	= NULL;
+	this->page_buffer_size	=  0;
+}
+
+/* Allocate the DMA buffers */
+static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	struct device *dev = this->dev;
+
+	/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
+	this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA);
+	if (this->cmd_buffer == NULL)
+		goto error_alloc;
+
+	/* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */
+	this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA);
+	if (this->data_buffer_dma == NULL)
+		goto error_alloc;
+
+	/*
+	 * [3] Allocate the page buffer.
+	 *
+	 * Both the payload buffer and the auxiliary buffer must appear on
+	 * 32-bit boundaries. We presume the size of the payload buffer is a
+	 * power of two and is much larger than four, which guarantees the
+	 * auxiliary buffer will appear on a 32-bit boundary.
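+	 *
+	 * E.g. (illustrative): with a 2048-byte payload, the auxiliary
+	 * buffer starts at offset 2048, which stays 32-bit aligned as long
+	 * as the whole allocation is.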
+	 */
+	this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
+	this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
+					&this->page_buffer_phys, GFP_DMA);
+	if (!this->page_buffer_virt)
+		goto error_alloc;
+
+	/* Slice up the page buffer. */
+	this->payload_virt = this->page_buffer_virt;
+	this->payload_phys = this->page_buffer_phys;
+	this->auxiliary_virt = this->payload_virt + geo->payload_size;
+	this->auxiliary_phys = this->payload_phys + geo->payload_size;
+	return 0;
+
+error_alloc:
+	gpmi_free_dma_buffer(this);
+	pr_err("Error allocating DMA buffers!\n");
+	return -ENOMEM;
+}
+
+static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct gpmi_nand_data *this = chip->priv;
+	int ret;
+
+	/*
+	 * Every operation begins with a command byte and a series of zero or
+	 * more address bytes. These are distinguished by either the Address
+	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
+	 * asserted. When MTD is ready to execute the command, it will deassert
+	 * both latch enables.
+	 *
+	 * Rather than run a separate DMA operation for every single byte, we
+	 * queue them up and run a single DMA operation for the entire series
+	 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
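+	 *
+	 * For example (illustrative): a page read typically arrives as one
+	 * command byte (CLE asserted) followed by several address bytes
+	 * (ALE asserted), all queued here and flushed as a single DMA once
+	 * MTD deasserts both latch signals.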
+	 */
+	if ((ctrl & (NAND_ALE | NAND_CLE))) {
+		if (data != NAND_CMD_NONE)
+			this->cmd_buffer[this->command_length++] = data;
+		return;
+	}
+
+	if (!this->command_length)
+		return;
+
+	ret = gpmi_send_command(this);
+	if (ret)
+		pr_err("Chip: %u, Error %d\n", this->current_chip, ret);
+
+	this->command_length = 0;
+}
+
+static int gpmi_dev_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct gpmi_nand_data *this = chip->priv;
+
+	return gpmi_is_ready(this, this->current_chip);
+}
+
+static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct gpmi_nand_data *this = chip->priv;
+
+	if ((this->current_chip < 0) && (chipnr >= 0))
+		gpmi_begin(this);
+	else if ((this->current_chip >= 0) && (chipnr < 0))
+		gpmi_end(this);
+
+	this->current_chip = chipnr;
+}
+
+static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct gpmi_nand_data *this = chip->priv;
+
+	pr_debug("len is %d\n", len);
+	this->upper_buf	= buf;
+	this->upper_len	= len;
+
+	gpmi_read_data(this);
+}
+
+static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct gpmi_nand_data *this = chip->priv;
+
+	pr_debug("len is %d\n", len);
+	this->upper_buf	= (uint8_t *)buf;
+	this->upper_len	= len;
+
+	gpmi_send_data(this);
+}
+
+static uint8_t gpmi_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct gpmi_nand_data *this = chip->priv;
+	uint8_t *buf = this->data_buffer_dma;
+
+	gpmi_read_buf(mtd, buf, 1);
+	return buf[0];
+}
+
+/*
+ * Handles block mark swapping.
+ * It can be called either to swap the block mark or to swap it back,
+ * because the two operations are the same.
+ */
+static void block_mark_swapping(struct gpmi_nand_data *this,
+				void *payload, void *auxiliary)
+{
+	struct bch_geometry *nfc_geo = &this->bch_geometry;
+	unsigned char *p;
+	unsigned char *a;
+	unsigned int  bit;
+	unsigned char mask;
+	unsigned char from_data;
+	unsigned char from_oob;
+
+	if (!this->swap_block_mark)
+		return;
+
+	/*
+	 * If control arrives here, we're swapping. Make some convenience
+	 * variables.
+	 */
+	bit = nfc_geo->block_mark_bit_offset;
+	p   = payload + nfc_geo->block_mark_byte_offset;
+	a   = auxiliary;
+
+	/*
+	 * Get the byte from the data area that overlays the block mark. Since
+	 * the ECC engine applies its own view to the bits in the page, the
+	 * physical block mark won't (in general) appear on a byte boundary in
+	 * the data.
+	 */
+	from_data = (p[0] >> bit) | (p[1] << (8 - bit));
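+
+	/*
+	 * For instance (illustrative), with bit == 5 the mark's low three
+	 * bits come from the top of p[0] and its high five bits from the
+	 * bottom of p[1]; the mask logic below splices the OOB byte back
+	 * across the same boundary.
+	 */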
+
+	/* Get the byte from the OOB. */
+	from_oob = a[0];
+
+	/* Swap them. */
+	a[0] = from_data;
+
+	mask = (0x1 << bit) - 1;
+	p[0] = (p[0] & mask) | (from_oob << bit);
+
+	mask = ~0 << bit;
+	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
+}
+
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int page)
+{
+	struct gpmi_nand_data *this = chip->priv;
+	struct bch_geometry *nfc_geo = &this->bch_geometry;
+	void          *payload_virt;
+	dma_addr_t    payload_phys;
+	void          *auxiliary_virt;
+	dma_addr_t    auxiliary_phys;
+	unsigned int  i;
+	unsigned char *status;
+	unsigned int  failed;
+	unsigned int  corrected;
+	int           ret;
+
+	pr_debug("page number is: %d\n", page);
+	ret = read_page_prepare(this, buf, mtd->writesize,
+					this->payload_virt, this->payload_phys,
+					nfc_geo->payload_size,
+					&payload_virt, &payload_phys);
+	if (ret) {
+		pr_err("Inadequate DMA buffer\n");
+		ret = -ENOMEM;
+		return ret;
+	}
+	auxiliary_virt = this->auxiliary_virt;
+	auxiliary_phys = this->auxiliary_phys;
+
+	/* go! */
+	ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
+	read_page_end(this, buf, mtd->writesize,
+			this->payload_virt, this->payload_phys,
+			nfc_geo->payload_size,
+			payload_virt, payload_phys);
+	if (ret) {
+		pr_err("Error in ECC-based read: %d\n", ret);
+		goto exit_nfc;
+	}
+
+	/* handle the block mark swapping */
+	block_mark_swapping(this, payload_virt, auxiliary_virt);
+
+	/* Loop over status bytes, accumulating ECC status. */
+	failed		= 0;
+	corrected	= 0;
+	status		= auxiliary_virt + nfc_geo->auxiliary_status_offset;
+
+	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
+		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
+			continue;
+
+		if (*status == STATUS_UNCORRECTABLE) {
+			failed++;
+			continue;
+		}
+		corrected += *status;
+	}
+
+	/*
+	 * Propagate the ECC status to the owning MTD only when a chunk failed
+	 * or the corrected count nearly reaches our ECC correction threshold.
+	 */
+	if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
+		mtd->ecc_stats.failed    += failed;
+		mtd->ecc_stats.corrected += corrected;
+	}
+
+	/*
+	 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for
+	 * details about our policy for delivering the OOB.
+	 *
+	 * We fill the caller's buffer with set bits, and then copy the block
+	 * mark to the caller's buffer. Note that, if block mark swapping was
+	 * necessary, it has already been done, so we can rely on the first
+	 * byte of the auxiliary buffer to contain the block mark.
+	 */
+	memset(chip->oob_poi, ~0, mtd->oobsize);
+	chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+
+	read_page_swap_end(this, buf, mtd->writesize,
+			this->payload_virt, this->payload_phys,
+			nfc_geo->payload_size,
+			payload_virt, payload_phys);
+exit_nfc:
+	return ret;
+}
+
+static void gpmi_ecc_write_page(struct mtd_info *mtd,
+				struct nand_chip *chip, const uint8_t *buf)
+{
+	struct gpmi_nand_data *this = chip->priv;
+	struct bch_geometry *nfc_geo = &this->bch_geometry;
+	const void *payload_virt;
+	dma_addr_t payload_phys;
+	const void *auxiliary_virt;
+	dma_addr_t auxiliary_phys;
+	int        ret;
+
+	pr_debug("ecc write page.\n");
+	if (this->swap_block_mark) {
+		/*
+		 * If control arrives here, we're doing block mark swapping.
+		 * Since we can't modify the caller's buffers, we must copy them
+		 * into our own.
+		 */
+		memcpy(this->payload_virt, buf, mtd->writesize);
+		payload_virt = this->payload_virt;
+		payload_phys = this->payload_phys;
+
+		memcpy(this->auxiliary_virt, chip->oob_poi,
+				nfc_geo->auxiliary_size);
+		auxiliary_virt = this->auxiliary_virt;
+		auxiliary_phys = this->auxiliary_phys;
+
+		/* Handle block mark swapping. */
+		block_mark_swapping(this,
+				(void *) payload_virt, (void *) auxiliary_virt);
+	} else {
+		/*
+		 * If control arrives here, we're not doing block mark swapping,
+		 * so we can try to use the caller's buffers.
+		 */
+		ret = send_page_prepare(this,
+				buf, mtd->writesize,
+				this->payload_virt, this->payload_phys,
+				nfc_geo->payload_size,
+				&payload_virt, &payload_phys);
+		if (ret) {
+			pr_err("Inadequate payload DMA buffer\n");
+			return;
+		}
+
+		ret = send_page_prepare(this,
+				chip->oob_poi, mtd->oobsize,
+				this->auxiliary_virt, this->auxiliary_phys,
+				nfc_geo->auxiliary_size,
+				&auxiliary_virt, &auxiliary_phys);
+		if (ret) {
+			pr_err("Inadequate auxiliary DMA buffer\n");
+			goto exit_auxiliary;
+		}
+	}
+
+	/* Ask the NFC. */
+	ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
+	if (ret)
+		pr_err("Error in ECC-based write: %d\n", ret);
+
+	if (!this->swap_block_mark) {
+		send_page_end(this, chip->oob_poi, mtd->oobsize,
+				this->auxiliary_virt, this->auxiliary_phys,
+				nfc_geo->auxiliary_size,
+				auxiliary_virt, auxiliary_phys);
+exit_auxiliary:
+		send_page_end(this, buf, mtd->writesize,
+				this->payload_virt, this->payload_phys,
+				nfc_geo->payload_size,
+				payload_virt, payload_phys);
+	}
+}
+
+/*
+ * There are several places in this driver where we have to handle the OOB and
+ * block marks. This is the function where things are the most complicated, so
+ * this is where we try to explain it all. All the other places refer back to
+ * here.
+ *
+ * These are the rules, in order of decreasing importance:
+ *
+ * 1) Nothing the caller does can be allowed to imperil the block mark.
+ *
+ * 2) In read operations, the first byte of the OOB we return must reflect the
+ *    true state of the block mark, no matter where that block mark appears in
+ *    the physical page.
+ *
+ * 3) ECC-based read operations return an OOB full of set bits (since we never
+ *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
+ *    return).
+ *
+ * 4) "Raw" read operations return a direct view of the physical bytes in the
+ *    page, using the conventional definition of which bytes are data and which
+ *    are OOB. This gives the caller a way to see the actual, physical bytes
+ *    in the page, without the distortions applied by our ECC engine.
+ *
+ *
+ * What we do for this specific read operation depends on two questions:
+ *
+ * 1) Are we doing a "raw" read, or an ECC-based read?
+ *
+ * 2) Are we using block mark swapping or transcription?
+ *
+ * There are four cases, illustrated by the following Karnaugh map:
+ *
+ *                    |           Raw           |         ECC-based       |
+ *       -------------+-------------------------+-------------------------+
+ *                    | Read the conventional   |                         |
+ *                    | OOB at the end of the   |                         |
+ *       Swapping     | page and return it. It  |                         |
+ *                    | contains exactly what   |                         |
+ *                    | we want.                | Read the block mark and |
+ *       -------------+-------------------------+ return it in a buffer   |
+ *                    | Read the conventional   | full of set bits.       |
+ *                    | OOB at the end of the   |                         |
+ *                    | page and also the block |                         |
+ *       Transcribing | mark in the metadata.   |                         |
+ *                    | Copy the block mark     |                         |
+ *                    | into the first byte of  |                         |
+ *                    | the OOB.                |                         |
+ *       -------------+-------------------------+-------------------------+
+ *
+ * Note that we break rule #4 in the Transcribing/Raw case because we're not
+ * giving an accurate view of the actual, physical bytes in the page (we're
+ * overwriting the block mark). That's OK because it's more important to follow
+ * rule #2.
+ *
+ * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
+ * easy. When reading a page, for example, the NAND Flash MTD code calls our
+ * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
+ * ECC-based or raw view of the page is implicit in which function it calls
+ * (there is a similar pair of ECC-based/raw functions for writing).
+ *
+ * Since MTD assumes the OOB is not covered by ECC, there is no pair of
+ * ECC-based/raw functions for reading or writing the OOB. The fact that the
+ * caller wants an ECC-based or raw view of the page is not propagated down to
+ * this driver.
+ */
+static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+				int page, int sndcmd)
+{
+	struct gpmi_nand_data *this = chip->priv;
+
+	pr_debug("page number is %d\n", page);
+	/* clear the OOB buffer */
+	memset(chip->oob_poi, ~0, mtd->oobsize);
+
+	/* Read out the conventional OOB. */
+	chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	/*
+	 * Now, we want to make sure the block mark is correct. In the
+	 * Swapping/Raw case, we already have it. Otherwise, we need to
+	 * explicitly read it.
+	 */
+	if (!this->swap_block_mark) {
+		/* Read the block mark into the first byte of the OOB buffer. */
+		chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+		chip->oob_poi[0] = chip->read_byte(mtd);
+	}
+
+	/*
+	 * Return true, indicating that the next call to this function must send
+	 * a command.
+	 */
+	return true;
+}
+
+static int
+gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+	/*
+	 * The BCH will use all of the (page + oob).
+	 * Our gpmi_hw_ecclayout can only prohibit JFFS2 from writing the oob.
+	 * But it cannot stop ioctls such as MEMWRITEOOB, which use
+	 * MTD_OPS_PLACE_OOB. So we have to implement this function to
+	 * prohibit those ioctls too.
+	 */
+	return -EPERM;
+}
+
+static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct gpmi_nand_data *this = chip->priv;
+	int block, ret = 0;
+	uint8_t *block_mark;
+	int column, page, status, chipnr;
+
+	/* Get block number */
+	block = (int)(ofs >> chip->bbt_erase_shift);
+	if (chip->bbt)
+		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
+
+	/* Do we have a flash-based bad block table? */
+	if (chip->options & NAND_BBT_USE_FLASH)
+		ret = nand_update_bbt(mtd, ofs);
+	else {
+		chipnr = (int)(ofs >> chip->chip_shift);
+		chip->select_chip(mtd, chipnr);
+
+		column = this->swap_block_mark ? mtd->writesize : 0;
+
+		/* Write the block mark. */
+		block_mark = this->data_buffer_dma;
+		block_mark[0] = 0; /* bad block marker */
+
+		/* Shift to get page */
+		page = (int)(ofs >> chip->page_shift);
+
+		chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
+		chip->write_buf(mtd, block_mark, 1);
+		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+		status = chip->waitfunc(mtd, chip);
+		if (status & NAND_STATUS_FAIL)
+			ret = -EIO;
+
+		chip->select_chip(mtd, -1);
+	}
+	if (!ret)
+		mtd->ecc_stats.badblocks++;
+
+	return ret;
+}
+
+static int __devinit nand_boot_set_geometry(struct gpmi_nand_data *this)
+{
+	struct boot_rom_geometry *geometry = &this->rom_geometry;
+
+	/*
+	 * Set the boot block stride size.
+	 *
+	 * In principle, we should be reading this from the OTP bits, since
+	 * that's where the ROM is going to get it. In fact, we don't have any
+	 * way to read the OTP bits, so we go with the default and hope for the
+	 * best.
+	 */
+	geometry->stride_size_in_pages = 64;
+
+	/*
+	 * Set the search area stride exponent.
+	 *
+	 * In principle, we should be reading this from the OTP bits, since
+	 * that's where the ROM is going to get it. In fact, we don't have any
+	 * way to read the OTP bits, so we go with the default and hope for the
+	 * best.
+	 */
+	geometry->search_area_stride_exponent = 2;
+	return 0;
+}
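
With these defaults the first search area spans (1 << 2) = 4 strides of 64 pages each, i.e. 256 pages. The same arithmetic reappears in mx23_check_transcription_stamp() and mx23_write_transcription_stamp() below:

	/* Worked example of the default geometry set above. */
	unsigned int strides = 1 << geometry->search_area_stride_exponent; /* 4   */
	unsigned int pages   = strides * geometry->stride_size_in_pages;   /* 256 */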
+
+static const char  *fingerprint = "STMP";
+static int __devinit mx23_check_transcription_stamp(struct gpmi_nand_data *this)
+{
+	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+	struct device *dev = this->dev;
+	struct mtd_info *mtd = &this->mtd;
+	struct nand_chip *chip = &this->nand;
+	unsigned int search_area_size_in_strides;
+	unsigned int stride;
+	unsigned int page;
+	loff_t byte;
+	uint8_t *buffer = chip->buffers->databuf;
+	int saved_chip_number;
+	int found_an_ncb_fingerprint = false;
+
+	/* Compute the number of strides in a search area. */
+	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+
+	saved_chip_number = this->current_chip;
+	chip->select_chip(mtd, 0);
+
+	/*
+	 * Loop through the first search area, looking for the NCB fingerprint.
+	 */
+	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
+
+	for (stride = 0; stride < search_area_size_in_strides; stride++) {
+		/* Compute the page and byte addresses. */
+		page = stride * rom_geo->stride_size_in_pages;
+		byte = page   * mtd->writesize;
+
+		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
+
+		/*
+		 * Read the NCB fingerprint. The fingerprint is four bytes long
+		 * and starts at byte offset 12 in the page.
+		 */
+		chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+		chip->read_buf(mtd, buffer, strlen(fingerprint));
+
+		/* Look for the fingerprint. */
+		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
+			found_an_ncb_fingerprint = true;
+			break;
+		}
+
+	}
+
+	chip->select_chip(mtd, saved_chip_number);
+
+	if (found_an_ncb_fingerprint)
+		dev_dbg(dev, "\tFound a fingerprint\n");
+	else
+		dev_dbg(dev, "\tNo fingerprint found\n");
+	return found_an_ncb_fingerprint;
+}
+
+/* Writes a transcription stamp. */
+static int __devinit mx23_write_transcription_stamp(struct gpmi_nand_data *this)
+{
+	struct device *dev = this->dev;
+	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+	struct mtd_info *mtd = &this->mtd;
+	struct nand_chip *chip = &this->nand;
+	unsigned int block_size_in_pages;
+	unsigned int search_area_size_in_strides;
+	unsigned int search_area_size_in_pages;
+	unsigned int search_area_size_in_blocks;
+	unsigned int block;
+	unsigned int stride;
+	unsigned int page;
+	loff_t       byte;
+	uint8_t      *buffer = chip->buffers->databuf;
+	int saved_chip_number;
+	int status;
+
+	/* Compute the search area geometry. */
+	block_size_in_pages = mtd->erasesize / mtd->writesize;
+	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+	search_area_size_in_pages = search_area_size_in_strides *
+					rom_geo->stride_size_in_pages;
+	search_area_size_in_blocks =
+		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
+				    block_size_in_pages;
+
+	dev_dbg(dev, "Search Area Geometry :\n");
+	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
+	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
+	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
+
+	/* Select chip 0. */
+	saved_chip_number = this->current_chip;
+	chip->select_chip(mtd, 0);
+
+	/* Loop over blocks in the first search area, erasing them. */
+	dev_dbg(dev, "Erasing the search area...\n");
+
+	for (block = 0; block < search_area_size_in_blocks; block++) {
+		/* Compute the page address. */
+		page = block * block_size_in_pages;
+
+		/* Erase this block. */
+		dev_dbg(dev, "\tErasing block 0x%x\n", block);
+		chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+		chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+		/* Wait for the erase to finish. */
+		status = chip->waitfunc(mtd, chip);
+		if (status & NAND_STATUS_FAIL)
+			dev_err(dev, "[%s] Erase failed.\n", __func__);
+	}
+
+	/* Write the NCB fingerprint into the page buffer. */
+	memset(buffer, ~0, mtd->writesize);
+	memset(chip->oob_poi, ~0, mtd->oobsize);
+	memcpy(buffer + 12, fingerprint, strlen(fingerprint));
+
+	/* Loop through the first search area, writing NCB fingerprints. */
+	dev_dbg(dev, "Writing NCB fingerprints...\n");
+	for (stride = 0; stride < search_area_size_in_strides; stride++) {
+		/* Compute the page and byte addresses. */
+		page = stride * rom_geo->stride_size_in_pages;
+		byte = page   * mtd->writesize;
+
+		/* Write the first page of the current stride. */
+		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
+		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+		chip->ecc.write_page_raw(mtd, chip, buffer);
+		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+		/* Wait for the write to finish. */
+		status = chip->waitfunc(mtd, chip);
+		if (status & NAND_STATUS_FAIL)
+			dev_err(dev, "[%s] Write failed.\n", __func__);
+	}
+
+	/* Deselect chip 0. */
+	chip->select_chip(mtd, saved_chip_number);
+	return 0;
+}
+
+static int __devinit mx23_boot_init(struct gpmi_nand_data  *this)
+{
+	struct device *dev = this->dev;
+	struct nand_chip *chip = &this->nand;
+	struct mtd_info *mtd = &this->mtd;
+	unsigned int block_count;
+	unsigned int block;
+	int     chipnr;
+	int     page;
+	loff_t  byte;
+	uint8_t block_mark;
+	int     ret = 0;
+
+	/*
+	 * If control arrives here, we can't use block mark swapping, which
+	 * means we're forced to use transcription. First, scan for the
+	 * transcription stamp. If we find it, then we don't have to do
+	 * anything -- the block marks are already transcribed.
+	 */
+	if (mx23_check_transcription_stamp(this))
+		return 0;
+
+	/*
+	 * If control arrives here, we couldn't find a transcription stamp,
+	 * so we presume the block marks are in the conventional location.
+	 */
+	dev_dbg(dev, "Transcribing bad block marks...\n");
+
+	/* Compute the number of blocks in the entire medium. */
+	block_count = chip->chipsize >> chip->phys_erase_shift;
+
+	/*
+	 * Loop over all the blocks in the medium, transcribing block marks as
+	 * we go.
+	 */
+	for (block = 0; block < block_count; block++) {
+		/*
+		 * Compute the chip, page and byte addresses for this block's
+		 * conventional mark.
+		 */
+		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
+		page = block << (chip->phys_erase_shift - chip->page_shift);
+		byte = block <<  chip->phys_erase_shift;
+
+		/* Send the command to read the conventional block mark. */
+		chip->select_chip(mtd, chipnr);
+		chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+		block_mark = chip->read_byte(mtd);
+		chip->select_chip(mtd, -1);
+
+		/*
+		 * Check if the block is marked bad. If so, we need to mark it
+		 * again, but this time the result will be a mark in the
+		 * location where we transcribe block marks.
+		 */
+		if (block_mark != 0xff) {
+			dev_dbg(dev, "Transcribing mark in block %u\n", block);
+			ret = chip->block_markbad(mtd, byte);
+			if (ret)
+				dev_err(dev, "Failed to mark block bad with "
+							"ret %d\n", ret);
+		}
+	}
+
+	/* Write the stamp that indicates we've transcribed the block marks. */
+	mx23_write_transcription_stamp(this);
+	return 0;
+}
+
+static int __devinit nand_boot_init(struct gpmi_nand_data  *this)
+{
+	nand_boot_set_geometry(this);
+
+	/* This is arch-specific ROM initialization before the BBT scanning. */
+	if (GPMI_IS_MX23(this))
+		return mx23_boot_init(this);
+	return 0;
+}
+
+static int __devinit gpmi_set_geometry(struct gpmi_nand_data *this)
+{
+	int ret;
+
+	/* Free the temporary DMA memory for reading ID. */
+	gpmi_free_dma_buffer(this);
+
+	/* Set up the NFC geometry which is used by BCH. */
+	ret = bch_set_geometry(this);
+	if (ret) {
+		pr_err("set geometry ret : %d\n", ret);
+		return ret;
+	}
+
+	/* Alloc the new DMA buffers according to the pagesize and oobsize */
+	return gpmi_alloc_dma_buffer(this);
+}
+
+static int gpmi_pre_bbt_scan(struct gpmi_nand_data  *this)
+{
+	int ret;
+
+	/* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
+	if (GPMI_IS_MX23(this))
+		this->swap_block_mark = false;
+	else
+		this->swap_block_mark = true;
+
+	/* Set up the medium geometry */
+	ret = gpmi_set_geometry(this);
+	if (ret)
+		return ret;
+
+	/* NAND boot init, depends on the gpmi_set_geometry(). */
+	return nand_boot_init(this);
+}
+
+static int gpmi_scan_bbt(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd->priv;
+	struct gpmi_nand_data *this = chip->priv;
+	int ret;
+
+	/* Prepare for the BBT scan. */
+	ret = gpmi_pre_bbt_scan(this);
+	if (ret)
+		return ret;
+
+	/* use the default BBT implementation */
+	return nand_default_bbt(mtd);
+}
+
+void gpmi_nfc_exit(struct gpmi_nand_data *this)
+{
+	nand_release(&this->mtd);
+	gpmi_free_dma_buffer(this);
+}
+
+static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
+{
+	struct gpmi_nand_platform_data *pdata = this->pdata;
+	struct mtd_info  *mtd = &this->mtd;
+	struct nand_chip *chip = &this->nand;
+	int ret;
+
+	/* init current chip */
+	this->current_chip	= -1;
+
+	/* init the MTD data structures */
+	mtd->priv		= chip;
+	mtd->name		= "gpmi-nand";
+	mtd->owner		= THIS_MODULE;
+
+	/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
+	chip->priv		= this;
+	chip->select_chip	= gpmi_select_chip;
+	chip->cmd_ctrl		= gpmi_cmd_ctrl;
+	chip->dev_ready		= gpmi_dev_ready;
+	chip->read_byte		= gpmi_read_byte;
+	chip->read_buf		= gpmi_read_buf;
+	chip->write_buf		= gpmi_write_buf;
+	chip->ecc.read_page	= gpmi_ecc_read_page;
+	chip->ecc.write_page	= gpmi_ecc_write_page;
+	chip->ecc.read_oob	= gpmi_ecc_read_oob;
+	chip->ecc.write_oob	= gpmi_ecc_write_oob;
+	chip->scan_bbt		= gpmi_scan_bbt;
+	chip->badblock_pattern	= &gpmi_bbt_descr;
+	chip->block_markbad	= gpmi_block_markbad;
+	chip->options		|= NAND_NO_SUBPAGE_WRITE;
+	chip->ecc.mode		= NAND_ECC_HW;
+	chip->ecc.size		= 1;
+	chip->ecc.layout	= &gpmi_hw_ecclayout;
+
+	/* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
+	this->bch_geometry.payload_size = 1024;
+	this->bch_geometry.auxiliary_size = 128;
+	ret = gpmi_alloc_dma_buffer(this);
+	if (ret)
+		goto err_out;
+
+	ret = nand_scan(mtd, pdata->max_chip_count);
+	if (ret) {
+		pr_err("Chip scan failed\n");
+		goto err_out;
+	}
+
+	ret = mtd_device_parse_register(mtd, NULL, NULL,
+			pdata->partitions, pdata->partition_count);
+	if (ret)
+		goto err_out;
+	return 0;
+
+err_out:
+	gpmi_nfc_exit(this);
+	return ret;
+}
+
+static int __devinit gpmi_nand_probe(struct platform_device *pdev)
+{
+	struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
+	struct gpmi_nand_data *this;
+	int ret;
+
+	this = kzalloc(sizeof(*this), GFP_KERNEL);
+	if (!this) {
+		pr_err("Failed to allocate per-device memory\n");
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, this);
+	this->pdev  = pdev;
+	this->dev   = &pdev->dev;
+	this->pdata = pdata;
+
+	if (pdata->platform_init) {
+		ret = pdata->platform_init();
+		if (ret)
+			goto platform_init_error;
+	}
+
+	ret = acquire_resources(this);
+	if (ret)
+		goto exit_acquire_resources;
+
+	ret = init_hardware(this);
+	if (ret)
+		goto exit_nfc_init;
+
+	ret = gpmi_nfc_init(this);
+	if (ret)
+		goto exit_nfc_init;
+
+	return 0;
+
+exit_nfc_init:
+	release_resources(this);
+platform_init_error:
+exit_acquire_resources:
+	platform_set_drvdata(pdev, NULL);
+	kfree(this);
+	return ret;
+}
+
+static int __exit gpmi_nand_remove(struct platform_device *pdev)
+{
+	struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+
+	gpmi_nfc_exit(this);
+	release_resources(this);
+	platform_set_drvdata(pdev, NULL);
+	kfree(this);
+	return 0;
+}
+
+static const struct platform_device_id gpmi_ids[] = {
+	{
+		.name = "imx23-gpmi-nand",
+		.driver_data = IS_MX23,
+	}, {
+		.name = "imx28-gpmi-nand",
+		.driver_data = IS_MX28,
+	}, {},
+};
+
+static struct platform_driver gpmi_nand_driver = {
+	.driver = {
+		.name = "gpmi-nand",
+	},
+	.probe   = gpmi_nand_probe,
+	.remove  = __exit_p(gpmi_nand_remove),
+	.id_table = gpmi_ids,
+};
+
+static int __init gpmi_nand_init(void)
+{
+	int err;
+
+	err = platform_driver_register(&gpmi_nand_driver);
+	if (err == 0)
+		pr_info("i.MX GPMI NAND driver registered\n");
+	else
+		pr_err("i.MX GPMI NAND driver registration failed\n");
+	return err;
+}
+
+static void __exit gpmi_nand_exit(void)
+{
+	platform_driver_unregister(&gpmi_nand_driver);
+}
+
+module_init(gpmi_nand_init);
+module_exit(gpmi_nand_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
new file mode 100644
index 0000000..e023bcc
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -0,0 +1,273 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
+#define __DRIVERS_MTD_NAND_GPMI_NAND_H
+
+#include <linux/mtd/nand.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <mach/dma.h>
+
+struct resources {
+	void          *gpmi_regs;
+	void          *bch_regs;
+	unsigned int  bch_low_interrupt;
+	unsigned int  bch_high_interrupt;
+	unsigned int  dma_low_channel;
+	unsigned int  dma_high_channel;
+	struct clk    *clock;
+};
+
+/**
+ * struct bch_geometry - BCH geometry description.
+ * @gf_len:                   The length of Galois Field. (e.g., 13 or 14)
+ * @ecc_strength:             The number of bit errors the BCH code can
+ *                            correct in each ECC chunk.
+ * @page_size:                The size, in bytes, of a physical page, including
+ *                            both data and OOB.
+ * @metadata_size:            The size, in bytes, of the metadata.
+ * @ecc_chunk_size:           The size, in bytes, of a single ECC chunk. Note
+ *                            the first chunk in the page includes both data and
+ *                            metadata, so it's a bit larger than this value.
+ * @ecc_chunk_count:          The number of ECC chunks in the page.
+ * @payload_size:             The size, in bytes, of the payload buffer.
+ * @auxiliary_size:           The size, in bytes, of the auxiliary buffer.
+ * @auxiliary_status_offset:  The offset into the auxiliary buffer at which
+ *                            the ECC status appears.
+ * @block_mark_byte_offset:   The byte offset in the ECC-based page view at
+ *                            which the underlying physical block mark appears.
+ * @block_mark_bit_offset:    The bit offset into the ECC-based page view at
+ *                            which the underlying physical block mark appears.
+ */
+struct bch_geometry {
+	unsigned int  gf_len;
+	unsigned int  ecc_strength;
+	unsigned int  page_size;
+	unsigned int  metadata_size;
+	unsigned int  ecc_chunk_size;
+	unsigned int  ecc_chunk_count;
+	unsigned int  payload_size;
+	unsigned int  auxiliary_size;
+	unsigned int  auxiliary_status_offset;
+	unsigned int  block_mark_byte_offset;
+	unsigned int  block_mark_bit_offset;
+};
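
As a purely illustrative reading of these fields (the numbers below are assumptions for the sake of the example, not values computed by this patch): a 2048+64 page split into 512-byte chunks gives four chunks, the first of which also carries the metadata:

	static const struct bch_geometry example_geo = {
		.gf_len          = 13,
		.ecc_strength    = 8,		/* up to 8 bit errors per chunk */
		.page_size       = 2048 + 64,	/* data + OOB */
		.metadata_size   = 10,
		.ecc_chunk_size  = 512,
		.ecc_chunk_count = 2048 / 512,	/* 4 */
	};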
+
+/**
+ * struct boot_rom_geometry - Boot ROM geometry description.
+ * @stride_size_in_pages:        The size of a boot block stride, in pages.
+ * @search_area_stride_exponent: The logarithm to base 2 of the size of a
+ *                               search area in boot block strides.
+ */
+struct boot_rom_geometry {
+	unsigned int  stride_size_in_pages;
+	unsigned int  search_area_stride_exponent;
+};
+
+/* DMA operations types */
+enum dma_ops_type {
+	DMA_FOR_COMMAND = 1,
+	DMA_FOR_READ_DATA,
+	DMA_FOR_WRITE_DATA,
+	DMA_FOR_READ_ECC_PAGE,
+	DMA_FOR_WRITE_ECC_PAGE
+};
+
+/**
+ * struct nand_timing - Fundamental timing attributes for NAND.
+ * @data_setup_in_ns:         The data setup time, in nanoseconds. Usually the
+ *                            maximum of tDS and tWP. A negative value
+ *                            indicates this characteristic isn't known.
+ * @data_hold_in_ns:          The data hold time, in nanoseconds. Usually the
+ *                            maximum of tDH, tWH and tREH. A negative value
+ *                            indicates this characteristic isn't known.
+ * @address_setup_in_ns:      The address setup time, in nanoseconds. Usually
+ *                            the maximum of tCLS, tCS and tALS. A negative
+ *                            value indicates this characteristic isn't known.
+ * @gpmi_sample_delay_in_ns:  A GPMI-specific timing parameter. A negative value
+ *                            indicates this characteristic isn't known.
+ * @tREA_in_ns:               tREA, in nanoseconds, from the data sheet. A
+ *                            negative value indicates this characteristic isn't
+ *                            known.
+ * @tRLOH_in_ns:              tRLOH, in nanoseconds, from the data sheet. A
+ *                            negative value indicates this characteristic isn't
+ *                            known.
+ * @tRHOH_in_ns:              tRHOH, in nanoseconds, from the data sheet. A
+ *                            negative value indicates this characteristic isn't
+ *                            known.
+ */
+struct nand_timing {
+	int8_t  data_setup_in_ns;
+	int8_t  data_hold_in_ns;
+	int8_t  address_setup_in_ns;
+	int8_t  gpmi_sample_delay_in_ns;
+	int8_t  tREA_in_ns;
+	int8_t  tRLOH_in_ns;
+	int8_t  tRHOH_in_ns;
+};
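
A hypothetical timing record honouring the "negative means unknown" convention documented above; the known values are invented for illustration, not taken from any datasheet:

	static const struct nand_timing example_timing = {
		.data_setup_in_ns        = 80,
		.data_hold_in_ns         = 60,
		.address_setup_in_ns     = 25,
		.gpmi_sample_delay_in_ns = -1,	/* unknown */
		.tREA_in_ns              = -1,	/* unknown */
		.tRLOH_in_ns             = -1,	/* unknown */
		.tRHOH_in_ns             = -1,	/* unknown */
	};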
+
+struct gpmi_nand_data {
+	/* System Interface */
+	struct device		*dev;
+	struct platform_device	*pdev;
+	struct gpmi_nand_platform_data	*pdata;
+
+	/* Resources */
+	struct resources	resources;
+
+	/* Flash Hardware */
+	struct nand_timing	timing;
+
+	/* BCH */
+	struct bch_geometry	bch_geometry;
+	struct completion	bch_done;
+
+	/* NAND Boot issue */
+	bool			swap_block_mark;
+	struct boot_rom_geometry rom_geometry;
+
+	/* MTD / NAND */
+	struct nand_chip	nand;
+	struct mtd_info		mtd;
+
+	/* General-use Variables */
+	int			current_chip;
+	unsigned int		command_length;
+
+	/* passed from upper layer */
+	uint8_t			*upper_buf;
+	int			upper_len;
+
+	/* for DMA operations */
+	bool			direct_dma_map_ok;
+
+	struct scatterlist	cmd_sgl;
+	char			*cmd_buffer;
+
+	struct scatterlist	data_sgl;
+	char			*data_buffer_dma;
+
+	void			*page_buffer_virt;
+	dma_addr_t		page_buffer_phys;
+	unsigned int		page_buffer_size;
+
+	void			*payload_virt;
+	dma_addr_t		payload_phys;
+
+	void			*auxiliary_virt;
+	dma_addr_t		auxiliary_phys;
+
+	/* DMA channels */
+#define DMA_CHANS		8
+	struct dma_chan		*dma_chans[DMA_CHANS];
+	struct mxs_dma_data	dma_data;
+	enum dma_ops_type	last_dma_type;
+	enum dma_ops_type	dma_type;
+	struct completion	dma_done;
+
+	/* private */
+	void			*private;
+};
+
+/**
+ * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
+ * @data_setup_in_cycles:      The data setup time, in cycles.
+ * @data_hold_in_cycles:       The data hold time, in cycles.
+ * @address_setup_in_cycles:   The address setup time, in cycles.
+ * @use_half_periods:          Indicates the clock is running slowly, so the
+ *                             NFC DLL should use half-periods.
+ * @sample_delay_factor:       The sample delay factor.
+ */
+struct gpmi_nfc_hardware_timing {
+	uint8_t  data_setup_in_cycles;
+	uint8_t  data_hold_in_cycles;
+	uint8_t  address_setup_in_cycles;
+	bool     use_half_periods;
+	uint8_t  sample_delay_factor;
+};
+
+/**
+ * struct timing_threshod - Timing threshold
+ * @max_data_setup_cycles:       The maximum number of data setup cycles that
+ *                               can be expressed in the hardware.
+ * @internal_data_setup_in_ns:   The time, in ns, that the NFC hardware requires
+ *                               for data read internal setup. In the Reference
+ *                               Manual, see the chapter "High-Speed NAND
+ *                               Timing" for more details.
+ * @max_sample_delay_factor:     The maximum sample delay factor that can be
+ *                               expressed in the hardware.
+ * @max_dll_clock_period_in_ns:  The maximum period of the GPMI clock that the
+ *                               sample delay DLL hardware can possibly work
+ *                               with (the DLL is unusable with longer periods).
+ *                               If the full-cycle period is greater than HALF
+ *                               this value, the DLL must be configured to use
+ *                               half-periods.
+ * @max_dll_delay_in_ns:         The maximum amount of delay, in ns, that the
+ *                               DLL can implement.
+ * @clock_frequency_in_hz:       The clock frequency, in Hz, during the current
+ *                               I/O transaction. If no I/O transaction is in
+ *                               progress, this is the clock frequency during
+ *                               the most recent I/O transaction.
+ */
+struct timing_threshod {
+	const unsigned int      max_chip_count;
+	const unsigned int      max_data_setup_cycles;
+	const unsigned int      internal_data_setup_in_ns;
+	const unsigned int      max_sample_delay_factor;
+	const unsigned int      max_dll_clock_period_in_ns;
+	const unsigned int      max_dll_delay_in_ns;
+	unsigned long           clock_frequency_in_hz;
+
+};
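
The two DLL rules in the @max_dll_clock_period_in_ns description reduce to simple comparisons. A sketch with illustrative variable names (the real decision lives in the timing-calculation code, not in this hunk):

	bool dll_usable = period_in_ns <= threshold->max_dll_clock_period_in_ns;
	bool use_half_periods = period_in_ns >
				(threshold->max_dll_clock_period_in_ns >> 1);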
+
+/* Common Services */
+extern int common_nfc_set_geometry(struct gpmi_nand_data *);
+extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+extern void prepare_data_dma(struct gpmi_nand_data *,
+				enum dma_data_direction dr);
+extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
+				struct dma_async_tx_descriptor *);
+extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
+				struct dma_async_tx_descriptor *);
+
+/* GPMI-NAND helper function library */
+extern int gpmi_init(struct gpmi_nand_data *);
+extern void gpmi_clear_bch(struct gpmi_nand_data *);
+extern void gpmi_dump_info(struct gpmi_nand_data *);
+extern int bch_set_geometry(struct gpmi_nand_data *);
+extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+extern int gpmi_send_command(struct gpmi_nand_data *);
+extern void gpmi_begin(struct gpmi_nand_data *);
+extern void gpmi_end(struct gpmi_nand_data *);
+extern int gpmi_read_data(struct gpmi_nand_data *);
+extern int gpmi_send_data(struct gpmi_nand_data *);
+extern int gpmi_send_page(struct gpmi_nand_data *,
+			dma_addr_t payload, dma_addr_t auxiliary);
+extern int gpmi_read_page(struct gpmi_nand_data *,
+			dma_addr_t payload, dma_addr_t auxiliary);
+
+/* BCH : Status Block Completion Codes */
+#define STATUS_GOOD		0x00
+#define STATUS_ERASED		0xff
+#define STATUS_UNCORRECTABLE	0xfe
+
+/* Use the platform_id to distinguish different Archs. */
+#define IS_MX23			0x1
+#define IS_MX28			0x2
+#define GPMI_IS_MX23(x)		((x)->pdev->id_entry->driver_data == IS_MX23)
+#define GPMI_IS_MX28(x)		((x)->pdev->id_entry->driver_data == IS_MX28)
+#endif
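
These status codes correspond to the per-chunk completion bytes that the BCH engine deposits in the auxiliary buffer, at the @auxiliary_status_offset documented in struct bch_geometry. A hedged sketch of folding them into mtd->ecc_stats, assuming a populated geometry and auxiliary buffer:

	u8 *status = (u8 *)auxiliary_virt + geo->auxiliary_status_offset;
	unsigned int i;

	for (i = 0; i < geo->ecc_chunk_count; i++, status++) {
		if (*status == STATUS_GOOD || *status == STATUS_ERASED)
			continue;
		if (*status == STATUS_UNCORRECTABLE) {
			mtd->ecc_stats.failed++;
			continue;
		}
		/* any other value is the number of corrected bitflips */
		mtd->ecc_stats.corrected += *status;
	}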
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
new file mode 100644
index 0000000..8343124
--- /dev/null
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
@@ -0,0 +1,172 @@
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_GPMI_REGS_H
+#define __GPMI_NAND_GPMI_REGS_H
+
+#define HW_GPMI_CTRL0					0x00000000
+#define HW_GPMI_CTRL0_SET				0x00000004
+#define HW_GPMI_CTRL0_CLR				0x00000008
+#define HW_GPMI_CTRL0_TOG				0x0000000c
+
+#define BP_GPMI_CTRL0_COMMAND_MODE			24
+#define BM_GPMI_CTRL0_COMMAND_MODE	(3 << BP_GPMI_CTRL0_COMMAND_MODE)
+#define BF_GPMI_CTRL0_COMMAND_MODE(v)	\
+	(((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
+#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE		0x0
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ		0x1
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE	0x2
+#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY	0x3
+
+#define BM_GPMI_CTRL0_WORD_LENGTH			(1 << 23)
+#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT		0x0
+#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT		0x1
+
+/*
+ *  Difference in LOCK_CS between imx23 and imx28:
+ *  this bit may impact power consumption, so some
+ *  chips do not set it.
+ */
+#define MX23_BP_GPMI_CTRL0_LOCK_CS			22
+#define MX28_BP_GPMI_CTRL0_LOCK_CS			27
+#define LOCK_CS_ENABLE					0x1
+#define BF_GPMI_CTRL0_LOCK_CS(v, x)			0x0
+
+/* Difference in CS between imx23 and imx28 */
+#define BP_GPMI_CTRL0_CS				20
+#define MX23_BM_GPMI_CTRL0_CS		(3 << BP_GPMI_CTRL0_CS)
+#define MX28_BM_GPMI_CTRL0_CS		(7 << BP_GPMI_CTRL0_CS)
+#define BF_GPMI_CTRL0_CS(v, x)		(((v) << BP_GPMI_CTRL0_CS) & \
+						(GPMI_IS_MX23((x)) \
+						? MX23_BM_GPMI_CTRL0_CS	\
+						: MX28_BM_GPMI_CTRL0_CS))
+
+#define BP_GPMI_CTRL0_ADDRESS				17
+#define BM_GPMI_CTRL0_ADDRESS		(3 << BP_GPMI_CTRL0_ADDRESS)
+#define BF_GPMI_CTRL0_ADDRESS(v)	\
+		(((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
+#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA		0x0
+#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE			0x1
+#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE			0x2
+
+#define BM_GPMI_CTRL0_ADDRESS_INCREMENT			(1 << 16)
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED	0x0
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED	0x1
+
+#define BP_GPMI_CTRL0_XFER_COUNT			0
+#define BM_GPMI_CTRL0_XFER_COUNT	(0xffff << BP_GPMI_CTRL0_XFER_COUNT)
+#define BF_GPMI_CTRL0_XFER_COUNT(v)	\
+		(((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
+
+#define HW_GPMI_COMPARE					0x00000010
+
+#define HW_GPMI_ECCCTRL					0x00000020
+#define HW_GPMI_ECCCTRL_SET				0x00000024
+#define HW_GPMI_ECCCTRL_CLR				0x00000028
+#define HW_GPMI_ECCCTRL_TOG				0x0000002c
+
+#define BP_GPMI_ECCCTRL_ECC_CMD				13
+#define BM_GPMI_ECCCTRL_ECC_CMD		(3 << BP_GPMI_ECCCTRL_ECC_CMD)
+#define BF_GPMI_ECCCTRL_ECC_CMD(v)	\
+		(((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE		0x0
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE		0x1
+
+#define BM_GPMI_ECCCTRL_ENABLE_ECC			(1 << 12)
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE		0x1
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE		0x0
+
+#define BP_GPMI_ECCCTRL_BUFFER_MASK			0
+#define BM_GPMI_ECCCTRL_BUFFER_MASK	(0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
+#define BF_GPMI_ECCCTRL_BUFFER_MASK(v)	\
+	(((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY	0x100
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE		0x1FF
+
+#define HW_GPMI_ECCCOUNT				0x00000030
+#define HW_GPMI_PAYLOAD					0x00000040
+#define HW_GPMI_AUXILIARY				0x00000050
+#define HW_GPMI_CTRL1					0x00000060
+#define HW_GPMI_CTRL1_SET				0x00000064
+#define HW_GPMI_CTRL1_CLR				0x00000068
+#define HW_GPMI_CTRL1_TOG				0x0000006c
+
+#define BM_GPMI_CTRL1_BCH_MODE				(1 << 18)
+
+#define BP_GPMI_CTRL1_DLL_ENABLE			17
+#define BM_GPMI_CTRL1_DLL_ENABLE	(1 << BP_GPMI_CTRL1_DLL_ENABLE)
+
+#define BP_GPMI_CTRL1_HALF_PERIOD			16
+#define BM_GPMI_CTRL1_HALF_PERIOD	(1 << BP_GPMI_CTRL1_HALF_PERIOD)
+
+#define BP_GPMI_CTRL1_RDN_DELAY				12
+#define BM_GPMI_CTRL1_RDN_DELAY		(0xf << BP_GPMI_CTRL1_RDN_DELAY)
+#define BF_GPMI_CTRL1_RDN_DELAY(v)	\
+		(((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
+
+#define BM_GPMI_CTRL1_DEV_RESET				(1 << 3)
+#define BV_GPMI_CTRL1_DEV_RESET__ENABLED		0x0
+#define BV_GPMI_CTRL1_DEV_RESET__DISABLED		0x1
+
+#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY		(1 << 2)
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW	0x0
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH	0x1
+
+#define BM_GPMI_CTRL1_CAMERA_MODE			(1 << 1)
+#define BV_GPMI_CTRL1_GPMI_MODE__NAND			0x0
+#define BV_GPMI_CTRL1_GPMI_MODE__ATA			0x1
+
+#define BM_GPMI_CTRL1_GPMI_MODE				(1 << 0)
+
+#define HW_GPMI_TIMING0					0x00000070
+
+#define BP_GPMI_TIMING0_ADDRESS_SETUP			16
+#define BM_GPMI_TIMING0_ADDRESS_SETUP	(0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
+#define BF_GPMI_TIMING0_ADDRESS_SETUP(v)	\
+	(((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
+
+#define BP_GPMI_TIMING0_DATA_HOLD			8
+#define BM_GPMI_TIMING0_DATA_HOLD	(0xff << BP_GPMI_TIMING0_DATA_HOLD)
+#define BF_GPMI_TIMING0_DATA_HOLD(v)		\
+	(((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
+
+#define BP_GPMI_TIMING0_DATA_SETUP			0
+#define BM_GPMI_TIMING0_DATA_SETUP	(0xff << BP_GPMI_TIMING0_DATA_SETUP)
+#define BF_GPMI_TIMING0_DATA_SETUP(v)		\
+	(((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
+
+#define HW_GPMI_TIMING1					0x00000080
+#define BP_GPMI_TIMING1_BUSY_TIMEOUT			16
+
+#define HW_GPMI_TIMING2					0x00000090
+#define HW_GPMI_DATA					0x000000a0
+
+/* MX28 uses this to detect READY. */
+#define HW_GPMI_STAT					0x000000b0
+#define MX28_BP_GPMI_STAT_READY_BUSY			24
+#define MX28_BM_GPMI_STAT_READY_BUSY	(0xff << MX28_BP_GPMI_STAT_READY_BUSY)
+#define MX28_BF_GPMI_STAT_READY_BUSY(v)		\
+	(((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
+
+/* MX23 uses this to detect READY. */
+#define HW_GPMI_DEBUG					0x000000c0
+#define MX23_BP_GPMI_DEBUG_READY0			28
+#define MX23_BM_GPMI_DEBUG_READY0	(1 << MX23_BP_GPMI_DEBUG_READY0)
+#endif
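
The BP_ (bit position), BM_ (bit mask) and BF_ (bit field) triplets above follow the usual shift-and-mask pattern, so a CTRL0 word is composed by OR-ing fields together. A sketch (len is an illustrative transfer count):

	u32 ctrl0 = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
		  | BM_GPMI_CTRL0_WORD_LENGTH	/* 8-bit bus */
		  | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
		  | BF_GPMI_CTRL0_XFER_COUNT(len);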
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 02a03e6..5dc6f0d 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -81,9 +81,6 @@
 static int __init h1910_init(void)
 {
 	struct nand_chip *this;
-	const char *part_type = 0;
-	int mtd_parts_nb = 0;
-	struct mtd_partition *mtd_parts = 0;
 	void __iomem *nandaddr;
 
 	if (!machine_is_h1900())
@@ -136,22 +133,10 @@
 		iounmap((void *)nandaddr);
 		return -ENXIO;
 	}
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-	mtd_parts_nb = parse_cmdline_partitions(h1910_nand_mtd, &mtd_parts, "h1910-nand");
-	if (mtd_parts_nb > 0)
-		part_type = "command line";
-	else
-		mtd_parts_nb = 0;
-#endif
-	if (mtd_parts_nb == 0) {
-		mtd_parts = partition_info;
-		mtd_parts_nb = NUM_PARTITIONS;
-		part_type = "static";
-	}
 
 	/* Register the partitions */
-	printk(KERN_NOTICE "Using %s partition definition\n", part_type);
-	mtd_device_register(h1910_nand_mtd, mtd_parts, mtd_parts_nb);
+	mtd_device_parse_register(h1910_nand_mtd, NULL, 0,
+			partition_info, NUM_PARTITIONS);
 
 	/* Return happy */
 	return 0;
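
This is the first of several identical conversions in this series: the open-coded parse-then-register sequence collapses into one mtd_device_parse_register() call, which tries the default partition parsers (e.g. the command line) before falling back to the static table. The general shape, with illustrative names:

	/* NULL parser list = defaults; the static table is the fallback */
	ret = mtd_device_parse_register(mtd, NULL, NULL,
					static_parts, ARRAY_SIZE(static_parts));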
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index 6e813da..e266407 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -251,10 +251,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-static const char *part_probes[] = {"cmdline", NULL};
-#endif
-
 static int jz_nand_ioremap_resource(struct platform_device *pdev,
 	const char *name, struct resource **res, void __iomem **base)
 {
@@ -299,8 +295,6 @@
 	struct nand_chip *chip;
 	struct mtd_info *mtd;
 	struct jz_nand_platform_data *pdata = pdev->dev.platform_data;
-	struct mtd_partition *partition_info;
-	int num_partitions = 0;
 
 	nand = kzalloc(sizeof(*nand), GFP_KERNEL);
 	if (!nand) {
@@ -373,15 +367,9 @@
 		goto err_gpio_free;
 	}
 
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-	num_partitions = parse_mtd_partitions(mtd, part_probes,
-						&partition_info, 0);
-#endif
-	if (num_partitions <= 0 && pdata) {
-		num_partitions = pdata->num_partitions;
-		partition_info = pdata->partitions;
-	}
-	ret = mtd_device_register(mtd, partition_info, num_partitions);
+	ret = mtd_device_parse_register(mtd, NULL, 0,
+			pdata ? pdata->partitions : NULL,
+			pdata ? pdata->num_partitions : 0);
 
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to add mtd device\n");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index eb1fbac..5ede647 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -131,8 +131,6 @@
 
 static void mpc5121_nfc_done(struct mtd_info *mtd);
 
-static const char *mpc5121_nfc_pprobes[] = { "cmdlinepart", NULL };
-
 /* Read NFC register */
 static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
 {
@@ -656,13 +654,13 @@
 	struct mpc5121_nfc_prv *prv;
 	struct resource res;
 	struct mtd_info *mtd;
-	struct mtd_partition *parts;
 	struct nand_chip *chip;
 	unsigned long regs_paddr, regs_size;
 	const __be32 *chips_no;
 	int resettime = 0;
 	int retval = 0;
 	int rev, len;
+	struct mtd_part_parser_data ppdata;
 
 	/*
 	 * Check SoC revision. This driver supports only NFC
@@ -727,6 +725,7 @@
 	}
 
 	mtd->name = "MPC5121 NAND";
+	ppdata.of_node = dn;
 	chip->dev_ready = mpc5121_nfc_dev_ready;
 	chip->cmdfunc = mpc5121_nfc_command;
 	chip->read_byte = mpc5121_nfc_read_byte;
@@ -735,7 +734,8 @@
 	chip->write_buf = mpc5121_nfc_write_buf;
 	chip->verify_buf = mpc5121_nfc_verify_buf;
 	chip->select_chip = mpc5121_nfc_select_chip;
-	chip->options = NAND_NO_AUTOINCR | NAND_USE_FLASH_BBT;
+	chip->options = NAND_NO_AUTOINCR;
+	chip->bbt_options = NAND_BBT_USE_FLASH;
 	chip->ecc.mode = NAND_ECC_SOFT;
 
 	/* Support external chip-select logic on ADS5121 board */
@@ -837,19 +837,7 @@
 	dev_set_drvdata(dev, mtd);
 
 	/* Register device in MTD */
-	retval = parse_mtd_partitions(mtd, mpc5121_nfc_pprobes, &parts, 0);
-#ifdef CONFIG_MTD_OF_PARTS
-	if (retval == 0)
-		retval = of_mtd_parse_partitions(dev, dn, &parts);
-#endif
-	if (retval < 0) {
-		dev_err(dev, "Error parsing MTD partitions!\n");
-		devm_free_irq(dev, prv->irq, mtd);
-		retval = -EINVAL;
-		goto error;
-	}
-
-	retval = mtd_device_register(mtd, parts, retval);
+	retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
 	if (retval) {
 		dev_err(dev, "Error adding MTD device!\n");
 		devm_free_irq(dev, prv->irq, mtd);
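
Here the old CONFIG_MTD_OF_PARTS branch is subsumed as well: pointing ppdata.of_node at the controller's device-tree node lets the core's "ofpart" parser discover partitions described there. The minimal shape, assuming dn is the device node as in this probe function:

	struct mtd_part_parser_data ppdata;

	ppdata.of_node = dn;
	retval = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);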
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 90df34c..74a43b8 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -41,7 +41,7 @@
 
 #define nfc_is_v21()		(cpu_is_mx25() || cpu_is_mx35())
 #define nfc_is_v1()		(cpu_is_mx31() || cpu_is_mx27() || cpu_is_mx21())
-#define nfc_is_v3_2()		cpu_is_mx51()
+#define nfc_is_v3_2()		(cpu_is_mx51() || cpu_is_mx53())
 #define nfc_is_v3()		nfc_is_v3_2()
 
 /* Addresses for NFC registers */
@@ -143,7 +143,6 @@
 struct mxc_nand_host {
 	struct mtd_info		mtd;
 	struct nand_chip	nand;
-	struct mtd_partition	*parts;
 	struct device		*dev;
 
 	void			*spare0;
@@ -350,8 +349,7 @@
 			udelay(1);
 		}
 		if (max_retries < 0)
-			DEBUG(MTD_DEBUG_LEVEL0, "%s: INT not set\n",
-			      __func__);
+			pr_debug("%s: INT not set\n", __func__);
 	}
 }
 
@@ -371,7 +369,7 @@
  * waits for completion. */
 static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
 {
-	DEBUG(MTD_DEBUG_LEVEL3, "send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+	pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
 
 	writew(cmd, NFC_V1_V2_FLASH_CMD);
 	writew(NFC_CMD, NFC_V1_V2_CONFIG2);
@@ -387,8 +385,7 @@
 			udelay(1);
 		}
 		if (max_retries < 0)
-			DEBUG(MTD_DEBUG_LEVEL0, "%s: RESET failed\n",
-			      __func__);
+			pr_debug("%s: RESET failed\n", __func__);
 	} else {
 		/* Wait for operation to complete */
 		wait_op_done(host, useirq);
@@ -411,7 +408,7 @@
  * a NAND command. */
 static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
 {
-	DEBUG(MTD_DEBUG_LEVEL3, "send_addr(host, 0x%x %d)\n", addr, islast);
+	pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
 
 	writew(addr, NFC_V1_V2_FLASH_ADDR);
 	writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
@@ -561,8 +558,7 @@
 	uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT);
 
 	if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
-		DEBUG(MTD_DEBUG_LEVEL0,
-		      "MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+		pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
 		return -1;
 	}
 
@@ -849,7 +845,7 @@
 		writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
 	} else if (nfc_is_v1()) {
 		writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
-		writew(0x4000, NFC_V1_UNLOCKEND_BLKADDR);
+		writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
 	} else
 		BUG();
 
@@ -932,8 +928,7 @@
 	struct nand_chip *nand_chip = mtd->priv;
 	struct mxc_nand_host *host = nand_chip->priv;
 
-	DEBUG(MTD_DEBUG_LEVEL3,
-	      "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+	pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
 	      command, column, page_addr);
 
 	/* Reset command state information */
@@ -1044,7 +1039,7 @@
 	struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
 	struct mxc_nand_host *host;
 	struct resource *res;
-	int err = 0, __maybe_unused nr_parts = 0;
+	int err = 0;
 	struct nand_ecclayout *oob_smallpage, *oob_largepage;
 
 	/* Allocate memory for MTD device structure and private data */
@@ -1179,7 +1174,7 @@
 		this->bbt_td = &bbt_main_descr;
 		this->bbt_md = &bbt_mirror_descr;
 		/* update flash based bbt */
-		this->options |= NAND_USE_FLASH_BBT;
+		this->bbt_options |= NAND_BBT_USE_FLASH;
 	}
 
 	init_completion(&host->op_completion);
@@ -1231,16 +1226,8 @@
 	}
 
 	/* Register the partitions */
-	nr_parts =
-	    parse_mtd_partitions(mtd, part_probes, &host->parts, 0);
-	if (nr_parts > 0)
-		mtd_device_register(mtd, host->parts, nr_parts);
-	else if (pdata->parts)
-		mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
-	else {
-		pr_info("Registering %s as whole device\n", mtd->name);
-		mtd_device_register(mtd, NULL, 0);
-	}
+	mtd_device_parse_register(mtd, part_probes, 0,
+			pdata->parts, pdata->nr_parts);
 
 	platform_set_drvdata(pdev, host);
 
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index a46e9bb..3ed9c5e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -21,7 +21,7 @@
  *  TODO:
  *	Enable cached programming for 2k page size chips
  *	Check, if mtd->ecctype should be set to MTD_ECC_HW
- *	if we have HW ecc support.
+ *	if we have HW ECC support.
  *	The AG-AND chips have nice features for speed improvement,
  *	which are not supported yet. Read / program 4 pages in one go.
  *	BBT table is not serialized, has to be fixed
@@ -113,21 +113,19 @@
 
 	/* Start address must align on block boundary */
 	if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__);
+		pr_debug("%s: unaligned address\n", __func__);
 		ret = -EINVAL;
 	}
 
 	/* Length must align on block boundary */
 	if (len & ((1 << chip->phys_erase_shift) - 1)) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n",
-					__func__);
+		pr_debug("%s: length not block aligned\n", __func__);
 		ret = -EINVAL;
 	}
 
 	/* Do not allow past end of device */
 	if (ofs + len > mtd->size) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Past end of device\n",
-					__func__);
+		pr_debug("%s: past end of device\n", __func__);
 		ret = -EINVAL;
 	}
 
@@ -136,9 +134,9 @@
 
 /**
  * nand_release_device - [GENERIC] release chip
- * @mtd:	MTD device structure
+ * @mtd: MTD device structure
  *
- * Deselect, release chip lock and wake up anyone waiting on the device
+ * Deselect, release chip lock and wake up anyone waiting on the device.
  */
 static void nand_release_device(struct mtd_info *mtd)
 {
@@ -157,9 +155,9 @@
 
 /**
  * nand_read_byte - [DEFAULT] read one byte from the chip
- * @mtd:	MTD device structure
+ * @mtd: MTD device structure
  *
- * Default read function for 8bit buswith
+ * Default read function for 8bit buswidth.
  */
 static uint8_t nand_read_byte(struct mtd_info *mtd)
 {
@@ -169,10 +167,9 @@
 
 /**
- * nand_read_byte16 - [DEFAULT] read one byte endianess aware from the chip
- * @mtd:	MTD device structure
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @mtd: MTD device structure
  *
- * Default read function for 16bit buswith with
- * endianess conversion
+ * Default read function for 16bit buswidth with endianness conversion.
  */
 static uint8_t nand_read_byte16(struct mtd_info *mtd)
 {
@@ -182,10 +181,9 @@
 
 /**
  * nand_read_word - [DEFAULT] read one word from the chip
- * @mtd:	MTD device structure
+ * @mtd: MTD device structure
  *
- * Default read function for 16bit buswith without
- * endianess conversion
+ * Default read function for 16bit buswidth without endianness conversion.
  */
 static u16 nand_read_word(struct mtd_info *mtd)
 {
@@ -195,8 +193,8 @@
 
 /**
  * nand_select_chip - [DEFAULT] control CE line
- * @mtd:	MTD device structure
- * @chipnr:	chipnumber to select, -1 for deselect
+ * @mtd: MTD device structure
+ * @chipnr: chipnumber to select, -1 for deselect
  *
  * Default select function for 1 chip devices.
  */
@@ -218,11 +216,11 @@
 
 /**
  * nand_write_buf - [DEFAULT] write buffer to chip
- * @mtd:	MTD device structure
- * @buf:	data buffer
- * @len:	number of bytes to write
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
  *
- * Default write function for 8bit buswith
+ * Default write function for 8bit buswidth.
  */
 static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 {
@@ -235,11 +233,11 @@
 
 /**
  * nand_read_buf - [DEFAULT] read chip data into buffer
- * @mtd:	MTD device structure
- * @buf:	buffer to store date
- * @len:	number of bytes to read
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
  *
- * Default read function for 8bit buswith
+ * Default read function for 8bit buswidth.
  */
 static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 {
@@ -252,11 +250,11 @@
 
 /**
  * nand_verify_buf - [DEFAULT] Verify chip data against buffer
- * @mtd:	MTD device structure
- * @buf:	buffer containing the data to compare
- * @len:	number of bytes to compare
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
  *
- * Default verify function for 8bit buswith
+ * Default verify function for 8bit buswidth.
  */
 static int nand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
 {
@@ -271,11 +269,11 @@
 
 /**
  * nand_write_buf16 - [DEFAULT] write buffer to chip
- * @mtd:	MTD device structure
- * @buf:	data buffer
- * @len:	number of bytes to write
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
  *
- * Default write function for 16bit buswith
+ * Default write function for 16bit buswidth.
  */
 static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
 {
@@ -291,11 +289,11 @@
 
 /**
  * nand_read_buf16 - [DEFAULT] read chip data into buffer
- * @mtd:	MTD device structure
- * @buf:	buffer to store date
- * @len:	number of bytes to read
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
  *
- * Default read function for 16bit buswith
+ * Default read function for 16bit buswidth.
  */
 static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
 {
@@ -310,11 +308,11 @@
 
 /**
  * nand_verify_buf16 - [DEFAULT] Verify chip data against buffer
- * @mtd:	MTD device structure
- * @buf:	buffer containing the data to compare
- * @len:	number of bytes to compare
+ * @mtd: MTD device structure
+ * @buf: buffer containing the data to compare
+ * @len: number of bytes to compare
  *
- * Default verify function for 16bit buswith
+ * Default verify function for 16bit buswidth.
  */
 static int nand_verify_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
 {
@@ -332,9 +330,9 @@
 
 /**
  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
- * @mtd:	MTD device structure
- * @ofs:	offset from device start
- * @getchip:	0, if the chip is already selected
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
  *
  * Check, if the block is bad.
  */
@@ -344,7 +342,7 @@
 	struct nand_chip *chip = mtd->priv;
 	u16 bad;
 
-	if (chip->options & NAND_BBT_SCANLASTPAGE)
+	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
 		ofs += mtd->erasesize - mtd->writesize;
 
 	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
@@ -384,11 +382,11 @@
 
 /**
  * nand_default_block_markbad - [DEFAULT] mark a block bad
- * @mtd:	MTD device structure
- * @ofs:	offset from device start
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
  *
- * This is the default implementation, which can be overridden by
- * a hardware specific driver.
+ * This is the default implementation, which can be overridden by a hardware
+ * specific driver.
 */
 static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
@@ -396,7 +394,7 @@
 	uint8_t buf[2] = { 0, 0 };
 	int block, ret, i = 0;
 
-	if (chip->options & NAND_BBT_SCANLASTPAGE)
+	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
 		ofs += mtd->erasesize - mtd->writesize;
 
 	/* Get block number */
@@ -404,33 +402,31 @@
 	if (chip->bbt)
 		chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1);
 
-	/* Do we have a flash based bad block table ? */
-	if (chip->options & NAND_USE_FLASH_BBT)
+	/* Do we have a flash based bad block table? */
+	if (chip->bbt_options & NAND_BBT_USE_FLASH)
 		ret = nand_update_bbt(mtd, ofs);
 	else {
+		struct mtd_oob_ops ops;
+
 		nand_get_device(chip, mtd, FL_WRITING);
 
-		/* Write to first two pages and to byte 1 and 6 if necessary.
-		 * If we write to more than one location, the first error
-		 * encountered quits the procedure. We write two bytes per
-		 * location, so we dont have to mess with 16 bit access.
+		/*
+		 * Write to first two pages if necessary. If we write to more
+		 * than one location, the first error encountered quits the
+		 * procedure. We write two bytes per location, so we dont have
+		 * to mess with 16 bit access.
 		 */
+		ops.len = ops.ooblen = 2;
+		ops.datbuf = NULL;
+		ops.oobbuf = buf;
+		ops.ooboffs = chip->badblockpos & ~0x01;
+		ops.mode = MTD_OPS_PLACE_OOB;
 		do {
-			chip->ops.len = chip->ops.ooblen = 2;
-			chip->ops.datbuf = NULL;
-			chip->ops.oobbuf = buf;
-			chip->ops.ooboffs = chip->badblockpos & ~0x01;
+			ret = nand_do_write_oob(mtd, ofs, &ops);
 
-			ret = nand_do_write_oob(mtd, ofs, &chip->ops);
-
-			if (!ret && (chip->options & NAND_BBT_SCANBYTE1AND6)) {
-				chip->ops.ooboffs = NAND_SMALL_BADBLOCK_POS
-					& ~0x01;
-				ret = nand_do_write_oob(mtd, ofs, &chip->ops);
-			}
 			i++;
 			ofs += mtd->writesize;
-		} while (!ret && (chip->options & NAND_BBT_SCAN2NDPAGE) &&
+		} while (!ret && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE) &&
 				i < 2);
 
 		nand_release_device(mtd);
@@ -443,16 +439,16 @@
 
 /**
  * nand_check_wp - [GENERIC] check if the chip is write protected
- * @mtd:	MTD device structure
- * Check, if the device is write protected
+ * @mtd: MTD device structure
  *
- * The function expects, that the device is already selected
+ * Check if the device is write protected. The function expects that the
+ * device is already selected.
  */
 static int nand_check_wp(struct mtd_info *mtd)
 {
 	struct nand_chip *chip = mtd->priv;
 
-	/* broken xD cards report WP despite being writable */
+	/* Broken xD cards report WP despite being writable */
 	if (chip->options & NAND_BROKEN_XD)
 		return 0;
 
@@ -463,10 +459,10 @@
 
 /**
  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
- * @mtd:	MTD device structure
- * @ofs:	offset from device start
- * @getchip:	0, if the chip is already selected
- * @allowbbt:	1, if its allowed to access the bbt area
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @getchip: 0, if the chip is already selected
+ * @allowbbt: 1, if it is allowed to access the BBT area
  *
  * Check, if the block is bad. Either by reading the bad block table or
  * calling of the scan function.
@@ -485,8 +481,8 @@
 
 /**
  * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
- * @mtd:	MTD device structure
- * @timeo:	Timeout
+ * @mtd: MTD device structure
+ * @timeo: Timeout
  *
  * Helper function for nand_wait_ready used when needing to wait in interrupt
  * context.
@@ -505,10 +501,7 @@
 	}
 }
 
-/*
- * Wait for the ready pin, after a command
- * The timeout is catched later.
- */
+/* Wait for the ready pin, after a command. The timeout is caught later. */
 void nand_wait_ready(struct mtd_info *mtd)
 {
 	struct nand_chip *chip = mtd->priv;
@@ -519,7 +512,7 @@
 		return panic_nand_wait_ready(mtd, 400);
 
 	led_trigger_event(nand_led_trigger, LED_FULL);
-	/* wait until command is processed or timeout occures */
+	/* Wait until command is processed or timeout occurs */
 	do {
 		if (chip->dev_ready(mtd))
 			break;
@@ -531,13 +524,13 @@
 
 /**
  * nand_command - [DEFAULT] Send command to NAND device
- * @mtd:	MTD device structure
- * @command:	the command to be sent
- * @column:	the column address for this command, -1 if none
- * @page_addr:	the page address for this command, -1 if none
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
  *
- * Send command to NAND device. This function is used for small page
- * devices (256/512 Bytes per page)
+ * Send command to NAND device. This function is used for small page devices
+ * (256/512 Bytes per page).
  */
 static void nand_command(struct mtd_info *mtd, unsigned int command,
 			 int column, int page_addr)
@@ -545,9 +538,7 @@
 	register struct nand_chip *chip = mtd->priv;
 	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
 
-	/*
-	 * Write out the command to the device.
-	 */
+	/* Write out the command to the device */
 	if (command == NAND_CMD_SEQIN) {
 		int readcmd;
 
@@ -567,9 +558,7 @@
 	}
 	chip->cmd_ctrl(mtd, command, ctrl);
 
-	/*
-	 * Address cycle, when necessary
-	 */
+	/* Address cycle, when necessary */
 	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
 	/* Serially input address */
 	if (column != -1) {
@@ -590,8 +579,8 @@
 	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
 
 	/*
-	 * program and erase have their own busy handlers
-	 * status and sequential in needs no delay
+	 * Program and erase have their own busy handlers; status and
+	 * sequential in need no delay.
 	 */
 	switch (command) {
 
@@ -625,8 +614,10 @@
 			return;
 		}
 	}
-	/* Apply this short delay always to ensure that we do wait tWB in
-	 * any case on any machine. */
+	/*
+	 * Apply this short delay always to ensure that we do wait tWB in
+	 * any case on any machine.
+	 */
 	ndelay(100);
 
 	nand_wait_ready(mtd);
@@ -634,14 +625,14 @@
 
 /**
  * nand_command_lp - [DEFAULT] Send command to NAND large page device
- * @mtd:	MTD device structure
- * @command:	the command to be sent
- * @column:	the column address for this command, -1 if none
- * @page_addr:	the page address for this command, -1 if none
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
  *
  * Send command to NAND device. This is the version for the new large page
- * devices We dont have the separate regions as we have in the small page
- * devices.  We must emulate NAND_CMD_READOOB to keep the code compatible.
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
  */
 static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
 			    int column, int page_addr)
@@ -683,8 +674,8 @@
 	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
 
 	/*
-	 * program and erase have their own busy handlers
-	 * status, sequential in, and deplete1 need no delay
+	 * Program and erase have their own busy handlers; status, sequential
+	 * in, and deplete1 need no delay.
 	 */
 	switch (command) {
 
@@ -698,14 +689,12 @@
 	case NAND_CMD_DEPLETE1:
 		return;
 
-		/*
-		 * read error status commands require only a short delay
-		 */
 	case NAND_CMD_STATUS_ERROR:
 	case NAND_CMD_STATUS_ERROR0:
 	case NAND_CMD_STATUS_ERROR1:
 	case NAND_CMD_STATUS_ERROR2:
 	case NAND_CMD_STATUS_ERROR3:
+		/* Read error status commands require only a short delay */
 		udelay(chip->chip_delay);
 		return;
 
@@ -739,7 +728,7 @@
 	default:
 		/*
 		 * If we don't have access to the busy pin, we apply the given
-		 * command delay
+		 * command delay.
 		 */
 		if (!chip->dev_ready) {
 			udelay(chip->chip_delay);
@@ -747,8 +736,10 @@
 		}
 	}
 
-	/* Apply this short delay always to ensure that we do wait tWB in
-	 * any case on any machine. */
+	/*
+	 * Apply this short delay always to ensure that we do wait tWB in
+	 * any case on any machine.
+	 */
 	ndelay(100);
 
 	nand_wait_ready(mtd);
@@ -756,25 +747,25 @@
 
 /**
  * panic_nand_get_device - [GENERIC] Get chip for selected access
- * @chip:	the nand chip descriptor
- * @mtd:	MTD device structure
- * @new_state:	the state which is requested
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
  *
  * Used when in panic, no locks are taken.
  */
 static void panic_nand_get_device(struct nand_chip *chip,
 		      struct mtd_info *mtd, int new_state)
 {
-	/* Hardware controller shared among independend devices */
+	/* Hardware controller shared among independent devices */
 	chip->controller->active = chip;
 	chip->state = new_state;
 }
 
 /**
  * nand_get_device - [GENERIC] Get chip for selected access
- * @chip:	the nand chip descriptor
- * @mtd:	MTD device structure
- * @new_state:	the state which is requested
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
  *
  * Get the device and lock it for exclusive access
  */
@@ -812,10 +803,10 @@
 }
 
 /**
- * panic_nand_wait - [GENERIC]  wait until the command is done
- * @mtd:	MTD device structure
- * @chip:	NAND chip structure
- * @timeo:	Timeout
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @timeo: timeout
  *
  * Wait for command done. This is a helper function for nand_wait used when
  * we are in interrupt context. May happen when in panic and trying to write
@@ -838,13 +829,13 @@
 }
 
 /**
- * nand_wait - [DEFAULT]  wait until the command is done
- * @mtd:	MTD device structure
- * @chip:	NAND chip structure
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
  *
- * Wait for command done. This applies to erase and program only
- * Erase can take up to 400ms and program up to 20ms according to
- * general NAND and SmartMedia specs
+ * Wait for command done. This applies to erase and program only. Erase can
+ * take up to 400ms and program up to 20ms according to general NAND and
+ * SmartMedia specs.
  */
 static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
 {
@@ -859,8 +850,10 @@
 
 	led_trigger_event(nand_led_trigger, LED_FULL);
 
-	/* Apply this short delay always to ensure that we do wait tWB in
-	 * any case on any machine. */
+	/*
+	 * Apply this short delay always to ensure that we do wait tWB in any
+	 * case on any machine.
+	 */
 	ndelay(100);
 
 	if ((state == FL_ERASING) && (chip->options & NAND_IS_AND))
@@ -890,16 +883,15 @@
 
 /**
  * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
- *
  * @mtd: mtd info
  * @ofs: offset to start unlock from
  * @len: length to unlock
- * @invert:   when = 0, unlock the range of blocks within the lower and
- *                      upper boundary address
- *            when = 1, unlock the range of blocks outside the boundaries
- *                      of the lower and upper boundary address
+ * @invert: when = 0, unlock the range of blocks within the lower and
+ *                    upper boundary address
+ *          when = 1, unlock the range of blocks outside the boundaries
+ *                    of the lower and upper boundary address
  *
- * return - unlock status
+ * Returns unlock status.
  */
 static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
 					uint64_t len, int invert)
@@ -919,10 +911,9 @@
 
 	/* Call wait ready function */
 	status = chip->waitfunc(mtd, chip);
-	udelay(1000);
 	/* See if device thinks it succeeded */
 	if (status & 0x01) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+		pr_debug("%s: error status = 0x%08x\n",
 					__func__, status);
 		ret = -EIO;
 	}
@@ -932,12 +923,11 @@
 
 /**
  * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
- *
  * @mtd: mtd info
  * @ofs: offset to start unlock from
  * @len: length to unlock
  *
- * return - unlock status
+ * Returns unlock status.
  */
 int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
@@ -945,7 +935,7 @@
 	int chipnr;
 	struct nand_chip *chip = mtd->priv;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+	pr_debug("%s: start = 0x%012llx, len = %llu\n",
 			__func__, (unsigned long long)ofs, len);
 
 	if (check_offs_len(mtd, ofs, len))
@@ -964,7 +954,7 @@
 
 	/* Check, if it is write protected */
 	if (nand_check_wp(mtd)) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+		pr_debug("%s: device is write protected!\n",
 					__func__);
 		ret = -EIO;
 		goto out;
@@ -981,18 +971,16 @@
 
 /**
  * nand_lock - [REPLACEABLE] locks all blocks present in the device
- *
  * @mtd: mtd info
  * @ofs: offset to start unlock from
  * @len: length to unlock
  *
- * return - lock status
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts do
+ * have this feature, but it only allows locking all blocks, not a specified
+ * range of blocks. The 'lock' feature is therefore implemented by making use
+ * of 'unlock', for now.
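+ * (The device is first locked as a whole; everything outside the requested
+ * range is then unlocked again with invert = 1.)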
  *
- * This feature is not supported in many NAND parts. 'Micron' NAND parts
- * do have this feature, but it allows only to lock all blocks, not for
- * specified range for block.
- *
- * Implementing 'lock' feature by making use of 'unlock', for now.
+ * Returns lock status.
  */
 int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 {
@@ -1000,7 +988,7 @@
 	int chipnr, status, page;
 	struct nand_chip *chip = mtd->priv;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
+	pr_debug("%s: start = 0x%012llx, len = %llu\n",
 			__func__, (unsigned long long)ofs, len);
 
 	if (check_offs_len(mtd, ofs, len))
@@ -1015,7 +1003,7 @@
 
 	/* Check, if it is write protected */
 	if (nand_check_wp(mtd)) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
+		pr_debug("%s: device is write protected!\n",
 					__func__);
 		status = MTD_ERASE_FAILED;
 		ret = -EIO;
@@ -1028,10 +1016,9 @@
 
 	/* Call wait ready function */
 	status = chip->waitfunc(mtd, chip);
-	udelay(1000);
 	/* See if device thinks it succeeded */
 	if (status & 0x01) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Error status = 0x%08x\n",
+		pr_debug("%s: error status = 0x%08x\n",
 					__func__, status);
 		ret = -EIO;
 		goto out;
@@ -1047,13 +1034,13 @@
 EXPORT_SYMBOL(nand_lock);
 
 /**
- * nand_read_page_raw - [Intern] read raw page data without ecc
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	buffer to store read data
- * @page:	page number to read
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
  *
- * Not for syndrome calculating ecc controllers, which use a special oob layout
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
  */
 static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
 			      uint8_t *buf, int page)
@@ -1064,11 +1051,11 @@
 }
 
 /**
- * nand_read_page_raw_syndrome - [Intern] read raw page data without ecc
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	buffer to store read data
- * @page:	page number to read
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
  *
  * We need a special oob layout and handling even when OOB isn't used.
  */
@@ -1107,11 +1094,11 @@
 }
 
 /**
- * nand_read_page_swecc - [REPLACABLE] software ecc based page read function
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	buffer to store read data
- * @page:	page number to read
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
  */
 static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
 				uint8_t *buf, int page)
@@ -1148,12 +1135,12 @@
 }
 
 /**
- * nand_read_subpage - [REPLACABLE] software ecc based sub-page read function
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @data_offs:	offset of requested data within the page
- * @readlen:	data length
- * @bufpoi:	buffer to store read data
+ * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
  */
 static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
 			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi)
@@ -1166,12 +1153,12 @@
 	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
 	int index = 0;
 
-	/* Column address wihin the page aligned to ECC size (256bytes). */
+	/* Column address within the page aligned to ECC size (256bytes) */
 	start_step = data_offs / chip->ecc.size;
 	end_step = (data_offs + readlen - 1) / chip->ecc.size;
 	num_steps = end_step - start_step + 1;
 
-	/* Data size aligned to ECC ecc.size*/
+	/* Data size aligned to ECC ecc.size */
 	datafrag_len = num_steps * chip->ecc.size;
 	eccfrag_len = num_steps * chip->ecc.bytes;
 
@@ -1183,13 +1170,14 @@
 	p = bufpoi + data_col_addr;
 	chip->read_buf(mtd, p, datafrag_len);
 
-	/* Calculate  ECC */
+	/* Calculate ECC */
 	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
 		chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
 
-	/* The performance is faster if to position offsets
-	   according to ecc.pos. Let make sure here that
-	   there are no gaps in ecc positions */
+	/*
+	 * The performance is faster if we position offsets according to
+	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
+	 */
 	for (i = 0; i < eccfrag_len - 1; i++) {
 		if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
 			eccpos[i + start_step * chip->ecc.bytes + 1]) {
@@ -1201,8 +1189,10 @@
 		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
 		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
 	} else {
-		/* send the command to read the particular ecc bytes */
-		/* take care about buswidth alignment in read_buf */
+		/*
+		 * Send the command to read the particular ECC bytes; take care
+		 * about buswidth alignment in read_buf.
+		 */
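+		/*
+		 * The start is rounded down to the bus width (aligned_pos
+		 * below): 16-bit parts need an even column address, so e.g.
+		 * an eccpos of 0x1d is fetched starting at column 0x1c.
+		 */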
 		index = start_step * chip->ecc.bytes;
 
 		aligned_pos = eccpos[index] & ~(busw - 1);
@@ -1235,13 +1225,13 @@
 }
 
 /**
- * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	buffer to store read data
- * @page:	page number to read
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
  *
- * Not for syndrome calculating ecc controllers which need a special oob layout
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
  */
 static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
 				uint8_t *buf, int page)
@@ -1280,18 +1270,17 @@
 }
 
 /**
- * nand_read_page_hwecc_oob_first - [REPLACABLE] hw ecc, read oob first
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	buffer to store read data
- * @page:	page number to read
+ * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
  *
- * Hardware ECC for large page chips, require OOB to be read first.
- * For this ECC mode, the write_page method is re-used from ECC_HW.
- * These methods read/write ECC from the OOB area, unlike the
- * ECC_HW_SYNDROME support with multiple ECC steps, follows the
- * "infix ECC" scheme and reads/writes ECC from the data area, by
- * overwriting the NAND manufacturer bad block markings.
+ * Hardware ECC for large page chips, require OOB to be read first. For this
+ * ECC mode, the write_page method is re-used from ECC_HW. These methods
+ * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
+ * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
+ * the data area, by overwriting the NAND manufacturer bad block markings.
  */
 static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
 	struct nand_chip *chip, uint8_t *buf, int page)
@@ -1329,14 +1318,14 @@
 }
 
 /**
- * nand_read_page_syndrome - [REPLACABLE] hardware ecc syndrom based page read
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	buffer to store read data
- * @page:	page number to read
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @page: page number to read
  *
- * The hw generator calculates the error syndrome automatically. Therefor
- * we need a special oob layout and handling.
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
  */
 static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
 				   uint8_t *buf, int page)
@@ -1384,29 +1373,29 @@
 }
 
 /**
- * nand_transfer_oob - [Internal] Transfer oob to client buffer
- * @chip:	nand chip structure
- * @oob:	oob destination address
- * @ops:	oob ops structure
- * @len:	size of oob to transfer
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: nand chip structure
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
  */
 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
 				  struct mtd_oob_ops *ops, size_t len)
 {
 	switch (ops->mode) {
 
-	case MTD_OOB_PLACE:
-	case MTD_OOB_RAW:
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_RAW:
 		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
 		return oob + len;
 
-	case MTD_OOB_AUTO: {
+	case MTD_OPS_AUTO_OOB: {
 		struct nand_oobfree *free = chip->ecc.layout->oobfree;
 		uint32_t boffs = 0, roffs = ops->ooboffs;
 		size_t bytes = 0;
 
 		for (; free->length && len; free++, len -= bytes) {
-			/* Read request not from offset 0 ? */
+			/* Read request not from offset 0? */
 			if (unlikely(roffs)) {
 				if (roffs >= free->length) {
 					roffs -= free->length;
@@ -1432,11 +1421,10 @@
 }
 
 /**
- * nand_do_read_ops - [Internal] Read data with ECC
- *
- * @mtd:	MTD device structure
- * @from:	offset to read from
- * @ops:	oob ops structure
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob ops structure
  *
  * Internal function. Called with chip held.
  */
@@ -1451,7 +1439,7 @@
 	int ret = 0;
 	uint32_t readlen = ops->len;
 	uint32_t oobreadlen = ops->ooblen;
-	uint32_t max_oobsize = ops->mode == MTD_OOB_AUTO ?
+	uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
 		mtd->oobavail : mtd->oobsize;
 
 	uint8_t *bufpoi, *oob, *buf;
@@ -1473,7 +1461,7 @@
 		bytes = min(mtd->writesize - col, readlen);
 		aligned = (bytes == mtd->writesize);
 
-		/* Is the current page in the buffer ? */
+		/* Is the current page in the buffer? */
 		if (realpage != chip->pagebuf || oob) {
 			bufpoi = aligned ? buf : chip->buffers->databuf;
 
@@ -1483,7 +1471,7 @@
 			}
 
 			/* Now read the page into the buffer */
-			if (unlikely(ops->mode == MTD_OOB_RAW))
+			if (unlikely(ops->mode == MTD_OPS_RAW))
 				ret = chip->ecc.read_page_raw(mtd, chip,
 							      bufpoi, page);
 			else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
@@ -1492,14 +1480,22 @@
 			else
 				ret = chip->ecc.read_page(mtd, chip, bufpoi,
 							  page);
-			if (ret < 0)
+			if (ret < 0) {
+				if (!aligned)
+					/* Invalidate page cache */
+					chip->pagebuf = -1;
 				break;
+			}
 
 			/* Transfer not aligned data */
 			if (!aligned) {
 				if (!NAND_SUBPAGE_READ(chip) && !oob &&
-				    !(mtd->ecc_stats.failed - stats.failed))
+				    !(mtd->ecc_stats.failed - stats.failed) &&
+				    (ops->mode != MTD_OPS_RAW))
 					chip->pagebuf = realpage;
+				else
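+					/*
+					 * Only a whole-page, ECC-corrected
+					 * read with no new failures is a
+					 * trustworthy cache; raw, subpage,
+					 * OOB and failed reads may leave
+					 * uncorrected or partial data here.
+					 */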
+					/* Invalidate page cache */
+					chip->pagebuf = -1;
 				memcpy(buf, chip->buffers->databuf + col, bytes);
 			}
 
@@ -1539,7 +1535,7 @@
 		if (!readlen)
 			break;
 
-		/* For subsequent reads align to page boundary. */
+		/* For subsequent reads align to page boundary */
 		col = 0;
 		/* Increment page address */
 		realpage++;
@@ -1552,8 +1548,9 @@
 			chip->select_chip(mtd, chipnr);
 		}
 
-		/* Check, if the chip supports auto page increment
-		 * or if we have hit a block boundary.
+		/*
+		 * Check, if the chip supports auto page increment or if we
+		 * have hit a block boundary.
 		 */
 		if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
 			sndcmd = 1;
@@ -1574,18 +1571,19 @@
 
 /**
  * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
- * @mtd:	MTD device structure
- * @from:	offset to read from
- * @len:	number of bytes to read
- * @retlen:	pointer to variable to store the number of read bytes
- * @buf:	the databuffer to put data
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
  *
- * Get hold of the chip and call nand_do_read
+ * Get hold of the chip and call nand_do_read.
  */
 static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
 		     size_t *retlen, uint8_t *buf)
 {
 	struct nand_chip *chip = mtd->priv;
+	struct mtd_oob_ops ops;
 	int ret;
 
 	/* Do not allow reads past end of device */
@@ -1596,13 +1594,14 @@
 
 	nand_get_device(chip, mtd, FL_READING);
 
-	chip->ops.len = len;
-	chip->ops.datbuf = buf;
-	chip->ops.oobbuf = NULL;
+	ops.len = len;
+	ops.datbuf = buf;
+	ops.oobbuf = NULL;
+	ops.mode = 0;
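+	/* mode 0 is MTD_OPS_PLACE_OOB; with oobbuf NULL no OOB is accessed */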
 
-	ret = nand_do_read_ops(mtd, from, &chip->ops);
+	ret = nand_do_read_ops(mtd, from, &ops);
 
-	*retlen = chip->ops.retlen;
+	*retlen = ops.retlen;
 
 	nand_release_device(mtd);
 
@@ -1610,11 +1609,11 @@
 }
 
 /**
- * nand_read_oob_std - [REPLACABLE] the most common OOB data read function
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @page:	page number to read
- * @sndcmd:	flag whether to issue read command or not
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
  */
 static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
 			     int page, int sndcmd)
@@ -1628,12 +1627,12 @@
 }
 
 /**
- * nand_read_oob_syndrome - [REPLACABLE] OOB data read function for HW ECC
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
  *			    with syndromes
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @page:	page number to read
- * @sndcmd:	flag whether to issue read command or not
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ * @sndcmd: flag whether to issue read command or not
  */
 static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
 				  int page, int sndcmd)
@@ -1667,10 +1666,10 @@
 }
 
 /**
- * nand_write_oob_std - [REPLACABLE] the most common OOB data write function
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @page:	page number to write
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
  */
 static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
 			      int page)
@@ -1690,11 +1689,11 @@
 }
 
 /**
- * nand_write_oob_syndrome - [REPLACABLE] OOB data write function for HW ECC
- *			     with syndrome - only for large page flash !
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @page:	page number to write
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ *			     with syndrome - only for large page flash
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
  */
 static int nand_write_oob_syndrome(struct mtd_info *mtd,
 				   struct nand_chip *chip, int page)
@@ -1749,34 +1748,37 @@
 }
 
 /**
- * nand_do_read_oob - [Intern] NAND read out-of-band
- * @mtd:	MTD device structure
- * @from:	offset to read from
- * @ops:	oob operations description structure
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
  *
- * NAND read out-of-band data from the spare area
+ * NAND read out-of-band data from the spare area.
  */
 static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
 			    struct mtd_oob_ops *ops)
 {
 	int page, realpage, chipnr, sndcmd = 1;
 	struct nand_chip *chip = mtd->priv;
+	struct mtd_ecc_stats stats;
 	int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
 	int readlen = ops->ooblen;
 	int len;
 	uint8_t *buf = ops->oobbuf;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n",
+	pr_debug("%s: from = 0x%08Lx, len = %i\n",
 			__func__, (unsigned long long)from, readlen);
 
-	if (ops->mode == MTD_OOB_AUTO)
+	stats = mtd->ecc_stats;
+
+	if (ops->mode == MTD_OPS_AUTO_OOB)
 		len = chip->ecc.layout->oobavail;
 	else
 		len = mtd->oobsize;
 
 	if (unlikely(ops->ooboffs >= len)) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read "
-					"outside oob\n", __func__);
+		pr_debug("%s: attempt to start read outside oob\n",
+				__func__);
 		return -EINVAL;
 	}
 
@@ -1784,8 +1786,8 @@
 	if (unlikely(from >= mtd->size ||
 		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
 					(from >> chip->page_shift)) * len)) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end "
-					"of device\n", __func__);
+		pr_debug("%s: attempt to read beyond end of device\n",
+				__func__);
 		return -EINVAL;
 	}
 
@@ -1797,7 +1799,10 @@
 	page = realpage & chip->pagemask;
 
 	while (1) {
-		sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
+		if (ops->mode == MTD_OPS_RAW)
+			sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd);
+		else
+			sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
 
 		len = min(len, readlen);
 		buf = nand_transfer_oob(chip, buf, ops, len);
@@ -1830,24 +1835,29 @@
 			chip->select_chip(mtd, chipnr);
 		}
 
-		/* Check, if the chip supports auto page increment
-		 * or if we have hit a block boundary.
+		/*
+		 * Check, if the chip supports auto page increment or if we
+		 * have hit a block boundary.
 		 */
 		if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
 			sndcmd = 1;
 	}
 
 	ops->oobretlen = ops->ooblen;
-	return 0;
+
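+	/*
+	 * Use the standard MTD read return convention: -EBADMSG for an
+	 * uncorrectable ECC error, -EUCLEAN if bitflips were corrected.
+	 */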
+	if (mtd->ecc_stats.failed - stats.failed)
+		return -EBADMSG;
+
+	return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
 }
 
 /**
  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
- * @mtd:	MTD device structure
- * @from:	offset to read from
- * @ops:	oob operation description structure
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
  *
- * NAND read data and/or out-of-band data
+ * NAND read data and/or out-of-band data.
  */
 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
 			 struct mtd_oob_ops *ops)
@@ -1859,17 +1869,17 @@
 
 	/* Do not allow reads past end of device */
 	if (ops->datbuf && (from + ops->len) > mtd->size) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read "
-				"beyond end of device\n", __func__);
+		pr_debug("%s: attempt to read beyond end of device\n",
+				__func__);
 		return -EINVAL;
 	}
 
 	nand_get_device(chip, mtd, FL_READING);
 
 	switch (ops->mode) {
-	case MTD_OOB_PLACE:
-	case MTD_OOB_AUTO:
-	case MTD_OOB_RAW:
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_AUTO_OOB:
+	case MTD_OPS_RAW:
 		break;
 
 	default:
@@ -1888,12 +1898,12 @@
 
 
 /**
- * nand_write_page_raw - [Intern] raw page write function
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	data buffer
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
  *
- * Not for syndrome calculating ecc controllers, which use a special oob layout
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
  */
 static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
 				const uint8_t *buf)
@@ -1903,10 +1913,10 @@
 }
 
 /**
- * nand_write_page_raw_syndrome - [Intern] raw page write function
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	data buffer
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
  *
  * We need a special oob layout and handling even when ECC isn't checked.
  */
@@ -1942,10 +1952,10 @@
 		chip->write_buf(mtd, oob, size);
 }
 /**
- * nand_write_page_swecc - [REPLACABLE] software ecc based page write function
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	data buffer
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
  */
 static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
 				  const uint8_t *buf)
@@ -1957,7 +1967,7 @@
 	const uint8_t *p = buf;
 	uint32_t *eccpos = chip->ecc.layout->eccpos;
 
-	/* Software ecc calculation */
+	/* Software ECC calculation */
 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
 
@@ -1968,10 +1978,10 @@
 }
 
 /**
- * nand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	data buffer
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
  */
 static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
 				  const uint8_t *buf)
@@ -1996,13 +2006,13 @@
 }
 
 /**
- * nand_write_page_syndrome - [REPLACABLE] hardware ecc syndrom based page write
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	data buffer
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
  *
- * The hw generator calculates the error syndrome automatically. Therefor
- * we need a special oob layout and handling.
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
  */
 static void nand_write_page_syndrome(struct mtd_info *mtd,
 				    struct nand_chip *chip, const uint8_t *buf)
@@ -2041,12 +2051,12 @@
 
 /**
  * nand_write_page - [REPLACEABLE] write one page
- * @mtd:	MTD device structure
- * @chip:	NAND chip descriptor
- * @buf:	the data to write
- * @page:	page number to write
- * @cached:	cached programming
- * @raw:	use _raw version of write_page
+ * @mtd: MTD device structure
+ * @chip: NAND chip descriptor
+ * @buf: the data to write
+ * @page: page number to write
+ * @cached: cached programming
+ * @raw: use _raw version of write_page
  */
 static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
 			   const uint8_t *buf, int page, int cached, int raw)
@@ -2061,8 +2071,8 @@
 		chip->ecc.write_page(mtd, chip, buf);
 
 	/*
-	 * Cached progamming disabled for now, Not sure if its worth the
-	 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s)
+	 * Cached programming disabled for now. Not sure if it's worth the
+	 * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
 	 */
 	cached = 0;
 
@@ -2072,7 +2082,7 @@
 		status = chip->waitfunc(mtd, chip);
 		/*
 		 * See if operation failed and additional status checks are
-		 * available
+		 * available.
 		 */
 		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
 			status = chip->errstat(mtd, chip, FL_WRITING, status,
@@ -2096,29 +2106,37 @@
 }
 
 /**
- * nand_fill_oob - [Internal] Transfer client buffer to oob
- * @chip:	nand chip structure
- * @oob:	oob data buffer
- * @len:	oob data write length
- * @ops:	oob ops structure
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @mtd: MTD device structure
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
  */
-static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
-						struct mtd_oob_ops *ops)
+static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
+			      struct mtd_oob_ops *ops)
 {
+	struct nand_chip *chip = mtd->priv;
+
+	/*
+	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
+	 * data from a previous OOB read.
+	 */
+	memset(chip->oob_poi, 0xff, mtd->oobsize);
+
 	switch (ops->mode) {
 
-	case MTD_OOB_PLACE:
-	case MTD_OOB_RAW:
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_RAW:
 		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
 		return oob + len;
 
-	case MTD_OOB_AUTO: {
+	case MTD_OPS_AUTO_OOB: {
 		struct nand_oobfree *free = chip->ecc.layout->oobfree;
 		uint32_t boffs = 0, woffs = ops->ooboffs;
 		size_t bytes = 0;
 
 		for (; free->length && len; free++, len -= bytes) {
-			/* Write request not from offset 0 ? */
+			/* Write request not from offset 0? */
 			if (unlikely(woffs)) {
 				if (woffs >= free->length) {
 					woffs -= free->length;
@@ -2146,12 +2164,12 @@
 #define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)
 
 /**
- * nand_do_write_ops - [Internal] NAND write with ECC
- * @mtd:	MTD device structure
- * @to:		offset to write to
- * @ops:	oob operations description structure
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
  *
- * NAND write with ECC
+ * NAND write with ECC.
  */
 static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
 			     struct mtd_oob_ops *ops)
@@ -2161,7 +2179,7 @@
 	uint32_t writelen = ops->len;
 
 	uint32_t oobwritelen = ops->ooblen;
-	uint32_t oobmaxlen = ops->mode == MTD_OOB_AUTO ?
+	uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
 				mtd->oobavail : mtd->oobsize;
 
 	uint8_t *oob = ops->oobbuf;
@@ -2172,10 +2190,10 @@
 	if (!writelen)
 		return 0;
 
-	/* reject writes, which are not page aligned */
+	/* Reject writes, which are not page aligned */
 	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
-		printk(KERN_NOTICE "%s: Attempt to write not "
-				"page aligned data\n", __func__);
+		pr_notice("%s: attempt to write non page aligned data\n",
+			   __func__);
 		return -EINVAL;
 	}
 
@@ -2201,10 +2219,6 @@
 	    (chip->pagebuf << chip->page_shift) < (to + ops->len))
 		chip->pagebuf = -1;
 
-	/* If we're not given explicit OOB data, let it be 0xFF */
-	if (likely(!oob))
-		memset(chip->oob_poi, 0xff, mtd->oobsize);
-
 	/* Don't allow multipage oob writes with offset */
 	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen))
 		return -EINVAL;
@@ -2214,7 +2228,7 @@
 		int cached = writelen > bytes && page != blockmask;
 		uint8_t *wbuf = buf;
 
-		/* Partial page write ? */
+		/* Partial page write? */
 		if (unlikely(column || writelen < (mtd->writesize - 1))) {
 			cached = 0;
 			bytes = min_t(int, bytes - column, (int) writelen);
@@ -2226,12 +2240,15 @@
 
 		if (unlikely(oob)) {
 			size_t len = min(oobwritelen, oobmaxlen);
-			oob = nand_fill_oob(chip, oob, len, ops);
+			oob = nand_fill_oob(mtd, oob, len, ops);
 			oobwritelen -= len;
+		} else {
+			/* We still need to erase leftover OOB data */
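+			/*
+			 * All-0xff data programs as a no-op on NAND (writes
+			 * can only clear bits), so the spare area keeps its
+			 * erased state.
+			 */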
+			memset(chip->oob_poi, 0xff, mtd->oobsize);
 		}
 
 		ret = chip->write_page(mtd, chip, wbuf, page, cached,
-				       (ops->mode == MTD_OOB_RAW));
+				       (ops->mode == MTD_OPS_RAW));
 		if (ret)
 			break;
 
@@ -2260,11 +2277,11 @@
 
 /**
  * panic_nand_write - [MTD Interface] NAND write with ECC
- * @mtd:	MTD device structure
- * @to:		offset to write to
- * @len:	number of bytes to write
- * @retlen:	pointer to variable to store the number of written bytes
- * @buf:	the data to write
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
  *
  * NAND write with ECC. Used when performing writes in interrupt context, this
  * may for example be called by mtdoops when writing an oops while in panic.
@@ -2273,6 +2290,7 @@
 			    size_t *retlen, const uint8_t *buf)
 {
 	struct nand_chip *chip = mtd->priv;
+	struct mtd_oob_ops ops;
 	int ret;
 
 	/* Do not allow reads past end of device */
@@ -2281,36 +2299,38 @@
 	if (!len)
 		return 0;
 
-	/* Wait for the device to get ready.  */
+	/* Wait for the device to get ready */
 	panic_nand_wait(mtd, chip, 400);
 
-	/* Grab the device.  */
+	/* Grab the device */
 	panic_nand_get_device(chip, mtd, FL_WRITING);
 
-	chip->ops.len = len;
-	chip->ops.datbuf = (uint8_t *)buf;
-	chip->ops.oobbuf = NULL;
+	ops.len = len;
+	ops.datbuf = (uint8_t *)buf;
+	ops.oobbuf = NULL;
+	ops.mode = 0;
 
-	ret = nand_do_write_ops(mtd, to, &chip->ops);
+	ret = nand_do_write_ops(mtd, to, &ops);
 
-	*retlen = chip->ops.retlen;
+	*retlen = ops.retlen;
 	return ret;
 }
 
 /**
  * nand_write - [MTD Interface] NAND write with ECC
- * @mtd:	MTD device structure
- * @to:		offset to write to
- * @len:	number of bytes to write
- * @retlen:	pointer to variable to store the number of written bytes
- * @buf:	the data to write
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
  *
- * NAND write with ECC
+ * NAND write with ECC.
  */
 static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
 			  size_t *retlen, const uint8_t *buf)
 {
 	struct nand_chip *chip = mtd->priv;
+	struct mtd_oob_ops ops;
 	int ret;
 
 	/* Do not allow reads past end of device */
@@ -2321,13 +2341,14 @@
 
 	nand_get_device(chip, mtd, FL_WRITING);
 
-	chip->ops.len = len;
-	chip->ops.datbuf = (uint8_t *)buf;
-	chip->ops.oobbuf = NULL;
+	ops.len = len;
+	ops.datbuf = (uint8_t *)buf;
+	ops.oobbuf = NULL;
+	ops.mode = 0;
 
-	ret = nand_do_write_ops(mtd, to, &chip->ops);
+	ret = nand_do_write_ops(mtd, to, &ops);
 
-	*retlen = chip->ops.retlen;
+	*retlen = ops.retlen;
 
 	nand_release_device(mtd);
 
@@ -2336,11 +2357,11 @@
 
 /**
  * nand_do_write_oob - [MTD Interface] NAND write out-of-band
- * @mtd:	MTD device structure
- * @to:		offset to write to
- * @ops:	oob operation description structure
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
  *
- * NAND write out-of-band
+ * NAND write out-of-band.
  */
 static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
 			     struct mtd_oob_ops *ops)
@@ -2348,24 +2369,24 @@
 	int chipnr, page, status, len;
 	struct nand_chip *chip = mtd->priv;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
+	pr_debug("%s: to = 0x%08x, len = %i\n",
 			 __func__, (unsigned int)to, (int)ops->ooblen);
 
-	if (ops->mode == MTD_OOB_AUTO)
+	if (ops->mode == MTD_OPS_AUTO_OOB)
 		len = chip->ecc.layout->oobavail;
 	else
 		len = mtd->oobsize;
 
 	/* Do not allow write past end of page */
 	if ((ops->ooboffs + ops->ooblen) > len) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write "
-				"past end of page\n", __func__);
+		pr_debug("%s: attempt to write past end of page\n",
+				__func__);
 		return -EINVAL;
 	}
 
 	if (unlikely(ops->ooboffs >= len)) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start "
-				"write outside oob\n", __func__);
+		pr_debug("%s: attempt to start write outside oob\n",
+				__func__);
 		return -EINVAL;
 	}
 
@@ -2374,8 +2395,8 @@
 		     ops->ooboffs + ops->ooblen >
 			((mtd->size >> chip->page_shift) -
 			 (to >> chip->page_shift)) * len)) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
-				"end of device\n", __func__);
+		pr_debug("%s: attempt to write beyond end of device\n",
+				__func__);
 		return -EINVAL;
 	}
 
@@ -2401,10 +2422,12 @@
 	if (page == chip->pagebuf)
 		chip->pagebuf = -1;
 
-	memset(chip->oob_poi, 0xff, mtd->oobsize);
-	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
-	status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
-	memset(chip->oob_poi, 0xff, mtd->oobsize);
+	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
+
+	if (ops->mode == MTD_OPS_RAW)
+		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
+	else
+		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
 
 	if (status)
 		return status;
@@ -2416,9 +2439,9 @@
 
 /**
  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
- * @mtd:	MTD device structure
- * @to:		offset to write to
- * @ops:	oob operation description structure
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
  */
 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
 			  struct mtd_oob_ops *ops)
@@ -2430,17 +2453,17 @@
 
 	/* Do not allow writes past end of device */
 	if (ops->datbuf && (to + ops->len) > mtd->size) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond "
-				"end of device\n", __func__);
+		pr_debug("%s: attempt to write beyond end of device\n",
+				__func__);
 		return -EINVAL;
 	}
 
 	nand_get_device(chip, mtd, FL_WRITING);
 
 	switch (ops->mode) {
-	case MTD_OOB_PLACE:
-	case MTD_OOB_AUTO:
-	case MTD_OOB_RAW:
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_AUTO_OOB:
+	case MTD_OPS_RAW:
 		break;
 
 	default:
@@ -2458,11 +2481,11 @@
 }
 
 /**
- * single_erease_cmd - [GENERIC] NAND standard block erase command function
- * @mtd:	MTD device structure
- * @page:	the page address of the block which will be erased
+ * single_erase_cmd - [GENERIC] NAND standard block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
  *
- * Standard erase command for NAND chips
+ * Standard erase command for NAND chips.
  */
 static void single_erase_cmd(struct mtd_info *mtd, int page)
 {
@@ -2473,12 +2496,11 @@
 }
 
 /**
- * multi_erease_cmd - [GENERIC] AND specific block erase command function
- * @mtd:	MTD device structure
- * @page:	the page address of the block which will be erased
+ * multi_erase_cmd - [GENERIC] AND specific block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
  *
- * AND multi block erase command function
- * Erase 4 consecutive blocks
+ * AND multi block erase command function. Erase 4 consecutive blocks.
  */
 static void multi_erase_cmd(struct mtd_info *mtd, int page)
 {
@@ -2493,10 +2515,10 @@
 
 /**
  * nand_erase - [MTD Interface] erase block(s)
- * @mtd:	MTD device structure
- * @instr:	erase instruction
+ * @mtd: MTD device structure
+ * @instr: erase instruction
  *
- * Erase one ore more blocks
+ * Erase one or more blocks.
  */
 static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
 {
@@ -2505,12 +2527,12 @@
 
 #define BBT_PAGE_MASK	0xffffff3f
 /**
- * nand_erase_nand - [Internal] erase block(s)
- * @mtd:	MTD device structure
- * @instr:	erase instruction
- * @allowbbt:	allow erasing the bbt area
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
  *
- * Erase one ore more blocks
+ * Erase one or more blocks.
  */
 int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
 		    int allowbbt)
@@ -2521,9 +2543,9 @@
 	unsigned int bbt_masked_page = 0xffffffff;
 	loff_t len;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n",
-				__func__, (unsigned long long)instr->addr,
-				(unsigned long long)instr->len);
+	pr_debug("%s: start = 0x%012llx, len = %llu\n",
+			__func__, (unsigned long long)instr->addr,
+			(unsigned long long)instr->len);
 
 	if (check_offs_len(mtd, instr->addr, instr->len))
 		return -EINVAL;
@@ -2545,8 +2567,8 @@
 
 	/* Check, if it is write protected */
 	if (nand_check_wp(mtd)) {
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n",
-					__func__);
+		pr_debug("%s: device is write protected!\n",
+				__func__);
 		instr->state = MTD_ERASE_FAILED;
 		goto erase_exit;
 	}
@@ -2555,7 +2577,7 @@
 	 * If BBT requires refresh, set the BBT page mask to see if the BBT
 	 * should be rewritten. Otherwise the mask is set to 0xffffffff which
 	 * can not be matched. This is also done when the bbt is actually
-	 * erased to avoid recusrsive updates
+	 * erased to avoid recursive updates.
 	 */
 	if (chip->options & BBT_AUTO_REFRESH && !allowbbt)
 		bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK;
@@ -2566,20 +2588,18 @@
 	instr->state = MTD_ERASING;
 
 	while (len) {
-		/*
-		 * heck if we have a bad block, we do not erase bad blocks !
-		 */
+		/* Check if we have a bad block, we do not erase bad blocks! */
 		if (nand_block_checkbad(mtd, ((loff_t) page) <<
 					chip->page_shift, 0, allowbbt)) {
-			printk(KERN_WARNING "%s: attempt to erase a bad block "
-					"at page 0x%08x\n", __func__, page);
+			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+				    __func__, page);
 			instr->state = MTD_ERASE_FAILED;
 			goto erase_exit;
 		}
 
 		/*
 		 * Invalidate the page cache, if we erase the block which
-		 * contains the current cached page
+		 * contains the current cached page.
 		 */
 		if (page <= chip->pagebuf && chip->pagebuf <
 		    (page + pages_per_block))
@@ -2599,8 +2619,8 @@
 
 		/* See if block erase succeeded */
 		if (status & NAND_STATUS_FAIL) {
-			DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, "
-					"page 0x%08x\n", __func__, page);
+			pr_debug("%s: failed erase, page 0x%08x\n",
+					__func__, page);
 			instr->state = MTD_ERASE_FAILED;
 			instr->fail_addr =
 				((loff_t)page << chip->page_shift);
@@ -2609,7 +2629,7 @@
 
 		/*
 		 * If BBT requires refresh, set the BBT rewrite flag to the
-		 * page being erased
+		 * page being erased.
 		 */
 		if (bbt_masked_page != 0xffffffff &&
 		    (page & BBT_PAGE_MASK) == bbt_masked_page)
@@ -2628,7 +2648,7 @@
 
 			/*
 			 * If BBT requires refresh and BBT-PERCHIP, set the BBT
-			 * page mask to see if this BBT should be rewritten
+			 * page mask to see if this BBT should be rewritten.
 			 */
 			if (bbt_masked_page != 0xffffffff &&
 			    (chip->bbt_td->options & NAND_BBT_PERCHIP))
@@ -2651,7 +2671,7 @@
 
 	/*
 	 * If BBT requires refresh and erase was successful, rewrite any
-	 * selected bad block tables
+	 * selected bad block tables.
 	 */
 	if (bbt_masked_page == 0xffffffff || ret)
 		return ret;
@@ -2659,10 +2679,10 @@
 	for (chipnr = 0; chipnr < chip->numchips; chipnr++) {
 		if (!rewrite_bbt[chipnr])
 			continue;
-		/* update the BBT for chip */
-		DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt "
-			"(%d:0x%0llx 0x%0x)\n", __func__, chipnr,
-			rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]);
+		/* Update the BBT for chip */
+		pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n",
+				__func__, chipnr, rewrite_bbt[chipnr],
+				chip->bbt_td->pages[chipnr]);
 		nand_update_bbt(mtd, rewrite_bbt[chipnr]);
 	}
 
@@ -2672,15 +2692,15 @@
 
 /**
  * nand_sync - [MTD Interface] sync
- * @mtd:	MTD device structure
+ * @mtd: MTD device structure
  *
- * Sync is actually a wait for chip ready function
+ * Sync is actually a wait for chip ready function.
  */
 static void nand_sync(struct mtd_info *mtd)
 {
 	struct nand_chip *chip = mtd->priv;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
+	pr_debug("%s: called\n", __func__);
 
 	/* Grab the lock and see if the device is available */
 	nand_get_device(chip, mtd, FL_SYNCING);
@@ -2690,8 +2710,8 @@
 
 /**
  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
- * @mtd:	MTD device structure
- * @offs:	offset relative to mtd start
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
  */
 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
 {
@@ -2704,8 +2724,8 @@
 
 /**
  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
- * @mtd:	MTD device structure
- * @ofs:	offset relative to mtd start
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
  */
 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
 {
@@ -2714,7 +2734,7 @@
 
 	ret = nand_block_isbad(mtd, ofs);
 	if (ret) {
-		/* If it was bad already, return success and do nothing. */
+		/* If it was bad already, return success and do nothing */
 		if (ret > 0)
 			return 0;
 		return ret;
@@ -2725,7 +2745,7 @@
 
 /**
  * nand_suspend - [MTD Interface] Suspend the NAND flash
- * @mtd:	MTD device structure
+ * @mtd: MTD device structure
  */
 static int nand_suspend(struct mtd_info *mtd)
 {
@@ -2736,7 +2756,7 @@
 
 /**
  * nand_resume - [MTD Interface] Resume the NAND flash
- * @mtd:	MTD device structure
+ * @mtd: MTD device structure
  */
 static void nand_resume(struct mtd_info *mtd)
 {
@@ -2745,13 +2765,11 @@
 	if (chip->state == FL_PM_SUSPENDED)
 		nand_release_device(mtd);
 	else
-		printk(KERN_ERR "%s called for a chip which is not "
-		       "in suspended state\n", __func__);
+		pr_err("%s called for a chip which is not in suspended state\n",
+			__func__);
 }
 
-/*
- * Set default functions
- */
+/* Set default functions */
 static void nand_set_defaults(struct nand_chip *chip, int busw)
 {
 	/* check for proper chip_delay setup, set 20us if not */
@@ -2793,23 +2811,21 @@
 
 }
 
-/*
- * sanitize ONFI strings so we can safely print them
- */
+/* Sanitize ONFI strings so we can safely print them */
 static void sanitize_string(uint8_t *s, size_t len)
 {
 	ssize_t i;
 
-	/* null terminate */
+	/* Null terminate */
 	s[len - 1] = 0;
 
-	/* remove non printable chars */
+	/* Remove non printable chars */
 	for (i = 0; i < len - 1; i++) {
 		if (s[i] < ' ' || s[i] > 127)
 			s[i] = '?';
 	}
 
-	/* remove trailing spaces */
+	/* Remove trailing spaces */
 	strim(s);
 }
 
@@ -2826,28 +2842,28 @@
 }
 
 /*
- * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise
+ * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
  */
 static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
-					int busw)
+					int *busw)
 {
 	struct nand_onfi_params *p = &chip->onfi_params;
 	int i;
 	int val;
 
-	/* try ONFI for unknow chip or LP */
+	/* Try ONFI for unknown chip or LP */
 	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
 	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
 		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
 		return 0;
 
-	printk(KERN_INFO "ONFI flash detected\n");
+	pr_info("ONFI flash detected\n");
 	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
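+	/*
+	 * ONFI keeps at least three redundant copies of the parameter page;
+	 * accept the first one whose CRC over the 254 payload bytes matches.
+	 */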
 	for (i = 0; i < 3; i++) {
 		chip->read_buf(mtd, (uint8_t *)p, sizeof(*p));
 		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
 				le16_to_cpu(p->crc)) {
-			printk(KERN_INFO "ONFI param page %d valid\n", i);
+			pr_info("ONFI param page %d valid\n", i);
 			break;
 		}
 	}
@@ -2855,7 +2871,7 @@
 	if (i == 3)
 		return 0;
 
-	/* check version */
+	/* Check version */
 	val = le16_to_cpu(p->revision);
 	if (val & (1 << 5))
 		chip->onfi_version = 23;
@@ -2871,8 +2887,7 @@
 		chip->onfi_version = 0;
 
 	if (!chip->onfi_version) {
-		printk(KERN_INFO "%s: unsupported ONFI version: %d\n",
-								__func__, val);
+		pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
 		return 0;
 	}
 
@@ -2884,9 +2899,9 @@
 	mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
 	chip->chipsize = (uint64_t)le32_to_cpu(p->blocks_per_lun) * mtd->erasesize;
-	busw = 0;
+	*busw = 0;
 	if (le16_to_cpu(p->features) & 1)
-		busw = NAND_BUSWIDTH_16;
+		*busw = NAND_BUSWIDTH_16;
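+	/*
+	 * Hand the ONFI-reported bus width back to the caller, which later
+	 * checks it against the board driver's NAND_BUSWIDTH_16 setting.
+	 */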
 
 	chip->options &= ~NAND_CHIPOPTIONS_MSK;
 	chip->options |= (NAND_NO_READRDY |
@@ -2896,7 +2911,7 @@
 }
 
 /*
- * Get the flash and manufacturer id and lookup if the type is supported
+ * Get the flash and manufacturer id and lookup if the type is supported.
  */
 static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
 						  struct nand_chip *chip,
@@ -2913,7 +2928,7 @@
 
 	/*
 	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
-	 * after power-up
+	 * after power-up.
 	 */
 	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
 
@@ -2924,7 +2939,8 @@
 	*maf_id = chip->read_byte(mtd);
 	*dev_id = chip->read_byte(mtd);
 
-	/* Try again to make sure, as some systems the bus-hold or other
+	/*
+	 * Try again to make sure, as on some systems the bus-hold or other
 	 * interface concerns can cause random data which looks like a
 	 * possibly credible NAND flash to appear. If the two results do
 	 * not match, ignore the device completely.
@@ -2936,9 +2952,9 @@
 		id_data[i] = chip->read_byte(mtd);
 
 	if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
-		printk(KERN_INFO "%s: second ID read did not match "
-		       "%02x,%02x against %02x,%02x\n", __func__,
-		       *maf_id, *dev_id, id_data[0], id_data[1]);
+		pr_info("%s: second ID read did not match "
+			"%02x,%02x against %02x,%02x\n", __func__,
+			*maf_id, *dev_id, id_data[0], id_data[1]);
 		return ERR_PTR(-ENODEV);
 	}
 
@@ -2952,7 +2968,7 @@
 	chip->onfi_version = 0;
 	if (!type->name || !type->pagesize) {
 		/* Check is chip is ONFI compliant */
-		ret = nand_flash_detect_onfi(mtd, chip, busw);
+		ret = nand_flash_detect_onfi(mtd, chip, &busw);
 		if (ret)
 			goto ident_done;
 	}
@@ -2973,7 +2989,7 @@
 	chip->chipsize = (uint64_t)type->chipsize << 20;
 
 	if (!type->pagesize && chip->init_size) {
-		/* set the pagesize, oobsize, erasesize by the driver*/
+		/* Set the pagesize, oobsize, erasesize by the driver */
 		busw = chip->init_size(mtd, chip, id_data);
 	} else if (!type->pagesize) {
 		int extid;
@@ -3033,7 +3049,7 @@
 		}
 	} else {
 		/*
-		 * Old devices have chip data hardcoded in the device id table
+		 * Old devices have chip data hardcoded in the device id table.
 		 */
 		mtd->erasesize = type->erasesize;
 		mtd->writesize = type->pagesize;
@@ -3043,7 +3059,7 @@
 		/*
 		 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
 		 * some Spansion chips have erasesize that conflicts with size
-		 * listed in nand_ids table
+		 * listed in nand_ids table.
 		 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
 		 */
 		if (*maf_id == NAND_MFR_AMD && id_data[4] != 0x00 &&
@@ -3057,15 +3073,16 @@
 	chip->options &= ~NAND_CHIPOPTIONS_MSK;
 	chip->options |= type->options & NAND_CHIPOPTIONS_MSK;
 
-	/* Check if chip is a not a samsung device. Do not clear the
-	 * options for chips which are not having an extended id.
+	/*
+	 * Check if chip is not a Samsung device. Do not clear the
+	 * options for chips which do not have an extended id.
 	 */
 	if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
 		chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
 ident_done:
 
 	/*
-	 * Set chip as a default. Board drivers can override it, if necessary
+	 * Set chip as a default. Board drivers can override it, if necessary.
 	 */
 	chip->options |= NAND_NO_AUTOINCR;
 
@@ -3077,21 +3094,21 @@
 
 	/*
 	 * Check, if buswidth is correct. Hardware drivers should set
-	 * chip correct !
+	 * chip correct!
 	 */
 	if (busw != (chip->options & NAND_BUSWIDTH_16)) {
-		printk(KERN_INFO "NAND device: Manufacturer ID:"
-		       " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
-		       *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
-		printk(KERN_WARNING "NAND bus width %d instead %d bit\n",
-		       (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
-		       busw ? 16 : 8);
+		pr_info("NAND device: Manufacturer ID:"
+			" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
+			*dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
+		pr_warn("NAND bus width %d instead %d bit\n",
+			   (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+			   busw ? 16 : 8);
 		return ERR_PTR(-EINVAL);
 	}
 
 	/* Calculate the address shift from the page size */
 	chip->page_shift = ffs(mtd->writesize) - 1;
-	/* Convert chipsize to number of pages per chip -1. */
+	/* Convert chipsize to number of pages per chip, minus 1 */
 	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
 
 	chip->bbt_erase_shift = chip->phys_erase_shift =
@@ -3121,7 +3138,7 @@
 	if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
 			(*maf_id == NAND_MFR_SAMSUNG ||
 			 *maf_id == NAND_MFR_HYNIX))
-		chip->options |= NAND_BBT_SCANLASTPAGE;
+		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
 	else if ((!(chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
 				(*maf_id == NAND_MFR_SAMSUNG ||
 				 *maf_id == NAND_MFR_HYNIX ||
@@ -3129,17 +3146,7 @@
 				 *maf_id == NAND_MFR_AMD)) ||
 			(mtd->writesize == 2048 &&
 			 *maf_id == NAND_MFR_MICRON))
-		chip->options |= NAND_BBT_SCAN2NDPAGE;
-
-	/*
-	 * Numonyx/ST 2K pages, x8 bus use BOTH byte 1 and 6
-	 */
-	if (!(busw & NAND_BUSWIDTH_16) &&
-			*maf_id == NAND_MFR_STMICRO &&
-			mtd->writesize == 2048) {
-		chip->options |= NAND_BBT_SCANBYTE1AND6;
-		chip->badblockpos = 0;
-	}
+		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
 
 	/* Check for AND chips with 4 page planes */
 	if (chip->options & NAND_4PAGE_ARRAY)
@@ -3147,12 +3154,11 @@
 	else
 		chip->erase_cmd = single_erase_cmd;
 
-	/* Do not replace user supplied command function ! */
+	/* Do not replace user supplied command function! */
 	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
 		chip->cmdfunc = nand_command_lp;
 
-	/* TODO onfi flash name */
-	printk(KERN_INFO "NAND device: Manufacturer ID:"
+	pr_info("NAND device: Manufacturer ID:"
 		" 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id,
 		nand_manuf_ids[maf_idx].name,
 		chip->onfi_version ? chip->onfi_params.model : type->name);
@@ -3162,12 +3168,12 @@
 
 /**
  * nand_scan_ident - [NAND Interface] Scan for the NAND device
- * @mtd:	     MTD device structure
- * @maxchips:	     Number of chips to scan for
- * @table:	     Alternative NAND ID table
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
  *
- * This is the first phase of the normal nand_scan() function. It
- * reads the flash ID and sets up MTD fields accordingly.
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
  *
  * The mtd->owner field must be set to the module of the caller.
  */
@@ -3189,7 +3195,7 @@
 
 	if (IS_ERR(type)) {
 		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
-			printk(KERN_WARNING "No NAND device found.\n");
+			pr_warn("No NAND device found\n");
 		chip->select_chip(mtd, -1);
 		return PTR_ERR(type);
 	}
@@ -3207,7 +3213,7 @@
 			break;
 	}
 	if (i > 1)
-		printk(KERN_INFO "%d NAND chips detected\n", i);
+		pr_info("%d NAND chips detected\n", i);
 
 	/* Store the number of chips and calc total size for mtd */
 	chip->numchips = i;
@@ -3220,11 +3226,11 @@
 
 /**
  * nand_scan_tail - [NAND Interface] Scan for the NAND device
- * @mtd:	    MTD device structure
+ * @mtd: MTD device structure
  *
- * This is the second phase of the normal nand_scan() function. It
- * fills out all the uninitialized function pointers with the defaults
- * and scans for a bad block table if appropriate.
+ * This is the second phase of the normal nand_scan() function. It fills out
+ * all the uninitialized function pointers with the defaults and scans for a
+ * bad block table if appropriate.
  */
 int nand_scan_tail(struct mtd_info *mtd)
 {
@@ -3240,7 +3246,7 @@
 	chip->oob_poi = chip->buffers->databuf + mtd->writesize;
 
 	/*
-	 * If no default placement scheme is given, select an appropriate one
+	 * If no default placement scheme is given, select an appropriate one.
 	 */
 	if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
 		switch (mtd->oobsize) {
@@ -3257,8 +3263,8 @@
 			chip->ecc.layout = &nand_oob_128;
 			break;
 		default:
-			printk(KERN_WARNING "No oob scheme defined for "
-			       "oobsize %d\n", mtd->oobsize);
+			pr_warn("No oob scheme defined for oobsize %d\n",
+				   mtd->oobsize);
 			BUG();
 		}
 	}
@@ -3267,7 +3273,7 @@
 		chip->write_page = nand_write_page;
 
 	/*
-	 * check ECC mode, default to software if 3byte/512byte hardware ECC is
+	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
 	 * selected and we have 256 byte pagesize fallback to software ECC
 	 */
 
@@ -3276,15 +3282,15 @@
 		/* Similar to NAND_ECC_HW, but a separate read_page handle */
 		if (!chip->ecc.calculate || !chip->ecc.correct ||
 		     !chip->ecc.hwctl) {
-			printk(KERN_WARNING "No ECC functions supplied; "
-			       "Hardware ECC not possible\n");
+			pr_warn("No ECC functions supplied; "
+				   "hardware ECC not possible\n");
 			BUG();
 		}
 		if (!chip->ecc.read_page)
 			chip->ecc.read_page = nand_read_page_hwecc_oob_first;
 
 	case NAND_ECC_HW:
-		/* Use standard hwecc read page function ? */
+		/* Use standard hwecc read page function? */
 		if (!chip->ecc.read_page)
 			chip->ecc.read_page = nand_read_page_hwecc;
 		if (!chip->ecc.write_page)
@@ -3305,11 +3311,11 @@
 		     chip->ecc.read_page == nand_read_page_hwecc ||
 		     !chip->ecc.write_page ||
 		     chip->ecc.write_page == nand_write_page_hwecc)) {
-			printk(KERN_WARNING "No ECC functions supplied; "
-			       "Hardware ECC not possible\n");
+			pr_warn("No ECC functions supplied; "
+				   "hardware ECC not possible\n");
 			BUG();
 		}
-		/* Use standard syndrome read/write page function ? */
+		/* Use standard syndrome read/write page function? */
 		if (!chip->ecc.read_page)
 			chip->ecc.read_page = nand_read_page_syndrome;
 		if (!chip->ecc.write_page)
@@ -3325,9 +3331,9 @@
 
 		if (mtd->writesize >= chip->ecc.size)
 			break;
-		printk(KERN_WARNING "%d byte HW ECC not possible on "
-		       "%d byte page size, fallback to SW ECC\n",
-		       chip->ecc.size, mtd->writesize);
+		pr_warn("%d byte HW ECC not possible on "
+			   "%d byte page size, fallback to SW ECC\n",
+			   chip->ecc.size, mtd->writesize);
 		chip->ecc.mode = NAND_ECC_SOFT;
 
 	case NAND_ECC_SOFT:
@@ -3347,7 +3353,7 @@
 
 	case NAND_ECC_SOFT_BCH:
 		if (!mtd_nand_has_bch()) {
-			printk(KERN_WARNING "CONFIG_MTD_ECC_BCH not enabled\n");
+			pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
 			BUG();
 		}
 		chip->ecc.calculate = nand_bch_calculate_ecc;
@@ -3362,8 +3368,8 @@
 		/*
 		 * Board driver should supply ecc.size and ecc.bytes values to
 		 * select how many bits are correctable; see nand_bch_init()
-		 * for details.
-		 * Otherwise, default to 4 bits for large page devices
+		 * for details. Otherwise, default to 4 bits for large page
+		 * devices.
 		 */
 		if (!chip->ecc.size && (mtd->oobsize >= 64)) {
 			chip->ecc.size = 512;
@@ -3374,14 +3380,14 @@
 					       chip->ecc.bytes,
 					       &chip->ecc.layout);
 		if (!chip->ecc.priv) {
-			printk(KERN_WARNING "BCH ECC initialization failed!\n");
+			pr_warn("BCH ECC initialization failed!\n");
 			BUG();
 		}
 		break;
 
 	case NAND_ECC_NONE:
-		printk(KERN_WARNING "NAND_ECC_NONE selected by board driver. "
-		       "This is not recommended !!\n");
+		pr_warn("NAND_ECC_NONE selected by board driver. "
+			   "This is not recommended!\n");
 		chip->ecc.read_page = nand_read_page_raw;
 		chip->ecc.write_page = nand_write_page_raw;
 		chip->ecc.read_oob = nand_read_oob_std;
@@ -3393,14 +3399,19 @@
 		break;
 
 	default:
-		printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n",
-		       chip->ecc.mode);
+		pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
 		BUG();
 	}
 
+	/* For many systems, the standard OOB write also works for raw */
+	if (!chip->ecc.read_oob_raw)
+		chip->ecc.read_oob_raw = chip->ecc.read_oob;
+	if (!chip->ecc.write_oob_raw)
+		chip->ecc.write_oob_raw = chip->ecc.write_oob;
+
 	/*
 	 * The number of bytes available for a client to place data into
-	 * the out of band area
+	 * the out of band area.
 	 */
 	chip->ecc.layout->oobavail = 0;
 	for (i = 0; chip->ecc.layout->oobfree[i].length
@@ -3411,19 +3422,16 @@
 
 	/*
 	 * Set the number of read / write steps for one page depending on ECC
-	 * mode
+	 * mode.
 	 */
 	chip->ecc.steps = mtd->writesize / chip->ecc.size;
 	if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
-		printk(KERN_WARNING "Invalid ecc parameters\n");
+		pr_warn("Invalid ECC parameters\n");
 		BUG();
 	}
 	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
 
-	/*
-	 * Allow subpage writes up to ecc.steps. Not possible for MLC
-	 * FLASH.
-	 */
+	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
 	    !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
 		switch (chip->ecc.steps) {
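
The two calculations above — summing the oobfree regions into oobavail and deriving the ECC step count from the page size — can be read in isolation. A standalone sketch, with a 2048-byte page and 512-byte ECC step as assumed sample values:

	/* Standalone sketch; geometry values are assumptions, not from the patch. */
	struct oobfree_sketch { int offset; int length; };

	static int oobavail_and_steps(void)
	{
		struct oobfree_sketch oobfree[] = { { 2, 38 }, { 0, 0 } };
		int writesize = 2048, ecc_size = 512;	/* assumed geometry */
		int oobavail = 0, steps, i;

		/* Sum free OOB regions up to the zero-length sentinel */
		for (i = 0; oobfree[i].length; i++)
			oobavail += oobfree[i].length;

		/* One ECC step per ecc_size bytes; must divide the page evenly */
		steps = writesize / ecc_size;
		if (steps * ecc_size != writesize)
			return -1;		/* the driver BUG()s here */

		return oobavail;		/* 38 free bytes; steps == 4 */
	}
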
@@ -3481,9 +3489,11 @@
 }
 EXPORT_SYMBOL(nand_scan_tail);
 
-/* is_module_text_address() isn't exported, and it's mostly a pointless
+/*
+ * is_module_text_address() isn't exported, and it's mostly a pointless
  * test if this is a module _anyway_ -- they'd have to try _really_ hard
- * to call us from in-kernel code if the core NAND support is modular. */
+ * to call us from in-kernel code if the core NAND support is modular.
+ */
 #ifdef MODULE
 #define caller_is_module() (1)
 #else
@@ -3493,15 +3503,13 @@
 
 /**
  * nand_scan - [NAND Interface] Scan for the NAND device
- * @mtd:	MTD device structure
- * @maxchips:	Number of chips to scan for
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
  *
- * This fills out all the uninitialized function pointers
- * with the defaults.
- * The flash ID is read and the mtd/chip structures are
- * filled with the appropriate values.
- * The mtd->owner field must be set to the module of the caller
- *
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+ * appropriate values. The mtd->owner field must be set to the module of the
+ * caller.
  */
 int nand_scan(struct mtd_info *mtd, int maxchips)
 {
@@ -3509,8 +3517,7 @@
 
 	/* Many callers got this wrong, so check for it for a while... */
 	if (!mtd->owner && caller_is_module()) {
-		printk(KERN_CRIT "%s called with NULL mtd->owner!\n",
-				__func__);
+		pr_crit("%s called with NULL mtd->owner!\n", __func__);
 		BUG();
 	}
 
@@ -3523,8 +3530,8 @@
 
 /**
  * nand_release - [NAND Interface] Free resources held by the NAND device
- * @mtd:	MTD device structure
-*/
+ * @mtd: MTD device structure
+ */
 void nand_release(struct mtd_info *mtd)
 {
 	struct nand_chip *chip = mtd->priv;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 4165857..69148ae 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -14,7 +14,7 @@
  *
  * When nand_scan_bbt is called, then it tries to find the bad block table
  * depending on the options in the BBT descriptor(s). If no flash based BBT
- * (NAND_USE_FLASH_BBT) is specified then the device is scanned for factory
+ * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
  * marked good / bad blocks. This information is used to create a memory BBT.
  * Once a new bad block is discovered then the "factory" information is updated
  * on the device.
@@ -36,9 +36,9 @@
  * The table is marked in the OOB area with an ident pattern and a version
  * number which indicates which of both tables is more up to date. If the NAND
  * controller needs the complete OOB area for the ECC information then the
- * option NAND_USE_FLASH_BBT_NO_OOB should be used: it moves the ident pattern
- * and the version byte into the data area and the OOB area will remain
- * untouched.
+ * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
+ * course): it moves the ident pattern and the version byte into the data area
+ * and the OOB area will remain untouched.
  *
  * The table uses 2 bits per block
  * 11b:		block is good
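
A compact restatement of the on-flash encoding this header describes; the enum names are illustrative, and the 01b/10b meanings are filled in from the remainder of the upstream comment rather than the hunk shown here:

	/* Illustrative names; the format defines only the bit patterns. */
	enum flash_bbt_code_sketch {
		FLASH_BBT_GOOD        = 0x3,	/* 11b: block is good */
		FLASH_BBT_WORN_BAD_A  = 0x2,	/* 10b: marked bad due to wear */
		FLASH_BBT_WORN_BAD_B  = 0x1,	/* 01b: marked bad due to wear */
		FLASH_BBT_FACTORY_BAD = 0x0,	/* 00b: factory marked bad */
	};
	/* Reserved blocks are flagged separately via td->reserved_block_code. */
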
@@ -81,17 +81,15 @@
 
 /**
  * check_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf:	the buffer to search
- * @len:	the length of buffer to search
- * @paglen:	the pagelength
- * @td:		search pattern descriptor
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
  *
- * Check for a pattern at the given place. Used to search bad block
- * tables and good / bad block identifiers.
- * If the SCAN_EMPTY option is set then check, if all bytes except the
- * pattern area contain 0xff
- *
-*/
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. If the SCAN_EMPTY option is set then check
+ * whether all bytes except the pattern area contain 0xff.
+ */
 static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
 {
 	int i, end = 0;
@@ -110,32 +108,8 @@
 	p += end;
 
 	/* Compare the pattern */
-	for (i = 0; i < td->len; i++) {
-		if (p[i] != td->pattern[i])
-			return -1;
-	}
-
-	/* Check both positions 1 and 6 for pattern? */
-	if (td->options & NAND_BBT_SCANBYTE1AND6) {
-		if (td->options & NAND_BBT_SCANEMPTY) {
-			p += td->len;
-			end += NAND_SMALL_BADBLOCK_POS - td->offs;
-			/* Check region between positions 1 and 6 */
-			for (i = 0; i < NAND_SMALL_BADBLOCK_POS - td->offs - td->len;
-					i++) {
-				if (*p++ != 0xff)
-					return -1;
-			}
-		}
-		else {
-			p += NAND_SMALL_BADBLOCK_POS - td->offs;
-		}
-		/* Compare the pattern */
-		for (i = 0; i < td->len; i++) {
-			if (p[i] != td->pattern[i])
-				return -1;
-		}
-	}
+	if (memcmp(p, td->pattern, td->len))
+		return -1;
 
 	if (td->options & NAND_BBT_SCANEMPTY) {
 		p += td->len;
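
The hunk above replaces the open-coded byte loop (and drops the NAND_BBT_SCANBYTE1AND6 special case) with a single memcmp(). A self-contained sketch of the simplified check, assuming only the descriptor fields it touches:

	#include <stdint.h>
	#include <string.h>

	struct bbt_descr_sketch {	/* assumed subset of nand_bbt_descr */
		const uint8_t *pattern;
		int len;
		int offs;
	};

	/* 0 if the ident pattern sits at buf + paglen + offs, else -1 */
	static int pattern_matches(const uint8_t *buf, int paglen,
				   const struct bbt_descr_sketch *td)
	{
		return memcmp(buf + paglen + td->offs, td->pattern, td->len) ?
			-1 : 0;
	}
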
@@ -150,14 +124,13 @@
 
 /**
  * check_short_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf:	the buffer to search
- * @td:		search pattern descriptor
+ * @buf: the buffer to search
+ * @td:	search pattern descriptor
  *
- * Check for a pattern at the given place. Used to search bad block
- * tables and good / bad block identifiers. Same as check_pattern, but
- * no optional empty check
- *
-*/
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. Same as check_pattern, but no optional empty
+ * check.
+ */
 static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
 {
 	int i;
@@ -168,21 +141,14 @@
 		if (p[td->offs + i] != td->pattern[i])
 			return -1;
 	}
-	/* Need to check location 1 AND 6? */
-	if (td->options & NAND_BBT_SCANBYTE1AND6) {
-		for (i = 0; i < td->len; i++) {
-			if (p[NAND_SMALL_BADBLOCK_POS + i] != td->pattern[i])
-				return -1;
-		}
-	}
 	return 0;
 }
 
 /**
  * add_marker_len - compute the length of the marker in data area
- * @td:		BBT descriptor used for computation
+ * @td: BBT descriptor used for computation
  *
- * The length will be 0 if the markeris located in OOB area.
+ * The length will be 0 if the marker is located in the OOB area.
  */
 static u32 add_marker_len(struct nand_bbt_descr *td)
 {
@@ -199,34 +165,33 @@
 
 /**
  * read_bbt - [GENERIC] Read the bad block table starting from page
- * @mtd:	MTD device structure
- * @buf:	temporary buffer
- * @page:	the starting page
- * @num:	the number of bbt descriptors to read
- * @td:		the bbt describtion table
- * @offs:	offset in the memory table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: offset in the memory table
  *
  * Read the bad block table starting from page.
- *
  */
 static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
 		struct nand_bbt_descr *td, int offs)
 {
-	int res, i, j, act = 0;
+	int res, ret = 0, i, j, act = 0;
 	struct nand_chip *this = mtd->priv;
 	size_t retlen, len, totlen;
 	loff_t from;
 	int bits = td->options & NAND_BBT_NRBITS_MSK;
-	uint8_t msk = (uint8_t) ((1 << bits) - 1);
+	uint8_t msk = (uint8_t)((1 << bits) - 1);
 	u32 marker_len;
 	int reserved_block_code = td->reserved_block_code;
 
 	totlen = (num * bits) >> 3;
 	marker_len = add_marker_len(td);
-	from = ((loff_t) page) << this->page_shift;
+	from = ((loff_t)page) << this->page_shift;
 
 	while (totlen) {
-		len = min(totlen, (size_t) (1 << this->bbt_erase_shift));
+		len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
 		if (marker_len) {
 			/*
 			 * In case the BBT marker is not in the OOB area it
@@ -238,11 +203,18 @@
 		}
 		res = mtd->read(mtd, from, len, &retlen, buf);
 		if (res < 0) {
-			if (retlen != len) {
-				printk(KERN_INFO "nand_bbt: Error reading bad block table\n");
+			if (mtd_is_eccerr(res)) {
+				pr_info("nand_bbt: ECC error in BBT at "
+					"0x%012llx\n", from & ~mtd->writesize);
+				return res;
+			} else if (mtd_is_bitflip(res)) {
+				pr_info("nand_bbt: corrected error in BBT at "
+					"0x%012llx\n", from & ~mtd->writesize);
+				ret = res;
+			} else {
+				pr_info("nand_bbt: error reading BBT\n");
 				return res;
 			}
-			printk(KERN_WARNING "nand_bbt: ECC error while reading bad block table\n");
 		}
 
 		/* Analyse data */
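
read_bbt now classifies read errors with the mtd_is_eccerr() and mtd_is_bitflip() helpers (tests for -EBADMSG and -EUCLEAN respectively) instead of guessing from retlen. A sketch of the general caller pattern, not the driver's literal code:

	/* Sketch: classify mtd->read() results, assuming the 3.2-era method. */
	static int read_classified(struct mtd_info *mtd, loff_t from, size_t len,
				   size_t *retlen, uint8_t *buf)
	{
		int res = mtd->read(mtd, from, len, retlen, buf);

		if (res < 0) {
			if (mtd_is_eccerr(res))	  /* -EBADMSG: data unusable */
				return res;
			if (!mtd_is_bitflip(res)) /* some other I/O error */
				return res;
			/* -EUCLEAN: bitflips corrected, data is still valid;
			 * fall through so the caller can schedule a scrub. */
		}
		return res;
	}
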
@@ -253,17 +225,19 @@
 				if (tmp == msk)
 					continue;
 				if (reserved_block_code && (tmp == reserved_block_code)) {
-					printk(KERN_DEBUG "nand_read_bbt: Reserved block at 0x%012llx\n",
-					       (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+					pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
+						 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
 					this->bbt[offs + (act >> 3)] |= 0x2 << (act & 0x06);
 					mtd->ecc_stats.bbtblocks++;
 					continue;
 				}
-				/* Leave it for now, if its matured we can move this
-				 * message to MTD_DEBUG_LEVEL0 */
-				printk(KERN_DEBUG "nand_read_bbt: Bad block at 0x%012llx\n",
-				       (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
-				/* Factory marked bad or worn out ? */
+				/*
+				 * Leave it for now; once it has matured we can
+				 * move this message to pr_debug.
+				 */
+				pr_info("nand_read_bbt: bad block at 0x%012llx\n",
+					 (loff_t)((offs << 2) + (act >> 1)) << this->bbt_erase_shift);
+				/* Factory marked bad or worn out? */
 				if (tmp == 0)
 					this->bbt[offs + (act >> 3)] |= 0x3 << (act & 0x06);
 				else
@@ -274,20 +248,20 @@
 		totlen -= len;
 		from += len;
 	}
-	return 0;
+	return ret;
 }
 
 /**
  * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
- * @mtd:	MTD device structure
- * @buf:	temporary buffer
- * @td:		descriptor for the bad block table
- * @chip:	read the table for a specific chip, -1 read all chips.
- *		Applies only if NAND_BBT_PERCHIP option is set
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 to read all chips; applies
+ *        only if the NAND_BBT_PERCHIP option is set
  *
- * Read the bad block table for all chips starting at a given page
- * We assume that the bbt bits are in consecutive order.
-*/
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order.
+ */
 static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
 {
 	struct nand_chip *this = mtd->priv;
@@ -313,9 +287,7 @@
 	return 0;
 }
 
-/*
- * BBT marker is in the first page, no OOB.
- */
+/* BBT marker is in the first page, no OOB */
 static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
 			 struct nand_bbt_descr *td)
 {
@@ -329,35 +301,26 @@
 	return mtd->read(mtd, offs, len, &retlen, buf);
 }
 
-/*
- * Scan read raw data from flash
- */
+/* Scan read raw data from flash */
 static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
 			 size_t len)
 {
 	struct mtd_oob_ops ops;
 	int res;
 
-	ops.mode = MTD_OOB_RAW;
+	ops.mode = MTD_OPS_RAW;
 	ops.ooboffs = 0;
 	ops.ooblen = mtd->oobsize;
 
-
 	while (len > 0) {
-		if (len <= mtd->writesize) {
-			ops.oobbuf = buf + len;
-			ops.datbuf = buf;
-			ops.len = len;
-			return mtd->read_oob(mtd, offs, &ops);
-		} else {
-			ops.oobbuf = buf + mtd->writesize;
-			ops.datbuf = buf;
-			ops.len = mtd->writesize;
-			res = mtd->read_oob(mtd, offs, &ops);
+		ops.datbuf = buf;
+		ops.len = min(len, (size_t)mtd->writesize);
+		ops.oobbuf = buf + ops.len;
 
-			if (res)
-				return res;
-		}
+		res = mtd->read_oob(mtd, offs, &ops);
+
+		if (res)
+			return res;
 
 		buf += mtd->oobsize + mtd->writesize;
 		len -= mtd->writesize;
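
The rewritten loop folds the old two-branch read into one path: each pass reads min(len, writesize) bytes of page data with the page's OOB appended in the same buffer, under the renamed MTD_OPS_RAW mode (formerly MTD_OOB_RAW). One iteration, sketched as a helper and assuming the 3.2-era struct mtd_oob_ops layout:

	static int read_page_plus_oob_raw(struct mtd_info *mtd, loff_t offs,
					  uint8_t *buf, size_t len)
	{
		struct mtd_oob_ops ops;

		ops.mode = MTD_OPS_RAW;		/* no ECC correction applied */
		ops.len = min(len, (size_t)mtd->writesize);
		ops.datbuf = buf;		/* page data first */
		ops.oobbuf = buf + ops.len;	/* OOB directly after it */
		ops.ooboffs = 0;
		ops.ooblen = mtd->oobsize;

		return mtd->read_oob(mtd, offs, &ops);
	}
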
@@ -374,15 +337,13 @@
 		return scan_read_raw_oob(mtd, buf, offs, len);
 }
 
-/*
- * Scan write data with oob to flash
- */
+/* Scan write data with oob to flash */
 static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
 			  uint8_t *buf, uint8_t *oob)
 {
 	struct mtd_oob_ops ops;
 
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ops.ooboffs = 0;
 	ops.ooblen = mtd->oobsize;
 	ops.datbuf = buf;
@@ -403,15 +364,14 @@
 
 /**
  * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
- * @mtd:	MTD device structure
- * @buf:	temporary buffer
- * @td:		descriptor for the bad block table
- * @md:		descriptor for the bad block table mirror
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md:	descriptor for the bad block table mirror
  *
- * Read the bad block table(s) for all chips starting at a given page
- * We assume that the bbt bits are in consecutive order.
- *
-*/
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
 static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
 			 struct nand_bbt_descr *td, struct nand_bbt_descr *md)
 {
@@ -422,8 +382,8 @@
 		scan_read_raw(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
 			      mtd->writesize, td);
 		td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
-		printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
-		       td->pages[0], td->version[0]);
+		pr_info("Bad block table at page %d, version 0x%02X\n",
+			 td->pages[0], td->version[0]);
 	}
 
 	/* Read the mirror version, if available */
@@ -431,15 +391,13 @@
 		scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
 			      mtd->writesize, td);
 		md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
-		printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
-		       md->pages[0], md->version[0]);
+		pr_info("Bad block table at page %d, version 0x%02X\n",
+			 md->pages[0], md->version[0]);
 	}
 	return 1;
 }
 
-/*
- * Scan a given block full
- */
+/* Scan a given block full */
 static int scan_block_full(struct mtd_info *mtd, struct nand_bbt_descr *bd,
 			   loff_t offs, uint8_t *buf, size_t readlen,
 			   int scanlen, int len)
@@ -447,7 +405,8 @@
 	int ret, j;
 
 	ret = scan_read_raw_oob(mtd, buf, offs, readlen);
-	if (ret)
+	/* Ignore ECC errors when checking for BBM */
+	if (ret && !mtd_is_bitflip_or_eccerr(ret))
 		return ret;
 
 	for (j = 0; j < len; j++, buf += scanlen) {
@@ -457,9 +416,7 @@
 	return 0;
 }
 
-/*
- * Scan a given block partially
- */
+/* Scan a given block partially */
 static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
 			   loff_t offs, uint8_t *buf, int len)
 {
@@ -470,16 +427,16 @@
 	ops.oobbuf = buf;
 	ops.ooboffs = 0;
 	ops.datbuf = NULL;
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 
 	for (j = 0; j < len; j++) {
 		/*
-		 * Read the full oob until read_oob is fixed to
-		 * handle single byte reads for 16 bit
-		 * buswidth
+		 * Read the full oob until read_oob is fixed to handle single
+		 * byte reads for 16 bit buswidth.
 		 */
 		ret = mtd->read_oob(mtd, offs, &ops);
-		if (ret)
+		/* Ignore ECC errors when checking for BBM */
+		if (ret && !mtd_is_bitflip_or_eccerr(ret))
 			return ret;
 
 		if (check_short_pattern(buf, bd))
@@ -492,14 +449,14 @@
 
 /**
  * create_bbt - [GENERIC] Create a bad block table by scanning the device
- * @mtd:	MTD device structure
- * @buf:	temporary buffer
- * @bd:		descriptor for the good/bad block search pattern
- * @chip:	create the table for a specific chip, -1 read all chips.
- *		Applies only if NAND_BBT_PERCHIP option is set
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 read all chips; applies only
+ *        if NAND_BBT_PERCHIP option is set
  *
- * Create a bad block table by scanning the device
- * for the given good/bad block identify pattern
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
  */
 static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
 	struct nand_bbt_descr *bd, int chip)
@@ -510,7 +467,7 @@
 	loff_t from;
 	size_t readlen;
 
-	printk(KERN_INFO "Scanning device for bad blocks\n");
+	pr_info("Scanning device for bad blocks\n");
 
 	if (bd->options & NAND_BBT_SCANALLPAGES)
 		len = 1 << (this->bbt_erase_shift - this->page_shift);
@@ -530,14 +487,16 @@
 	}
 
 	if (chip == -1) {
-		/* Note that numblocks is 2 * (real numblocks) here, see i+=2
-		 * below as it makes shifting and masking less painful */
+		/*
+		 * Note that numblocks is 2 * (real numblocks) here; see i+=2
+		 * below, as it makes shifting and masking less painful.
+		 */
 		numblocks = mtd->size >> (this->bbt_erase_shift - 1);
 		startblock = 0;
 		from = 0;
 	} else {
 		if (chip >= this->numchips) {
-			printk(KERN_WARNING "create_bbt(): chipnr (%d) > available chips (%d)\n",
+			pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
 			       chip + 1, this->numchips);
 			return -EINVAL;
 		}
@@ -547,7 +506,7 @@
 		from = (loff_t)startblock << (this->bbt_erase_shift - 1);
 	}
 
-	if (this->options & NAND_BBT_SCANLASTPAGE)
+	if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
 		from += mtd->erasesize - (mtd->writesize * len);
 
 	for (i = startblock; i < numblocks;) {
@@ -566,8 +525,8 @@
 
 		if (ret) {
 			this->bbt[i >> 3] |= 0x03 << (i & 0x6);
-			printk(KERN_WARNING "Bad eraseblock %d at 0x%012llx\n",
-			       i >> 1, (unsigned long long)from);
+			pr_warn("Bad eraseblock %d at 0x%012llx\n",
+				i >> 1, (unsigned long long)from);
 			mtd->ecc_stats.badblocks++;
 		}
 
@@ -579,20 +538,18 @@
 
 /**
  * search_bbt - [GENERIC] scan the device for a specific bad block table
- * @mtd:	MTD device structure
- * @buf:	temporary buffer
- * @td:		descriptor for the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
  *
- * Read the bad block table by searching for a given ident pattern.
- * Search is preformed either from the beginning up or from the end of
- * the device downwards. The search starts always at the start of a
- * block.
- * If the option NAND_BBT_PERCHIP is given, each chip is searched
- * for a bbt, which contains the bad block information of this chip.
- * This is necessary to provide support for certain DOC devices.
+ * Read the bad block table by searching for a given ident pattern. The search
+ * is performed either from the beginning up or from the end of the device
+ * downwards. The search always starts at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
  *
- * The bbt ident pattern resides in the oob area of the first page
- * in a block.
+ * The bbt ident pattern resides in the oob area of the first page in a block.
  */
 static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
 {
@@ -603,7 +560,7 @@
 	int bbtblocks;
 	int blocktopage = this->bbt_erase_shift - this->page_shift;
 
-	/* Search direction top -> down ? */
+	/* Search direction top -> down? */
 	if (td->options & NAND_BBT_LASTBLOCK) {
 		startblock = (mtd->size >> this->bbt_erase_shift) - 1;
 		dir = -1;
@@ -612,7 +569,7 @@
 		dir = 1;
 	}
 
-	/* Do we have a bbt per chip ? */
+	/* Do we have a bbt per chip? */
 	if (td->options & NAND_BBT_PERCHIP) {
 		chips = this->numchips;
 		bbtblocks = this->chipsize >> this->bbt_erase_shift;
@@ -651,23 +608,23 @@
 	/* Check, if we found a bbt for each requested chip */
 	for (i = 0; i < chips; i++) {
 		if (td->pages[i] == -1)
-			printk(KERN_WARNING "Bad block table not found for chip %d\n", i);
+			pr_warn("Bad block table not found for chip %d\n", i);
 		else
-			printk(KERN_DEBUG "Bad block table found at page %d, version 0x%02X\n", td->pages[i],
-			       td->version[i]);
+			pr_info("Bad block table found at page %d, version "
+				 "0x%02X\n", td->pages[i], td->version[i]);
 	}
 	return 0;
 }
 
 /**
  * search_read_bbts - [GENERIC] scan the device for bad block table(s)
- * @mtd:	MTD device structure
- * @buf:	temporary buffer
- * @td:		descriptor for the bad block table
- * @md:		descriptor for the bad block table mirror
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
  *
- * Search and read the bad block table(s)
-*/
+ * Search and read the bad block table(s).
+ */
 static int search_read_bbts(struct mtd_info *mtd, uint8_t * buf, struct nand_bbt_descr *td, struct nand_bbt_descr *md)
 {
 	/* Search the primary table */
@@ -683,16 +640,14 @@
 
 /**
  * write_bbt - [GENERIC] (Re)write the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
  *
- * @mtd:	MTD device structure
- * @buf:	temporary buffer
- * @td:		descriptor for the bad block table
- * @md:		descriptor for the bad block table mirror
- * @chipsel:	selector for a specific chip, -1 for all
- *
- * (Re)write the bad block table
- *
-*/
+ * (Re)write the bad block table.
+ */
 static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
 		     struct nand_bbt_descr *td, struct nand_bbt_descr *md,
 		     int chipsel)
@@ -711,14 +666,14 @@
 	ops.ooblen = mtd->oobsize;
 	ops.ooboffs = 0;
 	ops.datbuf = NULL;
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 
 	if (!rcode)
 		rcode = 0xff;
-	/* Write bad block table per chip rather than per device ? */
+	/* Write bad block table per chip rather than per device? */
 	if (td->options & NAND_BBT_PERCHIP) {
 		numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
-		/* Full device write or specific chip ? */
+		/* Full device write or specific chip? */
 		if (chipsel == -1) {
 			nrchips = this->numchips;
 		} else {
@@ -732,8 +687,8 @@
 
 	/* Loop through the chips */
 	for (; chip < nrchips; chip++) {
-
-		/* There was already a version of the table, reuse the page
+		/*
+		 * There was already a version of the table; reuse the page.
 		 * This applies for absolute placement too, as we have the
 		 * page nr. in td->pages.
 		 */
@@ -742,8 +697,10 @@
 			goto write;
 		}
 
-		/* Automatic placement of the bad block table */
-		/* Search direction top -> down ? */
+		/*
+		 * Automatic placement of the bad block table. Search direction
+		 * top -> down?
+		 */
 		if (td->options & NAND_BBT_LASTBLOCK) {
 			startblock = numblocks * (chip + 1) - 1;
 			dir = -1;
@@ -767,7 +724,7 @@
 			if (!md || md->pages[chip] != page)
 				goto write;
 		}
-		printk(KERN_ERR "No space left to write bad block table\n");
+		pr_err("No space left to write bad block table\n");
 		return -ENOSPC;
 	write:
 
@@ -792,24 +749,22 @@
 
 		bbtoffs = chip * (numblocks >> 2);
 
-		to = ((loff_t) page) << this->page_shift;
+		to = ((loff_t)page) << this->page_shift;
 
-		/* Must we save the block contents ? */
+		/* Must we save the block contents? */
 		if (td->options & NAND_BBT_SAVECONTENT) {
 			/* Make it block aligned */
-			to &= ~((loff_t) ((1 << this->bbt_erase_shift) - 1));
+			to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
 			len = 1 << this->bbt_erase_shift;
 			res = mtd->read(mtd, to, len, &retlen, buf);
 			if (res < 0) {
 				if (retlen != len) {
-					printk(KERN_INFO "nand_bbt: Error "
-					       "reading block for writing "
-					       "the bad block table\n");
+					pr_info("nand_bbt: error reading block "
+						"for writing the bad block table\n");
 					return res;
 				}
-				printk(KERN_WARNING "nand_bbt: ECC error "
-				       "while reading block for writing "
-				       "bad block table\n");
+				pr_warn("nand_bbt: ECC error while reading "
+					"block for writing bad block table\n");
 			}
 			/* Read oob data */
 			ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
@@ -822,19 +777,19 @@
 			pageoffs = page - (int)(to >> this->page_shift);
 			offs = pageoffs << this->page_shift;
 			/* Preset the bbt area with 0xff */
-			memset(&buf[offs], 0xff, (size_t) (numblocks >> sft));
+			memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
 			ooboffs = len + (pageoffs * mtd->oobsize);
 
 		} else if (td->options & NAND_BBT_NO_OOB) {
 			ooboffs = 0;
 			offs = td->len;
-			/* the version byte */
+			/* The version byte */
 			if (td->options & NAND_BBT_VERSION)
 				offs++;
 			/* Calc length */
-			len = (size_t) (numblocks >> sft);
+			len = (size_t)(numblocks >> sft);
 			len += offs;
-			/* Make it page aligned ! */
+			/* Make it page aligned! */
 			len = ALIGN(len, mtd->writesize);
 			/* Preset the buffer with 0xff */
 			memset(buf, 0xff, len);
@@ -842,8 +797,8 @@
 			memcpy(buf, td->pattern, td->len);
 		} else {
 			/* Calc length */
-			len = (size_t) (numblocks >> sft);
-			/* Make it page aligned ! */
+			len = (size_t)(numblocks >> sft);
+			/* Make it page aligned! */
 			len = ALIGN(len, mtd->writesize);
 			/* Preset the buffer with 0xff */
 			memset(buf, 0xff, len +
@@ -857,13 +812,13 @@
 		if (td->options & NAND_BBT_VERSION)
 			buf[ooboffs + td->veroffs] = td->version[chip];
 
-		/* walk through the memory table */
+		/* Walk through the memory table */
 		for (i = 0; i < numblocks;) {
 			uint8_t dat;
 			dat = this->bbt[bbtoffs + (i >> 2)];
 			for (j = 0; j < 4; j++, i++) {
 				int sftcnt = (i << (3 - sft)) & sftmsk;
-				/* Do not store the reserved bbt blocks ! */
+				/* Do not store the reserved bbt blocks! */
 				buf[offs + (i >> sft)] &=
 					~(msk[dat & 0x03] << sftcnt);
 				dat >>= 2;
@@ -884,8 +839,8 @@
 		if (res < 0)
 			goto outerr;
 
-		printk(KERN_DEBUG "Bad block table written to 0x%012llx, version "
-		       "0x%02X\n", (unsigned long long)to, td->version[chip]);
+		pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+			 (unsigned long long)to, td->version[chip]);
 
 		/* Mark it as used */
 		td->pages[chip] = page;
@@ -893,19 +848,18 @@
 	return 0;
 
  outerr:
-	printk(KERN_WARNING
-	       "nand_bbt: Error while writing bad block table %d\n", res);
+	pr_warn("nand_bbt: error while writing bad block table %d\n", res);
 	return res;
 }
 
 /**
  * nand_memory_bbt - [GENERIC] create a memory based bad block table
- * @mtd:	MTD device structure
- * @bd:		descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
  *
- * The function creates a memory based bbt by scanning the device
- * for manufacturer / software marked good / bad blocks
-*/
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
 static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
 {
 	struct nand_chip *this = mtd->priv;
@@ -916,25 +870,24 @@
 
 /**
  * check_create - [GENERIC] create and write bbt(s) if necessary
- * @mtd:	MTD device structure
- * @buf:	temporary buffer
- * @bd:		descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
  *
- * The function checks the results of the previous call to read_bbt
- * and creates / updates the bbt(s) if necessary
- * Creation is necessary if no bbt was found for the chip/device
- * Update is necessary if one of the tables is missing or the
- * version nr. of one table is less than the other
-*/
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version number of one table is less than the other.
+ */
 static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
 {
-	int i, chips, writeops, chipsel, res;
+	int i, chips, writeops, create, chipsel, res, res2;
 	struct nand_chip *this = mtd->priv;
 	struct nand_bbt_descr *td = this->bbt_td;
 	struct nand_bbt_descr *md = this->bbt_md;
 	struct nand_bbt_descr *rd, *rd2;
 
-	/* Do we have a bbt per chip ? */
+	/* Do we have a bbt per chip? */
 	if (td->options & NAND_BBT_PERCHIP)
 		chips = this->numchips;
 	else
@@ -942,86 +895,98 @@
 
 	for (i = 0; i < chips; i++) {
 		writeops = 0;
+		create = 0;
 		rd = NULL;
 		rd2 = NULL;
-		/* Per chip or per device ? */
+		res = res2 = 0;
+		/* Per chip or per device? */
 		chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
-		/* Mirrored table available ? */
+		/* Mirrored table available? */
 		if (md) {
 			if (td->pages[i] == -1 && md->pages[i] == -1) {
+				create = 1;
 				writeops = 0x03;
-				goto create;
-			}
-
-			if (td->pages[i] == -1) {
+			} else if (td->pages[i] == -1) {
 				rd = md;
-				td->version[i] = md->version[i];
-				writeops = 1;
-				goto writecheck;
-			}
-
-			if (md->pages[i] == -1) {
+				writeops = 0x01;
+			} else if (md->pages[i] == -1) {
 				rd = td;
-				md->version[i] = td->version[i];
-				writeops = 2;
-				goto writecheck;
-			}
-
-			if (td->version[i] == md->version[i]) {
+				writeops = 0x02;
+			} else if (td->version[i] == md->version[i]) {
 				rd = td;
 				if (!(td->options & NAND_BBT_VERSION))
 					rd2 = md;
-				goto writecheck;
-			}
-
-			if (((int8_t) (td->version[i] - md->version[i])) > 0) {
+			} else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
 				rd = td;
-				md->version[i] = td->version[i];
-				writeops = 2;
+				writeops = 0x02;
 			} else {
 				rd = md;
-				td->version[i] = md->version[i];
-				writeops = 1;
+				writeops = 0x01;
 			}
-
-			goto writecheck;
-
 		} else {
 			if (td->pages[i] == -1) {
+				create = 1;
 				writeops = 0x01;
-				goto create;
+			} else {
+				rd = td;
 			}
-			rd = td;
-			goto writecheck;
 		}
-	create:
-		/* Create the bad block table by scanning the device ? */
-		if (!(td->options & NAND_BBT_CREATE))
-			continue;
 
-		/* Create the table in memory by scanning the chip(s) */
-		if (!(this->options & NAND_CREATE_EMPTY_BBT))
-			create_bbt(mtd, buf, bd, chipsel);
+		if (create) {
+			/* Create the bad block table by scanning the device? */
+			if (!(td->options & NAND_BBT_CREATE))
+				continue;
 
-		td->version[i] = 1;
-		if (md)
-			md->version[i] = 1;
-	writecheck:
-		/* read back first ? */
-		if (rd)
-			read_abs_bbt(mtd, buf, rd, chipsel);
-		/* If they weren't versioned, read both. */
-		if (rd2)
-			read_abs_bbt(mtd, buf, rd2, chipsel);
+			/* Create the table in memory by scanning the chip(s) */
+			if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
+				create_bbt(mtd, buf, bd, chipsel);
 
-		/* Write the bad block table to the device ? */
+			td->version[i] = 1;
+			if (md)
+				md->version[i] = 1;
+		}
+
+		/* Read back first? */
+		if (rd) {
+			res = read_abs_bbt(mtd, buf, rd, chipsel);
+			if (mtd_is_eccerr(res)) {
+				/* Mark table as invalid */
+				rd->pages[i] = -1;
+				rd->version[i] = 0;
+				i--;
+				continue;
+			}
+		}
+		/* If they weren't versioned, read both */
+		if (rd2) {
+			res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+			if (mtd_is_eccerr(res2)) {
+				/* Mark table as invalid */
+				rd2->pages[i] = -1;
+				rd2->version[i] = 0;
+				i--;
+				continue;
+			}
+		}
+
+		/* Scrub the flash table(s)? */
+		if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+			writeops = 0x03;
+
+		/* Update version numbers before writing */
+		if (md) {
+			td->version[i] = max(td->version[i], md->version[i]);
+			md->version[i] = td->version[i];
+		}
+
+		/* Write the bad block table to the device? */
 		if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
 			res = write_bbt(mtd, buf, td, md, chipsel);
 			if (res < 0)
 				return res;
 		}
 
-		/* Write the mirror bad block table to the device ? */
+		/* Write the mirror bad block table to the device? */
 		if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
 			res = write_bbt(mtd, buf, md, td, chipsel);
 			if (res < 0)
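
The goto-based create/writecheck flow is flattened into straight-line code driven by the writeops bitmask: bit 0 requests a write of the main table, bit 1 a write of the mirror, so 0x03 rewrites both (as forced above when a corrected bitflip calls for a scrub). Named purely for illustration — the patch itself uses the raw values:

	#define BBT_WRITE_MAIN		0x01	/* write td, the main table */
	#define BBT_WRITE_MIRROR	0x02	/* write md, the mirror table */
	#define BBT_WRITE_BOTH		(BBT_WRITE_MAIN | BBT_WRITE_MIRROR)
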
@@ -1033,20 +998,19 @@
 
 /**
  * mark_bbt_regions - [GENERIC] mark the bad block table regions
- * @mtd:	MTD device structure
- * @td:		bad block table descriptor
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
  *
- * The bad block table regions are marked as "bad" to prevent
- * accidental erasures / writes. The regions are identified by
- * the mark 0x02.
-*/
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02.
+ */
 static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
 {
 	struct nand_chip *this = mtd->priv;
 	int i, j, chips, block, nrblocks, update;
 	uint8_t oldval, newval;
 
-	/* Do we have a bbt per chip ? */
+	/* Do we have a bbt per chip? */
 	if (td->options & NAND_BBT_PERCHIP) {
 		chips = this->numchips;
 		nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
@@ -1083,9 +1047,11 @@
 				update = 1;
 			block += 2;
 		}
-		/* If we want reserved blocks to be recorded to flash, and some
-		   new ones have been marked, then we need to update the stored
-		   bbts.  This should only happen once. */
+		/*
+		 * If we want reserved blocks to be recorded to flash, and some
+		 * new ones have been marked, then we need to update the stored
+		 * bbts.  This should only happen once.
+		 */
 		if (update && td->reserved_block_code)
 			nand_update_bbt(mtd, (loff_t)(block - 2) << (this->bbt_erase_shift - 1));
 	}
@@ -1093,8 +1059,8 @@
 
 /**
  * verify_bbt_descr - verify the bad block description
- * @mtd:	MTD device structure
- * @bd:		the table to verify
+ * @mtd: MTD device structure
+ * @bd: the table to verify
  *
  * This functions performs a few sanity checks on the bad block description
  * table.
@@ -1112,16 +1078,16 @@
 	pattern_len = bd->len;
 	bits = bd->options & NAND_BBT_NRBITS_MSK;
 
-	BUG_ON((this->options & NAND_USE_FLASH_BBT_NO_OOB) &&
-			!(this->options & NAND_USE_FLASH_BBT));
+	BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+			!(this->bbt_options & NAND_BBT_USE_FLASH));
 	BUG_ON(!bits);
 
 	if (bd->options & NAND_BBT_VERSION)
 		pattern_len++;
 
 	if (bd->options & NAND_BBT_NO_OOB) {
-		BUG_ON(!(this->options & NAND_USE_FLASH_BBT));
-		BUG_ON(!(this->options & NAND_USE_FLASH_BBT_NO_OOB));
+		BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+		BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
 		BUG_ON(bd->offs);
 		if (bd->options & NAND_BBT_VERSION)
 			BUG_ON(bd->veroffs != bd->len);
@@ -1141,18 +1107,16 @@
 
 /**
  * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
- * @mtd:	MTD device structure
- * @bd:		descriptor for the good/bad block search pattern
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
  *
- * The function checks, if a bad block table(s) is/are already
- * available. If not it scans the device for manufacturer
- * marked good / bad blocks and writes the bad block table(s) to
- * the selected place.
+ * The function checks whether a bad block table(s) is/are already available.
+ * If not, it scans the device for manufacturer marked good / bad blocks and
+ * writes the bad block table(s) to the selected place.
  *
- * The bad block table memory is allocated here. It must be freed
- * by calling the nand_free_bbt function.
- *
-*/
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_free_bbt function.
+ */
 int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
 {
 	struct nand_chip *this = mtd->priv;
@@ -1162,19 +1126,21 @@
 	struct nand_bbt_descr *md = this->bbt_md;
 
 	len = mtd->size >> (this->bbt_erase_shift + 2);
-	/* Allocate memory (2bit per block) and clear the memory bad block table */
+	/*
+	 * Allocate memory (2bit per block) and clear the memory bad block
+	 * table.
+	 */
 	this->bbt = kzalloc(len, GFP_KERNEL);
-	if (!this->bbt) {
-		printk(KERN_ERR "nand_scan_bbt: Out of memory\n");
+	if (!this->bbt)
 		return -ENOMEM;
-	}
 
-	/* If no primary table decriptor is given, scan the device
-	 * to build a memory based bad block table
+	/*
+	 * If no primary table descriptor is given, scan the device to build a
+	 * memory based bad block table.
 	 */
 	if (!td) {
 		if ((res = nand_memory_bbt(mtd, bd))) {
-			printk(KERN_ERR "nand_bbt: Can't scan flash and build the RAM-based BBT\n");
+			pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
 			kfree(this->bbt);
 			this->bbt = NULL;
 		}
@@ -1188,13 +1154,12 @@
 	len += (len >> this->page_shift) * mtd->oobsize;
 	buf = vmalloc(len);
 	if (!buf) {
-		printk(KERN_ERR "nand_bbt: Out of memory\n");
 		kfree(this->bbt);
 		this->bbt = NULL;
 		return -ENOMEM;
 	}
 
-	/* Is the bbt at a given page ? */
+	/* Is the bbt at a given page? */
 	if (td->options & NAND_BBT_ABSPAGE) {
 		res = read_abs_bbts(mtd, buf, td, md);
 	} else {
@@ -1216,15 +1181,15 @@
 
 /**
  * nand_update_bbt - [NAND Interface] update bad block table(s)
- * @mtd:	MTD device structure
- * @offs:	the offset of the newly marked block
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
  *
- * The function updates the bad block table(s)
-*/
+ * The function updates the bad block table(s).
+ */
 int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
 {
 	struct nand_chip *this = mtd->priv;
-	int len, res = 0, writeops = 0;
+	int len, res = 0;
 	int chip, chipsel;
 	uint8_t *buf;
 	struct nand_bbt_descr *td = this->bbt_td;
@@ -1237,14 +1202,10 @@
 	len = (1 << this->bbt_erase_shift);
 	len += (len >> this->page_shift) * mtd->oobsize;
 	buf = kmalloc(len, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "nand_update_bbt: Out of memory\n");
+	if (!buf)
 		return -ENOMEM;
-	}
 
-	writeops = md != NULL ? 0x03 : 0x01;
-
-	/* Do we have a bbt per chip ? */
+	/* Do we have a bbt per chip? */
 	if (td->options & NAND_BBT_PERCHIP) {
 		chip = (int)(offs >> this->chip_shift);
 		chipsel = chip;
@@ -1257,14 +1218,14 @@
 	if (md)
 		md->version[chip]++;
 
-	/* Write the bad block table to the device ? */
-	if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+	/* Write the bad block table to the device? */
+	if (td->options & NAND_BBT_WRITE) {
 		res = write_bbt(mtd, buf, td, md, chipsel);
 		if (res < 0)
 			goto out;
 	}
-	/* Write the mirror bad block table to the device ? */
-	if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+	/* Write the mirror bad block table to the device? */
+	if (md && (md->options & NAND_BBT_WRITE)) {
 		res = write_bbt(mtd, buf, md, td, chipsel);
 	}
 
@@ -1273,8 +1234,10 @@
 	return res;
 }
 
-/* Define some generic bad / good block scan pattern which are used
- * while scanning a device for factory marked good / bad blocks. */
+/*
+ * Define some generic bad / good block scan patterns which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
 static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
 
 static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 };
@@ -1286,8 +1249,7 @@
 	.pattern = scan_agand_pattern
 };
 
-/* Generic flash bbt decriptors
-*/
+/* Generic flash bbt descriptors */
 static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
 static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
 
@@ -1331,31 +1293,27 @@
 	.pattern = mirror_pattern
 };
 
-#define BBT_SCAN_OPTIONS (NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE | \
-		NAND_BBT_SCANBYTE1AND6)
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
 /**
- * nand_create_default_bbt_descr - [Internal] Creates a BBT descriptor structure
- * @this:	NAND chip to create descriptor for
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
  *
  * This function allocates and initializes a nand_bbt_descr for BBM detection
- * based on the properties of "this". The new descriptor is stored in
+ * based on the properties of @this. The new descriptor is stored in
  * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
  * passed to this function.
- *
  */
-static int nand_create_default_bbt_descr(struct nand_chip *this)
+static int nand_create_badblock_pattern(struct nand_chip *this)
 {
 	struct nand_bbt_descr *bd;
 	if (this->badblock_pattern) {
-		printk(KERN_WARNING "BBT descr already allocated; not replacing.\n");
+		pr_warn("Bad block pattern already allocated; not replacing\n");
 		return -EINVAL;
 	}
 	bd = kzalloc(sizeof(*bd), GFP_KERNEL);
-	if (!bd) {
-		printk(KERN_ERR "nand_create_default_bbt_descr: Out of memory\n");
+	if (!bd)
 		return -ENOMEM;
-	}
-	bd->options = this->options & BBT_SCAN_OPTIONS;
+	bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
 	bd->offs = this->badblockpos;
 	bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
 	bd->pattern = scan_ff_pattern;
@@ -1366,22 +1324,20 @@
 
 /**
  * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
- * @mtd:	MTD device structure
+ * @mtd: MTD device structure
  *
- * This function selects the default bad block table
- * support for the device and calls the nand_scan_bbt function
- *
-*/
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function.
+ */
 int nand_default_bbt(struct mtd_info *mtd)
 {
 	struct nand_chip *this = mtd->priv;
 
-	/* Default for AG-AND. We must use a flash based
-	 * bad block table as the devices have factory marked
-	 * _good_ blocks. Erasing those blocks leads to loss
-	 * of the good / bad information, so we _must_ store
-	 * this information in a good / bad table during
-	 * startup
+	/*
+	 * Default for AG-AND. We must use a flash based bad block table as the
+	 * devices have factory marked _good_ blocks. Erasing those blocks
+	 * leads to loss of the good / bad information, so we _must_ store this
+	 * information in a good / bad table during startup.
 	 */
 	if (this->options & NAND_IS_AND) {
 		/* Use the default pattern descriptors */
@@ -1389,15 +1345,15 @@
 			this->bbt_td = &bbt_main_descr;
 			this->bbt_md = &bbt_mirror_descr;
 		}
-		this->options |= NAND_USE_FLASH_BBT;
+		this->bbt_options |= NAND_BBT_USE_FLASH;
 		return nand_scan_bbt(mtd, &agand_flashbased);
 	}
 
-	/* Is a flash based bad block table requested ? */
-	if (this->options & NAND_USE_FLASH_BBT) {
+	/* Is a flash based bad block table requested? */
+	if (this->bbt_options & NAND_BBT_USE_FLASH) {
 		/* Use the default pattern descriptors */
 		if (!this->bbt_td) {
-			if (this->options & NAND_USE_FLASH_BBT_NO_OOB) {
+			if (this->bbt_options & NAND_BBT_NO_OOB) {
 				this->bbt_td = &bbt_main_no_bbt_descr;
 				this->bbt_md = &bbt_mirror_no_bbt_descr;
 			} else {
@@ -1411,18 +1367,17 @@
 	}
 
 	if (!this->badblock_pattern)
-		nand_create_default_bbt_descr(this);
+		nand_create_badblock_pattern(this);
 
 	return nand_scan_bbt(mtd, this->badblock_pattern);
 }
 
 /**
  * nand_isbad_bbt - [NAND Interface] Check if a block is bad
- * @mtd:	MTD device structure
- * @offs:	offset in the device
- * @allowbbt:	allow access to bad block table region
- *
-*/
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
 int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
 {
 	struct nand_chip *this = mtd->priv;
@@ -1433,8 +1388,9 @@
 	block = (int)(offs >> (this->bbt_erase_shift - 1));
 	res = (this->bbt[block >> 3] >> (block & 0x06)) & 0x03;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
-	      (unsigned int)offs, block >> 1, res);
+	pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: "
+			"(block %d) 0x%02x\n",
+			(unsigned int)offs, block >> 1, res);
 
 	switch ((int)res) {
 	case 0x00:
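
The lookup above packs four blocks into each byte of the in-memory table, with the block index pre-doubled so the byte index is block >> 3 and the shift is block & 0x06. The same arithmetic with an undoubled index, as a sketch — the state meanings are inferred from how read_bbt() fills the table earlier in this patch:

	#include <stdint.h>

	/* 0x0 good, 0x1 worn bad, 0x2 reserved, 0x3 factory bad (inferred) */
	static int bbt_block_state(const uint8_t *bbt, int block)
	{
		return (bbt[block >> 2] >> ((block & 0x3) << 1)) & 0x3;
	}
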
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index 0f931e7..3803e0b 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -93,8 +93,8 @@
 				buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
 			/* else error in ecc, no action needed */
 
-			DEBUG(MTD_DEBUG_LEVEL0, "%s: corrected bitflip %u\n",
-			      __func__, errloc[i]);
+			pr_debug("%s: corrected bitflip %u\n", __func__,
+					errloc[i]);
 		}
 	} else if (count < 0) {
 		printk(KERN_ERR "ecc unrecoverable error\n");
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index 271b8e7..b7cfe0d 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -110,7 +110,7 @@
 
 /*
  * addressbits is a lookup table to filter out the bits from the xor-ed
- * ecc data that identify the faulty location.
+ * ECC data that identify the faulty location.
  * this is only used for repairing parity
  * see the comments in nand_correct_data for more details
  */
@@ -153,7 +153,7 @@
  * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
  *			 block
  * @buf:	input buffer with raw data
- * @eccsize:	data bytes per ecc step (256 or 512)
+ * @eccsize:	data bytes per ECC step (256 or 512)
  * @code:	output buffer with ECC
  */
 void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
@@ -348,7 +348,7 @@
 		rp17 = (par ^ rp16) & 0xff;
 
 	/*
-	 * Finally calculate the ecc bits.
+	 * Finally calculate the ECC bits.
 	 * Again here it might seem that there are performance optimisations
 	 * possible, but benchmarks showed that on the system this is developed
 	 * the code below is the fastest
@@ -436,7 +436,7 @@
  * @buf:	raw data read from the chip
  * @read_ecc:	ECC from the chip
  * @calc_ecc:	the ECC calculated from raw data
- * @eccsize:	data bytes per ecc step (256 or 512)
+ * @eccsize:	data bytes per ECC step (256 or 512)
  *
  * Detect and correct a 1 bit error for eccsize byte block
  */
@@ -505,7 +505,7 @@
 	}
 	/* count nr of bits; use table lookup, faster than calculating it */
 	if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
-		return 1;	/* error in ecc data; no action needed */
+		return 1;	/* error in ECC data; no action needed */
 
 	printk(KERN_ERR "uncorrectable error : ");
 	return -1;
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 357e8c5..34c03be 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2273,9 +2273,9 @@
 
 	switch (bbt) {
 	case 2:
-		 chip->options |= NAND_USE_FLASH_BBT_NO_OOB;
+		 chip->bbt_options |= NAND_BBT_NO_OOB;
 	case 1:
-		 chip->options |= NAND_USE_FLASH_BBT;
+		 chip->bbt_options |= NAND_BBT_USE_FLASH;
 	case 0:
 		break;
 	default:
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index ea2dea8..ee17139 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -42,7 +42,6 @@
 	struct nand_chip chip;
 	int chip_select;
 	struct nand_hw_control ndfc_control;
-	struct mtd_partition *parts;
 };
 
 static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
@@ -159,13 +158,9 @@
 static int ndfc_chip_init(struct ndfc_controller *ndfc,
 			  struct device_node *node)
 {
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-	static const char *part_types[] = { "cmdlinepart", NULL };
-#else
-	static const char *part_types[] = { NULL };
-#endif
 	struct device_node *flash_np;
 	struct nand_chip *chip = &ndfc->chip;
+	struct mtd_part_parser_data ppdata;
 	int ret;
 
 	chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
@@ -193,6 +188,7 @@
 	if (!flash_np)
 		return -ENODEV;
 
+	ppdata.of_node = flash_np;
 	ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
 			dev_name(&ndfc->ofdev->dev), flash_np->name);
 	if (!ndfc->mtd.name) {
@@ -204,18 +200,7 @@
 	if (ret)
 		goto err;
 
-	ret = parse_mtd_partitions(&ndfc->mtd, part_types, &ndfc->parts, 0);
-	if (ret < 0)
-		goto err;
-
-	if (ret == 0) {
-		ret = of_mtd_parse_partitions(&ndfc->ofdev->dev, flash_np,
-					      &ndfc->parts);
-		if (ret < 0)
-			goto err;
-	}
-
-	ret = mtd_device_register(&ndfc->mtd, ndfc->parts, ret);
+	ret = mtd_device_parse_register(&ndfc->mtd, NULL, &ppdata, NULL, 0);
 
 err:
 	of_node_put(flash_np);
@@ -288,6 +273,7 @@
 	struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
 
 	nand_release(&ndfc->mtd);
+	kfree(ndfc->mtd.name);
 
 	return 0;
 }
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index b6a5c86..b463ecf 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -187,6 +187,7 @@
 		pdata->exit();
 
 	if (host) {
+		nand_release(&host->mtd);
 		iounmap(host->cmd_va);
 		iounmap(host->data_va);
 		iounmap(host->addr_va);
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 9c30a0b..fa8faed 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -339,6 +339,7 @@
 	struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
 	struct resource *res;
 
+	nand_release(&nuc900_nand->mtd);
 	iounmap(nuc900_nand->reg);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ec22a5a..f745f00 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -95,8 +95,6 @@
 #define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
 #define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 /* oob info generated runtime depending on ecc algorithm and layout selected */
 static struct nand_ecclayout omap_oobinfo;
 /* Define some generic bad / good block scan pattern which are used
@@ -115,7 +113,6 @@
 	struct nand_hw_control		controller;
 	struct omap_nand_platform_data	*pdata;
 	struct mtd_info			mtd;
-	struct mtd_partition		*parts;
 	struct nand_chip		nand;
 	struct platform_device		*pdev;
 
@@ -745,12 +742,12 @@
 
 	case 1:
 		/* Uncorrectable error */
-		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR 1\n");
+		pr_debug("ECC UNCORRECTED_ERROR 1\n");
 		return -1;
 
 	case 11:
 		/* UN-Correctable error */
-		DEBUG(MTD_DEBUG_LEVEL0, "ECC UNCORRECTED_ERROR B\n");
+		pr_debug("ECC UNCORRECTED_ERROR B\n");
 		return -1;
 
 	case 12:
@@ -767,8 +764,8 @@
 
 		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
 
-		DEBUG(MTD_DEBUG_LEVEL0, "Correcting single bit ECC error at "
-				"offset: %d, bit: %d\n", find_byte, find_bit);
+		pr_debug("Correcting single bit ECC error at offset: "
+				"%d, bit: %d\n", find_byte, find_bit);
 
 		page_data[find_byte] ^= (1 << find_bit);
 
@@ -780,7 +777,7 @@
 			    ecc_data2[2] == 0)
 				return 0;
 		}
-		DEBUG(MTD_DEBUG_LEVEL0, "UNCORRECTED_ERROR default\n");
+		pr_debug("UNCORRECTED_ERROR default\n");
 		return -1;
 	}
 }
@@ -1104,13 +1101,8 @@
 		goto out_release_mem_region;
 	}
 
-	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
-	if (err > 0)
-		mtd_device_register(&info->mtd, info->parts, err);
-	else if (pdata->parts)
-		mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
-	else
-		mtd_device_register(&info->mtd, NULL, 0);
+	mtd_device_parse_register(&info->mtd, NULL, 0,
+			pdata->parts, pdata->nr_parts);
 
 	platform_set_drvdata(pdev, &info->mtd);
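
This driver, like several below, swaps the parse_mtd_partitions() + mtd_device_register() pair for mtd_device_parse_register(), which runs the registered partition parsers (cmdlinepart among the defaults, hence the deleted part_probes arrays) and falls back to the supplied static table. A hedged sketch of its 3.2-era prototype — treat the parameter names as approximate:

	int mtd_device_parse_register(struct mtd_info *mtd,
				      const char **part_probe_types,
				      struct mtd_part_parser_data *parser_data,
				      const struct mtd_partition *defparts,
				      int defnr_parts);
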
 
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 7794d06..29f505ad 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -21,8 +21,6 @@
 #include <mach/hardware.h>
 #include <plat/orion_nand.h>
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
 {
 	struct nand_chip *nc = mtd->priv;
@@ -81,8 +79,6 @@
 	struct resource *res;
 	void __iomem *io_base;
 	int ret = 0;
-	struct mtd_partition *partitions = NULL;
-	int num_part = 0;
 
 	nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
 	if (!nc) {
@@ -132,17 +128,9 @@
 		goto no_dev;
 	}
 
-#ifdef CONFIG_MTD_CMDLINE_PARTS
 	mtd->name = "orion_nand";
-	num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
-#endif
-	/* If cmdline partitions have been passed, let them be used */
-	if (num_part <= 0) {
-		num_part = board->nr_parts;
-		partitions = board->parts;
-	}
-
-	ret = mtd_device_register(mtd, partitions, num_part);
+	ret = mtd_device_parse_register(mtd, NULL, 0,
+			board->parts, board->nr_parts);
 	if (ret) {
 		nand_release(mtd);
 		goto no_dev;
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index b1aa41b..a97264e 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -155,7 +155,8 @@
 	chip->ecc.mode = NAND_ECC_SOFT;
 
 	/* Enable the following for a flash based bad block table */
-	chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
+	chip->options = NAND_NO_AUTOINCR;
+	chip->bbt_options = NAND_BBT_USE_FLASH;
 
 	/* Scan to find existence of the device */
 	if (nand_scan(pasemi_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 633c04b..ea8e123 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -21,8 +21,6 @@
 	struct nand_chip	chip;
 	struct mtd_info		mtd;
 	void __iomem		*io_base;
-	int			nr_parts;
-	struct mtd_partition	*parts;
 };
 
 /*
@@ -79,6 +77,7 @@
 	data->chip.read_buf = pdata->ctrl.read_buf;
 	data->chip.chip_delay = pdata->chip.chip_delay;
 	data->chip.options |= pdata->chip.options;
+	data->chip.bbt_options |= pdata->chip.bbt_options;
 
 	data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
 	data->chip.ecc.layout = pdata->chip.ecclayout;
@@ -99,23 +98,9 @@
 		goto out;
 	}
 
-	if (pdata->chip.part_probe_types) {
-		err = parse_mtd_partitions(&data->mtd,
-					pdata->chip.part_probe_types,
-					&data->parts, 0);
-		if (err > 0) {
-			mtd_device_register(&data->mtd, data->parts, err);
-			return 0;
-		}
-	}
-	if (pdata->chip.set_parts)
-		pdata->chip.set_parts(data->mtd.size, &pdata->chip);
-	if (pdata->chip.partitions) {
-		data->parts = pdata->chip.partitions;
-		err = mtd_device_register(&data->mtd, data->parts,
-			pdata->chip.nr_partitions);
-	} else
-		err = mtd_device_register(&data->mtd, NULL, 0);
+	err = mtd_device_parse_register(&data->mtd,
+			pdata->chip.part_probe_types, 0,
+			pdata->chip.partitions, pdata->chip.nr_partitions);
 
 	if (!err)
 		return err;
@@ -145,8 +130,6 @@
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	nand_release(&data->mtd);
-	if (data->parts && data->parts != pdata->chip.partitions)
-		kfree(data->parts);
 	if (pdata->ctrl.remove)
 		pdata->ctrl.remove(pdev);
 	iounmap(data->io_base);
diff --git a/drivers/mtd/nand/ppchameleonevb.c b/drivers/mtd/nand/ppchameleonevb.c
index 3bbb796..7e52af51 100644
--- a/drivers/mtd/nand/ppchameleonevb.c
+++ b/drivers/mtd/nand/ppchameleonevb.c
@@ -99,8 +99,6 @@
 
 #define NUM_PARTITIONS 1
 
-extern int parse_cmdline_partitions(struct mtd_info *master, struct mtd_partition **pparts, const char *mtd_id);
-
 /*
  *	hardware specific access to control-lines
  */
@@ -187,18 +185,12 @@
 }
 #endif
 
-const char *part_probes[] = { "cmdlinepart", NULL };
-const char *part_probes_evb[] = { "cmdlinepart", NULL };
-
 /*
  * Main initialization routine
  */
 static int __init ppchameleonevb_init(void)
 {
 	struct nand_chip *this;
-	const char *part_type = 0;
-	int mtd_parts_nb = 0;
-	struct mtd_partition *mtd_parts = 0;
 	void __iomem *ppchameleon_fio_base;
 	void __iomem *ppchameleonevb_fio_base;
 
@@ -281,24 +273,13 @@
 #endif
 
 	ppchameleon_mtd->name = "ppchameleon-nand";
-	mtd_parts_nb = parse_mtd_partitions(ppchameleon_mtd, part_probes, &mtd_parts, 0);
-	if (mtd_parts_nb > 0)
-		part_type = "command line";
-	else
-		mtd_parts_nb = 0;
-
-	if (mtd_parts_nb == 0) {
-		if (ppchameleon_mtd->size == NAND_SMALL_SIZE)
-			mtd_parts = partition_info_me;
-		else
-			mtd_parts = partition_info_hi;
-		mtd_parts_nb = NUM_PARTITIONS;
-		part_type = "static";
-	}
 
 	/* Register the partitions */
-	printk(KERN_NOTICE "Using %s partition definition\n", part_type);
-	mtd_device_register(ppchameleon_mtd, mtd_parts, mtd_parts_nb);
+	mtd_device_parse_register(ppchameleon_mtd, NULL, 0,
+			ppchameleon_mtd->size == NAND_SMALL_SIZE ?
+				partition_info_me :
+				partition_info_hi,
+			NUM_PARTITIONS);
 
  nand_evb_init:
 	/****************************
@@ -382,21 +363,13 @@
 	}
 
 	ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME;
-	mtd_parts_nb = parse_mtd_partitions(ppchameleonevb_mtd, part_probes_evb, &mtd_parts, 0);
-	if (mtd_parts_nb > 0)
-		part_type = "command line";
-	else
-		mtd_parts_nb = 0;
-
-	if (mtd_parts_nb == 0) {
-		mtd_parts = partition_info_evb;
-		mtd_parts_nb = NUM_PARTITIONS;
-		part_type = "static";
-	}
 
 	/* Register the partitions */
-	printk(KERN_NOTICE "Using %s partition definition\n", part_type);
-	mtd_device_register(ppchameleonevb_mtd, mtd_parts, mtd_parts_nb);
+	mtd_device_parse_register(ppchameleonevb_mtd, NULL, 0,
+			partition_info_evb,
+			NUM_PARTITIONS);
 
 	/* Return happy */
 	return 0;
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 1fb3b3a..9eb7f87 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -110,6 +110,7 @@
 
 enum {
 	STATE_IDLE = 0,
+	STATE_PREPARED,
 	STATE_CMD_HANDLE,
 	STATE_DMA_READING,
 	STATE_DMA_WRITING,
@@ -120,21 +121,40 @@
 	STATE_READY,
 };
 
-struct pxa3xx_nand_info {
-	struct nand_chip	nand_chip;
+struct pxa3xx_nand_host {
+	struct nand_chip	chip;
+	struct pxa3xx_nand_cmdset *cmdset;
+	struct mtd_info         *mtd;
+	void			*info_data;
 
+	/* page size of attached chip */
+	unsigned int		page_size;
+	int			use_ecc;
+	int			cs;
+
+	/* calculated from pxa3xx_nand_flash data */
+	unsigned int		col_addr_cycles;
+	unsigned int		row_addr_cycles;
+	size_t			read_id_bytes;
+
+	/* cached register value */
+	uint32_t		reg_ndcr;
+	uint32_t		ndtr0cs0;
+	uint32_t		ndtr1cs0;
+};
+
+struct pxa3xx_nand_info {
 	struct nand_hw_control	controller;
 	struct platform_device	 *pdev;
-	struct pxa3xx_nand_cmdset *cmdset;
 
 	struct clk		*clk;
 	void __iomem		*mmio_base;
 	unsigned long		mmio_phys;
+	struct completion	cmd_complete;
 
 	unsigned int 		buf_start;
 	unsigned int		buf_count;
 
-	struct mtd_info         *mtd;
 	/* DMA information */
 	int			drcmr_dat;
 	int			drcmr_cmd;
@@ -142,44 +162,27 @@
 	unsigned char		*data_buff;
 	unsigned char		*oob_buff;
 	dma_addr_t 		data_buff_phys;
-	size_t			data_buff_size;
 	int 			data_dma_ch;
 	struct pxa_dma_desc	*data_desc;
 	dma_addr_t 		data_desc_addr;
 
-	uint32_t		reg_ndcr;
-
-	/* saved column/page_addr during CMD_SEQIN */
-	int			seqin_column;
-	int			seqin_page_addr;
-
-	/* relate to the command */
+	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
 	unsigned int		state;
 
+	int			cs;
 	int			use_ecc;	/* use HW ECC ? */
 	int			use_dma;	/* use DMA ? */
 	int			is_ready;
 
 	unsigned int		page_size;	/* page size of attached chip */
 	unsigned int		data_size;	/* data size in FIFO */
+	unsigned int		oob_size;
 	int 			retcode;
-	struct completion 	cmd_complete;
 
 	/* generated NDCBx register values */
 	uint32_t		ndcb0;
 	uint32_t		ndcb1;
 	uint32_t		ndcb2;
-
-	/* timing calcuted from setting */
-	uint32_t		ndtr0cs0;
-	uint32_t		ndtr1cs0;
-
-	/* calculated from pxa3xx_nand_flash data */
-	size_t		oob_size;
-	size_t		read_id_bytes;
-
-	unsigned int	col_addr_cycles;
-	unsigned int	row_addr_cycles;
 };
 
 static int use_dma = 1;
@@ -225,7 +228,7 @@
 /* Define a default flash type setting serve as flash detecting only */
 #define DEFAULT_FLASH_TYPE (&builtin_flash_types[0])
 
-const char *mtd_names[] = {"pxa3xx_nand-0", NULL};
+const char *mtd_names[] = {"pxa3xx_nand-0", "pxa3xx_nand-1", NULL};
 
 #define NDTR0_tCH(c)	(min((c), 7) << 19)
 #define NDTR0_tCS(c)	(min((c), 7) << 16)
@@ -241,9 +244,10 @@
 /* convert nano-seconds to nand flash controller clock cycles */
 #define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
 
-static void pxa3xx_nand_set_timing(struct pxa3xx_nand_info *info,
+static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
 				   const struct pxa3xx_nand_timing *t)
 {
+	struct pxa3xx_nand_info *info = host->info_data;
 	unsigned long nand_clk = clk_get_rate(info->clk);
 	uint32_t ndtr0, ndtr1;
 
@@ -258,23 +262,24 @@
 		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
 		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
 
-	info->ndtr0cs0 = ndtr0;
-	info->ndtr1cs0 = ndtr1;
+	host->ndtr0cs0 = ndtr0;
+	host->ndtr1cs0 = ndtr1;
 	nand_writel(info, NDTR0CS0, ndtr0);
 	nand_writel(info, NDTR1CS0, ndtr1);
 }
 
 static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info)
 {
-	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;
+	struct pxa3xx_nand_host *host = info->host[info->cs];
+	int oob_enable = host->reg_ndcr & NDCR_SPARE_EN;
 
-	info->data_size = info->page_size;
+	info->data_size = host->page_size;
 	if (!oob_enable) {
 		info->oob_size = 0;
 		return;
 	}
 
-	switch (info->page_size) {
+	switch (host->page_size) {
 	case 2048:
 		info->oob_size = (info->use_ecc) ? 40 : 64;
 		break;
@@ -292,9 +297,10 @@
  */
 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
 {
+	struct pxa3xx_nand_host *host = info->host[info->cs];
 	uint32_t ndcr;
 
-	ndcr = info->reg_ndcr;
+	ndcr = host->reg_ndcr;
 	ndcr |= info->use_ecc ? NDCR_ECC_EN : 0;
 	ndcr |= info->use_dma ? NDCR_DMA_EN : 0;
 	ndcr |= NDCR_ND_RUN;
@@ -359,7 +365,7 @@
 					DIV_ROUND_UP(info->oob_size, 4));
 		break;
 	default:
-		printk(KERN_ERR "%s: invalid state %d\n", __func__,
+		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
 				info->state);
 		BUG();
 	}
@@ -385,7 +391,7 @@
 		desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
 		break;
 	default:
-		printk(KERN_ERR "%s: invalid state %d\n", __func__,
+		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
 				info->state);
 		BUG();
 	}
@@ -416,6 +422,15 @@
 {
 	struct pxa3xx_nand_info *info = devid;
 	unsigned int status, is_completed = 0;
+	unsigned int ready, cmd_done;
+
+	if (info->cs == 0) {
+		ready           = NDSR_FLASH_RDY;
+		cmd_done        = NDSR_CS0_CMDD;
+	} else {
+		ready           = NDSR_RDY;
+		cmd_done        = NDSR_CS1_CMDD;
+	}
 
 	status = nand_readl(info, NDSR);
 
@@ -437,11 +452,11 @@
 			handle_data_pio(info);
 		}
 	}
-	if (status & NDSR_CS0_CMDD) {
+	if (status & cmd_done) {
 		info->state = STATE_CMD_DONE;
 		is_completed = 1;
 	}
-	if (status & NDSR_FLASH_RDY) {
+	if (status & ready) {
 		info->is_ready = 1;
 		info->state = STATE_READY;
 	}
@@ -463,12 +478,6 @@
 	return IRQ_HANDLED;
 }
 
-static int pxa3xx_nand_dev_ready(struct mtd_info *mtd)
-{
-	struct pxa3xx_nand_info *info = mtd->priv;
-	return (nand_readl(info, NDSR) & NDSR_RDY) ? 1 : 0;
-}
-
 static inline int is_buf_blank(uint8_t *buf, size_t len)
 {
 	for (; len > 0; len--)
@@ -481,10 +490,12 @@
 		uint16_t column, int page_addr)
 {
 	uint16_t cmd;
-	int addr_cycle, exec_cmd, ndcb0;
-	struct mtd_info *mtd = info->mtd;
+	int addr_cycle, exec_cmd;
+	struct pxa3xx_nand_host *host;
+	struct mtd_info *mtd;
 
-	ndcb0 = 0;
+	host = info->host[info->cs];
+	mtd = host->mtd;
 	addr_cycle = 0;
 	exec_cmd = 1;
 
@@ -495,6 +506,10 @@
 	info->use_ecc		= 0;
 	info->is_ready		= 0;
 	info->retcode		= ERR_NONE;
+	if (info->cs != 0)
+		info->ndcb0 = NDCB0_CSEL;
+	else
+		info->ndcb0 = 0;
 
 	switch (command) {
 	case NAND_CMD_READ0:
@@ -512,20 +527,19 @@
 		break;
 	}
 
-	info->ndcb0 = ndcb0;
-	addr_cycle = NDCB0_ADDR_CYC(info->row_addr_cycles
-				    + info->col_addr_cycles);
+	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
+				    + host->col_addr_cycles);
 
 	switch (command) {
 	case NAND_CMD_READOOB:
 	case NAND_CMD_READ0:
-		cmd = info->cmdset->read1;
+		cmd = host->cmdset->read1;
 		if (command == NAND_CMD_READOOB)
 			info->buf_start = mtd->writesize + column;
 		else
 			info->buf_start = column;
 
-		if (unlikely(info->page_size < PAGE_CHUNK_SIZE))
+		if (unlikely(host->page_size < PAGE_CHUNK_SIZE))
 			info->ndcb0 |= NDCB0_CMD_TYPE(0)
 					| addr_cycle
 					| (cmd & NDCB0_CMD1_MASK);
@@ -537,7 +551,7 @@
 
 	case NAND_CMD_SEQIN:
 		/* small page addr setting */
-		if (unlikely(info->page_size < PAGE_CHUNK_SIZE)) {
+		if (unlikely(host->page_size < PAGE_CHUNK_SIZE)) {
 			info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
 					| (column & 0xFF);
 
@@ -564,7 +578,7 @@
 			break;
 		}
 
-		cmd = info->cmdset->program;
+		cmd = host->cmdset->program;
 		info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
 				| NDCB0_AUTO_RS
 				| NDCB0_ST_ROW_EN
@@ -574,8 +588,8 @@
 		break;
 
 	case NAND_CMD_READID:
-		cmd = info->cmdset->read_id;
-		info->buf_count = info->read_id_bytes;
+		cmd = host->cmdset->read_id;
+		info->buf_count = host->read_id_bytes;
 		info->ndcb0 |= NDCB0_CMD_TYPE(3)
 				| NDCB0_ADDR_CYC(1)
 				| cmd;
@@ -583,7 +597,7 @@
 		info->data_size = 8;
 		break;
 	case NAND_CMD_STATUS:
-		cmd = info->cmdset->read_status;
+		cmd = host->cmdset->read_status;
 		info->buf_count = 1;
 		info->ndcb0 |= NDCB0_CMD_TYPE(4)
 				| NDCB0_ADDR_CYC(1)
@@ -593,7 +607,7 @@
 		break;
 
 	case NAND_CMD_ERASE1:
-		cmd = info->cmdset->erase;
+		cmd = host->cmdset->erase;
 		info->ndcb0 |= NDCB0_CMD_TYPE(2)
 				| NDCB0_AUTO_RS
 				| NDCB0_ADDR_CYC(3)
@@ -604,7 +618,7 @@
 
 		break;
 	case NAND_CMD_RESET:
-		cmd = info->cmdset->reset;
+		cmd = host->cmdset->reset;
 		info->ndcb0 |= NDCB0_CMD_TYPE(5)
 				| cmd;
 
@@ -616,8 +630,8 @@
 
 	default:
 		exec_cmd = 0;
-		printk(KERN_ERR "pxa3xx-nand: non-supported"
-			" command %x\n", command);
+		dev_err(&info->pdev->dev, "non-supported command %x\n",
+				command);
 		break;
 	}
 
@@ -627,7 +641,8 @@
 static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
 				int column, int page_addr)
 {
-	struct pxa3xx_nand_info *info = mtd->priv;
+	struct pxa3xx_nand_host *host = mtd->priv;
+	struct pxa3xx_nand_info *info = host->info_data;
 	int ret, exec_cmd;
 
 	/*
@@ -635,9 +650,21 @@
 	 * "byte" address into a "word" address appropriate
 	 * for indexing a word-oriented device
 	 */
-	if (info->reg_ndcr & NDCR_DWIDTH_M)
+	if (host->reg_ndcr & NDCR_DWIDTH_M)
 		column /= 2;
 
+	/*
+	 * A different NAND chip may be hooked to each chip select,
+	 * so check whether the chip select has changed and, if so,
+	 * reload the timing registers.
+	 */
+	if (info->cs != host->cs) {
+		info->cs = host->cs;
+		nand_writel(info, NDTR0CS0, host->ndtr0cs0);
+		nand_writel(info, NDTR1CS0, host->ndtr1cs0);
+	}
+
+	info->state = STATE_PREPARED;
 	exec_cmd = prepare_command_pool(info, command, column, page_addr);
 	if (exec_cmd) {
 		init_completion(&info->cmd_complete);
@@ -646,12 +673,12 @@
 		ret = wait_for_completion_timeout(&info->cmd_complete,
 				CHIP_DELAY_TIMEOUT);
 		if (!ret) {
-			printk(KERN_ERR "Wait time out!!!\n");
+			dev_err(&info->pdev->dev, "Wait time out!!!\n");
 			/* Stop State Machine for next command cycle */
 			pxa3xx_nand_stop(info);
 		}
-		info->state = STATE_IDLE;
 	}
+	info->state = STATE_IDLE;
 }
 
 static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
@@ -664,7 +691,8 @@
 static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
 		struct nand_chip *chip, uint8_t *buf, int page)
 {
-	struct pxa3xx_nand_info *info = mtd->priv;
+	struct pxa3xx_nand_host *host = mtd->priv;
+	struct pxa3xx_nand_info *info = host->info_data;
 
 	chip->read_buf(mtd, buf, mtd->writesize);
 	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -685,6 +713,8 @@
 		 * OOB, ignore such double bit errors
 		 */
 		if (is_buf_blank(buf, mtd->writesize))
+			info->retcode = ERR_NONE;
+		else
 			mtd->ecc_stats.failed++;
 	}
 
@@ -693,7 +723,8 @@
 
 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
 {
-	struct pxa3xx_nand_info *info = mtd->priv;
+	struct pxa3xx_nand_host *host = mtd->priv;
+	struct pxa3xx_nand_info *info = host->info_data;
 	char retval = 0xFF;
 
 	if (info->buf_start < info->buf_count)
@@ -705,7 +736,8 @@
 
 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
 {
-	struct pxa3xx_nand_info *info = mtd->priv;
+	struct pxa3xx_nand_host *host = mtd->priv;
+	struct pxa3xx_nand_info *info = host->info_data;
 	u16 retval = 0xFFFF;
 
 	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
@@ -717,7 +749,8 @@
 
 static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
 {
-	struct pxa3xx_nand_info *info = mtd->priv;
+	struct pxa3xx_nand_host *host = mtd->priv;
+	struct pxa3xx_nand_info *info = host->info_data;
 	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
 
 	memcpy(buf, info->data_buff + info->buf_start, real_len);
@@ -727,7 +760,8 @@
 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
 		const uint8_t *buf, int len)
 {
-	struct pxa3xx_nand_info *info = mtd->priv;
+	struct pxa3xx_nand_host *host = mtd->priv;
+	struct pxa3xx_nand_info *info = host->info_data;
 	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
 
 	memcpy(info->data_buff + info->buf_start, buf, real_len);
@@ -747,7 +781,8 @@
 
 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
 {
-	struct pxa3xx_nand_info *info = mtd->priv;
+	struct pxa3xx_nand_host *host = mtd->priv;
+	struct pxa3xx_nand_info *info = host->info_data;
 
 	/* pxa3xx_nand_send_command has waited for command complete */
 	if (this->state == FL_WRITING || this->state == FL_ERASING) {
@@ -770,54 +805,70 @@
 {
 	struct platform_device *pdev = info->pdev;
 	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
+	struct pxa3xx_nand_host *host = info->host[info->cs];
 	uint32_t ndcr = 0x0; /* enable all interrupts */
 
-	if (f->page_size != 2048 && f->page_size != 512)
+	if (f->page_size != 2048 && f->page_size != 512) {
+		dev_err(&pdev->dev, "Current only support 2048 and 512 size\n");
 		return -EINVAL;
+	}
 
-	if (f->flash_width != 16 && f->flash_width != 8)
+	if (f->flash_width != 16 && f->flash_width != 8) {
+		dev_err(&pdev->dev, "Only support 8bit and 16 bit!\n");
 		return -EINVAL;
+	}
 
 	/* calculate flash information */
-	info->cmdset = &default_cmdset;
-	info->page_size = f->page_size;
-	info->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
+	host->cmdset = &default_cmdset;
+	host->page_size = f->page_size;
+	host->read_id_bytes = (f->page_size == 2048) ? 4 : 2;
 
 	/* calculate addressing information */
-	info->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
+	host->col_addr_cycles = (f->page_size == 2048) ? 2 : 1;
 
 	if (f->num_blocks * f->page_per_block > 65536)
-		info->row_addr_cycles = 3;
+		host->row_addr_cycles = 3;
 	else
-		info->row_addr_cycles = 2;
+		host->row_addr_cycles = 2;
 
 	ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
-	ndcr |= (info->col_addr_cycles == 2) ? NDCR_RA_START : 0;
+	ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
 	ndcr |= (f->page_per_block == 64) ? NDCR_PG_PER_BLK : 0;
 	ndcr |= (f->page_size == 2048) ? NDCR_PAGE_SZ : 0;
 	ndcr |= (f->flash_width == 16) ? NDCR_DWIDTH_M : 0;
 	ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
 
-	ndcr |= NDCR_RD_ID_CNT(info->read_id_bytes);
+	ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
 	ndcr |= NDCR_SPARE_EN; /* enable spare by default */
 
-	info->reg_ndcr = ndcr;
+	host->reg_ndcr = ndcr;
 
-	pxa3xx_nand_set_timing(info, f->timing);
+	pxa3xx_nand_set_timing(host, f->timing);
 	return 0;
 }
 
 static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
 {
+	/*
+	 * Chip select 0 is hard coded here because keep_config is not
+	 * supported when more than one chip is attached to the controller.
+	 */
+	struct pxa3xx_nand_host *host = info->host[0];
 	uint32_t ndcr = nand_readl(info, NDCR);
-	info->page_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
-	/* set info fields needed to read id */
-	info->read_id_bytes = (info->page_size == 2048) ? 4 : 2;
-	info->reg_ndcr = ndcr;
-	info->cmdset = &default_cmdset;
 
-	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
-	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
+	if (ndcr & NDCR_PAGE_SZ) {
+		host->page_size = 2048;
+		host->read_id_bytes = 4;
+	} else {
+		host->page_size = 512;
+		host->read_id_bytes = 2;
+	}
+
+	host->reg_ndcr = ndcr & ~NDCR_INT_MASK;
+	host->cmdset = &default_cmdset;
+
+	host->ndtr0cs0 = nand_readl(info, NDTR0CS0);
+	host->ndtr1cs0 = nand_readl(info, NDTR1CS0);
 
 	return 0;
 }
@@ -847,7 +898,6 @@
 		return -ENOMEM;
 	}
 
-	info->data_buff_size = MAX_BUFF_SIZE;
 	info->data_desc = (void *)info->data_buff + data_desc_offset;
 	info->data_desc_addr = info->data_buff_phys + data_desc_offset;
 
@@ -855,7 +905,7 @@
 				pxa3xx_nand_data_dma_irq, info);
 	if (info->data_dma_ch < 0) {
 		dev_err(&pdev->dev, "failed to request data dma\n");
-		dma_free_coherent(&pdev->dev, info->data_buff_size,
+		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
 				info->data_buff, info->data_buff_phys);
 		return info->data_dma_ch;
 	}
@@ -865,24 +915,28 @@
 
 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
 {
-	struct mtd_info *mtd = info->mtd;
-	struct nand_chip *chip = mtd->priv;
-
+	struct mtd_info *mtd;
+	int ret;
+	mtd = info->host[info->cs]->mtd;
 	/* use the common timing to make a try */
-	pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
-	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
+	ret = pxa3xx_nand_config_flash(info, &builtin_flash_types[0]);
+	if (ret)
+		return ret;
+
+	pxa3xx_nand_cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
 	if (info->is_ready)
-		return 1;
-	else
 		return 0;
+
+	return -ENODEV;
 }
 
 static int pxa3xx_nand_scan(struct mtd_info *mtd)
 {
-	struct pxa3xx_nand_info *info = mtd->priv;
+	struct pxa3xx_nand_host *host = mtd->priv;
+	struct pxa3xx_nand_info *info = host->info_data;
 	struct platform_device *pdev = info->pdev;
 	struct pxa3xx_nand_platform_data *pdata = pdev->dev.platform_data;
-	struct nand_flash_dev pxa3xx_flash_ids[2] = { {NULL,}, {NULL,} };
+	struct nand_flash_dev pxa3xx_flash_ids[2], *def = NULL;
 	const struct pxa3xx_nand_flash *f = NULL;
 	struct nand_chip *chip = mtd->priv;
 	uint32_t id = -1;
@@ -893,22 +947,20 @@
 		goto KEEP_CONFIG;
 
 	ret = pxa3xx_nand_sensing(info);
-	if (!ret) {
-		kfree(mtd);
-		info->mtd = NULL;
-		printk(KERN_INFO "There is no nand chip on cs 0!\n");
+	if (ret) {
+		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
+			 info->cs);
 
-		return -EINVAL;
+		return ret;
 	}
 
 	chip->cmdfunc(mtd, NAND_CMD_READID, 0, 0);
 	id = *((uint16_t *)(info->data_buff));
 	if (id != 0)
-		printk(KERN_INFO "Detect a flash id %x\n", id);
+		dev_info(&info->pdev->dev, "Detect a flash id %x\n", id);
 	else {
-		kfree(mtd);
-		info->mtd = NULL;
-		printk(KERN_WARNING "Read out ID 0, potential timing set wrong!!\n");
+		dev_warn(&info->pdev->dev,
+			 "Read out ID 0, potential timing set wrong!!\n");
 
 		return -EINVAL;
 	}
@@ -926,14 +978,17 @@
 	}
 
 	if (i >= (ARRAY_SIZE(builtin_flash_types) + pdata->num_flash - 1)) {
-		kfree(mtd);
-		info->mtd = NULL;
-		printk(KERN_ERR "ERROR!! flash not defined!!!\n");
+		dev_err(&info->pdev->dev, "ERROR!! flash not defined!!!\n");
 
 		return -EINVAL;
 	}
 
-	pxa3xx_nand_config_flash(info, f);
+	ret = pxa3xx_nand_config_flash(info, f);
+	if (ret) {
+		dev_err(&info->pdev->dev, "ERROR! Configure failed\n");
+		return ret;
+	}
+
 	pxa3xx_flash_ids[0].name = f->name;
 	pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff;
 	pxa3xx_flash_ids[0].pagesize = f->page_size;
@@ -942,62 +997,78 @@
 	pxa3xx_flash_ids[0].erasesize = f->page_size * f->page_per_block;
 	if (f->flash_width == 16)
 		pxa3xx_flash_ids[0].options = NAND_BUSWIDTH_16;
+	pxa3xx_flash_ids[1].name = NULL;
+	def = pxa3xx_flash_ids;
 KEEP_CONFIG:
-	if (nand_scan_ident(mtd, 1, pxa3xx_flash_ids))
+	chip->ecc.mode = NAND_ECC_HW;
+	chip->ecc.size = host->page_size;
+
+	chip->options = NAND_NO_AUTOINCR;
+	chip->options |= NAND_NO_READRDY;
+	if (host->reg_ndcr & NDCR_DWIDTH_M)
+		chip->options |= NAND_BUSWIDTH_16;
+
+	if (nand_scan_ident(mtd, 1, def))
 		return -ENODEV;
 	/* calculate addressing information */
-	info->col_addr_cycles = (mtd->writesize >= 2048) ? 2 : 1;
+	if (mtd->writesize >= 2048)
+		host->col_addr_cycles = 2;
+	else
+		host->col_addr_cycles = 1;
+
 	info->oob_buff = info->data_buff + mtd->writesize;
 	if ((mtd->size >> chip->page_shift) > 65536)
-		info->row_addr_cycles = 3;
+		host->row_addr_cycles = 3;
 	else
-		info->row_addr_cycles = 2;
+		host->row_addr_cycles = 2;
+
 	mtd->name = mtd_names[0];
-	chip->ecc.mode = NAND_ECC_HW;
-	chip->ecc.size = f->page_size;
-
-	chip->options = (f->flash_width == 16) ? NAND_BUSWIDTH_16 : 0;
-	chip->options |= NAND_NO_AUTOINCR;
-	chip->options |= NAND_NO_READRDY;
-
 	return nand_scan_tail(mtd);
 }
 
-static
-struct pxa3xx_nand_info *alloc_nand_resource(struct platform_device *pdev)
+static int alloc_nand_resource(struct platform_device *pdev)
 {
+	struct pxa3xx_nand_platform_data *pdata;
 	struct pxa3xx_nand_info *info;
+	struct pxa3xx_nand_host *host;
 	struct nand_chip *chip;
 	struct mtd_info *mtd;
 	struct resource *r;
-	int ret, irq;
+	int ret, irq, cs;
 
-	mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct pxa3xx_nand_info),
-			GFP_KERNEL);
-	if (!mtd) {
+	pdata = pdev->dev.platform_data;
+	info = kzalloc(sizeof(*info) + (sizeof(*mtd) +
+		       sizeof(*host)) * pdata->num_cs, GFP_KERNEL);
+	if (!info) {
 		dev_err(&pdev->dev, "failed to allocate memory\n");
-		return NULL;
+		return -ENOMEM;
 	}
 
-	info = (struct pxa3xx_nand_info *)(&mtd[1]);
-	chip = (struct nand_chip *)(&mtd[1]);
 	info->pdev = pdev;
-	info->mtd = mtd;
-	mtd->priv = info;
-	mtd->owner = THIS_MODULE;
+	for (cs = 0; cs < pdata->num_cs; cs++) {
+		mtd = (struct mtd_info *)((unsigned int)&info[1] +
+		      (sizeof(*mtd) + sizeof(*host)) * cs);
+		chip = (struct nand_chip *)(&mtd[1]);
+		host = (struct pxa3xx_nand_host *)chip;
+		info->host[cs] = host;
+		host->mtd = mtd;
+		host->cs = cs;
+		host->info_data = info;
+		mtd->priv = host;
+		mtd->owner = THIS_MODULE;
 
-	chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
-	chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
-	chip->controller        = &info->controller;
-	chip->waitfunc		= pxa3xx_nand_waitfunc;
-	chip->select_chip	= pxa3xx_nand_select_chip;
-	chip->dev_ready		= pxa3xx_nand_dev_ready;
-	chip->cmdfunc		= pxa3xx_nand_cmdfunc;
-	chip->read_word		= pxa3xx_nand_read_word;
-	chip->read_byte		= pxa3xx_nand_read_byte;
-	chip->read_buf		= pxa3xx_nand_read_buf;
-	chip->write_buf		= pxa3xx_nand_write_buf;
-	chip->verify_buf	= pxa3xx_nand_verify_buf;
+		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
+		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
+		chip->controller        = &info->controller;
+		chip->waitfunc		= pxa3xx_nand_waitfunc;
+		chip->select_chip	= pxa3xx_nand_select_chip;
+		chip->cmdfunc		= pxa3xx_nand_cmdfunc;
+		chip->read_word		= pxa3xx_nand_read_word;
+		chip->read_byte		= pxa3xx_nand_read_byte;
+		chip->read_buf		= pxa3xx_nand_read_buf;
+		chip->write_buf		= pxa3xx_nand_write_buf;
+		chip->verify_buf	= pxa3xx_nand_verify_buf;
+	}
 
 	spin_lock_init(&chip->controller->lock);
 	init_waitqueue_head(&chip->controller->wq);
@@ -1070,13 +1141,13 @@
 
 	platform_set_drvdata(pdev, info);
 
-	return info;
+	return 0;
 
 fail_free_buf:
 	free_irq(irq, info);
 	if (use_dma) {
 		pxa_free_dma(info->data_dma_ch);
-		dma_free_coherent(&pdev->dev, info->data_buff_size,
+		dma_free_coherent(&pdev->dev, MAX_BUFF_SIZE,
 			info->data_buff, info->data_buff_phys);
 	} else
 		kfree(info->data_buff);
@@ -1088,17 +1159,21 @@
 	clk_disable(info->clk);
 	clk_put(info->clk);
 fail_free_mtd:
-	kfree(mtd);
-	return NULL;
+	kfree(info);
+	return ret;
 }
 
 static int pxa3xx_nand_remove(struct platform_device *pdev)
 {
 	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
-	struct mtd_info *mtd = info->mtd;
+	struct pxa3xx_nand_platform_data *pdata;
 	struct resource *r;
-	int irq;
+	int irq, cs;
 
+	if (!info)
+		return 0;
+
+	pdata = pdev->dev.platform_data;
 	platform_set_drvdata(pdev, NULL);
 
 	irq = platform_get_irq(pdev, 0);
@@ -1106,7 +1181,7 @@
 		free_irq(irq, info);
 	if (use_dma) {
 		pxa_free_dma(info->data_dma_ch);
-		dma_free_writecombine(&pdev->dev, info->data_buff_size,
+		dma_free_writecombine(&pdev->dev, MAX_BUFF_SIZE,
 				info->data_buff, info->data_buff_phys);
 	} else
 		kfree(info->data_buff);
@@ -1118,10 +1193,9 @@
 	clk_disable(info->clk);
 	clk_put(info->clk);
 
-	if (mtd) {
-		mtd_device_unregister(mtd);
-		kfree(mtd);
-	}
+	for (cs = 0; cs < pdata->num_cs; cs++)
+		nand_release(info->host[cs]->mtd);
+	kfree(info);
 	return 0;
 }
 
@@ -1129,6 +1203,7 @@
 {
 	struct pxa3xx_nand_platform_data *pdata;
 	struct pxa3xx_nand_info *info;
+	int ret, cs, probe_success;
 
 	pdata = pdev->dev.platform_data;
 	if (!pdata) {
@@ -1136,52 +1211,88 @@
 		return -ENODEV;
 	}
 
-	info = alloc_nand_resource(pdev);
-	if (info == NULL)
-		return -ENOMEM;
+	ret = alloc_nand_resource(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "alloc nand resource failed\n");
+		return ret;
+	}
 
-	if (pxa3xx_nand_scan(info->mtd)) {
-		dev_err(&pdev->dev, "failed to scan nand\n");
+	info = platform_get_drvdata(pdev);
+	probe_success = 0;
+	for (cs = 0; cs < pdata->num_cs; cs++) {
+		info->cs = cs;
+		ret = pxa3xx_nand_scan(info->host[cs]->mtd);
+		if (ret) {
+			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
+				cs);
+			continue;
+		}
+
+		ret = mtd_device_parse_register(info->host[cs]->mtd, NULL, 0,
+				pdata->parts[cs], pdata->nr_parts[cs]);
+		if (!ret)
+			probe_success = 1;
+	}
+
+	if (!probe_success) {
 		pxa3xx_nand_remove(pdev);
 		return -ENODEV;
 	}
 
-	if (mtd_has_cmdlinepart()) {
-		const char *probes[] = { "cmdlinepart", NULL };
-		struct mtd_partition *parts;
-		int nr_parts;
-
-		nr_parts = parse_mtd_partitions(info->mtd, probes, &parts, 0);
-
-		if (nr_parts)
-			return mtd_device_register(info->mtd, parts, nr_parts);
-	}
-
-	return mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
+	return 0;
 }
 
 #ifdef CONFIG_PM
 static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
 {
 	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
-	struct mtd_info *mtd = info->mtd;
+	struct pxa3xx_nand_platform_data *pdata;
+	struct mtd_info *mtd;
+	int cs;
 
+	pdata = pdev->dev.platform_data;
 	if (info->state) {
 		dev_err(&pdev->dev, "driver busy, state = %d\n", info->state);
 		return -EAGAIN;
 	}
 
+	for (cs = 0; cs < pdata->num_cs; cs++) {
+		mtd = info->host[cs]->mtd;
+		mtd->suspend(mtd);
+	}
+
 	return 0;
 }
 
 static int pxa3xx_nand_resume(struct platform_device *pdev)
 {
 	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
-	struct mtd_info *mtd = info->mtd;
+	struct pxa3xx_nand_platform_data *pdata;
+	struct mtd_info *mtd;
+	int cs;
 
-	nand_writel(info, NDTR0CS0, info->ndtr0cs0);
-	nand_writel(info, NDTR1CS0, info->ndtr1cs0);
-	clk_enable(info->clk);
+	pdata = pdev->dev.platform_data;
+	/* We don't want to handle interrupt without calling mtd routine */
+	disable_int(info, NDCR_INT_MASK);
+
+	/*
+	 * Set the chip select to an invalid value so that the driver
+	 * reloads the timing registers for the current chip select at
+	 * the beginning of cmdfunc.
+	 */
+	info->cs = 0xff;
+
+	/*
+	 * As the spec says, NDSR is updated to 0x1800 when the nand
+	 * clock is disabled and re-enabled. To prevent this from
+	 * corrupting the driver's state machine, clear all status
+	 * bits before resuming.
+	 */
+	nand_writel(info, NDSR, NDSR_MASK);
+	for (cs = 0; cs < pdata->num_cs; cs++) {
+		mtd = info->host[cs]->mtd;
+		mtd->resume(mtd);
+	}
 
 	return 0;
 }
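
The heart of the pxa3xx rework is the split between the shared pxa3xx_nand_info (controller state) and one pxa3xx_nand_host per chip select, each caching its own NDTR0/NDTR1 timings. The pattern below is condensed from the driver's cmdfunc and is not a standalone build; nand_writel() and the field names are the driver's own, the helper name is hypothetical:

static void example_switch_cs(struct pxa3xx_nand_info *info,
			      struct pxa3xx_nand_host *host)
{
	if (info->cs == host->cs)
		return;			/* timings for this chip already loaded */

	/* A different chip is now active: reload its cached timings. */
	info->cs = host->cs;
	nand_writel(info, NDTR0CS0, host->ndtr0cs0);
	nand_writel(info, NDTR1CS0, host->ndtr1cs0);
}
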
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index cae2e01..f20f393 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -1027,7 +1027,7 @@
 }
 
 #ifdef CONFIG_PM
-int r852_suspend(struct device *device)
+static int r852_suspend(struct device *device)
 {
 	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
 
@@ -1048,7 +1048,7 @@
 	return 0;
 }
 
-int r852_resume(struct device *device)
+static int r852_resume(struct device *device)
 {
 	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
 
@@ -1092,7 +1092,7 @@
 
 MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
 
-SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
 
 static struct pci_driver r852_pci_driver = {
 	.name		= DRV_NAME,
diff --git a/drivers/mtd/nand/rtc_from4.c b/drivers/mtd/nand/rtc_from4.c
index c9f9127..f309add 100644
--- a/drivers/mtd/nand/rtc_from4.c
+++ b/drivers/mtd/nand/rtc_from4.c
@@ -351,7 +351,7 @@
 		return 0;
 	}
 
-	/* Read the syndrom pattern from the FPGA and correct the bitorder */
+	/* Read the syndrome pattern from the FPGA and correct the bitorder */
 	rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC);
 	for (i = 0; i < 8; i++) {
 		ecc[i] = bitrev8(*rs_ecc);
@@ -380,7 +380,7 @@
 	/* Let the library code do its magic. */
 	res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL);
 	if (res > 0) {
-		DEBUG(MTD_DEBUG_LEVEL0, "rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
+		pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res);
 	}
 	return res;
 }
@@ -444,7 +444,6 @@
 		len = mtd->writesize;
 		buf = kmalloc(len, GFP_KERNEL);
 		if (!buf) {
-			printk(KERN_ERR "rtc_from4_errstat: Out of memory!\n");
 			er_stat = 1;
 			goto out;
 		}
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 4405468..868685d 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -723,7 +723,7 @@
 
 	/* free the common resources */
 
-	if (info->clk != NULL && !IS_ERR(info->clk)) {
+	if (!IS_ERR(info->clk)) {
 		s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
 		clk_put(info->clk);
 	}
@@ -744,26 +744,15 @@
 	return 0;
 }
 
-const char *part_probes[] = { "cmdlinepart", NULL };
 static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
 				      struct s3c2410_nand_mtd *mtd,
 				      struct s3c2410_nand_set *set)
 {
-	struct mtd_partition *part_info;
-	int nr_part = 0;
+	if (set)
+		mtd->mtd.name = set->name;
 
-	if (set == NULL)
-		return mtd_device_register(&mtd->mtd, NULL, 0);
-
-	mtd->mtd.name = set->name;
-	nr_part = parse_mtd_partitions(&mtd->mtd, part_probes, &part_info, 0);
-
-	if (nr_part <= 0 && set->nr_partitions > 0) {
-		nr_part = set->nr_partitions;
-		part_info = set->partitions;
-	}
-
-	return mtd_device_register(&mtd->mtd, part_info, nr_part);
+	return mtd_device_parse_register(&mtd->mtd, NULL, 0,
+			set->partitions, set->nr_partitions);
 }
 
 /**
@@ -880,8 +869,10 @@
 	/* If you use u-boot BBT creation code, specifying this flag will
 	 * let the kernel fish out the BBT from the NAND, and also skip the
 	 * full NAND scan that can take 1/2s or so. Little things... */
-	if (set->flash_bbt)
-		chip->options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
+	if (set->flash_bbt) {
+		chip->bbt_options |= NAND_BBT_USE_FLASH;
+		chip->options |= NAND_SKIP_BBTSCAN;
+	}
 }
 
 /**
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 19e24ed..619d2a5 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -103,16 +103,12 @@
 	return readb(sharpsl->io + ECCCNTR) != 0;
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 /*
  * Main initialization routine
  */
 static int __devinit sharpsl_nand_probe(struct platform_device *pdev)
 {
 	struct nand_chip *this;
-	struct mtd_partition *sharpsl_partition_info;
-	int nr_partitions;
 	struct resource *r;
 	int err = 0;
 	struct sharpsl_nand *sharpsl;
@@ -184,14 +180,9 @@
 
 	/* Register the partitions */
 	sharpsl->mtd.name = "sharpsl-nand";
-	nr_partitions = parse_mtd_partitions(&sharpsl->mtd, part_probes, &sharpsl_partition_info, 0);
-	if (nr_partitions <= 0) {
-		nr_partitions = data->nr_partitions;
-		sharpsl_partition_info = data->partitions;
-	}
 
-	err = mtd_device_register(&sharpsl->mtd, sharpsl_partition_info,
-				  nr_partitions);
+	err = mtd_device_parse_register(&sharpsl->mtd, NULL, 0,
+			data->partitions, data->nr_partitions);
 	if (err)
 		goto err_add;
 
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 4346971..32ae5af 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -48,7 +48,7 @@
 
 	/* As long as this function is called on erase block boundaries
 		it will work correctly for 256 byte nand */
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ops.ooboffs = 0;
 	ops.ooblen = mtd->oobsize;
 	ops.oobbuf = (void *)&oob;
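
sm_common picks up the mechanical rename of the OOB mode constants (MTD_OOB_PLACE becomes MTD_OPS_PLACE_OOB, and likewise for the AUTO and RAW modes seen later in this series). A minimal sketch of an OOB-only read with the new name; the helper and offsets are illustrative:

#include <linux/mtd/mtd.h>

static int example_read_oob(struct mtd_info *mtd, loff_t offs, u_char *buf)
{
	struct mtd_oob_ops ops;

	ops.mode    = MTD_OPS_PLACE_OOB;	/* was MTD_OOB_PLACE */
	ops.ooboffs = 0;
	ops.ooblen  = mtd->oobsize;
	ops.oobbuf  = buf;
	ops.datbuf  = NULL;			/* OOB only, no main-area data */

	return mtd->read_oob(mtd, offs, &ops);
}
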
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index ca2d055..0fb24f9 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -155,8 +155,6 @@
 	return 1;
 }
 
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
 /*
  * Probe for the NAND device.
  */
@@ -166,8 +164,7 @@
 	struct mtd_info *mtd;
 	struct nand_chip *nand_chip;
 	int res;
-	struct mtd_partition *partitions = NULL;
-	int num_partitions = 0;
+	struct mtd_part_parser_data ppdata;
 
 	/* Allocate memory for the device structure (and zero it) */
 	host = kzalloc(sizeof(struct socrates_nand_host), GFP_KERNEL);
@@ -193,6 +190,7 @@
 	mtd->name = "socrates_nand";
 	mtd->owner = THIS_MODULE;
 	mtd->dev.parent = &ofdev->dev;
+	ppdata.of_node = ofdev->dev.of_node;
 
 	/*should never be accessed directly */
 	nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
@@ -225,30 +223,10 @@
 		goto out;
 	}
 
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-	num_partitions = parse_mtd_partitions(mtd, part_probes,
-					      &partitions, 0);
-	if (num_partitions < 0) {
-		res = num_partitions;
-		goto release;
-	}
-#endif
-
-	if (num_partitions == 0) {
-		num_partitions = of_mtd_parse_partitions(&ofdev->dev,
-							 ofdev->dev.of_node,
-							 &partitions);
-		if (num_partitions < 0) {
-			res = num_partitions;
-			goto release;
-		}
-	}
-
-	res = mtd_device_register(mtd, partitions, num_partitions);
+	res = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
 	if (!res)
 		return res;
 
-release:
 	nand_release(mtd);
 
 out:
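
socrates_nand shows the device-tree flavour of the conversion: rather than calling of_mtd_parse_partitions() itself, the driver passes its OF node to the core through mtd_part_parser_data, and the ofpart parser (introduced below) does the rest. A sketch of the idiom with a hypothetical helper:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>

static int example_register_of(struct mtd_info *mtd, struct device_node *np)
{
	struct mtd_part_parser_data ppdata;

	/* Hand the OF node to the core so parsers such as ofpart can use it. */
	ppdata.of_node = np;
	return mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
}
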
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 11e8371..beebd95 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -121,9 +121,6 @@
 
 #define mtd_to_tmio(m)			container_of(m, struct tmio_nand, mtd)
 
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-#endif
 
 /*--------------------------------------------------------------------------*/
 
@@ -381,8 +378,6 @@
 	struct tmio_nand *tmio;
 	struct mtd_info *mtd;
 	struct nand_chip *nand_chip;
-	struct mtd_partition *parts;
-	int nbparts = 0;
 	int retval;
 
 	if (data == NULL)
@@ -461,15 +456,9 @@
 		goto err_scan;
 	}
 	/* Register the partitions */
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-	nbparts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
-#endif
-	if (nbparts <= 0 && data) {
-		parts = data->partition;
-		nbparts = data->num_partitions;
-	}
-
-	retval = mtd_device_register(mtd, parts, nbparts);
+	retval = mtd_device_parse_register(mtd, NULL, 0,
+			data ? data->partition : NULL,
+			data ? data->num_partitions : 0);
 	if (!retval)
 		return retval;
 
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index bfba4e3..ace46fd 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -74,7 +74,6 @@
 	unsigned char hold;	/* in gbusclock */
 	unsigned char spw;	/* in gbusclock */
 	struct nand_hw_control hw_control;
-	struct mtd_partition *parts[MAX_TXX9NDFMC_DEV];
 };
 
 static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
@@ -287,7 +286,6 @@
 static int __init txx9ndfmc_probe(struct platform_device *dev)
 {
 	struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
-	static const char *probes[] = { "cmdlinepart", NULL };
 	int hold, spw;
 	int i;
 	struct txx9ndfmc_drvdata *drvdata;
@@ -333,7 +331,6 @@
 		struct txx9ndfmc_priv *txx9_priv;
 		struct nand_chip *chip;
 		struct mtd_info *mtd;
-		int nr_parts;
 
 		if (!(plat->ch_mask & (1 << i)))
 			continue;
@@ -393,9 +390,7 @@
 		}
 		mtd->name = txx9_priv->mtdname;
 
-		nr_parts = parse_mtd_partitions(mtd, probes,
-						&drvdata->parts[i], 0);
-		mtd_device_register(mtd, drvdata->parts[i], nr_parts);
+		mtd_device_parse_register(mtd, NULL, 0, NULL, 0);
 		drvdata->mtds[i] = mtd;
 	}
 
@@ -421,7 +416,6 @@
 		txx9_priv = chip->priv;
 
 		nand_release(mtd);
-		kfree(drvdata->parts[i]);
 		kfree(txx9_priv->mtdname);
 		kfree(txx9_priv);
 	}
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index b155666..cda77b5 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -63,14 +63,12 @@
 		return;
 	}
 
-	DEBUG(MTD_DEBUG_LEVEL1, "NFTL: add_mtd for %s\n", mtd->name);
+	pr_debug("NFTL: add_mtd for %s\n", mtd->name);
 
 	nftl = kzalloc(sizeof(struct NFTLrecord), GFP_KERNEL);
 
-	if (!nftl) {
-		printk(KERN_WARNING "NFTL: out of memory for data structures\n");
+	if (!nftl)
 		return;
-	}
 
 	nftl->mbd.mtd = mtd;
 	nftl->mbd.devnum = -1;
@@ -132,7 +130,7 @@
 {
 	struct NFTLrecord *nftl = (void *)dev;
 
-	DEBUG(MTD_DEBUG_LEVEL1, "NFTL: remove_dev (i=%d)\n", dev->devnum);
+	pr_debug("NFTL: remove_dev (i=%d)\n", dev->devnum);
 
 	del_mtd_blktrans_dev(dev);
 	kfree(nftl->ReplUnitTable);
@@ -149,7 +147,7 @@
 	struct mtd_oob_ops ops;
 	int res;
 
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ops.ooboffs = offs & mask;
 	ops.ooblen = len;
 	ops.oobbuf = buf;
@@ -170,7 +168,7 @@
 	struct mtd_oob_ops ops;
 	int res;
 
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ops.ooboffs = offs & mask;
 	ops.ooblen = len;
 	ops.oobbuf = buf;
@@ -193,7 +191,7 @@
 	struct mtd_oob_ops ops;
 	int res;
 
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ops.ooboffs = offs & mask;
 	ops.ooblen = mtd->oobsize;
 	ops.oobbuf = oob;
@@ -220,7 +218,7 @@
 
 	/* Normally, we force a fold to happen before we run out of free blocks completely */
 	if (!desperate && nftl->numfreeEUNs < 2) {
-		DEBUG(MTD_DEBUG_LEVEL1, "NFTL_findfreeblock: there are too few free EUNs\n");
+		pr_debug("NFTL_findfreeblock: there are too few free EUNs\n");
 		return BLOCK_NIL;
 	}
 
@@ -291,8 +289,7 @@
 			if (block == 2) {
 				foldmark = oob.u.c.FoldMark | oob.u.c.FoldMark1;
 				if (foldmark == FOLD_MARK_IN_PROGRESS) {
-					DEBUG(MTD_DEBUG_LEVEL1,
-					      "Write Inhibited on EUN %d\n", thisEUN);
+					pr_debug("Write Inhibited on EUN %d\n", thisEUN);
 					inplace = 0;
 				} else {
 					/* There's no other reason not to do inplace,
@@ -357,7 +354,7 @@
 			if (BlockLastState[block] != SECTOR_FREE &&
 			    BlockMap[block] != BLOCK_NIL &&
 			    BlockMap[block] != targetEUN) {
-				DEBUG(MTD_DEBUG_LEVEL1, "Setting inplace to 0. VUC %d, "
+				pr_debug("Setting inplace to 0. VUC %d, "
 				      "block %d was %x lastEUN, "
 				      "and is in EUN %d (%s) %d\n",
 				      thisVUC, block, BlockLastState[block],
@@ -373,14 +370,14 @@
 		    pendingblock < ((thisVUC + 1)* (nftl->EraseSize / 512)) &&
 		    BlockLastState[pendingblock - (thisVUC * (nftl->EraseSize / 512))] !=
 		    SECTOR_FREE) {
-			DEBUG(MTD_DEBUG_LEVEL1, "Pending write not free in EUN %d. "
+			pr_debug("Pending write not free in EUN %d. "
 			      "Folding out of place.\n", targetEUN);
 			inplace = 0;
 		}
 	}
 
 	if (!inplace) {
-		DEBUG(MTD_DEBUG_LEVEL1, "Cannot fold Virtual Unit Chain %d in place. "
+		pr_debug("Cannot fold Virtual Unit Chain %d in place. "
 		      "Trying out-of-place\n", thisVUC);
 		/* We need to find a targetEUN to fold into. */
 		targetEUN = NFTL_findfreeblock(nftl, 1);
@@ -410,7 +407,7 @@
 	   and the Erase Unit into which we are supposed to be copying.
 	   Go for it.
 	*/
-	DEBUG(MTD_DEBUG_LEVEL1,"Folding chain %d into unit %d\n", thisVUC, targetEUN);
+	pr_debug("Folding chain %d into unit %d\n", thisVUC, targetEUN);
 	for (block = 0; block < nftl->EraseSize / 512 ; block++) {
 		unsigned char movebuf[512];
 		int ret;
@@ -428,7 +425,7 @@
 
 		ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
 				512, &retlen, movebuf);
-		if (ret < 0 && ret != -EUCLEAN) {
+		if (ret < 0 && !mtd_is_bitflip(ret)) {
 			ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block])
 					+ (block * 512), 512, &retlen,
 					movebuf);
@@ -457,7 +454,7 @@
 	   has duplicate chains, we need to free one of the chains because it's not necessary any more.
 	*/
 	thisEUN = nftl->EUNtable[thisVUC];
-	DEBUG(MTD_DEBUG_LEVEL1,"Want to erase\n");
+	pr_debug("Want to erase\n");
 
 	/* For each block in the old chain (except the targetEUN of course),
 	   free it and make it available for future use */
@@ -570,7 +567,7 @@
 				      (writeEUN * nftl->EraseSize) + blockofs,
 				      8, &retlen, (char *)&bci);
 
-			DEBUG(MTD_DEBUG_LEVEL2, "Status of block %d in EUN %d is %x\n",
+			pr_debug("Status of block %d in EUN %d is %x\n",
 			      block , writeEUN, le16_to_cpu(bci.Status));
 
 			status = bci.Status | bci.Status1;
@@ -623,7 +620,7 @@
 				   but they are reserved for when we're
 				   desperate. Well, now we're desperate.
 				*/
-				DEBUG(MTD_DEBUG_LEVEL1, "Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
+				pr_debug("Using desperate==1 to find free EUN to accommodate write to VUC %d\n", thisVUC);
 				writeEUN = NFTL_findfreeblock(nftl, 1);
 			}
 			if (writeEUN == BLOCK_NIL) {
@@ -776,7 +773,7 @@
 		size_t retlen;
 		int res = mtd->read(mtd, ptr, 512, &retlen, buffer);
 
-		if (res < 0 && res != -EUCLEAN)
+		if (res < 0 && !mtd_is_bitflip(res))
 			return -EIO;
 	}
 	return 0;
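
nftlcore also switches from open-coded errno tests to the new predicates: mtd_is_bitflip() matches -EUCLEAN (bitflips that ECC already corrected) and mtd_is_eccerr() matches -EBADMSG (uncorrectable errors). A small sketch of the intended use, with a hypothetical helper:

#include <linux/mtd/mtd.h>

/* Decide whether the result of an mtd read is usable data. */
static bool example_read_usable(int res)
{
	if (res < 0 && !mtd_is_bitflip(res))
		return false;	/* hard I/O or uncorrectable ECC failure */

	return true;		/* clean read, or bitflips already corrected */
}
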
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index e3cd1ff..ac40925 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -32,7 +32,7 @@
 
 /* find_boot_record: Find the NFTL Media Header and its Spare copy which contains the
  *	various device information of the NFTL partition and Bad Unit Table. Update
- *	the ReplUnitTable[] table accroding to the Bad Unit Table. ReplUnitTable[]
+ *	the ReplUnitTable[] table according to the Bad Unit Table. ReplUnitTable[]
  *	is used for management of Erase Unit in other routines in nftl.c and nftlmount.c
  */
 static int find_boot_record(struct NFTLrecord *nftl)
@@ -297,7 +297,7 @@
  *
  * Return: 0 when succeed, -1 on error.
  *
- *  ToDo: 1. Is it neceressary to check_free_sector after erasing ??
+ *  ToDo: 1. Is it necessary to check_free_sector after erasing ??
  */
 int NFTL_formatblock(struct NFTLrecord *nftl, int block)
 {
@@ -337,7 +337,7 @@
 		nb_erases = le32_to_cpu(uci.WearInfo);
 		nb_erases++;
 
-		/* wrap (almost impossible with current flashs) or free block */
+		/* wrap (almost impossible with current flash) or free block */
 		if (nb_erases == 0)
 			nb_erases = 1;
 
@@ -363,10 +363,10 @@
  *	Mark as 'IGNORE' each incorrect sector. This check is only done if the chain
  *	was being folded when NFTL was interrupted.
  *
- *	The check_free_sectors in this function is neceressary. There is a possible
+ *	The check_free_sectors in this function is necessary. There is a possible
  *	situation that after writing the Data area, the Block Control Information is
  *	not updated according (due to power failure or something) which leaves the block
- *	in an umconsistent state. So we have to check if a block is really FREE in this
+ *	in an inconsistent state. So we have to check if a block is really FREE in this
  *	case. */
 static void check_sectors_in_chain(struct NFTLrecord *nftl, unsigned int first_block)
 {
@@ -428,7 +428,7 @@
 
 	for (;;) {
 		length++;
-		/* avoid infinite loops, although this is guaranted not to
+		/* avoid infinite loops, although this is guaranteed not to
 		   happen because of the previous checks */
 		if (length >= nftl->nb_blocks) {
 			printk("nftl: length too long %d !\n", length);
@@ -447,11 +447,11 @@
 /* format_chain: Format an invalid Virtual Unit chain. It frees all the Erase Units in a
  *	Virtual Unit Chain, i.e. all the units are disconnected.
  *
- *	It is not stricly correct to begin from the first block of the chain because
+ *	It is not strictly correct to begin from the first block of the chain because
  *	if we stop the code, we may see again a valid chain if there was a first_block
  *	flag in a block inside it. But is it really a problem ?
  *
- * FixMe: Figure out what the last statesment means. What if power failure when we are
+ * FixMe: Figure out what the last statement means. What if power failure when we are
  *	in the for (;;) loop formatting blocks ??
  */
 static void format_chain(struct NFTLrecord *nftl, unsigned int first_block)
@@ -485,7 +485,7 @@
  *	totally free (only 0xff).
  *
  * Definition: Free Erase Unit -- A properly erased/formatted Free Erase Unit should have meet the
- *	following critia:
+ *	following criteria:
  *	1. */
 static int check_and_mark_free_block(struct NFTLrecord *nftl, int block)
 {
@@ -502,7 +502,7 @@
 	erase_mark = le16_to_cpu ((h1.EraseMark | h1.EraseMark1));
 	if (erase_mark != ERASE_MARK) {
 		/* if no erase mark, the block must be totally free. This is
-		   possible in two cases : empty filsystem or interrupted erase (very unlikely) */
+		   possible in two cases : empty filesystem or interrupted erase (very unlikely) */
 		if (check_free_sectors (nftl, block * nftl->EraseSize, nftl->EraseSize, 1) != 0)
 			return -1;
 
@@ -544,7 +544,7 @@
 /* get_fold_mark: Read fold mark from Unit Control Information #2, we use FOLD_MARK_IN_PROGRESS
  *	to indicate that we are in the progression of a Virtual Unit Chain folding. If the UCI #2
  *	is FOLD_MARK_IN_PROGRESS when mounting the NFTL, the (previous) folding process is interrupted
- *	for some reason. A clean up/check of the VUC is neceressary in this case.
+ *	for some reason. A clean up/check of the VUC is necessary in this case.
  *
  * WARNING: return 0 if read error
  */
@@ -657,7 +657,7 @@
 						printk("Block %d: incorrect logical block: %d expected: %d\n",
 						       block, logical_block, first_logical_block);
 						/* the chain is incorrect : we must format it,
-						   but we need to read it completly */
+						   but we need to read it completely */
 						do_format_chain = 1;
 					}
 					if (is_first_block) {
@@ -669,7 +669,7 @@
 							printk("Block %d: incorrectly marked as first block in chain\n",
 							       block);
 							/* the chain is incorrect : we must format it,
-							   but we need to read it completly */
+							   but we need to read it completely */
 							do_format_chain = 1;
 						} else {
 							printk("Block %d: folding in progress - ignoring first block flag\n",
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index a996718..64be8f08 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -20,14 +20,23 @@
 #include <linux/slab.h>
 #include <linux/mtd/partitions.h>
 
-int __devinit of_mtd_parse_partitions(struct device *dev,
-                                      struct device_node *node,
-                                      struct mtd_partition **pparts)
+static int parse_ofpart_partitions(struct mtd_info *master,
+				   struct mtd_partition **pparts,
+				   struct mtd_part_parser_data *data)
 {
+	struct device_node *node;
 	const char *partname;
 	struct device_node *pp;
 	int nr_parts, i;
 
+
+	if (!data)
+		return 0;
+
+	node = data->of_node;
+	if (!node)
+		return 0;
+
 	/* First count the subnodes */
 	pp = NULL;
 	nr_parts = 0;
@@ -69,7 +78,7 @@
 
 	if (!i) {
 		of_node_put(pp);
-		dev_err(dev, "No valid partition found on %s\n", node->full_name);
+		pr_err("No valid partition found on %s\n", node->full_name);
 		kfree(*pparts);
 		*pparts = NULL;
 		return -EINVAL;
@@ -77,6 +86,99 @@
 
 	return nr_parts;
 }
-EXPORT_SYMBOL(of_mtd_parse_partitions);
+
+static struct mtd_part_parser ofpart_parser = {
+	.owner = THIS_MODULE,
+	.parse_fn = parse_ofpart_partitions,
+	.name = "ofpart",
+};
+
+static int parse_ofoldpart_partitions(struct mtd_info *master,
+				      struct mtd_partition **pparts,
+				      struct mtd_part_parser_data *data)
+{
+	struct device_node *dp;
+	int i, plen, nr_parts;
+	const struct {
+		__be32 offset, len;
+	} *part;
+	const char *names;
+
+	if (!data)
+		return 0;
+
+	dp = data->of_node;
+	if (!dp)
+		return 0;
+
+	part = of_get_property(dp, "partitions", &plen);
+	if (!part)
+		return 0; /* No partitions found */
+
+	pr_warning("Device tree uses obsolete partition map binding: %s\n",
+			dp->full_name);
+
+	nr_parts = plen / sizeof(part[0]);
+
+	*pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
+	if (!*pparts)
+		return -ENOMEM;
+
+	names = of_get_property(dp, "partition-names", &plen);
+
+	for (i = 0; i < nr_parts; i++) {
+		(*pparts)[i].offset = be32_to_cpu(part->offset);
+		(*pparts)[i].size   = be32_to_cpu(part->len) & ~1;
+		/* bit 0 set signifies read only partition */
+		if (be32_to_cpu(part->len) & 1)
+			(*pparts)[i].mask_flags = MTD_WRITEABLE;
+
+		if (names && (plen > 0)) {
+			int len = strlen(names) + 1;
+
+			(*pparts)[i].name = (char *)names;
+			plen -= len;
+			names += len;
+		} else {
+			(*pparts)[i].name = "unnamed";
+		}
+
+		part++;
+	}
+
+	return nr_parts;
+}
+
+static struct mtd_part_parser ofoldpart_parser = {
+	.owner = THIS_MODULE,
+	.parse_fn = parse_ofoldpart_partitions,
+	.name = "ofoldpart",
+};
+
+static int __init ofpart_parser_init(void)
+{
+	int rc;
+	rc = register_mtd_parser(&ofpart_parser);
+	if (rc)
+		goto out;
+
+	rc = register_mtd_parser(&ofoldpart_parser);
+	if (!rc)
+		return 0;
+
+	deregister_mtd_parser(&ofoldpart_parser);
+out:
+	return rc;
+}
+
+module_init(ofpart_parser_init);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
+MODULE_AUTHOR("Vitaly Wool, David Gibson");
+/*
+ * When the MTD core cannot find a requested parser, it tries to load a
+ * module with the same name. Since this module also provides the
+ * ofoldpart parser, it needs the corresponding alias.
+ */
+MODULE_ALIAS("ofoldpart");
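
The ofpart rewrite doubles as a template for any partition parser: a parse_fn that fills *pparts and returns the number of partitions found, wired up through register_mtd_parser(). A skeletal sketch; every "example" name is hypothetical:

#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static int example_parse(struct mtd_info *master,
			 struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	/*
	 * Inspect the device here; return the number of partitions
	 * written to a kzalloc'ed *pparts, 0 if none were found, or
	 * a negative errno on error.
	 */
	return 0;
}

static struct mtd_part_parser example_parser = {
	.owner	  = THIS_MODULE,
	.parse_fn = example_parse,
	.name	  = "example",
};

static int __init example_parser_init(void)
{
	return register_mtd_parser(&example_parser);
}
module_init(example_parser_init);

MODULE_LICENSE("GPL");
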
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 2d70d35..7813095 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -30,11 +30,8 @@
  */
 #define DRIVER_NAME	"onenand-flash"
 
-static const char *part_probes[] = { "cmdlinepart", NULL,  };
-
 struct onenand_info {
 	struct mtd_info		mtd;
-	struct mtd_partition	*parts;
 	struct onenand_chip	onenand;
 };
 
@@ -73,13 +70,9 @@
 		goto out_iounmap;
 	}
 
-	err = parse_mtd_partitions(&info->mtd, part_probes, &info->parts, 0);
-	if (err > 0)
-		mtd_device_register(&info->mtd, info->parts, err);
-	else if (err <= 0 && pdata && pdata->parts)
-		mtd_device_register(&info->mtd, pdata->parts, pdata->nr_parts);
-	else
-		err = mtd_device_register(&info->mtd, NULL, 0);
+	err = mtd_device_parse_register(&info->mtd, NULL, 0,
+			pdata ? pdata->parts : NULL,
+			pdata ? pdata->nr_parts : 0);
 
 	platform_set_drvdata(pdev, info);
 
@@ -104,7 +97,6 @@
 	platform_set_drvdata(pdev, NULL);
 
 	if (info) {
-		mtd_device_unregister(&info->mtd);
 		onenand_release(&info->mtd);
 		release_mem_region(res->start, size);
 		iounmap(info->onenand.base);
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index 6a1d6d9..7e9ea68 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -57,7 +57,6 @@
 	unsigned long phys_base;
 	int gpio_irq;
 	struct mtd_info mtd;
-	struct mtd_partition *parts;
 	struct onenand_chip onenand;
 	struct completion irq_done;
 	struct completion dma_done;
@@ -67,8 +66,6 @@
 	struct regulator *regulator;
 };
 
-static const char *part_probes[] = { "cmdlinepart", NULL,  };
-
 static void omap2_onenand_dma_cb(int lch, u16 ch_status, void *data)
 {
 	struct omap2_onenand *c = data;
@@ -741,6 +738,7 @@
 		c->regulator = regulator_get(&pdev->dev, "vonenand");
 		if (IS_ERR(c->regulator)) {
 			dev_err(&pdev->dev,  "Failed to get regulator\n");
+			r = PTR_ERR(c->regulator);
 			goto err_release_dma;
 		}
 		c->onenand.enable = omap2_onenand_enable;
@@ -753,13 +751,9 @@
 	if ((r = onenand_scan(&c->mtd, 1)) < 0)
 		goto err_release_regulator;
 
-	r = parse_mtd_partitions(&c->mtd, part_probes, &c->parts, 0);
-	if (r > 0)
-		r = mtd_device_register(&c->mtd, c->parts, r);
-	else if (pdata->parts != NULL)
-		r = mtd_device_register(&c->mtd, pdata->parts, pdata->nr_parts);
-	else
-		r = mtd_device_register(&c->mtd, NULL, 0);
+	r = mtd_device_parse_register(&c->mtd, NULL, 0,
+			pdata ? pdata->parts : NULL,
+			pdata ? pdata->nr_parts : 0);
 	if (r)
 		goto err_release_onenand;
 
@@ -786,7 +780,6 @@
 err_free_cs:
 	gpmc_cs_free(c->gpmc_cs);
 err_kfree:
-	kfree(c->parts);
 	kfree(c);
 
 	return r;
@@ -809,7 +802,6 @@
 	iounmap(c->onenand.base);
 	release_mem_region(c->phys_base, ONENAND_IO_SIZE);
 	gpmc_cs_free(c->gpmc_cs);
-	kfree(c->parts);
 	kfree(c);
 
 	return 0;
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index ac9e959..a839473 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1015,7 +1015,7 @@
 }
 
 /**
- * onenand_transfer_auto_oob - [Internal] oob auto-placement transfer
+ * onenand_transfer_auto_oob - [INTERN] oob auto-placement transfer
  * @param mtd		MTD device structure
  * @param buf		destination address
  * @param column	oob offset to read from
@@ -1079,7 +1079,7 @@
 		return status;
 
 	/* check if we failed due to uncorrectable error */
-	if (status != -EBADMSG && status != ONENAND_BBT_READ_ECC_ERROR)
+	if (!mtd_is_eccerr(status) && status != ONENAND_BBT_READ_ECC_ERROR)
 		return status;
 
 	/* check if address lies in MLC region */
@@ -1122,10 +1122,10 @@
 	int ret = 0;
 	int writesize = this->writesize;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
-	      __func__, (unsigned int) from, (int) len);
+	pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+			(int)len);
 
-	if (ops->mode == MTD_OOB_AUTO)
+	if (ops->mode == MTD_OPS_AUTO_OOB)
 		oobsize = this->ecclayout->oobavail;
 	else
 		oobsize = mtd->oobsize;
@@ -1159,7 +1159,7 @@
 			if (unlikely(ret))
 				ret = onenand_recover_lsb(mtd, from, ret);
 			onenand_update_bufferram(mtd, from, !ret);
-			if (ret == -EBADMSG)
+			if (mtd_is_eccerr(ret))
 				ret = 0;
 			if (ret)
 				break;
@@ -1170,7 +1170,7 @@
 			thisooblen = oobsize - oobcolumn;
 			thisooblen = min_t(int, thisooblen, ooblen - oobread);
 
-			if (ops->mode == MTD_OOB_AUTO)
+			if (ops->mode == MTD_OPS_AUTO_OOB)
 				onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
 			else
 				this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
@@ -1226,10 +1226,10 @@
 	int ret = 0, boundary = 0;
 	int writesize = this->writesize;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
-			__func__, (unsigned int) from, (int) len);
+	pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+			(int)len);
 
-	if (ops->mode == MTD_OOB_AUTO)
+	if (ops->mode == MTD_OPS_AUTO_OOB)
 		oobsize = this->ecclayout->oobavail;
 	else
 		oobsize = mtd->oobsize;
@@ -1255,7 +1255,7 @@
 			this->command(mtd, ONENAND_CMD_READ, from, writesize);
  			ret = this->wait(mtd, FL_READING);
  			onenand_update_bufferram(mtd, from, !ret);
-			if (ret == -EBADMSG)
+			if (mtd_is_eccerr(ret))
 				ret = 0;
  		}
  	}
@@ -1291,7 +1291,7 @@
 			thisooblen = oobsize - oobcolumn;
 			thisooblen = min_t(int, thisooblen, ooblen - oobread);
 
-			if (ops->mode == MTD_OOB_AUTO)
+			if (ops->mode == MTD_OPS_AUTO_OOB)
 				onenand_transfer_auto_oob(mtd, oobbuf, oobcolumn, thisooblen);
 			else
 				this->read_bufferram(mtd, ONENAND_SPARERAM, oobbuf, oobcolumn, thisooblen);
@@ -1315,7 +1315,7 @@
  		/* Now wait for load */
  		ret = this->wait(mtd, FL_READING);
  		onenand_update_bufferram(mtd, from, !ret);
-		if (ret == -EBADMSG)
+		if (mtd_is_eccerr(ret))
 			ret = 0;
  	}
 
@@ -1351,19 +1351,19 @@
 	struct mtd_ecc_stats stats;
 	int read = 0, thislen, column, oobsize;
 	size_t len = ops->ooblen;
-	mtd_oob_mode_t mode = ops->mode;
+	unsigned int mode = ops->mode;
 	u_char *buf = ops->oobbuf;
 	int ret = 0, readcmd;
 
 	from += ops->ooboffs;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %i\n",
-		__func__, (unsigned int) from, (int) len);
+	pr_debug("%s: from = 0x%08x, len = %i\n", __func__, (unsigned int)from,
+			(int)len);
 
 	/* Initialize return length value */
 	ops->oobretlen = 0;
 
-	if (mode == MTD_OOB_AUTO)
+	if (mode == MTD_OPS_AUTO_OOB)
 		oobsize = this->ecclayout->oobavail;
 	else
 		oobsize = mtd->oobsize;
@@ -1403,13 +1403,13 @@
 		if (unlikely(ret))
 			ret = onenand_recover_lsb(mtd, from, ret);
 
-		if (ret && ret != -EBADMSG) {
+		if (ret && !mtd_is_eccerr(ret)) {
 			printk(KERN_ERR "%s: read failed = 0x%x\n",
 				__func__, ret);
 			break;
 		}
 
-		if (mode == MTD_OOB_AUTO)
+		if (mode == MTD_OPS_AUTO_OOB)
 			onenand_transfer_auto_oob(mtd, buf, column, thislen);
 		else
 			this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
@@ -1487,10 +1487,10 @@
 	int ret;
 
 	switch (ops->mode) {
-	case MTD_OOB_PLACE:
-	case MTD_OOB_AUTO:
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_AUTO_OOB:
 		break;
-	case MTD_OOB_RAW:
+	case MTD_OPS_RAW:
 		/* Not implemented yet */
 	default:
 		return -EINVAL;
@@ -1576,8 +1576,8 @@
 	size_t len = ops->ooblen;
 	u_char *buf = ops->oobbuf;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08x, len = %zi\n",
-		__func__, (unsigned int) from, len);
+	pr_debug("%s: from = 0x%08x, len = %zi\n", __func__, (unsigned int)from,
+			len);
 
 	/* Initialize return value */
 	ops->oobretlen = 0;
@@ -1750,8 +1750,8 @@
 	/* Wait for any existing operation to clear */
 	onenand_panic_wait(mtd);
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
-		__func__, (unsigned int) to, (int) len);
+	pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+			(int)len);
 
 	/* Initialize retlen, in case of early exit */
 	*retlen = 0;
@@ -1821,7 +1821,7 @@
 }
 
 /**
- * onenand_fill_auto_oob - [Internal] oob auto-placement transfer
+ * onenand_fill_auto_oob - [INTERN] oob auto-placement transfer
  * @param mtd		MTD device structure
  * @param oob_buf	oob buffer
  * @param buf		source address
@@ -1883,8 +1883,8 @@
 	u_char *oobbuf;
 	int ret = 0, cmd;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
-		__func__, (unsigned int) to, (int) len);
+	pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+			(int)len);
 
 	/* Initialize retlen, in case of early exit */
 	ops->retlen = 0;
@@ -1908,7 +1908,7 @@
 	if (!len)
 		return 0;
 
-	if (ops->mode == MTD_OOB_AUTO)
+	if (ops->mode == MTD_OPS_AUTO_OOB)
 		oobsize = this->ecclayout->oobavail;
 	else
 		oobsize = mtd->oobsize;
@@ -1945,7 +1945,7 @@
 				/* We send data to spare ram with oobsize
 				 * to prevent byte access */
 				memset(oobbuf, 0xff, mtd->oobsize);
-				if (ops->mode == MTD_OOB_AUTO)
+				if (ops->mode == MTD_OPS_AUTO_OOB)
 					onenand_fill_auto_oob(mtd, oobbuf, oob, oobcolumn, thisooblen);
 				else
 					memcpy(oobbuf + oobcolumn, oob, thisooblen);
@@ -2055,7 +2055,7 @@
 
 
 /**
- * onenand_write_oob_nolock - [Internal] OneNAND write out-of-band
+ * onenand_write_oob_nolock - [INTERN] OneNAND write out-of-band
  * @param mtd		MTD device structure
  * @param to		offset to write to
  * @param len		number of bytes to write
@@ -2074,17 +2074,17 @@
 	u_char *oobbuf;
 	size_t len = ops->ooblen;
 	const u_char *buf = ops->oobbuf;
-	mtd_oob_mode_t mode = ops->mode;
+	unsigned int mode = ops->mode;
 
 	to += ops->ooboffs;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n",
-		__func__, (unsigned int) to, (int) len);
+	pr_debug("%s: to = 0x%08x, len = %i\n", __func__, (unsigned int)to,
+			(int)len);
 
 	/* Initialize retlen, in case of early exit */
 	ops->oobretlen = 0;
 
-	if (mode == MTD_OOB_AUTO)
+	if (mode == MTD_OPS_AUTO_OOB)
 		oobsize = this->ecclayout->oobavail;
 	else
 		oobsize = mtd->oobsize;
@@ -2128,7 +2128,7 @@
 		/* We send data to spare ram with oobsize
 		 * to prevent byte access */
 		memset(oobbuf, 0xff, mtd->oobsize);
-		if (mode == MTD_OOB_AUTO)
+		if (mode == MTD_OPS_AUTO_OOB)
 			onenand_fill_auto_oob(mtd, oobbuf, buf, column, thislen);
 		else
 			memcpy(oobbuf + column, buf, thislen);
@@ -2217,10 +2217,10 @@
 	int ret;
 
 	switch (ops->mode) {
-	case MTD_OOB_PLACE:
-	case MTD_OOB_AUTO:
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_AUTO_OOB:
 		break;
-	case MTD_OOB_RAW:
+	case MTD_OPS_RAW:
 		/* Not implemented yet */
 	default:
 		return -EINVAL;
@@ -2281,7 +2281,7 @@
 }
 
 /**
- * onenand_multiblock_erase - [Internal] erase block(s) using multiblock erase
+ * onenand_multiblock_erase - [INTERN] erase block(s) using multiblock erase
  * @param mtd		MTD device structure
  * @param instr		erase instruction
  * @param region	erase region
@@ -2397,7 +2397,7 @@
 
 
 /**
- * onenand_block_by_block_erase - [Internal] erase block(s) using regular erase
+ * onenand_block_by_block_erase - [INTERN] erase block(s) using regular erase
  * @param mtd		MTD device structure
  * @param instr		erase instruction
  * @param region	erase region
@@ -2489,8 +2489,9 @@
 	struct mtd_erase_region_info *region = NULL;
 	loff_t region_offset = 0;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: start=0x%012llx, len=%llu\n", __func__,
-	      (unsigned long long) instr->addr, (unsigned long long) instr->len);
+	pr_debug("%s: start=0x%012llx, len=%llu\n", __func__,
+			(unsigned long long)instr->addr,
+			(unsigned long long)instr->len);
 
 	/* Do not allow erase past end of device */
 	if (unlikely((len + addr) > mtd->size)) {
@@ -2558,7 +2559,7 @@
  */
 static void onenand_sync(struct mtd_info *mtd)
 {
-	DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__);
+	pr_debug("%s: called\n", __func__);
 
 	/* Grab the lock and see if the device is available */
 	onenand_get_device(mtd, FL_SYNCING);
@@ -2602,7 +2603,7 @@
 	struct bbm_info *bbm = this->bbm;
 	u_char buf[2] = {0, 0};
 	struct mtd_oob_ops ops = {
-		.mode = MTD_OOB_PLACE,
+		.mode = MTD_OPS_PLACE_OOB,
 		.ooblen = 2,
 		.oobbuf = buf,
 		.ooboffs = 0,
@@ -2922,7 +2923,7 @@
 }
 
 /**
- * onenand_otp_write_oob_nolock - [Internal] OneNAND write out-of-band, specific to OTP
+ * onenand_otp_write_oob_nolock - [INTERN] OneNAND write out-of-band, specific to OTP
  * @param mtd		MTD device structure
  * @param to		offset to write to
  * @param len		number of bytes to write
@@ -3170,7 +3171,7 @@
 		this->command(mtd, ONENAND_CMD_RESET, 0, 0);
 		this->wait(mtd, FL_RESETING);
 	} else {
-		ops.mode = MTD_OOB_PLACE;
+		ops.mode = MTD_OPS_PLACE_OOB;
 		ops.ooblen = len;
 		ops.oobbuf = buf;
 		ops.ooboffs = 0;
@@ -3429,6 +3430,19 @@
 		else if (numbufs == 1) {
 			this->options |= ONENAND_HAS_4KB_PAGE;
 			this->options |= ONENAND_HAS_CACHE_PROGRAM;
+			/*
+			 * There are two different 4KiB-pagesize chips,
+			 * and there is no way to tell them apart from
+			 * the H/W config values alone.
+			 *
+			 * As a workaround, check the version ID to
+			 * detect the correct NOP for each chip:
+			 *
+			 * KFM4G16Q4M has NOP 4 with version ID 0x0131
+			 * KFM4G16Q5M has NOP 1 with version ID 0x013e
+			 */
+			if ((this->version_id & 0xf) == 0xe)
+				this->options |= ONENAND_HAS_NOP_1;
 		}
 
 	case ONENAND_DEVICE_DENSITY_2Gb:
@@ -3663,7 +3677,7 @@
 	int i, ret;
 	int block;
 	struct mtd_oob_ops ops = {
-		.mode = MTD_OOB_PLACE,
+		.mode = MTD_OPS_PLACE_OOB,
 		.ooboffs = 0,
 		.ooblen	= mtd->oobsize,
 		.datbuf	= NULL,
@@ -4054,6 +4068,8 @@
 			this->ecclayout = &onenand_oob_128;
 			mtd->subpage_sft = 2;
 		}
+		if (ONENAND_IS_NOP_1(this))
+			mtd->subpage_sft = 0;
 		break;
 	case 64:
 		this->ecclayout = &onenand_oob_64;
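
For reference, the mtd_is_*() helpers that replace the raw errno comparisons throughout this series are thin inlines from include/linux/mtd/mtd.h, roughly:

static inline int mtd_is_bitflip(int err)
{
	return err == -EUCLEAN;	/* corrected bit-flip: data is good */
}

static inline int mtd_is_eccerr(int err)
{
	return err == -EBADMSG;	/* uncorrectable ECC error */
}

static inline int mtd_is_bitflip_or_eccerr(int err)
{
	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}

So e.g. "if (ret == -EBADMSG)" and "if (mtd_is_eccerr(ret))" are equivalent; the helpers just centralize the convention in one place.
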
diff --git a/drivers/mtd/onenand/onenand_bbt.c b/drivers/mtd/onenand/onenand_bbt.c
index b2d7fc5..66fe3b7 100644
--- a/drivers/mtd/onenand/onenand_bbt.c
+++ b/drivers/mtd/onenand/onenand_bbt.c
@@ -81,7 +81,7 @@
 	startblock = 0;
 	from = 0;
 
-	ops.mode = MTD_OOB_PLACE;
+	ops.mode = MTD_OPS_PLACE_OOB;
 	ops.ooblen = readlen;
 	ops.oobbuf = buf;
 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
@@ -154,7 +154,7 @@
 	block = (int) (onenand_block(this, offs) << 1);
 	res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
 
-	DEBUG(MTD_DEBUG_LEVEL2, "onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+	pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
 		(unsigned int) offs, block >> 1, res);
 
 	switch ((int) res) {
@@ -189,10 +189,8 @@
 	len = this->chipsize >> (this->erase_shift + 2);
 	/* Allocate memory (2bit per block) and clear the memory bad block table */
 	bbm->bbt = kzalloc(len, GFP_KERNEL);
-	if (!bbm->bbt) {
-		printk(KERN_ERR "onenand_scan_bbt: Out of memory\n");
+	if (!bbm->bbt)
 		return -ENOMEM;
-	}
 
 	/* Set the bad block position */
 	bbm->badblockpos = ONENAND_BADBLOCK_POS;
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 3306b5b..5474547 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -147,7 +147,6 @@
 	struct resource *dma_res;
 	unsigned long	phys_base;
 	struct completion	complete;
-	struct mtd_partition *parts;
 };
 
 #define CMD_MAP_00(dev, addr)		(dev->cmd_map(MAP_00, ((addr) << 1)))
@@ -157,8 +156,6 @@
 
 static struct s3c_onenand *onenand;
 
-static const char *part_probes[] = { "cmdlinepart", NULL, };
-
 static inline int s3c_read_reg(int offset)
 {
 	return readl(onenand->base + offset);
@@ -1017,13 +1014,9 @@
 	if (s3c_read_reg(MEM_CFG_OFFSET) & ONENAND_SYS_CFG1_SYNC_READ)
 		dev_info(&onenand->pdev->dev, "OneNAND Sync. Burst Read enabled\n");
 
-	err = parse_mtd_partitions(mtd, part_probes, &onenand->parts, 0);
-	if (err > 0)
-		mtd_device_register(mtd, onenand->parts, err);
-	else if (err <= 0 && pdata && pdata->parts)
-		mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
-	else
-		err = mtd_device_register(mtd, NULL, 0);
+	err = mtd_device_parse_register(mtd, NULL, 0,
+					pdata ? pdata->parts : NULL,
+					pdata ? pdata->nr_parts : 0);
 
 	platform_set_drvdata(pdev, mtd);
 
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index 84b4dda..e366b1d 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -57,8 +57,8 @@
 }
 
 static int parse_redboot_partitions(struct mtd_info *master,
-                             struct mtd_partition **pparts,
-                             unsigned long fis_origin)
+				    struct mtd_partition **pparts,
+				    struct mtd_part_parser_data *data)
 {
 	int nrparts = 0;
 	struct fis_image_desc *buf;
@@ -198,11 +198,10 @@
 			goto out;
 		}
 		new_fl->img = &buf[i];
-                if (fis_origin) {
-                        buf[i].flash_base -= fis_origin;
-                } else {
-                        buf[i].flash_base &= master->size-1;
-                }
+		if (data && data->origin)
+			buf[i].flash_base -= data->origin;
+		else
+			buf[i].flash_base &= master->size-1;
 
 		/* I'm sure the JFFS2 code has done me permanent damage.
 		 * I now think the following is _normal_
@@ -298,6 +297,9 @@
 	.name = "RedBoot",
 };
 
+/* mtd parsers will request the module by parser name */
+MODULE_ALIAS("RedBoot");
+
 static int __init redboot_parser_init(void)
 {
 	return register_mtd_parser(&redboot_parser);
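
The new MODULE_ALIAS matters because the partition-parsing core resolves parser names with request_module() when a named parser is not yet registered; without an alias matching the parser's ->name, a modular RedBoot parser would never be autoloaded. The general shape of a parser module, sketched here with illustrative names:

#include <linux/module.h>
#include <linux/mtd/partitions.h>

static struct mtd_part_parser example_parser = {
	.owner = THIS_MODULE,
	.parse_fn = example_parse_fn,	/* assumed parse callback */
	.name = "example",
};

static int __init example_parser_init(void)
{
	return register_mtd_parser(&example_parser);
}
module_init(example_parser_init);

/* Must match .name so request_module("example") loads this module */
MODULE_ALIAS("example");
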
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index ed3d6cd..fddb714 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -34,7 +34,7 @@
 MODULE_PARM_DESC(debug, "Debug level (0-2)");
 
 
-/* ------------------- sysfs attributtes ---------------------------------- */
+/* ------------------- sysfs attributes ---------------------------------- */
 struct sm_sysfs_attribute {
 	struct device_attribute dev_attr;
 	char *data;
@@ -138,7 +138,7 @@
 	if ((lba[0] & 0xF8) != 0x10)
 		return -2;
 
-	/* check parity - endianess doesn't matter */
+	/* check parity - endianness doesn't matter */
 	if (hweight16(*(uint16_t *)lba) & 1)
 		return -2;
 
@@ -147,7 +147,7 @@
 
 
 /*
- * Read LBA asscociated with block
+ * Read LBA associated with block
  * returns -1, if block is erased
  * returns -2 if error happens
  */
@@ -252,11 +252,11 @@
 		return 0;
 	}
 
-	/* User might not need the oob, but we do for data vertification */
+	/* User might not need the oob, but we do for data verification */
 	if (!oob)
 		oob = &tmp_oob;
 
-	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
 	ops.ooboffs = 0;
 	ops.ooblen = SM_OOB_SIZE;
 	ops.oobbuf = (void *)oob;
@@ -276,12 +276,12 @@
 			return ret;
 	}
 
-	/* Unfortunelly, oob read will _always_ succeed,
+	/* Unfortunately, oob read will _always_ succeed,
 		despite card removal..... */
 	ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
 
 	/* Test for unknown errors */
-	if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
+	if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
 		dbg("read of block %d at zone %d, failed due to error (%d)",
 			block, zone, ret);
 		goto again;
@@ -306,7 +306,7 @@
 	}
 
 	/* Test ECC*/
-	if (ret == -EBADMSG ||
+	if (mtd_is_eccerr(ret) ||
 		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
 
 		dbg("read of block %d at zone %d, failed due to ECC error",
@@ -336,7 +336,7 @@
 	if (ftl->unstable)
 		return -EIO;
 
-	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
+	ops.mode = ftl->smallpagenand ? MTD_OPS_RAW : MTD_OPS_PLACE_OOB;
 	ops.len = SM_SECTOR_SIZE;
 	ops.datbuf = buffer;
 	ops.ooboffs = 0;
@@ -447,14 +447,14 @@
 
 	/* We aren't checking the return value, because we don't care */
 	/* This also fails on fake xD cards, but I guess these won't expose
-		any bad blocks till fail completly */
+		any bad blocks till fail completely */
 	for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
 		sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
 }
 
 /*
  * Erase a block within a zone
- * If erase succedes, it updates free block fifo, otherwise marks block as bad
+ * If erase succeeds, it updates free block fifo, otherwise marks block as bad
  */
 static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
 			  int put_free)
@@ -510,7 +510,7 @@
 	complete(&ftl->erase_completion);
 }
 
-/* Throughtly test that block is valid. */
+/* Thoroughly test that block is valid. */
 static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
 {
 	int boffset;
@@ -526,7 +526,7 @@
 	for (boffset = 0; boffset < ftl->block_size;
 					boffset += SM_SECTOR_SIZE) {
 
-		/* This shoudn't happen anyway */
+		/* This shouldn't happen anyway */
 		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
 			return -2;
 
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 5cd1897..976e3d2 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -135,8 +135,7 @@
 				/* Found */
 				cis_sector = (int)(offset >> SECTOR_SHIFT);
 			} else {
-				DEBUG(MTD_DEBUG_LEVEL1,
-					"SSFDC_RO: CIS/IDI sector not found"
+				pr_debug("SSFDC_RO: CIS/IDI sector not found"
 					" on %s (mtd%d)\n", mtd->name,
 					mtd->index);
 			}
@@ -170,7 +169,7 @@
 	struct mtd_oob_ops ops;
 	int ret;
 
-	ops.mode = MTD_OOB_RAW;
+	ops.mode = MTD_OPS_RAW;
 	ops.ooboffs = 0;
 	ops.ooblen = OOB_SIZE;
 	ops.oobbuf = buf;
@@ -221,8 +220,7 @@
 			block_address >>= 1;
 
 			if (get_parity(block_address, 10) != parity) {
-				DEBUG(MTD_DEBUG_LEVEL0,
-					"SSFDC_RO: logical address field%d"
+				pr_debug("SSFDC_RO: logical address field%d"
 					"parity error(0x%04X)\n", j+1,
 					block_address);
 			} else {
@@ -235,7 +233,7 @@
 	if (!ok)
 		block_address = -2;
 
-	DEBUG(MTD_DEBUG_LEVEL3, "SSFDC_RO: get_logical_address() %d\n",
+	pr_debug("SSFDC_RO: get_logical_address() %d\n",
 		block_address);
 
 	return block_address;
@@ -249,7 +247,7 @@
 	int ret, block_address, phys_block;
 	struct mtd_info *mtd = ssfdc->mbd.mtd;
 
-	DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
+	pr_debug("SSFDC_RO: build_block_map() nblks=%d (%luK)\n",
 	      ssfdc->map_len,
 	      (unsigned long)ssfdc->map_len * ssfdc->erase_size / 1024);
 
@@ -262,8 +260,7 @@
 
 		ret = read_raw_oob(mtd, offset, oob_buf);
 		if (ret < 0) {
-			DEBUG(MTD_DEBUG_LEVEL0,
-				"SSFDC_RO: mtd read_oob() failed at %lu\n",
+			pr_debug("SSFDC_RO: mtd read_oob() failed at %lu\n",
 				offset);
 			return -1;
 		}
@@ -279,8 +276,7 @@
 			ssfdc->logic_block_map[block_address] =
 				(unsigned short)phys_block;
 
-			DEBUG(MTD_DEBUG_LEVEL2,
-				"SSFDC_RO: build_block_map() phys_block=%d,"
+			pr_debug("SSFDC_RO: build_block_map() phys_block=%d,"
 				"logic_block_addr=%d, zone=%d\n",
 				phys_block, block_address, zone_index);
 		}
@@ -304,11 +300,8 @@
 		return;
 
 	ssfdc = kzalloc(sizeof(struct ssfdcr_record), GFP_KERNEL);
-	if (!ssfdc) {
-		printk(KERN_WARNING
-			"SSFDC_RO: out of memory for data structures\n");
+	if (!ssfdc)
 		return;
-	}
 
 	ssfdc->mbd.mtd = mtd;
 	ssfdc->mbd.devnum = -1;
@@ -319,8 +312,7 @@
 	ssfdc->erase_size = mtd->erasesize;
 	ssfdc->map_len = (u32)mtd->size / mtd->erasesize;
 
-	DEBUG(MTD_DEBUG_LEVEL1,
-		"SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
+	pr_debug("SSFDC_RO: cis_block=%d,erase_size=%d,map_len=%d,n_zones=%d\n",
 		ssfdc->cis_block, ssfdc->erase_size, ssfdc->map_len,
 		DIV_ROUND_UP(ssfdc->map_len, MAX_PHYS_BLK_PER_ZONE));
 
@@ -331,7 +323,7 @@
 	ssfdc->cylinders = (unsigned short)(((u32)mtd->size >> SECTOR_SHIFT) /
 			((long)ssfdc->sectors * (long)ssfdc->heads));
 
-	DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
+	pr_debug("SSFDC_RO: using C:%d H:%d S:%d == %ld sects\n",
 		ssfdc->cylinders, ssfdc->heads , ssfdc->sectors,
 		(long)ssfdc->cylinders * (long)ssfdc->heads *
 		(long)ssfdc->sectors);
@@ -342,11 +334,8 @@
 	/* Allocate logical block map */
 	ssfdc->logic_block_map = kmalloc(sizeof(ssfdc->logic_block_map[0]) *
 					 ssfdc->map_len, GFP_KERNEL);
-	if (!ssfdc->logic_block_map) {
-		printk(KERN_WARNING
-			"SSFDC_RO: out of memory for data structures\n");
+	if (!ssfdc->logic_block_map)
 		goto out_err;
-	}
 	memset(ssfdc->logic_block_map, 0xff, sizeof(ssfdc->logic_block_map[0]) *
 		ssfdc->map_len);
 
@@ -371,7 +360,7 @@
 {
 	struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
 
-	DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
+	pr_debug("SSFDC_RO: remove_dev (i=%d)\n", dev->devnum);
 
 	del_mtd_blktrans_dev(dev);
 	kfree(ssfdc->logic_block_map);
@@ -387,8 +376,7 @@
 	offset = (int)(logic_sect_no % sectors_per_block);
 	block_address = (int)(logic_sect_no / sectors_per_block);
 
-	DEBUG(MTD_DEBUG_LEVEL3,
-		"SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
+	pr_debug("SSFDC_RO: ssfdcr_readsect(%lu) sec_per_blk=%d, ofst=%d,"
 		" block_addr=%d\n", logic_sect_no, sectors_per_block, offset,
 		block_address);
 
@@ -397,8 +385,7 @@
 
 	block_address = ssfdc->logic_block_map[block_address];
 
-	DEBUG(MTD_DEBUG_LEVEL3,
-		"SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
+	pr_debug("SSFDC_RO: ssfdcr_readsect() phys_block_addr=%d\n",
 		block_address);
 
 	if (block_address < 0xffff) {
@@ -407,8 +394,7 @@
 		sect_no = (unsigned long)block_address * sectors_per_block +
 				offset;
 
-		DEBUG(MTD_DEBUG_LEVEL3,
-			"SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
+		pr_debug("SSFDC_RO: ssfdcr_readsect() phys_sect_no=%lu\n",
 			sect_no);
 
 		if (read_physical_sector(ssfdc->mbd.mtd, buf, sect_no) < 0)
@@ -424,7 +410,7 @@
 {
 	struct ssfdcr_record *ssfdc = (struct ssfdcr_record *)dev;
 
-	DEBUG(MTD_DEBUG_LEVEL1, "SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
+	pr_debug("SSFDC_RO: ssfdcr_getgeo() C=%d, H=%d, S=%d\n",
 			ssfdc->cylinders, ssfdc->heads, ssfdc->sectors);
 
 	geo->heads = ssfdc->heads;
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index dec92ae..933f7e5 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -30,7 +30,7 @@
 
 #define PRINT_PREF KERN_INFO "mtd_oobtest: "
 
-static int dev;
+static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
 
@@ -131,7 +131,7 @@
 
 	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
 		set_random_data(writebuf, use_len);
-		ops.mode      = MTD_OOB_AUTO;
+		ops.mode      = MTD_OPS_AUTO_OOB;
 		ops.len       = 0;
 		ops.retlen    = 0;
 		ops.ooblen    = use_len;
@@ -184,7 +184,7 @@
 
 	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
 		set_random_data(writebuf, use_len);
-		ops.mode      = MTD_OOB_AUTO;
+		ops.mode      = MTD_OPS_AUTO_OOB;
 		ops.len       = 0;
 		ops.retlen    = 0;
 		ops.ooblen    = use_len;
@@ -211,7 +211,7 @@
 		if (use_offset != 0 || use_len < mtd->ecclayout->oobavail) {
 			int k;
 
-			ops.mode      = MTD_OOB_AUTO;
+			ops.mode      = MTD_OPS_AUTO_OOB;
 			ops.len       = 0;
 			ops.retlen    = 0;
 			ops.ooblen    = mtd->ecclayout->oobavail;
@@ -276,7 +276,7 @@
 	size_t len = mtd->ecclayout->oobavail * pgcnt;
 
 	set_random_data(writebuf, len);
-	ops.mode      = MTD_OOB_AUTO;
+	ops.mode      = MTD_OPS_AUTO_OOB;
 	ops.len       = 0;
 	ops.retlen    = 0;
 	ops.ooblen    = len;
@@ -366,6 +366,13 @@
 
 	printk(KERN_INFO "\n");
 	printk(KERN_INFO "=================================================\n");
+
+	if (dev < 0) {
+		printk(PRINT_PREF "Please specify a valid mtd-device via module parameter\n");
+		printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+		return -EINVAL;
+	}
+
 	printk(PRINT_PREF "MTD device: %d\n", dev);
 
 	mtd = get_mtd_device(NULL, dev);
@@ -507,7 +514,7 @@
 		addr0 += mtd->erasesize;
 
 	/* Attempt to write off end of OOB */
-	ops.mode      = MTD_OOB_AUTO;
+	ops.mode      = MTD_OPS_AUTO_OOB;
 	ops.len       = 0;
 	ops.retlen    = 0;
 	ops.ooblen    = 1;
@@ -527,7 +534,7 @@
 	}
 
 	/* Attempt to read off end of OOB */
-	ops.mode      = MTD_OOB_AUTO;
+	ops.mode      = MTD_OPS_AUTO_OOB;
 	ops.len       = 0;
 	ops.retlen    = 0;
 	ops.ooblen    = 1;
@@ -551,7 +558,7 @@
 		       "block is bad\n");
 	else {
 		/* Attempt to write off end of device */
-		ops.mode      = MTD_OOB_AUTO;
+		ops.mode      = MTD_OPS_AUTO_OOB;
 		ops.len       = 0;
 		ops.retlen    = 0;
 		ops.ooblen    = mtd->ecclayout->oobavail + 1;
@@ -571,7 +578,7 @@
 		}
 
 		/* Attempt to read off end of device */
-		ops.mode      = MTD_OOB_AUTO;
+		ops.mode      = MTD_OPS_AUTO_OOB;
 		ops.len       = 0;
 		ops.retlen    = 0;
 		ops.ooblen    = mtd->ecclayout->oobavail + 1;
@@ -595,7 +602,7 @@
 			goto out;
 
 		/* Attempt to write off end of device */
-		ops.mode      = MTD_OOB_AUTO;
+		ops.mode      = MTD_OPS_AUTO_OOB;
 		ops.len       = 0;
 		ops.retlen    = 0;
 		ops.ooblen    = mtd->ecclayout->oobavail;
@@ -615,7 +622,7 @@
 		}
 
 		/* Attempt to read off end of device */
-		ops.mode      = MTD_OOB_AUTO;
+		ops.mode      = MTD_OPS_AUTO_OOB;
 		ops.len       = 0;
 		ops.retlen    = 0;
 		ops.ooblen    = mtd->ecclayout->oobavail;
@@ -655,7 +662,7 @@
 		addr = (i + 1) * mtd->erasesize - mtd->writesize;
 		for (pg = 0; pg < cnt; ++pg) {
 			set_random_data(writebuf, sz);
-			ops.mode      = MTD_OOB_AUTO;
+			ops.mode      = MTD_OPS_AUTO_OOB;
 			ops.len       = 0;
 			ops.retlen    = 0;
 			ops.ooblen    = sz;
@@ -683,7 +690,7 @@
 			continue;
 		set_random_data(writebuf, mtd->ecclayout->oobavail * 2);
 		addr = (i + 1) * mtd->erasesize - mtd->writesize;
-		ops.mode      = MTD_OOB_AUTO;
+		ops.mode      = MTD_OPS_AUTO_OOB;
 		ops.len       = 0;
 		ops.retlen    = 0;
 		ops.ooblen    = mtd->ecclayout->oobavail * 2;
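
All of the mtd_*test modules in this series gain the same guard: dev now defaults to -EINVAL instead of 0, so loading a test without an explicit dev= argument refuses to run rather than silently erasing mtd0. The pattern, sketched as a hypothetical test module (tst_init is an illustrative name):

#include <linux/module.h>
#include <linux/kernel.h>

static int dev = -EINVAL;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");

static int __init tst_init(void)
{
	if (dev < 0) {
		/* Refuse to guess: the old default of 0 meant an
		 * absent dev= argument wiped mtd0 by accident. */
		pr_info("Please specify a valid mtd-device via module parameter\n");
		return -EINVAL;
	}
	/* ... get_mtd_device(NULL, dev) and run the test ... */
	return 0;
}
module_init(tst_init);
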
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index 00b937e3..afafb69 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -30,7 +30,7 @@
 
 #define PRINT_PREF KERN_INFO "mtd_pagetest: "
 
-static int dev;
+static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
 
@@ -128,7 +128,7 @@
 	for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
 		/* Do a read to set the internal dataRAMs to different data */
 		err = mtd->read(mtd, addr0, bufsize, &read, twopages);
-		if (err == -EUCLEAN)
+		if (mtd_is_bitflip(err))
 			err = 0;
 		if (err || read != bufsize) {
 			printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -136,7 +136,7 @@
 			return err;
 		}
 		err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
-		if (err == -EUCLEAN)
+		if (mtd_is_bitflip(err))
 			err = 0;
 		if (err || read != bufsize) {
 			printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -146,7 +146,7 @@
 		memset(twopages, 0, bufsize);
 		read = 0;
 		err = mtd->read(mtd, addr, bufsize, &read, twopages);
-		if (err == -EUCLEAN)
+		if (mtd_is_bitflip(err))
 			err = 0;
 		if (err || read != bufsize) {
 			printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -164,7 +164,7 @@
 		unsigned long oldnext = next;
 		/* Do a read to set the internal dataRAMs to different data */
 		err = mtd->read(mtd, addr0, bufsize, &read, twopages);
-		if (err == -EUCLEAN)
+		if (mtd_is_bitflip(err))
 			err = 0;
 		if (err || read != bufsize) {
 			printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -172,7 +172,7 @@
 			return err;
 		}
 		err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
-		if (err == -EUCLEAN)
+		if (mtd_is_bitflip(err))
 			err = 0;
 		if (err || read != bufsize) {
 			printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -182,7 +182,7 @@
 		memset(twopages, 0, bufsize);
 		read = 0;
 		err = mtd->read(mtd, addr, bufsize, &read, twopages);
-		if (err == -EUCLEAN)
+		if (mtd_is_bitflip(err))
 			err = 0;
 		if (err || read != bufsize) {
 			printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -231,7 +231,7 @@
 	read = 0;
 	addr = addrn - pgsize - pgsize;
 	err = mtd->read(mtd, addr, pgsize, &read, pp1);
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		err = 0;
 	if (err || read != pgsize) {
 		printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -244,7 +244,7 @@
 	read = 0;
 	addr = addrn - pgsize - pgsize - pgsize;
 	err = mtd->read(mtd, addr, pgsize, &read, pp1);
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		err = 0;
 	if (err || read != pgsize) {
 		printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -258,7 +258,7 @@
 	addr = addr0;
 	printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
 	err = mtd->read(mtd, addr, pgsize, &read, pp2);
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		err = 0;
 	if (err || read != pgsize) {
 		printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -272,7 +272,7 @@
 	addr = addrn - pgsize;
 	printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
 	err = mtd->read(mtd, addr, pgsize, &read, pp3);
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		err = 0;
 	if (err || read != pgsize) {
 		printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -286,7 +286,7 @@
 	addr = addr0;
 	printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
 	err = mtd->read(mtd, addr, pgsize, &read, pp4);
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		err = 0;
 	if (err || read != pgsize) {
 		printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -345,7 +345,7 @@
 	printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
 	memset(readbuf, 0, pgsize);
 	err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		err = 0;
 	if (err || read != pgsize) {
 		printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -383,7 +383,7 @@
 	printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
 	memset(readbuf, 0, pgsize);
 	err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		err = 0;
 	if (err || read != pgsize) {
 		printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -439,7 +439,7 @@
 
 	printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
 	err = mtd->read(mtd, addr0, pgsize, &read, twopages);
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		err = 0;
 	if (err || read != pgsize) {
 		printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -504,6 +504,13 @@
 
 	printk(KERN_INFO "\n");
 	printk(KERN_INFO "=================================================\n");
+
+	if (dev < 0) {
+		printk(PRINT_PREF "Please specify a valid mtd-device via module parameter\n");
+		printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+		return -EINVAL;
+	}
+
 	printk(PRINT_PREF "MTD device: %d\n", dev);
 
 	mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index afe71aa..550fe51 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -29,7 +29,7 @@
 
 #define PRINT_PREF KERN_INFO "mtd_readtest: "
 
-static int dev;
+static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
 
@@ -66,7 +66,7 @@
 		if (mtd->oobsize) {
 			struct mtd_oob_ops ops;
 
-			ops.mode      = MTD_OOB_PLACE;
+			ops.mode      = MTD_OPS_PLACE_OOB;
 			ops.len       = 0;
 			ops.retlen    = 0;
 			ops.ooblen    = mtd->oobsize;
@@ -75,7 +75,8 @@
 			ops.datbuf    = NULL;
 			ops.oobbuf    = oobbuf;
 			ret = mtd->read_oob(mtd, addr, &ops);
-			if (ret || ops.oobretlen != mtd->oobsize) {
+			if ((ret && !mtd_is_bitflip(ret)) ||
+					ops.oobretlen != mtd->oobsize) {
 				printk(PRINT_PREF "error: read oob failed at "
 						  "%#llx\n", (long long)addr);
 				if (!err)
@@ -169,6 +170,12 @@
 
 	printk(KERN_INFO "\n");
 	printk(KERN_INFO "=================================================\n");
+
+	if (dev < 0) {
+		printk(PRINT_PREF "Please specify a valid mtd-device via module parameter\n");
+		return -EINVAL;
+	}
+
 	printk(PRINT_PREF "MTD device: %d\n", dev);
 
 	mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 627d4e2..493b367 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -29,7 +29,7 @@
 
 #define PRINT_PREF KERN_INFO "mtd_speedtest: "
 
-static int dev;
+static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
 
@@ -216,7 +216,7 @@
 
 	err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
 	/* Ignore corrected ECC errors */
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		err = 0;
 	if (err || read != mtd->erasesize) {
 		printk(PRINT_PREF "error: read failed at %#llx\n", addr);
@@ -237,7 +237,7 @@
 	for (i = 0; i < pgcnt; i++) {
 		err = mtd->read(mtd, addr, pgsize, &read, buf);
 		/* Ignore corrected ECC errors */
-		if (err == -EUCLEAN)
+		if (mtd_is_bitflip(err))
 			err = 0;
 		if (err || read != pgsize) {
 			printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -263,7 +263,7 @@
 	for (i = 0; i < n; i++) {
 		err = mtd->read(mtd, addr, sz, &read, buf);
 		/* Ignore corrected ECC errors */
-		if (err == -EUCLEAN)
+		if (mtd_is_bitflip(err))
 			err = 0;
 		if (err || read != sz) {
 			printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -278,7 +278,7 @@
 	if (pgcnt % 2) {
 		err = mtd->read(mtd, addr, pgsize, &read, buf);
 		/* Ignore corrected ECC errors */
-		if (err == -EUCLEAN)
+		if (mtd_is_bitflip(err))
 			err = 0;
 		if (err || read != pgsize) {
 			printk(PRINT_PREF "error: read failed at %#llx\n",
@@ -361,6 +361,13 @@
 
 	printk(KERN_INFO "\n");
 	printk(KERN_INFO "=================================================\n");
+
+	if (dev < 0) {
+		printk(PRINT_PREF "Please specify a valid mtd-device via module parameter\n");
+		printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+		return -EINVAL;
+	}
+
 	if (count)
 		printk(PRINT_PREF "MTD device: %d    count: %d\n", dev, count);
 	else
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 531625f..52ffd91 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -30,7 +30,7 @@
 
 #define PRINT_PREF KERN_INFO "mtd_stresstest: "
 
-static int dev;
+static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
 
@@ -154,7 +154,7 @@
 	}
 	addr = eb * mtd->erasesize + offs;
 	err = mtd->read(mtd, addr, len, &read, readbuf);
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		err = 0;
 	if (unlikely(err || read != len)) {
 		printk(PRINT_PREF "error: read failed at 0x%llx\n",
@@ -250,6 +250,13 @@
 
 	printk(KERN_INFO "\n");
 	printk(KERN_INFO "=================================================\n");
+
+	if (dev < 0) {
+		printk(PRINT_PREF "Please specify a valid mtd-device via module parameter\n");
+		printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+		return -EINVAL;
+	}
+
 	printk(PRINT_PREF "MTD device: %d\n", dev);
 
 	mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 334eae5..1a05bfa 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -29,7 +29,7 @@
 
 #define PRINT_PREF KERN_INFO "mtd_subpagetest: "
 
-static int dev;
+static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
 
@@ -198,7 +198,7 @@
 	read = 0;
 	err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
 	if (unlikely(err || read != subpgsize)) {
-		if (err == -EUCLEAN && read == subpgsize) {
+		if (mtd_is_bitflip(err) && read == subpgsize) {
 			printk(PRINT_PREF "ECC correction at %#llx\n",
 			       (long long)addr);
 			err = 0;
@@ -226,7 +226,7 @@
 	read = 0;
 	err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
 	if (unlikely(err || read != subpgsize)) {
-		if (err == -EUCLEAN && read == subpgsize) {
+		if (mtd_is_bitflip(err) && read == subpgsize) {
 			printk(PRINT_PREF "ECC correction at %#llx\n",
 			       (long long)addr);
 			err = 0;
@@ -264,7 +264,7 @@
 		read = 0;
 		err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
 		if (unlikely(err || read != subpgsize * k)) {
-			if (err == -EUCLEAN && read == subpgsize * k) {
+			if (mtd_is_bitflip(err) && read == subpgsize * k) {
 				printk(PRINT_PREF "ECC correction at %#llx\n",
 				       (long long)addr);
 				err = 0;
@@ -298,7 +298,7 @@
 		read = 0;
 		err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
 		if (unlikely(err || read != subpgsize)) {
-			if (err == -EUCLEAN && read == subpgsize) {
+			if (mtd_is_bitflip(err) && read == subpgsize) {
 				printk(PRINT_PREF "ECC correction at %#llx\n",
 				       (long long)addr);
 				err = 0;
@@ -379,6 +379,13 @@
 
 	printk(KERN_INFO "\n");
 	printk(KERN_INFO "=================================================\n");
+
+	if (dev < 0) {
+		printk(PRINT_PREF "Please specify a valid mtd-device via module parameter\n");
+		printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+		return -EINVAL;
+	}
+
 	printk(PRINT_PREF "MTD device: %d\n", dev);
 
 	mtd = get_mtd_device(NULL, dev);
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index 5c6c3d2..03ab649 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -46,7 +46,7 @@
 module_param(pgcnt, int, S_IRUGO);
 MODULE_PARM_DESC(pgcnt, "number of pages per eraseblock to torture (0 => all)");
 
-static int dev;
+static int dev = -EINVAL;
 module_param(dev, int, S_IRUGO);
 MODULE_PARM_DESC(dev, "MTD device number to use");
 
@@ -138,7 +138,7 @@
 
 retry:
 	err = mtd->read(mtd, addr, len, &read, check_buf);
-	if (err == -EUCLEAN)
+	if (mtd_is_bitflip(err))
 		printk(PRINT_PREF "single bit flip occurred at EB %d "
 		       "MTD reported that it was fixed.\n", ebnum);
 	else if (err) {
@@ -213,6 +213,13 @@
 	printk(KERN_INFO "=================================================\n");
 	printk(PRINT_PREF "Warning: this program is trying to wear out your "
 	       "flash, stop it if this is not wanted.\n");
+
+	if (dev < 0) {
+		printk(PRINT_PREF "Please specify a valid mtd-device via module parameter\n");
+		printk(KERN_CRIT "CAREFUL: This test wipes all data on the specified MTD device!\n");
+		return -EINVAL;
+	}
+
 	printk(PRINT_PREF "MTD device: %d\n", dev);
 	printk(PRINT_PREF "torture %d eraseblocks (%d-%d) of mtd%d\n",
 	       ebcnt, eb, eb + ebcnt - 1, dev);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 4be67181..fb7f19b 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -443,7 +443,7 @@
 		if (err == UBI_IO_BITFLIPS) {
 			scrub = 1;
 			err = 0;
-		} else if (err == -EBADMSG) {
+		} else if (mtd_is_eccerr(err)) {
 			if (vol->vol_type == UBI_DYNAMIC_VOLUME)
 				goto out_unlock;
 			scrub = 1;
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 6ba55c2..f20b6f2 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -172,9 +172,9 @@
 retry:
 	err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
 	if (err) {
-		const char *errstr = (err == -EBADMSG) ? " (ECC error)" : "";
+		const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
 
-		if (err == -EUCLEAN) {
+		if (mtd_is_bitflip(err)) {
 			/*
 			 * -EUCLEAN is reported if there was a bit-flip which
 			 * was corrected, so this is harmless.
@@ -205,7 +205,7 @@
 		 * all the requested data. But some buggy drivers might do
 		 * this, so we change it to -EIO.
 		 */
-		if (read != len && err == -EBADMSG) {
+		if (read != len && mtd_is_eccerr(err)) {
 			ubi_assert(0);
 			err = -EIO;
 		}
@@ -469,7 +469,7 @@
 
 out:
 	mutex_unlock(&ubi->buf_mutex);
-	if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
+	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
 		/*
 		 * If a bit-flip or data integrity error was detected, the test
 		 * has not passed because it happened on a freshly erased
@@ -760,7 +760,7 @@
 
 	read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
 	if (read_err) {
-		if (read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG)
+		if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
 			return read_err;
 
 		/*
@@ -776,7 +776,7 @@
 
 	magic = be32_to_cpu(ec_hdr->magic);
 	if (magic != UBI_EC_HDR_MAGIC) {
-		if (read_err == -EBADMSG)
+		if (mtd_is_eccerr(read_err))
 			return UBI_IO_BAD_HDR_EBADMSG;
 
 		/*
@@ -1032,12 +1032,12 @@
 	p = (char *)vid_hdr - ubi->vid_hdr_shift;
 	read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
 			  ubi->vid_hdr_alsize);
-	if (read_err && read_err != UBI_IO_BITFLIPS && read_err != -EBADMSG)
+	if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
 		return read_err;
 
 	magic = be32_to_cpu(vid_hdr->magic);
 	if (magic != UBI_VID_HDR_MAGIC) {
-		if (read_err == -EBADMSG)
+		if (mtd_is_eccerr(read_err))
 			return UBI_IO_BAD_HDR_EBADMSG;
 
 		if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
@@ -1219,7 +1219,7 @@
 		return -ENOMEM;
 
 	err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
-	if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
 		goto exit;
 
 	crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
@@ -1306,7 +1306,7 @@
 	p = (char *)vid_hdr - ubi->vid_hdr_shift;
 	err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
 			  ubi->vid_hdr_alsize);
-	if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
 		goto exit;
 
 	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
@@ -1358,7 +1358,7 @@
 	}
 
 	err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
-	if (err && err != -EUCLEAN)
+	if (err && !mtd_is_bitflip(err))
 		goto out_free;
 
 	for (i = 0; i < len; i++) {
@@ -1422,7 +1422,7 @@
 	}
 
 	err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
-	if (err && err != -EUCLEAN) {
+	if (err && !mtd_is_bitflip(err)) {
 		ubi_err("error %d while reading %d bytes from PEB %d:%d, "
 			"read %zd bytes", err, len, pnum, offset, read);
 		goto error;
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index d39716e..1a35fc5 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -410,7 +410,7 @@
 		return 0;
 
 	err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
-	if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
+	if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
 		ubi_warn("mark volume %d as corrupted", vol_id);
 		vol->corrupted = 1;
 	}
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index ff2a65c..f6a7d7a 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -81,7 +81,7 @@
 
 		err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
 		if (err) {
-			if (err == -EBADMSG)
+			if (mtd_is_eccerr(err))
 				err = 1;
 			break;
 		}
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index a3a198f..0cb17d9 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -395,7 +395,7 @@
 	}
 
 	err = ubi_io_read_data(ubi, buf, pnum, 0, len);
-	if (err && err != UBI_IO_BITFLIPS && err != -EBADMSG)
+	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
 		goto out_free_buf;
 
 	data_crc = be32_to_cpu(vid_hdr->data_crc);
@@ -793,7 +793,7 @@
 
 	err = ubi_io_read(ubi, ubi->peb_buf1, pnum, ubi->leb_start,
 			  ubi->leb_size);
-	if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
+	if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
 		/*
 		 * Bit-flips or integrity errors while reading the data area.
 		 * It is difficult to say for sure what type of corruption is
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 4b50a30..9ad18da 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -423,7 +423,7 @@
 
 		err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0,
 				       ubi->vtbl_size);
-		if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
+		if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
 			/*
 			 * Scrub the PEB later. Note, -EBADMSG indicates an
 			 * uncorrectable ECC error, but we have our own CRC and
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b2b9109..b0c5772 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -560,8 +560,8 @@
 	u32 slave_speed;
 	int res;
 
-	slave->speed = -1;
-	slave->duplex = -1;
+	slave->speed = SPEED_UNKNOWN;
+	slave->duplex = DUPLEX_UNKNOWN;
 
 	res = __ethtool_get_settings(slave_dev, &ecmd);
 	if (res < 0)
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 2acf0b0..ad284ba 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -158,12 +158,12 @@
 	seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
 	seq_printf(seq, "MII Status: %s\n",
 		   (slave->link == BOND_LINK_UP) ?  "up" : "down");
-	if (slave->speed == -1)
+	if (slave->speed == SPEED_UNKNOWN)
 		seq_printf(seq, "Speed: %s\n", "Unknown");
 	else
 		seq_printf(seq, "Speed: %d Mbps\n", slave->speed);
 
-	if (slave->duplex == -1)
+	if (slave->duplex == DUPLEX_UNKNOWN)
 		seq_printf(seq, "Duplex: %s\n", "Unknown");
 	else
 		seq_printf(seq, "Duplex: %s\n", slave->duplex ? "full" : "half");
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 161cbbb..bf40741 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM			3
-#define TG3_MIN_NUM			120
+#define TG3_MIN_NUM			121
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"August 18, 2011"
+#define DRV_MODULE_RELDATE	"November 2, 2011"
 
 #define RESET_KIND_SHUTDOWN	0
 #define RESET_KIND_INIT		1
@@ -628,19 +628,23 @@
 		regbase = TG3_APE_PER_LOCK_GRANT;
 
 	/* Make sure the driver hasn't any stale locks. */
-	for (i = 0; i < 8; i++) {
-		if (i == TG3_APE_LOCK_GPIO)
-			continue;
-		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
+	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
+		switch (i) {
+		case TG3_APE_LOCK_PHY0:
+		case TG3_APE_LOCK_PHY1:
+		case TG3_APE_LOCK_PHY2:
+		case TG3_APE_LOCK_PHY3:
+			bit = APE_LOCK_GRANT_DRIVER;
+			break;
+		default:
+			if (!tp->pci_fn)
+				bit = APE_LOCK_GRANT_DRIVER;
+			else
+				bit = 1 << tp->pci_fn;
+		}
+		tg3_ape_write32(tp, regbase + 4 * i, bit);
 	}
 
-	/* Clear the correct bit of the GPIO lock too. */
-	if (!tp->pci_fn)
-		bit = APE_LOCK_GRANT_DRIVER;
-	else
-		bit = 1 << tp->pci_fn;
-
-	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
 }
 
 static int tg3_ape_lock(struct tg3 *tp, int locknum)
@@ -658,6 +662,10 @@
 			return 0;
 	case TG3_APE_LOCK_GRC:
 	case TG3_APE_LOCK_MEM:
+		if (!tp->pci_fn)
+			bit = APE_LOCK_REQ_DRIVER;
+		else
+			bit = 1 << tp->pci_fn;
 		break;
 	default:
 		return -EINVAL;
@@ -673,11 +681,6 @@
 
 	off = 4 * locknum;
 
-	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
-		bit = APE_LOCK_REQ_DRIVER;
-	else
-		bit = 1 << tp->pci_fn;
-
 	tg3_ape_write32(tp, req + off, bit);
 
 	/* Wait for up to 1 millisecond to acquire lock. */
@@ -710,6 +713,10 @@
 			return;
 	case TG3_APE_LOCK_GRC:
 	case TG3_APE_LOCK_MEM:
+		if (!tp->pci_fn)
+			bit = APE_LOCK_GRANT_DRIVER;
+		else
+			bit = 1 << tp->pci_fn;
 		break;
 	default:
 		return;
@@ -720,11 +727,6 @@
 	else
 		gnt = TG3_APE_PER_LOCK_GRANT;
 
-	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
-		bit = APE_LOCK_GRANT_DRIVER;
-	else
-		bit = 1 << tp->pci_fn;
-
 	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
 }
 
@@ -5927,6 +5929,18 @@
 	return work_done;
 }
 
+static inline void tg3_reset_task_schedule(struct tg3 *tp)
+{
+	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
+		schedule_work(&tp->reset_task);
+}
+
+static inline void tg3_reset_task_cancel(struct tg3 *tp)
+{
+	cancel_work_sync(&tp->reset_task);
+	tg3_flag_clear(tp, RESET_TASK_PENDING);
+}
+
 static int tg3_poll_msix(struct napi_struct *napi, int budget)
 {
 	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
@@ -5967,7 +5981,7 @@
 tx_recovery:
 	/* work_done is guaranteed to be less than budget. */
 	napi_complete(napi);
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 	return work_done;
 }
 
@@ -6002,7 +6016,7 @@
 	tg3_dump_state(tp);
 
 	tg3_flag_set(tp, ERROR_PROCESSED);
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 }
 
 static int tg3_poll(struct napi_struct *napi, int budget)
@@ -6049,7 +6063,7 @@
 tx_recovery:
 	/* work_done is guaranteed to be less than budget. */
 	napi_complete(napi);
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 	return work_done;
 }
 
@@ -6338,11 +6352,11 @@
 {
 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
 	int err;
-	unsigned int restart_timer;
 
 	tg3_full_lock(tp, 0);
 
 	if (!netif_running(tp->dev)) {
+		tg3_flag_clear(tp, RESET_TASK_PENDING);
 		tg3_full_unlock(tp);
 		return;
 	}
@@ -6355,9 +6369,6 @@
 
 	tg3_full_lock(tp, 1);
 
-	restart_timer = tg3_flag(tp, RESTART_TIMER);
-	tg3_flag_clear(tp, RESTART_TIMER);
-
 	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
 		tp->write32_tx_mbox = tg3_write32_tx_mbox;
 		tp->write32_rx_mbox = tg3_write_flush_reg32;
@@ -6372,14 +6383,13 @@
 
 	tg3_netif_start(tp);
 
-	if (restart_timer)
-		mod_timer(&tp->timer, jiffies + 1);
-
 out:
 	tg3_full_unlock(tp);
 
 	if (!err)
 		tg3_phy_start(tp);
+
+	tg3_flag_clear(tp, RESET_TASK_PENDING);
 }
 
 static void tg3_tx_timeout(struct net_device *dev)
@@ -6391,7 +6401,7 @@
 		tg3_dump_state(tp);
 	}
 
-	schedule_work(&tp->reset_task);
+	tg3_reset_task_schedule(tp);
 }
 
 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
@@ -6442,31 +6452,26 @@
 		hwbug = 1;
 
 	if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+		u32 prvidx = *entry;
 		u32 tmp_flag = flags & ~TXD_FLAG_END;
-		while (len > TG3_TX_BD_DMA_MAX) {
+		while (len > TG3_TX_BD_DMA_MAX && *budget) {
 			u32 frag_len = TG3_TX_BD_DMA_MAX;
 			len -= TG3_TX_BD_DMA_MAX;
 
-			if (len) {
-				tnapi->tx_buffers[*entry].fragmented = true;
-				/* Avoid the 8byte DMA problem */
-				if (len <= 8) {
-					len += TG3_TX_BD_DMA_MAX / 2;
-					frag_len = TG3_TX_BD_DMA_MAX / 2;
-				}
-			} else
-				tmp_flag = flags;
-
-			if (*budget) {
-				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
-					      frag_len, tmp_flag, mss, vlan);
-				(*budget)--;
-				*entry = NEXT_TX(*entry);
-			} else {
-				hwbug = 1;
-				break;
+			/* Avoid the 8byte DMA problem */
+			if (len <= 8) {
+				len += TG3_TX_BD_DMA_MAX / 2;
+				frag_len = TG3_TX_BD_DMA_MAX / 2;
 			}
 
+			tnapi->tx_buffers[*entry].fragmented = true;
+
+			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+				      frag_len, tmp_flag, mss, vlan);
+			*budget -= 1;
+			prvidx = *entry;
+			*entry = NEXT_TX(*entry);
+
 			map += frag_len;
 		}
 
@@ -6474,10 +6479,11 @@
 			if (*budget) {
 				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
 					      len, flags, mss, vlan);
-				(*budget)--;
+				*budget -= 1;
 				*entry = NEXT_TX(*entry);
 			} else {
 				hwbug = 1;
+				tnapi->tx_buffers[prvidx].fragmented = false;
 			}
 		}
 	} else {
@@ -6509,7 +6515,7 @@
 		txb = &tnapi->tx_buffers[entry];
 	}
 
-	for (i = 0; i < last; i++) {
+	for (i = 0; i <= last; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 		entry = NEXT_TX(entry);
@@ -6559,6 +6565,8 @@
 			dev_kfree_skb(new_skb);
 			ret = -1;
 		} else {
+			u32 save_entry = *entry;
+
 			base_flags |= TXD_FLAG_END;
 
 			tnapi->tx_buffers[*entry].skb = new_skb;
@@ -6568,7 +6576,7 @@
 			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
 					    new_skb->len, base_flags,
 					    mss, vlan)) {
-				tg3_tx_skb_unmap(tnapi, *entry, 0);
+				tg3_tx_skb_unmap(tnapi, save_entry, -1);
 				dev_kfree_skb(new_skb);
 				ret = -1;
 			}
@@ -6758,11 +6766,10 @@
 
 	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
 			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
-			    mss, vlan))
+			    mss, vlan)) {
 		would_hit_hwbug = 1;
-
 	/* Now loop through additional data fragments, and queue them. */
-	if (skb_shinfo(skb)->nr_frags > 0) {
+	} else if (skb_shinfo(skb)->nr_frags > 0) {
 		u32 tmp_mss = mss;
 
 		if (!tg3_flag(tp, HW_TSO_1) &&
@@ -6784,11 +6791,14 @@
 			if (dma_mapping_error(&tp->pdev->dev, mapping))
 				goto dma_error;
 
-			if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+			if (!budget ||
+			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
 					    len, base_flags |
 					    ((i == last) ? TXD_FLAG_END : 0),
-					    tmp_mss, vlan))
+					    tmp_mss, vlan)) {
 				would_hit_hwbug = 1;
+				break;
+			}
 		}
 	}
 
@@ -6828,7 +6838,7 @@
 	return NETDEV_TX_OK;
 
 dma_error:
-	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
 	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
 drop:
 	dev_kfree_skb(skb);
@@ -7281,7 +7291,8 @@
 			if (!skb)
 				continue;
 
-			tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
+			tg3_tx_skb_unmap(tnapi, i,
+					 skb_shinfo(skb)->nr_frags - 1);
 
 			dev_kfree_skb_any(skb);
 		}
@@ -9200,7 +9211,7 @@
 {
 	struct tg3 *tp = (struct tg3 *) __opaque;
 
-	if (tp->irq_sync)
+	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
 		goto restart_timer;
 
 	spin_lock(&tp->lock);
@@ -9223,10 +9234,9 @@
 		}
 
 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-			tg3_flag_set(tp, RESTART_TIMER);
 			spin_unlock(&tp->lock);
-			schedule_work(&tp->reset_task);
-			return;
+			tg3_reset_task_schedule(tp);
+			goto restart_timer;
 		}
 	}
 
@@ -9674,15 +9684,14 @@
 		struct tg3_napi *tnapi = &tp->napi[i];
 		err = tg3_request_irq(tp, i);
 		if (err) {
-			for (i--; i >= 0; i--)
+			for (i--; i >= 0; i--) {
+				tnapi = &tp->napi[i];
 				free_irq(tnapi->irq_vec, tnapi);
-			break;
+			}
+			goto err_out2;
 		}
 	}
 
-	if (err)
-		goto err_out2;
-
 	tg3_full_lock(tp, 0);
 
 	err = tg3_init_hw(tp, 1);
@@ -9783,7 +9792,7 @@
 	struct tg3 *tp = netdev_priv(dev);
 
 	tg3_napi_disable(tp);
-	cancel_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 
 	netif_tx_stop_all_queues(dev);
 
@@ -11520,7 +11529,7 @@
 			break;
 	}
 
-	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
+	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
 	dev_kfree_skb(skb);
 
 	if (tx_idx != tnapi->tx_prod)
@@ -14228,12 +14237,30 @@
 	val = tr32(MEMARB_MODE);
 	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
 
-	if (tg3_flag(tp, PCIX_MODE)) {
-		pci_read_config_dword(tp->pdev,
-				      tp->pcix_cap + PCI_X_STATUS, &val);
-		tp->pci_fn = val & 0x7;
-	} else {
-		tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+	    tg3_flag(tp, 5780_CLASS)) {
+		if (tg3_flag(tp, PCIX_MODE)) {
+			pci_read_config_dword(tp->pdev,
+					      tp->pcix_cap + PCI_X_STATUS,
+					      &val);
+			tp->pci_fn = val & 0x7;
+		}
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
+		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
+		    NIC_SRAM_CPMUSTAT_SIG) {
+			tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
+			tp->pci_fn = tp->pci_fn ? 1 : 0;
+		}
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
+		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
+		    NIC_SRAM_CPMUSTAT_SIG) {
+			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
+				     TG3_CPMU_STATUS_FSHFT_5719;
+		}
 	}
 
 	/* Get eeprom hw config before calling tg3_set_power_state().
@@ -15665,7 +15692,7 @@
 		if (tp->fw)
 			release_firmware(tp->fw);
 
-		cancel_work_sync(&tp->reset_task);
+		tg3_reset_task_cancel(tp);
 
 		if (tg3_flag(tp, USE_PHYLIB)) {
 			tg3_phy_fini(tp);
@@ -15699,7 +15726,7 @@
 	if (!netif_running(dev))
 		return 0;
 
-	flush_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 	tg3_phy_stop(tp);
 	tg3_netif_stop(tp);
 
@@ -15812,12 +15839,10 @@
 	tg3_netif_stop(tp);
 
 	del_timer_sync(&tp->timer);
-	tg3_flag_clear(tp, RESTART_TIMER);
 
 	/* Want to make sure that the reset task doesn't run */
-	cancel_work_sync(&tp->reset_task);
+	tg3_reset_task_cancel(tp);
 	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
-	tg3_flag_clear(tp, RESTART_TIMER);
 
 	netif_device_detach(netdev);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index f32f288..94b4bd0 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -1095,6 +1095,11 @@
 #define TG3_CPMU_CLCK_ORIDE		0x00003624
 #define  CPMU_CLCK_ORIDE_MAC_ORIDE_EN	 0x80000000
 
+#define TG3_CPMU_STATUS			0x0000362c
+#define  TG3_CPMU_STATUS_FMSK_5717	 0x20000000
+#define  TG3_CPMU_STATUS_FMSK_5719	 0xc0000000
+#define  TG3_CPMU_STATUS_FSHFT_5719	 30
+
 #define TG3_CPMU_CLCK_STAT		0x00003630
 #define  CPMU_CLCK_STAT_MAC_CLCK_MASK	 0x001f0000
 #define  CPMU_CLCK_STAT_MAC_CLCK_62_5	 0x00000000
@@ -2128,6 +2133,10 @@
 #define  NIC_SRAM_RGMII_EXT_IBND_RX_EN	 0x00000008
 #define  NIC_SRAM_RGMII_EXT_IBND_TX_EN	 0x00000010
 
+#define NIC_SRAM_CPMU_STATUS		0x00000e00
+#define  NIC_SRAM_CPMUSTAT_SIG		0x0000362c
+#define  NIC_SRAM_CPMUSTAT_SIG_MSK	0x0000ffff
+
 #define NIC_SRAM_RX_MINI_BUFFER_DESC	0x00001000
 
 #define NIC_SRAM_DMA_DESC_POOL_BASE	0x00002000
@@ -2344,9 +2353,13 @@
 #define  APE_PER_LOCK_GRANT_DRIVER	 0x00001000
 
 /* APE convenience enumerations. */
-#define TG3_APE_LOCK_GRC                1
-#define TG3_APE_LOCK_MEM                4
-#define TG3_APE_LOCK_GPIO               7
+#define TG3_APE_LOCK_PHY0		0
+#define TG3_APE_LOCK_GRC		1
+#define TG3_APE_LOCK_PHY1		2
+#define TG3_APE_LOCK_PHY2		3
+#define TG3_APE_LOCK_MEM		4
+#define TG3_APE_LOCK_PHY3		5
+#define TG3_APE_LOCK_GPIO		7
 
 #define TG3_EEPROM_SB_F1R2_MBA_OFF	0x10
 
@@ -2866,7 +2879,6 @@
 	TG3_FLAG_JUMBO_CAPABLE,
 	TG3_FLAG_CHIP_RESETTING,
 	TG3_FLAG_INIT_COMPLETE,
-	TG3_FLAG_RESTART_TIMER,
 	TG3_FLAG_TSO_BUG,
 	TG3_FLAG_IS_5788,
 	TG3_FLAG_MAX_RXPEND_64,
@@ -2909,6 +2921,7 @@
 	TG3_FLAG_APE_HAS_NCSI,
 	TG3_FLAG_5717_PLUS,
 	TG3_FLAG_4K_FIFO_LIMIT,
+	TG3_FLAG_RESET_TASK_PENDING,
 
 	/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
 	TG3_FLAG_NUMBER_OF_FLAGS,	/* Last entry in enum TG3_FLAGS */
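
The tg3 reset-task rework replaces the racy RESTART_TIMER flag with RESET_TASK_PENDING, set atomically before the work item is queued so the task is scheduled at most once. The underlying idiom, sketched generically (my_dev and MY_RESET_PENDING are illustrative, not tg3 names):

#include <linux/workqueue.h>
#include <linux/bitops.h>

enum { MY_RESET_PENDING };	/* illustrative flag bit */

struct my_dev {
	unsigned long flags;
	struct work_struct reset_task;
};

/* Queue the reset work at most once: test_and_set_bit() returns the
 * previous bit value, so only the 0 -> 1 transition schedules work. */
static void my_reset_schedule(struct my_dev *d)
{
	if (!test_and_set_bit(MY_RESET_PENDING, &d->flags))
		schedule_work(&d->reset_task);
}

/* The work handler clears the bit when done, re-arming the guard. */
static void my_reset_work(struct work_struct *w)
{
	struct my_dev *d = container_of(w, struct my_dev, reset_task);

	/* ... perform the reset ... */
	clear_bit(MY_RESET_PENDING, &d->flags);
}
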
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 1cf6716..c520cfd 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -7,8 +7,7 @@
 	default y
 	depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
 		   M523x || M527x || M5272 || M528x || M520x || M532x || \
-		   ARCH_MXC || ARCH_MXS || \
-		   (PPC_MPC52xx && PPC_BESTCOMM)
+		   ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM)
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 61029dc..7621316 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -5,7 +5,11 @@
 config NET_VENDOR_INTEL
 	bool "Intel devices"
 	default y
-	depends on PCI || PCI_MSI
+	depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
+		   ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
+		   GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \
+		   (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
+		   EXPERIMENTAL
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index db95731..00fcd39 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -442,12 +442,14 @@
 
 int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
 {
+#ifdef CONFIG_PCI_IOV
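+	/* adapter->vfinfo is only allocated when PCI_IOV support is built
+	 * in; without it no VF can have been assigned to a guest */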
 	int i;
 	for (i = 0; i < adapter->num_vfs; i++) {
 		if (adapter->vfinfo[i].vfdev->dev_flags &
 				PCI_DEV_FLAGS_ASSIGNED)
 			return true;
 	}
+#endif
 	return false;
 }
 
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 4a5d889..df04f1a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -42,11 +42,11 @@
 int ixgbe_ndo_get_vf_config(struct net_device *netdev,
 			    int vf, struct ifla_vf_info *ivi);
 void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
-#ifdef CONFIG_PCI_IOV
 void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
+int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
+#ifdef CONFIG_PCI_IOV
 void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
 			const struct ixgbe_info *ii);
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
 #endif
 
 
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cbd026f..fdc6c39 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -366,17 +366,6 @@
 				gm_phy_write(hw, port, PHY_MARV_FE_SPEC_2, spec);
 			}
 		} else {
-			if (hw->chip_id >= CHIP_ID_YUKON_OPT) {
-				u16 ctrl2 = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL_2);
-
-				/* enable PHY Reverse Auto-Negotiation */
-				ctrl2 |= 1u << 13;
-
-				/* Write PHY changes (SW-reset must follow) */
-				gm_phy_write(hw, port, PHY_MARV_EXT_CTRL_2, ctrl2);
-			}
-
-
 			/* disable energy detect */
 			ctrl &= ~PHY_M_PC_EN_DET_MSK;
 
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index 4a6b9fd..eb836f7 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -5,7 +5,10 @@
 config NET_VENDOR_NATSEMI
 	bool "National Semi-conductor devices"
 	default y
-	depends on MCA || MAC || MACH_JAZZ || PCI || XTENSA_PLATFORM_XT2000
+	depends on AMIGA_PCMCIA || ARM || EISA || EXPERIMENTAL || H8300 || \
+		   ISA || M32R || MAC || MACH_JAZZ || MACH_TX49XX || MCA || \
+		   MCA_LEGACY || MIPS || PCI || PCMCIA || SUPERH || \
+		   XTENSA_PLATFORM_XT2000 || ZORRO
 	---help---
 	  If you have a network (Ethernet) card belonging to this class, say Y
 	  and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1e37eb9..1dca570 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1682,6 +1682,7 @@
 		np->estats.tx_pause += readl(base + NvRegTxPause);
 		np->estats.rx_pause += readl(base + NvRegRxPause);
 		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+		np->estats.rx_errors_total += np->estats.rx_drop_frame;
 	}
 
 	if (np->driver_data & DEV_HAS_STATISTICS_V3) {
@@ -1706,11 +1707,14 @@
 		nv_get_hw_stats(dev);
 
 		/* copy to net_device stats */
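+		/* these are filled from the hardware counters rather than
+		 * being counted per-packet in the tx/rx hot paths */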
+		dev->stats.tx_packets = np->estats.tx_packets;
+		dev->stats.rx_bytes = np->estats.rx_bytes;
 		dev->stats.tx_bytes = np->estats.tx_bytes;
 		dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
 		dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
 		dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
 		dev->stats.rx_over_errors = np->estats.rx_over_errors;
+		dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
 		dev->stats.rx_errors = np->estats.rx_errors_total;
 		dev->stats.tx_errors = np->estats.tx_errors_total;
 	}
@@ -2099,10 +2103,10 @@
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
-		u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
-		entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
-			   ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
+			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
 	spin_lock_irqsave(&np->lock, flags);
@@ -2141,13 +2145,13 @@
 	/* setup the fragments */
 	for (i = 0; i < fragments; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		u32 size = skb_frag_size(frag);
+		u32 frag_size = skb_frag_size(frag);
 		offset = 0;
 
 		do {
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
-			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
@@ -2159,12 +2163,12 @@
 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 
 			offset += bcnt;
-			size -= bcnt;
+			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.orig))
 				put_tx = np->first_tx.orig;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
 				np->put_tx_ctx = np->first_tx_ctx;
-		} while (size);
+		} while (frag_size);
 	}
 
 	/* set last fragment flag  */
@@ -2213,10 +2217,10 @@
 
 	/* add fragments to entries count */
 	for (i = 0; i < fragments; i++) {
-		u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		u32 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
 
-		entries += (size >> NV_TX2_TSO_MAX_SHIFT) +
-			   ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
+		entries += (frag_size >> NV_TX2_TSO_MAX_SHIFT) +
+			   ((frag_size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
 	spin_lock_irqsave(&np->lock, flags);
@@ -2257,13 +2261,13 @@
 	/* setup the fragments */
 	for (i = 0; i < fragments; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		u32 size = skb_frag_size(frag);
+		u32 frag_size = skb_frag_size(frag);
 		offset = 0;
 
 		do {
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
-			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
+			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
@@ -2276,12 +2280,12 @@
 			put_tx->flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 
 			offset += bcnt;
-			size -= bcnt;
+			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.ex))
 				put_tx = np->first_tx.ex;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
 				np->put_tx_ctx = np->first_tx_ctx;
-		} while (size);
+		} while (frag_size);
 	}
 
 	/* set last fragment flag  */
@@ -2374,16 +2378,8 @@
 		if (np->desc_ver == DESC_VER_1) {
 			if (flags & NV_TX_LASTPACKET) {
 				if (flags & NV_TX_ERROR) {
-					if (flags & NV_TX_UNDERFLOW)
-						dev->stats.tx_fifo_errors++;
-					if (flags & NV_TX_CARRIERLOST)
-						dev->stats.tx_carrier_errors++;
 					if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
-					dev->stats.tx_errors++;
-				} else {
-					dev->stats.tx_packets++;
-					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
 				}
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
@@ -2392,16 +2388,8 @@
 		} else {
 			if (flags & NV_TX2_LASTPACKET) {
 				if (flags & NV_TX2_ERROR) {
-					if (flags & NV_TX2_UNDERFLOW)
-						dev->stats.tx_fifo_errors++;
-					if (flags & NV_TX2_CARRIERLOST)
-						dev->stats.tx_carrier_errors++;
 					if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
-					dev->stats.tx_errors++;
-				} else {
-					dev->stats.tx_packets++;
-					dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
 				}
 				dev_kfree_skb_any(np->get_tx_ctx->skb);
 				np->get_tx_ctx->skb = NULL;
@@ -2434,9 +2422,7 @@
 		nv_unmap_txskb(np, np->get_tx_ctx);
 
 		if (flags & NV_TX2_LASTPACKET) {
-			if (!(flags & NV_TX2_ERROR))
-				dev->stats.tx_packets++;
-			else {
+			if (flags & NV_TX2_ERROR) {
 				if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
 					if (np->driver_data & DEV_HAS_GEAR_MODE)
 						nv_gear_backoff_reseed(dev);
@@ -2636,7 +2622,6 @@
 					if ((flags & NV_RX_ERROR_MASK) == NV_RX_ERROR4) {
 						len = nv_getlen(dev, skb->data, len);
 						if (len < 0) {
-							dev->stats.rx_errors++;
 							dev_kfree_skb(skb);
 							goto next_pkt;
 						}
@@ -2650,11 +2635,6 @@
 					else {
 						if (flags & NV_RX_MISSEDFRAME)
 							dev->stats.rx_missed_errors++;
-						if (flags & NV_RX_CRCERR)
-							dev->stats.rx_crc_errors++;
-						if (flags & NV_RX_OVERFLOW)
-							dev->stats.rx_over_errors++;
-						dev->stats.rx_errors++;
 						dev_kfree_skb(skb);
 						goto next_pkt;
 					}
@@ -2670,7 +2650,6 @@
 					if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_ERROR4) {
 						len = nv_getlen(dev, skb->data, len);
 						if (len < 0) {
-							dev->stats.rx_errors++;
 							dev_kfree_skb(skb);
 							goto next_pkt;
 						}
@@ -2682,11 +2661,6 @@
 					}
 					/* the rest are hard errors */
 					else {
-						if (flags & NV_RX2_CRCERR)
-							dev->stats.rx_crc_errors++;
-						if (flags & NV_RX2_OVERFLOW)
-							dev->stats.rx_over_errors++;
-						dev->stats.rx_errors++;
 						dev_kfree_skb(skb);
 						goto next_pkt;
 					}
@@ -2704,7 +2678,6 @@
 		skb->protocol = eth_type_trans(skb, dev);
 		napi_gro_receive(&np->napi, skb);
 		dev->stats.rx_packets++;
-		dev->stats.rx_bytes += len;
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
 			np->get_rx.orig = np->first_rx.orig;
@@ -2787,9 +2760,7 @@
 				__vlan_hwaccel_put_tag(skb, vid);
 			}
 			napi_gro_receive(&np->napi, skb);
-
 			dev->stats.rx_packets++;
-			dev->stats.rx_bytes += len;
 		} else {
 			dev_kfree_skb(skb);
 		}
@@ -2962,11 +2933,11 @@
 				struct netdev_hw_addr *ha;
 
 				netdev_for_each_mc_addr(ha, dev) {
-					unsigned char *addr = ha->addr;
+					unsigned char *hw_addr = ha->addr;
 					u32 a, b;
 
-					a = le32_to_cpu(*(__le32 *) addr);
-					b = le16_to_cpu(*(__le16 *) (&addr[4]));
+					a = le32_to_cpu(*(__le32 *) hw_addr);
+					b = le16_to_cpu(*(__le16 *) (&hw_addr[4]));
 					alwaysOn[0] &= a;
 					alwaysOff[0] &= ~a;
 					alwaysOn[1] &= b;
@@ -3398,7 +3369,8 @@
 
 	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
-		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
+		writel(events, base + NvRegMSIXIrqStatus);
+		netdev_dbg(dev, "tx irq events: %08x\n", events);
 		if (!(events & np->irqmask))
 			break;
 
@@ -3509,7 +3481,8 @@
 
 	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
-		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
+		writel(events, base + NvRegMSIXIrqStatus);
+		netdev_dbg(dev, "rx irq events: %08x\n", events);
 		if (!(events & np->irqmask))
 			break;
 
@@ -3553,7 +3526,8 @@
 
 	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
-		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
+		writel(events, base + NvRegMSIXIrqStatus);
+		netdev_dbg(dev, "irq events: %08x\n", events);
 		if (!(events & np->irqmask))
 			break;
 
@@ -3617,10 +3591,10 @@
 
 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
 		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
-		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
+		writel(events & NVREG_IRQ_TIMER, base + NvRegIrqStatus);
 	} else {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
-		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
+		writel(events & NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
 	}
 	pci_push(base);
 	if (!(events & NVREG_IRQ_TIMER))
@@ -4566,7 +4540,7 @@
 	struct fe_priv *np = netdev_priv(dev);
 
 	/* update stats */
-	nv_do_stats_poll((unsigned long)dev);
+	nv_get_hw_stats(dev);
 
 	memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
 }
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a3ce3d4..7413497 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -192,6 +192,13 @@
 			 */
 			macvlan_broadcast(skb, port, src->dev,
 					  MACVLAN_MODE_VEPA);
+		else {
+			/* forward to original port. */
+			vlan = src;
+			ret = macvlan_broadcast_one(skb, vlan, eth, 0);
+			goto out;
+		}
+
 		return RX_HANDLER_PASS;
 	}
 
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7d60821..fae0fbd 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1057,7 +1057,8 @@
 	unsigned long		flags;
 	int retval;
 
-	skb_tx_timestamp(skb);
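+	/* skb can legitimately be NULL here: minidrivers such as cdc_ncm
+	 * pass a NULL skb to flush frames they have aggregated internally */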
+	if (skb)
+		skb_tx_timestamp(skb);
 
 	// some devices want funky USB-level framing, for
 	// win32 driver (usually) and/or hardware quirks
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index e0ab065..88279e3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -868,10 +868,6 @@
 	/* Do PA Calibration */
 	ar9002_hw_pa_cal(ah, true);
 
-	/* Do NF Calibration after DC offset and other calibrations */
-	ath9k_hw_loadnf(ah, chan);
-	ath9k_hw_start_nfcal(ah, true);
-
 	if (ah->caldata)
 		ah->caldata->nfcal_pending = true;
 
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 16851cb..12a730d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -908,12 +908,15 @@
 	int i;
 	bool restore;
 
-	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT) || !ah->caldata)
+	if (!ah->caldata)
 		return false;
 
 	hist = &ah->caldata->rtt_hist;
+	if (!hist->num_readings)
+		return false;
+
 	ar9003_hw_rtt_enable(ah);
-	ar9003_hw_rtt_set_mask(ah, 0x10);
+	ar9003_hw_rtt_set_mask(ah, 0x00);
 	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
 		if (!(ah->rxchainmask & (1 << i)))
 			continue;
@@ -1070,6 +1073,7 @@
 		if (is_reusable && (hist->num_readings < RTT_HIST_MAX)) {
 			u32 *table;
 
+			hist->num_readings++;
 			for (i = 0; i < AR9300_MAX_CHAINS; i++) {
 				if (!(ah->rxchainmask & (1 << i)))
 					continue;
@@ -1081,9 +1085,6 @@
 		ar9003_hw_rtt_disable(ah);
 	}
 
-	ath9k_hw_loadnf(ah, chan);
-	ath9k_hw_start_nfcal(ah, true);
-
 	/* Initialize list pointers */
 	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
 	ah->supp_cals = IQ_MISMATCH_CAL;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 2f4023e..4114fe75 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -572,14 +572,14 @@
 
 #define AR_PHY_TXGAIN_TABLE      (AR_SM_BASE + 0x300)
 
-#define AR_PHY_TX_IQCAL_CONTROL_0   (AR_SM_BASE + AR_SREV_9485(ah) ? \
-						 0x3c4 : 0x444)
-#define AR_PHY_TX_IQCAL_CONTROL_1   (AR_SM_BASE + AR_SREV_9485(ah) ? \
-						 0x3c8 : 0x448)
-#define AR_PHY_TX_IQCAL_START       (AR_SM_BASE + AR_SREV_9485(ah) ? \
-						 0x3c4 : 0x440)
-#define AR_PHY_TX_IQCAL_STATUS_B0   (AR_SM_BASE + AR_SREV_9485(ah) ? \
-						 0x3f0 : 0x48c)
+#define AR_PHY_TX_IQCAL_CONTROL_0   (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+						 0x3c4 : 0x444))
+#define AR_PHY_TX_IQCAL_CONTROL_1   (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+						 0x3c8 : 0x448))
+#define AR_PHY_TX_IQCAL_START       (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+						 0x3c4 : 0x440))
+#define AR_PHY_TX_IQCAL_STATUS_B0   (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+						 0x3f0 : 0x48c))
 #define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i)    (AR_SM_BASE + \
 					     (AR_SREV_9485(ah) ? \
 					      0x3d0 : 0x450) + ((_i) << 2))
@@ -651,7 +651,7 @@
 #define AR_SWITCH_TABLE_ALL_S (0)
 
 #define AR_PHY_65NM_CH0_THERM       (AR_SREV_9300(ah) ? 0x16290 :\
-					(AR_SREV_9485(ah) ? 0x1628c : 0x16294))
+					(AR_SREV_9462(ah) ? 0x16294 : 0x1628c))
 
 #define AR_PHY_65NM_CH0_THERM_LOCAL   0x80000000
 #define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
@@ -668,12 +668,12 @@
 #define AR_PHY_65NM_CH2_RXTX2       0x16904
 
 #define AR_CH0_TOP2		(AR_SREV_9300(ah) ? 0x1628c : \
-					(AR_SREV_9485(ah) ? 0x16284 : 0x16290))
+					(AR_SREV_9462(ah) ? 0x16290 : 0x16284))
 #define AR_CH0_TOP2_XPABIASLVL		0xf000
 #define AR_CH0_TOP2_XPABIASLVL_S	12
 
 #define AR_CH0_XTAL		(AR_SREV_9300(ah) ? 0x16294 : \
-					(AR_SREV_9485(ah) ? 0x16290 : 0x16298))
+					(AR_SREV_9462(ah) ? 0x16298 : 0x16290))
 #define AR_CH0_XTAL_CAPINDAC	0x7f000000
 #define AR_CH0_XTAL_CAPINDAC_S	24
 #define AR_CH0_XTAL_CAPOUTDAC	0x00fe0000
@@ -908,8 +908,8 @@
 #define AR_PHY_TPC_5_B1         (AR_SM1_BASE + 0x208)
 #define AR_PHY_TPC_6_B1         (AR_SM1_BASE + 0x20c)
 #define AR_PHY_TPC_11_B1        (AR_SM1_BASE + 0x220)
-#define AR_PHY_PDADC_TAB_1	(AR_SM1_BASE + (AR_SREV_AR9300(ah) ? \
-					0x240 : 0x280))
+#define AR_PHY_PDADC_TAB_1	(AR_SM1_BASE + (AR_SREV_AR9462(ah) ? \
+					0x280 : 0x240))
 #define AR_PHY_TPC_19_B1	(AR_SM1_BASE + 0x240)
 #define AR_PHY_TPC_19_B1_ALPHA_THERM		0xff
 #define AR_PHY_TPC_19_B1_ALPHA_THERM_S		0
@@ -931,10 +931,10 @@
 #define AR_PHY_AIC_SRAM_ADDR_B1	(AR_SM1_BASE + 0x5f0)
 #define AR_PHY_AIC_SRAM_DATA_B1	(AR_SM1_BASE + 0x5f4)
 
-#define AR_PHY_RTT_TABLE_SW_INTF_B(i)	(0x384 + (i) ? \
-					AR_SM1_BASE : AR_SM_BASE)
-#define AR_PHY_RTT_TABLE_SW_INTF_1_B(i)	(0x388 + (i) ? \
-					AR_SM1_BASE : AR_SM_BASE)
+#define AR_PHY_RTT_TABLE_SW_INTF_B(i)	(0x384 + ((i) ? \
+					AR_SM1_BASE : AR_SM_BASE))
+#define AR_PHY_RTT_TABLE_SW_INTF_1_B(i)	(0x388 + ((i) ? \
+					AR_SM1_BASE : AR_SM_BASE))
 /*
  * Channel 2 Register Map
  */
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 611ea6c..d16d029 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -521,7 +521,7 @@
 	{0x000160ac, 0x24611800},
 	{0x000160b0, 0x03284f3e},
 	{0x0001610c, 0x00170000},
-	{0x00016140, 0x10804008},
+	{0x00016140, 0x50804008},
 };
 
 static const u32 ar9485_1_1_mac_postamble[][5] = {
@@ -603,7 +603,7 @@
 
 static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
 	/* Addr        allmodes */
-	{0x00018c00, 0x10052e5e},
+	{0x00018c00, 0x18052e5e},
 	{0x00018c04, 0x000801d8},
 	{0x00018c08, 0x0000080c},
 };
@@ -776,7 +776,7 @@
 
 static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
 	/* Addr        allmodes */
-	{0x00018c00, 0x10013e5e},
+	{0x00018c00, 0x18013e5e},
 	{0x00018c04, 0x000801d8},
 	{0x00018c08, 0x0000080c},
 };
@@ -882,7 +882,7 @@
 
 static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
 	/* Addr        allmodes  */
-	{0x00018c00, 0x10012e5e},
+	{0x00018c00, 0x18012e5e},
 	{0x00018c04, 0x000801d8},
 	{0x00018c08, 0x0000080c},
 };
@@ -1021,7 +1021,7 @@
 
 static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
 	/* Addr        allmodes */
-	{0x00018c00, 0x10053e5e},
+	{0x00018c00, 0x18053e5e},
 	{0x00018c04, 0x000801d8},
 	{0x00018c08, 0x0000080c},
 };
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 4952ad8..2f91acc 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1725,6 +1725,9 @@
 	if (!ath9k_hw_init_cal(ah, chan))
 		return -EIO;
 
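+	/* do NF calibration after DC offset and the other calibrations */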
+	ath9k_hw_loadnf(ah, chan);
+	ath9k_hw_start_nfcal(ah, true);
+
 	ENABLE_REGWRITE_BUFFER(ah);
 
 	ath9k_hw_restore_chainmask(ah);
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index d209469..59472e16 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -296,7 +296,8 @@
 			super = (void *)skb->data;
 			txinfo->status.ampdu_len = super->s.rix;
 			txinfo->status.ampdu_ack_len = super->s.cnt;
-		} else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
+		} else if ((txinfo->flags & IEEE80211_TX_STAT_ACK) &&
+			   !(txinfo->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
 			/*
 			 * drop redundant tx_status reports:
 			 *
@@ -308,15 +309,17 @@
 			 *
 			 * 3. minstrel_ht is picky, it only accepts
 			 *    reports of frames with the TX_STATUS_AMPDU flag.
+			 *
+			 * 4. mac80211 is not particularly interested in
+			 *    feedback either [CTL_REQ_TX_STATUS not set]
 			 */
 
 			dev_kfree_skb_any(skb);
 			return;
 		} else {
 			/*
-			 * Frame has failed, but we want to keep it in
-			 * case it was lost due to a power-state
-			 * transition.
+			 * Either the frame transmission has failed or
+			 * mac80211 requested tx status.
 			 */
 		}
 	}
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index c73e860..58ea0e5 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -827,7 +827,6 @@
 #endif
 	return;
 drop:
-	b43dbg(dev->wl, "RX: Packet dropped\n");
 	dev_kfree_skb_any(skb);
 }
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index b247a56..001fdf1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1755,16 +1755,6 @@
 {
 	if (iwl_trans_check_stuck_queue(trans(priv), txq)) {
 		int ret;
-		if (txq == priv->shrd->cmd_queue) {
-			/*
-			 * validate command queue still working
-			 * by sending "ECHO" command
-			 */
-			if (!iwl_cmd_echo_test(priv))
-				return 0;
-			else
-				IWL_DEBUG_HC(priv, "echo testing fail\n");
-		}
 		ret = iwl_force_reset(priv, IWL_FW_RESET, false);
 		return (ret == -EAGAIN) ? 0 : 1;
 	}
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index f0c623a..1800029 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -446,10 +446,9 @@
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
 	err = pci_enable_msi(pdev);
-	if (err) {
-		dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed");
-		goto out_iounmap;
-	}
+	if (err)
+		dev_printk(KERN_ERR, &pdev->dev,
+			"pci_enable_msi failed (0x%x)", err);
 
 	/* TODO: Move this away, not needed if not MSI */
 	/* enable rfkill interrupt: hw bug w/a */
@@ -470,7 +469,6 @@
 
 out_disable_msi:
 	pci_disable_msi(pdev);
-out_iounmap:
 	pci_iounmap(pdev, pci_bus->hw_base);
 out_pci_release_regions:
 	pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 8e8c75c..da34110 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -407,6 +407,7 @@
 	struct iwl_queue *q = &txq->q;
 	enum dma_data_direction dma_dir;
 	unsigned long flags;
+	spinlock_t *lock;
 
 	if (!q->n_bd)
 		return;
@@ -414,19 +415,22 @@
 	/* In the command queue, all the TBs are mapped as BIDI
 	 * so unmap them as such.
 	 */
-	if (txq_id == trans->shrd->cmd_queue)
+	if (txq_id == trans->shrd->cmd_queue) {
 		dma_dir = DMA_BIDIRECTIONAL;
-	else
+		lock = &trans->hcmd_lock;
+	} else {
 		dma_dir = DMA_TO_DEVICE;
+		lock = &trans->shrd->sta_lock;
+	}
 
-	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+	spin_lock_irqsave(lock, flags);
 	while (q->write_ptr != q->read_ptr) {
 		/* The read_ptr needs to bound by q->n_window */
 		iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
 				    dma_dir);
 		q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
 	}
-	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 }
 
 /**
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index ff63782..4fcd653 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -728,15 +728,9 @@
 		le16_to_cpu(scan_cmd->hdr.size),
 		lbs_ret_scan, 0);
 
-	if (priv->scan_channel >= priv->scan_req->n_channels) {
+	if (priv->scan_channel >= priv->scan_req->n_channels)
 		/* Mark scan done */
-		if (priv->internal_scan)
-			kfree(priv->scan_req);
-		else
-			cfg80211_scan_done(priv->scan_req, false);
-
-		priv->scan_req = NULL;
-	}
+		lbs_scan_done(priv);
 
 	/* Restart network */
 	if (carrier)
@@ -774,6 +768,21 @@
 	lbs_deb_leave(LBS_DEB_CFG80211);
 }
 
+/*
+ * Clean up priv->scan_req.  Internally-triggered scan requests are freed
+ * directly; cfg80211-initiated requests are completed via cfg80211_scan_done().
+ */
+void lbs_scan_done(struct lbs_private *priv)
+{
+	WARN_ON(!priv->scan_req);
+
+	if (priv->internal_scan)
+		kfree(priv->scan_req);
+	else
+		cfg80211_scan_done(priv->scan_req, false);
+
+	priv->scan_req = NULL;
+}
+
 static int lbs_cfg_scan(struct wiphy *wiphy,
 	struct net_device *dev,
 	struct cfg80211_scan_request *request)
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index a02ee15..558168c 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -16,6 +16,7 @@
 void lbs_send_disconnect_notification(struct lbs_private *priv);
 void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
 
+void lbs_scan_done(struct lbs_private *priv);
 void lbs_scan_deinit(struct lbs_private *priv);
 int lbs_disconnect(struct lbs_private *priv, u16 reason);
 
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 4ae99a4..957681d 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -255,10 +255,8 @@
 
 	lbs_update_mcast(priv);
 	cancel_delayed_work_sync(&priv->scan_work);
-	if (priv->scan_req) {
-		cfg80211_scan_done(priv->scan_req, false);
-		priv->scan_req = NULL;
-	}
+	if (priv->scan_req)
+		lbs_scan_done(priv);
 
 	netif_carrier_off(priv->dev);
 
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index bbf3edd..5be4a39 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -509,15 +509,12 @@
 					    struct acpi_resource_dma *p)
 {
 	int i;
-	unsigned char map = 0, flags = 0;
-
-	if (p->channel_count == 0)
-		flags |= IORESOURCE_DISABLED;
+	unsigned char map = 0, flags;
 
 	for (i = 0; i < p->channel_count; i++)
 		map |= 1 << p->channels[i];
 
-	flags |= dma_flags(dev, p->type, p->bus_master, p->transfer);
+	flags = dma_flags(dev, p->type, p->bus_master, p->transfer);
 	pnp_register_dma_resource(dev, option_flags, map, flags);
 }
 
@@ -527,17 +524,14 @@
 {
 	int i;
 	pnp_irq_mask_t map;
-	unsigned char flags = 0;
-
-	if (p->interrupt_count == 0)
-		flags |= IORESOURCE_DISABLED;
+	unsigned char flags;
 
 	bitmap_zero(map.bits, PNP_IRQ_NR);
 	for (i = 0; i < p->interrupt_count; i++)
 		if (p->interrupts[i])
 			__set_bit(p->interrupts[i], map.bits);
 
-	flags |= irq_flags(p->triggering, p->polarity, p->sharable);
+	flags = irq_flags(p->triggering, p->polarity, p->sharable);
 	pnp_register_irq_resource(dev, option_flags, &map, flags);
 }
 
@@ -547,10 +541,7 @@
 {
 	int i;
 	pnp_irq_mask_t map;
-	unsigned char flags = 0;
-
-	if (p->interrupt_count == 0)
-		flags |= IORESOURCE_DISABLED;
+	unsigned char flags;
 
 	bitmap_zero(map.bits, PNP_IRQ_NR);
 	for (i = 0; i < p->interrupt_count; i++) {
@@ -564,7 +555,7 @@
 		}
 	}
 
-	flags |= irq_flags(p->triggering, p->polarity, p->sharable);
+	flags = irq_flags(p->triggering, p->polarity, p->sharable);
 	pnp_register_irq_resource(dev, option_flags, &map, flags);
 }
 
@@ -574,11 +565,8 @@
 {
 	unsigned char flags = 0;
 
-	if (io->address_length == 0)
-		flags |= IORESOURCE_DISABLED;
-
 	if (io->io_decode == ACPI_DECODE_16)
-		flags |= IORESOURCE_IO_16BIT_ADDR;
+		flags = IORESOURCE_IO_16BIT_ADDR;
 	pnp_register_port_resource(dev, option_flags, io->minimum, io->maximum,
 				   io->alignment, io->address_length, flags);
 }
@@ -587,13 +575,8 @@
 					unsigned int option_flags,
 					struct acpi_resource_fixed_io *io)
 {
-	unsigned char flags = 0;
-
-	if (io->address_length == 0)
-		flags |= IORESOURCE_DISABLED;
-
 	pnp_register_port_resource(dev, option_flags, io->address, io->address,
-				   0, io->address_length, flags | IORESOURCE_IO_FIXED);
+				   0, io->address_length, IORESOURCE_IO_FIXED);
 }
 
 static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
@@ -602,11 +585,8 @@
 {
 	unsigned char flags = 0;
 
-	if (p->address_length == 0)
-		flags |= IORESOURCE_DISABLED;
-
 	if (p->write_protect == ACPI_READ_WRITE_MEMORY)
-		flags |= IORESOURCE_MEM_WRITEABLE;
+		flags = IORESOURCE_MEM_WRITEABLE;
 	pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
 				  p->alignment, p->address_length, flags);
 }
@@ -617,11 +597,8 @@
 {
 	unsigned char flags = 0;
 
-	if (p->address_length == 0)
-		flags |= IORESOURCE_DISABLED;
-
 	if (p->write_protect == ACPI_READ_WRITE_MEMORY)
-		flags |= IORESOURCE_MEM_WRITEABLE;
+		flags = IORESOURCE_MEM_WRITEABLE;
 	pnp_register_mem_resource(dev, option_flags, p->minimum, p->maximum,
 				  p->alignment, p->address_length, flags);
 }
@@ -632,11 +609,8 @@
 {
 	unsigned char flags = 0;
 
-	if (p->address_length == 0)
-		flags |= IORESOURCE_DISABLED;
-
 	if (p->write_protect == ACPI_READ_WRITE_MEMORY)
-		flags |= IORESOURCE_MEM_WRITEABLE;
+		flags = IORESOURCE_MEM_WRITEABLE;
 	pnp_register_mem_resource(dev, option_flags, p->address, p->address,
 				  0, p->address_length, flags);
 }
@@ -656,19 +630,16 @@
 		return;
 	}
 
-	if (p->address_length == 0)
-		flags |= IORESOURCE_DISABLED;
-
 	if (p->resource_type == ACPI_MEMORY_RANGE) {
 		if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
-			flags |= IORESOURCE_MEM_WRITEABLE;
+			flags = IORESOURCE_MEM_WRITEABLE;
 		pnp_register_mem_resource(dev, option_flags, p->minimum,
 					  p->minimum, 0, p->address_length,
 					  flags);
 	} else if (p->resource_type == ACPI_IO_RANGE)
 		pnp_register_port_resource(dev, option_flags, p->minimum,
 					   p->minimum, 0, p->address_length,
-					   flags | IORESOURCE_IO_FIXED);
+					   IORESOURCE_IO_FIXED);
 }
 
 static __init void pnpacpi_parse_ext_address_option(struct pnp_dev *dev,
@@ -678,19 +649,16 @@
 	struct acpi_resource_extended_address64 *p = &r->data.ext_address64;
 	unsigned char flags = 0;
 
-	if (p->address_length == 0)
-		flags |= IORESOURCE_DISABLED;
-
 	if (p->resource_type == ACPI_MEMORY_RANGE) {
 		if (p->info.mem.write_protect == ACPI_READ_WRITE_MEMORY)
-			flags |= IORESOURCE_MEM_WRITEABLE;
+			flags = IORESOURCE_MEM_WRITEABLE;
 		pnp_register_mem_resource(dev, option_flags, p->minimum,
 					  p->minimum, 0, p->address_length,
 					  flags);
 	} else if (p->resource_type == ACPI_IO_RANGE)
 		pnp_register_port_resource(dev, option_flags, p->minimum,
 					   p->minimum, 0, p->address_length,
-					   flags | IORESOURCE_IO_FIXED);
+					   IORESOURCE_IO_FIXED);
 }
 
 struct acpipnp_parse_option_s {
diff --git a/drivers/staging/spectra/lld_mtd.c b/drivers/staging/spectra/lld_mtd.c
index 2bd3466..a9c309a 100644
--- a/drivers/staging/spectra/lld_mtd.c
+++ b/drivers/staging/spectra/lld_mtd.c
@@ -340,7 +340,7 @@
 		struct mtd_oob_ops ops;
 		int ret;
 
-		ops.mode = MTD_OOB_AUTO;
+		ops.mode = MTD_OPS_AUTO_OOB;
 		ops.datbuf = read_data;
 		ops.len = DeviceInfo.wPageDataSize;
 		ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
@@ -400,7 +400,7 @@
 		struct mtd_oob_ops ops;
 		int ret;
 
-		ops.mode = MTD_OOB_AUTO;
+		ops.mode = MTD_OPS_AUTO_OOB;
 		ops.datbuf = write_data;
 		ops.len = DeviceInfo.wPageDataSize;
 		ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
@@ -473,7 +473,7 @@
 		struct mtd_oob_ops ops;
 		int ret;
 
-		ops.mode = MTD_OOB_AUTO;
+		ops.mode = MTD_OPS_AUTO_OOB;
 		ops.datbuf = NULL;
 		ops.len = 0;
 		ops.oobbuf = read_data;
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 708f8e9..dd9a5743 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -678,10 +678,10 @@
 		return;
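+	/* queue on the freezable workqueue so that polling stops while the
+	 * system (and the sensor hardware) is suspended */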
 
 	if (delay > 1000)
-		schedule_delayed_work(&(tz->poll_queue),
+		queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
 				      round_jiffies(msecs_to_jiffies(delay)));
 	else
-		schedule_delayed_work(&(tz->poll_queue),
+		queue_delayed_work(system_freezable_wq, &(tz->poll_queue),
 				      msecs_to_jiffies(delay));
 }
 
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index 7430bc3..b6b2d18 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -852,7 +852,7 @@
 	 * find index to use:
 	 * see if this vterm id matches one registered for console.
 	 */
-	for (i = 0; i < MAX_NR_HVC_CONSOLES; i++)
+	for (i=0; i < MAX_NR_HVC_CONSOLES; i++)
 		if (vtermnos[i] == hp->vtermno &&
 		    cons_ops[i] == hp->ops)
 			break;
@@ -862,13 +862,9 @@
 		i = ++last_hvc;
 
 	hp->index = i;
-	hvc_console.index = i;
-	vtermnos[i] = vtermno;
-	cons_ops[i] = ops;
 
 	list_add_tail(&(hp->next), &hvc_structs);
 	spin_unlock(&hvc_structs_lock);
-	register_console(&hvc_console);
 
 	return hp;
 }
@@ -879,7 +875,6 @@
 	unsigned long flags;
 	struct tty_struct *tty;
 
-	unregister_console(&hvc_console);
 	spin_lock_irqsave(&hp->lock, flags);
 	tty = tty_kref_get(hp->tty);
 
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 7b38512..ced26c8 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -28,6 +28,7 @@
 #include <linux/console.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
+#include <linux/export.h>
 
 #include <asm/hvconsole.h>
 #include <asm/prom.h>
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 1779338..a767884 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -94,8 +94,8 @@
 #define inc_totalhigh_pages() (totalhigh_pages++)
 #define dec_totalhigh_pages() (totalhigh_pages--)
 #else
-#define inc_totalhigh_pages() do {} while(0)
-#define dec_totalhigh_pages() do {} while(0)
+#define inc_totalhigh_pages() do {} while (0)
+#define dec_totalhigh_pages() do {} while (0)
 #endif
 
 /* List of ballooned pages, threaded through the mem_map array. */
@@ -155,8 +155,7 @@
 	if (PageHighMem(page)) {
 		balloon_stats.balloon_high--;
 		inc_totalhigh_pages();
-	}
-	else
+	} else
 		balloon_stats.balloon_low--;
 
 	totalram_pages++;
@@ -423,7 +422,7 @@
 				(unsigned long)__va(pfn << PAGE_SHIFT),
 				__pte_ma(0), 0);
 			BUG_ON(ret);
-                }
+		}
 
 	}
 
@@ -508,7 +507,7 @@
 int alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem)
 {
 	int pgno = 0;
-	struct page* page;
+	struct page *page;
 	mutex_lock(&balloon_mutex);
 	while (pgno < nr_pages) {
 		page = balloon_retrieve(highmem);
@@ -541,7 +540,7 @@
  * @nr_pages: Number of pages
  * @pages: pages to return
  */
-void free_xenballooned_pages(int nr_pages, struct page** pages)
+void free_xenballooned_pages(int nr_pages, struct page **pages)
 {
 	int i;
 
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0eb8a57c..6e075cd 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -85,8 +85,7 @@
  *    IPI - IPI vector
  *    EVTCHN -
  */
-struct irq_info
-{
+struct irq_info {
 	struct list_head list;
 	enum xen_irq_type type;	/* type */
 	unsigned irq;
@@ -282,9 +281,9 @@
 					   struct shared_info *sh,
 					   unsigned int idx)
 {
-	return (sh->evtchn_pending[idx] &
+	return sh->evtchn_pending[idx] &
 		per_cpu(cpu_evtchn_mask, cpu)[idx] &
-		~sh->evtchn_mask[idx]);
+		~sh->evtchn_mask[idx];
 }
 
 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
@@ -1180,7 +1179,7 @@
 	int cpu = get_cpu();
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
- 	unsigned count;
+	unsigned count;
 
 	do {
 		unsigned long pending_words;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 880798a..3987132 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -193,9 +193,8 @@
 
 	atomic_sub(map->count, &pages_mapped);
 
-	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
+	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
 		notify_remote_via_evtchn(map->notify.event);
-	}
 
 	if (map->pages) {
 		if (!use_ptemod)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 8c71ab80..bf1c094 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -193,7 +193,7 @@
 
 	nflags = shared[ref].flags;
 
-	return (nflags & (GTF_reading|GTF_writing));
+	return nflags & (GTF_reading|GTF_writing);
 }
 EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
 
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 6605707..b84bf0b 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -116,7 +116,7 @@
 			&manage_pci_ext);
 	} else {
 		struct physdev_manage_pci manage_pci = {
-			.bus 	= pci_dev->bus->number,
+			.bus	= pci_dev->bus->number,
 			.devfn	= pci_dev->devfn,
 		};
 
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 5c9dc43..9cc2259 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -50,11 +50,6 @@
 
 static int register_balloon(struct sys_device *sysdev);
 
-static struct xenbus_watch target_watch =
-{
-	.node = "memory/target"
-};
-
 /* React to a change in the target key */
 static void watch_target(struct xenbus_watch *watch,
 			 const char **vec, unsigned int len)
@@ -73,6 +68,11 @@
 	 */
 	balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
 }
+static struct xenbus_watch target_watch = {
+	.node = "memory/target",
+	.callback = watch_target,
+};
 
 static int balloon_init_watcher(struct notifier_block *notifier,
 				unsigned long event,
@@ -87,7 +87,9 @@
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block xenstore_notifier;
+static struct notifier_block xenstore_notifier = {
+	.notifier_call = balloon_init_watcher,
+};
 
 static int __init balloon_init(void)
 {
@@ -100,9 +102,6 @@
 
 	register_xen_selfballooning(&balloon_sysdev);
 
-	target_watch.callback = watch_target;
-	xenstore_notifier.notifier_call = balloon_init_watcher;
-
 	register_xenstore_notifier(&xenstore_notifier);
 
 	return 0;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 0e867ee..1b178c6 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -310,8 +310,7 @@
 }
 EXPORT_SYMBOL_GPL(xenbus_unregister_driver);
 
-struct xb_find_info
-{
+struct xb_find_info {
 	struct xenbus_device *dev;
 	const char *nodename;
 };
@@ -640,7 +639,7 @@
 EXPORT_SYMBOL_GPL(xenbus_dev_cancel);
 
 /* A flag to determine if xenstored is 'ready' (i.e. has started) */
-int xenstored_ready = 0;
+int xenstored_ready;
 
 
 int register_xenstore_notifier(struct notifier_block *nb)
@@ -771,7 +770,7 @@
 	proc_mkdir("xen", NULL);
 #endif
 
- out_error:
+out_error:
 	return err;
 }
 
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index b814935..9b1de4e 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -36,8 +36,7 @@
 
 #define XEN_BUS_ID_SIZE			20
 
-struct xen_bus_type
-{
+struct xen_bus_type {
 	char *root;
 	unsigned int levels;
 	int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 40e6ac0..c0ddfd2 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -7,6 +7,7 @@
 	   extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
 	   extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
-	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
+	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
+	   reada.o backref.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index eb159aa..89b156d 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -59,22 +59,19 @@
 		if (!value)
 			return ERR_PTR(-ENOMEM);
 		size = __btrfs_getxattr(inode, name, value, size);
-		if (size > 0) {
-			acl = posix_acl_from_xattr(value, size);
-			if (IS_ERR(acl)) {
-				kfree(value);
-				return acl;
-			}
-			set_cached_acl(inode, type, acl);
-		}
-		kfree(value);
+	}
+	if (size > 0) {
+		acl = posix_acl_from_xattr(value, size);
 	} else if (size == -ENOENT || size == -ENODATA || size == 0) {
 		/* FIXME, who returns -ENOENT?  I think nobody */
 		acl = NULL;
-		set_cached_acl(inode, type, acl);
 	} else {
 		acl = ERR_PTR(-EIO);
 	}
+	kfree(value);
+
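+	/* cache positive and negative (NULL) lookups alike, but never an
+	 * error pointer */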
+	if (!IS_ERR(acl))
+		set_cached_acl(inode, type, acl);
 
 	return acl;
 }
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
new file mode 100644
index 0000000..8855aad
--- /dev/null
+++ b/fs/btrfs/backref.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright (C) 2011 STRATO.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include "ctree.h"
+#include "disk-io.h"
+#include "backref.h"
+
+struct __data_ref {
+	struct list_head list;
+	u64 inum;
+	u64 root;
+	u64 extent_data_item_offset;
+};
+
+struct __shared_ref {
+	struct list_head list;
+	u64 disk_byte;
+};
+
+static int __inode_info(u64 inum, u64 ioff, u8 key_type,
+			struct btrfs_root *fs_root, struct btrfs_path *path,
+			struct btrfs_key *found_key)
+{
+	int ret;
+	struct btrfs_key key;
+	struct extent_buffer *eb;
+
+	key.type = key_type;
+	key.objectid = inum;
+	key.offset = ioff;
+
+	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
+	if (ret < 0)
+		return ret;
+
+	eb = path->nodes[0];
+	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
+		ret = btrfs_next_leaf(fs_root, path);
+		if (ret)
+			return ret;
+		eb = path->nodes[0];
+	}
+
+	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
+	if (found_key->type != key.type || found_key->objectid != key.objectid)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * this makes the path point to (inum INODE_ITEM ioff)
+ */
+int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
+			struct btrfs_path *path)
+{
+	struct btrfs_key key;
+	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
+				&key);
+}
+
+static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
+				struct btrfs_path *path,
+				struct btrfs_key *found_key)
+{
+	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
+				found_key);
+}
+
+/*
+ * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements
+ * of the path are separated by '/' and the path is guaranteed to be
+ * 0-terminated. the path is only given within the current file system.
+ * Therefore, it never starts with a '/'. the caller is responsible for
+ * providing "size" bytes in "dest". the dest buffer will be filled backwards.
+ * finally, the start point of the resulting string is returned. this pointer
+ * is within dest, normally.
+ * in case the path buffer would overflow, the pointer is decremented further
+ * as if output was written to the buffer, though no more output is actually
+ * generated. that way, the caller can determine how much space would be
+ * required for the path to fit into the buffer. in that case, the returned
+ * value will be smaller than dest. callers must check this!
+ */
+static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+				struct btrfs_inode_ref *iref,
+				struct extent_buffer *eb_in, u64 parent,
+				char *dest, u32 size)
+{
+	u32 len;
+	int slot;
+	u64 next_inum;
+	int ret;
+	s64 bytes_left = size - 1;
+	struct extent_buffer *eb = eb_in;
+	struct btrfs_key found_key;
+
+	if (bytes_left >= 0)
+		dest[bytes_left] = '\0';
+
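+	/* walk the inode_ref chain towards the fs root, writing each name
+	 * component in front of those already copied; bytes_left may go
+	 * negative to tell the caller how much space was actually needed */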
+	while (1) {
+		len = btrfs_inode_ref_name_len(eb, iref);
+		bytes_left -= len;
+		if (bytes_left >= 0)
+			read_extent_buffer(eb, dest + bytes_left,
+						(unsigned long)(iref + 1), len);
+		if (eb != eb_in)
+			free_extent_buffer(eb);
+		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
+		if (ret)
+			break;
+		next_inum = found_key.offset;
+
+		/* regular exit ahead */
+		if (parent == next_inum)
+			break;
+
+		slot = path->slots[0];
+		eb = path->nodes[0];
+		/* make sure we can use eb after releasing the path */
+		if (eb != eb_in)
+			atomic_inc(&eb->refs);
+		btrfs_release_path(path);
+
+		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
+		parent = next_inum;
+		--bytes_left;
+		if (bytes_left >= 0)
+			dest[bytes_left] = '/';
+	}
+
+	btrfs_release_path(path);
+
+	if (ret)
+		return ERR_PTR(ret);
+
+	return dest + bytes_left;
+}
+
+/*
+ * this makes the path point to (logical EXTENT_ITEM *)
+ * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
+ * tree blocks and <0 on error.
+ */
+int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
+			struct btrfs_path *path, struct btrfs_key *found_key)
+{
+	int ret;
+	u64 flags;
+	u32 item_size;
+	struct extent_buffer *eb;
+	struct btrfs_extent_item *ei;
+	struct btrfs_key key;
+
+	key.type = BTRFS_EXTENT_ITEM_KEY;
+	key.objectid = logical;
+	key.offset = (u64)-1;
+
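+	/* search past the last possible extent item at this address, then
+	 * step back to the preceding EXTENT_ITEM */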
+	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
+	if (ret < 0)
+		return ret;
+	ret = btrfs_previous_item(fs_info->extent_root, path,
+					0, BTRFS_EXTENT_ITEM_KEY);
+	if (ret < 0)
+		return ret;
+
+	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
+	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
+	    found_key->objectid > logical ||
+	    found_key->objectid + found_key->offset <= logical)
+		return -ENOENT;
+
+	eb = path->nodes[0];
+	item_size = btrfs_item_size_nr(eb, path->slots[0]);
+	BUG_ON(item_size < sizeof(*ei));
+
+	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
+	flags = btrfs_extent_flags(eb, ei);
+
+	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
+	if (flags & BTRFS_EXTENT_FLAG_DATA)
+		return BTRFS_EXTENT_FLAG_DATA;
+
+	return -EIO;
+}
+
+/*
+ * helper function to iterate extent inline refs. ptr must point to a 0 value
+ * for the first call and may be modified. it is used to track state.
+ * if more refs exist, 0 is returned and the next call to
+ * __get_extent_inline_ref must pass the modified ptr parameter to get the
+ * next ref. after the last ref was processed, 1 is returned.
+ * returns <0 on error
+ */
+static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
+				struct btrfs_extent_item *ei, u32 item_size,
+				struct btrfs_extent_inline_ref **out_eiref,
+				int *out_type)
+{
+	unsigned long end;
+	u64 flags;
+	struct btrfs_tree_block_info *info;
+
+	if (!*ptr) {
+		/* first call */
+		flags = btrfs_extent_flags(eb, ei);
+		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+			info = (struct btrfs_tree_block_info *)(ei + 1);
+			*out_eiref =
+				(struct btrfs_extent_inline_ref *)(info + 1);
+		} else {
+			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
+		}
+		*ptr = (unsigned long)*out_eiref;
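+		/* an extent item may carry no inline refs at all */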
+		if ((void *)*ptr >= (void *)ei + item_size)
+			return -ENOENT;
+	}
+
+	end = (unsigned long)ei + item_size;
+	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
+	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
+
+	*ptr += btrfs_extent_inline_ref_size(*out_type);
+	WARN_ON(*ptr > end);
+	if (*ptr == end)
+		return 1; /* last */
+
+	return 0;
+}
+
+/*
+ * reads the tree block backref for an extent. tree level and root are returned
+ * through out_level and out_root. ptr must point to a 0 value for the first
+ * call and may be modified (see __get_extent_inline_ref comment).
+ * returns 0 if data was provided, 1 if there was no more data to provide or
+ * <0 on error.
+ */
+int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+				struct btrfs_extent_item *ei, u32 item_size,
+				u64 *out_root, u8 *out_level)
+{
+	int ret;
+	int type;
+	struct btrfs_tree_block_info *info;
+	struct btrfs_extent_inline_ref *eiref;
+
+	if (*ptr == (unsigned long)-1)
+		return 1;
+
+	while (1) {
+		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
+						&eiref, &type);
+		if (ret < 0)
+			return ret;
+
+		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
+		    type == BTRFS_SHARED_BLOCK_REF_KEY)
+			break;
+
+		if (ret == 1)
+			return 1;
+	}
+
+	/* we can treat both ref types equally here */
+	info = (struct btrfs_tree_block_info *)(ei + 1);
+	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
+	*out_level = btrfs_tree_block_level(eb, info);
+
+	if (ret == 1)
+		*ptr = (unsigned long)-1;
+
+	return 0;
+}
+
+static int __data_list_add(struct list_head *head, u64 inum,
+				u64 extent_data_item_offset, u64 root)
+{
+	struct __data_ref *ref;
+
+	ref = kmalloc(sizeof(*ref), GFP_NOFS);
+	if (!ref)
+		return -ENOMEM;
+
+	ref->inum = inum;
+	ref->extent_data_item_offset = extent_data_item_offset;
+	ref->root = root;
+	list_add_tail(&ref->list, head);
+
+	return 0;
+}
+
+static int __data_list_add_eb(struct list_head *head, struct extent_buffer *eb,
+				struct btrfs_extent_data_ref *dref)
+{
+	return __data_list_add(head, btrfs_extent_data_ref_objectid(eb, dref),
+				btrfs_extent_data_ref_offset(eb, dref),
+				btrfs_extent_data_ref_root(eb, dref));
+}
+
+static int __shared_list_add(struct list_head *head, u64 disk_byte)
+{
+	struct __shared_ref *ref;
+
+	ref = kmalloc(sizeof(*ref), GFP_NOFS);
+	if (!ref)
+		return -ENOMEM;
+
+	ref->disk_byte = disk_byte;
+	list_add_tail(&ref->list, head);
+
+	return 0;
+}
+
+static int __iter_shared_inline_ref_inodes(struct btrfs_fs_info *fs_info,
+					   u64 logical, u64 inum,
+					   u64 extent_data_item_offset,
+					   u64 extent_offset,
+					   struct btrfs_path *path,
+					   struct list_head *data_refs,
+					   iterate_extent_inodes_t *iterate,
+					   void *ctx)
+{
+	u64 ref_root;
+	u32 item_size;
+	struct btrfs_key key;
+	struct extent_buffer *eb;
+	struct btrfs_extent_item *ei;
+	struct btrfs_extent_inline_ref *eiref;
+	struct __data_ref *ref;
+	int ret;
+	int type;
+	int last;
+	unsigned long ptr = 0;
+
+	WARN_ON(!list_empty(data_refs));
+	ret = extent_from_logical(fs_info, logical, path, &key);
+	if (ret & BTRFS_EXTENT_FLAG_DATA)
+		ret = -EIO;
+	if (ret < 0)
+		goto out;
+
+	eb = path->nodes[0];
+	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
+	item_size = btrfs_item_size_nr(eb, path->slots[0]);
+
+	ret = 0;
+	ref_root = 0;
+	/*
+	 * as done in iterate_extent_inodes, we first build a list of refs to
+	 * iterate, then free the path and then iterate them to avoid deadlocks.
+	 */
+	do {
+		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
+						&eiref, &type);
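+	/* the command queue is protected by hcmd_lock, the data queues by
+	 * the station lock; hold the matching one while draining */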
+		if (last < 0) {
+			ret = last;
+			goto out;
+		}
+		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
+		    type == BTRFS_SHARED_BLOCK_REF_KEY) {
+			ref_root = btrfs_extent_inline_ref_offset(eb, eiref);
+			ret = __data_list_add(data_refs, inum,
+						extent_data_item_offset,
+						ref_root);
+		}
+	} while (!ret && !last);
+
+	btrfs_release_path(path);
+
+	if (ref_root == 0) {
+		printk(KERN_ERR "btrfs: failed to find tree block ref "
+			"for shared data backref %llu\n", logical);
+		WARN_ON(1);
+		ret = -EIO;
+	}
+
+out:
+	while (!list_empty(data_refs)) {
+		ref = list_first_entry(data_refs, struct __data_ref, list);
+		list_del(&ref->list);
+		if (!ret)
+			ret = iterate(ref->inum, extent_offset +
+					ref->extent_data_item_offset,
+					ref->root, ctx);
+		kfree(ref);
+	}
+
+	return ret;
+}
+
+static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
+				    u64 logical, u64 orig_extent_item_objectid,
+				    u64 extent_offset, struct btrfs_path *path,
+				    struct list_head *data_refs,
+				    iterate_extent_inodes_t *iterate,
+				    void *ctx)
+{
+	u64 disk_byte;
+	struct btrfs_key key;
+	struct btrfs_file_extent_item *fi;
+	struct extent_buffer *eb;
+	int slot;
+	int nritems;
+	int ret;
+	int found = 0;
+
+	eb = read_tree_block(fs_info->tree_root, logical,
+				fs_info->tree_root->leafsize, 0);
+	if (!eb)
+		return -EIO;
+
+	/*
+	 * from the shared data ref, we only have the leaf but we need
+	 * the key. thus, we must check every item for a reference to
+	 * our extent item; there may be more than one.
+	 */
+	nritems = btrfs_header_nritems(eb);
+	for (slot = 0; slot < nritems; ++slot) {
+		btrfs_item_key_to_cpu(eb, &key, slot);
+		if (key.type != BTRFS_EXTENT_DATA_KEY)
+			continue;
+		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+		if (!fi) {
+			free_extent_buffer(eb);
+			return -EIO;
+		}
+		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+		if (disk_byte != orig_extent_item_objectid) {
+			if (found)
+				break;
+			else
+				continue;
+		}
+		++found;
+		ret = __iter_shared_inline_ref_inodes(fs_info, logical,
+							key.objectid,
+							key.offset,
+							extent_offset, path,
+							data_refs,
+							iterate, ctx);
+		if (ret)
+			break;
+	}
+
+	if (!found) {
+		printk(KERN_ERR "btrfs: failed to follow shared data backref "
+			"to parent %llu\n", logical);
+		WARN_ON(1);
+		ret = -EIO;
+	}
+
+	free_extent_buffer(eb);
+	return ret;
+}
+
+/*
+ * calls iterate() for every inode that references the extent identified by
+ * the given parameters. the path passed in is used for the lookups and is
+ * returned released.
+ * when the iterator function returns a non-zero value, iteration stops.
+ */
+int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
+				struct btrfs_path *path,
+				u64 extent_item_objectid,
+				u64 extent_offset,
+				iterate_extent_inodes_t *iterate, void *ctx)
+{
+	unsigned long ptr = 0;
+	int last;
+	int ret;
+	int type;
+	u64 logical;
+	u32 item_size;
+	struct btrfs_extent_inline_ref *eiref;
+	struct btrfs_extent_data_ref *dref;
+	struct extent_buffer *eb;
+	struct btrfs_extent_item *ei;
+	struct btrfs_key key;
+	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
+	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
+	struct __data_ref *ref_d;
+	struct __shared_ref *ref_s;
+
+	eb = path->nodes[0];
+	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
+	item_size = btrfs_item_size_nr(eb, path->slots[0]);
+
+	/* first we iterate the inline refs, ... */
+	do {
+		last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
+						&eiref, &type);
+		if (last == -ENOENT) {
+			ret = 0;
+			break;
+		}
+		if (last < 0) {
+			ret = last;
+			break;
+		}
+
+		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
+			dref = (struct btrfs_extent_data_ref *)(&eiref->offset);
+			ret = __data_list_add_eb(&data_refs, eb, dref);
+		} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
+			logical = btrfs_extent_inline_ref_offset(eb, eiref);
+			ret = __shared_list_add(&shared_refs, logical);
+		}
+	} while (!ret && !last);
+
+	/* ... then we proceed to in-tree references and ... */
+	while (!ret) {
+		++path->slots[0];
+		if (path->slots[0] >= btrfs_header_nritems(eb)) {
+			ret = btrfs_next_leaf(fs_info->extent_root, path);
+			if (ret) {
+				if (ret == 1)
+					ret = 0; /* we're done */
+				break;
+			}
+			eb = path->nodes[0];
+		}
+		btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
+		if (key.objectid != extent_item_objectid)
+			break;
+		if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
+			dref = btrfs_item_ptr(eb, path->slots[0],
+						struct btrfs_extent_data_ref);
+			ret = __data_list_add_eb(&data_refs, eb, dref);
+		} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
+			ret = __shared_list_add(&shared_refs, key.offset);
+		}
+	}
+
+	btrfs_release_path(path);
+
+	/*
+	 * ... only at the very end we can process the refs we found. this is
+	 * because the iterator function we call is allowed to make tree lookups
+	 * and we have to avoid deadlocks. additionally, we need more tree
+	 * lookups ourselves for shared data refs.
+	 */
+	while (!list_empty(&data_refs)) {
+		ref_d = list_first_entry(&data_refs, struct __data_ref, list);
+		list_del(&ref_d->list);
+		if (!ret)
+			ret = iterate(ref_d->inum, extent_offset +
+					ref_d->extent_data_item_offset,
+					ref_d->root, ctx);
+		kfree(ref_d);
+	}
+
+	while (!list_empty(&shared_refs)) {
+		ref_s = list_first_entry(&shared_refs, struct __shared_ref,
+					list);
+		list_del(&ref_s->list);
+		if (!ret)
+			ret = __iter_shared_inline_ref(fs_info,
+							ref_s->disk_byte,
+							extent_item_objectid,
+							extent_offset, path,
+							&data_refs,
+							iterate, ctx);
+		kfree(ref_s);
+	}
+
+	return ret;
+}
+
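+/*
+ * Editor's sketch (not part of the original patch): a minimal
+ * iterate_extent_inodes_t callback that counts the inodes referencing an
+ * extent. The names count_ctx/count_inodes are hypothetical and only
+ * illustrate how the iterator above is driven.
+ */
+#if 0
+struct count_ctx {
+	u64 inodes;
+};
+
+static int count_inodes(u64 inum, u64 offset, u64 root, void *ctx)
+{
+	struct count_ctx *cnt = ctx;
+
+	++cnt->inodes;
+	return 0;	/* returning non-zero stops the iteration */
+}
+
+/* with path positioned on the extent item, e.g. by extent_from_logical():
+ *	ret = iterate_extent_inodes(fs_info, path, extent_item_objectid,
+ *				    extent_offset, count_inodes, &cnt);
+ */
+#endif
+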
+int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
+				struct btrfs_path *path,
+				iterate_extent_inodes_t *iterate, void *ctx)
+{
+	int ret;
+	u64 offset;
+	struct btrfs_key found_key;
+
+	ret = extent_from_logical(fs_info, logical, path,
+					&found_key);
+	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+		ret = -EINVAL;
+	if (ret < 0)
+		return ret;
+
+	offset = logical - found_key.objectid;
+	ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
+					offset, iterate, ctx);
+
+	return ret;
+}
+
+static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
+				struct btrfs_path *path,
+				iterate_irefs_t *iterate, void *ctx)
+{
+	int ret;
+	int slot;
+	u32 cur;
+	u32 len;
+	u32 name_len;
+	u64 parent = 0;
+	int found = 0;
+	struct extent_buffer *eb;
+	struct btrfs_item *item;
+	struct btrfs_inode_ref *iref;
+	struct btrfs_key found_key;
+
+	while (1) {
+		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
+					&found_key);
+		if (ret < 0)
+			break;
+		if (ret) {
+			ret = found ? 0 : -ENOENT;
+			break;
+		}
+		++found;
+
+		parent = found_key.offset;
+		slot = path->slots[0];
+		eb = path->nodes[0];
+		/* make sure we can use eb after releasing the path */
+		atomic_inc(&eb->refs);
+		btrfs_release_path(path);
+
+		item = btrfs_item_nr(eb, slot);
+		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
+
+		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
+			name_len = btrfs_inode_ref_name_len(eb, iref);
+			/* path must be released before calling iterate()! */
+			ret = iterate(parent, iref, eb, ctx);
+			if (ret) {
+				free_extent_buffer(eb);
+				break;
+			}
+			len = sizeof(*iref) + name_len;
+			iref = (struct btrfs_inode_ref *)((char *)iref + len);
+		}
+		free_extent_buffer(eb);
+	}
+
+	btrfs_release_path(path);
+
+	return ret;
+}
+
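+/*
+ * Editor's note: an inode ref item packs several entries back to back, each
+ * a struct btrfs_inode_ref header immediately followed by name_len bytes of
+ * name; that is why the loop above advances by sizeof(*iref) + name_len.
+ * A sketch of an iterate_irefs_t callback that copies the name out of the
+ * extent buffer (print_iref is a hypothetical example, not part of the
+ * patch):
+ */
+#if 0
+static int print_iref(u64 parent, struct btrfs_inode_ref *iref,
+			struct extent_buffer *eb, void *ctx)
+{
+	char name[BTRFS_NAME_LEN];
+	u32 name_len = btrfs_inode_ref_name_len(eb, iref);
+
+	/* the name starts right after the fixed-size header */
+	read_extent_buffer(eb, name, (unsigned long)(iref + 1), name_len);
+	printk(KERN_DEBUG "btrfs: parent %llu name %.*s\n",
+		(unsigned long long)parent, (int)name_len, name);
+	return 0;
+}
+#endif
+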
+/*
+ * returns 0 if the path could be dumped (possibly truncated)
+ * returns <0 in case of an error
+ */
+static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
+				struct extent_buffer *eb, void *ctx)
+{
+	struct inode_fs_paths *ipath = ctx;
+	char *fspath;
+	char *fspath_min;
+	int i = ipath->fspath->elem_cnt;
+	const int s_ptr = sizeof(char *);
+	u32 bytes_left;
+
+	bytes_left = ipath->fspath->bytes_left > s_ptr ?
+					ipath->fspath->bytes_left - s_ptr : 0;
+
+	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
+	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
+				inum, fspath_min, bytes_left);
+	if (IS_ERR(fspath))
+		return PTR_ERR(fspath);
+
+	if (fspath > fspath_min) {
+		ipath->fspath->val[i] = (u64)fspath;
+		++ipath->fspath->elem_cnt;
+		ipath->fspath->bytes_left = fspath - fspath_min;
+	} else {
+		++ipath->fspath->elem_missed;
+		ipath->fspath->bytes_missing += fspath_min - fspath;
+		ipath->fspath->bytes_left = 0;
+	}
+
+	return 0;
+}
+
+/*
+ * this dumps all file system paths to the inode into the ipath struct, provided
+ * it has been created large enough. each path is zero-terminated and accessed
+ * from ipath->fspath->val[i].
+ * when it returns, ipath->fspath->elem_cnt paths are available in
+ * ipath->fspath->val[]. when the allocated space wasn't sufficient, the
+ * number of missed paths is recorded in ipath->fspath->elem_missed; otherwise,
+ * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
+ * have been needed to return all paths.
+ */
+int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
+{
+	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
+				inode_to_path, ipath);
+}
+
+/*
+ * allocates space for a data container large enough to hold total_bytes.
+ * note that the space usable for actual data is
+ * total_bytes - sizeof(struct btrfs_data_container).
+ */
+struct btrfs_data_container *init_data_container(u32 total_bytes)
+{
+	struct btrfs_data_container *data;
+	size_t alloc_bytes;
+
+	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
+	data = kmalloc(alloc_bytes, GFP_NOFS);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	if (total_bytes >= sizeof(*data)) {
+		data->bytes_left = total_bytes - sizeof(*data);
+		data->bytes_missing = 0;
+	} else {
+		data->bytes_missing = sizeof(*data) - total_bytes;
+		data->bytes_left = 0;
+	}
+
+	data->elem_cnt = 0;
+	data->elem_missed = 0;
+
+	return data;
+}
+
+/*
+ * allocates space to return multiple file system paths for an inode.
+ * total_bytes to allocate are passed, note that space usable for actual path
+ * information will be total_bytes - sizeof(struct btrfs_data_container).
+ * the returned pointer must be freed with free_ipath() in the end.
+ */
+struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
+					struct btrfs_path *path)
+{
+	struct inode_fs_paths *ifp;
+	struct btrfs_data_container *fspath;
+
+	fspath = init_data_container(total_bytes);
+	if (IS_ERR(fspath))
+		return (void *)fspath;
+
+	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
+	if (!ifp) {
+		kfree(fspath);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ifp->btrfs_path = path;
+	ifp->fspath = fspath;
+	ifp->fs_root = fs_root;
+
+	return ifp;
+}
+
+void free_ipath(struct inode_fs_paths *ipath)
+{
+	/* also free the data container allocated in init_ipath() */
+	kfree(ipath->fspath);
+	kfree(ipath);
+}
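+
+/*
+ * Editor's sketch of the intended call sequence for the ipath API above,
+ * roughly what an ioctl handler would do. The buffer size and the missing
+ * error handling are illustrative only.
+ */
+#if 0
+	struct inode_fs_paths *ipath;
+	u32 i;
+
+	ipath = init_ipath(4096, fs_root, path);
+	if (IS_ERR(ipath))
+		return PTR_ERR(ipath);
+	ret = paths_from_inode(inum, ipath);
+	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
+		printk(KERN_DEBUG "path %u: %s\n", i,
+			(char *)(unsigned long)ipath->fspath->val[i]);
+	free_ipath(ipath);
+#endif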
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
new file mode 100644
index 0000000..9261883
--- /dev/null
+++ b/fs/btrfs/backref.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2011 STRATO.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_BACKREF__
+#define __BTRFS_BACKREF__
+
+#include "ioctl.h"
+
+struct inode_fs_paths {
+	struct btrfs_path		*btrfs_path;
+	struct btrfs_root		*fs_root;
+	struct btrfs_data_container	*fspath;
+};
+
+typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
+		void *ctx);
+typedef int (iterate_irefs_t)(u64 parent, struct btrfs_inode_ref *iref,
+				struct extent_buffer *eb, void *ctx);
+
+int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
+			struct btrfs_path *path);
+
+int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
+			struct btrfs_path *path, struct btrfs_key *found_key);
+
+int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
+				struct btrfs_extent_item *ei, u32 item_size,
+				u64 *out_root, u8 *out_level);
+
+int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
+				struct btrfs_path *path,
+				u64 extent_item_objectid,
+				u64 extent_offset,
+				iterate_extent_inodes_t *iterate, void *ctx);
+
+int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
+				struct btrfs_path *path,
+				iterate_extent_inodes_t *iterate, void *ctx);
+
+int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
+
+struct btrfs_data_container *init_data_container(u32 total_bytes);
+struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
+					struct btrfs_path *path);
+void free_ipath(struct inode_fs_paths *ipath);
+
+#endif
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index d9f99a1..5a5d325 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -103,11 +103,6 @@
 	 */
 	u64 delalloc_bytes;
 
-	/* total number of bytes that may be used for this inode for
-	 * delalloc
-	 */
-	u64 reserved_bytes;
-
 	/*
 	 * the size of the file stored in the metadata on disk.  data=ordered
 	 * means the in-memory i_size might be larger than the size on disk
@@ -115,9 +110,6 @@
 	 */
 	u64 disk_i_size;
 
-	/* flags field from the on disk inode */
-	u32 flags;
-
 	/*
 	 * if this is a directory then index_cnt is the counter for the index
 	 * number for new files that are created
@@ -132,6 +124,15 @@
 	u64 last_unlink_trans;
 
 	/*
+	 * Number of bytes outstanding that are going to need csums.  This is
+	 * used in ENOSPC accounting.
+	 */
+	u64 csum_bytes;
+
+	/* flags field from the on disk inode */
+	u32 flags;
+
+	/*
 	 * Counters to keep track of the number of extent item's we may use due
 	 * to delalloc and such.  outstanding_extents is the number of extent
 	 * items we think we'll end up using, and reserved_extents is the number
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 8ec5d86..14f1c5a 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -85,7 +85,8 @@
 static inline int compressed_bio_size(struct btrfs_root *root,
 				      unsigned long disk_size)
 {
-	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+
 	return sizeof(struct compressed_bio) +
 		((disk_size + root->sectorsize - 1) / root->sectorsize) *
 		csum_size;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 011cab3..0fe615e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -902,9 +902,10 @@
 
 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
 
-	if (level < BTRFS_MAX_LEVEL - 1)
+	if (level < BTRFS_MAX_LEVEL - 1) {
 		parent = path->nodes[level + 1];
-	pslot = path->slots[level + 1];
+		pslot = path->slots[level + 1];
+	}
 
 	/*
 	 * deal with the case where there is only one pointer in the root
@@ -1107,9 +1108,10 @@
 	mid = path->nodes[level];
 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
 
-	if (level < BTRFS_MAX_LEVEL - 1)
+	if (level < BTRFS_MAX_LEVEL - 1) {
 		parent = path->nodes[level + 1];
-	pslot = path->slots[level + 1];
+		pslot = path->slots[level + 1];
+	}
 
 	if (!parent)
 		return 1;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 03912c5..b9ba59f 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -30,6 +30,7 @@
 #include <linux/kobject.h>
 #include <trace/events/btrfs.h>
 #include <asm/kmap_types.h>
+#include <linux/pagemap.h>
 #include "extent_io.h"
 #include "extent_map.h"
 #include "async-thread.h"
@@ -360,6 +361,47 @@
 #define BTRFS_LABEL_SIZE 256
 
 /*
+ * just in case we somehow lose the roots and are not able to mount,
+ * we store an array of the roots from previous transactions
+ * in the super.
+ */
+#define BTRFS_NUM_BACKUP_ROOTS 4
+struct btrfs_root_backup {
+	__le64 tree_root;
+	__le64 tree_root_gen;
+
+	__le64 chunk_root;
+	__le64 chunk_root_gen;
+
+	__le64 extent_root;
+	__le64 extent_root_gen;
+
+	__le64 fs_root;
+	__le64 fs_root_gen;
+
+	__le64 dev_root;
+	__le64 dev_root_gen;
+
+	__le64 csum_root;
+	__le64 csum_root_gen;
+
+	__le64 total_bytes;
+	__le64 bytes_used;
+	__le64 num_devices;
+	/* future */
+	__le64 unused_64[4];
+
+	u8 tree_root_level;
+	u8 chunk_root_level;
+	u8 extent_root_level;
+	u8 fs_root_level;
+	u8 dev_root_level;
+	u8 csum_root_level;
+	/* future and to align */
+	u8 unused_8[10];
+} __attribute__ ((__packed__));
+
+/*
  * the super block basically lists the main trees of the FS
  * it currently lacks any block count etc etc
  */
@@ -405,6 +447,7 @@
 	/* future expansion */
 	__le64 reserved[31];
 	u8 sys_chunk_array[BTRFS_SYSTEM_CHUNK_ARRAY_SIZE];
+	struct btrfs_root_backup super_roots[BTRFS_NUM_BACKUP_ROOTS];
 } __attribute__ ((__packed__));
 
 /*
@@ -772,14 +815,8 @@
 struct btrfs_block_rsv {
 	u64 size;
 	u64 reserved;
-	u64 freed[2];
 	struct btrfs_space_info *space_info;
-	struct list_head list;
 	spinlock_t lock;
-	atomic_t usage;
-	unsigned int priority:8;
-	unsigned int durable:1;
-	unsigned int refill_used:1;
 	unsigned int full:1;
 };
 
@@ -840,10 +877,10 @@
 	spinlock_t lock;
 	u64 pinned;
 	u64 reserved;
-	u64 reserved_pinned;
 	u64 bytes_super;
 	u64 flags;
 	u64 sectorsize;
+	u64 cache_generation;
 	unsigned int ro:1;
 	unsigned int dirty:1;
 	unsigned int iref:1;
@@ -899,6 +936,10 @@
 	spinlock_t block_group_cache_lock;
 	struct rb_root block_group_cache_tree;
 
+	/* keep track of unallocated space */
+	spinlock_t free_chunk_lock;
+	u64 free_chunk_space;
+
 	struct extent_io_tree freed_extents[2];
 	struct extent_io_tree *pinned_extents;
 
@@ -916,14 +957,11 @@
 	struct btrfs_block_rsv trans_block_rsv;
 	/* block reservation for chunk tree */
 	struct btrfs_block_rsv chunk_block_rsv;
+	/* block reservation for delayed operations */
+	struct btrfs_block_rsv delayed_block_rsv;
 
 	struct btrfs_block_rsv empty_block_rsv;
 
-	/* list of block reservations that cross multiple transactions */
-	struct list_head durable_block_rsv_list;
-
-	struct mutex durable_block_rsv_mutex;
-
 	u64 generation;
 	u64 last_trans_committed;
 
@@ -942,8 +980,8 @@
 	wait_queue_head_t transaction_blocked_wait;
 	wait_queue_head_t async_submit_wait;
 
-	struct btrfs_super_block super_copy;
-	struct btrfs_super_block super_for_commit;
+	struct btrfs_super_block *super_copy;
+	struct btrfs_super_block *super_for_commit;
 	struct block_device *__bdev;
 	struct super_block *sb;
 	struct inode *btree_inode;
@@ -1036,6 +1074,7 @@
 	struct btrfs_workers endio_freespace_worker;
 	struct btrfs_workers submit_workers;
 	struct btrfs_workers caching_workers;
+	struct btrfs_workers readahead_workers;
 
 	/*
 	 * fixup workers take dirty pages that didn't properly go through
@@ -1119,6 +1158,13 @@
 	u64 fs_state;
 
 	struct btrfs_delayed_root *delayed_root;
+
+	/* readahead tree */
+	spinlock_t reada_lock;
+	struct radix_tree_root reada_tree;
+
+	/* next backup root to be overwritten */
+	int backup_root_index;
 };
 
 /*
@@ -1363,6 +1409,7 @@
 #define BTRFS_MOUNT_ENOSPC_DEBUG	 (1 << 15)
 #define BTRFS_MOUNT_AUTO_DEFRAG		(1 << 16)
 #define BTRFS_MOUNT_INODE_MAP_CACHE	(1 << 17)
+#define BTRFS_MOUNT_RECOVERY		(1 << 18)
 
 #define btrfs_clear_opt(o, opt)		((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)		((o) |= BTRFS_MOUNT_##opt)
@@ -1978,6 +2025,55 @@
 	return root->root_item.flags & BTRFS_ROOT_SUBVOL_RDONLY;
 }
 
+/* struct btrfs_root_backup */
+BTRFS_SETGET_STACK_FUNCS(backup_tree_root, struct btrfs_root_backup,
+		   tree_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_tree_root_gen, struct btrfs_root_backup,
+		   tree_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_tree_root_level, struct btrfs_root_backup,
+		   tree_root_level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(backup_chunk_root, struct btrfs_root_backup,
+		   chunk_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_gen, struct btrfs_root_backup,
+		   chunk_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_chunk_root_level, struct btrfs_root_backup,
+		   chunk_root_level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(backup_extent_root, struct btrfs_root_backup,
+		   extent_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_extent_root_gen, struct btrfs_root_backup,
+		   extent_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_extent_root_level, struct btrfs_root_backup,
+		   extent_root_level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(backup_fs_root, struct btrfs_root_backup,
+		   fs_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_fs_root_gen, struct btrfs_root_backup,
+		   fs_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_fs_root_level, struct btrfs_root_backup,
+		   fs_root_level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(backup_dev_root, struct btrfs_root_backup,
+		   dev_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_dev_root_gen, struct btrfs_root_backup,
+		   dev_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_dev_root_level, struct btrfs_root_backup,
+		   dev_root_level, 8);
+
+BTRFS_SETGET_STACK_FUNCS(backup_csum_root, struct btrfs_root_backup,
+		   csum_root, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_csum_root_gen, struct btrfs_root_backup,
+		   csum_root_gen, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_csum_root_level, struct btrfs_root_backup,
+		   csum_root_level, 8);
+BTRFS_SETGET_STACK_FUNCS(backup_total_bytes, struct btrfs_root_backup,
+		   total_bytes, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup,
+		   bytes_used, 64);
+BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup,
+		   num_devices, 64);
+
 /* struct btrfs_super_block */
 
 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64);
@@ -2129,6 +2225,11 @@
 		(space_info->flags & BTRFS_BLOCK_GROUP_DATA));
 }
 
+static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping)
+{
+	return mapping_gfp_mask(mapping) & ~__GFP_FS;
+}
+
 /* extent-tree.c */
 static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
 						 unsigned num_items)
@@ -2137,6 +2238,17 @@
 		3 * num_items;
 }
 
+/*
+ * Doing a truncate won't result in new nodes or leaves, just what we need for
+ * COW.
+ */
+static inline u64 btrfs_calc_trunc_metadata_size(struct btrfs_root *root,
+						 unsigned num_items)
+{
+	return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
+		num_items;
+}
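+
+/*
+ * Editor's note, a worked example: with 4k leafsize/nodesize and
+ * BTRFS_MAX_LEVEL == 8, one item costs
+ *	(4096 + 4096 * 7) * 1 = 32768 bytes
+ * here, versus three times that in btrfs_calc_trans_metadata_size() above,
+ * since a truncate only COWs the existing path and never splits nodes or
+ * leaves.
+ */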
+
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root, unsigned long count);
@@ -2146,6 +2258,9 @@
 			     u64 num_bytes, u64 *refs, u64 *flags);
 int btrfs_pin_extent(struct btrfs_root *root,
 		     u64 bytenr, u64 num, int reserved);
+int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *root,
+				    u64 bytenr, u64 num_bytes);
 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root,
 			  u64 objectid, u64 offset, u64 bytenr);
@@ -2196,8 +2311,8 @@
 		      u64 root_objectid, u64 owner, u64 offset);
 
 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
-int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-				u64 num_bytes, int reserve, int sinfo);
+int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
+				       u64 start, u64 len);
 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root);
 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
@@ -2240,25 +2355,23 @@
 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root);
 void btrfs_free_block_rsv(struct btrfs_root *root,
 			  struct btrfs_block_rsv *rsv);
-void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
-				 struct btrfs_block_rsv *rsv);
-int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root,
+int btrfs_block_rsv_add(struct btrfs_root *root,
 			struct btrfs_block_rsv *block_rsv,
 			u64 num_bytes);
-int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root,
+int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
+				struct btrfs_block_rsv *block_rsv,
+				u64 num_bytes);
+int btrfs_block_rsv_check(struct btrfs_root *root,
+			  struct btrfs_block_rsv *block_rsv, int min_factor);
+int btrfs_block_rsv_refill(struct btrfs_root *root,
 			  struct btrfs_block_rsv *block_rsv,
-			  u64 min_reserved, int min_factor);
+			  u64 min_reserved);
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
 			    struct btrfs_block_rsv *dst_rsv,
 			    u64 num_bytes);
 void btrfs_block_rsv_release(struct btrfs_root *root,
 			     struct btrfs_block_rsv *block_rsv,
 			     u64 num_bytes);
-int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
-				    struct btrfs_root *root,
-				    struct btrfs_block_rsv *rsv);
 int btrfs_set_block_group_ro(struct btrfs_root *root,
 			     struct btrfs_block_group_cache *cache);
 int btrfs_set_block_group_rw(struct btrfs_root *root,
@@ -2379,6 +2492,18 @@
 	smp_mb();
 	return fs_info->closing;
 }
+static inline void free_fs_info(struct btrfs_fs_info *fs_info)
+{
+	kfree(fs_info->delayed_root);
+	kfree(fs_info->extent_root);
+	kfree(fs_info->tree_root);
+	kfree(fs_info->chunk_root);
+	kfree(fs_info->dev_root);
+	kfree(fs_info->csum_root);
+	kfree(fs_info->super_copy);
+	kfree(fs_info->super_for_commit);
+	kfree(fs_info);
+}
 
 /* root-item.c */
 int btrfs_find_root_ref(struct btrfs_root *tree_root,
@@ -2579,11 +2704,6 @@
 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
 int btrfs_orphan_cleanup(struct btrfs_root *root);
-void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
-				struct btrfs_pending_snapshot *pending,
-				u64 *bytes_to_reserve);
-void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
-				struct btrfs_pending_snapshot *pending);
 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root);
 int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size);
@@ -2697,4 +2817,20 @@
 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
 			 struct btrfs_scrub_progress *progress);
 
+/* reada.c */
+struct reada_control {
+	struct btrfs_root	*root;		/* tree to prefetch */
+	struct btrfs_key	key_start;
+	struct btrfs_key	key_end;	/* exclusive */
+	atomic_t		elems;
+	struct kref		refcnt;
+	wait_queue_head_t	wait;
+};
+struct reada_control *btrfs_reada_add(struct btrfs_root *root,
+			      struct btrfs_key *start, struct btrfs_key *end);
+int btrfs_reada_wait(void *handle);
+void btrfs_reada_detach(void *handle);
+int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
+			 u64 start, int err);
+
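+/*
+ * Editor's sketch of how the readahead API above is meant to be used:
+ * start prefetching a key range of a tree, then either wait for completion
+ * or detach and let it run in the background. The all-inclusive key range
+ * below is illustrative.
+ */
+#if 0
+	struct reada_control *rc;
+	struct btrfs_key start = { .objectid = 0, .type = 0, .offset = 0 };
+	struct btrfs_key end = { .objectid = (u64)-1, .type = (u8)-1,
+				 .offset = (u64)-1 };
+
+	rc = btrfs_reada_add(root, &start, &end);
+	if (!IS_ERR(rc))
+		btrfs_reada_wait(rc);	/* or btrfs_reada_detach(rc) */
+#endif
+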
 #endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index ae4d9cd..3a1b939 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -591,7 +591,7 @@
 		return 0;
 
 	src_rsv = trans->block_rsv;
-	dst_rsv = &root->fs_info->global_block_rsv;
+	dst_rsv = &root->fs_info->delayed_block_rsv;
 
 	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
@@ -609,7 +609,7 @@
 	if (!item->bytes_reserved)
 		return;
 
-	rsv = &root->fs_info->global_block_rsv;
+	rsv = &root->fs_info->delayed_block_rsv;
 	btrfs_block_rsv_release(root, rsv,
 				item->bytes_reserved);
 }
@@ -624,13 +624,36 @@
 	u64 num_bytes;
 	int ret;
 
-	if (!trans->bytes_reserved)
-		return 0;
-
 	src_rsv = trans->block_rsv;
-	dst_rsv = &root->fs_info->global_block_rsv;
+	dst_rsv = &root->fs_info->delayed_block_rsv;
 
 	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+
+	/*
+	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
+	 * which, for speed, doesn't reserve space.  This is a problem since we
+	 * still need to reserve space for this update, so try to reserve the
+	 * space.
+	 *
+	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal, since
+	 * that space is already accounted for.
+	 */
+	if (!trans->bytes_reserved &&
+	    src_rsv != &root->fs_info->delalloc_block_rsv) {
+		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
+		/*
+		 * Since we're under a transaction reserve_metadata_bytes could
+		 * try to commit the transaction which will make it return
+		 * EAGAIN to make us stop the transaction we have, so return
+		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
+		 */
+		if (ret == -EAGAIN)
+			ret = -ENOSPC;
+		if (!ret)
+			node->bytes_reserved = num_bytes;
+		return ret;
+	}
+
 	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
 	if (!ret)
 		node->bytes_reserved = num_bytes;
@@ -646,7 +669,7 @@
 	if (!node->bytes_reserved)
 		return;
 
-	rsv = &root->fs_info->global_block_rsv;
+	rsv = &root->fs_info->delayed_block_rsv;
 	btrfs_block_rsv_release(root, rsv,
 				node->bytes_reserved);
 	node->bytes_reserved = 0;
@@ -1026,7 +1049,7 @@
 	path->leave_spinning = 1;
 
 	block_rsv = trans->block_rsv;
-	trans->block_rsv = &root->fs_info->global_block_rsv;
+	trans->block_rsv = &root->fs_info->delayed_block_rsv;
 
 	delayed_root = btrfs_get_delayed_root(root);
 
@@ -1069,7 +1092,7 @@
 	path->leave_spinning = 1;
 
 	block_rsv = trans->block_rsv;
-	trans->block_rsv = &node->root->fs_info->global_block_rsv;
+	trans->block_rsv = &node->root->fs_info->delayed_block_rsv;
 
 	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
 	if (!ret)
@@ -1149,7 +1172,7 @@
 		goto free_path;
 
 	block_rsv = trans->block_rsv;
-	trans->block_rsv = &root->fs_info->global_block_rsv;
+	trans->block_rsv = &root->fs_info->delayed_block_rsv;
 
 	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
 	if (!ret)
@@ -1686,11 +1709,8 @@
 	}
 
 	ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
-	/*
-	 * we must reserve enough space when we start a new transaction,
-	 * so reserving metadata failure is impossible
-	 */
-	BUG_ON(ret);
+	if (ret)
+		goto release_node;
 
 	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
 	delayed_node->inode_dirty = 1;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 07ea918..102c176 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -256,8 +256,7 @@
 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
 			   int verify)
 {
-	u16 csum_size =
-		btrfs_super_csum_size(&root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 	char *result = NULL;
 	unsigned long len;
 	unsigned long cur_len;
@@ -367,7 +366,8 @@
 	clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
 	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
 	while (1) {
-		ret = read_extent_buffer_pages(io_tree, eb, start, 1,
+		ret = read_extent_buffer_pages(io_tree, eb, start,
+					       WAIT_COMPLETE,
 					       btree_get_extent, mirror_num);
 		if (!ret &&
 		    !verify_parent_transid(io_tree, eb, parent_transid))
@@ -608,11 +608,48 @@
 	end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
 	end = eb->start + end - 1;
 err:
+	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
+		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+		btree_readahead_hook(root, eb, eb->start, ret);
+	}
+
 	free_extent_buffer(eb);
 out:
 	return ret;
 }
 
+static int btree_io_failed_hook(struct bio *failed_bio,
+			 struct page *page, u64 start, u64 end,
+			 u64 mirror_num, struct extent_state *state)
+{
+	struct extent_io_tree *tree;
+	unsigned long len;
+	struct extent_buffer *eb;
+	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
+
+	tree = &BTRFS_I(page->mapping->host)->io_tree;
+	if (page->private == EXTENT_PAGE_PRIVATE)
+		goto out;
+	if (!page->private)
+		goto out;
+
+	len = page->private >> 2;
+	WARN_ON(len == 0);
+
+	eb = alloc_extent_buffer(tree, start, len, page);
+	if (eb == NULL)
+		goto out;
+
+	if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
+		clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
+		btree_readahead_hook(root, eb, eb->start, -EIO);
+	}
+	free_extent_buffer(eb);
+
+out:
+	return -EIO;	/* we fixed nothing */
+}
+
 static void end_workqueue_bio(struct bio *bio, int err)
 {
 	struct end_io_wq *end_io_wq = bio->bi_private;
@@ -908,7 +945,7 @@
 {
 	struct extent_io_tree *tree;
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	return extent_read_full_page(tree, page, btree_get_extent);
+	return extent_read_full_page(tree, page, btree_get_extent, 0);
 }
 
 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
@@ -974,11 +1011,43 @@
 	if (!buf)
 		return 0;
 	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
-				 buf, 0, 0, btree_get_extent, 0);
+				 buf, 0, WAIT_NONE, btree_get_extent, 0);
 	free_extent_buffer(buf);
 	return ret;
 }
 
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+			 int mirror_num, struct extent_buffer **eb)
+{
+	struct extent_buffer *buf = NULL;
+	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
+	int ret;
+
+	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+	if (!buf)
+		return 0;
+
+	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
+
+	ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
+				       btree_get_extent, mirror_num);
+	if (ret) {
+		free_extent_buffer(buf);
+		return ret;
+	}
+
+	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
+		free_extent_buffer(buf);
+		return -EIO;
+	} else if (extent_buffer_uptodate(io_tree, buf, NULL)) {
+		*eb = buf;
+	} else {
+		free_extent_buffer(buf);
+	}
+	return 0;
+}
+
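+/*
+ * Editor's note: unlike readahead_tree_block() above, this variant reports
+ * whether the buffer ended up uptodate and hands it back. A caller (e.g.
+ * the readahead worker) would use it roughly like this; the variable names
+ * are illustrative:
+ */
+#if 0
+	struct extent_buffer *eb = NULL;
+	int ret;
+
+	ret = reada_tree_block_flagged(root, bytenr, blocksize, mirror_num,
+					&eb);
+	if (!ret && eb) {
+		/* read and verified, process it, then drop our ref */
+		free_extent_buffer(eb);
+	}
+#endif
+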
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 					    u64 bytenr, u32 blocksize)
 {
@@ -1135,10 +1204,12 @@
 
 	generation = btrfs_root_generation(&root->root_item);
 	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
+	root->commit_root = NULL;
 	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
 				     blocksize, generation);
 	if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) {
 		free_extent_buffer(root->node);
+		root->node = NULL;
 		return -EIO;
 	}
 	root->commit_root = btrfs_root_node(root);
@@ -1577,6 +1648,235 @@
 	return 0;
 }
 
+/*
+ * this will find the highest generation in the array of
+ * root backups.  The index of the newest entry is returned,
+ * or -1 if we can't find anything.
+ *
+ * We check to make sure the array is valid by comparing the
+ * generation of the latest root in the array with the generation
+ * in the super block.  If they don't match we pitch it.
+ */
+static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
+{
+	u64 cur;
+	int newest_index = -1;
+	struct btrfs_root_backup *root_backup;
+	int i;
+
+	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
+		root_backup = info->super_copy->super_roots + i;
+		cur = btrfs_backup_tree_root_gen(root_backup);
+		if (cur == newest_gen)
+			newest_index = i;
+	}
+
+	/* check to see if we actually wrapped around */
+	if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
+		root_backup = info->super_copy->super_roots;
+		cur = btrfs_backup_tree_root_gen(root_backup);
+		if (cur == newest_gen)
+			newest_index = 0;
+	}
+	return newest_index;
+}
+
+/*
+ * find the oldest backup so we know where to store new entries
+ * in the backup array.  This will set the backup_root_index
+ * field in the fs_info struct
+ */
+static void find_oldest_super_backup(struct btrfs_fs_info *info,
+				     u64 newest_gen)
+{
+	int newest_index;
+
+	newest_index = find_newest_super_backup(info, newest_gen);
+	/* if there was garbage in there, just move along */
+	if (newest_index == -1)
+		info->backup_root_index = 0;
+	else
+		info->backup_root_index = (newest_index + 1) %
+					  BTRFS_NUM_BACKUP_ROOTS;
+}
+
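+/*
+ * Editor's note: with BTRFS_NUM_BACKUP_ROOTS == 4 the index arithmetic here
+ * walks a small ring buffer. If the newest backup sits in slot 2, the next
+ * slot to be overwritten is (2 + 1) % 4 == 3, and stepping backwards
+ * through the history, as next_root_backup() below does, uses
+ * (i + 4 - 1) % 4.
+ */
+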
+/*
+ * copy all the root pointers into the super backup array.
+ * this will bump the backup pointer by one when it is
+ * done
+ */
+static void backup_super_roots(struct btrfs_fs_info *info)
+{
+	int next_backup;
+	struct btrfs_root_backup *root_backup;
+	int last_backup;
+
+	next_backup = info->backup_root_index;
+	last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
+		BTRFS_NUM_BACKUP_ROOTS;
+
+	/*
+	 * just overwrite the last backup if we're at the same generation;
+	 * this happens only at umount
+	 */
+	root_backup = info->super_for_commit->super_roots + last_backup;
+	if (btrfs_backup_tree_root_gen(root_backup) ==
+	    btrfs_header_generation(info->tree_root->node))
+		next_backup = last_backup;
+
+	root_backup = info->super_for_commit->super_roots + next_backup;
+
+	/*
+	 * make sure all of our padding and empty slots get zero filled
+	 * regardless of which ones we use today
+	 */
+	memset(root_backup, 0, sizeof(*root_backup));
+
+	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;
+
+	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
+	btrfs_set_backup_tree_root_gen(root_backup,
+			       btrfs_header_generation(info->tree_root->node));
+
+	btrfs_set_backup_tree_root_level(root_backup,
+			       btrfs_header_level(info->tree_root->node));
+
+	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
+	btrfs_set_backup_chunk_root_gen(root_backup,
+			       btrfs_header_generation(info->chunk_root->node));
+	btrfs_set_backup_chunk_root_level(root_backup,
+			       btrfs_header_level(info->chunk_root->node));
+
+	btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
+	btrfs_set_backup_extent_root_gen(root_backup,
+			       btrfs_header_generation(info->extent_root->node));
+	btrfs_set_backup_extent_root_level(root_backup,
+			       btrfs_header_level(info->extent_root->node));
+
+	/*
+	 * we might commit during log recovery, which happens before we set
+	 * the fs_root.  Make sure it is valid before we fill it in.
+	 */
+	if (info->fs_root && info->fs_root->node) {
+		btrfs_set_backup_fs_root(root_backup,
+					 info->fs_root->node->start);
+		btrfs_set_backup_fs_root_gen(root_backup,
+			       btrfs_header_generation(info->fs_root->node));
+		btrfs_set_backup_fs_root_level(root_backup,
+			       btrfs_header_level(info->fs_root->node));
+	}
+
+	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
+	btrfs_set_backup_dev_root_gen(root_backup,
+			       btrfs_header_generation(info->dev_root->node));
+	btrfs_set_backup_dev_root_level(root_backup,
+				       btrfs_header_level(info->dev_root->node));
+
+	btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
+	btrfs_set_backup_csum_root_gen(root_backup,
+			       btrfs_header_generation(info->csum_root->node));
+	btrfs_set_backup_csum_root_level(root_backup,
+			       btrfs_header_level(info->csum_root->node));
+
+	btrfs_set_backup_total_bytes(root_backup,
+			     btrfs_super_total_bytes(info->super_copy));
+	btrfs_set_backup_bytes_used(root_backup,
+			     btrfs_super_bytes_used(info->super_copy));
+	btrfs_set_backup_num_devices(root_backup,
+			     btrfs_super_num_devices(info->super_copy));
+
+	/*
+	 * if we don't copy this out to the super_copy, it won't get remembered
+	 * for the next commit
+	 */
+	memcpy(&info->super_copy->super_roots,
+	       &info->super_for_commit->super_roots,
+	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
+}
+
+/*
+ * this copies info out of the root backup array and back into
+ * the in-memory super block.  It is meant to help iterate through
+ * the array, so you send it the number of backups you've already
+ * tried and the last backup index you used.
+ *
+ * this returns -1 when it has tried all the backups
+ */
+static noinline int next_root_backup(struct btrfs_fs_info *info,
+				     struct btrfs_super_block *super,
+				     int *num_backups_tried, int *backup_index)
+{
+	struct btrfs_root_backup *root_backup;
+	int newest = *backup_index;
+
+	if (*num_backups_tried == 0) {
+		u64 gen = btrfs_super_generation(super);
+
+		newest = find_newest_super_backup(info, gen);
+		if (newest == -1)
+			return -1;
+
+		*backup_index = newest;
+		*num_backups_tried = 1;
+	} else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
+		/* we've tried all the backups, all done */
+		return -1;
+	} else {
+		/* jump to the next oldest backup */
+		newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
+			BTRFS_NUM_BACKUP_ROOTS;
+		*backup_index = newest;
+		*num_backups_tried += 1;
+	}
+	root_backup = super->super_roots + newest;
+
+	btrfs_set_super_generation(super,
+				   btrfs_backup_tree_root_gen(root_backup));
+	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
+	btrfs_set_super_root_level(super,
+				   btrfs_backup_tree_root_level(root_backup));
+	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));
+
+	/*
+	 * fixme: the total bytes and num_devices need to match, or we should
+	 * signal that a fsck is needed
+	 */
+	btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
+	btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
+	return 0;
+}
+
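+/*
+ * Editor's sketch of the retry loop this helper is built for; open_ctree()
+ * implements the real thing at its recovery_tree_root label. The
+ * tree_root_is_bad condition is a stand-in for the actual uptodate checks.
+ */
+#if 0
+	int num_backups_tried = 0;
+	int backup_index = 0;
+
+	while (tree_root_is_bad) {
+		if (next_root_backup(info, info->super_copy,
+				     &num_backups_tried, &backup_index) == -1)
+			break;	/* all backups exhausted, give up */
+		/* re-read the tree roots from the older generation */
+	}
+#endif
+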
+/* helper to cleanup tree roots */
+static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
+{
+	free_extent_buffer(info->tree_root->node);
+	free_extent_buffer(info->tree_root->commit_root);
+	free_extent_buffer(info->dev_root->node);
+	free_extent_buffer(info->dev_root->commit_root);
+	free_extent_buffer(info->extent_root->node);
+	free_extent_buffer(info->extent_root->commit_root);
+	free_extent_buffer(info->csum_root->node);
+	free_extent_buffer(info->csum_root->commit_root);
+
+	info->tree_root->node = NULL;
+	info->tree_root->commit_root = NULL;
+	info->dev_root->node = NULL;
+	info->dev_root->commit_root = NULL;
+	info->extent_root->node = NULL;
+	info->extent_root->commit_root = NULL;
+	info->csum_root->node = NULL;
+	info->csum_root->commit_root = NULL;
+
+	if (chunk_root) {
+		free_extent_buffer(info->chunk_root->node);
+		free_extent_buffer(info->chunk_root->commit_root);
+		info->chunk_root->node = NULL;
+		info->chunk_root->commit_root = NULL;
+	}
+}
+
 struct btrfs_root *open_ctree(struct super_block *sb,
 			      struct btrfs_fs_devices *fs_devices,
 			      char *options)
@@ -1604,6 +1904,8 @@
 
 	int ret;
 	int err = -EINVAL;
+	int num_backups_tried = 0;
+	int backup_index = 0;
 
 	struct btrfs_super_block *disk_super;
 
@@ -1648,6 +1950,7 @@
 	spin_lock_init(&fs_info->fs_roots_radix_lock);
 	spin_lock_init(&fs_info->delayed_iput_lock);
 	spin_lock_init(&fs_info->defrag_inodes_lock);
+	spin_lock_init(&fs_info->free_chunk_lock);
 	mutex_init(&fs_info->reloc_mutex);
 
 	init_completion(&fs_info->kobj_unregister);
@@ -1665,8 +1968,7 @@
 	btrfs_init_block_rsv(&fs_info->trans_block_rsv);
 	btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
 	btrfs_init_block_rsv(&fs_info->empty_block_rsv);
-	INIT_LIST_HEAD(&fs_info->durable_block_rsv_list);
-	mutex_init(&fs_info->durable_block_rsv_mutex);
+	btrfs_init_block_rsv(&fs_info->delayed_block_rsv);
 	atomic_set(&fs_info->nr_async_submits, 0);
 	atomic_set(&fs_info->async_delalloc_pages, 0);
 	atomic_set(&fs_info->async_submit_draining, 0);
@@ -1677,6 +1979,11 @@
 	fs_info->metadata_ratio = 0;
 	fs_info->defrag_inodes = RB_ROOT;
 	fs_info->trans_no_join = 0;
+	fs_info->free_chunk_space = 0;
+
+	/* readahead state */
+	INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
+	spin_lock_init(&fs_info->reada_lock);
 
 	fs_info->thread_pool_size = min_t(unsigned long,
 					  num_online_cpus() + 2, 8);
@@ -1766,14 +2073,14 @@
 		goto fail_alloc;
 	}
 
-	memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
-	memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
-	       sizeof(fs_info->super_for_commit));
+	memcpy(fs_info->super_copy, bh->b_data, sizeof(*fs_info->super_copy));
+	memcpy(fs_info->super_for_commit, fs_info->super_copy,
+	       sizeof(*fs_info->super_for_commit));
 	brelse(bh);
 
-	memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
+	memcpy(fs_info->fsid, fs_info->super_copy->fsid, BTRFS_FSID_SIZE);
 
-	disk_super = &fs_info->super_copy;
+	disk_super = fs_info->super_copy;
 	if (!btrfs_super_root(disk_super))
 		goto fail_alloc;
 
@@ -1783,6 +2090,13 @@
 	btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
 
 	/*
+	 * run through our array of backup supers and setup
+	 * our ring pointer to the oldest one
+	 */
+	generation = btrfs_super_generation(disk_super);
+	find_oldest_super_backup(fs_info, generation);
+
+	/*
 	 * In the long term, we'll store the compression type in the super
 	 * block, and it'll be used for per file compression control.
 	 */
@@ -1870,6 +2184,9 @@
 	btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
 			   fs_info->thread_pool_size,
 			   &fs_info->generic_worker);
+	btrfs_init_workers(&fs_info->readahead_workers, "readahead",
+			   fs_info->thread_pool_size,
+			   &fs_info->generic_worker);
 
 	/*
 	 * endios are largely parallel and should have a very
@@ -1880,6 +2197,7 @@
 
 	fs_info->endio_write_workers.idle_thresh = 2;
 	fs_info->endio_meta_write_workers.idle_thresh = 2;
+	fs_info->readahead_workers.idle_thresh = 2;
 
 	btrfs_start_workers(&fs_info->workers, 1);
 	btrfs_start_workers(&fs_info->generic_worker, 1);
@@ -1893,6 +2211,7 @@
 	btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
 	btrfs_start_workers(&fs_info->delayed_workers, 1);
 	btrfs_start_workers(&fs_info->caching_workers, 1);
+	btrfs_start_workers(&fs_info->readahead_workers, 1);
 
 	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
 	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -1939,7 +2258,7 @@
 	if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
 		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
 		       sb->s_id);
-		goto fail_chunk_root;
+		goto fail_tree_roots;
 	}
 	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
 	chunk_root->commit_root = btrfs_root_node(chunk_root);
@@ -1954,11 +2273,12 @@
 	if (ret) {
 		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
 		       sb->s_id);
-		goto fail_chunk_root;
+		goto fail_tree_roots;
 	}
 
 	btrfs_close_extra_devices(fs_devices);
 
+retry_root_backup:
 	blocksize = btrfs_level_size(tree_root,
 				     btrfs_super_root_level(disk_super));
 	generation = btrfs_super_generation(disk_super);
@@ -1966,32 +2286,33 @@
 	tree_root->node = read_tree_block(tree_root,
 					  btrfs_super_root(disk_super),
 					  blocksize, generation);
-	if (!tree_root->node)
-		goto fail_chunk_root;
-	if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
+	if (!tree_root->node ||
+	    !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
 		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
 		       sb->s_id);
-		goto fail_tree_root;
+
+		goto recovery_tree_root;
 	}
+
 	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
 	tree_root->commit_root = btrfs_root_node(tree_root);
 
 	ret = find_and_setup_root(tree_root, fs_info,
 				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
 	if (ret)
-		goto fail_tree_root;
+		goto recovery_tree_root;
 	extent_root->track_dirty = 1;
 
 	ret = find_and_setup_root(tree_root, fs_info,
 				  BTRFS_DEV_TREE_OBJECTID, dev_root);
 	if (ret)
-		goto fail_extent_root;
+		goto recovery_tree_root;
 	dev_root->track_dirty = 1;
 
 	ret = find_and_setup_root(tree_root, fs_info,
 				  BTRFS_CSUM_TREE_OBJECTID, csum_root);
 	if (ret)
-		goto fail_dev_root;
+		goto recovery_tree_root;
 
 	csum_root->track_dirty = 1;
 
@@ -2124,22 +2445,13 @@
 
 fail_block_groups:
 	btrfs_free_block_groups(fs_info);
-	free_extent_buffer(csum_root->node);
-	free_extent_buffer(csum_root->commit_root);
-fail_dev_root:
-	free_extent_buffer(dev_root->node);
-	free_extent_buffer(dev_root->commit_root);
-fail_extent_root:
-	free_extent_buffer(extent_root->node);
-	free_extent_buffer(extent_root->commit_root);
-fail_tree_root:
-	free_extent_buffer(tree_root->node);
-	free_extent_buffer(tree_root->commit_root);
-fail_chunk_root:
-	free_extent_buffer(chunk_root->node);
-	free_extent_buffer(chunk_root->commit_root);
+
+fail_tree_roots:
+	free_root_pointers(fs_info, 1);
+
 fail_sb_buffer:
 	btrfs_stop_workers(&fs_info->generic_worker);
+	btrfs_stop_workers(&fs_info->readahead_workers);
 	btrfs_stop_workers(&fs_info->fixup_workers);
 	btrfs_stop_workers(&fs_info->delalloc_workers);
 	btrfs_stop_workers(&fs_info->workers);
@@ -2152,7 +2464,6 @@
 	btrfs_stop_workers(&fs_info->delayed_workers);
 	btrfs_stop_workers(&fs_info->caching_workers);
 fail_alloc:
-	kfree(fs_info->delayed_root);
 fail_iput:
 	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
 	iput(fs_info->btree_inode);
@@ -2164,13 +2475,27 @@
 fail_srcu:
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
-	kfree(extent_root);
-	kfree(tree_root);
-	kfree(fs_info);
-	kfree(chunk_root);
-	kfree(dev_root);
-	kfree(csum_root);
+	free_fs_info(fs_info);
 	return ERR_PTR(err);
+
+recovery_tree_root:
+
+	if (!btrfs_test_opt(tree_root, RECOVERY))
+		goto fail_tree_roots;
+
+	free_root_pointers(fs_info, 0);
+
+	/* don't use the log in recovery mode, it won't be valid */
+	btrfs_set_super_log_root(disk_super, 0);
+
+	/* we can't trust the free space cache either */
+	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
+
+	ret = next_root_backup(fs_info, fs_info->super_copy,
+			       &num_backups_tried, &backup_index);
+	if (ret == -1)
+		goto fail_block_groups;
+	goto retry_root_backup;
 }
 
 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
@@ -2338,10 +2663,11 @@
 	int total_errors = 0;
 	u64 flags;
 
-	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
+	max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
 	do_barriers = !btrfs_test_opt(root, NOBARRIER);
+	backup_super_roots(root->fs_info);
 
-	sb = &root->fs_info->super_for_commit;
+	sb = root->fs_info->super_for_commit;
 	dev_item = &sb->dev_item;
 
 	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
@@ -2545,8 +2871,6 @@
 	/* clear out the rbtree of defraggable inodes */
 	btrfs_run_defrag_inodes(root->fs_info);
 
-	btrfs_put_block_group_cache(fs_info);
-
 	/*
 	 * Here come 2 situations when btrfs is broken to flip readonly:
 	 *
@@ -2572,6 +2896,8 @@
 			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
 	}
 
+	btrfs_put_block_group_cache(fs_info);
+
 	kthread_stop(root->fs_info->transaction_kthread);
 	kthread_stop(root->fs_info->cleaner_kthread);
 
@@ -2603,7 +2929,6 @@
 	del_fs_roots(fs_info);
 
 	iput(fs_info->btree_inode);
-	kfree(fs_info->delayed_root);
 
 	btrfs_stop_workers(&fs_info->generic_worker);
 	btrfs_stop_workers(&fs_info->fixup_workers);
@@ -2617,6 +2942,7 @@
 	btrfs_stop_workers(&fs_info->submit_workers);
 	btrfs_stop_workers(&fs_info->delayed_workers);
 	btrfs_stop_workers(&fs_info->caching_workers);
+	btrfs_stop_workers(&fs_info->readahead_workers);
 
 	btrfs_close_devices(fs_info->fs_devices);
 	btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -2624,12 +2950,7 @@
 	bdi_destroy(&fs_info->bdi);
 	cleanup_srcu_struct(&fs_info->subvol_srcu);
 
-	kfree(fs_info->extent_root);
-	kfree(fs_info->tree_root);
-	kfree(fs_info->chunk_root);
-	kfree(fs_info->dev_root);
-	kfree(fs_info->csum_root);
-	kfree(fs_info);
+	free_fs_info(fs_info);
 
 	return 0;
 }
@@ -2735,7 +3056,8 @@
 	return ret;
 }
 
-int btree_lock_page_hook(struct page *page)
+static int btree_lock_page_hook(struct page *page, void *data,
+				void (*flush_fn)(void *))
 {
 	struct inode *inode = page->mapping->host;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -2752,7 +3074,10 @@
 	if (!eb)
 		goto out;
 
-	btrfs_tree_lock(eb);
+	if (!btrfs_try_tree_write_lock(eb)) {
+		flush_fn(data);
+		btrfs_tree_lock(eb);
+	}
 	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
 
 	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
@@ -2767,7 +3092,10 @@
 	btrfs_tree_unlock(eb);
 	free_extent_buffer(eb);
 out:
-	lock_page(page);
+	if (!trylock_page(page)) {
+		flush_fn(data);
+		lock_page(page);
+	}
 	return 0;
 }
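+/*
+ * Editor's note: the trylock-then-flush pattern above avoids a deadlock
+ * where writeback blocks on a lock held by a thread that is itself waiting
+ * for this writeback to make progress. The generic shape, with try_lock/
+ * lock as stand-ins for the btrfs primitives used above:
+ */
+#if 0
+	if (!try_lock(resource)) {
+		flush_fn(data);		/* push out the pending work first */
+		lock(resource);		/* now it is safe to block */
+	}
+#endif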
 
@@ -3123,6 +3451,7 @@
 static struct extent_io_ops btree_extent_io_ops = {
 	.write_cache_pages_lock_hook = btree_lock_page_hook,
 	.readpage_end_io_hook = btree_readpage_end_io_hook,
+	.readpage_io_failed_hook = btree_io_failed_hook,
 	.submit_bio_hook = btree_submit_bio_hook,
 	/* note we're sharing with inode.c for the merge bio hook */
 	.merge_bio_hook = btrfs_merge_bio_hook,
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index bec3ea4..c99d0a8f 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -40,6 +40,8 @@
 				      u32 blocksize, u64 parent_transid);
 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
 			 u64 parent_transid);
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+			 int mirror_num, struct extent_buffer **eb);
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 						   u64 bytenr, u32 blocksize);
 int clean_tree_block(struct btrfs_trans_handle *trans,
@@ -83,8 +85,6 @@
 			     struct btrfs_fs_info *fs_info);
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root);
-int btree_lock_page_hook(struct page *page);
-
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void btrfs_init_lockdep(void);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index c9ee0e1..9879bd4 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -23,6 +23,7 @@
 #include <linux/rcupdate.h>
 #include <linux/kthread.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 #include "compat.h"
 #include "hash.h"
 #include "ctree.h"
@@ -52,6 +53,21 @@
 	CHUNK_ALLOC_LIMITED = 2,
 };
 
+/*
+ * Control how reservations are dealt with.
+ *
+ * RESERVE_FREE - freeing a reservation.
+ * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
+ *   ENOSPC accounting
+ * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
+ *   bytes_may_use as the ENOSPC accounting is done elsewhere
+ */
+enum {
+	RESERVE_FREE = 0,
+	RESERVE_ALLOC = 1,
+	RESERVE_ALLOC_NO_ACCOUNT = 2,
+};
+
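+/*
+ * Editor's sketch: btrfs_update_reserved_bytes() (forward-declared in the
+ * hunk below) takes one of these constants, e.g. for an allocation whose
+ * ENOSPC accounting was already done elsewhere:
+ */
+#if 0
+	ret = btrfs_update_reserved_bytes(cache, num_bytes,
+					  RESERVE_ALLOC_NO_ACCOUNT);
+	/* ... and later, when giving the reservation back: */
+	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE);
+#endif
+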
 static int update_block_group(struct btrfs_trans_handle *trans,
 			      struct btrfs_root *root,
 			      u64 bytenr, u64 num_bytes, int alloc);
@@ -81,6 +97,8 @@
 			 struct btrfs_key *key);
 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
 			    int dump_block_groups);
+static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
+				       u64 num_bytes, int reserve);
 
 static noinline int
 block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -104,7 +122,6 @@
 	if (atomic_dec_and_test(&cache->count)) {
 		WARN_ON(cache->pinned > 0);
 		WARN_ON(cache->reserved > 0);
-		WARN_ON(cache->reserved_pinned > 0);
 		kfree(cache->free_space_ctl);
 		kfree(cache);
 	}
@@ -465,7 +482,8 @@
 	 * we likely hold important locks.
 	 */
 	if (trans && (!trans->transaction->in_commit) &&
-	    (root && root != root->fs_info->tree_root)) {
+	    (root && root != root->fs_info->tree_root) &&
+	    btrfs_test_opt(root, SPACE_CACHE)) {
 		spin_lock(&cache->lock);
 		if (cache->cached != BTRFS_CACHE_NO) {
 			spin_unlock(&cache->lock);
@@ -1770,18 +1788,18 @@
 {
 	int ret;
 	u64 discarded_bytes = 0;
-	struct btrfs_multi_bio *multi = NULL;
+	struct btrfs_bio *bbio = NULL;
 
 
 	/* Tell the block device(s) that the sectors can be discarded */
 	ret = btrfs_map_block(&root->fs_info->mapping_tree, REQ_DISCARD,
-			      bytenr, &num_bytes, &multi, 0);
+			      bytenr, &num_bytes, &bbio, 0);
 	if (!ret) {
-		struct btrfs_bio_stripe *stripe = multi->stripes;
+		struct btrfs_bio_stripe *stripe = bbio->stripes;
 		int i;
 
 
-		for (i = 0; i < multi->num_stripes; i++, stripe++) {
+		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
 			if (!stripe->dev->can_discard)
 				continue;
 
@@ -1800,7 +1818,7 @@
 			 */
 			ret = 0;
 		}
-		kfree(multi);
+		kfree(bbio);
 	}
 
 	if (actual_bytes)
@@ -2700,6 +2718,13 @@
 		goto again;
 	}
 
+	/* We've already setup this transaction, go ahead and exit */
+	if (block_group->cache_generation == trans->transid &&
+	    i_size_read(inode)) {
+		dcs = BTRFS_DC_SETUP;
+		goto out_put;
+	}
+
 	/*
 	 * We want to set the generation to 0, that way if anything goes wrong
 	 * from here on out we know not to trust this cache when we load up next
@@ -2749,12 +2774,15 @@
 	if (!ret)
 		dcs = BTRFS_DC_SETUP;
 	btrfs_free_reserved_data_space(inode, num_pages);
+
 out_put:
 	iput(inode);
 out_free:
 	btrfs_release_path(path);
 out:
 	spin_lock(&block_group->lock);
+	if (!ret)
+		block_group->cache_generation = trans->transid;
 	block_group->disk_cache_state = dcs;
 	spin_unlock(&block_group->lock);
 
@@ -3122,16 +3150,13 @@
 		return -ENOSPC;
 	}
 	data_sinfo->bytes_may_use += bytes;
-	BTRFS_I(inode)->reserved_bytes += bytes;
 	spin_unlock(&data_sinfo->lock);
 
 	return 0;
 }
 
 /*
- * called when we are clearing an delalloc extent from the
- * inode's io_tree or there was an error for whatever reason
- * after calling btrfs_check_data_free_space
+ * Called if we need to clear a data reservation for this inode.
  */
 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
 {
@@ -3144,7 +3169,6 @@
 	data_sinfo = BTRFS_I(inode)->space_info;
 	spin_lock(&data_sinfo->lock);
 	data_sinfo->bytes_may_use -= bytes;
-	BTRFS_I(inode)->reserved_bytes -= bytes;
 	spin_unlock(&data_sinfo->lock);
 }
 
@@ -3165,6 +3189,7 @@
 			      struct btrfs_space_info *sinfo, u64 alloc_bytes,
 			      int force)
 {
+	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
 	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
 	u64 thresh;
@@ -3173,11 +3198,18 @@
 		return 1;
 
 	/*
+	 * We need to take into account the global rsv because for all intents
+	 * and purposes it's used space.  Don't worry about locking the
+	 * global_rsv, it doesn't change except when the transaction commits.
+	 */
+	num_allocated += global_rsv->size;
+
+	/*
 	 * in limited mode, we want to have some free space up to
 	 * about 1% of the FS size.
 	 */
 	if (force == CHUNK_ALLOC_LIMITED) {
-		thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
 		thresh = max_t(u64, 64 * 1024 * 1024,
 			       div_factor_fine(thresh, 1));
 
@@ -3199,7 +3231,7 @@
 	if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
 		return 0;
 
-	thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
+	thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
 
 	/* 256MB or 5% of the FS */
 	thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
@@ -3302,24 +3334,26 @@
 /*
  * shrink metadata reservation for delalloc
  */
-static int shrink_delalloc(struct btrfs_trans_handle *trans,
-			   struct btrfs_root *root, u64 to_reclaim, int sync)
+static int shrink_delalloc(struct btrfs_root *root, u64 to_reclaim,
+			   bool wait_ordered)
 {
 	struct btrfs_block_rsv *block_rsv;
 	struct btrfs_space_info *space_info;
+	struct btrfs_trans_handle *trans;
 	u64 reserved;
 	u64 max_reclaim;
 	u64 reclaimed = 0;
 	long time_left;
-	int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
+	unsigned long nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
 	int loops = 0;
 	unsigned long progress;
 
+	trans = (struct btrfs_trans_handle *)current->journal_info;
 	block_rsv = &root->fs_info->delalloc_block_rsv;
 	space_info = block_rsv->space_info;
 
 	smp_mb();
-	reserved = space_info->bytes_reserved;
+	reserved = space_info->bytes_may_use;
 	progress = space_info->reservation_progress;
 
 	if (reserved == 0)
@@ -3334,7 +3368,8 @@
 	}
 
 	max_reclaim = min(reserved, to_reclaim);
-
+	nr_pages = max_t(unsigned long, nr_pages,
+			 max_reclaim >> PAGE_CACHE_SHIFT);
 	while (loops < 1024) {
 		/* have the flusher threads jump in and do some IO */
 		smp_mb();
@@ -3344,9 +3379,9 @@
 						WB_REASON_FS_FREE_SPACE);
 
 		spin_lock(&space_info->lock);
-		if (reserved > space_info->bytes_reserved)
-			reclaimed += reserved - space_info->bytes_reserved;
-		reserved = space_info->bytes_reserved;
+		if (reserved > space_info->bytes_may_use)
+			reclaimed += reserved - space_info->bytes_may_use;
+		reserved = space_info->bytes_may_use;
 		spin_unlock(&space_info->lock);
 
 		loops++;
@@ -3357,11 +3392,15 @@
 		if (trans && trans->transaction->blocked)
 			return -EAGAIN;
 
-		time_left = schedule_timeout_interruptible(1);
+		if (wait_ordered && !trans) {
+			btrfs_wait_ordered_extents(root, 0, 0);
+		} else {
+			time_left = schedule_timeout_interruptible(1);
 
-		/* We were interrupted, exit */
-		if (time_left)
-			break;
+			/* We were interrupted, exit */
+			if (time_left)
+				break;
+		}
 
 		/* we've kicked the IO a few times, if anything has been freed,
 		 * exit.  There is no sense in looping here for a long time
@@ -3376,34 +3415,90 @@
 		}
 
 	}
-	if (reclaimed >= to_reclaim && !trans)
-		btrfs_wait_ordered_extents(root, 0, 0);
+
 	return reclaimed >= to_reclaim;
 }
 
-/*
- * Retries tells us how many times we've called reserve_metadata_bytes.  The
- * idea is if this is the first call (retries == 0) then we will add to our
- * reserved count if we can't make the allocation in order to hold our place
- * while we go and try and free up space.  That way for retries > 1 we don't try
- * and add space, we just check to see if the amount of unused space is >= the
- * total space, meaning that our reservation is valid.
+/**
+ * may_commit_transaction - possibly commit the transaction if it's ok to
+ * @root - the root we're allocating for
+ * @bytes - the number of bytes we want to reserve
+ * @force - force the commit
  *
- * However if we don't intend to retry this reservation, pass -1 as retries so
- * that it short circuits this logic.
+ * This will check to make sure that committing the transaction will actually
+ * get us somewhere and then commit the transaction if it does.  Otherwise it
+ * will return -ENOSPC.
  */
-static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
-				  struct btrfs_root *root,
+static int may_commit_transaction(struct btrfs_root *root,
+				  struct btrfs_space_info *space_info,
+				  u64 bytes, int force)
+{
+	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
+	struct btrfs_trans_handle *trans;
+
+	trans = (struct btrfs_trans_handle *)current->journal_info;
+	if (trans)
+		return -EAGAIN;
+
+	if (force)
+		goto commit;
+
+	/* See if there is enough pinned space to make this reservation */
+	spin_lock(&space_info->lock);
+	if (space_info->bytes_pinned >= bytes) {
+		spin_unlock(&space_info->lock);
+		goto commit;
+	}
+	spin_unlock(&space_info->lock);
+
+	/*
+	 * See if there is some space in the delayed insertion reservation for
+	 * this reservation.
+	 */
+	if (space_info != delayed_rsv->space_info)
+		return -ENOSPC;
+
+	spin_lock(&delayed_rsv->lock);
+	if (delayed_rsv->size < bytes) {
+		spin_unlock(&delayed_rsv->lock);
+		return -ENOSPC;
+	}
+	spin_unlock(&delayed_rsv->lock);
+
+commit:
+	trans = btrfs_join_transaction(root);
+	if (IS_ERR(trans))
+		return -ENOSPC;
+
+	return btrfs_commit_transaction(trans, root);
+}
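
The decision order in may_commit_transaction() boils down to a few short-circuits; here is a hedged user-space restatement (the struct replaces the locked reads the real function does, and the error codes are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the locked reads the real function performs */
struct commit_state {
	bool in_transaction;	/* current->journal_info != NULL */
	bool force;
	unsigned long long bytes_pinned;
	bool same_space_info;	/* delayed rsv shares our space_info */
	unsigned long long delayed_rsv_size;
};

/* 0 = commit, -1 = EAGAIN, -2 = ENOSPC (illustrative codes) */
static int should_commit(const struct commit_state *s, unsigned long long bytes)
{
	if (s->in_transaction)
		return -1;	/* can't commit underneath ourselves */
	if (s->force)
		return 0;
	if (s->bytes_pinned >= bytes)
		return 0;	/* a commit unpins enough space */
	if (!s->same_space_info || s->delayed_rsv_size < bytes)
		return -2;	/* committing would not help */
	return 0;		/* delayed insertions cover the request */
}

int main(void)
{
	struct commit_state s = {
		.bytes_pinned = 1ULL << 20,
		.same_space_info = true,
		.delayed_rsv_size = 4ULL << 20,
	};

	printf("decision for 2 MiB: %d\n", should_commit(&s, 2ULL << 20));
	return 0;
}
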
+
+/**
+ * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
+ * @root - the root we're allocating for
+ * @block_rsv - the block_rsv we're allocating for
+ * @orig_bytes - the number of bytes we want
+ * @flush - whether or not we can flush to make our reservation
+ *
+ * This will reserve orig_bytes number of bytes from the space info associated
+ * with the block_rsv.  If there is not enough space it will make an attempt to
+ * flush out space to make room.  It will do this by flushing delalloc if
+ * possible or committing the transaction.  If flush is 0 then no attempts to
+ * regain reservations will be made and this will fail if there is not enough
+ * space already.
+ */
+static int reserve_metadata_bytes(struct btrfs_root *root,
 				  struct btrfs_block_rsv *block_rsv,
 				  u64 orig_bytes, int flush)
 {
 	struct btrfs_space_info *space_info = block_rsv->space_info;
-	u64 unused;
+	u64 used;
 	u64 num_bytes = orig_bytes;
 	int retries = 0;
 	int ret = 0;
 	bool committed = false;
 	bool flushing = false;
+	bool wait_ordered = false;
 
 again:
 	ret = 0;
@@ -3420,7 +3515,7 @@
 		 * deadlock since we are waiting for the flusher to finish, but
 		 * hold the current transaction open.
 		 */
-		if (trans)
+		if (current->journal_info)
 			return -EAGAIN;
 		ret = wait_event_interruptible(space_info->wait,
 					       !space_info->flush);
@@ -3432,9 +3527,9 @@
 	}
 
 	ret = -ENOSPC;
-	unused = space_info->bytes_used + space_info->bytes_reserved +
-		 space_info->bytes_pinned + space_info->bytes_readonly +
-		 space_info->bytes_may_use;
+	used = space_info->bytes_used + space_info->bytes_reserved +
+		space_info->bytes_pinned + space_info->bytes_readonly +
+		space_info->bytes_may_use;
 
 	/*
 	 * The idea here is that we've not already over-reserved the block group
@@ -3443,10 +3538,9 @@
 	 * lets start flushing stuff first and then come back and try to make
 	 * our reservation.
 	 */
-	if (unused <= space_info->total_bytes) {
-		unused = space_info->total_bytes - unused;
-		if (unused >= num_bytes) {
-			space_info->bytes_reserved += orig_bytes;
+	if (used <= space_info->total_bytes) {
+		if (used + orig_bytes <= space_info->total_bytes) {
+			space_info->bytes_may_use += orig_bytes;
 			ret = 0;
 		} else {
 			/*
@@ -3462,10 +3556,64 @@
 		 * amount plus the amount of bytes that we need for this
 		 * reservation.
 		 */
-		num_bytes = unused - space_info->total_bytes +
+		wait_ordered = true;
+		num_bytes = used - space_info->total_bytes +
 			(orig_bytes * (retries + 1));
 	}
 
+	if (ret) {
+		u64 profile = btrfs_get_alloc_profile(root, 0);
+		u64 avail;
+
+		/*
+		 * If we have a lot of space that's pinned, don't bother doing
+		 * the overcommit dance yet and just commit the transaction.
+		 */
+		avail = (space_info->total_bytes - space_info->bytes_used) * 8;
+		do_div(avail, 10);
+		if (space_info->bytes_pinned >= avail && flush && !committed) {
+			space_info->flush = 1;
+			flushing = true;
+			spin_unlock(&space_info->lock);
+			ret = may_commit_transaction(root, space_info,
+						     orig_bytes, 1);
+			if (ret)
+				goto out;
+			committed = true;
+			goto again;
+		}
+
+		spin_lock(&root->fs_info->free_chunk_lock);
+		avail = root->fs_info->free_chunk_space;
+
+		/*
+		 * If we have dup, raid1 or raid10 then only half of the free
+		 * space is actually useable.
+		 */
+		if (profile & (BTRFS_BLOCK_GROUP_DUP |
+			       BTRFS_BLOCK_GROUP_RAID1 |
+			       BTRFS_BLOCK_GROUP_RAID10))
+			avail >>= 1;
+
+		/*
+		 * If we aren't flushing don't let us overcommit too much, say
+		 * 1/8th of the space.  If we can flush, let it overcommit up to
+		 * 1/2 of the space.
+		 */
+		if (flush)
+			avail >>= 3;
+		else
+			avail >>= 1;
+		spin_unlock(&root->fs_info->free_chunk_lock);
+
+		if (used + num_bytes < space_info->total_bytes + avail) {
+			space_info->bytes_may_use += orig_bytes;
+			ret = 0;
+		} else {
+			wait_ordered = true;
+		}
+	}
+
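
Numerically, the overcommit window works out as follows. This sketch mirrors the shifts exactly as the hunk writes them (shift by 3 when flushing, by 1 otherwise); the flag values are arbitrary stand-ins, not the real BTRFS_BLOCK_GROUP_* bits:

#include <stdio.h>

/* arbitrary stand-in flag values, not the real BTRFS_BLOCK_GROUP_* bits */
#define DUP	(1 << 0)
#define RAID1	(1 << 1)
#define RAID10	(1 << 2)

static unsigned long long overcommit_room(unsigned long long free_chunk,
					  unsigned int profile, int flush)
{
	unsigned long long avail = free_chunk;

	/* mirrored profiles store every byte twice */
	if (profile & (DUP | RAID1 | RAID10))
		avail >>= 1;
	/* same shifts as the hunk above */
	if (flush)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

int main(void)
{
	unsigned long long free_chunk = 8ULL << 30;	/* 8 GiB unallocated */

	printf("raid1, flush:     %llu MiB\n",
	       overcommit_room(free_chunk, RAID1, 1) >> 20);
	printf("single, no flush: %llu MiB\n",
	       overcommit_room(free_chunk, 0, 0) >> 20);
	return 0;
}
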
 	/*
 	 * Couldn't make our reservation, save our place so while we're trying
 	 * to reclaim space we can actually use it instead of somebody else
@@ -3485,7 +3633,7 @@
 	 * We do synchronous shrinking since we don't actually unreserve
 	 * metadata until after the IO is completed.
 	 */
-	ret = shrink_delalloc(trans, root, num_bytes, 1);
+	ret = shrink_delalloc(root, num_bytes, wait_ordered);
 	if (ret < 0)
 		goto out;
 
@@ -3497,35 +3645,17 @@
 	 * so go back around and try again.
 	 */
 	if (retries < 2) {
+		wait_ordered = true;
 		retries++;
 		goto again;
 	}
 
-	/*
-	 * Not enough space to be reclaimed, don't bother committing the
-	 * transaction.
-	 */
-	spin_lock(&space_info->lock);
-	if (space_info->bytes_pinned < orig_bytes)
-		ret = -ENOSPC;
-	spin_unlock(&space_info->lock);
-	if (ret)
-		goto out;
-
-	ret = -EAGAIN;
-	if (trans)
-		goto out;
-
 	ret = -ENOSPC;
 	if (committed)
 		goto out;
 
-	trans = btrfs_join_transaction(root);
-	if (IS_ERR(trans))
-		goto out;
-	ret = btrfs_commit_transaction(trans, root);
+	ret = may_commit_transaction(root, space_info, orig_bytes, 0);
 	if (!ret) {
-		trans = NULL;
 		committed = true;
 		goto again;
 	}
@@ -3543,10 +3673,12 @@
 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
 					     struct btrfs_root *root)
 {
-	struct btrfs_block_rsv *block_rsv;
-	if (root->ref_cows)
+	struct btrfs_block_rsv *block_rsv = NULL;
+
+	if (root->ref_cows || root == root->fs_info->csum_root)
 		block_rsv = trans->block_rsv;
-	else
+
+	if (!block_rsv)
 		block_rsv = root->block_rsv;
 
 	if (!block_rsv)
@@ -3617,7 +3749,7 @@
 		}
 		if (num_bytes) {
 			spin_lock(&space_info->lock);
-			space_info->bytes_reserved -= num_bytes;
+			space_info->bytes_may_use -= num_bytes;
 			space_info->reservation_progress++;
 			spin_unlock(&space_info->lock);
 		}
@@ -3641,9 +3773,6 @@
 {
 	memset(rsv, 0, sizeof(*rsv));
 	spin_lock_init(&rsv->lock);
-	atomic_set(&rsv->usage, 1);
-	rsv->priority = 6;
-	INIT_LIST_HEAD(&rsv->list);
 }
 
 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
@@ -3664,29 +3793,11 @@
 void btrfs_free_block_rsv(struct btrfs_root *root,
 			  struct btrfs_block_rsv *rsv)
 {
-	if (rsv && atomic_dec_and_test(&rsv->usage)) {
-		btrfs_block_rsv_release(root, rsv, (u64)-1);
-		if (!rsv->durable)
-			kfree(rsv);
-	}
+	btrfs_block_rsv_release(root, rsv, (u64)-1);
+	kfree(rsv);
 }
 
-/*
- * make the block_rsv struct be able to capture freed space.
- * the captured space will re-add to the the block_rsv struct
- * after transaction commit
- */
-void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
-				 struct btrfs_block_rsv *block_rsv)
-{
-	block_rsv->durable = 1;
-	mutex_lock(&fs_info->durable_block_rsv_mutex);
-	list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
-	mutex_unlock(&fs_info->durable_block_rsv_mutex);
-}
-
-int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
-			struct btrfs_root *root,
+int btrfs_block_rsv_add(struct btrfs_root *root,
 			struct btrfs_block_rsv *block_rsv,
 			u64 num_bytes)
 {
@@ -3695,7 +3806,7 @@
 	if (num_bytes == 0)
 		return 0;
 
-	ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
+	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
 	if (!ret) {
 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
 		return 0;
@@ -3704,55 +3815,70 @@
 	return ret;
 }
 
-int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
-			  struct btrfs_root *root,
-			  struct btrfs_block_rsv *block_rsv,
-			  u64 min_reserved, int min_factor)
+int btrfs_block_rsv_add_noflush(struct btrfs_root *root,
+				struct btrfs_block_rsv *block_rsv,
+				u64 num_bytes)
+{
+	int ret;
+
+	if (num_bytes == 0)
+		return 0;
+
+	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 0);
+	if (!ret) {
+		block_rsv_add_bytes(block_rsv, num_bytes, 1);
+		return 0;
+	}
+
+	return ret;
+}
+
+int btrfs_block_rsv_check(struct btrfs_root *root,
+			  struct btrfs_block_rsv *block_rsv, int min_factor)
 {
 	u64 num_bytes = 0;
-	int commit_trans = 0;
 	int ret = -ENOSPC;
 
 	if (!block_rsv)
 		return 0;
 
 	spin_lock(&block_rsv->lock);
-	if (min_factor > 0)
-		num_bytes = div_factor(block_rsv->size, min_factor);
-	if (min_reserved > num_bytes)
-		num_bytes = min_reserved;
-
-	if (block_rsv->reserved >= num_bytes) {
+	num_bytes = div_factor(block_rsv->size, min_factor);
+	if (block_rsv->reserved >= num_bytes)
 		ret = 0;
-	} else {
-		num_bytes -= block_rsv->reserved;
-		if (block_rsv->durable &&
-		    block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
-			commit_trans = 1;
-	}
 	spin_unlock(&block_rsv->lock);
+
+	return ret;
+}
+
+int btrfs_block_rsv_refill(struct btrfs_root *root,
+			  struct btrfs_block_rsv *block_rsv,
+			  u64 min_reserved)
+{
+	u64 num_bytes = 0;
+	int ret = -ENOSPC;
+
+	if (!block_rsv)
+		return 0;
+
+	spin_lock(&block_rsv->lock);
+	num_bytes = min_reserved;
+	if (block_rsv->reserved >= num_bytes)
+		ret = 0;
+	else
+		num_bytes -= block_rsv->reserved;
+	spin_unlock(&block_rsv->lock);
+
 	if (!ret)
 		return 0;
 
-	if (block_rsv->refill_used) {
-		ret = reserve_metadata_bytes(trans, root, block_rsv,
-					     num_bytes, 0);
-		if (!ret) {
-			block_rsv_add_bytes(block_rsv, num_bytes, 0);
-			return 0;
-		}
-	}
-
-	if (commit_trans) {
-		if (trans)
-			return -EAGAIN;
-		trans = btrfs_join_transaction(root);
-		BUG_ON(IS_ERR(trans));
-		ret = btrfs_commit_transaction(trans, root);
+	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, 1);
+	if (!ret) {
+		block_rsv_add_bytes(block_rsv, num_bytes, 0);
 		return 0;
 	}
 
-	return -ENOSPC;
+	return ret;
 }
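
The split between btrfs_block_rsv_check() (a factor-based, read-only test) and btrfs_block_rsv_refill() (top up to an absolute floor) can be modeled in a few lines; div_factor() is assumed here to be size * factor / 10:

#include <stdio.h>

struct rsv {
	unsigned long long size;
	unsigned long long reserved;
};

/* check: a read-only "are we at least at size * min_factor / 10?" test */
static int rsv_check(const struct rsv *r, int min_factor)
{
	return r->reserved >= r->size * min_factor / 10;
}

/* refill: how much must be reserved on top to reach an absolute floor */
static unsigned long long rsv_refill_need(const struct rsv *r,
					  unsigned long long min_reserved)
{
	return r->reserved >= min_reserved ? 0 : min_reserved - r->reserved;
}

int main(void)
{
	struct rsv r = { .size = 100, .reserved = 40 };

	printf("check at 5/10: %s\n", rsv_check(&r, 5) ? "ok" : "short");
	printf("refill need to reach 70: %llu\n", rsv_refill_need(&r, 70));
	return 0;
}
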
 
 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
@@ -3784,7 +3910,7 @@
 	u64 num_bytes;
 	u64 meta_used;
 	u64 data_used;
-	int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
+	int csum_size = btrfs_super_csum_size(fs_info->super_copy);
 
 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
 	spin_lock(&sinfo->lock);
@@ -3828,12 +3954,12 @@
 	if (sinfo->total_bytes > num_bytes) {
 		num_bytes = sinfo->total_bytes - num_bytes;
 		block_rsv->reserved += num_bytes;
-		sinfo->bytes_reserved += num_bytes;
+		sinfo->bytes_may_use += num_bytes;
 	}
 
 	if (block_rsv->reserved >= block_rsv->size) {
 		num_bytes = block_rsv->reserved - block_rsv->size;
-		sinfo->bytes_reserved -= num_bytes;
+		sinfo->bytes_may_use -= num_bytes;
 		sinfo->reservation_progress++;
 		block_rsv->reserved = block_rsv->size;
 		block_rsv->full = 1;
@@ -3849,16 +3975,13 @@
 
 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
 	fs_info->chunk_block_rsv.space_info = space_info;
-	fs_info->chunk_block_rsv.priority = 10;
 
 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
 	fs_info->global_block_rsv.space_info = space_info;
-	fs_info->global_block_rsv.priority = 10;
-	fs_info->global_block_rsv.refill_used = 1;
 	fs_info->delalloc_block_rsv.space_info = space_info;
 	fs_info->trans_block_rsv.space_info = space_info;
 	fs_info->empty_block_rsv.space_info = space_info;
-	fs_info->empty_block_rsv.priority = 10;
+	fs_info->delayed_block_rsv.space_info = space_info;
 
 	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
 	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
@@ -3866,10 +3989,6 @@
 	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
 	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
 
-	btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
-
-	btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
-
 	update_global_block_rsv(fs_info);
 }
 
@@ -3882,37 +4001,8 @@
 	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
 	WARN_ON(fs_info->chunk_block_rsv.size > 0);
 	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
-}
-
-int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
-				    struct btrfs_root *root,
-				    struct btrfs_block_rsv *rsv)
-{
-	struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
-	u64 num_bytes;
-	int ret;
-
-	/*
-	 * Truncate should be freeing data, but give us 2 items just in case it
-	 * needs to use some space.  We may want to be smarter about this in the
-	 * future.
-	 */
-	num_bytes = btrfs_calc_trans_metadata_size(root, 2);
-
-	/* We already have enough bytes, just return */
-	if (rsv->reserved >= num_bytes)
-		return 0;
-
-	num_bytes -= rsv->reserved;
-
-	/*
-	 * You should have reserved enough space before hand to do this, so this
-	 * should not fail.
-	 */
-	ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
-	BUG_ON(ret);
-
-	return 0;
+	WARN_ON(fs_info->delayed_block_rsv.size > 0);
+	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
 }
 
 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
@@ -3921,9 +4011,7 @@
 	if (!trans->bytes_reserved)
 		return;
 
-	BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
-	btrfs_block_rsv_release(root, trans->block_rsv,
-				trans->bytes_reserved);
+	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 	trans->bytes_reserved = 0;
 }
 
@@ -3965,11 +4053,19 @@
 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
 
+/**
+ * drop_outstanding_extent - drop an outstanding extent
+ * @inode: the inode we're dropping the extent for
+ *
+ * This is called when we are freeing up an outstanding extent, either called
+ * after an error or after an extent is written.  This will return the number of
+ * reserved extents that need to be freed.  This must be called with
+ * BTRFS_I(inode)->lock held.
+ */
 static unsigned drop_outstanding_extent(struct inode *inode)
 {
 	unsigned dropped_extents = 0;
 
-	spin_lock(&BTRFS_I(inode)->lock);
 	BUG_ON(!BTRFS_I(inode)->outstanding_extents);
 	BTRFS_I(inode)->outstanding_extents--;
 
@@ -3979,19 +4075,70 @@
 	 */
 	if (BTRFS_I(inode)->outstanding_extents >=
 	    BTRFS_I(inode)->reserved_extents)
-		goto out;
+		return 0;
 
 	dropped_extents = BTRFS_I(inode)->reserved_extents -
 		BTRFS_I(inode)->outstanding_extents;
 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
-out:
-	spin_unlock(&BTRFS_I(inode)->lock);
 	return dropped_extents;
 }
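
A stand-alone model of the counter dance above, showing how reserved_extents gets trimmed down to outstanding_extents once the latter drops below it (a plain struct replaces BTRFS_I(inode) and its lock):

#include <stdio.h>

struct inode_counts {
	unsigned int outstanding_extents;
	unsigned int reserved_extents;
};

/* same logic as above: returns how many reservations to free */
static unsigned int drop_outstanding_extent(struct inode_counts *i)
{
	unsigned int dropped;

	i->outstanding_extents--;
	if (i->outstanding_extents >= i->reserved_extents)
		return 0;
	dropped = i->reserved_extents - i->outstanding_extents;
	i->reserved_extents -= dropped;
	return dropped;
}

int main(void)
{
	struct inode_counts c = {
		.outstanding_extents = 2,
		.reserved_extents = 4,
	};

	printf("dropped %u, reserved now %u\n",
	       drop_outstanding_extent(&c), c.reserved_extents);
	return 0;
}
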
 
-static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
+/**
+ * calc_csum_metadata_size - return the amount of metadata space that must be
+ *	reserved/freed for the given bytes.
+ * @inode: the inode we're manipulating
+ * @num_bytes: the number of bytes in question
+ * @reserve: 1 if we are reserving space, 0 if we are freeing space
+ *
+ * This adjusts the number of csum_bytes in the inode and then returns the
+ * correct amount of metadata that must either be reserved or freed.  We
+ * calculate how many checksums we can fit into one leaf and then divide the
+ * number of bytes that will need to be checksummed by this value to figure out
+ * how many checksums will be required.  If we are adding bytes then the number
+ * may go up and we will return the number of additional bytes that must be
+ * reserved.  If it is going down we will return the number of bytes that must
+ * be freed.
+ *
+ * This must be called with BTRFS_I(inode)->lock held.
+ */
+static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
+				   int reserve)
 {
-	return num_bytes >>= 3;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	u64 csum_size;
+	int num_csums_per_leaf;
+	int num_csums;
+	int old_csums;
+
+	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
+	    BTRFS_I(inode)->csum_bytes == 0)
+		return 0;
+
+	old_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
+	if (reserve)
+		BTRFS_I(inode)->csum_bytes += num_bytes;
+	else
+		BTRFS_I(inode)->csum_bytes -= num_bytes;
+	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
+	num_csums_per_leaf = (int)div64_u64(csum_size,
+					    sizeof(struct btrfs_csum_item) +
+					    sizeof(struct btrfs_disk_key));
+	num_csums = (int)div64_u64(BTRFS_I(inode)->csum_bytes, root->sectorsize);
+	num_csums = num_csums + num_csums_per_leaf - 1;
+	num_csums = num_csums / num_csums_per_leaf;
+
+	old_csums = old_csums + num_csums_per_leaf - 1;
+	old_csums = old_csums / num_csums_per_leaf;
+
+	/* No change, no need to reserve more */
+	if (old_csums == num_csums)
+		return 0;
+
+	if (reserve)
+		return btrfs_calc_trans_metadata_size(root,
+						      num_csums - old_csums);
+
+	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
 }
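
The reservation delta is driven purely by how many csum leaves the tracked byte count needs before and after the change. A user-space sketch with an assumed checksums-per-leaf value (the real number depends on leaf size and item headers, as computed above):

#include <stdio.h>

/* ceil(csum_bytes / sectorsize / csums_per_leaf) */
static unsigned long long leaves_for_csums(unsigned long long csum_bytes,
					   unsigned int sectorsize,
					   unsigned int csums_per_leaf)
{
	unsigned long long num_csums = csum_bytes / sectorsize;

	return (num_csums + csums_per_leaf - 1) / csums_per_leaf;
}

int main(void)
{
	unsigned int sectorsize = 4096;
	unsigned int csums_per_leaf = 100;	/* assumed; leaf-size dependent */
	unsigned long long tracked = 1ULL << 20;	/* 1 MiB already tracked */
	unsigned long long extra = 512ULL << 10;	/* reserving 512 KiB more */

	unsigned long long before = leaves_for_csums(tracked, sectorsize,
						     csums_per_leaf);
	unsigned long long after = leaves_for_csums(tracked + extra, sectorsize,
						    csums_per_leaf);

	/* only the delta in leaves costs (or returns) metadata reservation */
	printf("leaves before %llu, after %llu, delta %llu\n",
	       before, after, after - before);
	return 0;
}
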
 
 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
@@ -4000,9 +4147,13 @@
 	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
 	u64 to_reserve = 0;
 	unsigned nr_extents = 0;
+	int flush = 1;
 	int ret;
 
-	if (btrfs_transaction_in_commit(root->fs_info))
+	if (btrfs_is_free_space_inode(root, inode))
+		flush = 0;
+
+	if (flush && btrfs_transaction_in_commit(root->fs_info))
 		schedule_timeout(1);
 
 	num_bytes = ALIGN(num_bytes, root->sectorsize);
@@ -4018,18 +4169,29 @@
 
 		to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
 	}
+	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
 	spin_unlock(&BTRFS_I(inode)->lock);
 
-	to_reserve += calc_csum_metadata_size(inode, num_bytes);
-	ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
+	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
 	if (ret) {
+		u64 to_free = 0;
 		unsigned dropped;
-		/*
-		 * We don't need the return value since our reservation failed,
-		 * we just need to clean up our counter.
-		 */
+
+		spin_lock(&BTRFS_I(inode)->lock);
 		dropped = drop_outstanding_extent(inode);
-		WARN_ON(dropped > 1);
+		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+		spin_unlock(&BTRFS_I(inode)->lock);
+		to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
+		/*
+		 * Somebody could have come in and twiddled with the
+		 * reservation, so if we have to free more than we would have
+		 * reserved from this reservation go ahead and release those
+		 * bytes.
+		 */
+		to_free -= to_reserve;
+		if (to_free)
+			btrfs_block_rsv_release(root, block_rsv, to_free);
 		return ret;
 	}
 
@@ -4038,6 +4200,15 @@
 	return 0;
 }
 
+/**
+ * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
+ * @inode: the inode to release the reservation for
+ * @num_bytes: the number of bytes we're releasing
+ *
+ * This will release the metadata reservation for an inode.  This can be called
+ * once we complete IO for a given set of bytes to release their metadata
+ * reservations.
+ */
 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -4045,9 +4216,11 @@
 	unsigned dropped;
 
 	num_bytes = ALIGN(num_bytes, root->sectorsize);
+	spin_lock(&BTRFS_I(inode)->lock);
 	dropped = drop_outstanding_extent(inode);
 
-	to_free = calc_csum_metadata_size(inode, num_bytes);
+	to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+	spin_unlock(&BTRFS_I(inode)->lock);
 	if (dropped > 0)
 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
 
@@ -4055,6 +4228,21 @@
 				to_free);
 }
 
+/**
+ * btrfs_delalloc_reserve_space - reserve data and metadata space for delalloc
+ * @inode: inode we're writing to
+ * @num_bytes: the number of bytes we want to allocate
+ *
+ * This will do the following things:
+ *
+ * o reserve space in the data space info for num_bytes
+ * o reserve space in the metadata space info based on number of outstanding
+ *   extents and how much csums will be needed
+ * o add to the inode's ->delalloc_bytes
+ * o add it to the fs_info's delalloc inodes list.
+ *
+ * This will return 0 for success and -ENOSPC if there is no space left.
+ */
 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
 {
 	int ret;
@@ -4072,6 +4260,19 @@
 	return 0;
 }
 
+/**
+ * btrfs_delalloc_release_space - release data and metadata space for delalloc
+ * @inode: inode we're releasing space for
+ * @num_bytes: the number of bytes we want to free up
+ *
+ * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
+ * called in the case that we don't need the metadata AND data reservations
+ * anymore, e.g. after an error or when we insert an inline extent.
+ *
+ * This function will release the metadata space that was not used and will
+ * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
+ * list if there are no delalloc bytes left.
+ */
 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
 {
 	btrfs_delalloc_release_metadata(inode, num_bytes);
@@ -4091,12 +4292,12 @@
 
 	/* block accounting for super block */
 	spin_lock(&info->delalloc_lock);
-	old_val = btrfs_super_bytes_used(&info->super_copy);
+	old_val = btrfs_super_bytes_used(info->super_copy);
 	if (alloc)
 		old_val += num_bytes;
 	else
 		old_val -= num_bytes;
-	btrfs_set_super_bytes_used(&info->super_copy, old_val);
+	btrfs_set_super_bytes_used(info->super_copy, old_val);
 	spin_unlock(&info->delalloc_lock);
 
 	while (total) {
@@ -4124,7 +4325,7 @@
 		spin_lock(&cache->space_info->lock);
 		spin_lock(&cache->lock);
 
-		if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
+		if (btrfs_test_opt(root, SPACE_CACHE) &&
 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
 			cache->disk_cache_state = BTRFS_DC_CLEAR;
 
@@ -4136,7 +4337,6 @@
 			btrfs_set_block_group_used(&cache->item, old_val);
 			cache->reserved -= num_bytes;
 			cache->space_info->bytes_reserved -= num_bytes;
-			cache->space_info->reservation_progress++;
 			cache->space_info->bytes_used += num_bytes;
 			cache->space_info->disk_used += num_bytes * factor;
 			spin_unlock(&cache->lock);
@@ -4188,7 +4388,6 @@
 	if (reserved) {
 		cache->reserved -= num_bytes;
 		cache->space_info->bytes_reserved -= num_bytes;
-		cache->space_info->reservation_progress++;
 	}
 	spin_unlock(&cache->lock);
 	spin_unlock(&cache->space_info->lock);
@@ -4216,45 +4415,82 @@
 }
 
 /*
- * update size of reserved extents. this function may return -EAGAIN
- * if 'reserve' is true or 'sinfo' is false.
+ * this function must be called within a transaction
  */
-int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-				u64 num_bytes, int reserve, int sinfo)
+int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
+				    struct btrfs_root *root,
+				    u64 bytenr, u64 num_bytes)
 {
+	struct btrfs_block_group_cache *cache;
+
+	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
+	BUG_ON(!cache);
+
+	/*
+	 * pull in the free space cache (if any) so that our pin
+	 * removes the free space from the cache.  We have load_only set
+	 * to one because the slow code to read in the free extents does check
+	 * the pinned extents.
+	 */
+	cache_block_group(cache, trans, root, 1);
+
+	pin_down_extent(root, cache, bytenr, num_bytes, 0);
+
+	/* remove us from the free space cache (if we're there at all) */
+	btrfs_remove_free_space(cache, bytenr, num_bytes);
+	btrfs_put_block_group(cache);
+	return 0;
+}
+
+/**
+ * btrfs_update_reserved_bytes - update the block_group and space info counters
+ * @cache:	The cache we are manipulating
+ * @num_bytes:	The number of bytes in question
+ * @reserve:	One of the reservation enums
+ *
+ * This is called by the allocator when it reserves space, or by somebody who is
+ * freeing space that was never actually used on disk.  For example if you
+ * reserve some space for a new leaf in transaction A and before transaction A
+ * commits you free that leaf, you call this with reserve set to 0 in order to
+ * clear the reservation.
+ *
+ * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
+ * ENOSPC accounting.  For data we handle the reservation through clearing the
+ * delalloc bits in the io_tree.  We have to do this since we could end up
+ * allocating less disk space for the amount of data we have reserved in the
+ * case of compression.
+ *
+ * If this is a reservation and the block group has become read only we cannot
+ * make the reservation and return -EAGAIN, otherwise this function always
+ * succeeds.
+ */
+static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
+				       u64 num_bytes, int reserve)
+{
+	struct btrfs_space_info *space_info = cache->space_info;
 	int ret = 0;
-	if (sinfo) {
-		struct btrfs_space_info *space_info = cache->space_info;
-		spin_lock(&space_info->lock);
-		spin_lock(&cache->lock);
-		if (reserve) {
-			if (cache->ro) {
-				ret = -EAGAIN;
-			} else {
-				cache->reserved += num_bytes;
-				space_info->bytes_reserved += num_bytes;
-			}
-		} else {
-			if (cache->ro)
-				space_info->bytes_readonly += num_bytes;
-			cache->reserved -= num_bytes;
-			space_info->bytes_reserved -= num_bytes;
-			space_info->reservation_progress++;
-		}
-		spin_unlock(&cache->lock);
-		spin_unlock(&space_info->lock);
-	} else {
-		spin_lock(&cache->lock);
+	spin_lock(&space_info->lock);
+	spin_lock(&cache->lock);
+	if (reserve != RESERVE_FREE) {
 		if (cache->ro) {
 			ret = -EAGAIN;
 		} else {
-			if (reserve)
-				cache->reserved += num_bytes;
-			else
-				cache->reserved -= num_bytes;
+			cache->reserved += num_bytes;
+			space_info->bytes_reserved += num_bytes;
+			if (reserve == RESERVE_ALLOC) {
+				BUG_ON(space_info->bytes_may_use < num_bytes);
+				space_info->bytes_may_use -= num_bytes;
+			}
 		}
-		spin_unlock(&cache->lock);
+	} else {
+		if (cache->ro)
+			space_info->bytes_readonly += num_bytes;
+		cache->reserved -= num_bytes;
+		space_info->bytes_reserved -= num_bytes;
+		space_info->reservation_progress++;
 	}
+	spin_unlock(&cache->lock);
+	spin_unlock(&space_info->lock);
 	return ret;
 }
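
The three reservation enums move the space_info counters in different ways; here is a toy model for a writable block group (the enum values are local stand-ins, and the read-only and locking paths are omitted):

#include <stdio.h>

/* local stand-ins for the reservation enums used above */
enum { RESERVE_FREE, RESERVE_ALLOC, RESERVE_ALLOC_NO_ACCOUNT };

struct space {
	unsigned long long reserved;
	unsigned long long may_use;
};

/* counter moves for a writable block group */
static void update_reserved(struct space *s, unsigned long long bytes,
			    int reserve)
{
	if (reserve != RESERVE_FREE) {
		s->reserved += bytes;
		/* RESERVE_ALLOC also retires the earlier "may use" claim */
		if (reserve == RESERVE_ALLOC)
			s->may_use -= bytes;
	} else {
		s->reserved -= bytes;
	}
}

int main(void)
{
	struct space s = { .reserved = 0, .may_use = 1ULL << 20 };

	update_reserved(&s, 1ULL << 20, RESERVE_ALLOC);	/* claim becomes real */
	printf("reserved=%llu may_use=%llu\n", s.reserved, s.may_use);
	update_reserved(&s, 1ULL << 20, RESERVE_FREE);	/* never hit the disk */
	printf("reserved=%llu may_use=%llu\n", s.reserved, s.may_use);
	return 0;
}
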
 
@@ -4320,13 +4556,8 @@
 		spin_lock(&cache->lock);
 		cache->pinned -= len;
 		cache->space_info->bytes_pinned -= len;
-		if (cache->ro) {
+		if (cache->ro)
 			cache->space_info->bytes_readonly += len;
-		} else if (cache->reserved_pinned > 0) {
-			len = min(len, cache->reserved_pinned);
-			cache->reserved_pinned -= len;
-			cache->space_info->bytes_reserved += len;
-		}
 		spin_unlock(&cache->lock);
 		spin_unlock(&cache->space_info->lock);
 	}
@@ -4341,11 +4572,8 @@
 {
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	struct extent_io_tree *unpin;
-	struct btrfs_block_rsv *block_rsv;
-	struct btrfs_block_rsv *next_rsv;
 	u64 start;
 	u64 end;
-	int idx;
 	int ret;
 
 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
@@ -4368,30 +4596,6 @@
 		cond_resched();
 	}
 
-	mutex_lock(&fs_info->durable_block_rsv_mutex);
-	list_for_each_entry_safe(block_rsv, next_rsv,
-				 &fs_info->durable_block_rsv_list, list) {
-
-		idx = trans->transid & 0x1;
-		if (block_rsv->freed[idx] > 0) {
-			block_rsv_add_bytes(block_rsv,
-					    block_rsv->freed[idx], 0);
-			block_rsv->freed[idx] = 0;
-		}
-		if (atomic_read(&block_rsv->usage) == 0) {
-			btrfs_block_rsv_release(root, block_rsv, (u64)-1);
-
-			if (block_rsv->freed[0] == 0 &&
-			    block_rsv->freed[1] == 0) {
-				list_del_init(&block_rsv->list);
-				kfree(block_rsv);
-			}
-		} else {
-			btrfs_block_rsv_release(root, block_rsv, 0);
-		}
-	}
-	mutex_unlock(&fs_info->durable_block_rsv_mutex);
-
 	return 0;
 }
 
@@ -4669,7 +4873,6 @@
 			   struct extent_buffer *buf,
 			   u64 parent, int last_ref)
 {
-	struct btrfs_block_rsv *block_rsv;
 	struct btrfs_block_group_cache *cache = NULL;
 	int ret;
 
@@ -4684,64 +4887,24 @@
 	if (!last_ref)
 		return;
 
-	block_rsv = get_block_rsv(trans, root);
 	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
-	if (block_rsv->space_info != cache->space_info)
-		goto out;
 
 	if (btrfs_header_generation(buf) == trans->transid) {
 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
 			ret = check_ref_cleanup(trans, root, buf->start);
 			if (!ret)
-				goto pin;
+				goto out;
 		}
 
 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
 			pin_down_extent(root, cache, buf->start, buf->len, 1);
-			goto pin;
+			goto out;
 		}
 
 		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
 
 		btrfs_add_free_space(cache, buf->start, buf->len);
-		ret = btrfs_update_reserved_bytes(cache, buf->len, 0, 0);
-		if (ret == -EAGAIN) {
-			/* block group became read-only */
-			btrfs_update_reserved_bytes(cache, buf->len, 0, 1);
-			goto out;
-		}
-
-		ret = 1;
-		spin_lock(&block_rsv->lock);
-		if (block_rsv->reserved < block_rsv->size) {
-			block_rsv->reserved += buf->len;
-			ret = 0;
-		}
-		spin_unlock(&block_rsv->lock);
-
-		if (ret) {
-			spin_lock(&cache->space_info->lock);
-			cache->space_info->bytes_reserved -= buf->len;
-			cache->space_info->reservation_progress++;
-			spin_unlock(&cache->space_info->lock);
-		}
-		goto out;
-	}
-pin:
-	if (block_rsv->durable && !cache->ro) {
-		ret = 0;
-		spin_lock(&cache->lock);
-		if (!cache->ro) {
-			cache->reserved_pinned += buf->len;
-			ret = 1;
-		}
-		spin_unlock(&cache->lock);
-
-		if (ret) {
-			spin_lock(&block_rsv->lock);
-			block_rsv->freed[trans->transid & 0x1] += buf->len;
-			spin_unlock(&block_rsv->lock);
-		}
+		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
 	}
 out:
 	/*
@@ -4884,10 +5047,13 @@
 	int last_ptr_loop = 0;
 	int loop = 0;
 	int index = 0;
+	int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
+		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
 	bool found_uncached_bg = false;
 	bool failed_cluster_refill = false;
 	bool failed_alloc = false;
 	bool use_cluster = true;
+	bool have_caching_bg = false;
 	u64 ideal_cache_percent = 0;
 	u64 ideal_cache_offset = 0;
 
@@ -4970,6 +5136,7 @@
 		}
 	}
 search:
+	have_caching_bg = false;
 	down_read(&space_info->groups_sem);
 	list_for_each_entry(block_group, &space_info->block_groups[index],
 			    list) {
@@ -5178,6 +5345,8 @@
 			failed_alloc = true;
 			goto have_block_group;
 		} else if (!offset) {
+			if (!cached)
+				have_caching_bg = true;
 			goto loop;
 		}
 checks:
@@ -5203,8 +5372,8 @@
 					     search_start - offset);
 		BUG_ON(offset > search_start);
 
-		ret = btrfs_update_reserved_bytes(block_group, num_bytes, 1,
-					    (data & BTRFS_BLOCK_GROUP_DATA));
+		ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+						  alloc_type);
 		if (ret == -EAGAIN) {
 			btrfs_add_free_space(block_group, offset, num_bytes);
 			goto loop;
@@ -5228,6 +5397,9 @@
 	}
 	up_read(&space_info->groups_sem);
 
+	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
+		goto search;
+
 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
 		goto search;
 
@@ -5326,7 +5498,8 @@
 	int index = 0;
 
 	spin_lock(&info->lock);
-	printk(KERN_INFO "space_info has %llu free, is %sfull\n",
+	printk(KERN_INFO "space_info %llu has %llu free, is %sfull\n",
+	       (unsigned long long)info->flags,
 	       (unsigned long long)(info->total_bytes - info->bytes_used -
 				    info->bytes_pinned - info->bytes_reserved -
 				    info->bytes_readonly),
@@ -5412,7 +5585,8 @@
 	return ret;
 }
 
-int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
+static int __btrfs_free_reserved_extent(struct btrfs_root *root,
+					u64 start, u64 len, int pin)
 {
 	struct btrfs_block_group_cache *cache;
 	int ret = 0;
@@ -5427,8 +5601,12 @@
 	if (btrfs_test_opt(root, DISCARD))
 		ret = btrfs_discard_extent(root, start, len, NULL);
 
-	btrfs_add_free_space(cache, start, len);
-	btrfs_update_reserved_bytes(cache, len, 0, 1);
+	if (pin)
+		pin_down_extent(root, cache, start, len, 1);
+	else {
+		btrfs_add_free_space(cache, start, len);
+		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
+	}
 	btrfs_put_block_group(cache);
 
 	trace_btrfs_reserved_extent_free(root, start, len);
@@ -5436,6 +5614,18 @@
 	return ret;
 }
 
+int btrfs_free_reserved_extent(struct btrfs_root *root,
+					u64 start, u64 len)
+{
+	return __btrfs_free_reserved_extent(root, start, len, 0);
+}
+
+int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
+				       u64 start, u64 len)
+{
+	return __btrfs_free_reserved_extent(root, start, len, 1);
+}
+
 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 				      struct btrfs_root *root,
 				      u64 parent, u64 root_objectid,
@@ -5631,7 +5821,8 @@
 		put_caching_control(caching_ctl);
 	}
 
-	ret = btrfs_update_reserved_bytes(block_group, ins->offset, 1, 1);
+	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
+					  RESERVE_ALLOC_NO_ACCOUNT);
 	BUG_ON(ret);
 	btrfs_put_block_group(block_group);
 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
@@ -5688,8 +5879,7 @@
 	block_rsv = get_block_rsv(trans, root);
 
 	if (block_rsv->size == 0) {
-		ret = reserve_metadata_bytes(trans, root, block_rsv,
-					     blocksize, 0);
+		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
 		/*
 		 * If we couldn't reserve metadata bytes try and use some from
 		 * the global reserve.
@@ -5709,13 +5899,15 @@
 	if (!ret)
 		return block_rsv;
 	if (ret) {
-		WARN_ON(1);
-		ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
-					     0);
+		static DEFINE_RATELIMIT_STATE(_rs,
+				DEFAULT_RATELIMIT_INTERVAL,
+				/*DEFAULT_RATELIMIT_BURST*/ 2);
+		if (__ratelimit(&_rs)) {
+			printk(KERN_DEBUG "btrfs: block rsv returned %d\n", ret);
+			WARN_ON(1);
+		}
+		ret = reserve_metadata_bytes(root, block_rsv, blocksize, 0);
 		if (!ret) {
-			spin_lock(&block_rsv->lock);
-			block_rsv->size += blocksize;
-			spin_unlock(&block_rsv->lock);
 			return block_rsv;
 		} else if (ret && block_rsv != global_rsv) {
 			ret = block_rsv_use_bytes(global_rsv, blocksize);
@@ -6593,12 +6785,9 @@
 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
 
 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
-	    sinfo->bytes_may_use + sinfo->bytes_readonly +
-	    cache->reserved_pinned + num_bytes + min_allocable_bytes <=
-	    sinfo->total_bytes) {
+	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
+	    min_allocable_bytes <= sinfo->total_bytes) {
 		sinfo->bytes_readonly += num_bytes;
-		sinfo->bytes_reserved += cache->reserved_pinned;
-		cache->reserved_pinned = 0;
 		cache->ro = 1;
 		ret = 0;
 	}
@@ -6965,7 +7154,8 @@
 					struct btrfs_space_info,
 					list);
 		if (space_info->bytes_pinned > 0 ||
-		    space_info->bytes_reserved > 0) {
+		    space_info->bytes_reserved > 0 ||
+		    space_info->bytes_may_use > 0) {
 			WARN_ON(1);
 			dump_space_info(space_info, 0, 0);
 		}
@@ -7007,14 +7197,12 @@
 		return -ENOMEM;
 	path->reada = 1;
 
-	cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
-	if (cache_gen != 0 &&
-	    btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
+	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
+	if (btrfs_test_opt(root, SPACE_CACHE) &&
+	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
 		need_clear = 1;
 	if (btrfs_test_opt(root, CLEAR_CACHE))
 		need_clear = 1;
-	if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
-		printk(KERN_INFO "btrfs: disk space caching is enabled\n");
 
 	while (1) {
 		ret = find_first_block_group(root, path, &key);
@@ -7253,7 +7441,7 @@
 		goto out;
 	}
 
-	inode = lookup_free_space_inode(root, block_group, path);
+	inode = lookup_free_space_inode(tree_root, block_group, path);
 	if (!IS_ERR(inode)) {
 		ret = btrfs_orphan_add(trans, inode);
 		BUG_ON(ret);
@@ -7269,7 +7457,7 @@
 			spin_unlock(&block_group->lock);
 		}
 		/* One for our lookup ref */
-		iput(inode);
+		btrfs_add_delayed_iput(inode);
 	}
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
@@ -7340,7 +7528,7 @@
 	int mixed = 0;
 	int ret;
 
-	disk_super = &fs_info->super_copy;
+	disk_super = fs_info->super_copy;
 	if (!btrfs_super_root(disk_super))
 		return 1;
 
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d418164..1f87c4d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -17,6 +17,7 @@
 #include "compat.h"
 #include "ctree.h"
 #include "btrfs_inode.h"
+#include "volumes.h"
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -894,6 +895,194 @@
 	goto again;
 }
 
+/**
+ * convert_extent_bit - convert all bits in a given range from one bit to another
+ * @tree:	the io tree to search
+ * @start:	the start offset in bytes
+ * @end:	the end offset in bytes (inclusive)
+ * @bits:	the bits to set in this range
+ * @clear_bits:	the bits to clear in this range
+ * @mask:	the allocation mask
+ *
+ * This will go through and set bits for the given range.  If any states exist
+ * already in this range they are set with the given bit and cleared of the
+ * clear_bits.  This is only meant to be used by things that are mergeable, i.e.
+ * converting from say DELALLOC to DIRTY.  This is not meant to be used with
+ * boundary bits like LOCK.
+ */
+int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+		       int bits, int clear_bits, gfp_t mask)
+{
+	struct extent_state *state;
+	struct extent_state *prealloc = NULL;
+	struct rb_node *node;
+	int err = 0;
+	u64 last_start;
+	u64 last_end;
+
+again:
+	if (!prealloc && (mask & __GFP_WAIT)) {
+		prealloc = alloc_extent_state(mask);
+		if (!prealloc)
+			return -ENOMEM;
+	}
+
+	spin_lock(&tree->lock);
+	/*
+	 * this search will find all the extents that end after
+	 * our range starts.
+	 */
+	node = tree_search(tree, start);
+	if (!node) {
+		prealloc = alloc_extent_state_atomic(prealloc);
+		if (!prealloc)
+			return -ENOMEM;
+		err = insert_state(tree, prealloc, start, end, &bits);
+		prealloc = NULL;
+		BUG_ON(err == -EEXIST);
+		goto out;
+	}
+	state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
+	last_start = state->start;
+	last_end = state->end;
+
+	/*
+	 * | ---- desired range ---- |
+	 * | state |
+	 *
+	 * Just lock what we found and keep going
+	 */
+	if (state->start == start && state->end <= end) {
+		struct rb_node *next_node;
+
+		set_state_bits(tree, state, &bits);
+		clear_state_bit(tree, state, &clear_bits, 0);
+
+		merge_state(tree, state);
+		if (last_end == (u64)-1)
+			goto out;
+
+		start = last_end + 1;
+		next_node = rb_next(&state->rb_node);
+		if (next_node && start < end && prealloc && !need_resched()) {
+			state = rb_entry(next_node, struct extent_state,
+					 rb_node);
+			if (state->start == start)
+				goto hit_next;
+		}
+		goto search_again;
+	}
+
+	/*
+	 *     | ---- desired range ---- |
+	 * | state |
+	 *   or
+	 * | ------------- state -------------- |
+	 *
+	 * We need to split the extent we found, and may flip bits on
+	 * second half.
+	 *
+	 * If the extent we found extends past our
+	 * range, we just split and search again.  It'll get split
+	 * again the next time though.
+	 *
+	 * If the extent we found is inside our range, we set the
+	 * desired bit on it.
+	 */
+	if (state->start < start) {
+		prealloc = alloc_extent_state_atomic(prealloc);
+		if (!prealloc)
+			return -ENOMEM;
+		err = split_state(tree, state, prealloc, start);
+		BUG_ON(err == -EEXIST);
+		prealloc = NULL;
+		if (err)
+			goto out;
+		if (state->end <= end) {
+			set_state_bits(tree, state, &bits);
+			clear_state_bit(tree, state, &clear_bits, 0);
+			merge_state(tree, state);
+			if (last_end == (u64)-1)
+				goto out;
+			start = last_end + 1;
+		}
+		goto search_again;
+	}
+	/*
+	 * | ---- desired range ---- |
+	 *     | state | or               | state |
+	 *
+	 * There's a hole, we need to insert something in it and
+	 * ignore the extent we found.
+	 */
+	if (state->start > start) {
+		u64 this_end;
+		if (end < last_start)
+			this_end = end;
+		else
+			this_end = last_start - 1;
+
+		prealloc = alloc_extent_state_atomic(prealloc);
+		if (!prealloc)
+			return -ENOMEM;
+
+		/*
+		 * Avoid to free 'prealloc' if it can be merged with
+		 * the later extent.
+		 */
+		err = insert_state(tree, prealloc, start, this_end,
+				   &bits);
+		BUG_ON(err == -EEXIST);
+		if (err) {
+			free_extent_state(prealloc);
+			prealloc = NULL;
+			goto out;
+		}
+		prealloc = NULL;
+		start = this_end + 1;
+		goto search_again;
+	}
+	/*
+	 * | ---- desired range ---- |
+	 *                        | state |
+	 * We need to split the extent, and set the bit
+	 * on the first half
+	 */
+	if (state->start <= end && state->end > end) {
+		prealloc = alloc_extent_state_atomic(prealloc);
+		if (!prealloc)
+			return -ENOMEM;
+
+		err = split_state(tree, state, prealloc, end + 1);
+		BUG_ON(err == -EEXIST);
+
+		set_state_bits(tree, prealloc, &bits);
+		clear_state_bit(tree, prealloc, &clear_bits, 0);
+
+		merge_state(tree, prealloc);
+		prealloc = NULL;
+		goto out;
+	}
+
+	goto search_again;
+
+out:
+	spin_unlock(&tree->lock);
+	if (prealloc)
+		free_extent_state(prealloc);
+
+	return err;
+
+search_again:
+	if (start > end)
+		goto out;
+	spin_unlock(&tree->lock);
+	if (mask & __GFP_WAIT)
+		cond_resched();
+	goto again;
+}
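
Stripped of the rb-tree splitting and merging, the conversion is just "set these bits, clear those bits" over every state in the range. A toy model on a flat array (bit names are invented; the real function also splits partially overlapping states, which this skips):

#include <stdio.h>

/* invented bit names; the real tree uses the EXTENT_* flags */
#define TOY_DELALLOC	(1 << 0)
#define TOY_DIRTY	(1 << 1)

struct toy_state {
	unsigned long long start, end;	/* inclusive, like extent states */
	int bits;
};

/* set `bits` and clear `clear_bits` on every state overlapping the range */
static void toy_convert(struct toy_state *s, int n,
			unsigned long long start, unsigned long long end,
			int bits, int clear_bits)
{
	for (int i = 0; i < n; i++) {
		if (s[i].end < start || s[i].start > end)
			continue;
		s[i].bits |= bits;
		s[i].bits &= ~clear_bits;
	}
}

int main(void)
{
	struct toy_state s[] = {
		{ 0, 4095, TOY_DELALLOC },
		{ 4096, 8191, TOY_DELALLOC },
	};

	toy_convert(s, 2, 0, 8191, TOY_DIRTY, TOY_DELALLOC);
	for (int i = 0; i < 2; i++)
		printf("[%llu, %llu] bits=%d\n", s[i].start, s[i].end,
		       s[i].bits);
	return 0;
}
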
+
 /* wrappers around set/clear extent bit */
 int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 		     gfp_t mask)
@@ -919,7 +1108,7 @@
 			struct extent_state **cached_state, gfp_t mask)
 {
 	return set_extent_bit(tree, start, end,
-			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
+			      EXTENT_DELALLOC | EXTENT_UPTODATE,
 			      0, NULL, cached_state, mask);
 }
 
@@ -1599,6 +1788,368 @@
 	return 0;
 }
 
+/*
+ * When IO fails, either with EIO or csum verification fails, we
+ * try other mirrors that might have a good copy of the data.  This
+ * io_failure_record is used to record state as we go through all the
+ * mirrors.  If another mirror has good data, the page is set up to date
+ * and things continue.  If a good mirror can't be found, the original
+ * bio end_io callback is called to indicate things have failed.
+ */
+struct io_failure_record {
+	struct page *page;
+	u64 start;
+	u64 len;
+	u64 logical;
+	unsigned long bio_flags;
+	int this_mirror;
+	int failed_mirror;
+	int in_validation;
+};
+
+static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
+				int did_repair)
+{
+	int ret;
+	int err = 0;
+	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+
+	set_state_private(failure_tree, rec->start, 0);
+	ret = clear_extent_bits(failure_tree, rec->start,
+				rec->start + rec->len - 1,
+				EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+	if (ret)
+		err = ret;
+
+	if (did_repair) {
+		ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
+					rec->start + rec->len - 1,
+					EXTENT_DAMAGED, GFP_NOFS);
+		if (ret && !err)
+			err = ret;
+	}
+
+	kfree(rec);
+	return err;
+}
+
+static void repair_io_failure_callback(struct bio *bio, int err)
+{
+	complete(bio->bi_private);
+}
+
+/*
+ * this bypasses the standard btrfs submit functions deliberately, as
+ * the standard behavior is to write all copies in a raid setup. here we only
+ * want to write the one bad copy. so we do the mapping for ourselves and issue
+ * submit_bio directly.
+ * to avoid any synchronization issues, wait for the data after writing, which
+ * actually prevents the read that triggered the error from finishing.
+ * currently, there can be no more than two copies of every data bit. thus,
+ * exactly one rewrite is required.
+ */
+int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
+			u64 length, u64 logical, struct page *page,
+			int mirror_num)
+{
+	struct bio *bio;
+	struct btrfs_device *dev;
+	DECLARE_COMPLETION_ONSTACK(compl);
+	u64 map_length = 0;
+	u64 sector;
+	struct btrfs_bio *bbio = NULL;
+	int ret;
+
+	BUG_ON(!mirror_num);
+
+	bio = bio_alloc(GFP_NOFS, 1);
+	if (!bio)
+		return -EIO;
+	bio->bi_private = &compl;
+	bio->bi_end_io = repair_io_failure_callback;
+	bio->bi_size = 0;
+	map_length = length;
+
+	ret = btrfs_map_block(map_tree, WRITE, logical,
+			      &map_length, &bbio, mirror_num);
+	if (ret) {
+		bio_put(bio);
+		return -EIO;
+	}
+	BUG_ON(mirror_num != bbio->mirror_num);
+	sector = bbio->stripes[mirror_num-1].physical >> 9;
+	bio->bi_sector = sector;
+	dev = bbio->stripes[mirror_num-1].dev;
+	kfree(bbio);
+	if (!dev || !dev->bdev || !dev->writeable) {
+		bio_put(bio);
+		return -EIO;
+	}
+	bio->bi_bdev = dev->bdev;
+	bio_add_page(bio, page, length, start-page_offset(page));
+	submit_bio(WRITE_SYNC, bio);
+	wait_for_completion(&compl);
+
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
+		/* try to remap that extent elsewhere? */
+		bio_put(bio);
+		return -EIO;
+	}
+
+	printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s "
+			"sector %llu)\n", page->mapping->host->i_ino, start,
+			dev->name, sector);
+
+	bio_put(bio);
+	return 0;
+}
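
The same pattern in user space, heavily simplified: write known-good bytes over the one bad copy and wait for the write to reach the device before anyone re-reads it. The file name and offset are invented; the real code first maps the logical extent to a specific mirror's sector:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* invented file and offset standing in for one mirror's sector */
	const char good[] = "known-good data read from another mirror";
	int fd = open("mirror.img", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (pwrite(fd, good, sizeof(good), 4096) != (ssize_t)sizeof(good)) {
		perror("pwrite");
		close(fd);
		return 1;
	}
	fsync(fd);	/* wait for the write, like the completion above */
	close(fd);
	puts("bad copy rewritten");
	return 0;
}
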
+
+/*
+ * each time an IO finishes, we do a fast check in the IO failure tree
+ * to see if we need to process or clean up an io_failure_record
+ */
+static int clean_io_failure(u64 start, struct page *page)
+{
+	u64 private;
+	u64 private_failure;
+	struct io_failure_record *failrec;
+	struct btrfs_mapping_tree *map_tree;
+	struct extent_state *state;
+	int num_copies;
+	int did_repair = 0;
+	int ret;
+	struct inode *inode = page->mapping->host;
+
+	private = 0;
+	ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
+				(u64)-1, 1, EXTENT_DIRTY, 0);
+	if (!ret)
+		return 0;
+
+	ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, start,
+				&private_failure);
+	if (ret)
+		return 0;
+
+	failrec = (struct io_failure_record *)(unsigned long) private_failure;
+	BUG_ON(!failrec->this_mirror);
+
+	if (failrec->in_validation) {
+		/* there was no real error, just free the record */
+		pr_debug("clean_io_failure: freeing dummy error at %llu\n",
+			 failrec->start);
+		did_repair = 1;
+		goto out;
+	}
+
+	spin_lock(&BTRFS_I(inode)->io_tree.lock);
+	state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
+					    failrec->start,
+					    EXTENT_LOCKED);
+	spin_unlock(&BTRFS_I(inode)->io_tree.lock);
+
+	if (state && state->start == failrec->start) {
+		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
+		num_copies = btrfs_num_copies(map_tree, failrec->logical,
+						failrec->len);
+		if (num_copies > 1)  {
+			ret = repair_io_failure(map_tree, start, failrec->len,
+						failrec->logical, page,
+						failrec->failed_mirror);
+			did_repair = !ret;
+		}
+	}
+
+out:
+	if (!ret)
+		ret = free_io_failure(inode, failrec, did_repair);
+
+	return ret;
+}
+
+/*
+ * this is a generic handler for readpage errors (default
+ * readpage_io_failed_hook). if other copies exist, read those and write back
+ * good data to the failed position. does not investigate remapping the failed
+ * extent elsewhere, hoping the device will be smart enough to do this as
+ * needed
+ */
+
+static int bio_readpage_error(struct bio *failed_bio, struct page *page,
+				u64 start, u64 end, int failed_mirror,
+				struct extent_state *state)
+{
+	struct io_failure_record *failrec = NULL;
+	u64 private;
+	struct extent_map *em;
+	struct inode *inode = page->mapping->host;
+	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
+	struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
+	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+	struct bio *bio;
+	int num_copies;
+	int ret;
+	int read_mode;
+	u64 logical;
+
+	BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+
+	ret = get_state_private(failure_tree, start, &private);
+	if (ret) {
+		failrec = kzalloc(sizeof(*failrec), GFP_NOFS);
+		if (!failrec)
+			return -ENOMEM;
+		failrec->start = start;
+		failrec->len = end - start + 1;
+		failrec->this_mirror = 0;
+		failrec->bio_flags = 0;
+		failrec->in_validation = 0;
+
+		read_lock(&em_tree->lock);
+		em = lookup_extent_mapping(em_tree, start, failrec->len);
+		if (!em) {
+			read_unlock(&em_tree->lock);
+			kfree(failrec);
+			return -EIO;
+		}
+
+		if (em->start > start || em->start + em->len < start) {
+			free_extent_map(em);
+			em = NULL;
+		}
+		read_unlock(&em_tree->lock);
+
+		if (!em || IS_ERR(em)) {
+			kfree(failrec);
+			return -EIO;
+		}
+		logical = start - em->start;
+		logical = em->block_start + logical;
+		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
+			logical = em->block_start;
+			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
+			extent_set_compress_type(&failrec->bio_flags,
+						 em->compress_type);
+		}
+		pr_debug("bio_readpage_error: (new) logical=%llu, start=%llu, "
+			 "len=%llu\n", logical, start, failrec->len);
+		failrec->logical = logical;
+		free_extent_map(em);
+
+		/* set the bits in the private failure tree */
+		ret = set_extent_bits(failure_tree, start, end,
+					EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
+		if (ret >= 0)
+			ret = set_state_private(failure_tree, start,
+						(u64)(unsigned long)failrec);
+		/* set the bits in the inode's tree */
+		if (ret >= 0)
+			ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED,
+						GFP_NOFS);
+		if (ret < 0) {
+			kfree(failrec);
+			return ret;
+		}
+	} else {
+		failrec = (struct io_failure_record *)(unsigned long)private;
+		pr_debug("bio_readpage_error: (found) logical=%llu, "
+			 "start=%llu, len=%llu, validation=%d\n",
+			 failrec->logical, failrec->start, failrec->len,
+			 failrec->in_validation);
+		/*
+		 * when data can be on disk more than twice, add to failrec here
+		 * (e.g. with a list for failed_mirror) to make
+		 * clean_io_failure() clean all those errors at once.
+		 */
+	}
+	num_copies = btrfs_num_copies(
+			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
+			      failrec->logical, failrec->len);
+	if (num_copies == 1) {
+		/*
+		 * we only have a single copy of the data, so don't bother with
+		 * all the retry and error correction code that follows. no
+		 * matter what the error is, it is very likely to persist.
+		 */
+		pr_debug("bio_readpage_error: cannot repair, num_copies == 1. "
+			 "state=%p, num_copies=%d, next_mirror %d, "
+			 "failed_mirror %d\n", state, num_copies,
+			 failrec->this_mirror, failed_mirror);
+		free_io_failure(inode, failrec, 0);
+		return -EIO;
+	}
+
+	if (!state) {
+		spin_lock(&tree->lock);
+		state = find_first_extent_bit_state(tree, failrec->start,
+						    EXTENT_LOCKED);
+		if (state && state->start != failrec->start)
+			state = NULL;
+		spin_unlock(&tree->lock);
+	}
+
+	/*
+	 * there are two premises:
+	 *	a) deliver good data to the caller
+	 *	b) correct the bad sectors on disk
+	 */
+	if (failed_bio->bi_vcnt > 1) {
+		/*
+		 * to fulfill b), we need to know the exact failing sectors, as
+		 * we don't want to rewrite any more than the failed ones. thus,
+		 * we need separate read requests for the failed bio
+		 *
+		 * if the following BUG_ON triggers, our validation request got
+		 * merged. we need separate requests for our algorithm to work.
+		 */
+		BUG_ON(failrec->in_validation);
+		failrec->in_validation = 1;
+		failrec->this_mirror = failed_mirror;
+		read_mode = READ_SYNC | REQ_FAILFAST_DEV;
+	} else {
+		/*
+		 * we're ready to fulfill a) and b) at the same time. get a good
+		 * copy of the failed sector and if we succeed, we have set up
+		 * everything for repair_io_failure to do the rest for us.
+		 */
+		if (failrec->in_validation) {
+			BUG_ON(failrec->this_mirror != failed_mirror);
+			failrec->in_validation = 0;
+			failrec->this_mirror = 0;
+		}
+		failrec->failed_mirror = failed_mirror;
+		failrec->this_mirror++;
+		if (failrec->this_mirror == failed_mirror)
+			failrec->this_mirror++;
+		read_mode = READ_SYNC;
+	}
+
+	if (!state || failrec->this_mirror > num_copies) {
+		pr_debug("bio_readpage_error: (fail) state=%p, num_copies=%d, "
+			 "next_mirror %d, failed_mirror %d\n", state,
+			 num_copies, failrec->this_mirror, failed_mirror);
+		free_io_failure(inode, failrec, 0);
+		return -EIO;
+	}
+
+	bio = bio_alloc(GFP_NOFS, 1);
+	bio->bi_private = state;
+	bio->bi_end_io = failed_bio->bi_end_io;
+	bio->bi_sector = failrec->logical >> 9;
+	bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+	bio->bi_size = 0;
+
+	bio_add_page(bio, page, failrec->len, start - page_offset(page));
+
+	pr_debug("bio_readpage_error: submitting new read[%#x] to "
+		 "this_mirror=%d, num_copies=%d, in_validation=%d\n", read_mode,
+		 failrec->this_mirror, num_copies, failrec->in_validation);
+
+	tree->ops->submit_bio_hook(inode, read_mode, bio, failrec->this_mirror,
+					failrec->bio_flags, 0);
+	return 0;
+}
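
The mirror-selection policy above, isolated: advance this_mirror, skip the mirror that just failed, and give up once every copy has been tried. A small sketch (the -1 return stands in for -EIO):

#include <stdio.h>

/* mirrors are numbered 1..num_copies; -1 stands in for -EIO */
static int next_mirror(int this_mirror, int failed_mirror, int num_copies)
{
	this_mirror++;
	if (this_mirror == failed_mirror)
		this_mirror++;
	return this_mirror > num_copies ? -1 : this_mirror;
}

int main(void)
{
	int mirror = 0;		/* 0: nothing tried yet */
	int failed = 1;		/* mirror 1 returned bad data */
	int copies = 2;

	while ((mirror = next_mirror(mirror, failed, copies)) > 0)
		printf("retry read from mirror %d\n", mirror);
	printf("out of mirrors, giving up\n");
	return 0;
}
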
+
 /* lots and lots of room for performance fixes in the end_bio funcs */
 
 /*
@@ -1697,6 +2248,9 @@
 		struct extent_state *cached = NULL;
 		struct extent_state *state;
 
+		pr_debug("end_bio_extent_readpage: bi_vcnt=%d, idx=%d, err=%d, "
+			 "mirror=%ld\n", bio->bi_vcnt, bio->bi_idx, err,
+			 (long int)bio->bi_bdev);
 		tree = &BTRFS_I(page->mapping->host)->io_tree;
 
 		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
@@ -1727,11 +2281,19 @@
 							      state);
 			if (ret)
 				uptodate = 0;
+			else
+				clean_io_failure(start, page);
 		}
-		if (!uptodate && tree->ops &&
-		    tree->ops->readpage_io_failed_hook) {
-			ret = tree->ops->readpage_io_failed_hook(bio, page,
-							 start, end, NULL);
+		if (!uptodate) {
+			u64 failed_mirror;
+			failed_mirror = (u64)bio->bi_bdev;
+			if (tree->ops && tree->ops->readpage_io_failed_hook)
+				ret = tree->ops->readpage_io_failed_hook(
+						bio, page, start, end,
+						failed_mirror, state);
+			else
+				ret = bio_readpage_error(bio, page, start, end,
+							 failed_mirror, NULL);
 			if (ret == 0) {
 				uptodate =
 					test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -1811,6 +2373,7 @@
 					   mirror_num, bio_flags, start);
 	else
 		submit_bio(rw, bio);
+
 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
 		ret = -EOPNOTSUPP;
 	bio_put(bio);
@@ -2076,16 +2639,16 @@
 }
 
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
-			    get_extent_t *get_extent)
+			    get_extent_t *get_extent, int mirror_num)
 {
 	struct bio *bio = NULL;
 	unsigned long bio_flags = 0;
 	int ret;
 
-	ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
+	ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
 				      &bio_flags);
 	if (bio)
-		ret = submit_one_bio(READ, bio, 0, bio_flags);
+		ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
 	return ret;
 }
 
@@ -2136,6 +2699,7 @@
 	int compressed;
 	int write_flags;
 	unsigned long nr_written = 0;
+	bool fill_delalloc = true;
 
 	if (wbc->sync_mode == WB_SYNC_ALL)
 		write_flags = WRITE_SYNC;
@@ -2145,6 +2709,9 @@
 	trace___extent_writepage(page, inode, wbc);
 
 	WARN_ON(!PageLocked(page));
+
+	ClearPageError(page);
+
 	pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
 	if (page->index > end_index ||
 	   (page->index == end_index && !pg_offset)) {
@@ -2166,10 +2733,13 @@
 
 	set_page_extent_mapped(page);
 
+	if (!tree->ops || !tree->ops->fill_delalloc)
+		fill_delalloc = false;
+
 	delalloc_start = start;
 	delalloc_end = 0;
 	page_started = 0;
-	if (!epd->extent_locked) {
+	if (!epd->extent_locked && fill_delalloc) {
 		u64 delalloc_to_write = 0;
 		/*
 		 * make sure the wbc mapping index is at least updated
@@ -2421,10 +2991,16 @@
 			 * swizzled back from swapper_space to tmpfs file
 			 * mapping
 			 */
-			if (tree->ops && tree->ops->write_cache_pages_lock_hook)
-				tree->ops->write_cache_pages_lock_hook(page);
-			else
-				lock_page(page);
+			if (tree->ops &&
+			    tree->ops->write_cache_pages_lock_hook) {
+				tree->ops->write_cache_pages_lock_hook(page,
+							       data, flush_fn);
+			} else {
+				if (!trylock_page(page)) {
+					flush_fn(data);
+					lock_page(page);
+				}
+			}
 
 			if (unlikely(page->mapping != mapping)) {
 				unlock_page(page);
@@ -2926,7 +3502,7 @@
 	return ret;
 }
 
-static inline struct page *extent_buffer_page(struct extent_buffer *eb,
+inline struct page *extent_buffer_page(struct extent_buffer *eb,
 					      unsigned long i)
 {
 	struct page *p;
@@ -2951,7 +3527,7 @@
 	return p;
 }
 
-static inline unsigned long num_extent_pages(u64 start, u64 len)
+inline unsigned long num_extent_pages(u64 start, u64 len)
 {
 	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
 		(start >> PAGE_CACHE_SHIFT);
@@ -3204,6 +3780,7 @@
 						PAGECACHE_TAG_DIRTY);
 		}
 		spin_unlock_irq(&page->mapping->tree_lock);
+		ClearPageError(page);
 		unlock_page(page);
 	}
 	return 0;
@@ -3349,8 +3926,7 @@
 }
 
 int read_extent_buffer_pages(struct extent_io_tree *tree,
-			     struct extent_buffer *eb,
-			     u64 start, int wait,
+			     struct extent_buffer *eb, u64 start, int wait,
 			     get_extent_t *get_extent, int mirror_num)
 {
 	unsigned long i;
@@ -3386,7 +3962,7 @@
 	num_pages = num_extent_pages(eb->start, eb->len);
 	for (i = start_i; i < num_pages; i++) {
 		page = extent_buffer_page(eb, i);
-		if (!wait) {
+		if (wait == WAIT_NONE) {
 			if (!trylock_page(page))
 				goto unlock_exit;
 		} else {
@@ -3430,7 +4006,7 @@
 	if (bio)
 		submit_one_bio(READ, bio, mirror_num, bio_flags);
 
-	if (ret || !wait)
+	if (ret || wait != WAIT_COMPLETE)
 		return ret;
 
 	for (i = start_i; i < num_pages; i++) {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 7b2f0c3..feb9be0 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -17,6 +17,8 @@
 #define EXTENT_NODATASUM (1 << 10)
 #define EXTENT_DO_ACCOUNTING (1 << 11)
 #define EXTENT_FIRST_DELALLOC (1 << 12)
+#define EXTENT_NEED_WAIT (1 << 13)
+#define EXTENT_DAMAGED (1 << 14)
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
 #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
 
@@ -32,6 +34,7 @@
 #define EXTENT_BUFFER_BLOCKING 1
 #define EXTENT_BUFFER_DIRTY 2
 #define EXTENT_BUFFER_CORRUPT 3
+#define EXTENT_BUFFER_READAHEAD 4	/* this got triggered by readahead */
 
 /* these are flags for extent_clear_unlock_delalloc */
 #define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -67,7 +70,7 @@
 			      unsigned long bio_flags);
 	int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
 	int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
-				       u64 start, u64 end,
+				       u64 start, u64 end, u64 failed_mirror,
 				       struct extent_state *state);
 	int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
 					u64 start, u64 end,
@@ -85,7 +88,8 @@
 				  struct extent_state *other);
 	void (*split_extent_hook)(struct inode *inode,
 				  struct extent_state *orig, u64 split);
-	int (*write_cache_pages_lock_hook)(struct page *page);
+	int (*write_cache_pages_lock_hook)(struct page *page, void *data,
+					   void (*flush_fn)(void *));
 };
 
 struct extent_io_tree {
@@ -185,7 +189,7 @@
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
 		    gfp_t mask);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
-			  get_extent_t *get_extent);
+			  get_extent_t *get_extent, int mirror_num);
 int __init extent_io_init(void);
 void extent_io_exit(void);
 
@@ -214,6 +218,8 @@
 		     gfp_t mask);
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 		       gfp_t mask);
+int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
+		       int bits, int clear_bits, gfp_t mask);
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 			struct extent_state **cached_state, gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
@@ -248,9 +254,14 @@
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 					 u64 start, unsigned long len);
 void free_extent_buffer(struct extent_buffer *eb);
+#define WAIT_NONE	0
+#define WAIT_COMPLETE	1
+#define WAIT_PAGE_LOCK	2
 int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     struct extent_buffer *eb, u64 start, int wait,
 			     get_extent_t *get_extent, int mirror_num);
+unsigned long num_extent_pages(u64 start, u64 len);
+struct page *extent_buffer_page(struct extent_buffer *eb, unsigned long i);
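Illustrative call with the new wait modes (a sketch; only WAIT_NONE and WAIT_COMPLETE are consumed in the hunks above, and WAIT_PAGE_LOCK is presumably for callers that want the page locks without waiting for the read to finish):

	/* opportunistic read, e.g. readahead: trylock pages, never block */
	ret = read_extent_buffer_pages(io_tree, eb, 0, WAIT_NONE,
				       btree_get_extent, mirror_num);

	/* classic read: block until the pages are up to date */
	ret = read_extent_buffer_pages(io_tree, eb, 0, WAIT_COMPLETE,
				       btree_get_extent, mirror_num);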
 
 static inline void extent_buffer_get(struct extent_buffer *eb)
 {
@@ -300,4 +311,10 @@
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
 		gfp_t gfp_flags);
+
+struct btrfs_mapping_tree;
+
+int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
+			u64 length, u64 logical, struct page *page,
+			int mirror_num);
 #endif
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index a1cb782..c7fb3a4 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -91,8 +91,7 @@
 	struct btrfs_csum_item *item;
 	struct extent_buffer *leaf;
 	u64 csum_offset = 0;
-	u16 csum_size =
-		btrfs_super_csum_size(&root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 	int csums_in_item;
 
 	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@@ -162,8 +161,7 @@
 	u64 item_last_offset = 0;
 	u64 disk_bytenr;
 	u32 diff;
-	u16 csum_size =
-		btrfs_super_csum_size(&root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 	int ret;
 	struct btrfs_path *path;
 	struct btrfs_csum_item *item = NULL;
@@ -290,7 +288,7 @@
 	int ret;
 	size_t size;
 	u64 csum_end;
-	u16 csum_size = btrfs_super_csum_size(&root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 
 	path = btrfs_alloc_path();
 	if (!path)
@@ -492,8 +490,7 @@
 				      u64 bytenr, u64 len)
 {
 	struct extent_buffer *leaf;
-	u16 csum_size =
-		btrfs_super_csum_size(&root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 	u64 csum_end;
 	u64 end_byte = bytenr + len;
 	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
@@ -549,8 +546,7 @@
 	u64 csum_end;
 	struct extent_buffer *leaf;
 	int ret;
-	u16 csum_size =
-		btrfs_super_csum_size(&root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 	int blocksize_bits = root->fs_info->sb->s_blocksize_bits;
 
 	root = root->fs_info->csum_root;
@@ -676,8 +672,7 @@
 	struct btrfs_sector_sum *sector_sum;
 	u32 nritems;
 	u32 ins_size;
-	u16 csum_size =
-		btrfs_super_csum_size(&root->fs_info->super_copy);
+	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
 
 	path = btrfs_alloc_path();
 	if (!path)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 1266f6e..dafdfa0 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1069,6 +1069,7 @@
 	int i;
 	unsigned long index = pos >> PAGE_CACHE_SHIFT;
 	struct inode *inode = fdentry(file)->d_inode;
+	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 	int err = 0;
 	int faili = 0;
 	u64 start_pos;
@@ -1080,7 +1081,7 @@
 again:
 	for (i = 0; i < num_pages; i++) {
 		pages[i] = find_or_create_page(inode->i_mapping, index + i,
-					       GFP_NOFS);
+					       mask);
 		if (!pages[i]) {
 			faili = i - 1;
 			err = -ENOMEM;
@@ -1615,10 +1616,6 @@
 			goto out;
 	}
 
-	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
-	if (ret)
-		goto out;
-
 	locked_end = alloc_end - 1;
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
@@ -1664,11 +1661,27 @@
 		if (em->block_start == EXTENT_MAP_HOLE ||
 		    (cur_offset >= inode->i_size &&
 		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
+
+			/*
+			 * Make sure we have enough space before we do the
+			 * allocation.
+			 */
+			ret = btrfs_check_data_free_space(inode, last_byte -
+							  cur_offset);
+			if (ret) {
+				free_extent_map(em);
+				break;
+			}
+
 			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
 							last_byte - cur_offset,
 							1 << inode->i_blkbits,
 							offset + len,
 							&alloc_hint);
+
+			/* Let go of our reservation. */
+			btrfs_free_reserved_data_space(inode, last_byte -
+						       cur_offset);
 			if (ret < 0) {
 				free_extent_map(em);
 				break;
@@ -1694,8 +1707,6 @@
 	}
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
 			     &cached_state, GFP_NOFS);
-
-	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
 out:
 	mutex_unlock(&inode->i_mutex);
 	return ret;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 41ac927..7a15fcf 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -20,6 +20,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/math64.h>
+#include <linux/ratelimit.h>
 #include "ctree.h"
 #include "free-space-cache.h"
 #include "transaction.h"
@@ -84,6 +85,7 @@
 				      *block_group, struct btrfs_path *path)
 {
 	struct inode *inode = NULL;
+	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
 
 	spin_lock(&block_group->lock);
 	if (block_group->inode)
@@ -98,13 +100,14 @@
 		return inode;
 
 	spin_lock(&block_group->lock);
-	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) {
+	if ((BTRFS_I(inode)->flags & flags) != flags) {
 		printk(KERN_INFO "Old style space inode found, converting.\n");
-		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NODATASUM;
+		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
+			BTRFS_INODE_NODATACOW;
 		block_group->disk_cache_state = BTRFS_DC_CLEAR;
 	}
 
-	if (!btrfs_fs_closing(root->fs_info)) {
+	if (!block_group->iref) {
 		block_group->inode = igrab(inode);
 		block_group->iref = 1;
 	}
@@ -122,12 +125,17 @@
 	struct btrfs_free_space_header *header;
 	struct btrfs_inode_item *inode_item;
 	struct extent_buffer *leaf;
+	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
 	int ret;
 
 	ret = btrfs_insert_empty_inode(trans, root, path, ino);
 	if (ret)
 		return ret;
 
+	/* We inline crcs for the free disk space cache */
+	if (ino != BTRFS_FREE_INO_OBJECTID)
+		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
+
 	leaf = path->nodes[0];
 	inode_item = btrfs_item_ptr(leaf, path->slots[0],
 				    struct btrfs_inode_item);
@@ -140,8 +148,7 @@
 	btrfs_set_inode_uid(leaf, inode_item, 0);
 	btrfs_set_inode_gid(leaf, inode_item, 0);
 	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
-	btrfs_set_inode_flags(leaf, inode_item, BTRFS_INODE_NOCOMPRESS |
-			      BTRFS_INODE_PREALLOC);
+	btrfs_set_inode_flags(leaf, inode_item, flags);
 	btrfs_set_inode_nlink(leaf, inode_item, 1);
 	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
 	btrfs_set_inode_block_group(leaf, inode_item, offset);
@@ -191,16 +198,24 @@
 				    struct inode *inode)
 {
 	struct btrfs_block_rsv *rsv;
+	u64 needed_bytes;
 	loff_t oldsize;
 	int ret = 0;
 
 	rsv = trans->block_rsv;
-	trans->block_rsv = root->orphan_block_rsv;
-	ret = btrfs_block_rsv_check(trans, root,
-				    root->orphan_block_rsv,
-				    0, 5);
-	if (ret)
-		return ret;
+	trans->block_rsv = &root->fs_info->global_block_rsv;
+
+	/* 1 for slack space, 1 for updating the inode */
+	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
+		btrfs_calc_trans_metadata_size(root, 1);
+
+	spin_lock(&trans->block_rsv->lock);
+	if (trans->block_rsv->reserved < needed_bytes) {
+		spin_unlock(&trans->block_rsv->lock);
+		trans->block_rsv = rsv;
+		return -ENOSPC;
+	}
+	spin_unlock(&trans->block_rsv->lock);
 
 	oldsize = i_size_read(inode);
 	btrfs_i_size_write(inode, 0);
@@ -213,13 +228,15 @@
 	ret = btrfs_truncate_inode_items(trans, root, inode,
 					 0, BTRFS_EXTENT_DATA_KEY);
 
-	trans->block_rsv = rsv;
 	if (ret) {
+		trans->block_rsv = rsv;
 		WARN_ON(1);
 		return ret;
 	}
 
 	ret = btrfs_update_inode(trans, root, inode);
+	trans->block_rsv = rsv;
+
 	return ret;
 }
 
@@ -242,26 +259,342 @@
 	return 0;
 }
 
+struct io_ctl {
+	void *cur, *orig;
+	struct page *page;
+	struct page **pages;
+	struct btrfs_root *root;
+	unsigned long size;
+	int index;
+	int num_pages;
+	unsigned check_crcs:1;
+};
+
+static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
+		       struct btrfs_root *root)
+{
+	memset(io_ctl, 0, sizeof(struct io_ctl));
+	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+		PAGE_CACHE_SHIFT;
+	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
+				GFP_NOFS);
+	if (!io_ctl->pages)
+		return -ENOMEM;
+	io_ctl->root = root;
+	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
+		io_ctl->check_crcs = 1;
+	return 0;
+}
+
+static void io_ctl_free(struct io_ctl *io_ctl)
+{
+	kfree(io_ctl->pages);
+}
+
+static void io_ctl_unmap_page(struct io_ctl *io_ctl)
+{
+	if (io_ctl->cur) {
+		kunmap(io_ctl->page);
+		io_ctl->cur = NULL;
+		io_ctl->orig = NULL;
+	}
+}
+
+static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
+{
+	WARN_ON(io_ctl->cur);
+	BUG_ON(io_ctl->index >= io_ctl->num_pages);
+	io_ctl->page = io_ctl->pages[io_ctl->index++];
+	io_ctl->cur = kmap(io_ctl->page);
+	io_ctl->orig = io_ctl->cur;
+	io_ctl->size = PAGE_CACHE_SIZE;
+	if (clear)
+		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
+}
+
+static void io_ctl_drop_pages(struct io_ctl *io_ctl)
+{
+	int i;
+
+	io_ctl_unmap_page(io_ctl);
+
+	for (i = 0; i < io_ctl->num_pages; i++) {
+		ClearPageChecked(io_ctl->pages[i]);
+		unlock_page(io_ctl->pages[i]);
+		page_cache_release(io_ctl->pages[i]);
+	}
+}
+
+static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
+				int uptodate)
+{
+	struct page *page;
+	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
+	int i;
+
+	for (i = 0; i < io_ctl->num_pages; i++) {
+		page = find_or_create_page(inode->i_mapping, i, mask);
+		if (!page) {
+			io_ctl_drop_pages(io_ctl);
+			return -ENOMEM;
+		}
+		io_ctl->pages[i] = page;
+		if (uptodate && !PageUptodate(page)) {
+			btrfs_readpage(NULL, page);
+			lock_page(page);
+			if (!PageUptodate(page)) {
+				printk(KERN_ERR "btrfs: error reading free "
+				       "space cache\n");
+				io_ctl_drop_pages(io_ctl);
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
+{
+	u64 *val;
+
+	io_ctl_map_page(io_ctl, 1);
+
+	/*
+	 * Skip the csum areas.  If we don't check crcs then we just have a
+	 * 64bit chunk at the front of the first page.
+	 */
+	if (io_ctl->check_crcs) {
+		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
+		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
+	} else {
+		io_ctl->cur += sizeof(u64);
+		io_ctl->size -= sizeof(u64) * 2;
+	}
+
+	val = io_ctl->cur;
+	*val = cpu_to_le64(generation);
+	io_ctl->cur += sizeof(u64);
+}
+
+static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
+{
+	u64 *gen;
+
+	/*
+	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
+	 * chunk at the front of the first page.
+	 */
+	if (io_ctl->check_crcs) {
+		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
+		io_ctl->size -= sizeof(u64) +
+			(sizeof(u32) * io_ctl->num_pages);
+	} else {
+		io_ctl->cur += sizeof(u64);
+		io_ctl->size -= sizeof(u64) * 2;
+	}
+
+	gen = io_ctl->cur;
+	if (le64_to_cpu(*gen) != generation) {
+		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
+				   "(%Lu) does not match inode (%Lu)\n", *gen,
+				   generation);
+		io_ctl_unmap_page(io_ctl);
+		return -EIO;
+	}
+	io_ctl->cur += sizeof(u64);
+	return 0;
+}
+
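With check_crcs set, page 0 begins with one u32 crc per cache page, followed by the u64 generation, with entries starting after that. A runnable sketch of the resulting offsets (assuming 4 KiB pages):

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SIZE 4096	/* assumption: 4 KiB pages */

int main(void)
{
	int num_pages = 3;	/* e.g. a 12 KiB cache file */

	/* crc area: one u32 per page, at the very front of page 0 */
	size_t crc_bytes = sizeof(uint32_t) * num_pages;
	/* the u64 generation sits immediately after the crc area */
	size_t gen_off = crc_bytes;
	/* the first free-space entry starts after the generation */
	size_t payload_off = gen_off + sizeof(uint64_t);

	printf("crcs: [0, %zu), generation at %zu, entries from %zu\n",
	       crc_bytes, gen_off, payload_off);
	printf("page 0 payload: %zu bytes\n",
	       (size_t)PAGE_CACHE_SIZE - payload_off);
	return 0;
}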
+static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
+{
+	u32 *tmp;
+	u32 crc = ~(u32)0;
+	unsigned offset = 0;
+
+	if (!io_ctl->check_crcs) {
+		io_ctl_unmap_page(io_ctl);
+		return;
+	}
+
+	if (index == 0)
+		offset = sizeof(u32) * io_ctl->num_pages;
+
+	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
+			      PAGE_CACHE_SIZE - offset);
+	btrfs_csum_final(crc, (char *)&crc);
+	io_ctl_unmap_page(io_ctl);
+	tmp = kmap(io_ctl->pages[0]);
+	tmp += index;
+	*tmp = crc;
+	kunmap(io_ctl->pages[0]);
+}
+
+static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
+{
+	u32 *tmp, val;
+	u32 crc = ~(u32)0;
+	unsigned offset = 0;
+
+	if (!io_ctl->check_crcs) {
+		io_ctl_map_page(io_ctl, 0);
+		return 0;
+	}
+
+	if (index == 0)
+		offset = sizeof(u32) * io_ctl->num_pages;
+
+	tmp = kmap(io_ctl->pages[0]);
+	tmp += index;
+	val = *tmp;
+	kunmap(io_ctl->pages[0]);
+
+	io_ctl_map_page(io_ctl, 0);
+	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
+			      PAGE_CACHE_SIZE - offset);
+	btrfs_csum_final(crc, (char *)&crc);
+	if (val != crc) {
+		printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
+				   "space cache\n");
+		io_ctl_unmap_page(io_ctl);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
+			    void *bitmap)
+{
+	struct btrfs_free_space_entry *entry;
+
+	if (!io_ctl->cur)
+		return -ENOSPC;
+
+	entry = io_ctl->cur;
+	entry->offset = cpu_to_le64(offset);
+	entry->bytes = cpu_to_le64(bytes);
+	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
+		BTRFS_FREE_SPACE_EXTENT;
+	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
+	io_ctl->size -= sizeof(struct btrfs_free_space_entry);
+
+	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
+		return 0;
+
+	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
+
+	/* No more pages to map */
+	if (io_ctl->index >= io_ctl->num_pages)
+		return 0;
+
+	/* map the next page */
+	io_ctl_map_page(io_ctl, 1);
+	return 0;
+}
+
+static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
+{
+	if (!io_ctl->cur)
+		return -ENOSPC;
+
+	/*
+	 * If we aren't at the start of the current page, unmap this one and
+	 * map the next one if there is any left.
+	 */
+	if (io_ctl->cur != io_ctl->orig) {
+		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
+		if (io_ctl->index >= io_ctl->num_pages)
+			return -ENOSPC;
+		io_ctl_map_page(io_ctl, 0);
+	}
+
+	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
+	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
+	if (io_ctl->index < io_ctl->num_pages)
+		io_ctl_map_page(io_ctl, 0);
+	return 0;
+}
+
+static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
+{
+	/*
+	 * If we're not on the boundary we know we've modified the page and we
+	 * need to crc the page.
+	 */
+	if (io_ctl->cur != io_ctl->orig)
+		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
+	else
+		io_ctl_unmap_page(io_ctl);
+
+	while (io_ctl->index < io_ctl->num_pages) {
+		io_ctl_map_page(io_ctl, 1);
+		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
+	}
+}
+
+static int io_ctl_read_entry(struct io_ctl *io_ctl,
+			    struct btrfs_free_space *entry, u8 *type)
+{
+	struct btrfs_free_space_entry *e;
+
+	e = io_ctl->cur;
+	entry->offset = le64_to_cpu(e->offset);
+	entry->bytes = le64_to_cpu(e->bytes);
+	*type = e->type;
+	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
+	io_ctl->size -= sizeof(struct btrfs_free_space_entry);
+
+	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
+		return 0;
+
+	io_ctl_unmap_page(io_ctl);
+
+	if (io_ctl->index >= io_ctl->num_pages)
+		return 0;
+
+	return io_ctl_check_crc(io_ctl, io_ctl->index);
+}
+
+static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
+			      struct btrfs_free_space *entry)
+{
+	int ret;
+
+	if (io_ctl->cur && io_ctl->cur != io_ctl->orig)
+		io_ctl_unmap_page(io_ctl);
+
+	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
+	if (ret)
+		return ret;
+
+	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
+	io_ctl_unmap_page(io_ctl);
+
+	return 0;
+}
+
 int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 			    struct btrfs_free_space_ctl *ctl,
 			    struct btrfs_path *path, u64 offset)
 {
 	struct btrfs_free_space_header *header;
 	struct extent_buffer *leaf;
-	struct page *page;
+	struct io_ctl io_ctl;
 	struct btrfs_key key;
+	struct btrfs_free_space *e, *n;
 	struct list_head bitmaps;
 	u64 num_entries;
 	u64 num_bitmaps;
 	u64 generation;
-	pgoff_t index = 0;
+	u8 type;
 	int ret = 0;
 
 	INIT_LIST_HEAD(&bitmaps);
 
 	/* Nothing in the space cache, goodbye */
 	if (!i_size_read(inode))
-		goto out;
+		return 0;
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 	key.offset = offset;
@@ -269,11 +602,10 @@
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
-		goto out;
+		return 0;
 	else if (ret > 0) {
 		btrfs_release_path(path);
-		ret = 0;
-		goto out;
+		return 0;
 	}
 
 	ret = -1;
@@ -291,169 +623,100 @@
 		       " not match free space cache generation (%llu)\n",
 		       (unsigned long long)BTRFS_I(inode)->generation,
 		       (unsigned long long)generation);
-		goto out;
+		return 0;
 	}
 
 	if (!num_entries)
-		goto out;
+		return 0;
 
+	io_ctl_init(&io_ctl, inode, root);
 	ret = readahead_cache(inode);
 	if (ret)
 		goto out;
 
-	while (1) {
-		struct btrfs_free_space_entry *entry;
-		struct btrfs_free_space *e;
-		void *addr;
-		unsigned long offset = 0;
-		int need_loop = 0;
+	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
+	if (ret)
+		goto out;
 
-		if (!num_entries && !num_bitmaps)
-			break;
+	ret = io_ctl_check_crc(&io_ctl, 0);
+	if (ret)
+		goto free_cache;
 
-		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
-		if (!page)
+	ret = io_ctl_check_generation(&io_ctl, generation);
+	if (ret)
+		goto free_cache;
+
+	while (num_entries) {
+		e = kmem_cache_zalloc(btrfs_free_space_cachep,
+				      GFP_NOFS);
+		if (!e)
 			goto free_cache;
 
-		if (!PageUptodate(page)) {
-			btrfs_readpage(NULL, page);
-			lock_page(page);
-			if (!PageUptodate(page)) {
-				unlock_page(page);
-				page_cache_release(page);
-				printk(KERN_ERR "btrfs: error reading free "
-				       "space cache\n");
-				goto free_cache;
-			}
+		ret = io_ctl_read_entry(&io_ctl, e, &type);
+		if (ret) {
+			kmem_cache_free(btrfs_free_space_cachep, e);
+			goto free_cache;
 		}
-		addr = kmap(page);
 
-		if (index == 0) {
-			u64 *gen;
-
-			/*
-			 * We put a bogus crc in the front of the first page in
-			 * case old kernels try to mount a fs with the new
-			 * format to make sure they discard the cache.
-			 */
-			addr += sizeof(u64);
-			offset += sizeof(u64);
-
-			gen = addr;
-			if (*gen != BTRFS_I(inode)->generation) {
-				printk(KERN_ERR "btrfs: space cache generation"
-				       " (%llu) does not match inode (%llu)\n",
-				       (unsigned long long)*gen,
-				       (unsigned long long)
-				       BTRFS_I(inode)->generation);
-				kunmap(page);
-				unlock_page(page);
-				page_cache_release(page);
-				goto free_cache;
-			}
-			addr += sizeof(u64);
-			offset += sizeof(u64);
+		if (!e->bytes) {
+			kmem_cache_free(btrfs_free_space_cachep, e);
+			goto free_cache;
 		}
-		entry = addr;
 
-		while (1) {
-			if (!num_entries)
-				break;
-
-			need_loop = 1;
-			e = kmem_cache_zalloc(btrfs_free_space_cachep,
-					      GFP_NOFS);
-			if (!e) {
-				kunmap(page);
-				unlock_page(page);
-				page_cache_release(page);
-				goto free_cache;
-			}
-
-			e->offset = le64_to_cpu(entry->offset);
-			e->bytes = le64_to_cpu(entry->bytes);
-			if (!e->bytes) {
-				kunmap(page);
+		if (type == BTRFS_FREE_SPACE_EXTENT) {
+			spin_lock(&ctl->tree_lock);
+			ret = link_free_space(ctl, e);
+			spin_unlock(&ctl->tree_lock);
+			if (ret) {
+				printk(KERN_ERR "Duplicate entries in "
+				       "free space cache, dumping\n");
 				kmem_cache_free(btrfs_free_space_cachep, e);
-				unlock_page(page);
-				page_cache_release(page);
 				goto free_cache;
 			}
-
-			if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
-				spin_lock(&ctl->tree_lock);
-				ret = link_free_space(ctl, e);
-				spin_unlock(&ctl->tree_lock);
-				if (ret) {
-					printk(KERN_ERR "Duplicate entries in "
-					       "free space cache, dumping\n");
-					kunmap(page);
-					unlock_page(page);
-					page_cache_release(page);
-					goto free_cache;
-				}
-			} else {
-				e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
-				if (!e->bitmap) {
-					kunmap(page);
-					kmem_cache_free(
-						btrfs_free_space_cachep, e);
-					unlock_page(page);
-					page_cache_release(page);
-					goto free_cache;
-				}
-				spin_lock(&ctl->tree_lock);
-				ret = link_free_space(ctl, e);
-				ctl->total_bitmaps++;
-				ctl->op->recalc_thresholds(ctl);
-				spin_unlock(&ctl->tree_lock);
-				if (ret) {
-					printk(KERN_ERR "Duplicate entries in "
-					       "free space cache, dumping\n");
-					kunmap(page);
-					unlock_page(page);
-					page_cache_release(page);
-					goto free_cache;
-				}
-				list_add_tail(&e->list, &bitmaps);
+		} else {
+			BUG_ON(!num_bitmaps);
+			num_bitmaps--;
+			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
+			if (!e->bitmap) {
+				kmem_cache_free(
+					btrfs_free_space_cachep, e);
+				goto free_cache;
 			}
-
-			num_entries--;
-			offset += sizeof(struct btrfs_free_space_entry);
-			if (offset + sizeof(struct btrfs_free_space_entry) >=
-			    PAGE_CACHE_SIZE)
-				break;
-			entry++;
+			spin_lock(&ctl->tree_lock);
+			ret = link_free_space(ctl, e);
+			ctl->total_bitmaps++;
+			ctl->op->recalc_thresholds(ctl);
+			spin_unlock(&ctl->tree_lock);
+			if (ret) {
+				printk(KERN_ERR "Duplicate entries in "
+				       "free space cache, dumping\n");
+				kmem_cache_free(btrfs_free_space_cachep, e);
+				goto free_cache;
+			}
+			list_add_tail(&e->list, &bitmaps);
 		}
 
-		/*
-		 * We read an entry out of this page, we need to move on to the
-		 * next page.
-		 */
-		if (need_loop) {
-			kunmap(page);
-			goto next;
-		}
-
-		/*
-		 * We add the bitmaps at the end of the entries in order that
-		 * the bitmap entries are added to the cache.
-		 */
-		e = list_entry(bitmaps.next, struct btrfs_free_space, list);
-		list_del_init(&e->list);
-		memcpy(e->bitmap, addr, PAGE_CACHE_SIZE);
-		kunmap(page);
-		num_bitmaps--;
-next:
-		unlock_page(page);
-		page_cache_release(page);
-		index++;
+		num_entries--;
 	}
 
+	/*
+	 * We add the bitmaps at the end of the entries so that the bitmap
+	 * entries are added to the cache last.
+	 */
+	list_for_each_entry_safe(e, n, &bitmaps, list) {
+		list_del_init(&e->list);
+		ret = io_ctl_read_bitmap(&io_ctl, e);
+		if (ret)
+			goto free_cache;
+	}
+
+	io_ctl_drop_pages(&io_ctl);
 	ret = 1;
 out:
+	io_ctl_free(&io_ctl);
 	return ret;
 free_cache:
+	io_ctl_drop_pages(&io_ctl);
 	__btrfs_remove_free_space_cache(ctl);
 	goto out;
 }
@@ -465,7 +728,7 @@
 	struct btrfs_root *root = fs_info->tree_root;
 	struct inode *inode;
 	struct btrfs_path *path;
-	int ret;
+	int ret = 0;
 	bool matched;
 	u64 used = btrfs_block_group_used(&block_group->item);
 
@@ -497,6 +760,14 @@
 		return 0;
 	}
 
+	/* We may have converted the inode and made the cache invalid. */
+	spin_lock(&block_group->lock);
+	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
+		spin_unlock(&block_group->lock);
+		goto out;
+	}
+	spin_unlock(&block_group->lock);
+
 	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
 				      path, block_group->key.objectid);
 	btrfs_free_path(path);
@@ -530,6 +801,19 @@
 	return ret;
 }
 
+/**
+ * __btrfs_write_out_cache - write out cached info to an inode
+ * @root: the root the inode belongs to
+ * @inode: the inode the cache is backed by
+ * @ctl: the free space cache we are going to write out
+ * @block_group: the block group this cache belongs to, if any
+ * @trans: the trans handle
+ * @path: the path to use
+ * @offset: the offset for the key we'll insert
+ *
+ * This function writes out a free space cache struct to disk for quick recovery
+ * on mount.  This will return 0 if it was successful in writing the cache out,
+ * and -1 if it was not.
+ */
 int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 			    struct btrfs_free_space_ctl *ctl,
 			    struct btrfs_block_group_cache *block_group,
@@ -540,42 +824,24 @@
 	struct extent_buffer *leaf;
 	struct rb_node *node;
 	struct list_head *pos, *n;
-	struct page **pages;
-	struct page *page;
 	struct extent_state *cached_state = NULL;
 	struct btrfs_free_cluster *cluster = NULL;
 	struct extent_io_tree *unpin = NULL;
+	struct io_ctl io_ctl;
 	struct list_head bitmap_list;
 	struct btrfs_key key;
 	u64 start, end, len;
-	u64 bytes = 0;
-	u32 crc = ~(u32)0;
-	int index = 0, num_pages = 0;
 	int entries = 0;
 	int bitmaps = 0;
-	int ret = -1;
-	bool next_page = false;
-	bool out_of_space = false;
+	int ret;
+	int err = -1;
 
 	INIT_LIST_HEAD(&bitmap_list);
 
-	node = rb_first(&ctl->free_space_offset);
-	if (!node)
-		return 0;
-
 	if (!i_size_read(inode))
 		return -1;
 
-	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-		PAGE_CACHE_SHIFT;
-
-	filemap_write_and_wait(inode->i_mapping);
-	btrfs_wait_ordered_range(inode, inode->i_size &
-				 ~(root->sectorsize - 1), (u64)-1);
-
-	pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
-	if (!pages)
-		return -1;
+	io_ctl_init(&io_ctl, inode, root);
 
 	/* Get the cluster for this block_group if it exists */
 	if (block_group && !list_empty(&block_group->cluster_list))
@@ -589,30 +855,9 @@
 	 */
 	unpin = root->fs_info->pinned_extents;
 
-	/*
-	 * Lock all pages first so we can lock the extent safely.
-	 *
-	 * NOTE: Because we hold the ref the entire time we're going to write to
-	 * the page find_get_page should never fail, so we don't do a check
-	 * after find_get_page at this point.  Just putting this here so people
-	 * know and don't freak out.
-	 */
-	while (index < num_pages) {
-		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
-		if (!page) {
-			int i;
+	/* Lock all pages first so we can lock the extent safely. */
+	io_ctl_prepare_pages(&io_ctl, inode, 0);
 
-			for (i = 0; i < num_pages; i++) {
-				unlock_page(pages[i]);
-				page_cache_release(pages[i]);
-			}
-			goto out;
-		}
-		pages[index] = page;
-		index++;
-	}
-
-	index = 0;
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state, GFP_NOFS);
 
@@ -623,189 +868,111 @@
 	if (block_group)
 		start = block_group->key.objectid;
 
+	node = rb_first(&ctl->free_space_offset);
+	if (!node && cluster) {
+		node = rb_first(&cluster->root);
+		cluster = NULL;
+	}
+
+	/* Make sure we can fit our crcs into the first page */
+	if (io_ctl.check_crcs &&
+	    (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
+		WARN_ON(1);
+		goto out_nospc;
+	}
+
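The guard above implies a size cap: all per-page crcs must fit in page 0. A quick runnable check of what that allows (assuming 4 KiB pages):

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SIZE 4096	/* assumption: 4 KiB pages */

int main(void)
{
	/* the guard rejects num_pages * sizeof(u32) >= PAGE_CACHE_SIZE */
	unsigned long max_pages = PAGE_CACHE_SIZE / sizeof(uint32_t) - 1;

	printf("largest crc-checked cache: %lu pages (~%lu KiB)\n",
	       max_pages, max_pages * PAGE_CACHE_SIZE / 1024);
	return 0;
}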
+	io_ctl_set_generation(&io_ctl, trans->transid);
+
 	/* Write out the extent entries */
-	do {
-		struct btrfs_free_space_entry *entry;
-		void *addr, *orig;
-		unsigned long offset = 0;
+	while (node) {
+		struct btrfs_free_space *e;
 
-		next_page = false;
+		e = rb_entry(node, struct btrfs_free_space, offset_index);
+		entries++;
 
-		if (index >= num_pages) {
-			out_of_space = true;
+		ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
+				       e->bitmap);
+		if (ret)
+			goto out_nospc;
+
+		if (e->bitmap) {
+			list_add_tail(&e->list, &bitmap_list);
+			bitmaps++;
+		}
+		node = rb_next(node);
+		if (!node && cluster) {
+			node = rb_first(&cluster->root);
+			cluster = NULL;
+		}
+	}
+
+	/*
+	 * We want to add any pinned extents to our free space cache
+	 * so we don't leak the space
+	 */
+	while (block_group && (start < block_group->key.objectid +
+			       block_group->key.offset)) {
+		ret = find_first_extent_bit(unpin, start, &start, &end,
+					    EXTENT_DIRTY);
+		if (ret) {
+			ret = 0;
 			break;
 		}
 
-		page = pages[index];
+		/* This pinned extent is out of our range */
+		if (start >= block_group->key.objectid +
+		    block_group->key.offset)
+			break;
 
-		orig = addr = kmap(page);
-		if (index == 0) {
-			u64 *gen;
+		len = block_group->key.objectid +
+			block_group->key.offset - start;
+		len = min(len, end + 1 - start);
 
-			/*
-			 * We're going to put in a bogus crc for this page to
-			 * make sure that old kernels who aren't aware of this
-			 * format will be sure to discard the cache.
-			 */
-			addr += sizeof(u64);
-			offset += sizeof(u64);
+		entries++;
+		ret = io_ctl_add_entry(&io_ctl, start, len, NULL);
+		if (ret)
+			goto out_nospc;
 
-			gen = addr;
-			*gen = trans->transid;
-			addr += sizeof(u64);
-			offset += sizeof(u64);
-		}
-		entry = addr;
-
-		memset(addr, 0, PAGE_CACHE_SIZE - offset);
-		while (node && !next_page) {
-			struct btrfs_free_space *e;
-
-			e = rb_entry(node, struct btrfs_free_space, offset_index);
-			entries++;
-
-			entry->offset = cpu_to_le64(e->offset);
-			entry->bytes = cpu_to_le64(e->bytes);
-			if (e->bitmap) {
-				entry->type = BTRFS_FREE_SPACE_BITMAP;
-				list_add_tail(&e->list, &bitmap_list);
-				bitmaps++;
-			} else {
-				entry->type = BTRFS_FREE_SPACE_EXTENT;
-			}
-			node = rb_next(node);
-			if (!node && cluster) {
-				node = rb_first(&cluster->root);
-				cluster = NULL;
-			}
-			offset += sizeof(struct btrfs_free_space_entry);
-			if (offset + sizeof(struct btrfs_free_space_entry) >=
-			    PAGE_CACHE_SIZE)
-				next_page = true;
-			entry++;
-		}
-
-		/*
-		 * We want to add any pinned extents to our free space cache
-		 * so we don't leak the space
-		 */
-		while (block_group && !next_page &&
-		       (start < block_group->key.objectid +
-			block_group->key.offset)) {
-			ret = find_first_extent_bit(unpin, start, &start, &end,
-						    EXTENT_DIRTY);
-			if (ret) {
-				ret = 0;
-				break;
-			}
-
-			/* This pinned extent is out of our range */
-			if (start >= block_group->key.objectid +
-			    block_group->key.offset)
-				break;
-
-			len = block_group->key.objectid +
-				block_group->key.offset - start;
-			len = min(len, end + 1 - start);
-
-			entries++;
-			entry->offset = cpu_to_le64(start);
-			entry->bytes = cpu_to_le64(len);
-			entry->type = BTRFS_FREE_SPACE_EXTENT;
-
-			start = end + 1;
-			offset += sizeof(struct btrfs_free_space_entry);
-			if (offset + sizeof(struct btrfs_free_space_entry) >=
-			    PAGE_CACHE_SIZE)
-				next_page = true;
-			entry++;
-		}
-
-		/* Generate bogus crc value */
-		if (index == 0) {
-			u32 *tmp;
-			crc = btrfs_csum_data(root, orig + sizeof(u64), crc,
-					      PAGE_CACHE_SIZE - sizeof(u64));
-			btrfs_csum_final(crc, (char *)&crc);
-			crc++;
-			tmp = orig;
-			*tmp = crc;
-		}
-
-		kunmap(page);
-
-		bytes += PAGE_CACHE_SIZE;
-
-		index++;
-	} while (node || next_page);
+		start = end + 1;
+	}
 
 	/* Write out the bitmaps */
 	list_for_each_safe(pos, n, &bitmap_list) {
-		void *addr;
 		struct btrfs_free_space *entry =
 			list_entry(pos, struct btrfs_free_space, list);
 
-		if (index >= num_pages) {
-			out_of_space = true;
-			break;
-		}
-		page = pages[index];
-
-		addr = kmap(page);
-		memcpy(addr, entry->bitmap, PAGE_CACHE_SIZE);
-		kunmap(page);
-		bytes += PAGE_CACHE_SIZE;
-
+		ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
+		if (ret)
+			goto out_nospc;
 		list_del_init(&entry->list);
-		index++;
-	}
-
-	if (out_of_space) {
-		btrfs_drop_pages(pages, num_pages);
-		unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
-				     i_size_read(inode) - 1, &cached_state,
-				     GFP_NOFS);
-		ret = 0;
-		goto out;
 	}
 
 	/* Zero out the rest of the pages just to make sure */
-	while (index < num_pages) {
-		void *addr;
+	io_ctl_zero_remaining_pages(&io_ctl);
 
-		page = pages[index];
-		addr = kmap(page);
-		memset(addr, 0, PAGE_CACHE_SIZE);
-		kunmap(page);
-		bytes += PAGE_CACHE_SIZE;
-		index++;
-	}
-
-	ret = btrfs_dirty_pages(root, inode, pages, num_pages, 0,
-					    bytes, &cached_state);
-	btrfs_drop_pages(pages, num_pages);
+	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
+				0, i_size_read(inode), &cached_state);
+	io_ctl_drop_pages(&io_ctl);
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
 			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
 
-	if (ret) {
-		ret = 0;
+	if (ret)
 		goto out;
-	}
 
-	BTRFS_I(inode)->generation = trans->transid;
 
-	filemap_write_and_wait(inode->i_mapping);
+	ret = filemap_write_and_wait(inode->i_mapping);
+	if (ret)
+		goto out;
 
 	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
 	key.offset = offset;
 	key.type = 0;
 
-	ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
+	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 	if (ret < 0) {
-		ret = -1;
-		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
-				 EXTENT_DIRTY | EXTENT_DELALLOC |
-				 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
+		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
+				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
+				 GFP_NOFS);
 		goto out;
 	}
 	leaf = path->nodes[0];
@@ -816,15 +983,16 @@
 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
 		    found_key.offset != offset) {
-			ret = -1;
-			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
-					 EXTENT_DIRTY | EXTENT_DELALLOC |
-					 EXTENT_DO_ACCOUNTING, 0, 0, NULL,
-					 GFP_NOFS);
+			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
+					 inode->i_size - 1,
+					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
+					 NULL, GFP_NOFS);
 			btrfs_release_path(path);
 			goto out;
 		}
 	}
+
+	BTRFS_I(inode)->generation = trans->transid;
 	header = btrfs_item_ptr(leaf, path->slots[0],
 				struct btrfs_free_space_header);
 	btrfs_set_free_space_entries(leaf, header, entries);
@@ -833,16 +1001,26 @@
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_release_path(path);
 
-	ret = 1;
-
+	err = 0;
 out:
-	kfree(pages);
-	if (ret != 1) {
-		invalidate_inode_pages2_range(inode->i_mapping, 0, index);
+	io_ctl_free(&io_ctl);
+	if (err) {
+		invalidate_inode_pages2(inode->i_mapping);
 		BTRFS_I(inode)->generation = 0;
 	}
 	btrfs_update_inode(trans, root, inode);
-	return ret;
+	return err;
+
+out_nospc:
+	list_for_each_safe(pos, n, &bitmap_list) {
+		struct btrfs_free_space *entry =
+			list_entry(pos, struct btrfs_free_space, list);
+		list_del_init(&entry->list);
+	}
+	io_ctl_drop_pages(&io_ctl);
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+	goto out;
 }
 
 int btrfs_write_out_cache(struct btrfs_root *root,
@@ -869,14 +1047,15 @@
 
 	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
 				      path, block_group->key.objectid);
-	if (ret < 0) {
+	if (ret) {
 		spin_lock(&block_group->lock);
 		block_group->disk_cache_state = BTRFS_DC_ERROR;
 		spin_unlock(&block_group->lock);
 		ret = 0;
-
+#ifdef DEBUG
 		printk(KERN_ERR "btrfs: failed to write free space cache "
 		       "for block group %llu\n", block_group->key.objectid);
+#endif
 	}
 
 	iput(inode);
@@ -1701,6 +1880,7 @@
 			ctl->total_bitmaps--;
 		}
 		kmem_cache_free(btrfs_free_space_cachep, info);
+		ret = 0;
 		goto out_lock;
 	}
 
@@ -1708,7 +1888,8 @@
 		unlink_free_space(ctl, info);
 		info->offset += bytes;
 		info->bytes -= bytes;
-		link_free_space(ctl, info);
+		ret = link_free_space(ctl, info);
+		WARN_ON(ret);
 		goto out_lock;
 	}
 
@@ -2472,9 +2653,19 @@
 		spin_unlock(&ctl->tree_lock);
 
 		if (bytes >= minlen) {
-			int update_ret;
-			update_ret = btrfs_update_reserved_bytes(block_group,
-								 bytes, 1, 1);
+			struct btrfs_space_info *space_info;
+			int update = 0;
+
+			space_info = block_group->space_info;
+			spin_lock(&space_info->lock);
+			spin_lock(&block_group->lock);
+			if (!block_group->ro) {
+				block_group->reserved += bytes;
+				space_info->bytes_reserved += bytes;
+				update = 1;
+			}
+			spin_unlock(&block_group->lock);
+			spin_unlock(&space_info->lock);
 
 			ret = btrfs_error_discard_extent(fs_info->extent_root,
 							 start,
@@ -2482,9 +2673,16 @@
 							 &actually_trimmed);
 
 			btrfs_add_free_space(block_group, start, bytes);
-			if (!update_ret)
-				btrfs_update_reserved_bytes(block_group,
-							    bytes, 0, 1);
+			if (update) {
+				spin_lock(&space_info->lock);
+				spin_lock(&block_group->lock);
+				if (block_group->ro)
+					space_info->bytes_readonly += bytes;
+				block_group->reserved -= bytes;
+				space_info->bytes_reserved -= bytes;
+				spin_unlock(&space_info->lock);
+				spin_unlock(&block_group->lock);
+			}
 
 			if (ret)
 				break;
@@ -2643,9 +2841,13 @@
 		return 0;
 
 	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
-	if (ret < 0)
+	if (ret) {
+		btrfs_delalloc_release_metadata(inode, inode->i_size);
+#ifdef DEBUG
 		printk(KERN_ERR "btrfs: failed to write free ino cache "
 		       "for root %llu\n", root->root_key.objectid);
+#endif
+	}
 
 	iput(inode);
 	return ret;
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index b4087e0..53dcbdf 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -465,14 +465,16 @@
 	/* Just to make sure we have enough space */
 	prealloc += 8 * PAGE_CACHE_SIZE;
 
-	ret = btrfs_check_data_free_space(inode, prealloc);
+	ret = btrfs_delalloc_reserve_space(inode, prealloc);
 	if (ret)
 		goto out_put;
 
 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
 					      prealloc, prealloc, &alloc_hint);
-	if (ret)
+	if (ret) {
+		btrfs_delalloc_release_space(inode, prealloc);
 		goto out_put;
+	}
 	btrfs_free_reserved_data_space(inode, prealloc);
 
 out_put:
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 75686a6..966ddcc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -45,10 +45,10 @@
 #include "btrfs_inode.h"
 #include "ioctl.h"
 #include "print-tree.h"
-#include "volumes.h"
 #include "ordered-data.h"
 #include "xattr.h"
 #include "tree-log.h"
+#include "volumes.h"
 #include "compression.h"
 #include "locking.h"
 #include "free-space-cache.h"
@@ -393,7 +393,10 @@
 	     (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS))) {
 		WARN_ON(pages);
 		pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
-		BUG_ON(!pages);
+		if (!pages) {
+			/* just bail out to the uncompressed code */
+			goto cont;
+		}
 
 		if (BTRFS_I(inode)->force_compress)
 			compress_type = BTRFS_I(inode)->force_compress;
@@ -424,6 +427,7 @@
 			will_compress = 1;
 		}
 	}
+cont:
 	if (start == 0) {
 		trans = btrfs_join_transaction(root);
 		BUG_ON(IS_ERR(trans));
@@ -820,7 +824,7 @@
 	}
 
 	BUG_ON(disk_num_bytes >
-	       btrfs_super_total_bytes(&root->fs_info->super_copy));
+	       btrfs_super_total_bytes(root->fs_info->super_copy));
 
 	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
 	btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
@@ -1792,12 +1796,12 @@
 	}
 	ret = 0;
 out:
-	if (nolock) {
-		if (trans)
-			btrfs_end_transaction_nolock(trans, root);
-	} else {
+	if (root != root->fs_info->tree_root)
 		btrfs_delalloc_release_metadata(inode, ordered_extent->len);
-		if (trans)
+	if (trans) {
+		if (nolock)
+			btrfs_end_transaction_nolock(trans, root);
+		else
 			btrfs_end_transaction(trans, root);
 	}
 
@@ -1819,153 +1823,9 @@
 }
 
 /*
- * When IO fails, either with EIO or csum verification fails, we
- * try other mirrors that might have a good copy of the data.  This
- * io_failure_record is used to record state as we go through all the
- * mirrors.  If another mirror has good data, the page is set up to date
- * and things continue.  If a good mirror can't be found, the original
- * bio end_io callback is called to indicate things have failed.
- */
-struct io_failure_record {
-	struct page *page;
-	u64 start;
-	u64 len;
-	u64 logical;
-	unsigned long bio_flags;
-	int last_mirror;
-};
-
-static int btrfs_io_failed_hook(struct bio *failed_bio,
-			 struct page *page, u64 start, u64 end,
-			 struct extent_state *state)
-{
-	struct io_failure_record *failrec = NULL;
-	u64 private;
-	struct extent_map *em;
-	struct inode *inode = page->mapping->host;
-	struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
-	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
-	struct bio *bio;
-	int num_copies;
-	int ret;
-	int rw;
-	u64 logical;
-
-	ret = get_state_private(failure_tree, start, &private);
-	if (ret) {
-		failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
-		if (!failrec)
-			return -ENOMEM;
-		failrec->start = start;
-		failrec->len = end - start + 1;
-		failrec->last_mirror = 0;
-		failrec->bio_flags = 0;
-
-		read_lock(&em_tree->lock);
-		em = lookup_extent_mapping(em_tree, start, failrec->len);
-		if (em->start > start || em->start + em->len < start) {
-			free_extent_map(em);
-			em = NULL;
-		}
-		read_unlock(&em_tree->lock);
-
-		if (IS_ERR_OR_NULL(em)) {
-			kfree(failrec);
-			return -EIO;
-		}
-		logical = start - em->start;
-		logical = em->block_start + logical;
-		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
-			logical = em->block_start;
-			failrec->bio_flags = EXTENT_BIO_COMPRESSED;
-			extent_set_compress_type(&failrec->bio_flags,
-						 em->compress_type);
-		}
-		failrec->logical = logical;
-		free_extent_map(em);
-		set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
-				EXTENT_DIRTY, GFP_NOFS);
-		set_state_private(failure_tree, start,
-				 (u64)(unsigned long)failrec);
-	} else {
-		failrec = (struct io_failure_record *)(unsigned long)private;
-	}
-	num_copies = btrfs_num_copies(
-			      &BTRFS_I(inode)->root->fs_info->mapping_tree,
-			      failrec->logical, failrec->len);
-	failrec->last_mirror++;
-	if (!state) {
-		spin_lock(&BTRFS_I(inode)->io_tree.lock);
-		state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
-						    failrec->start,
-						    EXTENT_LOCKED);
-		if (state && state->start != failrec->start)
-			state = NULL;
-		spin_unlock(&BTRFS_I(inode)->io_tree.lock);
-	}
-	if (!state || failrec->last_mirror > num_copies) {
-		set_state_private(failure_tree, failrec->start, 0);
-		clear_extent_bits(failure_tree, failrec->start,
-				  failrec->start + failrec->len - 1,
-				  EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
-		kfree(failrec);
-		return -EIO;
-	}
-	bio = bio_alloc(GFP_NOFS, 1);
-	bio->bi_private = state;
-	bio->bi_end_io = failed_bio->bi_end_io;
-	bio->bi_sector = failrec->logical >> 9;
-	bio->bi_bdev = failed_bio->bi_bdev;
-	bio->bi_size = 0;
-
-	bio_add_page(bio, page, failrec->len, start - page_offset(page));
-	if (failed_bio->bi_rw & REQ_WRITE)
-		rw = WRITE;
-	else
-		rw = READ;
-
-	ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
-						      failrec->last_mirror,
-						      failrec->bio_flags, 0);
-	return ret;
-}
-
-/*
- * each time an IO finishes, we do a fast check in the IO failure tree
- * to see if we need to process or clean up an io_failure_record
- */
-static int btrfs_clean_io_failures(struct inode *inode, u64 start)
-{
-	u64 private;
-	u64 private_failure;
-	struct io_failure_record *failure;
-	int ret;
-
-	private = 0;
-	if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
-			     (u64)-1, 1, EXTENT_DIRTY, 0)) {
-		ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
-					start, &private_failure);
-		if (ret == 0) {
-			failure = (struct io_failure_record *)(unsigned long)
-				   private_failure;
-			set_state_private(&BTRFS_I(inode)->io_failure_tree,
-					  failure->start, 0);
-			clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
-					  failure->start,
-					  failure->start + failure->len - 1,
-					  EXTENT_DIRTY | EXTENT_LOCKED,
-					  GFP_NOFS);
-			kfree(failure);
-		}
-	}
-	return 0;
-}
-
-/*
  * when reads are done, we need to check csums to verify the data is correct
- * if there's a match, we allow the bio to finish.  If not, we go through
- * the io_failure_record routines to find good copies
+ * if there's a match, we allow the bio to finish.  If not, the code in
+ * extent_io.c will try to find good copies for us.
  */
 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 			       struct extent_state *state)
@@ -2011,10 +1871,6 @@
 
 	kunmap_atomic(kaddr, KM_USER0);
 good:
-	/* if the io failure tree for this inode is non-empty,
-	 * check to see if we've recovered from a failed IO
-	 */
-	btrfs_clean_io_failures(inode, start);
 	return 0;
 
 zeroit:
@@ -2079,89 +1935,6 @@
 	up_read(&root->fs_info->cleanup_work_sem);
 }
 
-/*
- * calculate extra metadata reservation when snapshotting a subvolume
- * contains orphan files.
- */
-void btrfs_orphan_pre_snapshot(struct btrfs_trans_handle *trans,
-				struct btrfs_pending_snapshot *pending,
-				u64 *bytes_to_reserve)
-{
-	struct btrfs_root *root;
-	struct btrfs_block_rsv *block_rsv;
-	u64 num_bytes;
-	int index;
-
-	root = pending->root;
-	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
-		return;
-
-	block_rsv = root->orphan_block_rsv;
-
-	/* orphan block reservation for the snapshot */
-	num_bytes = block_rsv->size;
-
-	/*
-	 * after the snapshot is created, COWing tree blocks may use more
-	 * space than it frees. So we should make sure there is enough
-	 * reserved space.
-	 */
-	index = trans->transid & 0x1;
-	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
-		num_bytes += block_rsv->size -
-			     (block_rsv->reserved + block_rsv->freed[index]);
-	}
-
-	*bytes_to_reserve += num_bytes;
-}
-
-void btrfs_orphan_post_snapshot(struct btrfs_trans_handle *trans,
-				struct btrfs_pending_snapshot *pending)
-{
-	struct btrfs_root *root = pending->root;
-	struct btrfs_root *snap = pending->snap;
-	struct btrfs_block_rsv *block_rsv;
-	u64 num_bytes;
-	int index;
-	int ret;
-
-	if (!root->orphan_block_rsv || list_empty(&root->orphan_list))
-		return;
-
-	/* refill source subvolume's orphan block reservation */
-	block_rsv = root->orphan_block_rsv;
-	index = trans->transid & 0x1;
-	if (block_rsv->reserved + block_rsv->freed[index] < block_rsv->size) {
-		num_bytes = block_rsv->size -
-			    (block_rsv->reserved + block_rsv->freed[index]);
-		ret = btrfs_block_rsv_migrate(&pending->block_rsv,
-					      root->orphan_block_rsv,
-					      num_bytes);
-		BUG_ON(ret);
-	}
-
-	/* setup orphan block reservation for the snapshot */
-	block_rsv = btrfs_alloc_block_rsv(snap);
-	BUG_ON(!block_rsv);
-
-	btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
-	snap->orphan_block_rsv = block_rsv;
-
-	num_bytes = root->orphan_block_rsv->size;
-	ret = btrfs_block_rsv_migrate(&pending->block_rsv,
-				      block_rsv, num_bytes);
-	BUG_ON(ret);
-
-#if 0
-	/* insert orphan item for the snapshot */
-	WARN_ON(!root->orphan_item_inserted);
-	ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
-				       snap->root_key.objectid);
-	BUG_ON(ret);
-	snap->orphan_item_inserted = 1;
-#endif
-}
-
 enum btrfs_orphan_cleanup_state {
 	ORPHAN_CLEANUP_STARTED	= 1,
 	ORPHAN_CLEANUP_DONE	= 2,
@@ -2247,9 +2020,6 @@
 	}
 	spin_unlock(&root->orphan_lock);
 
-	if (block_rsv)
-		btrfs_add_durable_block_rsv(root->fs_info, block_rsv);
-
 	/* grab metadata reservation from transaction handle */
 	if (reserve) {
 		ret = btrfs_orphan_reserve_metadata(trans, inode);
@@ -2316,6 +2086,7 @@
 	struct btrfs_key key, found_key;
 	struct btrfs_trans_handle *trans;
 	struct inode *inode;
+	u64 last_objectid = 0;
 	int ret = 0, nr_unlink = 0, nr_truncate = 0;
 
 	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
@@ -2367,13 +2138,39 @@
 		 * crossing root thing.  we store the inode number in the
 		 * offset of the orphan item.
 		 */
+
+		if (found_key.offset == last_objectid) {
+			printk(KERN_ERR "btrfs: Error removing orphan entry, "
+			       "stopping orphan cleanup\n");
+			ret = -EINVAL;
+			goto out;
+		}
+
+		last_objectid = found_key.offset;
+
 		found_key.objectid = found_key.offset;
 		found_key.type = BTRFS_INODE_ITEM_KEY;
 		found_key.offset = 0;
 		inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
-		if (IS_ERR(inode)) {
-			ret = PTR_ERR(inode);
+		ret = PTR_RET(inode);
+		if (ret && ret != -ESTALE)
 			goto out;
+
+		/*
+		 * Inode is already gone but the orphan item is still there,
+		 * kill the orphan item.
+		 */
+		if (ret == -ESTALE) {
+			trans = btrfs_start_transaction(root, 1);
+			if (IS_ERR(trans)) {
+				ret = PTR_ERR(trans);
+				goto out;
+			}
+			ret = btrfs_del_orphan_item(trans, root,
+						    found_key.objectid);
+			BUG_ON(ret);
+			btrfs_end_transaction(trans, root);
+			continue;
 		}
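The last_objectid check above is a progress guard: if the same orphan key comes back twice, deletion failed and the scan would otherwise spin forever. The generic shape, as a sketch with a hypothetical next_orphan() iterator:

	/* Illustrative only; next_orphan() is not a btrfs API. */
	u64 last_objectid = 0;

	while (next_orphan(&found_key) == 0) {
		if (found_key.offset == last_objectid)
			return -EINVAL;		/* no forward progress */
		last_objectid = found_key.offset;
		/* ... delete or process the orphan ... */
	}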
 
 		/*
@@ -2384,24 +2181,6 @@
 		list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
 		spin_unlock(&root->orphan_lock);
 
-		/*
-		 * if this is a bad inode, means we actually succeeded in
-		 * removing the inode, but not the orphan record, which means
-		 * we need to manually delete the orphan since iput will just
-		 * do a destroy_inode
-		 */
-		if (is_bad_inode(inode)) {
-			trans = btrfs_start_transaction(root, 0);
-			if (IS_ERR(trans)) {
-				ret = PTR_ERR(trans);
-				goto out;
-			}
-			btrfs_orphan_del(trans, inode);
-			btrfs_end_transaction(trans, root);
-			iput(inode);
-			continue;
-		}
-
 		/* if we have links, this was a truncate, lets do that */
 		if (inode->i_nlink) {
 			if (!S_ISREG(inode->i_mode)) {
@@ -2835,7 +2614,16 @@
 	u64 ino = btrfs_ino(inode);
 	u64 dir_ino = btrfs_ino(dir);
 
-	trans = btrfs_start_transaction(root, 10);
+	/*
+	 * 1 for the possible orphan item
+	 * 1 for the dir item
+	 * 1 for the dir index
+	 * 1 for the inode ref
+	 * 1 for the inode ref in the tree log
+	 * 2 for the dir entries in the log
+	 * 1 for the inode
+	 */
+	trans = btrfs_start_transaction(root, 8);
 	if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
 		return trans;
 
@@ -2858,7 +2646,8 @@
 		return ERR_PTR(-ENOMEM);
 	}
 
-	trans = btrfs_start_transaction(root, 0);
+	/* 1 for the orphan item */
+	trans = btrfs_start_transaction(root, 1);
 	if (IS_ERR(trans)) {
 		btrfs_free_path(path);
 		root->fs_info->enospc_unlink = 0;
@@ -2963,6 +2752,12 @@
 	err = 0;
 out:
 	btrfs_free_path(path);
+	/* Migrate the orphan reservation over */
+	if (!err)
+		err = btrfs_block_rsv_migrate(trans->block_rsv,
+				&root->fs_info->global_block_rsv,
+				trans->bytes_reserved);
+
 	if (err) {
 		btrfs_end_transaction(trans, root);
 		root->fs_info->enospc_unlink = 0;
@@ -2977,6 +2772,9 @@
 			       struct btrfs_root *root)
 {
 	if (trans->block_rsv == &root->fs_info->global_block_rsv) {
+		btrfs_block_rsv_release(root, trans->block_rsv,
+					trans->bytes_reserved);
+		trans->block_rsv = &root->fs_info->trans_block_rsv;
 		BUG_ON(!root->fs_info->enospc_unlink);
 		root->fs_info->enospc_unlink = 0;
 	}
@@ -3368,6 +3166,7 @@
 	pgoff_t index = from >> PAGE_CACHE_SHIFT;
 	unsigned offset = from & (PAGE_CACHE_SIZE-1);
 	struct page *page;
+	gfp_t mask = btrfs_alloc_write_mask(mapping);
 	int ret = 0;
 	u64 page_start;
 	u64 page_end;
@@ -3380,7 +3179,7 @@
 
 	ret = -ENOMEM;
 again:
-	page = find_or_create_page(mapping, index, GFP_NOFS);
+	page = find_or_create_page(mapping, index, mask);
 	if (!page) {
 		btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
 		goto out;
@@ -3613,6 +3412,8 @@
 {
 	struct btrfs_trans_handle *trans;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_block_rsv *rsv, *global_rsv;
+	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
 	unsigned long nr;
 	int ret;
 
@@ -3640,22 +3441,55 @@
 		goto no_delete;
 	}
 
+	rsv = btrfs_alloc_block_rsv(root);
+	if (!rsv) {
+		btrfs_orphan_del(NULL, inode);
+		goto no_delete;
+	}
+	rsv->size = min_size;
+	global_rsv = &root->fs_info->global_block_rsv;
+
 	btrfs_i_size_write(inode, 0);
 
+	/*
+	 * This is a bit simpler than btrfs_truncate since
+	 *
+	 * 1) We've already reserved our space for our orphan item in the
+	 *    unlink.
+	 * 2) We're going to delete the inode item, so we don't need to update
+	 *    it at all.
+	 *
+	 * So we just need to reserve some slack space in case we add bytes when
+	 * doing the truncate.
+	 */
 	while (1) {
-		trans = btrfs_join_transaction(root);
-		BUG_ON(IS_ERR(trans));
-		trans->block_rsv = root->orphan_block_rsv;
+		ret = btrfs_block_rsv_refill(root, rsv, min_size);
 
-		ret = btrfs_block_rsv_check(trans, root,
-					    root->orphan_block_rsv, 0, 5);
+		/*
+		 * Try and steal from the global reserve since we will
+		 * likely not use this space anyway, we want to try as
+		 * hard as possible to get this to work.
+		 */
+		if (ret)
+			ret = btrfs_block_rsv_migrate(global_rsv, rsv, min_size);
+
 		if (ret) {
-			BUG_ON(ret != -EAGAIN);
-			ret = btrfs_commit_transaction(trans, root);
-			BUG_ON(ret);
-			continue;
+			printk(KERN_WARNING "Could not get space for a "
+			       "delete, will truncate on mount %d\n", ret);
+			btrfs_orphan_del(NULL, inode);
+			btrfs_free_block_rsv(root, rsv);
+			goto no_delete;
 		}
 
+		trans = btrfs_start_transaction(root, 0);
+		if (IS_ERR(trans)) {
+			btrfs_orphan_del(NULL, inode);
+			btrfs_free_block_rsv(root, rsv);
+			goto no_delete;
+		}
+
+		trans->block_rsv = rsv;
+
 		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
 		if (ret != -EAGAIN)
 			break;
@@ -3664,14 +3498,17 @@
 		btrfs_end_transaction(trans, root);
 		trans = NULL;
 		btrfs_btree_balance_dirty(root, nr);
-
 	}
 
+	btrfs_free_block_rsv(root, rsv);
+
 	if (ret == 0) {
+		trans->block_rsv = root->orphan_block_rsv;
 		ret = btrfs_orphan_del(trans, inode);
 		BUG_ON(ret);
 	}
 
+	trans->block_rsv = &root->fs_info->trans_block_rsv;
 	if (!(root == root->fs_info->tree_root ||
 	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
 		btrfs_return_ino(root, btrfs_ino(inode));
@@ -5795,8 +5632,7 @@
 	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
 		ret = btrfs_ordered_update_i_size(inode, 0, ordered);
 		if (!ret)
-			ret = btrfs_update_inode(trans, root, inode);
-		err = ret;
+			err = btrfs_update_inode(trans, root, inode);
 		goto out;
 	}
 
@@ -6289,7 +6125,7 @@
 {
 	struct extent_io_tree *tree;
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
-	return extent_read_full_page(tree, page, btrfs_get_extent);
+	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
 }
 
 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -6541,6 +6377,7 @@
 	struct btrfs_trans_handle *trans;
 	unsigned long nr;
 	u64 mask = root->sectorsize - 1;
+	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
 
 	ret = btrfs_truncate_page(inode->i_mapping, inode->i_size);
 	if (ret)
@@ -6588,19 +6425,23 @@
 	rsv = btrfs_alloc_block_rsv(root);
 	if (!rsv)
 		return -ENOMEM;
-	btrfs_add_durable_block_rsv(root->fs_info, rsv);
+	rsv->size = min_size;
 
+	/*
+	 * 1 for the truncate slack space
+	 * 1 for the orphan item we're going to add
+	 * 1 for the orphan item deletion
+	 * 1 for updating the inode.
+	 */
 	trans = btrfs_start_transaction(root, 4);
 	if (IS_ERR(trans)) {
 		err = PTR_ERR(trans);
 		goto out;
 	}
 
-	/*
-	 * Reserve space for the truncate process.  Truncate should be adding
-	 * space, but if there are snapshots it may end up using space.
-	 */
-	ret = btrfs_truncate_reserve_metadata(trans, root, rsv);
+	/* Migrate the slack space for the truncate to our reserve */
+	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
+				      min_size);
 	BUG_ON(ret);
 
 	ret = btrfs_orphan_add(trans, inode);
@@ -6609,21 +6450,6 @@
 		goto out;
 	}
 
-	nr = trans->blocks_used;
-	btrfs_end_transaction(trans, root);
-	btrfs_btree_balance_dirty(root, nr);
-
-	/*
-	 * Ok so we've already migrated our bytes over for the truncate, so here
-	 * just reserve the one slot we need for updating the inode.
-	 */
-	trans = btrfs_start_transaction(root, 1);
-	if (IS_ERR(trans)) {
-		err = PTR_ERR(trans);
-		goto out;
-	}
-	trans->block_rsv = rsv;
-
 	/*
 	 * setattr is responsible for setting the ordered_data_close flag,
 	 * but that is only tested during the last file release.  That
@@ -6645,20 +6471,30 @@
 		btrfs_add_ordered_operation(trans, root, inode);
 
 	while (1) {
+		ret = btrfs_block_rsv_refill(root, rsv, min_size);
+		if (ret) {
+			/*
+			 * This can only happen with the original transaction
+			 * we started above; every other time we shouldn't
+			 * have a transaction started yet.
+			 */
+			if (ret == -EAGAIN)
+				goto end_trans;
+			err = ret;
+			break;
+		}
+
 		if (!trans) {
-			trans = btrfs_start_transaction(root, 3);
+			/* Just need the 1 for updating the inode */
+			trans = btrfs_start_transaction(root, 1);
 			if (IS_ERR(trans)) {
 				err = PTR_ERR(trans);
 				goto out;
 			}
-
-			ret = btrfs_truncate_reserve_metadata(trans, root,
-							      rsv);
-			BUG_ON(ret);
-
-			trans->block_rsv = rsv;
 		}
 
+		trans->block_rsv = rsv;
+
 		ret = btrfs_truncate_inode_items(trans, root, inode,
 						 inode->i_size,
 						 BTRFS_EXTENT_DATA_KEY);
@@ -6673,7 +6509,7 @@
 			err = ret;
 			break;
 		}
-
+end_trans:
 		nr = trans->blocks_used;
 		btrfs_end_transaction(trans, root);
 		trans = NULL;
@@ -6755,9 +6591,9 @@
 	ei->last_sub_trans = 0;
 	ei->logged_trans = 0;
 	ei->delalloc_bytes = 0;
-	ei->reserved_bytes = 0;
 	ei->disk_i_size = 0;
 	ei->flags = 0;
+	ei->csum_bytes = 0;
 	ei->index_cnt = (u64)-1;
 	ei->last_unlink_trans = 0;
 
@@ -6803,6 +6639,8 @@
 	WARN_ON(inode->i_data.nrpages);
 	WARN_ON(BTRFS_I(inode)->outstanding_extents);
 	WARN_ON(BTRFS_I(inode)->reserved_extents);
+	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
+	WARN_ON(BTRFS_I(inode)->csum_bytes);
 
 	/*
 	 * This can happen where we create an inode, but somebody else also
@@ -7420,7 +7258,6 @@
 	.readpage_end_io_hook = btrfs_readpage_end_io_hook,
 	.writepage_end_io_hook = btrfs_writepage_end_io_hook,
 	.writepage_start_hook = btrfs_writepage_start_hook,
-	.readpage_io_failed_hook = btrfs_io_failed_hook,
 	.set_bit_hook = btrfs_set_bit_hook,
 	.clear_bit_hook = btrfs_clear_bit_hook,
 	.merge_extent_hook = btrfs_merge_extent_hook,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index dae5dfe..4a34c47 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -51,6 +51,7 @@
 #include "volumes.h"
 #include "locking.h"
 #include "inode-map.h"
+#include "backref.h"
 
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -117,7 +118,7 @@
 /*
  * Inherit flags from the parent inode.
  *
- * Unlike extN we don't have any flags we don't want to inherit currently.
+ * Currently only the compression flags and the cow flags are inherited.
  */
 void btrfs_inherit_iflags(struct inode *inode, struct inode *dir)
 {
@@ -128,12 +129,17 @@
 
 	flags = BTRFS_I(dir)->flags;
 
-	if (S_ISREG(inode->i_mode))
-		flags &= ~BTRFS_INODE_DIRSYNC;
-	else if (!S_ISDIR(inode->i_mode))
-		flags &= (BTRFS_INODE_NODUMP | BTRFS_INODE_NOATIME);
+	if (flags & BTRFS_INODE_NOCOMPRESS) {
+		BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS;
+		BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
+	} else if (flags & BTRFS_INODE_COMPRESS) {
+		BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS;
+		BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS;
+	}
 
-	BTRFS_I(inode)->flags = flags;
+	if (flags & BTRFS_INODE_NODATACOW)
+		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
+
 	btrfs_update_iflags(inode);
 }
 
@@ -277,6 +283,7 @@
 	struct fstrim_range range;
 	u64 minlen = ULLONG_MAX;
 	u64 num_devices = 0;
+	u64 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
 	int ret;
 
 	if (!capable(CAP_SYS_ADMIN))
@@ -295,12 +302,15 @@
 		}
 	}
 	rcu_read_unlock();
+
 	if (!num_devices)
 		return -EOPNOTSUPP;
-
 	if (copy_from_user(&range, arg, sizeof(range)))
 		return -EFAULT;
+	if (range.start > total_bytes)
+		return -EINVAL;
 
+	range.len = min(range.len, total_bytes - range.start);
 	range.minlen = max(range.minlen, minlen);
 	ret = btrfs_trim_fs(root, &range);
 	if (ret < 0)
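
The two added lines above cap the trim request at the filesystem's total
size, so oversized user ranges no longer reach btrfs_trim_fs. For
reference, a hypothetical userspace caller of the standard FITRIM ioctl
that this handler serves; FITRIM and struct fstrim_range come from
linux/fs.h:

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int trim_whole_fs(const char *mountpoint)
	{
		struct fstrim_range range;
		int fd = open(mountpoint, O_RDONLY);

		if (fd < 0)
			return -1;

		memset(&range, 0, sizeof(range));
		range.len = (__u64)-1;	/* "everything"; the kernel clamps it */

		if (ioctl(fd, FITRIM, &range) < 0) {
			close(fd);
			return -1;
		}
		/* range.len is written back with the amount actually trimmed */
		printf("%llu bytes trimmed\n", (unsigned long long)range.len);
		close(fd);
		return 0;
	}
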
@@ -760,7 +770,7 @@
 	int ret = 1;
 
 	/*
-	 * make sure that once we start defragging and extent, we keep on
+	 * make sure that once we start defragging an extent, we keep on
 	 * defragging it
 	 */
 	if (start < *defrag_end)
@@ -805,7 +815,6 @@
 	 * extent will force at least part of that big extent to be defragged.
 	 */
 	if (ret) {
-		*last_len += len;
 		*defrag_end = extent_map_end(em);
 	} else {
 		*last_len = 0;
@@ -843,6 +852,7 @@
 	int i_done;
 	struct btrfs_ordered_extent *ordered;
 	struct extent_state *cached_state = NULL;
+	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 
 	if (isize == 0)
 		return 0;
@@ -860,7 +870,7 @@
 	for (i = 0; i < num_pages; i++) {
 		struct page *page;
 		page = find_or_create_page(inode->i_mapping,
-					    start_index + i, GFP_NOFS);
+					    start_index + i, mask);
 		if (!page)
 			break;
 
@@ -972,18 +982,20 @@
 	struct btrfs_super_block *disk_super;
 	struct file_ra_state *ra = NULL;
 	unsigned long last_index;
+	u64 isize = i_size_read(inode);
 	u64 features;
 	u64 last_len = 0;
 	u64 skip = 0;
 	u64 defrag_end = 0;
 	u64 newer_off = range->start;
-	int newer_left = 0;
 	unsigned long i;
+	unsigned long ra_index = 0;
 	int ret;
 	int defrag_count = 0;
 	int compress_type = BTRFS_COMPRESS_ZLIB;
 	int extent_thresh = range->extent_thresh;
-	int newer_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
+	int max_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
+	int cluster = max_cluster;
 	u64 new_align = ~((u64)128 * 1024 - 1);
 	struct page **pages = NULL;
 
@@ -997,7 +1009,7 @@
 			compress_type = range->compress_type;
 	}
 
-	if (inode->i_size == 0)
+	if (isize == 0)
 		return 0;
 
 	/*
@@ -1013,7 +1025,7 @@
 		ra = &file->f_ra;
 	}
 
-	pages = kmalloc(sizeof(struct page *) * newer_cluster,
+	pages = kmalloc(sizeof(struct page *) * max_cluster,
 			GFP_NOFS);
 	if (!pages) {
 		ret = -ENOMEM;
@@ -1022,10 +1034,10 @@
 
 	/* find the last page to defrag */
 	if (range->start + range->len > range->start) {
-		last_index = min_t(u64, inode->i_size - 1,
+		last_index = min_t(u64, isize - 1,
 			 range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
 	} else {
-		last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
+		last_index = (isize - 1) >> PAGE_CACHE_SHIFT;
 	}
 
 	if (newer_than) {
@@ -1038,14 +1050,13 @@
 			 * the extents in the file evenly spaced
 			 */
 			i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
-			newer_left = newer_cluster;
 		} else
 			goto out_ra;
 	} else {
 		i = range->start >> PAGE_CACHE_SHIFT;
 	}
 	if (!max_to_defrag)
-		max_to_defrag = last_index - 1;
+		max_to_defrag = last_index;
 
 	/*
 	 * make writeback start from i, so the defrag range can be
@@ -1079,18 +1090,31 @@
 			i = max(i + 1, next);
 			continue;
 		}
+
+		if (!newer_than) {
+			cluster = (PAGE_CACHE_ALIGN(defrag_end) >>
+				   PAGE_CACHE_SHIFT) - i;
+			cluster = min(cluster, max_cluster);
+		} else {
+			cluster = max_cluster;
+		}
+
 		if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
 			BTRFS_I(inode)->force_compress = compress_type;
 
-		btrfs_force_ra(inode->i_mapping, ra, file, i, newer_cluster);
+		if (i + cluster > ra_index) {
+			ra_index = max(i, ra_index);
+			btrfs_force_ra(inode->i_mapping, ra, file, ra_index,
+				       cluster);
+			ra_index += max_cluster;
+		}
 
-		ret = cluster_pages_for_defrag(inode, pages, i, newer_cluster);
+		ret = cluster_pages_for_defrag(inode, pages, i, cluster);
 		if (ret < 0)
 			goto out_ra;
 
 		defrag_count += ret;
 		balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
-		i += ret;
 
 		if (newer_than) {
 			if (newer_off == (u64)-1)
@@ -1105,12 +1129,17 @@
 			if (!ret) {
 				range->start = newer_off;
 				i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
-				newer_left = newer_cluster;
 			} else {
 				break;
 			}
 		} else {
-			i++;
+			if (ret > 0) {
+				i += ret;
+				last_len += ret << PAGE_CACHE_SHIFT;
+			} else {
+				i++;
+				last_len = 0;
+			}
 		}
 	}
 
@@ -1136,16 +1165,14 @@
 		mutex_unlock(&inode->i_mutex);
 	}
 
-	disk_super = &root->fs_info->super_copy;
+	disk_super = root->fs_info->super_copy;
 	features = btrfs_super_incompat_flags(disk_super);
 	if (range->compress_type == BTRFS_COMPRESS_LZO) {
 		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 		btrfs_set_super_incompat_flags(disk_super, features);
 	}
 
-	if (!file)
-		kfree(ra);
-	return defrag_count;
+	ret = defrag_count;
 
 out_ra:
 	if (!file)
@@ -2587,7 +2614,7 @@
 		return PTR_ERR(trans);
 	}
 
-	dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
+	dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
 	di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
 				   dir_id, "default", 7, 1);
 	if (IS_ERR_OR_NULL(di)) {
@@ -2603,7 +2630,7 @@
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	btrfs_free_path(path);
 
-	disk_super = &root->fs_info->super_copy;
+	disk_super = root->fs_info->super_copy;
 	features = btrfs_super_incompat_flags(disk_super);
 	if (!(features & BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL)) {
 		features |= BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL;
@@ -2864,6 +2891,144 @@
 	return ret;
 }
 
+static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
+{
+	int ret = 0;
+	int i;
+	u64 rel_ptr;
+	int size;
+	struct btrfs_ioctl_ino_path_args *ipa = NULL;
+	struct inode_fs_paths *ipath = NULL;
+	struct btrfs_path *path;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ipa = memdup_user(arg, sizeof(*ipa));
+	if (IS_ERR(ipa)) {
+		ret = PTR_ERR(ipa);
+		ipa = NULL;
+		goto out;
+	}
+
+	size = min_t(u32, ipa->size, 4096);
+	ipath = init_ipath(size, root, path);
+	if (IS_ERR(ipath)) {
+		ret = PTR_ERR(ipath);
+		ipath = NULL;
+		goto out;
+	}
+
+	ret = paths_from_inode(ipa->inum, ipath);
+	if (ret < 0)
+		goto out;
+
+	for (i = 0; i < ipath->fspath->elem_cnt; ++i) {
+		rel_ptr = ipath->fspath->val[i] - (u64)ipath->fspath->val;
+		ipath->fspath->val[i] = rel_ptr;
+	}
+
+	ret = copy_to_user((void *)ipa->fspath, (void *)ipath->fspath, size);
+	if (ret) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+out:
+	btrfs_free_path(path);
+	free_ipath(ipath);
+	kfree(ipa);
+
+	return ret;
+}
+
+static int build_ino_list(u64 inum, u64 offset, u64 root, void *ctx)
+{
+	struct btrfs_data_container *inodes = ctx;
+	const size_t c = 3 * sizeof(u64);
+
+	if (inodes->bytes_left >= c) {
+		inodes->bytes_left -= c;
+		inodes->val[inodes->elem_cnt] = inum;
+		inodes->val[inodes->elem_cnt + 1] = offset;
+		inodes->val[inodes->elem_cnt + 2] = root;
+		inodes->elem_cnt += 3;
+	} else {
+		inodes->bytes_missing += c - inodes->bytes_left;
+		inodes->bytes_left = 0;
+		inodes->elem_missed += 3;
+	}
+
+	return 0;
+}
+
+static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
+					void __user *arg)
+{
+	int ret = 0;
+	int size;
+	u64 extent_offset;
+	struct btrfs_ioctl_logical_ino_args *loi;
+	struct btrfs_data_container *inodes = NULL;
+	struct btrfs_path *path = NULL;
+	struct btrfs_key key;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	loi = memdup_user(arg, sizeof(*loi));
+	if (IS_ERR(loi)) {
+		ret = PTR_ERR(loi);
+		loi = NULL;
+		goto out;
+	}
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	size = min_t(u32, loi->size, 4096);
+	inodes = init_data_container(size);
+	if (IS_ERR(inodes)) {
+		ret = PTR_ERR(inodes);
+		inodes = NULL;
+		goto out;
+	}
+
+	ret = extent_from_logical(root->fs_info, loi->logical, path, &key);
+
+	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
+		ret = -ENOENT;
+	if (ret < 0)
+		goto out;
+
+	extent_offset = loi->logical - key.objectid;
+	ret = iterate_extent_inodes(root->fs_info, path, key.objectid,
+					extent_offset, build_ino_list, inodes);
+
+	if (ret < 0)
+		goto out;
+
+	ret = copy_to_user((void *)loi->inodes, (void *)inodes, size);
+	if (ret)
+		ret = -EFAULT;
+
+out:
+	btrfs_free_path(path);
+	kfree(inodes);
+	kfree(loi);
+
+	return ret;
+}
+
 long btrfs_ioctl(struct file *file, unsigned int
 		cmd, unsigned long arg)
 {
@@ -2921,6 +3086,10 @@
 		return btrfs_ioctl_tree_search(file, argp);
 	case BTRFS_IOC_INO_LOOKUP:
 		return btrfs_ioctl_ino_lookup(file, argp);
+	case BTRFS_IOC_INO_PATHS:
+		return btrfs_ioctl_ino_to_path(root, argp);
+	case BTRFS_IOC_LOGICAL_INO:
+		return btrfs_ioctl_logical_to_ino(root, argp);
 	case BTRFS_IOC_SPACE_INFO:
 		return btrfs_ioctl_space_info(root, argp);
 	case BTRFS_IOC_SYNC:
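
The two new ioctls above are thin wrappers around the backref walking
code. As an illustration, a hypothetical userspace sketch (not part of
this patch) for BTRFS_IOC_INO_PATHS; it assumes the struct definitions and
ioctl numbers from ioctl.h (next hunk) have been made visible to the
program, and note that the handler requires CAP_SYS_ADMIN:

	#include <linux/types.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>

	/* fs_fd is any open descriptor on the mounted btrfs filesystem */
	int print_paths_of_inode(int fs_fd, __u64 inum)
	{
		struct btrfs_data_container *fspath;
		struct btrfs_ioctl_ino_path_args ipa;
		__u32 i;

		fspath = calloc(1, 4096);
		if (!fspath)
			return -1;

		memset(&ipa, 0, sizeof(ipa));
		ipa.inum = inum;
		ipa.size = 4096;
		ipa.fspath = (__u64)(unsigned long)fspath;

		if (ioctl(fs_fd, BTRFS_IOC_INO_PATHS, &ipa) < 0) {
			free(fspath);
			return -1;
		}

		/*
		 * btrfs_ioctl_ino_to_path rewrote each val[i] as an offset
		 * relative to the start of the val array (the rel_ptr loop
		 * above), so add that base back before dereferencing.
		 */
		for (i = 0; i < fspath->elem_cnt; i++)
			printf("%s\n", (char *)fspath->val + fspath->val[i]);

		if (fspath->bytes_missing)
			fprintf(stderr, "buffer too small, %u more bytes needed\n",
				fspath->bytes_missing);

		free(fspath);
		return 0;
	}
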
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index ad1ea78..252ae99 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -193,6 +193,30 @@
 	struct btrfs_ioctl_space_info spaces[0];
 };
 
+struct btrfs_data_container {
+	__u32	bytes_left;	/* out -- bytes not needed to deliver output */
+	__u32	bytes_missing;	/* out -- additional bytes needed for result */
+	__u32	elem_cnt;	/* out */
+	__u32	elem_missed;	/* out */
+	__u64	val[0];		/* out */
+};
+
+struct btrfs_ioctl_ino_path_args {
+	__u64				inum;		/* in */
+	__u32				size;		/* in */
+	__u64				reserved[4];
+	/* struct btrfs_data_container	*fspath;	   out */
+	__u64				fspath;		/* out */
+};
+
+struct btrfs_ioctl_logical_ino_args {
+	__u64				logical;	/* in */
+	__u32				size;		/* in */
+	__u64				reserved[4];
+	/* struct btrfs_data_container	*inodes;	out   */
+	__u64				inodes;
+};
+
 #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
 				   struct btrfs_ioctl_vol_args)
 #define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -248,4 +272,9 @@
 				 struct btrfs_ioctl_dev_info_args)
 #define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \
 			       struct btrfs_ioctl_fs_info_args)
+#define BTRFS_IOC_INO_PATHS _IOWR(BTRFS_IOCTL_MAGIC, 35, \
+					struct btrfs_ioctl_ino_path_args)
+#define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
+					struct btrfs_ioctl_ino_path_args)
+
 #endif
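
BTRFS_IOC_LOGICAL_INO answers the reverse question and reuses
btrfs_data_container, but build_ino_list packs val[] with flat
(inum, offset, root) triples instead of string offsets, so elem_cnt
advances in steps of three. A hypothetical decode helper (an assumption
based on that callback, not code from this patch):

	#include <linux/types.h>
	#include <stdio.h>

	static void print_ino_triples(const struct btrfs_data_container *inodes)
	{
		__u32 i;

		/* val[] holds (inum, offset, root) triples per build_ino_list */
		for (i = 0; i < inodes->elem_cnt; i += 3)
			printf("inum %llu offset %llu root %llu\n",
			       (unsigned long long)inodes->val[i],
			       (unsigned long long)inodes->val[i + 1],
			       (unsigned long long)inodes->val[i + 2]);
	}

Note that BTRFS_IOC_LOGICAL_INO above is encoded with struct
btrfs_ioctl_ino_path_args rather than btrfs_ioctl_logical_ino_args; since
the two structs have identical size and layout, the encoded ioctl number
comes out the same either way.
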
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index fb2605d..f38e452 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -158,8 +158,7 @@
 void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
 {
 	int i;
-	u32 type;
-	u32 nr = btrfs_header_nritems(l);
+	u32 type, nr;
 	struct btrfs_item *item;
 	struct btrfs_root_item *ri;
 	struct btrfs_dir_item *di;
@@ -172,6 +171,11 @@
 	struct btrfs_key key;
 	struct btrfs_key found_key;
 
+	if (!l)
+		return;
+
+	nr = btrfs_header_nritems(l);
+
 	printk(KERN_INFO "leaf %llu total ptrs %d free space %d\n",
 		(unsigned long long)btrfs_header_bytenr(l), nr,
 		btrfs_leaf_free_space(root, l));
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
new file mode 100644
index 0000000..2373b39
--- /dev/null
+++ b/fs/btrfs/reada.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright (C) 2011 STRATO.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "ctree.h"
+#include "volumes.h"
+#include "disk-io.h"
+#include "transaction.h"
+
+#undef DEBUG
+
+/*
+ * This is the implementation for the generic read ahead framework.
+ *
+ * To trigger a readahead, btrfs_reada_add must be called. It will start
+ * a read ahead for the given range [start, end) on tree root. The returned
+ * handle can either be used to wait on the readahead to finish
+ * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
+ *
+ * The read ahead works as follows:
+ * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
+ * reada_start_machine will then search for extents to prefetch and trigger
+ * some reads. When a read finishes for a node, all contained node/leaf
+ * pointers that lie in the given range will also be enqueued. The reads will
+ * be triggered in sequential order, thus giving a big win over a naive
+ * enumeration. It will also make use of multi-device layouts. Each disk
+ * will have its own read pointer and all disks will be utilized in parallel.
+ * Also, no two disks will read both sides of a mirror simultaneously, as this
+ * would waste seeking capacity. Instead, both disks will read different parts
+ * of the filesystem.
+ * Any number of readaheads can be started in parallel. The read order will be
+ * determined globally, i.e. 2 parallel readaheads will normally finish faster
+ * than the same 2 started one after another.
+ */
+
+#define MAX_MIRRORS 2
+#define MAX_IN_FLIGHT 6
+
+struct reada_extctl {
+	struct list_head	list;
+	struct reada_control	*rc;
+	u64			generation;
+};
+
+struct reada_extent {
+	u64			logical;
+	struct btrfs_key	top;
+	u32			blocksize;
+	int			err;
+	struct list_head	extctl;
+	struct kref		refcnt;
+	spinlock_t		lock;
+	struct reada_zone	*zones[MAX_MIRRORS];
+	int			nzones;
+	struct btrfs_device	*scheduled_for;
+};
+
+struct reada_zone {
+	u64			start;
+	u64			end;
+	u64			elems;
+	struct list_head	list;
+	spinlock_t		lock;
+	int			locked;
+	struct btrfs_device	*device;
+	struct btrfs_device	*devs[MAX_MIRRORS]; /* full list, incl self */
+	int			ndevs;
+	struct kref		refcnt;
+};
+
+struct reada_machine_work {
+	struct btrfs_work	work;
+	struct btrfs_fs_info	*fs_info;
+};
+
+static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
+static void reada_control_release(struct kref *kref);
+static void reada_zone_release(struct kref *kref);
+static void reada_start_machine(struct btrfs_fs_info *fs_info);
+static void __reada_start_machine(struct btrfs_fs_info *fs_info);
+
+static int reada_add_block(struct reada_control *rc, u64 logical,
+			   struct btrfs_key *top, int level, u64 generation);
+
+/* recurses */
+/* in case of err, eb might be NULL */
+static int __readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
+			    u64 start, int err)
+{
+	int level = 0;
+	int nritems;
+	int i;
+	u64 bytenr;
+	u64 generation;
+	struct reada_extent *re;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct list_head list;
+	unsigned long index = start >> PAGE_CACHE_SHIFT;
+	struct btrfs_device *for_dev;
+
+	if (eb)
+		level = btrfs_header_level(eb);
+
+	/* find extent */
+	spin_lock(&fs_info->reada_lock);
+	re = radix_tree_lookup(&fs_info->reada_tree, index);
+	if (re)
+		kref_get(&re->refcnt);
+	spin_unlock(&fs_info->reada_lock);
+
+	if (!re)
+		return -1;
+
+	spin_lock(&re->lock);
+	/*
+	 * just take the full list from the extent. afterwards we
+	 * don't need the lock anymore
+	 */
+	list_replace_init(&re->extctl, &list);
+	for_dev = re->scheduled_for;
+	re->scheduled_for = NULL;
+	spin_unlock(&re->lock);
+
+	if (err == 0) {
+		nritems = level ? btrfs_header_nritems(eb) : 0;
+		generation = btrfs_header_generation(eb);
+		/*
+		 * FIXME: currently we just set nritems to 0 if this is a leaf,
+		 * effectively ignoring the content. In a next step we could
+		 * trigger more readahead depending on the content, e.g.
+		 * fetch the checksums for the extents in the leaf.
+		 */
+	} else {
+		/*
+		 * this is the error case, the extent buffer has not been
+		 * read correctly. We won't access anything from it and
+		 * just cleanup our data structures. Effectively this will
+		 * cut the branch below this node from read ahead.
+		 */
+		nritems = 0;
+		generation = 0;
+	}
+
+	for (i = 0; i < nritems; i++) {
+		struct reada_extctl *rec;
+		u64 n_gen;
+		struct btrfs_key key;
+		struct btrfs_key next_key;
+
+		btrfs_node_key_to_cpu(eb, &key, i);
+		if (i + 1 < nritems)
+			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
+		else
+			next_key = re->top;
+		bytenr = btrfs_node_blockptr(eb, i);
+		n_gen = btrfs_node_ptr_generation(eb, i);
+
+		list_for_each_entry(rec, &list, list) {
+			struct reada_control *rc = rec->rc;
+
+			/*
+			 * if the generation doesn't match, just ignore this
+			 * extctl. This will probably cut off a branch from
+			 * prefetch. Alternatively one could start a new (sub-)
+			 * prefetch for this branch, starting again from root.
+			 * FIXME: move the generation check out of this loop
+			 */
+#ifdef DEBUG
+			if (rec->generation != generation) {
+				printk(KERN_DEBUG "generation mismatch for "
+						"(%llu,%d,%llu) %llu != %llu\n",
+				       key.objectid, key.type, key.offset,
+				       rec->generation, generation);
+			}
+#endif
+			if (rec->generation == generation &&
+			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
+			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
+				reada_add_block(rc, bytenr, &next_key,
+						level - 1, n_gen);
+		}
+	}
+	/*
+	 * free extctl records
+	 */
+	while (!list_empty(&list)) {
+		struct reada_control *rc;
+		struct reada_extctl *rec;
+
+		rec = list_first_entry(&list, struct reada_extctl, list);
+		list_del(&rec->list);
+		rc = rec->rc;
+		kfree(rec);
+
+		kref_get(&rc->refcnt);
+		if (atomic_dec_and_test(&rc->elems)) {
+			kref_put(&rc->refcnt, reada_control_release);
+			wake_up(&rc->wait);
+		}
+		kref_put(&rc->refcnt, reada_control_release);
+
+		reada_extent_put(fs_info, re);	/* one ref for each entry */
+	}
+	reada_extent_put(fs_info, re);	/* our ref */
+	if (for_dev)
+		atomic_dec(&for_dev->reada_in_flight);
+
+	return 0;
+}
+
+/*
+ * start is passed separately in case eb is NULL, which may be the case with
+ * failed I/O
+ */
+int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
+			 u64 start, int err)
+{
+	int ret;
+
+	ret = __readahead_hook(root, eb, start, err);
+
+	reada_start_machine(root->fs_info);
+
+	return ret;
+}
+
+static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
+					  struct btrfs_device *dev, u64 logical,
+					  struct btrfs_bio *bbio)
+{
+	int ret;
+	int looped = 0;
+	struct reada_zone *zone;
+	struct btrfs_block_group_cache *cache = NULL;
+	u64 start;
+	u64 end;
+	int i;
+
+again:
+	zone = NULL;
+	spin_lock(&fs_info->reada_lock);
+	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
+				     logical >> PAGE_CACHE_SHIFT, 1);
+	if (ret == 1)
+		kref_get(&zone->refcnt);
+	spin_unlock(&fs_info->reada_lock);
+
+	if (ret == 1) {
+		if (logical >= zone->start && logical < zone->end)
+			return zone;
+		spin_lock(&fs_info->reada_lock);
+		kref_put(&zone->refcnt, reada_zone_release);
+		spin_unlock(&fs_info->reada_lock);
+	}
+
+	if (looped)
+		return NULL;
+
+	cache = btrfs_lookup_block_group(fs_info, logical);
+	if (!cache)
+		return NULL;
+
+	start = cache->key.objectid;
+	end = start + cache->key.offset - 1;
+	btrfs_put_block_group(cache);
+
+	zone = kzalloc(sizeof(*zone), GFP_NOFS);
+	if (!zone)
+		return NULL;
+
+	zone->start = start;
+	zone->end = end;
+	INIT_LIST_HEAD(&zone->list);
+	spin_lock_init(&zone->lock);
+	zone->locked = 0;
+	kref_init(&zone->refcnt);
+	zone->elems = 0;
+	zone->device = dev; /* our device always sits at index 0 */
+	for (i = 0; i < bbio->num_stripes; ++i) {
+		/* bounds have already been checked */
+		zone->devs[i] = bbio->stripes[i].dev;
+	}
+	zone->ndevs = bbio->num_stripes;
+
+	spin_lock(&fs_info->reada_lock);
+	ret = radix_tree_insert(&dev->reada_zones,
+				(unsigned long)zone->end >> PAGE_CACHE_SHIFT,
+				zone);
+	spin_unlock(&fs_info->reada_lock);
+
+	if (ret) {
+		kfree(zone);
+		looped = 1;
+		goto again;
+	}
+
+	return zone;
+}
+
+static struct reada_extent *reada_find_extent(struct btrfs_root *root,
+					      u64 logical,
+					      struct btrfs_key *top, int level)
+{
+	int ret;
+	int looped = 0;
+	struct reada_extent *re = NULL;
+	struct btrfs_fs_info *fs_info = root->fs_info;
+	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+	struct btrfs_bio *bbio = NULL;
+	struct btrfs_device *dev;
+	u32 blocksize;
+	u64 length;
+	int nzones = 0;
+	int i;
+	unsigned long index = logical >> PAGE_CACHE_SHIFT;
+
+again:
+	spin_lock(&fs_info->reada_lock);
+	re = radix_tree_lookup(&fs_info->reada_tree, index);
+	if (re)
+		kref_get(&re->refcnt);
+	spin_unlock(&fs_info->reada_lock);
+
+	if (re || looped)
+		return re;
+
+	re = kzalloc(sizeof(*re), GFP_NOFS);
+	if (!re)
+		return NULL;
+
+	blocksize = btrfs_level_size(root, level);
+	re->logical = logical;
+	re->blocksize = blocksize;
+	re->top = *top;
+	INIT_LIST_HEAD(&re->extctl);
+	spin_lock_init(&re->lock);
+	kref_init(&re->refcnt);
+
+	/*
+	 * map block
+	 */
+	length = blocksize;
+	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, &bbio, 0);
+	if (ret || !bbio || length < blocksize)
+		goto error;
+
+	if (bbio->num_stripes > MAX_MIRRORS) {
+		printk(KERN_ERR "btrfs readahead: more than %d copies not "
+				"supported\n", MAX_MIRRORS);
+		goto error;
+	}
+
+	for (nzones = 0; nzones < bbio->num_stripes; ++nzones) {
+		struct reada_zone *zone;
+
+		dev = bbio->stripes[nzones].dev;
+		zone = reada_find_zone(fs_info, dev, logical, bbio);
+		if (!zone)
+			break;
+
+		re->zones[nzones] = zone;
+		spin_lock(&zone->lock);
+		if (!zone->elems)
+			kref_get(&zone->refcnt);
+		++zone->elems;
+		spin_unlock(&zone->lock);
+		spin_lock(&fs_info->reada_lock);
+		kref_put(&zone->refcnt, reada_zone_release);
+		spin_unlock(&fs_info->reada_lock);
+	}
+	re->nzones = nzones;
+	if (nzones == 0) {
+		/* not a single zone found, error and out */
+		goto error;
+	}
+
+	/* insert extent in reada_tree + all per-device trees, all or nothing */
+	spin_lock(&fs_info->reada_lock);
+	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
+	if (ret) {
+		spin_unlock(&fs_info->reada_lock);
+		if (ret != -ENOMEM) {
+			/* someone inserted the extent in the meantime */
+			looped = 1;
+		}
+		goto error;
+	}
+	for (i = 0; i < nzones; ++i) {
+		dev = bbio->stripes[i].dev;
+		ret = radix_tree_insert(&dev->reada_extents, index, re);
+		if (ret) {
+			while (--i >= 0) {
+				dev = bbio->stripes[i].dev;
+				BUG_ON(dev == NULL);
+				radix_tree_delete(&dev->reada_extents, index);
+			}
+			BUG_ON(fs_info == NULL);
+			radix_tree_delete(&fs_info->reada_tree, index);
+			spin_unlock(&fs_info->reada_lock);
+			goto error;
+		}
+	}
+	spin_unlock(&fs_info->reada_lock);
+
+	kfree(bbio);
+	return re;
+
+error:
+	while (nzones) {
+		struct reada_zone *zone;
+
+		--nzones;
+		zone = re->zones[nzones];
+		kref_get(&zone->refcnt);
+		spin_lock(&zone->lock);
+		--zone->elems;
+		if (zone->elems == 0) {
+			/*
+			 * no fs_info->reada_lock needed, as this can't be
+			 * the last ref
+			 */
+			kref_put(&zone->refcnt, reada_zone_release);
+		}
+		spin_unlock(&zone->lock);
+
+		spin_lock(&fs_info->reada_lock);
+		kref_put(&zone->refcnt, reada_zone_release);
+		spin_unlock(&fs_info->reada_lock);
+	}
+	kfree(bbio);
+	kfree(re);
+	if (looped)
+		goto again;
+	return NULL;
+}
+
+static void reada_kref_dummy(struct kref *kr)
+{
+}
+
+static void reada_extent_put(struct btrfs_fs_info *fs_info,
+			     struct reada_extent *re)
+{
+	int i;
+	unsigned long index = re->logical >> PAGE_CACHE_SHIFT;
+
+	spin_lock(&fs_info->reada_lock);
+	if (!kref_put(&re->refcnt, reada_kref_dummy)) {
+		spin_unlock(&fs_info->reada_lock);
+		return;
+	}
+
+	radix_tree_delete(&fs_info->reada_tree, index);
+	for (i = 0; i < re->nzones; ++i) {
+		struct reada_zone *zone = re->zones[i];
+
+		radix_tree_delete(&zone->device->reada_extents, index);
+	}
+
+	spin_unlock(&fs_info->reada_lock);
+
+	for (i = 0; i < re->nzones; ++i) {
+		struct reada_zone *zone = re->zones[i];
+
+		kref_get(&zone->refcnt);
+		spin_lock(&zone->lock);
+		--zone->elems;
+		if (zone->elems == 0) {
+			/* no fs_info->reada_lock needed, as this can't be
+			 * the last ref */
+			kref_put(&zone->refcnt, reada_zone_release);
+		}
+		spin_unlock(&zone->lock);
+
+		spin_lock(&fs_info->reada_lock);
+		kref_put(&zone->refcnt, reada_zone_release);
+		spin_unlock(&fs_info->reada_lock);
+	}
+	if (re->scheduled_for)
+		atomic_dec(&re->scheduled_for->reada_in_flight);
+
+	kfree(re);
+}
+
+static void reada_zone_release(struct kref *kref)
+{
+	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
+
+	radix_tree_delete(&zone->device->reada_zones,
+			  zone->end >> PAGE_CACHE_SHIFT);
+
+	kfree(zone);
+}
+
+static void reada_control_release(struct kref *kref)
+{
+	struct reada_control *rc = container_of(kref, struct reada_control,
+						refcnt);
+
+	kfree(rc);
+}
+
+static int reada_add_block(struct reada_control *rc, u64 logical,
+			   struct btrfs_key *top, int level, u64 generation)
+{
+	struct btrfs_root *root = rc->root;
+	struct reada_extent *re;
+	struct reada_extctl *rec;
+
+	re = reada_find_extent(root, logical, top, level); /* takes one ref */
+	if (!re)
+		return -1;
+
+	rec = kzalloc(sizeof(*rec), GFP_NOFS);
+	if (!rec) {
+		reada_extent_put(root->fs_info, re);
+		return -1;
+	}
+
+	rec->rc = rc;
+	rec->generation = generation;
+	atomic_inc(&rc->elems);
+
+	spin_lock(&re->lock);
+	list_add_tail(&rec->list, &re->extctl);
+	spin_unlock(&re->lock);
+
+	/* leave the ref on the extent */
+
+	return 0;
+}
+
+/*
+ * called with fs_info->reada_lock held
+ */
+static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
+{
+	int i;
+	unsigned long index = zone->end >> PAGE_CACHE_SHIFT;
+
+	for (i = 0; i < zone->ndevs; ++i) {
+		struct reada_zone *peer;
+		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
+		if (peer && peer->device != zone->device)
+			peer->locked = lock;
+	}
+}
+
+/*
+ * called with fs_info->reada_lock held
+ */
+static int reada_pick_zone(struct btrfs_device *dev)
+{
+	struct reada_zone *top_zone = NULL;
+	struct reada_zone *top_locked_zone = NULL;
+	u64 top_elems = 0;
+	u64 top_locked_elems = 0;
+	unsigned long index = 0;
+	int ret;
+
+	if (dev->reada_curr_zone) {
+		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
+		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
+		dev->reada_curr_zone = NULL;
+	}
+	/* pick the zone with the most elements */
+	while (1) {
+		struct reada_zone *zone;
+
+		ret = radix_tree_gang_lookup(&dev->reada_zones,
+					     (void **)&zone, index, 1);
+		if (ret == 0)
+			break;
+		index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+		if (zone->locked) {
+			if (zone->elems > top_locked_elems) {
+				top_locked_elems = zone->elems;
+				top_locked_zone = zone;
+			}
+		} else {
+			if (zone->elems > top_elems) {
+				top_elems = zone->elems;
+				top_zone = zone;
+			}
+		}
+	}
+	if (top_zone)
+		dev->reada_curr_zone = top_zone;
+	else if (top_locked_zone)
+		dev->reada_curr_zone = top_locked_zone;
+	else
+		return 0;
+
+	dev->reada_next = dev->reada_curr_zone->start;
+	kref_get(&dev->reada_curr_zone->refcnt);
+	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);
+
+	return 1;
+}
+
+static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
+				   struct btrfs_device *dev)
+{
+	struct reada_extent *re = NULL;
+	int mirror_num = 0;
+	struct extent_buffer *eb = NULL;
+	u64 logical;
+	u32 blocksize;
+	int ret;
+	int i;
+	int need_kick = 0;
+
+	spin_lock(&fs_info->reada_lock);
+	if (dev->reada_curr_zone == NULL) {
+		ret = reada_pick_zone(dev);
+		if (!ret) {
+			spin_unlock(&fs_info->reada_lock);
+			return 0;
+		}
+	}
+	/*
+	 * FIXME currently we issue the reads one extent at a time. If we have
+	 * a contiguous block of extents, we could also coalesce them or use
+	 * plugging to speed things up
+	 */
+	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
+				     dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+	if (ret == 0 || re->logical >= dev->reada_curr_zone->end) {
+		ret = reada_pick_zone(dev);
+		if (!ret) {
+			spin_unlock(&fs_info->reada_lock);
+			return 0;
+		}
+		re = NULL;
+		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
+					dev->reada_next >> PAGE_CACHE_SHIFT, 1);
+	}
+	if (ret == 0) {
+		spin_unlock(&fs_info->reada_lock);
+		return 0;
+	}
+	dev->reada_next = re->logical + re->blocksize;
+	kref_get(&re->refcnt);
+
+	spin_unlock(&fs_info->reada_lock);
+
+	/*
+	 * find mirror num
+	 */
+	for (i = 0; i < re->nzones; ++i) {
+		if (re->zones[i]->device == dev) {
+			mirror_num = i + 1;
+			break;
+		}
+	}
+	logical = re->logical;
+	blocksize = re->blocksize;
+
+	spin_lock(&re->lock);
+	if (re->scheduled_for == NULL) {
+		re->scheduled_for = dev;
+		need_kick = 1;
+	}
+	spin_unlock(&re->lock);
+
+	reada_extent_put(fs_info, re);
+
+	if (!need_kick)
+		return 0;
+
+	atomic_inc(&dev->reada_in_flight);
+	ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
+			 mirror_num, &eb);
+	if (ret)
+		__readahead_hook(fs_info->extent_root, NULL, logical, ret);
+	else if (eb)
+		__readahead_hook(fs_info->extent_root, eb, eb->start, ret);
+
+	if (eb)
+		free_extent_buffer(eb);
+
+	return 1;
+}
+
+static void reada_start_machine_worker(struct btrfs_work *work)
+{
+	struct reada_machine_work *rmw;
+	struct btrfs_fs_info *fs_info;
+
+	rmw = container_of(work, struct reada_machine_work, work);
+	fs_info = rmw->fs_info;
+
+	kfree(rmw);
+
+	__reada_start_machine(fs_info);
+}
+
+static void __reada_start_machine(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_device *device;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+	u64 enqueued;
+	u64 total = 0;
+	int i;
+
+	do {
+		enqueued = 0;
+		list_for_each_entry(device, &fs_devices->devices, dev_list) {
+			if (atomic_read(&device->reada_in_flight) <
+			    MAX_IN_FLIGHT)
+				enqueued += reada_start_machine_dev(fs_info,
+								    device);
+		}
+		total += enqueued;
+	} while (enqueued && total < 10000);
+
+	if (enqueued == 0)
+		return;
+
+	/*
+	 * If everything is already in the cache, this is effectively single
+	 * threaded. To a) not hold the caller for too long and b) utilize
+	 * more cores, we break the loop above after 10000 iterations and now
+	 * enqueue to workers to finish it. This will distribute the load to
+	 * the cores.
+	 */
+	for (i = 0; i < 2; ++i)
+		reada_start_machine(fs_info);
+}
+
+static void reada_start_machine(struct btrfs_fs_info *fs_info)
+{
+	struct reada_machine_work *rmw;
+
+	rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
+	if (!rmw) {
+		/* FIXME we cannot handle this properly right now */
+		BUG();
+	}
+	rmw->work.func = reada_start_machine_worker;
+	rmw->fs_info = fs_info;
+
+	btrfs_queue_worker(&fs_info->readahead_workers, &rmw->work);
+}
+
+#ifdef DEBUG
+static void dump_devs(struct btrfs_fs_info *fs_info, int all)
+{
+	struct btrfs_device *device;
+	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+	unsigned long index;
+	int ret;
+	int i;
+	int j;
+	int cnt;
+
+	spin_lock(&fs_info->reada_lock);
+	list_for_each_entry(device, &fs_devices->devices, dev_list) {
+		printk(KERN_DEBUG "dev %lld has %d in flight\n", device->devid,
+			atomic_read(&device->reada_in_flight));
+		index = 0;
+		while (1) {
+			struct reada_zone *zone;
+			ret = radix_tree_gang_lookup(&device->reada_zones,
+						     (void **)&zone, index, 1);
+			if (ret == 0)
+				break;
+			printk(KERN_DEBUG "  zone %llu-%llu elems %llu locked "
+				"%d devs", zone->start, zone->end, zone->elems,
+				zone->locked);
+			for (j = 0; j < zone->ndevs; ++j) {
+				printk(KERN_CONT " %lld",
+					zone->devs[j]->devid);
+			}
+			if (device->reada_curr_zone == zone)
+				printk(KERN_CONT " curr off %llu",
+					device->reada_next - zone->start);
+			printk(KERN_CONT "\n");
+			index = (zone->end >> PAGE_CACHE_SHIFT) + 1;
+		}
+		cnt = 0;
+		index = 0;
+		while (all) {
+			struct reada_extent *re = NULL;
+
+			ret = radix_tree_gang_lookup(&device->reada_extents,
+						     (void **)&re, index, 1);
+			if (ret == 0)
+				break;
+			printk(KERN_DEBUG
+				"  re: logical %llu size %u empty %d for %lld",
+				re->logical, re->blocksize,
+				list_empty(&re->extctl), re->scheduled_for ?
+				re->scheduled_for->devid : -1);
+
+			for (i = 0; i < re->nzones; ++i) {
+				printk(KERN_CONT " zone %llu-%llu devs",
+					re->zones[i]->start,
+					re->zones[i]->end);
+				for (j = 0; j < re->zones[i]->ndevs; ++j) {
+					printk(KERN_CONT " %lld",
+						re->zones[i]->devs[j]->devid);
+				}
+			}
+			printk(KERN_CONT "\n");
+			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+			if (++cnt > 15)
+				break;
+		}
+	}
+
+	index = 0;
+	cnt = 0;
+	while (all) {
+		struct reada_extent *re = NULL;
+
+		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
+					     index, 1);
+		if (ret == 0)
+			break;
+		if (!re->scheduled_for) {
+			index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+			continue;
+		}
+		printk(KERN_DEBUG
+			"re: logical %llu size %u list empty %d for %lld",
+			re->logical, re->blocksize, list_empty(&re->extctl),
+			re->scheduled_for ? re->scheduled_for->devid : -1);
+		for (i = 0; i < re->nzones; ++i) {
+			printk(KERN_CONT " zone %llu-%llu devs",
+				re->zones[i]->start,
+				re->zones[i]->end);
+			for (j = 0; j < re->zones[i]->ndevs; ++j) {
+				printk(KERN_CONT " %lld",
+					re->zones[i]->devs[j]->devid);
+			}
+		}
+		printk(KERN_CONT "\n");
+		index = (re->logical >> PAGE_CACHE_SHIFT) + 1;
+	}
+	spin_unlock(&fs_info->reada_lock);
+}
+#endif
+
+/*
+ * interface
+ */
+struct reada_control *btrfs_reada_add(struct btrfs_root *root,
+			struct btrfs_key *key_start, struct btrfs_key *key_end)
+{
+	struct reada_control *rc;
+	u64 start;
+	u64 generation;
+	int level;
+	struct extent_buffer *node;
+	static struct btrfs_key max_key = {
+		.objectid = (u64)-1,
+		.type = (u8)-1,
+		.offset = (u64)-1
+	};
+
+	rc = kzalloc(sizeof(*rc), GFP_NOFS);
+	if (!rc)
+		return ERR_PTR(-ENOMEM);
+
+	rc->root = root;
+	rc->key_start = *key_start;
+	rc->key_end = *key_end;
+	atomic_set(&rc->elems, 0);
+	init_waitqueue_head(&rc->wait);
+	kref_init(&rc->refcnt);
+	kref_get(&rc->refcnt); /* one ref for having elements */
+
+	node = btrfs_root_node(root);
+	start = node->start;
+	level = btrfs_header_level(node);
+	generation = btrfs_header_generation(node);
+	free_extent_buffer(node);
+
+	reada_add_block(rc, start, &max_key, level, generation);
+
+	reada_start_machine(root->fs_info);
+
+	return rc;
+}
+
+#ifdef DEBUG
+int btrfs_reada_wait(void *handle)
+{
+	struct reada_control *rc = handle;
+
+	while (atomic_read(&rc->elems)) {
+		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
+				   5 * HZ);
+		dump_devs(rc->root->fs_info,
+			  atomic_read(&rc->elems) < 10 ? 1 : 0);
+	}
+
+	dump_devs(rc->root->fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
+
+	kref_put(&rc->refcnt, reada_control_release);
+
+	return 0;
+}
+#else
+int btrfs_reada_wait(void *handle)
+{
+	struct reada_control *rc = handle;
+
+	while (atomic_read(&rc->elems)) {
+		wait_event(rc->wait, atomic_read(&rc->elems) == 0);
+	}
+
+	kref_put(&rc->refcnt, reada_control_release);
+
+	return 0;
+}
+#endif
+
+void btrfs_reada_detach(void *handle)
+{
+	struct reada_control *rc = handle;
+
+	kref_put(&rc->refcnt, reada_control_release);
+}
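
To round off the new interface, a minimal in-kernel usage sketch
(hypothetical caller, not part of this patch) that prefetches an entire
tree and waits for the readahead to finish:

	static int prefetch_whole_tree(struct btrfs_root *root)
	{
		struct reada_control *rc;
		struct btrfs_key key_start = { 0 };	/* smallest possible key */
		struct btrfs_key key_end = {		/* largest possible key */
			.objectid = (u64)-1,
			.type = (u8)-1,
			.offset = (u64)-1,
		};

		rc = btrfs_reada_add(root, &key_start, &key_end);
		if (IS_ERR(rc))
			return PTR_ERR(rc);

		/*
		 * Either block until everything in range has been read, or
		 * drop the handle with btrfs_reada_detach(rc) to let the
		 * readahead continue in the background.
		 */
		return btrfs_reada_wait(rc);
	}
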
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 59bb176..24d654c 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -2041,8 +2041,7 @@
 		BUG_ON(IS_ERR(trans));
 		trans->block_rsv = rc->block_rsv;
 
-		ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
-					    min_reserved, 0);
+		ret = btrfs_block_rsv_refill(root, rc->block_rsv, min_reserved);
 		if (ret) {
 			BUG_ON(ret != -EAGAIN);
 			ret = btrfs_commit_transaction(trans, root);
@@ -2152,8 +2151,7 @@
 again:
 	if (!err) {
 		num_bytes = rc->merging_rsv_size;
-		ret = btrfs_block_rsv_add(NULL, root, rc->block_rsv,
-					  num_bytes);
+		ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes);
 		if (ret)
 			err = ret;
 	}
@@ -2427,7 +2425,7 @@
 	num_bytes = calcu_metadata_size(rc, node, 1) * 2;
 
 	trans->block_rsv = rc->block_rsv;
-	ret = btrfs_block_rsv_add(trans, root, rc->block_rsv, num_bytes);
+	ret = btrfs_block_rsv_add(root, rc->block_rsv, num_bytes);
 	if (ret) {
 		if (ret == -EAGAIN)
 			rc->commit_transaction = 1;
@@ -2922,6 +2920,7 @@
 	unsigned long last_index;
 	struct page *page;
 	struct file_ra_state *ra;
+	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
 	int nr = 0;
 	int ret = 0;
 
@@ -2956,7 +2955,7 @@
 						  ra, NULL, index,
 						  last_index + 1 - index);
 			page = find_or_create_page(inode->i_mapping, index,
-						   GFP_NOFS);
+						   mask);
 			if (!page) {
 				btrfs_delalloc_release_metadata(inode,
 							PAGE_CACHE_SIZE);
@@ -3323,8 +3322,11 @@
 	}
 
 	key.objectid = ref_objectid;
-	key.offset = ref_offset;
 	key.type = BTRFS_EXTENT_DATA_KEY;
+	if (ref_offset > ((u64)-1 << 32))
+		key.offset = 0;
+	else
+		key.offset = ref_offset;
 
 	path->search_commit_root = 1;
 	path->skip_locking = 1;
@@ -3645,14 +3647,11 @@
 	 * btrfs_init_reloc_root will use them when there
 	 * is no reservation in transaction handle.
 	 */
-	ret = btrfs_block_rsv_add(NULL, rc->extent_root, rc->block_rsv,
+	ret = btrfs_block_rsv_add(rc->extent_root, rc->block_rsv,
 				  rc->extent_root->nodesize * 256);
 	if (ret)
 		return ret;
 
-	rc->block_rsv->refill_used = 1;
-	btrfs_add_durable_block_rsv(rc->extent_root->fs_info, rc->block_rsv);
-
 	memset(&rc->cluster, 0, sizeof(rc->cluster));
 	rc->search_start = rc->block_group->key.objectid;
 	rc->extents_found = 0;
@@ -3777,8 +3776,7 @@
 			}
 		}
 
-		ret = btrfs_block_rsv_check(trans, rc->extent_root,
-					    rc->block_rsv, 0, 5);
+		ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5);
 		if (ret < 0) {
 			if (ret != -EAGAIN) {
 				err = ret;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index a8d03d5..ed11d38 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -17,10 +17,14 @@
  */
 
 #include <linux/blkdev.h>
+#include <linux/ratelimit.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "disk-io.h"
 #include "ordered-data.h"
+#include "transaction.h"
+#include "backref.h"
+#include "extent_io.h"
 
 /*
  * This is only the first step towards a full-features scrub. It reads all
@@ -29,15 +33,12 @@
  * any can be found.
  *
  * Future enhancements:
- *  - To enhance the performance, better read-ahead strategies for the
- *    extent-tree can be employed.
  *  - In case an unrepairable extent is encountered, track which files are
  *    affected and report them
  *  - In case of a read error on files with nodatasum, map the file and read
  *    the extent to trigger a writeback of the good copy
  *  - track and record media errors, throw out bad devices
  *  - add a mode to also read unallocated space
- *  - make the prefetch cancellable
  */
 
 struct scrub_bio;
@@ -63,7 +64,7 @@
 struct scrub_page {
 	u64			flags;  /* extent flags */
 	u64			generation;
-	u64			mirror_num;
+	int			mirror_num;
 	int			have_csum;
 	u8			csum[BTRFS_CSUM_SIZE];
 };
@@ -87,6 +88,7 @@
 	int			first_free;
 	int			curr;
 	atomic_t		in_flight;
+	atomic_t		fixup_cnt;
 	spinlock_t		list_lock;
 	wait_queue_head_t	list_wait;
 	u16			csum_size;
@@ -100,6 +102,27 @@
 	spinlock_t		stat_lock;
 };
 
+struct scrub_fixup_nodatasum {
+	struct scrub_dev	*sdev;
+	u64			logical;
+	struct btrfs_root	*root;
+	struct btrfs_work	work;
+	int			mirror_num;
+};
+
+struct scrub_warning {
+	struct btrfs_path	*path;
+	u64			extent_item_size;
+	char			*scratch_buf;
+	char			*msg_buf;
+	const char		*errstr;
+	sector_t		sector;
+	u64			logical;
+	struct btrfs_device	*dev;
+	int			msg_bufsize;
+	int			scratch_bufsize;
+};
+
 static void scrub_free_csums(struct scrub_dev *sdev)
 {
 	while (!list_empty(&sdev->csum_list)) {
@@ -175,14 +198,15 @@
 
 		if (i != SCRUB_BIOS_PER_DEV-1)
 			sdev->bios[i]->next_free = i + 1;
-		 else
+		else
 			sdev->bios[i]->next_free = -1;
 	}
 	sdev->first_free = 0;
 	sdev->curr = -1;
 	atomic_set(&sdev->in_flight, 0);
+	atomic_set(&sdev->fixup_cnt, 0);
 	atomic_set(&sdev->cancel_req, 0);
-	sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy);
+	sdev->csum_size = btrfs_super_csum_size(fs_info->super_copy);
 	INIT_LIST_HEAD(&sdev->csum_list);
 
 	spin_lock_init(&sdev->list_lock);
@@ -195,24 +219,361 @@
 	return ERR_PTR(-ENOMEM);
 }
 
+static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
+{
+	u64 isize;
+	u32 nlink;
+	int ret;
+	int i;
+	struct extent_buffer *eb;
+	struct btrfs_inode_item *inode_item;
+	struct scrub_warning *swarn = ctx;
+	struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
+	struct inode_fs_paths *ipath = NULL;
+	struct btrfs_root *local_root;
+	struct btrfs_key root_key;
+
+	root_key.objectid = root;
+	root_key.type = BTRFS_ROOT_ITEM_KEY;
+	root_key.offset = (u64)-1;
+	local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
+	if (IS_ERR(local_root)) {
+		ret = PTR_ERR(local_root);
+		goto err;
+	}
+
+	ret = inode_item_info(inum, 0, local_root, swarn->path);
+	if (ret) {
+		btrfs_release_path(swarn->path);
+		goto err;
+	}
+
+	eb = swarn->path->nodes[0];
+	inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
+					struct btrfs_inode_item);
+	isize = btrfs_inode_size(eb, inode_item);
+	nlink = btrfs_inode_nlink(eb, inode_item);
+	btrfs_release_path(swarn->path);
+
+	ipath = init_ipath(4096, local_root, swarn->path);
+	ret = paths_from_inode(inum, ipath);
+
+	if (ret < 0)
+		goto err;
+
+	/*
+	 * we deliberately ignore the fact that ipath might have been too
+	 * small to hold all of the paths here
+	 */
+	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
+		printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
+			"%s, sector %llu, root %llu, inode %llu, offset %llu, "
+			"length %llu, links %u (path: %s)\n", swarn->errstr,
+			swarn->logical, swarn->dev->name,
+			(unsigned long long)swarn->sector, root, inum, offset,
+			min(isize - offset, (u64)PAGE_SIZE), nlink,
+			(char *)ipath->fspath->val[i]);
+
+	free_ipath(ipath);
+	return 0;
+
+err:
+	printk(KERN_WARNING "btrfs: %s at logical %llu on dev "
+		"%s, sector %llu, root %llu, inode %llu, offset %llu: path "
+		"resolving failed with ret=%d\n", swarn->errstr,
+		swarn->logical, swarn->dev->name,
+		(unsigned long long)swarn->sector, root, inum, offset, ret);
+
+	free_ipath(ipath);
+	return 0;
+}
+
+static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
+				int ix)
+{
+	struct btrfs_device *dev = sbio->sdev->dev;
+	struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
+	struct btrfs_path *path;
+	struct btrfs_key found_key;
+	struct extent_buffer *eb;
+	struct btrfs_extent_item *ei;
+	struct scrub_warning swarn;
+	u32 item_size;
+	int ret;
+	u64 ref_root;
+	u8 ref_level;
+	unsigned long ptr = 0;
+	const int bufsize = 4096;
+	u64 extent_offset;
+
+	path = btrfs_alloc_path();
+
+	swarn.scratch_buf = kmalloc(bufsize, GFP_NOFS);
+	swarn.msg_buf = kmalloc(bufsize, GFP_NOFS);
+	swarn.sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
+	swarn.logical = sbio->logical + ix * PAGE_SIZE;
+	swarn.errstr = errstr;
+	swarn.dev = dev;
+	swarn.msg_bufsize = bufsize;
+	swarn.scratch_bufsize = bufsize;
+
+	if (!path || !swarn.scratch_buf || !swarn.msg_buf)
+		goto out;
+
+	ret = extent_from_logical(fs_info, swarn.logical, path, &found_key);
+	if (ret < 0)
+		goto out;
+
+	extent_offset = swarn.logical - found_key.objectid;
+	swarn.extent_item_size = found_key.offset;
+
+	eb = path->nodes[0];
+	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
+	item_size = btrfs_item_size_nr(eb, path->slots[0]);
+
+	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+		do {
+			ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
+							&ref_root, &ref_level);
+			printk(KERN_WARNING "%s at logical %llu on dev %s, "
+				"sector %llu: metadata %s (level %d) in tree "
+				"%llu\n", errstr, swarn.logical, dev->name,
+				(unsigned long long)swarn.sector,
+				ref_level ? "node" : "leaf",
+				ret < 0 ? -1 : ref_level,
+				ret < 0 ? -1 : ref_root);
+		} while (ret != 1);
+	} else {
+		swarn.path = path;
+		iterate_extent_inodes(fs_info, path, found_key.objectid,
+					extent_offset,
+					scrub_print_warning_inode, &swarn);
+	}
+
+out:
+	btrfs_free_path(path);
+	kfree(swarn.scratch_buf);
+	kfree(swarn.msg_buf);
+}
+
+static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *ctx)
+{
+	struct page *page = NULL;
+	unsigned long index;
+	struct scrub_fixup_nodatasum *fixup = ctx;
+	int ret;
+	int corrected = 0;
+	struct btrfs_key key;
+	struct inode *inode = NULL;
+	u64 end = offset + PAGE_SIZE - 1;
+	struct btrfs_root *local_root;
+
+	key.objectid = root;
+	key.type = BTRFS_ROOT_ITEM_KEY;
+	key.offset = (u64)-1;
+	local_root = btrfs_read_fs_root_no_name(fixup->root->fs_info, &key);
+	if (IS_ERR(local_root))
+		return PTR_ERR(local_root);
+
+	key.type = BTRFS_INODE_ITEM_KEY;
+	key.objectid = inum;
+	key.offset = 0;
+	inode = btrfs_iget(fixup->root->fs_info->sb, &key, local_root, NULL);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+
+	index = offset >> PAGE_CACHE_SHIFT;
+
+	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+	if (!page) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (PageUptodate(page)) {
+		struct btrfs_mapping_tree *map_tree;
+		if (PageDirty(page)) {
+			/*
+			 * we need to write the data to the defect sector. the
+			 * data that was in that sector is not in memory,
+			 * because the page was modified. we must not write the
+			 * modified page to that sector.
+			 *
+			 * TODO: what could be done here: wait for the delalloc
+			 *       runner to write out that page (might involve
+			 *       COW) and see whether the sector is still
+			 *       referenced afterwards.
+			 *
+			 * For the meantime, we'll treat this error as
+			 * uncorrectable, although there is a chance that a
+			 * later scrub will find the bad sector again and that
+			 * there's no dirty page in memory, then.
+			 */
+			ret = -EIO;
+			goto out;
+		}
+		map_tree = &BTRFS_I(inode)->root->fs_info->mapping_tree;
+		ret = repair_io_failure(map_tree, offset, PAGE_SIZE,
+					fixup->logical, page,
+					fixup->mirror_num);
+		unlock_page(page);
+		corrected = !ret;
+	} else {
+		/*
+		 * we need to get good data first. the general readpage path
+		 * will call repair_io_failure for us, we just have to make
+		 * sure we read the bad mirror.
+		 */
+		ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
+					EXTENT_DAMAGED, GFP_NOFS);
+		if (ret) {
+			/* set_extent_bits should give proper error */
+			WARN_ON(ret > 0);
+			if (ret > 0)
+				ret = -EFAULT;
+			goto out;
+		}
+
+		ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
+						btrfs_get_extent,
+						fixup->mirror_num);
+		wait_on_page_locked(page);
+
+		corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
+						end, EXTENT_DAMAGED, 0, NULL);
+		if (!corrected)
+			clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
+						EXTENT_DAMAGED, GFP_NOFS);
+	}
+
+out:
+	if (page)
+		put_page(page);
+	if (inode)
+		iput(inode);
+
+	if (ret < 0)
+		return ret;
+
+	if (ret == 0 && corrected) {
+		/*
+		 * we only need to call readpage for one of the inodes belonging
+		 * to this extent. so make iterate_extent_inodes stop
+		 */
+		return 1;
+	}
+
+	return -EIO;
+}
+
+static void scrub_fixup_nodatasum(struct btrfs_work *work)
+{
+	int ret;
+	struct scrub_fixup_nodatasum *fixup;
+	struct scrub_dev *sdev;
+	struct btrfs_trans_handle *trans = NULL;
+	struct btrfs_fs_info *fs_info;
+	struct btrfs_path *path;
+	int uncorrectable = 0;
+
+	fixup = container_of(work, struct scrub_fixup_nodatasum, work);
+	sdev = fixup->sdev;
+	fs_info = fixup->root->fs_info;
+
+	path = btrfs_alloc_path();
+	if (!path) {
+		spin_lock(&sdev->stat_lock);
+		++sdev->stat.malloc_errors;
+		spin_unlock(&sdev->stat_lock);
+		uncorrectable = 1;
+		goto out;
+	}
+
+	trans = btrfs_join_transaction(fixup->root);
+	if (IS_ERR(trans)) {
+		uncorrectable = 1;
+		goto out;
+	}
+
+	/*
+	 * the idea is to trigger a regular read through the standard path. we
+	 * read a page from the (failed) logical address by specifying the
+	 * corresponding copynum of the failed sector. thus, that readpage is
+	 * expected to fail.
+	 * that is the point where on-the-fly error correction will kick in
+	 * (once it's finished) and rewrite the failed sector if a good copy
+	 * can be found.
+	 */
+	ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
+						path, scrub_fixup_readpage,
+						fixup);
+	if (ret < 0) {
+		uncorrectable = 1;
+		goto out;
+	}
+	WARN_ON(ret != 1);
+
+	spin_lock(&sdev->stat_lock);
+	++sdev->stat.corrected_errors;
+	spin_unlock(&sdev->stat_lock);
+
+out:
+	if (trans && !IS_ERR(trans))
+		btrfs_end_transaction(trans, fixup->root);
+	if (uncorrectable) {
+		spin_lock(&sdev->stat_lock);
+		++sdev->stat.uncorrectable_errors;
+		spin_unlock(&sdev->stat_lock);
+		printk_ratelimited(KERN_ERR "btrfs: unable to fixup "
+					"(nodatasum) error at logical %llu\n",
+					fixup->logical);
+	}
+
+	btrfs_free_path(path);
+	kfree(fixup);
+
+	/* see caller for why we're pretending to be paused in the scrub counters */
+	mutex_lock(&fs_info->scrub_lock);
+	atomic_dec(&fs_info->scrubs_running);
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+	atomic_dec(&sdev->fixup_cnt);
+	wake_up(&fs_info->scrub_pause_wait);
+	wake_up(&sdev->list_wait);
+}
+
 /*
  * scrub_recheck_error gets called when either verification of the page
  * failed or the bio failed to read, e.g. with EIO. In the latter case,
  * recheck_error gets called for every page in the bio, even though only
  * one may be bad
  */
-static void scrub_recheck_error(struct scrub_bio *sbio, int ix)
+static int scrub_recheck_error(struct scrub_bio *sbio, int ix)
 {
+	struct scrub_dev *sdev = sbio->sdev;
+	u64 sector = (sbio->physical + ix * PAGE_SIZE) >> 9;
+	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+					DEFAULT_RATELIMIT_BURST);
+
 	if (sbio->err) {
-		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev,
-				   (sbio->physical + ix * PAGE_SIZE) >> 9,
+		if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, sector,
 				   sbio->bio->bi_io_vec[ix].bv_page) == 0) {
 			if (scrub_fixup_check(sbio, ix) == 0)
-				return;
+				return 0;
 		}
+		if (__ratelimit(&_rs))
+			scrub_print_warning("i/o error", sbio, ix);
+	} else {
+		if (__ratelimit(&_rs))
+			scrub_print_warning("checksum error", sbio, ix);
 	}
 
+	spin_lock(&sdev->stat_lock);
+	++sdev->stat.read_errors;
+	spin_unlock(&sdev->stat_lock);
+
 	scrub_fixup(sbio, ix);
+	return 1;
 }
 
 static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
@@ -250,7 +611,8 @@
 	struct scrub_dev *sdev = sbio->sdev;
 	struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
-	struct btrfs_multi_bio *multi = NULL;
+	struct btrfs_bio *bbio = NULL;
+	struct scrub_fixup_nodatasum *fixup;
 	u64 logical = sbio->logical + ix * PAGE_SIZE;
 	u64 length;
 	int i;
@@ -259,38 +621,57 @@
 
 	if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
 	    (sbio->spag[ix].have_csum == 0)) {
+		fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
+		if (!fixup)
+			goto uncorrectable;
+		fixup->sdev = sdev;
+		fixup->logical = logical;
+		fixup->root = fs_info->extent_root;
+		fixup->mirror_num = sbio->spag[ix].mirror_num;
 		/*
-		 * nodatasum, don't try to fix anything
-		 * FIXME: we can do better, open the inode and trigger a
-		 * writeback
+		 * increment scrubs_running to prevent cancel requests from
+		 * completing as long as a fixup worker is running. we must also
+		 * increment scrubs_paused to prevent deadlocking on pause
+		 * requests used for transaction commits (as the worker uses a
+		 * transaction context). it is safe to regard the fixup worker
+		 * as paused for all practical matters. effectively, we only
+		 * avoid cancellation requests from completing.
 		 */
-		goto uncorrectable;
+		mutex_lock(&fs_info->scrub_lock);
+		atomic_inc(&fs_info->scrubs_running);
+		atomic_inc(&fs_info->scrubs_paused);
+		mutex_unlock(&fs_info->scrub_lock);
+		atomic_inc(&sdev->fixup_cnt);
+		fixup->work.func = scrub_fixup_nodatasum;
+		btrfs_queue_worker(&fs_info->scrub_workers, &fixup->work);
+		return;
 	}
 
 	length = PAGE_SIZE;
 	ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
-			      &multi, 0);
-	if (ret || !multi || length < PAGE_SIZE) {
+			      &bbio, 0);
+	if (ret || !bbio || length < PAGE_SIZE) {
 		printk(KERN_ERR
 		       "scrub_fixup: btrfs_map_block failed us for %llu\n",
 		       (unsigned long long)logical);
 		WARN_ON(1);
+		kfree(bbio);
 		return;
 	}
 
-	if (multi->num_stripes == 1)
+	if (bbio->num_stripes == 1)
 		/* there aren't any replicas */
 		goto uncorrectable;
 
 	/*
 	 * first find a good copy
 	 */
-	for (i = 0; i < multi->num_stripes; ++i) {
-		if (i == sbio->spag[ix].mirror_num)
+	for (i = 0; i < bbio->num_stripes; ++i) {
+		if (i + 1 == sbio->spag[ix].mirror_num)
 			continue;
 
-		if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev,
-				   multi->stripes[i].physical >> 9,
+		if (scrub_fixup_io(READ, bbio->stripes[i].dev->bdev,
+				   bbio->stripes[i].physical >> 9,
 				   sbio->bio->bi_io_vec[ix].bv_page)) {
 			/* I/O-error, this is not a good copy */
 			continue;
@@ -299,7 +680,7 @@
 		if (scrub_fixup_check(sbio, ix) == 0)
 			break;
 	}
-	if (i == multi->num_stripes)
+	if (i == bbio->num_stripes)
 		goto uncorrectable;
 
 	if (!sdev->readonly) {
@@ -314,25 +695,23 @@
 		}
 	}
 
-	kfree(multi);
+	kfree(bbio);
 	spin_lock(&sdev->stat_lock);
 	++sdev->stat.corrected_errors;
 	spin_unlock(&sdev->stat_lock);
 
-	if (printk_ratelimit())
-		printk(KERN_ERR "btrfs: fixed up at %llu\n",
-		       (unsigned long long)logical);
+	printk_ratelimited(KERN_ERR "btrfs: fixed up error at logical %llu\n",
+			       (unsigned long long)logical);
 	return;
 
 uncorrectable:
-	kfree(multi);
+	kfree(bbio);
 	spin_lock(&sdev->stat_lock);
 	++sdev->stat.uncorrectable_errors;
 	spin_unlock(&sdev->stat_lock);
 
-	if (printk_ratelimit())
-		printk(KERN_ERR "btrfs: unable to fixup at %llu\n",
-			 (unsigned long long)logical);
+	printk_ratelimited(KERN_ERR "btrfs: unable to fixup (regular) error at "
+				"logical %llu\n", (unsigned long long)logical);
 }
 
 static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
@@ -382,8 +761,14 @@
 	int ret;
 
 	if (sbio->err) {
+		ret = 0;
 		for (i = 0; i < sbio->count; ++i)
-			scrub_recheck_error(sbio, i);
+			ret |= scrub_recheck_error(sbio, i);
+		if (!ret) {
+			spin_lock(&sdev->stat_lock);
+			++sdev->stat.unverified_errors;
+			spin_unlock(&sdev->stat_lock);
+		}
 
 		sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 		sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
@@ -396,10 +781,6 @@
 			bi->bv_offset = 0;
 			bi->bv_len = PAGE_SIZE;
 		}
-
-		spin_lock(&sdev->stat_lock);
-		++sdev->stat.read_errors;
-		spin_unlock(&sdev->stat_lock);
 		goto out;
 	}
 	for (i = 0; i < sbio->count; ++i) {
@@ -420,8 +801,14 @@
 			WARN_ON(1);
 		}
 		kunmap_atomic(buffer, KM_USER0);
-		if (ret)
-			scrub_recheck_error(sbio, i);
+		if (ret) {
+			ret = scrub_recheck_error(sbio, i);
+			if (!ret) {
+				spin_lock(&sdev->stat_lock);
+				++sdev->stat.unverified_errors;
+				spin_unlock(&sdev->stat_lock);
+			}
+		}
 	}
 
 out:
@@ -604,7 +991,7 @@
 }
 
 static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
-		      u64 physical, u64 flags, u64 gen, u64 mirror_num,
+		      u64 physical, u64 flags, u64 gen, int mirror_num,
 		      u8 *csum, int force)
 {
 	struct scrub_bio *sbio;
@@ -701,7 +1088,7 @@
 
 /* scrub extent tries to collect up to 64 kB for each bio */
 static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
-			u64 physical, u64 flags, u64 gen, u64 mirror_num)
+			u64 physical, u64 flags, u64 gen, int mirror_num)
 {
 	int ret;
 	u8 csum[BTRFS_CSUM_SIZE];
@@ -741,13 +1128,16 @@
 	int slot;
 	int i;
 	u64 nstripes;
-	int start_stripe;
 	struct extent_buffer *l;
 	struct btrfs_key key;
 	u64 physical;
 	u64 logical;
 	u64 generation;
-	u64 mirror_num;
+	int mirror_num;
+	struct reada_control *reada1;
+	struct reada_control *reada2;
+	struct btrfs_key key_start;
+	struct btrfs_key key_end;
 
 	u64 increment = map->stripe_len;
 	u64 offset;
@@ -758,102 +1148,88 @@
 	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
 		offset = map->stripe_len * num;
 		increment = map->stripe_len * map->num_stripes;
-		mirror_num = 0;
+		mirror_num = 1;
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
 		int factor = map->num_stripes / map->sub_stripes;
 		offset = map->stripe_len * (num / map->sub_stripes);
 		increment = map->stripe_len * factor;
-		mirror_num = num % map->sub_stripes;
+		mirror_num = num % map->sub_stripes + 1;
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
 		increment = map->stripe_len;
-		mirror_num = num % map->num_stripes;
+		mirror_num = num % map->num_stripes + 1;
 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
 		increment = map->stripe_len;
-		mirror_num = num % map->num_stripes;
+		mirror_num = num % map->num_stripes + 1;
 	} else {
 		increment = map->stripe_len;
-		mirror_num = 0;
+		mirror_num = 1;
 	}
 
 	path = btrfs_alloc_path();
 	if (!path)
 		return -ENOMEM;
 
-	path->reada = 2;
 	path->search_commit_root = 1;
 	path->skip_locking = 1;
 
 	/*
-	 * find all extents for each stripe and just read them to get
-	 * them into the page cache
-	 * FIXME: we can do better. build a more intelligent prefetching
+	 * trigger readahead for the extent tree and the csum tree and wait
+	 * for completion. during the readahead, the scrub is officially
+	 * paused so that it does not hold off transaction commits.
 	 */
 	logical = base + offset;
-	physical = map->stripes[num].physical;
-	ret = 0;
-	for (i = 0; i < nstripes; ++i) {
-		key.objectid = logical;
-		key.type = BTRFS_EXTENT_ITEM_KEY;
-		key.offset = (u64)0;
 
-		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-		if (ret < 0)
-			goto out_noplug;
+	wait_event(sdev->list_wait,
+		   atomic_read(&sdev->in_flight) == 0);
+	atomic_inc(&fs_info->scrubs_paused);
+	wake_up(&fs_info->scrub_pause_wait);
 
-		/*
-		 * we might miss half an extent here, but that doesn't matter,
-		 * as it's only the prefetch
-		 */
-		while (1) {
-			l = path->nodes[0];
-			slot = path->slots[0];
-			if (slot >= btrfs_header_nritems(l)) {
-				ret = btrfs_next_leaf(root, path);
-				if (ret == 0)
-					continue;
-				if (ret < 0)
-					goto out_noplug;
+	/* FIXME it might be better to start readahead at commit root */
+	key_start.objectid = logical;
+	key_start.type = BTRFS_EXTENT_ITEM_KEY;
+	key_start.offset = (u64)0;
+	key_end.objectid = base + offset + nstripes * increment;
+	key_end.type = BTRFS_EXTENT_ITEM_KEY;
+	key_end.offset = (u64)0;
+	reada1 = btrfs_reada_add(root, &key_start, &key_end);
 
-				break;
-			}
-			btrfs_item_key_to_cpu(l, &key, slot);
+	key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+	key_start.type = BTRFS_EXTENT_CSUM_KEY;
+	key_start.offset = logical;
+	key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
+	key_end.type = BTRFS_EXTENT_CSUM_KEY;
+	key_end.offset = base + offset + nstripes * increment;
+	reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
 
-			if (key.objectid >= logical + map->stripe_len)
-				break;
+	if (!IS_ERR(reada1))
+		btrfs_reada_wait(reada1);
+	if (!IS_ERR(reada2))
+		btrfs_reada_wait(reada2);
 
-			path->slots[0]++;
-		}
-		btrfs_release_path(path);
-		logical += increment;
-		physical += map->stripe_len;
-		cond_resched();
+	mutex_lock(&fs_info->scrub_lock);
+	while (atomic_read(&fs_info->scrub_pause_req)) {
+		mutex_unlock(&fs_info->scrub_lock);
+		wait_event(fs_info->scrub_pause_wait,
+		   atomic_read(&fs_info->scrub_pause_req) == 0);
+		mutex_lock(&fs_info->scrub_lock);
 	}
+	atomic_dec(&fs_info->scrubs_paused);
+	mutex_unlock(&fs_info->scrub_lock);
+	wake_up(&fs_info->scrub_pause_wait);
 
 	/*
 	 * collect all data csums for the stripe to avoid seeking during
 	 * the scrub. This might currently (crc32) end up being about 1MB
 	 */
-	start_stripe = 0;
 	blk_start_plug(&plug);
-again:
-	logical = base + offset + start_stripe * increment;
-	for (i = start_stripe; i < nstripes; ++i) {
-		ret = btrfs_lookup_csums_range(csum_root, logical,
-					       logical + map->stripe_len - 1,
-					       &sdev->csum_list, 1);
-		if (ret)
-			goto out;
 
-		logical += increment;
-		cond_resched();
-	}
 	/*
 	 * now find all extents for each stripe and scrub them
 	 */
-	logical = base + offset + start_stripe * increment;
-	physical = map->stripes[num].physical + start_stripe * map->stripe_len;
+	logical = base + offset;
+	physical = map->stripes[num].physical;
 	ret = 0;
-	for (i = start_stripe; i < nstripes; ++i) {
+	for (i = 0; i < nstripes; ++i) {
 		/*
 		 * canceled?
 		 */
@@ -882,11 +1258,14 @@
 			atomic_dec(&fs_info->scrubs_paused);
 			mutex_unlock(&fs_info->scrub_lock);
 			wake_up(&fs_info->scrub_pause_wait);
-			scrub_free_csums(sdev);
-			start_stripe = i;
-			goto again;
 		}
 
+		ret = btrfs_lookup_csums_range(csum_root, logical,
+					       logical + map->stripe_len - 1,
+					       &sdev->csum_list, 1);
+		if (ret)
+			goto out;
+
 		key.objectid = logical;
 		key.type = BTRFS_EXTENT_ITEM_KEY;
 		key.offset = (u64)0;
@@ -982,7 +1361,6 @@
 
 out:
 	blk_finish_plug(&plug);
-out_noplug:
 	btrfs_free_path(path);
 	return ret < 0 ? ret : 0;
 }
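
The two pause sections above (around the new readahead and inside the per-stripe loop) follow the same handshake: announce yourself as paused via scrubs_paused, wake anyone sleeping on scrub_pause_wait, then block until scrub_pause_req drops back to zero. Below is a minimal userspace sketch of that handshake with a condition variable; the names mirror the kernel fields but the code is illustrative only, not kernel API.

    #include <pthread.h>

    static pthread_mutex_t scrub_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t scrub_pause_wait = PTHREAD_COND_INITIALIZER;
    static int scrubs_paused;
    static int scrub_pause_req;

    /* worker side: honor an outstanding pause request before continuing */
    static void scrub_pause_point(void)
    {
            pthread_mutex_lock(&scrub_lock);
            scrubs_paused++;
            /* a pauser may be waiting for scrubs_paused to rise */
            pthread_cond_broadcast(&scrub_pause_wait);
            while (scrub_pause_req)
                    pthread_cond_wait(&scrub_pause_wait, &scrub_lock);
            scrubs_paused--;
            pthread_mutex_unlock(&scrub_lock);
    }

    int main(void)
    {
            scrub_pause_point();    /* no pause requested: returns at once */
            return 0;
    }
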
@@ -1253,10 +1631,11 @@
 		ret = scrub_enumerate_chunks(sdev, start, end);
 
 	wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
-
 	atomic_dec(&fs_info->scrubs_running);
 	wake_up(&fs_info->scrub_pause_wait);
 
+	wait_event(sdev->list_wait, atomic_read(&sdev->fixup_cnt) == 0);
+
 	if (progress)
 		memcpy(progress, &sdev->stat, sizeof(*progress));
 
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 15634d4..57080df 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -40,6 +40,7 @@
 #include <linux/magic.h>
 #include <linux/slab.h>
 #include <linux/cleancache.h>
+#include <linux/mnt_namespace.h>
 #include "compat.h"
 #include "delayed-inode.h"
 #include "ctree.h"
@@ -58,6 +59,7 @@
 #include <trace/events/btrfs.h>
 
 static const struct super_operations btrfs_super_ops;
+static struct file_system_type btrfs_fs_type;
 
 static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
 				      char nbuf[16])
@@ -162,7 +164,7 @@
 	Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
 	Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
 	Opt_enospc_debug, Opt_subvolrootid, Opt_defrag,
-	Opt_inode_cache, Opt_err,
+	Opt_inode_cache, Opt_no_space_cache, Opt_recovery, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -195,6 +197,8 @@
 	{Opt_subvolrootid, "subvolrootid=%d"},
 	{Opt_defrag, "autodefrag"},
 	{Opt_inode_cache, "inode_cache"},
+	{Opt_no_space_cache, "no_space_cache"},
+	{Opt_recovery, "recovery"},
 	{Opt_err, NULL},
 };
 
@@ -206,14 +210,19 @@
 {
 	struct btrfs_fs_info *info = root->fs_info;
 	substring_t args[MAX_OPT_ARGS];
-	char *p, *num, *orig;
+	char *p, *num, *orig = NULL;
+	u64 cache_gen;
 	int intarg;
 	int ret = 0;
 	char *compress_type;
 	bool compress_force = false;
 
+	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
+	if (cache_gen)
+		btrfs_set_opt(info->mount_opt, SPACE_CACHE);
+
 	if (!options)
-		return 0;
+		goto out;
 
 	/*
 	 * strsep changes the string, duplicate it because parse_options
@@ -360,9 +369,12 @@
 			btrfs_set_opt(info->mount_opt, DISCARD);
 			break;
 		case Opt_space_cache:
-			printk(KERN_INFO "btrfs: enabling disk space caching\n");
 			btrfs_set_opt(info->mount_opt, SPACE_CACHE);
 			break;
+		case Opt_no_space_cache:
+			printk(KERN_INFO "btrfs: disabling disk space caching\n");
+			btrfs_clear_opt(info->mount_opt, SPACE_CACHE);
+			break;
 		case Opt_inode_cache:
 			printk(KERN_INFO "btrfs: enabling inode map caching\n");
 			btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE);
@@ -381,6 +393,10 @@
 			printk(KERN_INFO "btrfs: enabling auto defrag");
 			btrfs_set_opt(info->mount_opt, AUTO_DEFRAG);
 			break;
+		case Opt_recovery:
+			printk(KERN_INFO "btrfs: enabling auto recovery");
+			btrfs_set_opt(info->mount_opt, RECOVERY);
+			break;
 		case Opt_err:
 			printk(KERN_INFO "btrfs: unrecognized mount option "
 			       "'%s'\n", p);
@@ -391,6 +407,8 @@
 		}
 	}
 out:
+	if (!ret && btrfs_test_opt(root, SPACE_CACHE))
+		printk(KERN_INFO "btrfs: disk space caching is enabled\n");
 	kfree(orig);
 	return ret;
 }
@@ -406,12 +424,12 @@
 		u64 *subvol_rootid, struct btrfs_fs_devices **fs_devices)
 {
 	substring_t args[MAX_OPT_ARGS];
-	char *opts, *orig, *p;
+	char *device_name, *opts, *orig, *p;
 	int error = 0;
 	int intarg;
 
 	if (!options)
-		goto out;
+		return 0;
 
 	/*
 	 * strsep changes the string, duplicate it because parse_options
@@ -457,29 +475,24 @@
 			}
 			break;
 		case Opt_device:
-			error = btrfs_scan_one_device(match_strdup(&args[0]),
+			device_name = match_strdup(&args[0]);
+			if (!device_name) {
+				error = -ENOMEM;
+				goto out;
+			}
+			error = btrfs_scan_one_device(device_name,
 					flags, holder, fs_devices);
+			kfree(device_name);
 			if (error)
-				goto out_free_opts;
+				goto out;
 			break;
 		default:
 			break;
 		}
 	}
 
- out_free_opts:
+out:
 	kfree(orig);
- out:
-	/*
-	 * If no subvolume name is specified we use the default one.  Allocate
-	 * a copy of the string "." here so that code later in the
-	 * mount path doesn't care if it's the default volume or another one.
-	 */
-	if (!*subvol_name) {
-		*subvol_name = kstrdup(".", GFP_KERNEL);
-		if (!*subvol_name)
-			return -ENOMEM;
-	}
 	return error;
 }
 
@@ -492,7 +505,6 @@
 	struct btrfs_path *path;
 	struct btrfs_key location;
 	struct inode *inode;
-	struct dentry *dentry;
 	u64 dir_id;
 	int new = 0;
 
@@ -517,7 +529,7 @@
 	 * will mount by default if we haven't been given a specific subvolume
 	 * to mount.
 	 */
-	dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
+	dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
 	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
 	if (IS_ERR(di)) {
 		btrfs_free_path(path);
@@ -566,29 +578,7 @@
 		return dget(sb->s_root);
 	}
 
-	if (new) {
-		const struct qstr name = { .name = "/", .len = 1 };
-
-		/*
-		 * New inode, we need to make the dentry a sibling of s_root so
-		 * everything gets cleaned up properly on unmount.
-		 */
-		dentry = d_alloc(sb->s_root, &name);
-		if (!dentry) {
-			iput(inode);
-			return ERR_PTR(-ENOMEM);
-		}
-		d_splice_alias(inode, dentry);
-	} else {
-		/*
-		 * We found the inode in cache, just find a dentry for it and
-		 * put the reference to the inode we just got.
-		 */
-		dentry = d_find_alias(inode);
-		iput(inode);
-	}
-
-	return dentry;
+	return d_obtain_alias(inode);
 }
 
 static int btrfs_fill_super(struct super_block *sb,
@@ -719,6 +709,8 @@
 		seq_puts(seq, ",noacl");
 	if (btrfs_test_opt(root, SPACE_CACHE))
 		seq_puts(seq, ",space_cache");
+	else
+		seq_puts(seq, ",no_space_cache");
 	if (btrfs_test_opt(root, CLEAR_CACHE))
 		seq_puts(seq, ",clear_cache");
 	if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
@@ -753,6 +745,137 @@
 	return set_anon_super(s, data);
 }
 
+/*
+ * subvolumes are identified by ino 256
+ */
+static inline int is_subvolume_inode(struct inode *inode)
+{
+	if (inode && inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+		return 1;
+	return 0;
+}
+
+/*
+ * This will strip out the subvol=%s argument for an argument string and add
+ * subvolid=0 to make sure we get the actual tree root for path walking to the
+ * subvol we want.
+ */
+static char *setup_root_args(char *args)
+{
+	unsigned copied = 0;
+	unsigned len = strlen(args) + 2;
+	char *pos;
+	char *ret;
+
+	/*
+	 * We need the same args as before, but minus
+	 *
+	 * subvol=a
+	 *
+	 * and add
+	 *
+	 * subvolid=0
+	 *
+	 * which is a difference of at most 2 characters, so we allocate
+	 * strlen(args) + 2 characters.
+	 */
+	ret = kzalloc(len * sizeof(char), GFP_NOFS);
+	if (!ret)
+		return NULL;
+	pos = strstr(args, "subvol=");
+
+	/* This shouldn't happen, but just in case... */
+	if (!pos) {
+		kfree(ret);
+		return NULL;
+	}
+
+	/*
+	 * The subvol=<> arg is not at the front of the string; copy everything
+	 * up to it into ret.
+	 */
+	if (pos != args) {
+		*pos = '\0';
+		strcpy(ret, args);
+		copied += strlen(args);
+		pos++;
+	}
+
+	strncpy(ret + copied, "subvolid=0", len - copied);
+
+	/* Length of subvolid=0 */
+	copied += 10;
+
+	/*
+	 * If there is no comma after the subvol= option, then we know there
+	 * are no other options and we can just return.
+	 */
+	pos = strchr(pos, ',');
+	if (!pos)
+		return ret;
+
+	/* Copy the rest of the arguments into our buffer */
+	strncpy(ret + copied, pos, len - copied);
+	copied += strlen(pos);
+
+	return ret;
+}
+
+static struct dentry *mount_subvol(const char *subvol_name, int flags,
+				   const char *device_name, char *data)
+{
+	struct super_block *s;
+	struct dentry *root;
+	struct vfsmount *mnt;
+	struct mnt_namespace *ns_private;
+	char *newargs;
+	struct path path;
+	int error;
+
+	newargs = setup_root_args(data);
+	if (!newargs)
+		return ERR_PTR(-ENOMEM);
+	mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name,
+			     newargs);
+	kfree(newargs);
+	if (IS_ERR(mnt))
+		return ERR_CAST(mnt);
+
+	ns_private = create_mnt_ns(mnt);
+	if (IS_ERR(ns_private)) {
+		mntput(mnt);
+		return ERR_CAST(ns_private);
+	}
+
+	/*
+	 * This will trigger the automount of the subvol so we can just
+	 * drop the mnt we have here and return the dentry that we
+	 * found.
+	 */
+	error = vfs_path_lookup(mnt->mnt_root, mnt, subvol_name,
+				LOOKUP_FOLLOW, &path);
+	put_mnt_ns(ns_private);
+	if (error)
+		return ERR_PTR(error);
+
+	if (!is_subvolume_inode(path.dentry->d_inode)) {
+		path_put(&path);
+		mntput(mnt);
+		error = -EINVAL;
+		printk(KERN_ERR "btrfs: '%s' is not a valid subvolume\n",
+				subvol_name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Get a ref to the sb and the dentry we found and return it */
+	s = path.mnt->mnt_sb;
+	atomic_inc(&s->s_active);
+	root = dget(path.dentry);
+	path_put(&path);
+	down_write(&s->s_umount);
+
+	return root;
+}
 
 /*
  * Find a superblock for the given device / mount point.
@@ -784,13 +907,19 @@
 	if (error)
 		return ERR_PTR(error);
 
+	if (subvol_name) {
+		root = mount_subvol(subvol_name, flags, device_name, data);
+		kfree(subvol_name);
+		return root;
+	}
+
 	error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
 	if (error)
-		goto error_free_subvol_name;
+		return ERR_PTR(error);
 
 	error = btrfs_open_devices(fs_devices, mode, fs_type);
 	if (error)
-		goto error_free_subvol_name;
+		return ERR_PTR(error);
 
 	if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
 		error = -EACCES;
@@ -813,88 +942,57 @@
 	fs_info->fs_devices = fs_devices;
 	tree_root->fs_info = fs_info;
 
+	fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
+	fs_info->super_for_commit = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
+	if (!fs_info->super_copy || !fs_info->super_for_commit) {
+		error = -ENOMEM;
+		goto error_close_devices;
+	}
+
 	bdev = fs_devices->latest_bdev;
 	s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root);
-	if (IS_ERR(s))
-		goto error_s;
+	if (IS_ERR(s)) {
+		error = PTR_ERR(s);
+		goto error_close_devices;
+	}
 
 	if (s->s_root) {
 		if ((flags ^ s->s_flags) & MS_RDONLY) {
 			deactivate_locked_super(s);
-			error = -EBUSY;
-			goto error_close_devices;
+			return ERR_PTR(-EBUSY);
 		}
 
 		btrfs_close_devices(fs_devices);
-		kfree(fs_info);
+		free_fs_info(fs_info);
 		kfree(tree_root);
 	} else {
 		char b[BDEVNAME_SIZE];
 
 		s->s_flags = flags | MS_NOSEC;
 		strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
+		btrfs_sb(s)->fs_info->bdev_holder = fs_type;
 		error = btrfs_fill_super(s, fs_devices, data,
 					 flags & MS_SILENT ? 1 : 0);
 		if (error) {
 			deactivate_locked_super(s);
-			goto error_free_subvol_name;
+			return ERR_PTR(error);
 		}
 
-		btrfs_sb(s)->fs_info->bdev_holder = fs_type;
 		s->s_flags |= MS_ACTIVE;
 	}
 
-	/* if they gave us a subvolume name bind mount into that */
-	if (strcmp(subvol_name, ".")) {
-		struct dentry *new_root;
-
-		root = get_default_root(s, subvol_rootid);
-		if (IS_ERR(root)) {
-			error = PTR_ERR(root);
-			deactivate_locked_super(s);
-			goto error_free_subvol_name;
-		}
-
-		mutex_lock(&root->d_inode->i_mutex);
-		new_root = lookup_one_len(subvol_name, root,
-				      strlen(subvol_name));
-		mutex_unlock(&root->d_inode->i_mutex);
-
-		if (IS_ERR(new_root)) {
-			dput(root);
-			deactivate_locked_super(s);
-			error = PTR_ERR(new_root);
-			goto error_free_subvol_name;
-		}
-		if (!new_root->d_inode) {
-			dput(root);
-			dput(new_root);
-			deactivate_locked_super(s);
-			error = -ENXIO;
-			goto error_free_subvol_name;
-		}
-		dput(root);
-		root = new_root;
-	} else {
-		root = get_default_root(s, subvol_objectid);
-		if (IS_ERR(root)) {
-			error = PTR_ERR(root);
-			deactivate_locked_super(s);
-			goto error_free_subvol_name;
-		}
+	root = get_default_root(s, subvol_objectid);
+	if (IS_ERR(root)) {
+		deactivate_locked_super(s);
+		return root;
 	}
 
-	kfree(subvol_name);
 	return root;
 
-error_s:
-	error = PTR_ERR(s);
 error_close_devices:
 	btrfs_close_devices(fs_devices);
-	kfree(fs_info);
+	free_fs_info(fs_info);
 	kfree(tree_root);
-error_free_subvol_name:
-	kfree(subvol_name);
 	return ERR_PTR(error);
 }
 
@@ -919,7 +1017,7 @@
 		if (root->fs_info->fs_devices->rw_devices == 0)
 			return -EACCES;
 
-		if (btrfs_super_log_root(&root->fs_info->super_copy) != 0)
+		if (btrfs_super_log_root(root->fs_info->super_copy) != 0)
 			return -EINVAL;
 
 		ret = btrfs_cleanup_fs_roots(root->fs_info);
@@ -1085,7 +1183,7 @@
 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct btrfs_root *root = btrfs_sb(dentry->d_sb);
-	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
+	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
 	struct list_head *head = &root->fs_info->space_info;
 	struct btrfs_space_info *found;
 	u64 total_used = 0;
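
setup_root_args() above does plain string surgery on the mount options: drop the subvol=<name> token and splice in subvolid=0 so the path walk starts from the actual tree root. A standalone userspace sketch of the same transformation, assuming comma-separated options; it sizes the buffer for the worst-case growth plus the terminating NUL, and the function name is illustrative, not the kernel helper:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* replace "subvol=<name>" with "subvolid=0"; returns a malloc'd string */
    static char *root_args(const char *args)
    {
            /*
             * "subvol=X" (8+ chars) becomes "subvolid=0" (10 chars), so the
             * string grows by at most 2; +3 leaves room for the NUL as well.
             */
            size_t len = strlen(args) + 3;
            const char *pos = strstr(args, "subvol=");
            const char *rest;
            char *ret;

            if (!pos)
                    return NULL;
            ret = calloc(len, 1);
            if (!ret)
                    return NULL;

            memcpy(ret, args, pos - args);  /* options before subvol= */
            strcat(ret, "subvolid=0");
            rest = strchr(pos, ',');        /* options after subvol=<name> */
            if (rest)
                    strcat(ret, rest);
            return ret;
    }

    int main(void)
    {
            char *s = root_args("rw,subvol=home,compress");

            printf("%s\n", s ? s : "(error)");  /* rw,subvolid=0,compress */
            free(s);
            return 0;
    }
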
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index e24b796..960835e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -55,6 +55,7 @@
 	struct btrfs_transaction *cur_trans;
 
 	spin_lock(&root->fs_info->trans_lock);
+loop:
 	if (root->fs_info->trans_no_join) {
 		if (!nofail) {
 			spin_unlock(&root->fs_info->trans_lock);
@@ -75,16 +76,18 @@
 	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
 	if (!cur_trans)
 		return -ENOMEM;
+
 	spin_lock(&root->fs_info->trans_lock);
 	if (root->fs_info->running_transaction) {
+		/*
+		 * someone started a transaction after we unlocked.  Make sure
+		 * to redo the trans_no_join checks above
+		 */
 		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
 		cur_trans = root->fs_info->running_transaction;
-		atomic_inc(&cur_trans->use_count);
-		atomic_inc(&cur_trans->num_writers);
-		cur_trans->num_joined++;
-		spin_unlock(&root->fs_info->trans_lock);
-		return 0;
+		goto loop;
 	}
+
 	atomic_set(&cur_trans->num_writers, 1);
 	cur_trans->num_joined = 0;
 	init_waitqueue_head(&cur_trans->writer_wait);
@@ -275,7 +278,7 @@
 	 */
 	if (num_items > 0 && root != root->fs_info->chunk_root) {
 		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
-		ret = btrfs_block_rsv_add(NULL, root,
+		ret = btrfs_block_rsv_add(root,
 					  &root->fs_info->trans_block_rsv,
 					  num_bytes);
 		if (ret)
@@ -418,8 +421,8 @@
 				  struct btrfs_root *root)
 {
 	int ret;
-	ret = btrfs_block_rsv_check(trans, root,
-				    &root->fs_info->global_block_rsv, 0, 5);
+
+	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
 	return ret ? 1 : 0;
 }
 
@@ -427,17 +430,26 @@
 				 struct btrfs_root *root)
 {
 	struct btrfs_transaction *cur_trans = trans->transaction;
+	struct btrfs_block_rsv *rsv = trans->block_rsv;
 	int updates;
 
 	smp_mb();
 	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
 		return 1;
 
+	/*
+	 * We need to do this in case we're deleting csums so the global block
+	 * rsv gets used instead of the csum block rsv.
+	 */
+	trans->block_rsv = NULL;
+
 	updates = trans->delayed_ref_updates;
 	trans->delayed_ref_updates = 0;
 	if (updates)
 		btrfs_run_delayed_refs(trans, root, updates);
 
+	trans->block_rsv = rsv;
+
 	return should_end_transaction(trans, root);
 }
 
@@ -453,6 +465,8 @@
 		return 0;
 	}
 
+	btrfs_trans_release_metadata(trans, root);
+	trans->block_rsv = NULL;
 	while (count < 4) {
 		unsigned long cur = trans->delayed_ref_updates;
 		trans->delayed_ref_updates = 0;
@@ -473,8 +487,6 @@
 		count++;
 	}
 
-	btrfs_trans_release_metadata(trans, root);
-
 	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
 	    should_end_transaction(trans, root)) {
 		trans->transaction->blocked = 1;
@@ -562,50 +574,21 @@
 int btrfs_write_marked_extents(struct btrfs_root *root,
 			       struct extent_io_tree *dirty_pages, int mark)
 {
-	int ret;
 	int err = 0;
 	int werr = 0;
-	struct page *page;
-	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
 	u64 start = 0;
 	u64 end;
-	unsigned long index;
 
-	while (1) {
-		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
-					    mark);
-		if (ret)
-			break;
-		while (start <= end) {
-			cond_resched();
-
-			index = start >> PAGE_CACHE_SHIFT;
-			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
-			page = find_get_page(btree_inode->i_mapping, index);
-			if (!page)
-				continue;
-
-			btree_lock_page_hook(page);
-			if (!page->mapping) {
-				unlock_page(page);
-				page_cache_release(page);
-				continue;
-			}
-
-			if (PageWriteback(page)) {
-				if (PageDirty(page))
-					wait_on_page_writeback(page);
-				else {
-					unlock_page(page);
-					page_cache_release(page);
-					continue;
-				}
-			}
-			err = write_one_page(page, 0);
-			if (err)
-				werr = err;
-			page_cache_release(page);
-		}
+	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
+				      mark)) {
+		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
+				   GFP_NOFS);
+		err = filemap_fdatawrite_range(mapping, start, end);
+		if (err)
+			werr = err;
+		cond_resched();
+		start = end + 1;
 	}
 	if (err)
 		werr = err;
@@ -621,39 +604,20 @@
 int btrfs_wait_marked_extents(struct btrfs_root *root,
 			      struct extent_io_tree *dirty_pages, int mark)
 {
-	int ret;
 	int err = 0;
 	int werr = 0;
-	struct page *page;
-	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
 	u64 start = 0;
 	u64 end;
-	unsigned long index;
 
-	while (1) {
-		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
-					    mark);
-		if (ret)
-			break;
-
-		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
-		while (start <= end) {
-			index = start >> PAGE_CACHE_SHIFT;
-			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
-			page = find_get_page(btree_inode->i_mapping, index);
-			if (!page)
-				continue;
-			if (PageDirty(page)) {
-				btree_lock_page_hook(page);
-				wait_on_page_writeback(page);
-				err = write_one_page(page, 0);
-				if (err)
-					werr = err;
-			}
-			wait_on_page_writeback(page);
-			page_cache_release(page);
-			cond_resched();
-		}
+	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
+				      EXTENT_NEED_WAIT)) {
+		clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
+		err = filemap_fdatawait_range(mapping, start, end);
+		if (err)
+			werr = err;
+		cond_resched();
+		start = end + 1;
 	}
 	if (err)
 		werr = err;
@@ -673,7 +637,12 @@
 
 	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
 	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
-	return ret || ret2;
+
+	if (ret)
+		return ret;
+	if (ret2)
+		return ret2;
+	return 0;
 }
 
 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
@@ -911,10 +880,9 @@
 	}
 
 	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
-	btrfs_orphan_pre_snapshot(trans, pending, &to_reserve);
 
 	if (to_reserve > 0) {
-		ret = btrfs_block_rsv_add(trans, root, &pending->block_rsv,
+		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
 					  to_reserve);
 		if (ret) {
 			pending->error = ret;
@@ -1002,7 +970,6 @@
 	BUG_ON(IS_ERR(pending->snap));
 
 	btrfs_reloc_post_snapshot(trans, pending);
-	btrfs_orphan_post_snapshot(trans, pending);
 fail:
 	kfree(new_root_item);
 	trans->block_rsv = rsv;
@@ -1032,7 +999,7 @@
 	struct btrfs_root_item *root_item;
 	struct btrfs_super_block *super;
 
-	super = &root->fs_info->super_copy;
+	super = root->fs_info->super_copy;
 
 	root_item = &root->fs_info->chunk_root->root_item;
 	super->chunk_root = root_item->bytenr;
@@ -1043,7 +1010,7 @@
 	super->root = root_item->bytenr;
 	super->generation = root_item->generation;
 	super->root_level = root_item->level;
-	if (super->cache_generation != 0 || btrfs_test_opt(root, SPACE_CACHE))
+	if (btrfs_test_opt(root, SPACE_CACHE))
 		super->cache_generation = root_item->generation;
 }
 
@@ -1168,14 +1135,15 @@
 
 	btrfs_run_ordered_operations(root, 0);
 
+	btrfs_trans_release_metadata(trans, root);
+	trans->block_rsv = NULL;
+
 	/* make a pass through all the delayed refs we have so far
 	 * any runnings procs may add more while we are here
 	 */
 	ret = btrfs_run_delayed_refs(trans, root, 0);
 	BUG_ON(ret);
 
-	btrfs_trans_release_metadata(trans, root);
-
 	cur_trans = trans->transaction;
 	/*
 	 * set the flushing flag so procs in this transaction have to
@@ -1341,12 +1309,12 @@
 	update_super_roots(root);
 
 	if (!root->fs_info->log_root_recovering) {
-		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
-		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
+		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
+		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
 	}
 
-	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
-	       sizeof(root->fs_info->super_copy));
+	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
+	       sizeof(*root->fs_info->super_copy));
 
 	trans->transaction->blocked = 0;
 	spin_lock(&root->fs_info->trans_lock);
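
The join_transaction() change above replaces the old "someone beat us to it" fast path with a goto back to the top, so the trans_no_join check is redone every time the lock is retaken. The underlying pattern — allocate outside the lock, recheck under it, retry on a lost race — in a small pthread sketch with hypothetical names:

    #include <pthread.h>
    #include <stdlib.h>

    struct txn {
            int users;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct txn *running;
    static int no_join;

    /* join the running transaction, or install a new one */
    static struct txn *join_txn(void)
    {
            struct txn *t;

            pthread_mutex_lock(&lock);
    loop:
            if (no_join) {                  /* joins temporarily forbidden */
                    pthread_mutex_unlock(&lock);
                    return NULL;
            }
            if (running) {
                    running->users++;
                    pthread_mutex_unlock(&lock);
                    return running;
            }
            pthread_mutex_unlock(&lock);

            t = calloc(1, sizeof(*t));      /* allocate without the lock held */
            if (!t)
                    return NULL;

            pthread_mutex_lock(&lock);
            if (running) {                  /* lost the race: redo all checks */
                    free(t);
                    goto loop;
            }
            t->users = 1;
            running = t;
            pthread_mutex_unlock(&lock);
            return t;
    }

    int main(void)
    {
            return join_txn() ? 0 : 1;
    }
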
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 0618aa3..3568374 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -276,8 +276,9 @@
 			      struct walk_control *wc, u64 gen)
 {
 	if (wc->pin)
-		btrfs_pin_extent(log->fs_info->extent_root,
-				 eb->start, eb->len, 0);
+		btrfs_pin_extent_for_log_replay(wc->trans,
+						log->fs_info->extent_root,
+						eb->start, eb->len);
 
 	if (btrfs_buffer_uptodate(eb, gen)) {
 		if (wc->write)
@@ -1760,7 +1761,7 @@
 
 				WARN_ON(root_owner !=
 					BTRFS_TREE_LOG_OBJECTID);
-				ret = btrfs_free_reserved_extent(root,
+				ret = btrfs_free_and_pin_reserved_extent(root,
 							 bytenr, blocksize);
 				BUG_ON(ret);
 			}
@@ -1828,7 +1829,7 @@
 				btrfs_tree_unlock(next);
 
 				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
-				ret = btrfs_free_reserved_extent(root,
+				ret = btrfs_free_and_pin_reserved_extent(root,
 						path->nodes[*level]->start,
 						path->nodes[*level]->len);
 				BUG_ON(ret);
@@ -1897,7 +1898,7 @@
 
 			WARN_ON(log->root_key.objectid !=
 				BTRFS_TREE_LOG_OBJECTID);
-			ret = btrfs_free_reserved_extent(log, next->start,
+			ret = btrfs_free_and_pin_reserved_extent(log, next->start,
 							 next->len);
 			BUG_ON(ret);
 		}
@@ -2013,10 +2014,10 @@
 	/* wait for previous tree log sync to complete */
 	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
 		wait_log_commit(trans, root, root->log_transid - 1);
-
 	while (1) {
 		unsigned long batch = root->log_batch;
-		if (root->log_multiple_pids) {
+		/* when we're on an ssd, just kick the log commit out */
+		if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) {
 			mutex_unlock(&root->log_mutex);
 			schedule_timeout_uninterruptible(1);
 			mutex_lock(&root->log_mutex);
@@ -2117,9 +2118,9 @@
 	BUG_ON(ret);
 	btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
 
-	btrfs_set_super_log_root(&root->fs_info->super_for_commit,
+	btrfs_set_super_log_root(root->fs_info->super_for_commit,
 				log_root_tree->node->start);
-	btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
+	btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
 				btrfs_header_level(log_root_tree->node));
 
 	log_root_tree->log_batch = 0;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index f2a4cc7..f8e29431 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -366,6 +366,14 @@
 		}
 		INIT_LIST_HEAD(&device->dev_alloc_list);
 
+		/* init readahead state */
+		spin_lock_init(&device->reada_lock);
+		device->reada_curr_zone = NULL;
+		atomic_set(&device->reada_in_flight, 0);
+		device->reada_next = 0;
+		INIT_RADIX_TREE(&device->reada_zones, GFP_NOFS & ~__GFP_WAIT);
+		INIT_RADIX_TREE(&device->reada_extents, GFP_NOFS & ~__GFP_WAIT);
+
 		mutex_lock(&fs_devices->device_list_mutex);
 		list_add_rcu(&device->dev_list, &fs_devices->devices);
 		mutex_unlock(&fs_devices->device_list_mutex);
@@ -597,10 +605,8 @@
 		set_blocksize(bdev, 4096);
 
 		bh = btrfs_read_dev_super(bdev);
-		if (!bh) {
-			ret = -EINVAL;
+		if (!bh)
 			goto error_close;
-		}
 
 		disk_super = (struct btrfs_super_block *)bh->b_data;
 		devid = btrfs_stack_device_id(&disk_super->dev_item);
@@ -655,7 +661,7 @@
 		continue;
 	}
 	if (fs_devices->open_devices == 0) {
-		ret = -EIO;
+		ret = -EINVAL;
 		goto out;
 	}
 	fs_devices->seeding = seeding;
@@ -1013,8 +1019,13 @@
 	}
 	BUG_ON(ret);
 
-	if (device->bytes_used > 0)
-		device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
+	if (device->bytes_used > 0) {
+		u64 len = btrfs_dev_extent_length(leaf, extent);
+		device->bytes_used -= len;
+		spin_lock(&root->fs_info->free_chunk_lock);
+		root->fs_info->free_chunk_space += len;
+		spin_unlock(&root->fs_info->free_chunk_lock);
+	}
 	ret = btrfs_del_item(trans, root, path);
 
 out:
@@ -1356,6 +1367,11 @@
 	if (ret)
 		goto error_undo;
 
+	spin_lock(&root->fs_info->free_chunk_lock);
+	root->fs_info->free_chunk_space = device->total_bytes -
+		device->bytes_used;
+	spin_unlock(&root->fs_info->free_chunk_lock);
+
 	device->in_fs_metadata = 0;
 	btrfs_scrub_cancel_dev(root, device);
 
@@ -1387,8 +1403,8 @@
 	call_rcu(&device->rcu, free_device);
 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
-	num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
-	btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
+	num_devices = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
+	btrfs_set_super_num_devices(root->fs_info->super_copy, num_devices);
 
 	if (cur_devices->open_devices == 0) {
 		struct btrfs_fs_devices *fs_devices;
@@ -1450,7 +1466,7 @@
 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 	struct btrfs_fs_devices *old_devices;
 	struct btrfs_fs_devices *seed_devices;
-	struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
+	struct btrfs_super_block *disk_super = root->fs_info->super_copy;
 	struct btrfs_device *device;
 	u64 super_flags;
 
@@ -1691,15 +1707,19 @@
 		root->fs_info->fs_devices->num_can_discard++;
 	root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
 
+	spin_lock(&root->fs_info->free_chunk_lock);
+	root->fs_info->free_chunk_space += device->total_bytes;
+	spin_unlock(&root->fs_info->free_chunk_lock);
+
 	if (!blk_queue_nonrot(bdev_get_queue(bdev)))
 		root->fs_info->fs_devices->rotating = 1;
 
-	total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
-	btrfs_set_super_total_bytes(&root->fs_info->super_copy,
+	total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
+	btrfs_set_super_total_bytes(root->fs_info->super_copy,
 				    total_bytes + device->total_bytes);
 
-	total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
-	btrfs_set_super_num_devices(&root->fs_info->super_copy,
+	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
+	btrfs_set_super_num_devices(root->fs_info->super_copy,
 				    total_bytes + 1);
 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
@@ -1790,7 +1810,7 @@
 		      struct btrfs_device *device, u64 new_size)
 {
 	struct btrfs_super_block *super_copy =
-		&device->dev_root->fs_info->super_copy;
+		device->dev_root->fs_info->super_copy;
 	u64 old_total = btrfs_super_total_bytes(super_copy);
 	u64 diff = new_size - device->total_bytes;
 
@@ -1849,7 +1869,7 @@
 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
 			chunk_offset)
 {
-	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
+	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
 	struct btrfs_disk_key *disk_key;
 	struct btrfs_chunk *chunk;
 	u8 *ptr;
@@ -2175,7 +2195,7 @@
 	bool retried = false;
 	struct extent_buffer *l;
 	struct btrfs_key key;
-	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
+	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
 	u64 old_total = btrfs_super_total_bytes(super_copy);
 	u64 old_size = device->total_bytes;
 	u64 diff = device->total_bytes - new_size;
@@ -2192,8 +2212,12 @@
 	lock_chunks(root);
 
 	device->total_bytes = new_size;
-	if (device->writeable)
+	if (device->writeable) {
 		device->fs_devices->total_rw_bytes -= diff;
+		spin_lock(&root->fs_info->free_chunk_lock);
+		root->fs_info->free_chunk_space -= diff;
+		spin_unlock(&root->fs_info->free_chunk_lock);
+	}
 	unlock_chunks(root);
 
 again:
@@ -2257,6 +2281,9 @@
 		device->total_bytes = old_size;
 		if (device->writeable)
 			device->fs_devices->total_rw_bytes += diff;
+		spin_lock(&root->fs_info->free_chunk_lock);
+		root->fs_info->free_chunk_space += diff;
+		spin_unlock(&root->fs_info->free_chunk_lock);
 		unlock_chunks(root);
 		goto done;
 	}
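
The free_chunk_space hunks in this file all follow one rule: every path that changes how many bytes a device could still hand to the chunk allocator (device add, device shrink and its rollback, dev-extent removal, chunk allocation) adjusts the counter under free_chunk_lock. Reduced to a sketch with illustrative names:

    #include <pthread.h>

    struct fs_counters {
            pthread_mutex_t lock;
            unsigned long long free_chunk_space;    /* bytes still unallocated */
    };

    static void device_added(struct fs_counters *fs, unsigned long long total,
                             unsigned long long used)
    {
            pthread_mutex_lock(&fs->lock);
            fs->free_chunk_space += total - used;
            pthread_mutex_unlock(&fs->lock);
    }

    static void chunk_allocated(struct fs_counters *fs,
                                unsigned long long stripe_size, int num_stripes)
    {
            pthread_mutex_lock(&fs->lock);
            fs->free_chunk_space -= stripe_size * num_stripes;
            pthread_mutex_unlock(&fs->lock);
    }

    int main(void)
    {
            struct fs_counters fs = { PTHREAD_MUTEX_INITIALIZER, 0 };

            device_added(&fs, 1000, 100);           /* +900 */
            chunk_allocated(&fs, 64, 2);            /* -128 */
            return fs.free_chunk_space != 772;
    }
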
@@ -2292,7 +2319,7 @@
 			   struct btrfs_key *key,
 			   struct btrfs_chunk *chunk, int item_size)
 {
-	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
+	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
 	struct btrfs_disk_key disk_key;
 	u32 array_size;
 	u8 *ptr;
@@ -2615,6 +2642,11 @@
 		index++;
 	}
 
+	spin_lock(&extent_root->fs_info->free_chunk_lock);
+	extent_root->fs_info->free_chunk_space -= (stripe_size *
+						   map->num_stripes);
+	spin_unlock(&extent_root->fs_info->free_chunk_lock);
+
 	index = 0;
 	stripe = &chunk->stripe;
 	while (index < map->num_stripes) {
@@ -2848,7 +2880,7 @@
 
 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 			     u64 logical, u64 *length,
-			     struct btrfs_multi_bio **multi_ret,
+			     struct btrfs_bio **bbio_ret,
 			     int mirror_num)
 {
 	struct extent_map *em;
@@ -2866,18 +2898,18 @@
 	int i;
 	int num_stripes;
 	int max_errors = 0;
-	struct btrfs_multi_bio *multi = NULL;
+	struct btrfs_bio *bbio = NULL;
 
-	if (multi_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
+	if (bbio_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
 		stripes_allocated = 1;
 again:
-	if (multi_ret) {
-		multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
+	if (bbio_ret) {
+		bbio = kzalloc(btrfs_bio_size(stripes_allocated),
 				GFP_NOFS);
-		if (!multi)
+		if (!bbio)
 			return -ENOMEM;
 
-		atomic_set(&multi->error, 0);
+		atomic_set(&bbio->error, 0);
 	}
 
 	read_lock(&em_tree->lock);
@@ -2898,7 +2930,7 @@
 	if (mirror_num > map->num_stripes)
 		mirror_num = 0;
 
-	/* if our multi bio struct is too small, back off and try again */
+	/* if our btrfs_bio struct is too small, back off and try again */
 	if (rw & REQ_WRITE) {
 		if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
 				 BTRFS_BLOCK_GROUP_DUP)) {
@@ -2917,11 +2949,11 @@
 			stripes_required = map->num_stripes;
 		}
 	}
-	if (multi_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
+	if (bbio_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
 	    stripes_allocated < stripes_required) {
 		stripes_allocated = map->num_stripes;
 		free_extent_map(em);
-		kfree(multi);
+		kfree(bbio);
 		goto again;
 	}
 	stripe_nr = offset;
@@ -2950,7 +2982,7 @@
 		*length = em->len - offset;
 	}
 
-	if (!multi_ret)
+	if (!bbio_ret)
 		goto out;
 
 	num_stripes = 1;
@@ -2975,13 +3007,17 @@
 			stripe_index = find_live_mirror(map, 0,
 					    map->num_stripes,
 					    current->pid % map->num_stripes);
+			mirror_num = stripe_index + 1;
 		}
 
 	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
-		if (rw & (REQ_WRITE | REQ_DISCARD))
+		if (rw & (REQ_WRITE | REQ_DISCARD)) {
 			num_stripes = map->num_stripes;
-		else if (mirror_num)
+		} else if (mirror_num) {
 			stripe_index = mirror_num - 1;
+		} else {
+			mirror_num = 1;
+		}
 
 	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
 		int factor = map->num_stripes / map->sub_stripes;
@@ -3001,6 +3037,7 @@
 			stripe_index = find_live_mirror(map, stripe_index,
 					      map->sub_stripes, stripe_index +
 					      current->pid % map->sub_stripes);
+			mirror_num = stripe_index + 1;
 		}
 	} else {
 		/*
@@ -3009,15 +3046,16 @@
 		 * stripe_index is the number of our device in the stripe array
 		 */
 		stripe_index = do_div(stripe_nr, map->num_stripes);
+		mirror_num = stripe_index + 1;
 	}
 	BUG_ON(stripe_index >= map->num_stripes);
 
 	if (rw & REQ_DISCARD) {
 		for (i = 0; i < num_stripes; i++) {
-			multi->stripes[i].physical =
+			bbio->stripes[i].physical =
 				map->stripes[stripe_index].physical +
 				stripe_offset + stripe_nr * map->stripe_len;
-			multi->stripes[i].dev = map->stripes[stripe_index].dev;
+			bbio->stripes[i].dev = map->stripes[stripe_index].dev;
 
 			if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
 				u64 stripes;
@@ -3038,16 +3076,16 @@
 				}
 				stripes = stripe_nr_end - 1 - j;
 				do_div(stripes, map->num_stripes);
-				multi->stripes[i].length = map->stripe_len *
+				bbio->stripes[i].length = map->stripe_len *
 					(stripes - stripe_nr + 1);
 
 				if (i == 0) {
-					multi->stripes[i].length -=
+					bbio->stripes[i].length -=
 						stripe_offset;
 					stripe_offset = 0;
 				}
 				if (stripe_index == last_stripe)
-					multi->stripes[i].length -=
+					bbio->stripes[i].length -=
 						stripe_end_offset;
 			} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
 				u64 stripes;
@@ -3072,11 +3110,11 @@
 				}
 				stripes = stripe_nr_end - 1 - j;
 				do_div(stripes, factor);
-				multi->stripes[i].length = map->stripe_len *
+				bbio->stripes[i].length = map->stripe_len *
 					(stripes - stripe_nr + 1);
 
 				if (i < map->sub_stripes) {
-					multi->stripes[i].length -=
+					bbio->stripes[i].length -=
 						stripe_offset;
 					if (i == map->sub_stripes - 1)
 						stripe_offset = 0;
@@ -3084,11 +3122,11 @@
 				if (stripe_index >= last_stripe &&
 				    stripe_index <= (last_stripe +
 						     map->sub_stripes - 1)) {
-					multi->stripes[i].length -=
+					bbio->stripes[i].length -=
 						stripe_end_offset;
 				}
 			} else
-				multi->stripes[i].length = *length;
+				bbio->stripes[i].length = *length;
 
 			stripe_index++;
 			if (stripe_index == map->num_stripes) {
@@ -3099,19 +3137,20 @@
 		}
 	} else {
 		for (i = 0; i < num_stripes; i++) {
-			multi->stripes[i].physical =
+			bbio->stripes[i].physical =
 				map->stripes[stripe_index].physical +
 				stripe_offset +
 				stripe_nr * map->stripe_len;
-			multi->stripes[i].dev =
+			bbio->stripes[i].dev =
 				map->stripes[stripe_index].dev;
 			stripe_index++;
 		}
 	}
-	if (multi_ret) {
-		*multi_ret = multi;
-		multi->num_stripes = num_stripes;
-		multi->max_errors = max_errors;
+	if (bbio_ret) {
+		*bbio_ret = bbio;
+		bbio->num_stripes = num_stripes;
+		bbio->max_errors = max_errors;
+		bbio->mirror_num = mirror_num;
 	}
 out:
 	free_extent_map(em);
@@ -3120,9 +3159,9 @@
 
 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		      u64 logical, u64 *length,
-		      struct btrfs_multi_bio **multi_ret, int mirror_num)
+		      struct btrfs_bio **bbio_ret, int mirror_num)
 {
-	return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
+	return __btrfs_map_block(map_tree, rw, logical, length, bbio_ret,
 				 mirror_num);
 }
 
@@ -3191,28 +3230,30 @@
 	return 0;
 }
 
-static void end_bio_multi_stripe(struct bio *bio, int err)
+static void btrfs_end_bio(struct bio *bio, int err)
 {
-	struct btrfs_multi_bio *multi = bio->bi_private;
+	struct btrfs_bio *bbio = bio->bi_private;
 	int is_orig_bio = 0;
 
 	if (err)
-		atomic_inc(&multi->error);
+		atomic_inc(&bbio->error);
 
-	if (bio == multi->orig_bio)
+	if (bio == bbio->orig_bio)
 		is_orig_bio = 1;
 
-	if (atomic_dec_and_test(&multi->stripes_pending)) {
+	if (atomic_dec_and_test(&bbio->stripes_pending)) {
 		if (!is_orig_bio) {
 			bio_put(bio);
-			bio = multi->orig_bio;
+			bio = bbio->orig_bio;
 		}
-		bio->bi_private = multi->private;
-		bio->bi_end_io = multi->end_io;
+		bio->bi_private = bbio->private;
+		bio->bi_end_io = bbio->end_io;
+		bio->bi_bdev = (struct block_device *)
+					(unsigned long)bbio->mirror_num;
 		/* only send an error to the higher layers if it is
 		 * beyond the tolerance of the multi-bio
 		 */
-		if (atomic_read(&multi->error) > multi->max_errors) {
+		if (atomic_read(&bbio->error) > bbio->max_errors) {
 			err = -EIO;
 		} else if (err) {
 			/*
@@ -3222,7 +3263,7 @@
 			set_bit(BIO_UPTODATE, &bio->bi_flags);
 			err = 0;
 		}
-		kfree(multi);
+		kfree(bbio);
 
 		bio_endio(bio, err);
 	} else if (!is_orig_bio) {
@@ -3302,20 +3343,20 @@
 	u64 logical = (u64)bio->bi_sector << 9;
 	u64 length = 0;
 	u64 map_length;
-	struct btrfs_multi_bio *multi = NULL;
 	int ret;
 	int dev_nr = 0;
 	int total_devs = 1;
+	struct btrfs_bio *bbio = NULL;
 
 	length = bio->bi_size;
 	map_tree = &root->fs_info->mapping_tree;
 	map_length = length;
 
-	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
+	ret = btrfs_map_block(map_tree, rw, logical, &map_length, &bbio,
 			      mirror_num);
 	BUG_ON(ret);
 
-	total_devs = multi->num_stripes;
+	total_devs = bbio->num_stripes;
 	if (map_length < length) {
 		printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
 		       "len %llu\n", (unsigned long long)logical,
@@ -3323,25 +3364,28 @@
 		       (unsigned long long)map_length);
 		BUG();
 	}
-	multi->end_io = first_bio->bi_end_io;
-	multi->private = first_bio->bi_private;
-	multi->orig_bio = first_bio;
-	atomic_set(&multi->stripes_pending, multi->num_stripes);
+
+	bbio->orig_bio = first_bio;
+	bbio->private = first_bio->bi_private;
+	bbio->end_io = first_bio->bi_end_io;
+	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
 
 	while (dev_nr < total_devs) {
-		if (total_devs > 1) {
-			if (dev_nr < total_devs - 1) {
-				bio = bio_clone(first_bio, GFP_NOFS);
-				BUG_ON(!bio);
-			} else {
-				bio = first_bio;
-			}
-			bio->bi_private = multi;
-			bio->bi_end_io = end_bio_multi_stripe;
+		if (dev_nr < total_devs - 1) {
+			bio = bio_clone(first_bio, GFP_NOFS);
+			BUG_ON(!bio);
+		} else {
+			bio = first_bio;
 		}
-		bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
-		dev = multi->stripes[dev_nr].dev;
+		bio->bi_private = bbio;
+		bio->bi_end_io = btrfs_end_bio;
+		bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
+		dev = bbio->stripes[dev_nr].dev;
 		if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
+			pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
+				 "(%s id %llu), size=%u\n", rw,
+				 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
+				 dev->name, dev->devid, bio->bi_size);
 			bio->bi_bdev = dev->bdev;
 			if (async_submit)
 				schedule_bio(root, dev, rw, bio);
@@ -3354,8 +3398,6 @@
 		}
 		dev_nr++;
 	}
-	if (total_devs == 1)
-		kfree(multi);
 	return 0;
 }
 
@@ -3616,15 +3658,20 @@
 	fill_device_from_item(leaf, dev_item, device);
 	device->dev_root = root->fs_info->dev_root;
 	device->in_fs_metadata = 1;
-	if (device->writeable)
+	if (device->writeable) {
 		device->fs_devices->total_rw_bytes += device->total_bytes;
+		spin_lock(&root->fs_info->free_chunk_lock);
+		root->fs_info->free_chunk_space += device->total_bytes -
+			device->bytes_used;
+		spin_unlock(&root->fs_info->free_chunk_lock);
+	}
 	ret = 0;
 	return ret;
 }
 
 int btrfs_read_sys_array(struct btrfs_root *root)
 {
-	struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
+	struct btrfs_super_block *super_copy = root->fs_info->super_copy;
 	struct extent_buffer *sb;
 	struct btrfs_disk_key *disk_key;
 	struct btrfs_chunk *chunk;
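
__btrfs_map_block() now reports mirror_num as stripe_index + 1, making the numbering 1-based with 0 reserved for "no preference"; the scrub fixup loop earlier in this series relies on the same convention when it skips i + 1 == mirror_num. A toy illustration of the convention — names are illustrative only:

    #include <stdio.h>

    #define NUM_COPIES 2

    /* 1-based mirror numbering: 0 = no preference, 1..n = a specific copy */
    static int find_good_copy(int failed_mirror_num, const int copy_ok[])
    {
            int i;

            for (i = 0; i < NUM_COPIES; i++) {
                    if (i + 1 == failed_mirror_num)
                            continue;       /* skip the copy known to be bad */
                    if (copy_ok[i])
                            return i + 1;   /* report the result 1-based too */
            }
            return 0;                       /* no usable replica */
    }

    int main(void)
    {
            int copy_ok[NUM_COPIES] = { 1, 1 };

            /* copy 1 failed its checksum; pick another mirror */
            printf("good copy: %d\n", find_good_copy(1, copy_ok));
            return 0;
    }
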
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 6d866db..ab5b1c4 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -92,6 +92,14 @@
 	struct btrfs_work work;
 	struct rcu_head rcu;
 	struct work_struct rcu_work;
+
+	/* readahead state */
+	spinlock_t reada_lock;
+	atomic_t reada_in_flight;
+	u64 reada_next;
+	struct reada_zone *reada_curr_zone;
+	struct radix_tree_root reada_zones;
+	struct radix_tree_root reada_extents;
 };
 
 struct btrfs_fs_devices {
@@ -136,7 +144,10 @@
 	u64 length; /* only used for discard mappings */
 };
 
-struct btrfs_multi_bio {
+struct btrfs_bio;
+typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
+
+struct btrfs_bio {
 	atomic_t stripes_pending;
 	bio_end_io_t *end_io;
 	struct bio *orig_bio;
@@ -144,6 +155,7 @@
 	atomic_t error;
 	int max_errors;
 	int num_stripes;
+	int mirror_num;
 	struct btrfs_bio_stripe stripes[];
 };
 
@@ -171,7 +183,7 @@
 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
 				   u64 end, u64 *length);
 
-#define btrfs_multi_bio_size(n) (sizeof(struct btrfs_multi_bio) + \
+#define btrfs_bio_size(n) (sizeof(struct btrfs_bio) + \
 			    (sizeof(struct btrfs_bio_stripe) * (n)))
 
 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
@@ -180,7 +192,7 @@
 			   u64 chunk_offset, u64 start, u64 num_bytes);
 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		    u64 logical, u64 *length,
-		    struct btrfs_multi_bio **multi_ret, int mirror_num);
+		    struct btrfs_bio **bbio_ret, int mirror_num);
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 		     u64 chunk_start, u64 physical, u64 devid,
 		     u64 **logical, int *naddrs, int *stripe_len);
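
btrfs_bio ends in a C99 flexible array member, and btrfs_bio_size(n) computes the single allocation covering the header plus n stripe entries. The idiom in isolation, as a self-contained sketch:

    #include <stdio.h>
    #include <stdlib.h>

    struct stripe {
            unsigned long long physical;
    };

    struct bbio {
            int num_stripes;
            int mirror_num;
            struct stripe stripes[];        /* C99 flexible array member */
    };

    #define BBIO_SIZE(n) (sizeof(struct bbio) + (n) * sizeof(struct stripe))

    int main(void)
    {
            int n = 4;
            struct bbio *b = calloc(1, BBIO_SIZE(n));

            if (!b)
                    return 1;
            b->num_stripes = n;
            printf("allocated %zu bytes for %d stripes\n", BBIO_SIZE(n), n);
            free(b);
            return 0;
    }
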
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 426aa46..3848b04 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -127,6 +127,17 @@
 again:
 	ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
 				      name, name_len, value, size);
+	/*
+	 * If we're setting an xattr to a new value but the new value is, say,
+	 * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting
+	 * back from split_leaf.  This is because it thinks we'll be extending
+	 * the existing item size, but we're asking for enough space to add the
+	 * item itself.  So if we get EOVERFLOW just set ret to EEXIST and let
+	 * the rest of the function figure it out.
+	 */
+	if (ret == -EOVERFLOW)
+		ret = -EEXIST;
+
 	if (ret == -EEXIST) {
 		if (flags & XATTR_CREATE)
 			goto out;
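
The xattr change folds -EOVERFLOW from the insert into the existing -EEXIST handling rather than growing a second error path. A sketch of that errno-folding shape, with stubbed-out helpers standing in for the btrfs calls (all names hypothetical):

    #include <errno.h>
    #include <stdio.h>

    static int try_insert(void)
    {
            return -EOVERFLOW;      /* stub: simulate the split_leaf corner case */
    }

    static int do_replace(void)
    {
            return 0;               /* stub: replacing the item succeeded */
    }

    static int set_xattr(int replace_allowed)
    {
            int ret = try_insert();

            /*
             * fold the odd errno into the one the code below already knows
             * how to handle, instead of growing a second error path
             */
            if (ret == -EOVERFLOW)
                    ret = -EEXIST;
            if (ret == -EEXIST && replace_allowed)
                    ret = do_replace();
            return ret;
    }

    int main(void)
    {
            printf("%d\n", set_xattr(1));   /* 0: fell through to replace */
            return 0;
    }
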
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c1f063c..cf0b153 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -645,20 +645,20 @@
 }
 
 static struct cifsLockInfo *
-cifs_lock_init(__u64 len, __u64 offset, __u8 type, __u16 netfid)
+cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
 {
-	struct cifsLockInfo *li =
+	struct cifsLockInfo *lock =
 		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
-	if (!li)
-		return li;
-	li->netfid = netfid;
-	li->offset = offset;
-	li->length = len;
-	li->type = type;
-	li->pid = current->tgid;
-	INIT_LIST_HEAD(&li->blist);
-	init_waitqueue_head(&li->block_q);
-	return li;
+	if (!lock)
+		return lock;
+	lock->offset = offset;
+	lock->length = length;
+	lock->type = type;
+	lock->netfid = netfid;
+	lock->pid = current->tgid;
+	INIT_LIST_HEAD(&lock->blist);
+	init_waitqueue_head(&lock->block_q);
+	return lock;
 }
 
 static void
@@ -672,7 +672,7 @@
 }
 
 static bool
-cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
+__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
 			__u64 length, __u8 type, __u16 netfid,
 			struct cifsLockInfo **conf_lock)
 {
@@ -694,6 +694,14 @@
 	return false;
 }
 
+static bool
+cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
+			struct cifsLockInfo **conf_lock)
+{
+	return __cifs_find_lock_conflict(cinode, lock->offset, lock->length,
+					 lock->type, lock->netfid, conf_lock);
+}
+
 static int
 cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
 	       __u8 type, __u16 netfid, struct file_lock *flock)
@@ -704,8 +712,8 @@
 
 	mutex_lock(&cinode->lock_mutex);
 
-	exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
-					&conf_lock);
+	exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid,
+					  &conf_lock);
 	if (exist) {
 		flock->fl_start = conf_lock->offset;
 		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
@@ -723,40 +731,27 @@
 	return rc;
 }
 
-static int
-cifs_lock_add(struct cifsInodeInfo *cinode, __u64 len, __u64 offset,
-	      __u8 type, __u16 netfid)
+static void
+cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
 {
-	struct cifsLockInfo *li;
-
-	li = cifs_lock_init(len, offset, type, netfid);
-	if (!li)
-		return -ENOMEM;
-
 	mutex_lock(&cinode->lock_mutex);
-	list_add_tail(&li->llist, &cinode->llist);
+	list_add_tail(&lock->llist, &cinode->llist);
 	mutex_unlock(&cinode->lock_mutex);
-	return 0;
 }
 
 static int
-cifs_lock_add_if(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
-		 __u8 type, __u16 netfid, bool wait)
+cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
+		 bool wait)
 {
-	struct cifsLockInfo *lock, *conf_lock;
+	struct cifsLockInfo *conf_lock;
 	bool exist;
 	int rc = 0;
 
-	lock = cifs_lock_init(length, offset, type, netfid);
-	if (!lock)
-		return -ENOMEM;
-
 try_again:
 	exist = false;
 	mutex_lock(&cinode->lock_mutex);
 
-	exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
-					&conf_lock);
+	exist = cifs_find_lock_conflict(cinode, lock, &conf_lock);
 	if (!exist && cinode->can_cache_brlcks) {
 		list_add_tail(&lock->llist, &cinode->llist);
 		mutex_unlock(&cinode->lock_mutex);
@@ -775,13 +770,10 @@
 					(lock->blist.next == &lock->blist));
 		if (!rc)
 			goto try_again;
-		else {
-			mutex_lock(&cinode->lock_mutex);
-			list_del_init(&lock->blist);
-		}
+		mutex_lock(&cinode->lock_mutex);
+		list_del_init(&lock->blist);
 	}
 
-	kfree(lock);
 	mutex_unlock(&cinode->lock_mutex);
 	return rc;
 }
@@ -933,7 +925,7 @@
 		else
 			type = CIFS_WRLCK;
 
-		lck = cifs_lock_init(length, flock->fl_start, type,
+		lck = cifs_lock_init(flock->fl_start, length, type,
 				     cfile->netfid);
 		if (!lck) {
 			rc = -ENOMEM;
@@ -1070,14 +1062,12 @@
 		if (rc != 0)
 			cERROR(1, "Error unlocking previously locked "
 				   "range %d during test of lock", rc);
-		rc = 0;
-		return rc;
+		return 0;
 	}
 
 	if (type & LOCKING_ANDX_SHARED_LOCK) {
 		flock->fl_type = F_WRLCK;
-		rc = 0;
-		return rc;
+		return 0;
 	}
 
 	rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
@@ -1095,8 +1085,7 @@
 	} else
 		flock->fl_type = F_WRLCK;
 
-	rc = 0;
-	return rc;
+	return 0;
 }
 
 static void
@@ -1254,20 +1243,26 @@
 	}
 
 	if (lock) {
-		rc = cifs_lock_add_if(cinode, flock->fl_start, length,
-				      type, netfid, wait_flag);
+		struct cifsLockInfo *lock;
+
+		lock = cifs_lock_init(flock->fl_start, length, type, netfid);
+		if (!lock)
+			return -ENOMEM;
+
+		rc = cifs_lock_add_if(cinode, lock, wait_flag);
 		if (rc < 0)
-			return rc;
-		else if (!rc)
+			kfree(lock);
+		if (rc <= 0)
 			goto out;
 
 		rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
 				 flock->fl_start, 0, 1, type, wait_flag, 0);
-		if (rc == 0) {
-			/* For Windows locks we must store them. */
-			rc = cifs_lock_add(cinode, length, flock->fl_start,
-					   type, netfid);
+		if (rc) {
+			kfree(lock);
+			goto out;
 		}
+
+		cifs_lock_add(cinode, lock);
 	} else if (unlock)
 		rc = cifs_unlock_range(cfile, flock, xid);
 
diff --git a/fs/dcache.c b/fs/dcache.c
index 274f13e..a901c69 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -546,9 +546,11 @@
 	 * would make it unreachable from the root,
 	 * we might still populate it if it was a
 	 * working directory or similar).
+	 * We also need to leave mountpoints alone,
+	 * directory or not.
 	 */
-	if (dentry->d_count > 1) {
-		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
+	if (dentry->d_count > 1 && dentry->d_inode) {
+		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
 			spin_unlock(&dentry->d_lock);
 			return -EBUSY;
 		}
diff --git a/fs/jffs2/compr.c b/fs/jffs2/compr.c
index de42470..5b6c9d1 100644
--- a/fs/jffs2/compr.c
+++ b/fs/jffs2/compr.c
@@ -53,6 +53,78 @@
 	return 0;
 }
 
+/*
+ * jffs2_selected_compress:
+ * @compr: Explicit compression type to use (i.e., JFFS2_COMPR_ZLIB).
+ *	If 0, just take the first available compression mode.
+ * @data_in: Pointer to uncompressed data
+ * @cpage_out: Pointer to returned pointer to buffer for compressed data
+ * @datalen: On entry, holds the amount of data available for compression.
+ *	On exit, expected to hold the amount of data actually compressed.
+ * @cdatalen: On entry, holds the amount of space available for compressed
+ *	data. On exit, expected to hold the actual size of the compressed
+ *	data.
+ *
+ * Returns: the compression type used.  Zero is used to show that the data
+ * could not be compressed, probably because we couldn't find the requested
+ * compression mode.
+ */
+static int jffs2_selected_compress(u8 compr, unsigned char *data_in,
+		unsigned char **cpage_out, u32 *datalen, u32 *cdatalen)
+{
+	struct jffs2_compressor *this;
+	int err, ret = JFFS2_COMPR_NONE;
+	uint32_t orig_slen, orig_dlen;
+	char *output_buf;
+
+	output_buf = kmalloc(*cdatalen, GFP_KERNEL);
+	if (!output_buf) {
+		printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n");
+		return ret;
+	}
+	orig_slen = *datalen;
+	orig_dlen = *cdatalen;
+	spin_lock(&jffs2_compressor_list_lock);
+	list_for_each_entry(this, &jffs2_compressor_list, list) {
+		/* Skip decompress-only and disabled modules */
+		if (!this->compress || this->disabled)
+			continue;
+
+		/* Skip if not the desired compression type */
+		if (compr && (compr != this->compr))
+			continue;
+
+		/*
+		 * Either compression type was unspecified, or we found our
+		 * compressor; either way, we're good to go.
+		 */
+		this->usecount++;
+		spin_unlock(&jffs2_compressor_list_lock);
+
+		*datalen  = orig_slen;
+		*cdatalen = orig_dlen;
+		err = this->compress(data_in, output_buf, datalen, cdatalen);
+
+		spin_lock(&jffs2_compressor_list_lock);
+		this->usecount--;
+		if (!err) {
+			/* Success */
+			ret = this->compr;
+			this->stat_compr_blocks++;
+			this->stat_compr_orig_size += *datalen;
+			this->stat_compr_new_size += *cdatalen;
+			break;
+		}
+	}
+	spin_unlock(&jffs2_compressor_list_lock);
+	if (ret == JFFS2_COMPR_NONE)
+		kfree(output_buf);
+	else
+		*cpage_out = output_buf;
+
+	return ret;
+}
+
 /* jffs2_compress:
  * @data_in: Pointer to uncompressed data
  * @cpage_out: Pointer to returned pointer to buffer for compressed data
@@ -76,47 +148,23 @@
 			uint32_t *datalen, uint32_t *cdatalen)
 {
 	int ret = JFFS2_COMPR_NONE;
-	int compr_ret;
+	int mode, compr_ret;
 	struct jffs2_compressor *this, *best=NULL;
 	unsigned char *output_buf = NULL, *tmp_buf;
 	uint32_t orig_slen, orig_dlen;
 	uint32_t best_slen=0, best_dlen=0;
 
-	switch (jffs2_compression_mode) {
+	if (c->mount_opts.override_compr)
+		mode = c->mount_opts.compr;
+	else
+		mode = jffs2_compression_mode;
+
+	switch (mode) {
 	case JFFS2_COMPR_MODE_NONE:
 		break;
 	case JFFS2_COMPR_MODE_PRIORITY:
-		output_buf = kmalloc(*cdatalen,GFP_KERNEL);
-		if (!output_buf) {
-			printk(KERN_WARNING "JFFS2: No memory for compressor allocation. Compression failed.\n");
-			goto out;
-		}
-		orig_slen = *datalen;
-		orig_dlen = *cdatalen;
-		spin_lock(&jffs2_compressor_list_lock);
-		list_for_each_entry(this, &jffs2_compressor_list, list) {
-			/* Skip decompress-only backwards-compatibility and disabled modules */
-			if ((!this->compress)||(this->disabled))
-				continue;
-
-			this->usecount++;
-			spin_unlock(&jffs2_compressor_list_lock);
-			*datalen  = orig_slen;
-			*cdatalen = orig_dlen;
-			compr_ret = this->compress(data_in, output_buf, datalen, cdatalen);
-			spin_lock(&jffs2_compressor_list_lock);
-			this->usecount--;
-			if (!compr_ret) {
-				ret = this->compr;
-				this->stat_compr_blocks++;
-				this->stat_compr_orig_size += *datalen;
-				this->stat_compr_new_size  += *cdatalen;
-				break;
-			}
-		}
-		spin_unlock(&jffs2_compressor_list_lock);
-		if (ret == JFFS2_COMPR_NONE)
-			kfree(output_buf);
+		ret = jffs2_selected_compress(0, data_in, cpage_out, datalen,
+				cdatalen);
 		break;
 	case JFFS2_COMPR_MODE_SIZE:
 	case JFFS2_COMPR_MODE_FAVOURLZO:
@@ -174,22 +222,28 @@
 			best->stat_compr_orig_size += best_slen;
 			best->stat_compr_new_size  += best_dlen;
 			ret = best->compr;
+			*cpage_out = output_buf;
 		}
 		spin_unlock(&jffs2_compressor_list_lock);
 		break;
+	case JFFS2_COMPR_MODE_FORCELZO:
+		ret = jffs2_selected_compress(JFFS2_COMPR_LZO, data_in,
+				cpage_out, datalen, cdatalen);
+		break;
+	case JFFS2_COMPR_MODE_FORCEZLIB:
+		ret = jffs2_selected_compress(JFFS2_COMPR_ZLIB, data_in,
+				cpage_out, datalen, cdatalen);
+		break;
 	default:
 		printk(KERN_ERR "JFFS2: unknown compression mode.\n");
 	}
- out:
+
 	if (ret == JFFS2_COMPR_NONE) {
 		*cpage_out = data_in;
 		*datalen = *cdatalen;
 		none_stat_compr_blocks++;
 		none_stat_compr_size += *datalen;
 	}
-	else {
-		*cpage_out = output_buf;
-	}
 	return ret;
 }
 
diff --git a/fs/jffs2/compr.h b/fs/jffs2/compr.h
index 13bb759..5e91d57 100644
--- a/fs/jffs2/compr.h
+++ b/fs/jffs2/compr.h
@@ -40,6 +40,8 @@
 #define JFFS2_COMPR_MODE_PRIORITY   1
 #define JFFS2_COMPR_MODE_SIZE       2
 #define JFFS2_COMPR_MODE_FAVOURLZO  3
+#define JFFS2_COMPR_MODE_FORCELZO   4
+#define JFFS2_COMPR_MODE_FORCEZLIB  5
 
 #define FAVOUR_LZO_PERCENT 80
 
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 7286e44..4b8afe3 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -379,7 +379,7 @@
 	jffs2_do_setattr(inode, &iattr);
 }
 
-int jffs2_remount_fs (struct super_block *sb, int *flags, char *data)
+int jffs2_do_remount_fs(struct super_block *sb, int *flags, char *data)
 {
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
 
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 0bc6a6c..55a0c1d 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -29,6 +29,11 @@
 
 struct jffs2_inodirty;
 
+struct jffs2_mount_opts {
+	bool override_compr;
+	unsigned int compr;
+};
+
 /* A struct for the overall file system control.  Pointers to
    jffs2_sb_info structs are named `c' in the source code.
    Nee jffs_control
@@ -126,6 +131,7 @@
 #endif
 
 	struct jffs2_summary *summary;		/* Summary information */
+	struct jffs2_mount_opts mount_opts;
 
 #ifdef CONFIG_JFFS2_FS_XATTR
 #define XATTRINDEX_HASHSIZE	(57)
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 6c1755c..ab65ee3 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -176,7 +176,7 @@
 struct inode *jffs2_new_inode (struct inode *dir_i, umode_t mode,
 			       struct jffs2_raw_inode *ri);
 int jffs2_statfs (struct dentry *, struct kstatfs *);
-int jffs2_remount_fs (struct super_block *, int *, char *);
+int jffs2_do_remount_fs(struct super_block *, int *, char *);
 int jffs2_do_fill_super(struct super_block *sb, void *data, int silent);
 void jffs2_gc_release_inode(struct jffs2_sb_info *c,
 			    struct jffs2_inode_info *f);
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 8d8cd34..28107ca 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -275,9 +275,7 @@
 	else
 		c->mtd->unpoint(c->mtd, 0, c->mtd->size);
 #endif
-	if (s)
-		kfree(s);
-
+	kfree(s);
 	return ret;
 }
 
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 853b8e3..e7e9744 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -17,11 +17,13 @@
 #include <linux/fs.h>
 #include <linux/err.h>
 #include <linux/mount.h>
+#include <linux/parser.h>
 #include <linux/jffs2.h>
 #include <linux/pagemap.h>
 #include <linux/mtd/super.h>
 #include <linux/ctype.h>
 #include <linux/namei.h>
+#include <linux/seq_file.h>
 #include <linux/exportfs.h>
 #include "compr.h"
 #include "nodelist.h"
@@ -75,6 +77,37 @@
 	unlock_super(sb);
 }
 
+static const char *jffs2_compr_name(unsigned int compr)
+{
+	switch (compr) {
+	case JFFS2_COMPR_MODE_NONE:
+		return "none";
+#ifdef CONFIG_JFFS2_LZO
+	case JFFS2_COMPR_MODE_FORCELZO:
+		return "lzo";
+#endif
+#ifdef CONFIG_JFFS2_ZLIB
+	case JFFS2_COMPR_MODE_FORCEZLIB:
+		return "zlib";
+#endif
+	default:
+		/* should never happen; programmer error */
+		WARN_ON(1);
+		return "";
+	}
+}
+
+static int jffs2_show_options(struct seq_file *s, struct vfsmount *mnt)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(mnt->mnt_sb);
+	struct jffs2_mount_opts *opts = &c->mount_opts;
+
+	if (opts->override_compr)
+		seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
+
+	return 0;
+}
+
 static int jffs2_sync_fs(struct super_block *sb, int wait)
 {
 	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
@@ -133,6 +166,85 @@
 	.fh_to_parent = jffs2_fh_to_parent,
 };
 
+/*
+ * JFFS2 mount options.
+ *
+ * Opt_override_compr: override default compressor
+ * Opt_err: just end of array marker
+ */
+enum {
+	Opt_override_compr,
+	Opt_err,
+};
+
+static const match_table_t tokens = {
+	{Opt_override_compr, "compr=%s"},
+	{Opt_err, NULL},
+};
+
+static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
+{
+	substring_t args[MAX_OPT_ARGS];
+	char *p, *name;
+
+	if (!data)
+		return 0;
+
+	while ((p = strsep(&data, ","))) {
+		int token;
+
+		if (!*p)
+			continue;
+
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_override_compr:
+			name = match_strdup(&args[0]);
+
+			if (!name)
+				return -ENOMEM;
+			if (!strcmp(name, "none"))
+				c->mount_opts.compr = JFFS2_COMPR_MODE_NONE;
+#ifdef CONFIG_JFFS2_LZO
+			else if (!strcmp(name, "lzo"))
+				c->mount_opts.compr = JFFS2_COMPR_MODE_FORCELZO;
+#endif
+#ifdef CONFIG_JFFS2_ZLIB
+			else if (!strcmp(name, "zlib"))
+				c->mount_opts.compr =
+						JFFS2_COMPR_MODE_FORCEZLIB;
+#endif
+			else {
+				printk(KERN_ERR "JFFS2 Error: unknown compressor \"%s\"\n",
+						name);
+				kfree(name);
+				return -EINVAL;
+			}
+			kfree(name);
+			c->mount_opts.override_compr = true;
+			break;
+		default:
+			printk(KERN_ERR "JFFS2 Error: unrecognized mount option '%s' or missing value\n",
+					p);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int jffs2_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+	struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
+	int err;
+
+	err = jffs2_parse_options(c, data);
+	if (err)
+		return -EINVAL;
+
+	return jffs2_do_remount_fs(sb, flags, data);
+}
+
 static const struct super_operations jffs2_super_operations =
 {
 	.alloc_inode =	jffs2_alloc_inode,
@@ -143,6 +255,7 @@
 	.remount_fs =	jffs2_remount_fs,
 	.evict_inode =	jffs2_evict_inode,
 	.dirty_inode =	jffs2_dirty_inode,
+	.show_options =	jffs2_show_options,
 	.sync_fs =	jffs2_sync_fs,
 };
 
@@ -166,6 +279,12 @@
 	c->os_priv = sb;
 	sb->s_fs_info = c;
 
+	ret = jffs2_parse_options(c, data);
+	if (ret) {
+		kfree(c);
+		return -EINVAL;
+	}
+
 	/* Initialize JFFS2 superblock locks, the further initialization will
 	 * be done later */
 	mutex_init(&c->alloc_sem);
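
The new "compr=" option is honoured both at mount and at remount time, since
both paths go through jffs2_parse_options(). A minimal userspace sketch of
exercising it (device name and mount point are hypothetical):

	#include <sys/mount.h>
	#include <stdio.h>

	int main(void)
	{
		/* force the zlib compressor for this mount */
		if (mount("mtd2", "/mnt/flash", "jffs2", 0, "compr=zlib") < 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}

With override_compr set, jffs2_show_options() reports the choice, so the
active compressor appears as ",compr=zlib" in /proc/mounts.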
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 4515bea..b09e51d 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -578,8 +578,7 @@
 	if (!jffs2_is_writebuffered(c))
 		return 0;
 
-	if (mutex_trylock(&c->alloc_sem)) {
-		mutex_unlock(&c->alloc_sem);
+	if (!mutex_is_locked(&c->alloc_sem)) {
 		printk(KERN_CRIT "jffs2_flush_wbuf() called with alloc_sem not locked!\n");
 		BUG();
 	}
@@ -1026,7 +1025,7 @@
 	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
 	struct mtd_oob_ops ops;
 
-	ops.mode = MTD_OOB_AUTO;
+	ops.mode = MTD_OPS_AUTO_OOB;
 	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
 	ops.oobbuf = c->oobbuf;
 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
@@ -1069,7 +1068,7 @@
 	struct mtd_oob_ops ops;
 	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
 
-	ops.mode = MTD_OOB_AUTO;
+	ops.mode = MTD_OPS_AUTO_OOB;
 	ops.ooblen = cmlen;
 	ops.oobbuf = c->oobbuf;
 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
@@ -1095,7 +1094,7 @@
 	struct mtd_oob_ops ops;
 	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
 
-	ops.mode = MTD_OOB_AUTO;
+	ops.mode = MTD_OPS_AUTO_OOB;
 	ops.ooblen = cmlen;
 	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
 	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
diff --git a/fs/namei.c b/fs/namei.c
index ac6d214..5008f01 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -852,7 +852,7 @@
 		mntput(path->mnt);
 	if (ret == -EISDIR)
 		ret = 0;
-	return ret;
+	return ret < 0 ? ret : need_mntput;
 }
 
 int follow_down_one(struct path *path)
@@ -900,6 +900,7 @@
 			break;
 		path->mnt = mounted;
 		path->dentry = mounted->mnt_root;
+		nd->flags |= LOOKUP_JUMPED;
 		nd->seq = read_seqcount_begin(&path->dentry->d_seq);
 		/*
 		 * Update the inode too. We don't need to re-check the
@@ -1213,6 +1214,8 @@
 		path_put_conditional(path, nd);
 		return err;
 	}
+	if (err)
+		nd->flags |= LOOKUP_JUMPED;
 	*inode = path->dentry->d_inode;
 	return 0;
 }
@@ -2146,6 +2149,10 @@
 	}
 
 	/* create side of things */
+	/*
+	 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been
+	 * cleared when we got to the last component we are about to look up
+	 */
 	error = complete_walk(nd);
 	if (error)
 		return ERR_PTR(error);
@@ -2214,6 +2221,9 @@
 	if (error < 0)
 		goto exit_dput;
 
+	if (error)
+		nd->flags |= LOOKUP_JUMPED;
+
 	error = -ENOENT;
 	if (!path->dentry->d_inode)
 		goto exit_dput;
@@ -2223,6 +2233,10 @@
 
 	path_to_nameidata(path, nd);
 	nd->inode = path->dentry->d_inode;
+	/* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
+	error = complete_walk(nd);
+	if (error)
+		goto exit;
 	error = -EISDIR;
 	if (S_ISDIR(nd->inode->i_mode))
 		goto exit;
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index eef109a..b09ba2d 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -870,6 +870,22 @@
 	spin_unlock(&dbg_lock);
 }
 
+void dbg_dump_sleb(const struct ubifs_info *c,
+		   const struct ubifs_scan_leb *sleb, int offs)
+{
+	struct ubifs_scan_node *snod;
+
+	printk(KERN_DEBUG "(pid %d) start dumping scanned data from LEB %d:%d\n",
+	       current->pid, sleb->lnum, offs);
+
+	list_for_each_entry(snod, &sleb->nodes, list) {
+		cond_resched();
+		printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", sleb->lnum,
+		       snod->offs, snod->len);
+		dbg_dump_node(c, snod->node);
+	}
+}
+
 void dbg_dump_leb(const struct ubifs_info *c, int lnum)
 {
 	struct ubifs_scan_leb *sleb;
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index feb361e..8d9c468 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -269,6 +269,8 @@
 void dbg_dump_lprops(struct ubifs_info *c);
 void dbg_dump_lpt_info(struct ubifs_info *c);
 void dbg_dump_leb(const struct ubifs_info *c, int lnum);
+void dbg_dump_sleb(const struct ubifs_info *c,
+		   const struct ubifs_scan_leb *sleb, int offs);
 void dbg_dump_znode(const struct ubifs_info *c,
 		    const struct ubifs_znode *znode);
 void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat);
@@ -387,6 +389,9 @@
 static inline void dbg_dump_leb(const struct ubifs_info *c,
 				int lnum)                         { return; }
 static inline void
+dbg_dump_sleb(const struct ubifs_info *c,
+	      const struct ubifs_scan_leb *sleb, int offs)        { return; }
+static inline void
 dbg_dump_znode(const struct ubifs_info *c,
 	       const struct ubifs_znode *znode)                   { return; }
 static inline void dbg_dump_heap(struct ubifs_info *c,
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index af02790..ee4f43f 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -983,7 +983,7 @@
 }
 
 /**
- *  clean_an_unclean_leb - read and write a LEB to remove corruption.
+ * clean_an_unclean_leb - read and write a LEB to remove corruption.
  * @c: UBIFS file-system description object
  * @ucleb: unclean LEB information
  * @sbuf: LEB-sized buffer to use
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index 93d938a..6094c5a 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -247,7 +247,7 @@
 	mst->total_dirty = cpu_to_le64(tmp64);
 
 	/*  The indexing LEB does not contribute to dark space */
-	tmp64 = (c->main_lebs - 1) * c->dark_wm;
+	tmp64 = ((long long)(c->main_lebs - 1) * c->dark_wm);
 	mst->total_dark = cpu_to_le64(tmp64);
 
 	mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ);
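
The cast matters because c->main_lebs and c->dark_wm are plain ints: without
it the multiplication is performed in 32-bit arithmetic and only the wrapped
result is widened to 64 bits. A minimal illustration with invented values:

	int main_lebs = 70000, dark_wm = 40000;	/* hypothetical sizes */
	long long bad  = (main_lebs - 1) * dark_wm;            /* wraps in int */
	long long good = (long long)(main_lebs - 1) * dark_wm; /* 64-bit product */

Here the true product (about 2.8 * 10^9) exceeds INT_MAX, so only the second
form stores the correct value.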
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index e49c36d..bb145e4 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -144,7 +144,7 @@
 {
 }
 static inline int register_hotplug_dock_device(acpi_handle handle,
-					       struct acpi_dock_ops *ops,
+					       const struct acpi_dock_ops *ops,
 					       void *context)
 {
 	return -ENODEV;
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index b67231b..ed73f67 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -470,7 +470,6 @@
  */
 #define ACPI_FULL_INITIALIZATION        0x00
 #define ACPI_NO_ADDRESS_SPACE_INIT      0x01
-#define ACPI_NO_HARDWARE_INIT           0x02
 #define ACPI_NO_EVENT_INIT              0x04
 #define ACPI_NO_HANDLER_INIT            0x08
 #define ACPI_NO_ACPI_ENABLE             0x10
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 67055f1..610f6fb 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -329,6 +329,7 @@
 int acpi_processor_power_init(struct acpi_processor *pr,
 			      struct acpi_device *device);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
+int acpi_processor_hotplug(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr,
 			      struct acpi_device *device);
 int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 0d2f727e..93df2d7 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -72,6 +72,7 @@
 
 #define DP_MAIN_LINK_CHANNEL_CODING         0x006
 
+#define DP_EDP_CONFIGURATION_CAP            0x00d
 #define DP_TRAINING_AUX_RD_INTERVAL         0x00e
 
 #define DP_PSR_SUPPORT                      0x070
@@ -159,6 +160,8 @@
 # define DP_CP_IRQ			    (1 << 2)
 # define DP_SINK_SPECIFIC_IRQ		    (1 << 6)
 
+#define DP_EDP_CONFIGURATION_SET            0x10a
+
 #define DP_LANE0_1_STATUS		    0x202
 #define DP_LANE2_3_STATUS		    0x203
 # define DP_LANE_CR_DONE		    (1 << 0)
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index c4961ea..d30bedf 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -120,11 +120,12 @@
 	struct drm_mode_modeinfo mode;
 };
 
-#define DRM_MODE_ENCODER_NONE	0
-#define DRM_MODE_ENCODER_DAC	1
-#define DRM_MODE_ENCODER_TMDS	2
-#define DRM_MODE_ENCODER_LVDS	3
-#define DRM_MODE_ENCODER_TVDAC	4
+#define DRM_MODE_ENCODER_NONE	 0
+#define DRM_MODE_ENCODER_DAC	 1
+#define DRM_MODE_ENCODER_TMDS	 2
+#define DRM_MODE_ENCODER_LVDS	 3
+#define DRM_MODE_ENCODER_TVDAC	 4
+#define DRM_MODE_ENCODER_VIRTUAL 5
 
 struct drm_mode_get_encoder {
 	__u32 encoder_id;
@@ -162,6 +163,7 @@
 #define DRM_MODE_CONNECTOR_HDMIB	12
 #define DRM_MODE_CONNECTOR_TV		13
 #define DRM_MODE_CONNECTOR_eDP		14
+#define DRM_MODE_CONNECTOR_VIRTUAL      15
 
 struct drm_mode_get_connector {
 
diff --git a/include/drm/vmwgfx_drm.h b/include/drm/vmwgfx_drm.h
index cd7cd81..bcb0912 100644
--- a/include/drm/vmwgfx_drm.h
+++ b/include/drm/vmwgfx_drm.h
@@ -54,7 +54,7 @@
 #define DRM_VMW_FENCE_EVENT          17
 #define DRM_VMW_PRESENT              18
 #define DRM_VMW_PRESENT_READBACK     19
-
+#define DRM_VMW_UPDATE_LAYOUT        20
 
 /*************************************************************************/
 /**
@@ -552,31 +552,6 @@
 
 /*************************************************************************/
 /**
- * DRM_VMW_UPDATE_LAYOUT - Update layout
- *
- * Updates the preferred modes and connection status for connectors. The
- * command conisits of one drm_vmw_update_layout_arg pointing out a array
- * of num_outputs drm_vmw_rect's.
- */
-
-/**
- * struct drm_vmw_update_layout_arg
- *
- * @num_outputs: number of active
- * @rects: pointer to array of drm_vmw_rect
- *
- * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
- */
-
-struct drm_vmw_update_layout_arg {
-	uint32_t num_outputs;
-	uint32_t pad64;
-	uint64_t rects;
-};
-
-
-/*************************************************************************/
-/**
  * DRM_VMW_FENCE_WAIT
  *
  * Waits for a fence object to signal. The wait is interruptible, so that
@@ -788,4 +763,28 @@
 	 uint64_t clips_ptr;
 	 uint64_t fence_rep;
 };
+
+/*************************************************************************/
+/**
+ * DRM_VMW_UPDATE_LAYOUT - Update layout
+ *
+ * Updates the preferred modes and connection status for connectors. The
+ * command consists of one drm_vmw_update_layout_arg pointing to an array
+ * of num_outputs drm_vmw_rect's.
+ */
+
+/**
+ * struct drm_vmw_update_layout_arg
+ *
+ * @num_outputs: number of active connectors
+ * @rects: pointer to array of drm_vmw_rect cast to a uint64_t
+ *
+ * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
+ */
+struct drm_vmw_update_layout_arg {
+	uint32_t num_outputs;
+	uint32_t pad64;
+	uint64_t rects;
+};
+
 #endif
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 583baf2..7408af8 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -23,57 +23,62 @@
 struct module;
 
 struct cpuidle_device;
+struct cpuidle_driver;
 
 
 /****************************
  * CPUIDLE DEVICE INTERFACE *
  ****************************/
 
+struct cpuidle_state_usage {
+	void		*driver_data;
+
+	unsigned long long	usage;
+	unsigned long long	time; /* in US */
+};
+
 struct cpuidle_state {
 	char		name[CPUIDLE_NAME_LEN];
 	char		desc[CPUIDLE_DESC_LEN];
-	void		*driver_data;
 
 	unsigned int	flags;
 	unsigned int	exit_latency; /* in US */
 	unsigned int	power_usage; /* in mW */
 	unsigned int	target_residency; /* in US */
 
-	unsigned long long	usage;
-	unsigned long long	time; /* in US */
-
 	int (*enter)	(struct cpuidle_device *dev,
-			 struct cpuidle_state *state);
+			struct cpuidle_driver *drv,
+			int index);
 };
 
 /* Idle State Flags */
 #define CPUIDLE_FLAG_TIME_VALID	(0x01) /* is residency time measurable? */
-#define CPUIDLE_FLAG_IGNORE	(0x100) /* ignore during this idle period */
 
 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
 
 /**
  * cpuidle_get_statedata - retrieves private driver state data
- * @state: the state
+ * @st_usage: the state usage statistics
  */
-static inline void * cpuidle_get_statedata(struct cpuidle_state *state)
+static inline void *cpuidle_get_statedata(struct cpuidle_state_usage *st_usage)
 {
-	return state->driver_data;
+	return st_usage->driver_data;
 }
 
 /**
  * cpuidle_set_statedata - stores private driver state data
- * @state: the state
+ * @st_usage: the state usage statistics
  * @data: the private data
  */
 static inline void
-cpuidle_set_statedata(struct cpuidle_state *state, void *data)
+cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data)
 {
-	state->driver_data = data;
+	st_usage->driver_data = data;
 }
 
 struct cpuidle_state_kobj {
 	struct cpuidle_state *state;
+	struct cpuidle_state_usage *state_usage;
 	struct completion kobj_unregister;
 	struct kobject kobj;
 };
@@ -81,22 +86,17 @@
 struct cpuidle_device {
 	unsigned int		registered:1;
 	unsigned int		enabled:1;
-	unsigned int		power_specified:1;
 	unsigned int		cpu;
 
 	int			last_residency;
 	int			state_count;
-	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
+	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
 	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
-	struct cpuidle_state	*last_state;
 
 	struct list_head 	device_list;
 	struct kobject		kobj;
 	struct completion	kobj_unregister;
 	void			*governor_data;
-	struct cpuidle_state	*safe_state;
-
-	int (*prepare)		(struct cpuidle_device *dev);
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
@@ -120,6 +120,11 @@
 struct cpuidle_driver {
 	char			name[CPUIDLE_NAME_LEN];
 	struct module 		*owner;
+
+	unsigned int		power_specified:1;
+	struct cpuidle_state	states[CPUIDLE_STATE_MAX];
+	int			state_count;
+	int			safe_state_index;
 };
 
 #ifdef CONFIG_CPU_IDLE
@@ -166,11 +171,14 @@
 	struct list_head 	governor_list;
 	unsigned int		rating;
 
-	int  (*enable)		(struct cpuidle_device *dev);
-	void (*disable)		(struct cpuidle_device *dev);
+	int  (*enable)		(struct cpuidle_driver *drv,
+					struct cpuidle_device *dev);
+	void (*disable)		(struct cpuidle_driver *drv,
+					struct cpuidle_device *dev);
 
-	int  (*select)		(struct cpuidle_device *dev);
-	void (*reflect)		(struct cpuidle_device *dev);
+	int  (*select)		(struct cpuidle_driver *drv,
+					struct cpuidle_device *dev);
+	void (*reflect)		(struct cpuidle_device *dev, int index);
 
 	struct module 		*owner;
 };
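
Under the reworked API the state table and the enter() callback live in the
shared struct cpuidle_driver, and enter() receives a state index rather than
a state pointer. A hedged sketch of a platform driver (names, latencies and
the idle helper are invented):

	static int my_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
	{
		/* drv->states[index] describes the requested state */
		my_platform_idle(index);	/* hypothetical helper */
		return index;			/* state actually entered */
	}

	static struct cpuidle_driver my_driver = {
		.name			= "my_idle",
		.owner			= THIS_MODULE,
		.state_count		= 1,
		.safe_state_index	= 0,
		.states[0] = {
			.name			= "C1",
			.desc			= "stop clock",
			.exit_latency		= 1,	/* in US */
			.target_residency	= 1,
			.flags			= CPUIDLE_FLAG_TIME_VALID,
			.enter			= my_enter,
		},
	};

Per-state statistics and driver_data now sit in dev->states_usage[], reached
through the reworked cpuidle_get_statedata()/cpuidle_set_statedata() helpers.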
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 45f00b6..de33de1 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -1097,10 +1097,12 @@
 #define SPEED_1000		1000
 #define SPEED_2500		2500
 #define SPEED_10000		10000
+#define SPEED_UNKNOWN		-1
 
 /* Duplex, half or full. */
 #define DUPLEX_HALF		0x00
 #define DUPLEX_FULL		0x01
+#define DUPLEX_UNKNOWN		0xff
 
 /* Which connector port. */
 #define PORT_TP			0x00
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 66f23dc..388b0d4 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -16,7 +16,7 @@
 
 # include <asm/jump_label.h>
 # define HAVE_JUMP_LABEL
-#endif
+#endif	/* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
 
 enum jump_label_type {
 	JUMP_LABEL_DISABLE = 0,
@@ -28,9 +28,9 @@
 #ifdef HAVE_JUMP_LABEL
 
 #ifdef CONFIG_MODULES
-#define JUMP_LABEL_INIT {{ 0 }, NULL, NULL}
+#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL, NULL}
 #else
-#define JUMP_LABEL_INIT {{ 0 }, NULL}
+#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL}
 #endif
 
 static __always_inline bool static_branch(struct jump_label_key *key)
@@ -41,18 +41,20 @@
 extern struct jump_entry __start___jump_table[];
 extern struct jump_entry __stop___jump_table[];
 
+extern void jump_label_init(void);
 extern void jump_label_lock(void);
 extern void jump_label_unlock(void);
 extern void arch_jump_label_transform(struct jump_entry *entry,
-				 enum jump_label_type type);
-extern void arch_jump_label_text_poke_early(jump_label_t addr);
+				      enum jump_label_type type);
+extern void arch_jump_label_transform_static(struct jump_entry *entry,
+					     enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
 extern void jump_label_inc(struct jump_label_key *key);
 extern void jump_label_dec(struct jump_label_key *key);
 extern bool jump_label_enabled(struct jump_label_key *key);
 extern void jump_label_apply_nops(struct module *mod);
 
-#else
+#else  /* !HAVE_JUMP_LABEL */
 
 #include <linux/atomic.h>
 
@@ -62,6 +64,10 @@
 	atomic_t enabled;
 };
 
+static __always_inline void jump_label_init(void)
+{
+}
+
 static __always_inline bool static_branch(struct jump_label_key *key)
 {
 	if (unlikely(atomic_read(&key->enabled)))
@@ -96,7 +102,6 @@
 {
 	return 0;
 }
+#endif	/* HAVE_JUMP_LABEL */
 
-#endif
-
-#endif
+#endif	/* _LINUX_JUMP_LABEL_H */
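
Both the asm-goto and the fallback variants share one usage pattern: a
struct jump_label_key guards a rarely taken branch, and jump_label_inc()/
jump_label_dec() flip it at run time. A minimal sketch (the debug hook is
hypothetical):

	static struct jump_label_key my_key;	/* starts disabled */

	void hot_path(void)
	{
		if (static_branch(&my_key))	/* near-free until enabled */
			my_debug_hook();	/* hypothetical slow path */
	}

	void enable_hook(void)  { jump_label_inc(&my_key); }
	void disable_hook(void) { jump_label_dec(&my_key); }

With HAVE_JUMP_LABEL the condition is patched directly in the instruction
stream; without it, static_branch() degrades to the atomic_read() shown
above.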
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index 57cc0e6..c4eec22 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -86,24 +86,39 @@
 #define NAND_BBT_VERSION	0x00000100
 /* Create a bbt if none exists */
 #define NAND_BBT_CREATE		0x00000200
+/*
+ * Create an empty BBT with no vendor information. Vendor's information may be
+ * unavailable, for example, if the NAND controller has a different data and OOB
+ * layout or if this information is already purged. Must be used in conjunction
+ * with NAND_BBT_CREATE.
+ */
+#define NAND_BBT_CREATE_EMPTY	0x00000400
 /* Search good / bad pattern through all pages of a block */
-#define NAND_BBT_SCANALLPAGES	0x00000400
+#define NAND_BBT_SCANALLPAGES	0x00000800
 /* Scan block empty during good / bad block scan */
-#define NAND_BBT_SCANEMPTY	0x00000800
+#define NAND_BBT_SCANEMPTY	0x00001000
 /* Write bbt if necessary */
-#define NAND_BBT_WRITE		0x00001000
+#define NAND_BBT_WRITE		0x00002000
 /* Read and write back block contents when writing bbt */
-#define NAND_BBT_SAVECONTENT	0x00002000
+#define NAND_BBT_SAVECONTENT	0x00004000
 /* Search good / bad pattern on the first and the second page */
-#define NAND_BBT_SCAN2NDPAGE	0x00004000
+#define NAND_BBT_SCAN2NDPAGE	0x00008000
 /* Search good / bad pattern on the last page of the eraseblock */
-#define NAND_BBT_SCANLASTPAGE	0x00008000
-/* Chip stores bad block marker on BOTH 1st and 6th bytes of OOB */
-#define NAND_BBT_SCANBYTE1AND6 0x00100000
-/* The nand_bbt_descr was created dynamicaly and must be freed */
-#define NAND_BBT_DYNAMICSTRUCT 0x00200000
-/* The bad block table does not OOB for marker */
-#define NAND_BBT_NO_OOB		0x00400000
+#define NAND_BBT_SCANLASTPAGE	0x00010000
+/*
+ * Use a flash based bad block table. By default, OOB identifier is saved in
+ * OOB area. This option is passed to the default bad block table function.
+ */
+#define NAND_BBT_USE_FLASH	0x00020000
+/* Do not store flash based bad block table in OOB area; store it in-band */
+#define NAND_BBT_NO_OOB		0x00040000
+
+/*
+ * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr
+ * was allocated dynamically and must be freed in nand_release(). Has no meaning
+ * in nand_chip.bbt_options.
+ */
+#define NAND_BBT_DYNAMICSTRUCT	0x80000000
 
 /* The maximum number of blocks to scan for a bbt */
 #define NAND_BBT_SCAN_MAXBLOCKS	4
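
These BBT flags now belong in the chip's separate bbt_options word rather
than in the general-purpose options field. A hedged sketch of a board driver
opting in to a flash-based, in-band table (the setup function is invented):

	static void board_nand_setup(struct nand_chip *chip)
	{
		/* keep the BBT in flash, stored in-band rather than in OOB */
		chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	}

nand_scan() then copies these options into the default nand_bbt_descr's.
NAND_BBT_DYNAMICSTRUCT deliberately sits in the top bit: it is internal
bookkeeping for nand_create_default_bbt_descr() and is meaningless in
nand_chip.bbt_options.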
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 37be05b..9f5b312 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -32,17 +32,19 @@
 #define MTD_CHAR_MAJOR 90
 #define MTD_BLOCK_MAJOR 31
 
-#define MTD_ERASE_PENDING      	0x01
+#define MTD_ERASE_PENDING	0x01
 #define MTD_ERASING		0x02
 #define MTD_ERASE_SUSPEND	0x04
-#define MTD_ERASE_DONE          0x08
-#define MTD_ERASE_FAILED        0x10
+#define MTD_ERASE_DONE		0x08
+#define MTD_ERASE_FAILED	0x10
 
 #define MTD_FAIL_ADDR_UNKNOWN -1LL
 
-/* If the erase fails, fail_addr might indicate exactly which block failed.  If
-   fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not
-   specific to any particular block. */
+/*
+ * If the erase fails, fail_addr might indicate exactly which block failed. If
+ * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level
+ * or was not specific to any particular block.
+ */
 struct erase_info {
 	struct mtd_info *mtd;
 	uint64_t addr;
@@ -59,26 +61,12 @@
 };
 
 struct mtd_erase_region_info {
-	uint64_t offset;			/* At which this region starts, from the beginning of the MTD */
+	uint64_t offset;		/* At which this region starts, from the beginning of the MTD */
 	uint32_t erasesize;		/* For this region */
 	uint32_t numblocks;		/* Number of blocks of erasesize in this region */
 	unsigned long *lockmap;		/* If keeping bitmap of locks */
 };
 
-/*
- * oob operation modes
- *
- * MTD_OOB_PLACE:	oob data are placed at the given offset
- * MTD_OOB_AUTO:	oob data are automatically placed at the free areas
- *			which are defined by the ecclayout
- * MTD_OOB_RAW:		mode to read oob and data without doing ECC checking
- */
-typedef enum {
-	MTD_OOB_PLACE,
-	MTD_OOB_AUTO,
-	MTD_OOB_RAW,
-} mtd_oob_mode_t;
-
 /**
  * struct mtd_oob_ops - oob operation operands
  * @mode:	operation mode
@@ -90,7 +78,7 @@
  * @ooblen:	number of oob bytes to write/read
  * @oobretlen:	number of oob bytes written/read
  * @ooboffs:	offset of oob data in the oob area (only relevant when
- *		mode = MTD_OOB_PLACE)
+ *		mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW)
  * @datbuf:	data buffer - if NULL only oob data are read/written
  * @oobbuf:	oob data buffer
  *
@@ -99,7 +87,7 @@
  * OOB area.
  */
 struct mtd_oob_ops {
-	mtd_oob_mode_t	mode;
+	unsigned int	mode;
 	size_t		len;
 	size_t		retlen;
 	size_t		ooblen;
@@ -173,7 +161,7 @@
 	const char *name;
 	int index;
 
-	/* ecc layout structure pointer - read only ! */
+	/* ECC layout structure pointer - read only! */
 	struct nand_ecclayout *ecclayout;
 
 	/* Data for variable erase regions. If numeraseregions is zero,
@@ -324,10 +312,15 @@
 	/* Kernel-side ioctl definitions */
 
 struct mtd_partition;
+struct mtd_part_parser_data;
 
-extern int mtd_device_register(struct mtd_info *master,
-			       const struct mtd_partition *parts,
-			       int nr_parts);
+extern int mtd_device_parse_register(struct mtd_info *mtd,
+			      const char **part_probe_types,
+			      struct mtd_part_parser_data *parser_data,
+			      const struct mtd_partition *defparts,
+			      int defnr_parts);
+#define mtd_device_register(master, parts, nr_parts)	\
+	mtd_device_parse_register(master, NULL, NULL, parts, nr_parts)
 extern int mtd_device_unregister(struct mtd_info *master);
 extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num);
 extern int __get_mtd_device(struct mtd_info *mtd);
@@ -356,27 +349,16 @@
 
 void mtd_erase_callback(struct erase_info *instr);
 
-/*
- * Debugging macro and defines
- */
-#define MTD_DEBUG_LEVEL0	(0)	/* Quiet   */
-#define MTD_DEBUG_LEVEL1	(1)	/* Audible */
-#define MTD_DEBUG_LEVEL2	(2)	/* Loud    */
-#define MTD_DEBUG_LEVEL3	(3)	/* Noisy   */
+static inline int mtd_is_bitflip(int err) {
+	return err == -EUCLEAN;
+}
 
-#ifdef CONFIG_MTD_DEBUG
-#define DEBUG(n, args...)				\
-	do {						\
-		if (n <= CONFIG_MTD_DEBUG_VERBOSE)	\
-			printk(KERN_INFO args);		\
-	} while(0)
-#else /* CONFIG_MTD_DEBUG */
-#define DEBUG(n, args...)				\
-	do {						\
-		if (0)					\
-			printk(KERN_INFO args);		\
-	} while(0)
+static inline int mtd_is_eccerr(int err) {
+	return err == -EBADMSG;
+}
 
-#endif /* CONFIG_MTD_DEBUG */
+static inline int mtd_is_bitflip_or_eccerr(int err) {
+	return mtd_is_bitflip(err) || mtd_is_eccerr(err);
+}
 
 #endif /* __MTD_MTD_H__ */
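
The new helpers give callers a uniform way to tell a corrected bitflip
(-EUCLEAN) from a hard ECC failure (-EBADMSG), and struct mtd_oob_ops now
takes the shared MTD_OPS_* constants. A hedged in-kernel sketch (the caller
and buffer management are assumed to exist elsewhere):

	static int read_page_with_oob(struct mtd_info *mtd, loff_t from,
				      u_char *data, size_t len, u_char *oob)
	{
		struct mtd_oob_ops ops;
		int ret;

		memset(&ops, 0, sizeof(ops));
		ops.mode   = MTD_OPS_AUTO_OOB;	/* auto-place OOB per ecclayout */
		ops.len    = len;
		ops.datbuf = data;
		ops.ooblen = mtd->oobavail;
		ops.oobbuf = oob;

		ret = mtd->read_oob(mtd, from, &ops);
		if (mtd_is_bitflip(ret))
			return 0;	/* corrected; data is usable */
		return ret;		/* 0, -EBADMSG, or another error */
	}

mtd_is_bitflip_or_eccerr() serves callers that handle both outcomes on a
single path, e.g. "log the damage but keep reading".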
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c2b9ac4..904131b 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -42,10 +42,10 @@
 /* Internal helper for board drivers which need to override command function */
 extern void nand_wait_ready(struct mtd_info *mtd);
 
-/* locks all blockes present in the device */
+/* locks all blocks present in the device */
 extern int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
-/* unlocks specified locked blockes */
+/* unlocks specified locked blocks */
 extern int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
 
 /* The maximum number of NAND chips in an array */
@@ -150,7 +150,7 @@
 #define NAND_ECC_READ		0
 /* Reset Hardware ECC for write */
 #define NAND_ECC_WRITE		1
-/* Enable Hardware ECC before syndrom is read back from flash */
+/* Enable Hardware ECC before syndrome is read back from flash */
 #define NAND_ECC_READSYN	2
 
 /* Bit mask for flags passed to do_nand_read_ecc */
@@ -163,7 +163,7 @@
  */
 /* Chip can not auto increment pages */
 #define NAND_NO_AUTOINCR	0x00000001
-/* Buswitdh is 16 bit */
+/* Buswidth is 16 bit */
 #define NAND_BUSWIDTH_16	0x00000002
 /* Device supports partial programming without padding */
 #define NAND_NO_PADDING		0x00000004
@@ -219,27 +219,15 @@
 #define NAND_CHIPOPTIONS_MSK	(0x0000ffff & ~NAND_NO_AUTOINCR)
 
 /* Non chip related options */
-/*
- * Use a flash based bad block table. OOB identifier is saved in OOB area.
- * This option is passed to the default bad block table function.
- */
-#define NAND_USE_FLASH_BBT	0x00010000
 /* This option skips the bbt scan during initialization. */
-#define NAND_SKIP_BBTSCAN	0x00020000
+#define NAND_SKIP_BBTSCAN	0x00010000
 /*
  * This option is defined if the board driver allocates its own buffers
  * (e.g. because it needs them DMA-coherent).
  */
-#define NAND_OWN_BUFFERS	0x00040000
+#define NAND_OWN_BUFFERS	0x00020000
 /* Chip may not exist, so silence any errors in scan */
-#define NAND_SCAN_SILENT_NODEV	0x00080000
-/*
- * If passed additionally to NAND_USE_FLASH_BBT then BBT code will not touch
- * the OOB area.
- */
-#define NAND_USE_FLASH_BBT_NO_OOB	0x00800000
-/* Create an empty BBT with no vendor information if the BBT is available */
-#define NAND_CREATE_EMPTY_BBT		0x01000000
+#define NAND_SCAN_SILENT_NODEV	0x00040000
 
 /* Options set by nand scan */
 /* Nand scan has allocated controller struct */
@@ -331,27 +319,29 @@
 };
 
 /**
- * struct nand_ecc_ctrl - Control structure for ecc
- * @mode:	ecc mode
- * @steps:	number of ecc steps per page
- * @size:	data bytes per ecc step
- * @bytes:	ecc bytes per step
- * @total:	total number of ecc bytes per page
- * @prepad:	padding information for syndrome based ecc generators
- * @postpad:	padding information for syndrome based ecc generators
+ * struct nand_ecc_ctrl - Control structure for ECC
+ * @mode:	ECC mode
+ * @steps:	number of ECC steps per page
+ * @size:	data bytes per ECC step
+ * @bytes:	ECC bytes per step
+ * @total:	total number of ECC bytes per page
+ * @prepad:	padding information for syndrome based ECC generators
+ * @postpad:	padding information for syndrome based ECC generators
  * @layout:	ECC layout control struct pointer
- * @priv:	pointer to private ecc control data
- * @hwctl:	function to control hardware ecc generator. Must only
+ * @priv:	pointer to private ECC control data
+ * @hwctl:	function to control hardware ECC generator. Must only
  *		be provided if an hardware ECC is available
- * @calculate:	function for ecc calculation or readback from ecc hardware
- * @correct:	function for ecc correction, matching to ecc generator (sw/hw)
+ * @calculate:	function for ECC calculation or readback from ECC hardware
+ * @correct:	function for ECC correction, matching to ECC generator (sw/hw)
  * @read_page_raw:	function to read a raw page without ECC
  * @write_page_raw:	function to write a raw page without ECC
- * @read_page:	function to read a page according to the ecc generator
+ * @read_page:	function to read a page according to the ECC generator
  *		requirements.
  * @read_subpage:	function to read parts of the page covered by ECC.
- * @write_page:	function to write a page according to the ecc generator
+ * @write_page:	function to write a page according to the ECC generator
  *		requirements.
+ * @write_oob_raw:	function to write chip OOB data without ECC
+ * @read_oob_raw:	function to read chip OOB data without ECC
  * @read_oob:	function to read chip OOB data
  * @write_oob:	function to write chip OOB data
  */
@@ -380,6 +370,10 @@
 			uint32_t offs, uint32_t len, uint8_t *buf);
 	void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
 			const uint8_t *buf);
+	int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+			int page);
+	int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
+			int page, int sndcmd);
 	int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page,
 			int sndcmd);
 	int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -388,8 +382,8 @@
 
 /**
  * struct nand_buffers - buffer structure for read/write
- * @ecccalc:	buffer for calculated ecc
- * @ecccode:	buffer for ecc read from flash
+ * @ecccalc:	buffer for calculated ECC
+ * @ecccode:	buffer for ECC read from flash
  * @databuf:	buffer for data - dynamically sized
  *
  * Do not change the order of buffers. databuf and oobrbuf must be in
@@ -422,7 +416,7 @@
  *			mtd->oobsize, mtd->writesize and so on.
  *			@id_data contains the 8 bytes values of NAND_CMD_READID.
  *			Return with the bus width.
- * @dev_ready:		[BOARDSPECIFIC] hardwarespecific function for accesing
+ * @dev_ready:		[BOARDSPECIFIC] hardwarespecific function for accessing
  *			device ready/busy line. If set to NULL no access to
  *			ready/busy is available and the ready/busy information
  *			is read from the chip status register.
@@ -430,17 +424,17 @@
  *			commands to the chip.
  * @waitfunc:		[REPLACEABLE] hardwarespecific function for wait on
  *			ready.
- * @ecc:		[BOARDSPECIFIC] ecc control ctructure
+ * @ecc:		[BOARDSPECIFIC] ECC control structure
  * @buffers:		buffer structure for read/write
  * @hwcontrol:		platform-specific hardware control structure
- * @ops:		oob operation operands
  * @erase_cmd:		[INTERN] erase command write function, selectable due
  *			to AND support.
  * @scan_bbt:		[REPLACEABLE] function to scan bad block table
  * @chip_delay:		[BOARDSPECIFIC] chip dependent delay for transferring
  *			data from array to read regs (tR).
  * @state:		[INTERN] the current state of the NAND device
- * @oob_poi:		poison value buffer
+ * @oob_poi:		"poison value buffer," used for laying out OOB data
+ *			before writing
  * @page_shift:		[INTERN] number of address bits in a page (column
  *			address bits).
  * @phys_erase_shift:	[INTERN] number of address bits in a physical eraseblock
@@ -449,6 +443,9 @@
  * @options:		[BOARDSPECIFIC] various chip options. They can partly
  *			be set to inform nand_scan about special functionality.
  *			See the defines for further explanation.
+ * @bbt_options:	[INTERN] bad block specific options. All options used
+ *			here must come from bbm.h. By default, these options
+ *			will be copied to the appropriate nand_bbt_descr's.
  * @badblockpos:	[INTERN] position of the bad block marker in the oob
  *			area.
  * @badblockbits:	[INTERN] number of bits to left-shift the bad block
@@ -464,7 +461,7 @@
  *			non 0 if ONFI supported.
  * @onfi_params:	[INTERN] holds the ONFI page parameter when ONFI is
  *			supported, 0 otherwise.
- * @ecclayout:		[REPLACEABLE] the default ecc placement scheme
+ * @ecclayout:		[REPLACEABLE] the default ECC placement scheme
  * @bbt:		[INTERN] bad block table pointer
  * @bbt_td:		[REPLACEABLE] bad block table descriptor for flash
  *			lookup.
@@ -472,9 +469,9 @@
  * @badblock_pattern:	[REPLACEABLE] bad block scan pattern used for initial
  *			bad block scan.
  * @controller:		[REPLACEABLE] a pointer to a hardware controller
- *			structure which is shared among multiple independend
+ *			structure which is shared among multiple independent
  *			devices.
- * @priv:		[OPTIONAL] pointer to private chip date
+ * @priv:		[OPTIONAL] pointer to private chip data
  * @errstat:		[OPTIONAL] hardware specific function to perform
  *			additional error status checks (determine if errors are
  *			correctable).
@@ -509,6 +506,7 @@
 
 	int chip_delay;
 	unsigned int options;
+	unsigned int bbt_options;
 
 	int page_shift;
 	int phys_erase_shift;
@@ -536,8 +534,6 @@
 	struct nand_buffers *buffers;
 	struct nand_hw_control hwcontrol;
 
-	struct mtd_oob_ops ops;
-
 	uint8_t *bbt;
 	struct nand_bbt_descr *bbt_td;
 	struct nand_bbt_descr *bbt_md;
@@ -611,10 +607,9 @@
  * @partitions:		mtd partition list
  * @chip_delay:		R/B delay value in us
  * @options:		Option flags, e.g. 16bit buswidth
- * @ecclayout:		ecc layout info structure
+ * @bbt_options:	BBT option flags, e.g. NAND_BBT_USE_FLASH
+ * @ecclayout:		ECC layout info structure
  * @part_probe_types:	NULL-terminated array of probe types
- * @set_parts:		platform specific function to set partitions
- * @priv:		hardware controller specific settings
  */
 struct platform_nand_chip {
 	int nr_chips;
@@ -624,9 +619,8 @@
 	struct nand_ecclayout *ecclayout;
 	int chip_delay;
 	unsigned int options;
+	unsigned int bbt_options;
 	const char **part_probe_types;
-	void (*set_parts)(uint64_t size, struct platform_nand_chip *chip);
-	void *priv;
 };
 
 /* Keep gcc happy */
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 52b6f18..4596503 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -184,6 +184,9 @@
 #define ONENAND_IS_CACHE_PROGRAM(this)					\
 	(this->options & ONENAND_HAS_CACHE_PROGRAM)
 
+#define ONENAND_IS_NOP_1(this)						\
+	(this->options & ONENAND_HAS_NOP_1)
+
 /* Check byte access in OneNAND */
 #define ONENAND_CHECK_BYTE_ACCESS(addr)		(addr & 0x1)
 
@@ -195,6 +198,7 @@
 #define ONENAND_HAS_2PLANE		(0x0004)
 #define ONENAND_HAS_4KB_PAGE		(0x0008)
 #define ONENAND_HAS_CACHE_PROGRAM	(0x0010)
+#define ONENAND_HAS_NOP_1		(0x0020)
 #define ONENAND_SKIP_UNLOCK_CHECK	(0x0100)
 #define ONENAND_PAGEBUF_ALLOC		(0x1000)
 #define ONENAND_OOBBUF_ALLOC		(0x2000)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 3a6f037..2475228 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -24,7 +24,9 @@
  * 	will extend to the end of the master MTD device.
  * offset: absolute starting position within the master MTD device; if
  * 	defined as MTDPART_OFS_APPEND, the partition will start where the
- * 	previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block.
+ *	previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block;
+ *	if MTDPART_OFS_RETAIN, consume as much as possible, leaving 'size'
+ *	bytes free after the end of the partition.
  * mask_flags: contains flags that have to be masked (removed) from the
  * 	master MTD flag set for the corresponding MTD partition.
  * 	For example, to force a read-only partition, simply adding
@@ -42,12 +44,25 @@
 	struct nand_ecclayout *ecclayout;	/* out of band layout for this partition (NAND only) */
 };
 
+#define MTDPART_OFS_RETAIN	(-3)
 #define MTDPART_OFS_NXTBLK	(-2)
 #define MTDPART_OFS_APPEND	(-1)
 #define MTDPART_SIZ_FULL	(0)
 
 
 struct mtd_info;
+struct device_node;
+
+/**
+ * struct mtd_part_parser_data - used to pass data to MTD partition parsers.
+ * @origin: for RedBoot, start address of MTD device
+ * @of_node: for OF parsers, device node containing partitioning information
+ */
+struct mtd_part_parser_data {
+	unsigned long origin;
+	struct device_node *of_node;
+};
+
 
 /*
  * Functions dealing with the various ways of partitioning the space
@@ -57,37 +72,12 @@
 	struct list_head list;
 	struct module *owner;
 	const char *name;
-	int (*parse_fn)(struct mtd_info *, struct mtd_partition **, unsigned long);
+	int (*parse_fn)(struct mtd_info *, struct mtd_partition **,
+			struct mtd_part_parser_data *);
 };
 
 extern int register_mtd_parser(struct mtd_part_parser *parser);
 extern int deregister_mtd_parser(struct mtd_part_parser *parser);
-extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
-				struct mtd_partition **pparts, unsigned long origin);
-
-#define put_partition_parser(p) do { module_put((p)->owner); } while(0)
-
-struct device;
-struct device_node;
-
-#ifdef CONFIG_MTD_OF_PARTS
-int __devinit of_mtd_parse_partitions(struct device *dev,
-                                      struct device_node *node,
-                                      struct mtd_partition **pparts);
-#else
-static inline int of_mtd_parse_partitions(struct device *dev,
-					  struct device_node *node,
-					  struct mtd_partition **pparts)
-{
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_MTD_CMDLINE_PARTS
-static inline int mtd_has_cmdlinepart(void) { return 1; }
-#else
-static inline int mtd_has_cmdlinepart(void) { return 0; }
-#endif
 
 int mtd_is_partition(struct mtd_info *mtd);
 int mtd_add_partition(struct mtd_info *master, char *name,
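
Combined with mtd_device_parse_register() from mtd.h above, a board driver
can supply a fallback partition table while still letting command-line or
device-tree parsers take precedence. A hedged sketch (names, sizes and the
probe list are invented):

	static struct mtd_partition board_parts[] = {
		{
			.name		= "boot",
			.offset		= 0,
			.size		= SZ_512K,
			.mask_flags	= MTD_WRITEABLE, /* force read-only */
		}, {
			.name		= "fs",
			.offset		= MTDPART_OFS_APPEND, /* right after "boot" */
			.size		= MTDPART_SIZ_FULL,   /* to end of device */
		},
	};

	static const char *probes[] = { "cmdlinepart", "ofpart", NULL };

	/* in probe(): parsers win, board_parts is the fallback */
	ret = mtd_device_parse_register(mtd, probes, NULL,
					board_parts, ARRAY_SIZE(board_parts));

The NULL third argument is the new struct mtd_part_parser_data; boards that
need it can pass a RedBoot origin or an OF device node there instead.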
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h
index e5f21d2..04e0181 100644
--- a/include/linux/mtd/physmap.h
+++ b/include/linux/mtd/physmap.h
@@ -32,21 +32,4 @@
 	struct mtd_partition	*parts;
 };
 
-/*
- * Board needs to specify the exact mapping during their setup time.
- */
-void physmap_configure(unsigned long addr, unsigned long size,
-		int bankwidth, void (*set_vpp)(struct map_info *, int) );
-
-/*
- * Machines that wish to do flash partition may want to call this function in
- * their setup routine.
- *
- *	physmap_set_partitions(mypartitions, num_parts);
- *
- * Note that one can always override this hard-coded partition with
- * command line partition (you need to enable CONFIG_MTD_CMDLINE_PARTS).
- */
-void physmap_set_partitions(struct mtd_partition *parts, int num_parts);
-
 #endif /* __LINUX_MTD_PHYSMAP__ */
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index 2f7d45b..1a7e1d2 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -45,6 +45,51 @@
 	__u64 usr_ptr;
 };
 
+/**
+ * MTD operation modes
+ *
+ * @MTD_OPS_PLACE_OOB:	OOB data are placed at the given offset (default)
+ * @MTD_OPS_AUTO_OOB:	OOB data are automatically placed at the free areas
+ *			which are defined by the internal ecclayout
+ * @MTD_OPS_RAW:	data are transferred as-is, with no error correction;
+ *			this mode implies %MTD_OPS_PLACE_OOB
+ *
+ * These modes can be passed to ioctl(MEMWRITE) and are also used internally.
+ * See notes on "MTD file modes" for discussion on %MTD_OPS_RAW vs.
+ * %MTD_FILE_MODE_RAW.
+ */
+enum {
+	MTD_OPS_PLACE_OOB = 0,
+	MTD_OPS_AUTO_OOB = 1,
+	MTD_OPS_RAW = 2,
+};
+
+/**
+ * struct mtd_write_req - data structure for requesting a write operation
+ *
+ * @start:	start address
+ * @len:	length of data buffer
+ * @ooblen:	length of OOB buffer
+ * @usr_data:	user-provided data buffer
+ * @usr_oob:	user-provided OOB buffer
+ * @mode:	MTD mode (see "MTD operation modes")
+ * @padding:	reserved, must be set to 0
+ *
+ * This structure supports ioctl(MEMWRITE) operations, allowing data and/or OOB
+ * writes in various modes. To write to OOB-only, set @usr_data == NULL, and to
+ * write data-only, set @usr_oob == NULL. However, setting both @usr_data and
+ * @usr_oob to NULL is not allowed.
+ */
+struct mtd_write_req {
+	__u64 start;
+	__u64 len;
+	__u64 ooblen;
+	__u64 usr_data;
+	__u64 usr_oob;
+	__u8 mode;
+	__u8 padding[7];
+};
+
 #define MTD_ABSENT		0
 #define MTD_RAM			1
 #define MTD_ROM			2
@@ -59,13 +104,13 @@
 #define MTD_NO_ERASE		0x1000	/* No erase necessary */
 #define MTD_POWERUP_LOCK	0x2000	/* Always locked after reset */
 
-// Some common devices / combinations of capabilities
+/* Some common devices / combinations of capabilities */
 #define MTD_CAP_ROM		0
 #define MTD_CAP_RAM		(MTD_WRITEABLE | MTD_BIT_WRITEABLE | MTD_NO_ERASE)
 #define MTD_CAP_NORFLASH	(MTD_WRITEABLE | MTD_BIT_WRITEABLE)
 #define MTD_CAP_NANDFLASH	(MTD_WRITEABLE)
 
-/* ECC byte placement */
+/* Obsolete ECC byte placement modes (used with obsolete MEMGETOOBSEL) */
 #define MTD_NANDECC_OFF		0	// Switch off ECC (Not recommended)
 #define MTD_NANDECC_PLACE	1	// Use the given placement in the structure (YAFFS1 legacy mode)
 #define MTD_NANDECC_AUTOPLACE	2	// Use the default placement scheme
@@ -80,21 +125,18 @@
 struct mtd_info_user {
 	__u8 type;
 	__u32 flags;
-	__u32 size;	 // Total size of the MTD
+	__u32 size;	/* Total size of the MTD */
 	__u32 erasesize;
 	__u32 writesize;
-	__u32 oobsize;   // Amount of OOB data per block (e.g. 16)
-	/* The below two fields are obsolete and broken, do not use them
-	 * (TODO: remove at some point) */
-	__u32 ecctype;
-	__u32 eccsize;
+	__u32 oobsize;	/* Amount of OOB data per block (e.g. 16) */
+	__u64 padding;	/* Old obsolete field; do not use */
 };
 
 struct region_info_user {
 	__u32 offset;		/* At which this region starts,
-					 * from the beginning of the MTD */
-	__u32 erasesize;		/* For this region */
-	__u32 numblocks;		/* Number of blocks in this region */
+				 * from the beginning of the MTD */
+	__u32 erasesize;	/* For this region */
+	__u32 numblocks;	/* Number of blocks in this region */
 	__u32 regionindex;
 };
 
@@ -104,29 +146,61 @@
 	__u32 locked;
 };
 
+/*
+ * Note, the following ioctl existed in the past and was removed:
+ * #define MEMSETOOBSEL           _IOW('M', 9, struct nand_oobinfo)
+ * Try to avoid adding a new ioctl with the same ioctl number.
+ */
+
+/* Get basic MTD characteristics info (better to use sysfs) */
 #define MEMGETINFO		_IOR('M', 1, struct mtd_info_user)
+/* Erase segment of MTD */
 #define MEMERASE		_IOW('M', 2, struct erase_info_user)
+/* Write out-of-band data from MTD */
 #define MEMWRITEOOB		_IOWR('M', 3, struct mtd_oob_buf)
+/* Read out-of-band data from MTD */
 #define MEMREADOOB		_IOWR('M', 4, struct mtd_oob_buf)
+/* Lock a chip (for MTD that supports it) */
 #define MEMLOCK			_IOW('M', 5, struct erase_info_user)
+/* Unlock a chip (for MTD that supports it) */
 #define MEMUNLOCK		_IOW('M', 6, struct erase_info_user)
+/* Get the number of different erase regions */
 #define MEMGETREGIONCOUNT	_IOR('M', 7, int)
+/* Get information about the erase region for a specific index */
 #define MEMGETREGIONINFO	_IOWR('M', 8, struct region_info_user)
-#define MEMSETOOBSEL		_IOW('M', 9, struct nand_oobinfo)
+/* Get info about OOB modes (e.g., RAW, PLACE, AUTO) - legacy interface */
 #define MEMGETOOBSEL		_IOR('M', 10, struct nand_oobinfo)
+/* Check if an eraseblock is bad */
 #define MEMGETBADBLOCK		_IOW('M', 11, __kernel_loff_t)
+/* Mark an eraseblock as bad */
 #define MEMSETBADBLOCK		_IOW('M', 12, __kernel_loff_t)
+/* Set OTP (One-Time Programmable) mode (factory vs. user) */
 #define OTPSELECT		_IOR('M', 13, int)
+/* Get number of OTP (One-Time Programmable) regions */
 #define OTPGETREGIONCOUNT	_IOW('M', 14, int)
+/* Get all OTP (One-Time Programmable) info about MTD */
 #define OTPGETREGIONINFO	_IOW('M', 15, struct otp_info)
+/* Lock a given range of user data (must be in mode %MTD_FILE_MODE_OTP_USER) */
 #define OTPLOCK			_IOR('M', 16, struct otp_info)
+/* Get ECC layout (deprecated) */
 #define ECCGETLAYOUT		_IOR('M', 17, struct nand_ecclayout_user)
+/* Get statistics about corrected/uncorrected errors */
 #define ECCGETSTATS		_IOR('M', 18, struct mtd_ecc_stats)
+/* Set MTD mode on a per-file-descriptor basis (see "MTD file modes") */
 #define MTDFILEMODE		_IO('M', 19)
+/* Erase segment of MTD (supports 64-bit address) */
 #define MEMERASE64		_IOW('M', 20, struct erase_info_user64)
+/* Write data to OOB (64-bit version) */
 #define MEMWRITEOOB64		_IOWR('M', 21, struct mtd_oob_buf64)
+/* Read data from OOB (64-bit version) */
 #define MEMREADOOB64		_IOWR('M', 22, struct mtd_oob_buf64)
+/* Check if chip is locked (for MTD that supports it) */
 #define MEMISLOCKED		_IOR('M', 23, struct erase_info_user)
+/*
+ * Most generic write interface; can write in-band and/or out-of-band in various
+ * modes (see "struct mtd_write_req")
+ */
+#define MEMWRITE		_IOWR('M', 24, struct mtd_write_req)
 
 /*
  * Obsolete legacy interface. Keep it in order not to break userspace
@@ -177,13 +251,27 @@
 };
 
 /*
- * Read/write file modes for access to MTD
+ * MTD file modes - for read/write access to MTD
+ *
+ * @MTD_FILE_MODE_NORMAL:	OTP disabled, ECC enabled
+ * @MTD_FILE_MODE_OTP_FACTORY:	OTP enabled in factory mode
+ * @MTD_FILE_MODE_OTP_USER:	OTP enabled in user mode
+ * @MTD_FILE_MODE_RAW:		OTP disabled, ECC disabled
+ *
+ * These modes can be set via ioctl(MTDFILEMODE). The mode will be retained
+ * separately for each open file descriptor.
+ *
+ * Note: %MTD_FILE_MODE_RAW provides the same functionality as %MTD_OPS_RAW -
+ * raw access to the flash, without error correction or autoplacement schemes.
+ * Wherever possible, the MTD_OPS_* mode will override the MTD_FILE_MODE_* mode
+ * (e.g., when using ioctl(MEMWRITE)), but in some cases, the MTD_FILE_MODE is
+ * used out of necessity (e.g., `write()', ioctl(MEMWRITEOOB64)).
  */
 enum mtd_file_modes {
-	MTD_MODE_NORMAL = MTD_OTP_OFF,
-	MTD_MODE_OTP_FACTORY = MTD_OTP_FACTORY,
-	MTD_MODE_OTP_USER = MTD_OTP_USER,
-	MTD_MODE_RAW,
+	MTD_FILE_MODE_NORMAL = MTD_OTP_OFF,
+	MTD_FILE_MODE_OTP_FACTORY = MTD_OTP_FACTORY,
+	MTD_FILE_MODE_OTP_USER = MTD_OTP_USER,
+	MTD_FILE_MODE_RAW,
 };
 
 #endif /* __MTD_ABI_H__ */
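
MEMWRITE folds data and OOB into one request and takes an MTD_OPS_* mode
directly. A hedged userspace sketch (the wrapper and its arguments are
invented; fd is an open /dev/mtdX descriptor):

	#include <sys/ioctl.h>
	#include <stdint.h>
	#include <string.h>
	#include <mtd/mtd-abi.h>

	int mtd_write_raw(int fd, uint64_t start,
			  const void *data, uint64_t len,
			  const void *oob, uint64_t ooblen)
	{
		struct mtd_write_req req;

		memset(&req, 0, sizeof(req));	/* padding must stay zero */
		req.start    = start;
		req.len      = len;
		req.ooblen   = ooblen;
		req.usr_data = (uint64_t)(uintptr_t)data;
		req.usr_oob  = (uint64_t)(uintptr_t)oob;
		req.mode     = MTD_OPS_RAW;	/* no ECC, OOB as-is */

		return ioctl(fd, MEMWRITE, &req);
	}

Leaving usr_data or usr_oob zero gives an OOB-only or data-only write
respectively; per the comment above, leaving both zero is rejected.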
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index d5eee20..e2e3eca 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -211,6 +211,7 @@
 #define RFCOMM_AUTH_ACCEPT  6
 #define RFCOMM_AUTH_REJECT  7
 #define RFCOMM_DEFER_SETUP  8
+#define RFCOMM_ENC_DROP     9
 
 /* Scheduling flags and events */
 #define RFCOMM_SCHED_WAKEUP 31
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index dc1123a..72eddd1 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -3567,8 +3567,9 @@
 			return i;
 
 	/* warn when we cannot find a rate. */
-	WARN_ON(1);
+	WARN_ON_ONCE(1);
 
+	/* and return 0 (the lowest index) */
 	return 0;
 }
 
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 98c18544..cb1f350 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -192,8 +192,15 @@
  *    NLA_NUL_STRING       Maximum length of string (excluding NUL)
  *    NLA_FLAG             Unused
  *    NLA_BINARY           Maximum length of attribute payload
- *    NLA_NESTED_COMPAT    Exact length of structure payload
- *    All other            Exact length of attribute payload
+ *    NLA_NESTED           Don't use `len' field -- length verification is
+ *                         done by checking len of nested header (or empty)
+ *    NLA_NESTED_COMPAT    Minimum length of structure payload
+ *    NLA_U8, NLA_U16,
+ *    NLA_U32, NLA_U64,
+ *    NLA_MSECS            Leaving the length field zero verifies that the
+ *                         payload fits the given type; setting it enforces
+ *                         a minimum length, just like "All other"
+ *    All other            Minimum length of attribute payload
  *
  * Example:
  * static const struct nla_policy my_policy[ATTR_MAX+1] = {
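A hypothetical policy illustrating the clarified `len' semantics; the MYATTR_*
names are invented for this sketch:

#include <net/netlink.h>

enum {
	MYATTR_UNSPEC,
	MYATTR_TIMEOUT,		/* NLA_MSECS */
	MYATTR_NAME,		/* NLA_NUL_STRING */
	MYATTR_CONFIG,		/* NLA_NESTED */
	__MYATTR_MAX,
};
#define MYATTR_MAX (__MYATTR_MAX - 1)

static const struct nla_policy my_policy[MYATTR_MAX + 1] = {
	/* len == 0: only check that a u64 payload fits */
	[MYATTR_TIMEOUT] = { .type = NLA_MSECS },
	/* maximum length, excluding the terminating NUL */
	[MYATTR_NAME]    = { .type = NLA_NUL_STRING, .len = 15 },
	/* len is not used; the nested header is validated instead */
	[MYATTR_CONFIG]  = { .type = NLA_NESTED },
};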
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
new file mode 100644
index 0000000..c168468
--- /dev/null
+++ b/include/xen/interface/platform.h
@@ -0,0 +1,320 @@
+/******************************************************************************
+ * platform.h
+ *
+ * Hardware platform operations. Intended for use by domain-0 kernel.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (c) 2002-2006, K Fraser
+ */
+
+#ifndef __XEN_PUBLIC_PLATFORM_H__
+#define __XEN_PUBLIC_PLATFORM_H__
+
+#include "xen.h"
+
+#define XENPF_INTERFACE_VERSION 0x03000001
+
+/*
+ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
+ * 1 January, 1970 if the current system time was <system_time>.
+ */
+#define XENPF_settime             17
+struct xenpf_settime {
+	/* IN variables. */
+	uint32_t secs;
+	uint32_t nsecs;
+	uint64_t system_time;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_settime_t);
+
+/*
+ * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
+ * On x86, @type is an architecture-defined MTRR memory type.
+ * On success, returns the MTRR that was used (@reg) and a handle that can
+ * be passed to XENPF_DEL_MEMTYPE to accurately tear down the new setting.
+ * (x86-specific).
+ */
+#define XENPF_add_memtype         31
+struct xenpf_add_memtype {
+	/* IN variables. */
+	unsigned long mfn;
+	uint64_t nr_mfns;
+	uint32_t type;
+	/* OUT variables. */
+	uint32_t handle;
+	uint32_t reg;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_add_memtype_t);
+
+/*
+ * Tear down an existing memory-range type. If @handle is remembered then it
+ * should be passed in to accurately tear down the correct setting (in case
+ * of overlapping memory regions with differing types). If it is not known
+ * then @handle should be set to zero. In all cases @reg must be set.
+ * (x86-specific).
+ */
+#define XENPF_del_memtype         32
+struct xenpf_del_memtype {
+	/* IN variables. */
+	uint32_t handle;
+	uint32_t reg;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_del_memtype_t);
+
+/* Read current type of an MTRR (x86-specific). */
+#define XENPF_read_memtype        33
+struct xenpf_read_memtype {
+	/* IN variables. */
+	uint32_t reg;
+	/* OUT variables. */
+	unsigned long mfn;
+	uint64_t nr_mfns;
+	uint32_t type;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_read_memtype_t);
+
+#define XENPF_microcode_update    35
+struct xenpf_microcode_update {
+	/* IN variables. */
+	GUEST_HANDLE(void) data;          /* Pointer to microcode data */
+	uint32_t length;                  /* Length of microcode data. */
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_microcode_update_t);
+
+#define XENPF_platform_quirk      39
+#define QUIRK_NOIRQBALANCING      1 /* Do not restrict IO-APIC RTE targets */
+#define QUIRK_IOAPIC_BAD_REGSEL   2 /* IO-APIC REGSEL forgets its value    */
+#define QUIRK_IOAPIC_GOOD_REGSEL  3 /* IO-APIC REGSEL behaves properly     */
+struct xenpf_platform_quirk {
+	/* IN variables. */
+	uint32_t quirk_id;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_platform_quirk_t);
+
+#define XENPF_firmware_info       50
+#define XEN_FW_DISK_INFO          1 /* from int 13 AH=08/41/48 */
+#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
+#define XEN_FW_VBEDDC_INFO        3 /* from int 10 AX=4f15 */
+struct xenpf_firmware_info {
+	/* IN variables. */
+	uint32_t type;
+	uint32_t index;
+	/* OUT variables. */
+	union {
+		struct {
+			/* Int13, Fn48: Check Extensions Present. */
+			uint8_t device;                   /* %dl: bios device number */
+			uint8_t version;                  /* %ah: major version      */
+			uint16_t interface_support;       /* %cx: support bitmap     */
+			/* Int13, Fn08: Legacy Get Device Parameters. */
+			uint16_t legacy_max_cylinder;     /* %cl[7:6]:%ch: max cyl # */
+			uint8_t legacy_max_head;          /* %dh: max head #         */
+			uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector #  */
+			/* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */
+			/* NB. First uint16_t of buffer must be set to buffer size.      */
+			GUEST_HANDLE(void) edd_params;
+		} disk_info; /* XEN_FW_DISK_INFO */
+		struct {
+			uint8_t device;                   /* bios device number  */
+			uint32_t mbr_signature;           /* offset 0x1b8 in mbr */
+		} disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */
+		struct {
+			/* Int10, AX=4F15: Get EDID info. */
+			uint8_t capabilities;
+			uint8_t edid_transfer_time;
+			/* must refer to 128-byte buffer */
+			GUEST_HANDLE(uchar) edid;
+		} vbeddc_info; /* XEN_FW_VBEDDC_INFO */
+	} u;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_firmware_info_t);
+
+#define XENPF_enter_acpi_sleep    51
+struct xenpf_enter_acpi_sleep {
+	/* IN variables */
+	uint16_t pm1a_cnt_val;      /* PM1a control value. */
+	uint16_t pm1b_cnt_val;      /* PM1b control value. */
+	uint32_t sleep_state;       /* Which state to enter (Sn). */
+	uint32_t flags;             /* Must be zero. */
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_enter_acpi_sleep_t);
+
+#define XENPF_change_freq         52
+struct xenpf_change_freq {
+	/* IN variables */
+	uint32_t flags; /* Must be zero. */
+	uint32_t cpu;   /* Physical cpu. */
+	uint64_t freq;  /* New frequency (Hz). */
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_change_freq_t);
+
+/*
+ * Get idle times (nanoseconds since boot) for physical CPUs specified in the
+ * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is
+ * indexed by CPU number; only entries with the corresponding @cpumap_bitmap
+ * bit set are written to. On return, @cpumap_bitmap is modified so that any
+ * non-existent CPUs are cleared. Such CPUs have their @idletime array entry
+ * cleared.
+ */
+#define XENPF_getidletime         53
+struct xenpf_getidletime {
+	/* IN/OUT variables */
+	/* IN: CPUs to interrogate; OUT: subset of IN which are present */
+	GUEST_HANDLE(uchar) cpumap_bitmap;
+	/* IN variables */
+	/* Size of cpumap bitmap. */
+	uint32_t cpumap_nr_cpus;
+	/* Must be indexable for every cpu in cpumap_bitmap. */
+	GUEST_HANDLE(uint64_t) idletime;
+	/* OUT variables */
+	/* System time when the idletime snapshots were taken. */
+	uint64_t now;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_getidletime_t);
+
+#define XENPF_set_processor_pminfo      54
+
+/* ability bits */
+#define XEN_PROCESSOR_PM_CX	1
+#define XEN_PROCESSOR_PM_PX	2
+#define XEN_PROCESSOR_PM_TX	4
+
+/* cmd type */
+#define XEN_PM_CX   0
+#define XEN_PM_PX   1
+#define XEN_PM_TX   2
+
+/* Px sub info type */
+#define XEN_PX_PCT   1
+#define XEN_PX_PSS   2
+#define XEN_PX_PPC   4
+#define XEN_PX_PSD   8
+
+struct xen_power_register {
+	uint32_t     space_id;
+	uint32_t     bit_width;
+	uint32_t     bit_offset;
+	uint32_t     access_size;
+	uint64_t     address;
+};
+
+struct xen_processor_csd {
+	uint32_t    domain;      /* domain number of one dependent group */
+	uint32_t    coord_type;  /* coordination type */
+	uint32_t    num;         /* number of processors in same domain */
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_processor_csd);
+
+struct xen_processor_cx {
+	struct xen_power_register  reg; /* GAS for Cx trigger register */
+	uint8_t     type;     /* cstate value, c0: 0, c1: 1, ... */
+	uint32_t    latency;  /* worst latency (ms) to enter/exit this cstate */
+	uint32_t    power;    /* average power consumption(mW) */
+	uint32_t    dpcnt;    /* number of dependency entries */
+	GUEST_HANDLE(xen_processor_csd) dp; /* NULL if no dependency */
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_processor_cx);
+
+struct xen_processor_flags {
+	uint32_t bm_control:1;
+	uint32_t bm_check:1;
+	uint32_t has_cst:1;
+	uint32_t power_setup_done:1;
+	uint32_t bm_rld_set:1;
+};
+
+struct xen_processor_power {
+	uint32_t count;  /* number of C state entries in array below */
+	struct xen_processor_flags flags;  /* global flags of this processor */
+	GUEST_HANDLE(xen_processor_cx) states; /* supported c states */
+};
+
+struct xen_pct_register {
+	uint8_t  descriptor;
+	uint16_t length;
+	uint8_t  space_id;
+	uint8_t  bit_width;
+	uint8_t  bit_offset;
+	uint8_t  reserved;
+	uint64_t address;
+};
+
+struct xen_processor_px {
+	uint64_t core_frequency; /* megahertz */
+	uint64_t power;      /* milliWatts */
+	uint64_t transition_latency; /* microseconds */
+	uint64_t bus_master_latency; /* microseconds */
+	uint64_t control;        /* control value */
+	uint64_t status;     /* success indicator */
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_processor_px);
+
+struct xen_psd_package {
+	uint64_t num_entries;
+	uint64_t revision;
+	uint64_t domain;
+	uint64_t coord_type;
+	uint64_t num_processors;
+};
+
+struct xen_processor_performance {
+	uint32_t flags;     /* flag for Px sub info type */
+	uint32_t platform_limit;  /* Platform limitation on freq usage */
+	struct xen_pct_register control_register;
+	struct xen_pct_register status_register;
+	uint32_t state_count;     /* total available performance states */
+	GUEST_HANDLE(xen_processor_px) states;
+	struct xen_psd_package domain_info;
+	uint32_t shared_type;     /* coordination type of this processor */
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_processor_performance);
+
+struct xenpf_set_processor_pminfo {
+	/* IN variables */
+	uint32_t id;    /* ACPI CPU ID */
+	uint32_t type;  /* {XEN_PM_CX, XEN_PM_PX} */
+	union {
+		struct xen_processor_power          power;/* Cx: _CST/_CSD */
+		struct xen_processor_performance    perf; /* Px: _PPC/_PCT/_PSS/_PSD */
+	};
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_set_processor_pminfo);
+
+struct xen_platform_op {
+	uint32_t cmd;
+	uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
+	union {
+		struct xenpf_settime           settime;
+		struct xenpf_add_memtype       add_memtype;
+		struct xenpf_del_memtype       del_memtype;
+		struct xenpf_read_memtype      read_memtype;
+		struct xenpf_microcode_update  microcode;
+		struct xenpf_platform_quirk    platform_quirk;
+		struct xenpf_firmware_info     firmware_info;
+		struct xenpf_enter_acpi_sleep  enter_acpi_sleep;
+		struct xenpf_change_freq       change_freq;
+		struct xenpf_getidletime       getidletime;
+		struct xenpf_set_processor_pminfo set_pminfo;
+		uint8_t                        pad[128];
+	} u;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_platform_op_t);
+
+#endif /* __XEN_PUBLIC_PLATFORM_H__ */
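A hedged sketch, not part of the patch, of how dom0 might issue one of these
platform ops; HYPERVISOR_dom0_op is assumed to be the platform-op hypercall
wrapper provided by the arch headers (the exact name may differ per tree):

#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>

static int example_xen_settime(uint32_t secs, uint32_t nsecs, uint64_t stime)
{
	struct xen_platform_op op = {
		.cmd		   = XENPF_settime,
		.interface_version = XENPF_INTERFACE_VERSION,
		.u.settime	   = {
			.secs	     = secs,
			.nsecs	     = nsecs,
			.system_time = stime,
		},
	};

	/* returns a negative errno on failure */
	return HYPERVISOR_dom0_op(&op);
}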
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 6acd9ce..6a6e914 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -492,6 +492,7 @@
 /* These flags are passed in the 'flags' field of start_info_t. */
 #define SIF_PRIVILEGED    (1<<0)  /* Is the domain privileged? */
 #define SIF_INITDOMAIN    (1<<1)  /* Is this the initial control domain? */
+#define SIF_PM_MASK       (0xFF<<8) /* reserve 1 byte for xen-pm options */
 
 typedef uint64_t cpumap_t;
 
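The reserved byte can be picked out of start_info->flags as below; this tiny
helper is a sketch with an invented name, not part of the patch:

#include <xen/interface/xen.h>

/* Sketch only: extract the xen-pm options byte reserved above. */
static inline unsigned int xen_pm_flags(const struct start_info *si)
{
	return (si->flags & SIF_PM_MASK) >> 8;
}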
diff --git a/init/main.c b/init/main.c
index 63f5f6f..217ed23 100644
--- a/init/main.c
+++ b/init/main.c
@@ -512,6 +512,9 @@
 	parse_args("Booting kernel", static_command_line, __start___param,
 		   __stop___param - __start___param,
 		   &unknown_bootoption);
+
+	jump_label_init();
+
 	/*
 	 * These use large bootmem allocations and must precede
 	 * kmem_cache_init()
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index a8ce450..bbdfe2a 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -104,6 +104,18 @@
 	return 0;
 }
 
+/*
+ * Update code which is definitely not currently executing.
+ * Architectures which need heavyweight synchronization to modify
+ * running code can override this to make the non-live update case
+ * cheaper.
+ */
+void __weak arch_jump_label_transform_static(struct jump_entry *entry,
+					    enum jump_label_type type)
+{
+	arch_jump_label_transform(entry, type);
+}
+
 static void __jump_label_update(struct jump_label_key *key,
 				struct jump_entry *entry,
 				struct jump_entry *stop, int enable)
@@ -121,14 +133,7 @@
 	}
 }
 
-/*
- * Not all archs need this.
- */
-void __weak arch_jump_label_text_poke_early(jump_label_t addr)
-{
-}
-
-static __init int jump_label_init(void)
+void __init jump_label_init(void)
 {
 	struct jump_entry *iter_start = __start___jump_table;
 	struct jump_entry *iter_stop = __stop___jump_table;
@@ -139,22 +144,22 @@
 	jump_label_sort_entries(iter_start, iter_stop);
 
 	for (iter = iter_start; iter < iter_stop; iter++) {
-		arch_jump_label_text_poke_early(iter->code);
-		if (iter->key == (jump_label_t)(unsigned long)key)
+		struct jump_label_key *iterk;
+
+		iterk = (struct jump_label_key *)(unsigned long)iter->key;
+		arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
+						 JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+		if (iterk == key)
 			continue;
 
-		key = (struct jump_label_key *)(unsigned long)iter->key;
-		atomic_set(&key->enabled, 0);
+		key = iterk;
 		key->entries = iter;
 #ifdef CONFIG_MODULES
 		key->next = NULL;
 #endif
 	}
 	jump_label_unlock();
-
-	return 0;
 }
-early_initcall(jump_label_init);
 
 #ifdef CONFIG_MODULES
 
@@ -212,7 +217,7 @@
 		return;
 
 	for (iter = iter_start; iter < iter_stop; iter++)
-		arch_jump_label_text_poke_early(iter->code);
+		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
 }
 
 static int jump_label_add_module(struct module *mod)
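On x86, for instance, an override can patch the bytes directly with
text_poke_early(), since nothing is executing the site yet. A condensed
sketch under that assumption; the jump-building helper is hypothetical:

void arch_jump_label_transform_static(struct jump_entry *entry,
				      enum jump_label_type type)
{
	/* Hypothetical helper: builds the 5-byte jump or nop for @entry. */
	const void *code = build_jump_or_nop(entry, type);

	/* Safe here: the site is not live, so no stop_machine()/IPIs. */
	text_poke_early((void *)entry->code, code, JUMP_LABEL_NOP_SIZE);
}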
diff --git a/lib/nlattr.c b/lib/nlattr.c
index ac09f22..a8408b6 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -20,6 +20,7 @@
 	[NLA_U16]	= sizeof(u16),
 	[NLA_U32]	= sizeof(u32),
 	[NLA_U64]	= sizeof(u64),
+	[NLA_MSECS]	= sizeof(u64),
 	[NLA_NESTED]	= NLA_HDRLEN,
 };
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0360d1b..a3278f0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1097,13 +1097,13 @@
 		pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
 					       background_thresh, nr_dirty,
 					       bdi_thresh, bdi_dirty);
-		if (unlikely(pos_ratio == 0)) {
+		task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
+							RATELIMIT_CALC_SHIFT;
+		if (unlikely(task_ratelimit == 0)) {
 			pause = max_pause;
 			goto pause;
 		}
-		task_ratelimit = (u64)dirty_ratelimit *
-					pos_ratio >> RATELIMIT_CALC_SHIFT;
-		pause = (HZ * pages_dirtied) / (task_ratelimit | 1);
+		pause = HZ * pages_dirtied / task_ratelimit;
 		if (unlikely(pause <= 0)) {
 			trace_balance_dirty_pages(bdi,
 						  dirty_thresh,
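To see why the reordering matters, a worked example with invented numbers
(RATELIMIT_CALC_SHIFT is 10 in this file): the new code bails out to
max_pause when the fixed-point product truncates to zero, instead of
papering over a zero divisor with `| 1':

#include <stdio.h>

#define RATELIMIT_CALC_SHIFT 10	/* matches mm/page-writeback.c */

int main(void)
{
	/* Invented numbers, purely illustrative. */
	unsigned long long dirty_ratelimit = 1024;	/* pages/s */
	unsigned long long pos_ratio = 512;		/* 0.5 in fixed point */
	unsigned long pages_dirtied = 32, hz = 1000;

	unsigned long task_ratelimit =
		(dirty_ratelimit * pos_ratio) >> RATELIMIT_CALC_SHIFT; /* 512 */

	if (task_ratelimit == 0) {
		puts("would sleep max_pause");
		return 0;
	}
	/* pause = 1000 * 32 / 512 = 62 jiffies */
	printf("pause = %lu jiffies\n", hz * pages_dirtied / task_ratelimit);
	return 0;
}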
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b84458d..be84ae3 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -613,7 +613,7 @@
 	if (!test_bit(HCI_RAW, &hdev->flags)) {
 		set_bit(HCI_INIT, &hdev->flags);
 		__hci_request(hdev, hci_reset_req, 0,
-					msecs_to_jiffies(250));
+					msecs_to_jiffies(HCI_INIT_TIMEOUT));
 		clear_bit(HCI_INIT, &hdev->flags);
 	}
 
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 940858a..2c76342 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -148,8 +148,6 @@
 
 		hci_del_off_timer(d);
 
-		set_bit(HCI_MGMT, &d->flags);
-
 		if (test_bit(HCI_SETUP, &d->flags))
 			continue;
 
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 38b618c..4e32e18 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -1802,6 +1802,11 @@
 			continue;
 		}
 
+		if (test_bit(RFCOMM_ENC_DROP, &d->flags)) {
+			__rfcomm_dlc_close(d, ECONNREFUSED);
+			continue;
+		}
+
 		if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) {
 			rfcomm_dlc_clear_timer(d);
 			if (d->out) {
@@ -2077,7 +2082,7 @@
 		if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) {
 			rfcomm_dlc_clear_timer(d);
 			if (status || encrypt == 0x00) {
-				__rfcomm_dlc_close(d, ECONNREFUSED);
+				set_bit(RFCOMM_ENC_DROP, &d->flags);
 				continue;
 			}
 		}
@@ -2088,7 +2093,7 @@
 				rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT);
 				continue;
 			} else if (d->sec_level == BT_SECURITY_HIGH) {
-				__rfcomm_dlc_close(d, ECONNREFUSED);
+				set_bit(RFCOMM_ENC_DROP, &d->flags);
 				continue;
 			}
 		}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index ebd7fb1..d06c65f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -832,6 +832,12 @@
 	if (is_multicast_ether_addr(mac))
 		return -EINVAL;
 
+	/* Only TDLS-supporting stations can add TDLS peers */
+	if ((params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) &&
+	    !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
+	      sdata->vif.type == NL80211_IFTYPE_STATION))
+		return -ENOTSUPP;
+
 	sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
 	if (!sta)
 		return -ENOMEM;
@@ -841,12 +847,6 @@
 
 	sta_apply_parameters(local, sta, params);
 
-	/* Only TDLS-supporting stations can add TDLS peers */
-	if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
-	    !((wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
-	      sdata->vif.type == NL80211_IFTYPE_STATION))
-		return -ENOTSUPP;
-
 	rate_control_rate_init(sta);
 
 	layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 4c3d1f5..ea10a51 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -389,6 +389,7 @@
 
 	unsigned long timers_running; /* used for quiesce/restart */
 	bool powersave; /* powersave requested for this iface */
+	bool broken_ap; /* AP is broken -- turn off powersave */
 	enum ieee80211_smps_mode req_smps, /* requested smps mode */
 				 ap_smps, /* smps mode AP thinks we're in */
 				 driver_smps_mode; /* smps mode request */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 96f9fae..72c8bea 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -639,6 +639,9 @@
 	if (!mgd->powersave)
 		return false;
 
+	if (mgd->broken_ap)
+		return false;
+
 	if (!mgd->associated)
 		return false;
 
@@ -1491,10 +1494,21 @@
 	capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
 
 	if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
-		printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
-		       "set\n", sdata->name, aid);
+		printk(KERN_DEBUG
+		       "%s: invalid AID value 0x%x; bits 15:14 not set\n",
+		       sdata->name, aid);
 	aid &= ~(BIT(15) | BIT(14));
 
+	ifmgd->broken_ap = false;
+
+	if (aid == 0 || aid > IEEE80211_MAX_AID) {
+		printk(KERN_DEBUG
+		       "%s: invalid AID value %d (out of range), turn off PS\n",
+		       sdata->name, aid);
+		aid = 0;
+		ifmgd->broken_ap = true;
+	}
+
 	pos = mgmt->u.assoc_resp.variable;
 	ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
 
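Condensed into a hedged helper (not in the patch, name invented), the AID
validation now reads:

/* Sketch: sanitize an association ID from an assoc response. Returns 0
 * when the AP handed out garbage, in which case the caller flags
 * broken_ap and keeps powersave off. */
static u16 example_sanitize_aid(u16 aid)
{
	/* Bits 15:14 must be set per the spec; the caller warns but
	 * continues when they are not. */
	aid &= ~(BIT(15) | BIT(14));

	if (aid == 0 || aid > IEEE80211_MAX_AID)	/* max is 2007 */
		return 0;

	return aid;
}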
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 94472eb..6c53b6d 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -1084,14 +1084,13 @@
 			continue;
 		if (wk->chan != local->tmp_channel)
 			continue;
-		if (ieee80211_work_ct_coexists(wk->chan_type,
-					       local->tmp_channel_type))
+		if (!ieee80211_work_ct_coexists(wk->chan_type,
+						local->tmp_channel_type))
 			continue;
 		remain_off_channel = true;
 	}
 
 	if (!remain_off_channel && local->tmp_channel) {
-		bool on_oper_chan = ieee80211_cfg_on_oper_channel(local);
 		local->tmp_channel = NULL;
 		/* If tmp_channel wasn't operating channel, then
 		 * we need to go back on-channel.
@@ -1101,7 +1100,7 @@
 		 * we still need to do a hardware config.  Currently,
 		 * we cannot be here while scanning, however.
 		 */
-		if (ieee80211_cfg_on_oper_channel(local) && !on_oper_chan)
+		if (!ieee80211_cfg_on_oper_channel(local))
 			ieee80211_hw_config(local, 0);
 
 		/* At the least, we need to disable offchannel_ps,
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index f346395..c43612e 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -81,7 +81,6 @@
  *	Iterator
  */
 static void *r_start(struct seq_file *m, loff_t *pos)
-	__acquires(kernel_lock)
 {
 	struct wan_device *wandev;
 	loff_t l = *pos;
@@ -103,7 +102,6 @@
 }
 
 static void r_stop(struct seq_file *m, void *v)
-	__releases(kernel_lock)
 {
 	mutex_unlock(&config_mutex);
 }
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f82480f..6ab58cc 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -262,13 +262,16 @@
 
 static void open_counters(struct perf_evlist *evlist)
 {
-	struct perf_evsel *pos;
+	struct perf_evsel *pos, *first;
 
 	if (evlist->cpus->map[0] < 0)
 		no_inherit = true;
 
+	first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
 	list_for_each_entry(pos, &evlist->entries, node) {
 		struct perf_event_attr *attr = &pos->attr;
+		struct xyarray *group_fd = NULL;
 		/*
 		 * Check if parse_single_tracepoint_event has already asked for
 		 * PERF_SAMPLE_TIME.
@@ -283,15 +286,19 @@
 		 */
 		bool time_needed = attr->sample_type & PERF_SAMPLE_TIME;
 
+		if (group && pos != first)
+			group_fd = first->fd;
+
 		config_attr(pos, evlist);
 retry_sample_id:
 		attr->sample_id_all = sample_id_all_avail ? 1 : 0;
 try_again:
-		if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group) < 0) {
+		if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
+				     group_fd) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES) {
-				ui__warning_paranoid();
+				ui__error_paranoid();
 				exit(EXIT_FAILURE);
 			} else if (err ==  ENODEV && cpu_list) {
 				die("No such device - did you specify"
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7ce65f5..7d98676 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -278,9 +278,14 @@
 struct stats			runtime_dtlb_cache_stats[MAX_NR_CPUS];
 struct stats			walltime_nsecs_stats;
 
-static int create_perf_stat_counter(struct perf_evsel *evsel)
+static int create_perf_stat_counter(struct perf_evsel *evsel,
+				    struct perf_evsel *first)
 {
 	struct perf_event_attr *attr = &evsel->attr;
+	struct xyarray *group_fd = NULL;
+
+	if (group && evsel != first)
+		group_fd = first->fd;
 
 	if (scale)
 		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -289,14 +294,15 @@
 	attr->inherit = !no_inherit;
 
 	if (system_wide)
-		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, group);
-
+		return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
+						group, group_fd);
 	if (target_pid == -1 && target_tid == -1) {
 		attr->disabled = 1;
 		attr->enable_on_exec = 1;
 	}
 
-	return perf_evsel__open_per_thread(evsel, evsel_list->threads, group);
+	return perf_evsel__open_per_thread(evsel, evsel_list->threads,
+					   group, group_fd);
 }
 
 /*
@@ -396,7 +402,7 @@
 static int run_perf_stat(int argc __used, const char **argv)
 {
 	unsigned long long t0, t1;
-	struct perf_evsel *counter;
+	struct perf_evsel *counter, *first;
 	int status = 0;
 	int child_ready_pipe[2], go_pipe[2];
 	const bool forks = (argc > 0);
@@ -453,8 +459,10 @@
 		close(child_ready_pipe[0]);
 	}
 
+	first = list_entry(evsel_list->entries.next, struct perf_evsel, node);
+
 	list_for_each_entry(counter, &evsel_list->entries, node) {
-		if (create_perf_stat_counter(counter) < 0) {
+		if (create_perf_stat_counter(counter, first) < 0) {
 			if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
 				if (verbose)
 					ui__warning("%s event is not supported by the kernel.\n",
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index efe696f..831d1ba 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -291,7 +291,7 @@
 		goto out_thread_map_delete;
 	}
 
-	if (perf_evsel__open_per_thread(evsel, threads, false) < 0) {
+	if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) {
 		pr_debug("failed to open counter: %s, "
 			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 			 strerror(errno));
@@ -366,7 +366,7 @@
 		goto out_thread_map_delete;
 	}
 
-	if (perf_evsel__open(evsel, cpus, threads, false) < 0) {
+	if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) {
 		pr_debug("failed to open counter: %s, "
 			 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 			 strerror(errno));
@@ -531,7 +531,7 @@
 
 		perf_evlist__add(evlist, evsels[i]);
 
-		if (perf_evsel__open(evsels[i], cpus, threads, false) < 0) {
+		if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) {
 			pr_debug("failed to open counter: %s, "
 				 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
 				 strerror(errno));
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7a87171..c9cdedb 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -89,6 +89,7 @@
 static bool			inherit				=  false;
 static int			realtime_prio			=      0;
 static bool			group				=  false;
+static bool			sample_id_all_avail		=   true;
 static unsigned int		mmap_pages			=    128;
 
 static bool			dump_symtab                     =  false;
@@ -199,7 +200,8 @@
 	struct symbol *sym;
 
 	if (he == NULL || he->ms.sym == NULL ||
-	    (he != top.sym_filter_entry && use_browser != 1))
+	    ((top.sym_filter_entry == NULL ||
+	      top.sym_filter_entry->ms.sym != he->ms.sym) && use_browser != 1))
 		return;
 
 	sym = he->ms.sym;
@@ -289,11 +291,13 @@
 
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
 
-	if (top.total_lost_warned != top.session->hists.stats.total_lost) {
-		top.total_lost_warned = top.session->hists.stats.total_lost;
-		color_fprintf(stdout, PERF_COLOR_RED, "WARNING:");
-		printf(" LOST %" PRIu64 " events, Check IO/CPU overload\n",
-		       top.total_lost_warned);
+	if (top.sym_evsel->hists.stats.nr_lost_warned !=
+	    top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST]) {
+		top.sym_evsel->hists.stats.nr_lost_warned =
+			top.sym_evsel->hists.stats.nr_events[PERF_RECORD_LOST];
+		color_fprintf(stdout, PERF_COLOR_RED,
+			      "WARNING: LOST %d chunks, Check IO/CPU overload",
+			      top.sym_evsel->hists.stats.nr_lost_warned);
 		++printed;
 	}
 
@@ -561,7 +565,6 @@
 	hists__decay_entries_threaded(&t->sym_evsel->hists,
 				      top.hide_user_symbols,
 				      top.hide_kernel_symbols);
-	hists__output_recalc_col_len(&t->sym_evsel->hists, winsize.ws_row - 3);
 }
 
 static void *display_thread_tui(void *arg __used)
@@ -671,6 +674,7 @@
 }
 
 static void perf_event__process_sample(const union perf_event *event,
+				       struct perf_evsel *evsel,
 				       struct perf_sample *sample,
 				       struct perf_session *session)
 {
@@ -770,12 +774,8 @@
 	}
 
 	if (al.sym == NULL || !al.sym->ignore) {
-		struct perf_evsel *evsel;
 		struct hist_entry *he;
 
-		evsel = perf_evlist__id2evsel(top.evlist, sample->id);
-		assert(evsel != NULL);
-
 		if ((sort__has_parent || symbol_conf.use_callchain) &&
 		    sample->callchain) {
 			err = perf_session__resolve_callchain(session, al.thread,
@@ -807,6 +807,7 @@
 static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
 {
 	struct perf_sample sample;
+	struct perf_evsel *evsel;
 	union perf_event *event;
 	int ret;
 
@@ -817,10 +818,16 @@
 			continue;
 		}
 
+		evsel = perf_evlist__id2evsel(self->evlist, sample.id);
+		assert(evsel != NULL);
+
 		if (event->header.type == PERF_RECORD_SAMPLE)
-			perf_event__process_sample(event, &sample, self);
-		else
+			perf_event__process_sample(event, evsel, &sample, self);
+		else if (event->header.type < PERF_RECORD_MAX) {
+			hists__inc_nr_events(&evsel->hists, event->header.type);
 			perf_event__process(event, &sample, self);
+		} else
+			++self->hists.stats.nr_unknown_events;
 	}
 }
 
@@ -834,10 +841,16 @@
 
 static void start_counters(struct perf_evlist *evlist)
 {
-	struct perf_evsel *counter;
+	struct perf_evsel *counter, *first;
+
+	first = list_entry(evlist->entries.next, struct perf_evsel, node);
 
 	list_for_each_entry(counter, &evlist->entries, node) {
 		struct perf_event_attr *attr = &counter->attr;
+		struct xyarray *group_fd = NULL;
+
+		if (group && counter != first)
+			group_fd = first->fd;
 
 		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
 
@@ -858,14 +871,23 @@
 		attr->mmap = 1;
 		attr->comm = 1;
 		attr->inherit = inherit;
+retry_sample_id:
+		attr->sample_id_all = sample_id_all_avail ? 1 : 0;
 try_again:
 		if (perf_evsel__open(counter, top.evlist->cpus,
-				     top.evlist->threads, group) < 0) {
+				     top.evlist->threads, group,
+				     group_fd) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES) {
-				ui__warning_paranoid();
+				ui__error_paranoid();
 				goto out_err;
+			} else if (err == EINVAL && sample_id_all_avail) {
+				/*
+				 * Old kernel, no attr->sample_id_all field
+				 */
+				sample_id_all_avail = false;
+				goto retry_sample_id;
 			}
 			/*
 			 * If it's cycles then fall back to hrtimer
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index bc8f477..119e996 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -310,9 +310,12 @@
 		}
 		err = -ENOENT;
 		dso->annotate_warned = 1;
-		pr_err("Can't annotate %s: No vmlinux file%s was found in the "
-		       "path.\nPlease use 'perf buildid-cache -av vmlinux' or "
-		       "--vmlinux vmlinux.\n",
+		pr_err("Can't annotate %s:\n\n"
+		       "No vmlinux file%s\nwas found in the path.\n\n"
+		       "Please use:\n\n"
+		       "  perf buildid-cache -av vmlinux\n\n"
+		       "or:\n\n"
+		       "  --vmlinux vmlinux",
 		       sym->name, build_id_msg ?: "");
 		goto out_free_filename;
 	}
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 155749d..26817da 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -47,19 +47,20 @@
 }
 
 #ifdef NO_NEWT_SUPPORT
-void ui__warning(const char *format, ...)
+int ui__warning(const char *format, ...)
 {
 	va_list args;
 
 	va_start(args, format);
 	vfprintf(stderr, format, args);
 	va_end(args);
+	return 0;
 }
 #endif
 
-void ui__warning_paranoid(void)
+int ui__error_paranoid(void)
 {
-	ui__warning("Permission error - are you root?\n"
+	return ui__error("Permission error - are you root?\n"
 		    "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
 		    " -1 - Not paranoid at all\n"
 		    "  0 - Disallow raw tracepoint access for unpriv\n"
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index fd53db4..f2ce88d0 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -19,23 +19,18 @@
 	return 0;
 }
 
-static inline struct ui_progress *ui_progress__new(const char *title __used,
-						   u64 total __used)
-{
-	return (struct ui_progress *)1;
-}
+static inline void ui_progress__update(u64 curr __used, u64 total __used,
+				       const char *title __used) {}
 
-static inline void ui_progress__update(struct ui_progress *self __used,
-				       u64 curr __used) {}
-
-static inline void ui_progress__delete(struct ui_progress *self __used) {}
+#define ui__error(format, arg...) ui__warning(format, ##arg)
 #else
 extern char ui_helpline__last_msg[];
 int ui_helpline__show_help(const char *format, va_list ap);
 #include "ui/progress.h"
+int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
 #endif
 
-void ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
-void ui__warning_paranoid(void);
+int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
+int ui__error_paranoid(void);
 
 #endif	/* __PERF_DEBUG_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 2f6bc89..fbb4b4a 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -539,3 +539,33 @@
 {
 	evlist->selected = evsel;
 }
+
+int perf_evlist__open(struct perf_evlist *evlist, bool group)
+{
+	struct perf_evsel *evsel, *first;
+	int err, ncpus, nthreads;
+
+	first = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		struct xyarray *group_fd = NULL;
+
+		if (group && evsel != first)
+			group_fd = first->fd;
+
+		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
+				       group, group_fd);
+		if (err < 0)
+			goto out_err;
+	}
+
+	return 0;
+out_err:
+	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
+	nthreads = evlist->threads ? evlist->threads->nr : 1;
+
+	list_for_each_entry_reverse(evsel, &evlist->entries, node)
+		perf_evsel__close(evsel, ncpus, nthreads);
+
+	return err;
+}
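A hedged usage sketch following this tree's evlist API: with group=true,
every evsel after the first is opened against the first one's fds as group
leaders, and on failure everything already opened is rolled back:

static int example_open_group(struct cpu_map *cpus, struct thread_map *threads)
{
	struct perf_evlist *evlist = perf_evlist__new(cpus, threads);

	if (evlist == NULL)
		return -ENOMEM;

	/* ... perf_evlist__add() the counters of interest ... */

	/* group=true: evsels 2..n join the first evsel's group leaders */
	if (perf_evlist__open(evlist, true) < 0)
		return -errno;	/* partial opens were closed for us */

	return 0;
}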
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 6be71fc..1779ffe 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -50,6 +50,8 @@
 
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
+int perf_evlist__open(struct perf_evlist *evlist, bool group);
+
 int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
 void perf_evlist__munmap(struct perf_evlist *evlist);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index b46f6e4..e426264 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -16,6 +16,7 @@
 #include "thread_map.h"
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
 
 int __perf_evsel__sample_size(u64 sample_type)
 {
@@ -204,15 +205,16 @@
 }
 
 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-			      struct thread_map *threads, bool group)
+			      struct thread_map *threads, bool group,
+			      struct xyarray *group_fds)
 {
 	int cpu, thread;
 	unsigned long flags = 0;
-	int pid = -1;
+	int pid = -1, err;
 
 	if (evsel->fd == NULL &&
 	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
-		return -1;
+		return -ENOMEM;
 
 	if (evsel->cgrp) {
 		flags = PERF_FLAG_PID_CGROUP;
@@ -220,7 +222,7 @@
 	}
 
 	for (cpu = 0; cpu < cpus->nr; cpu++) {
-		int group_fd = -1;
+		int group_fd = group_fds ? GROUP_FD(group_fds, cpu) : -1;
 
 		for (thread = 0; thread < threads->nr; thread++) {
 
@@ -231,8 +233,10 @@
 								     pid,
 								     cpus->map[cpu],
 								     group_fd, flags);
-			if (FD(evsel, cpu, thread) < 0)
+			if (FD(evsel, cpu, thread) < 0) {
+				err = -errno;
 				goto out_close;
+			}
 
 			if (group && group_fd == -1)
 				group_fd = FD(evsel, cpu, thread);
@@ -249,7 +253,17 @@
 		}
 		thread = threads->nr;
 	} while (--cpu >= 0);
-	return -1;
+	return err;
+}
+
+void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+	if (evsel->fd == NULL)
+		return;
+
+	perf_evsel__close_fd(evsel, ncpus, nthreads);
+	perf_evsel__free_fd(evsel);
+	evsel->fd = NULL;
 }
 
 static struct {
@@ -269,7 +283,8 @@
 };
 
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-		     struct thread_map *threads, bool group)
+		     struct thread_map *threads, bool group,
+		     struct xyarray *group_fd)
 {
 	if (cpus == NULL) {
 		/* Work around old compiler warnings about strict aliasing */
@@ -279,19 +294,23 @@
 	if (threads == NULL)
 		threads = &empty_thread_map.map;
 
-	return __perf_evsel__open(evsel, cpus, threads, group);
+	return __perf_evsel__open(evsel, cpus, threads, group, group_fd);
 }
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-			     struct cpu_map *cpus, bool group)
+			     struct cpu_map *cpus, bool group,
+			     struct xyarray *group_fd)
 {
-	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group);
+	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group,
+				  group_fd);
 }
 
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-				struct thread_map *threads, bool group)
+				struct thread_map *threads, bool group,
+				struct xyarray *group_fd)
 {
-	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group);
+	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group,
+				  group_fd);
 }
 
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
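At the syscall level the group_fds plumbing reduces to the kernel's usual
leader/sibling convention. A hedged user-space sketch of what GROUP_FD()
ends up handing to perf_event_open() per CPU (attrs assumed filled in
elsewhere):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
			       int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int example_open_pair(struct perf_event_attr *leader_attr,
		      struct perf_event_attr *member_attr, int cpu)
{
	/* The group leader opens with group_fd == -1 ... */
	int leader = sys_perf_event_open(leader_attr, -1, cpu, -1, 0);

	if (leader < 0)
		return -1;

	/* ... and each sibling on the same CPU passes the leader's fd. */
	return sys_perf_event_open(member_attr, -1, cpu, leader, 0);
}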
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index e9a3155..b1d15e6 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -82,11 +82,15 @@
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
-			     struct cpu_map *cpus, bool group);
+			     struct cpu_map *cpus, bool group,
+			     struct xyarray *group_fds);
 int perf_evsel__open_per_thread(struct perf_evsel *evsel,
-				struct thread_map *threads, bool group);
+				struct thread_map *threads, bool group,
+				struct xyarray *group_fds);
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
-		     struct thread_map *threads, bool group);
+		     struct thread_map *threads, bool group,
+		     struct xyarray *group_fds);
+void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 #define perf_evsel__match(evsel, t, c)		\
 	(evsel->attr.type == PERF_TYPE_##t &&	\
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 76c0b2c..bcd05d0 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1,5 +1,6 @@
 #define _FILE_OFFSET_BITS 64
 
+#include "util.h"
 #include <sys/types.h>
 #include <byteswap.h>
 #include <unistd.h>
@@ -11,7 +12,6 @@
 
 #include "evlist.h"
 #include "evsel.h"
-#include "util.h"
 #include "header.h"
 #include "../perf.h"
 #include "trace-event.h"
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index f6a9939..a36a3fa 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -365,7 +365,6 @@
 
 	root = hists__get_rotate_entries_in(hists);
 	next = rb_first(root);
-	hists->stats.total_period = 0;
 
 	while (next) {
 		n = rb_entry(next, struct hist_entry, rb_node_in);
@@ -379,7 +378,6 @@
 			 * been set by, say, the hist_browser.
 			 */
 			hists__apply_filters(hists, n);
-			hists__inc_nr_entries(hists, n);
 		}
 	}
 }
@@ -442,6 +440,7 @@
 	hists->entries = RB_ROOT;
 
 	hists->nr_entries = 0;
+	hists->stats.total_period = 0;
 	hists__reset_col_len(hists);
 
 	while (next) {
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index ff93ddc..c86c1d2 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -28,6 +28,7 @@
 	u64 total_lost;
 	u64 total_invalid_chains;
 	u32 nr_events[PERF_RECORD_HEADER_MAX];
+	u32 nr_lost_warned;
 	u32 nr_unknown_events;
 	u32 nr_invalid_chains;
 	u32 nr_unknown_id;
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 7624324..9dd47a4 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -623,7 +623,11 @@
 		cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
 
 	evsel->attr.inherit = inherit;
-	if (perf_evsel__open(evsel, cpus, threads, group) < 0) {
+	/*
+	 * This will group just the fds for this single evsel, to group
+	 * multiple events, use evlist.open().
+	 */
+	if (perf_evsel__open(evsel, cpus, threads, group, NULL) < 0) {
 		PyErr_SetFromErrno(PyExc_OSError);
 		return NULL;
 	}
@@ -814,6 +818,25 @@
 	return Py_None;
 }
 
+static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist,
+				   PyObject *args, PyObject *kwargs)
+{
+	struct perf_evlist *evlist = &pevlist->evlist;
+	int group = 0;
+	static char *kwlist[] = { "group", NULL };
+
+	if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", kwlist, &group))
+		return NULL;
+
+	if (perf_evlist__open(evlist, group) < 0) {
+		PyErr_SetFromErrno(PyExc_OSError);
+		return NULL;
+	}
+
+	Py_INCREF(Py_None);
+	return Py_None;
+}
+
 static PyMethodDef pyrf_evlist__methods[] = {
 	{
 		.ml_name  = "mmap",
@@ -822,6 +845,12 @@
 		.ml_doc	  = PyDoc_STR("mmap the file descriptor table.")
 	},
 	{
+		.ml_name  = "open",
+		.ml_meth  = (PyCFunction)pyrf_evlist__open,
+		.ml_flags = METH_VARARGS | METH_KEYWORDS,
+		.ml_doc	  = PyDoc_STR("open the file descriptors.")
+	},
+	{
 		.ml_name  = "poll",
 		.ml_meth  = (PyCFunction)pyrf_evlist__poll,
 		.ml_flags = METH_VARARGS | METH_KEYWORDS,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 20e011c..85c1e6b7 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -502,6 +502,7 @@
 	struct perf_sample sample;
 	u64 limit = os->next_flush;
 	u64 last_ts = os->last_sample ? os->last_sample->timestamp : 0ULL;
+	unsigned idx = 0, progress_next = os->nr_samples / 16;
 	int ret;
 
 	if (!ops->ordered_samples || !limit)
@@ -521,6 +522,11 @@
 		os->last_flush = iter->timestamp;
 		list_del(&iter->list);
 		list_add(&iter->list, &os->sample_cache);
+		if (++idx >= progress_next) {
+			progress_next += os->nr_samples / 16;
+			ui_progress__update(idx, os->nr_samples,
+					    "Processing time ordered events...");
+		}
 	}
 
 	if (list_empty(head)) {
@@ -529,6 +535,8 @@
 		os->last_sample =
 			list_entry(head->prev, struct sample_queue, list);
 	}
+
+	os->nr_samples = 0;
 }
 
 /*
@@ -588,6 +596,7 @@
 	u64 timestamp = new->timestamp;
 	struct list_head *p;
 
+	++os->nr_samples;
 	os->last_sample = new;
 
 	if (!sample) {
@@ -738,10 +747,27 @@
 
 	dump_event(session, event, file_offset, sample);
 
+	evsel = perf_evlist__id2evsel(session->evlist, sample->id);
+	if (evsel != NULL && event->header.type != PERF_RECORD_SAMPLE) {
+		/*
+		 * XXX We're leaving PERF_RECORD_SAMPLE unaccounted here
+		 * because the tools right now may apply filters, discarding
+		 * some of the samples. For consistency, in the future we
+		 * should have something like nr_filtered_samples and remove
+		 * the sample->period from total_sample_period, etc. KISS for
+		 * now, though.
+		 *
+		 * Also testing against NULL allows us to handle files without
+		 * attr.sample_id_all and/or without PERF_SAMPLE_ID. In the
+		 * future probably it'll be a good idea to restrict event
+		 * processing via perf_session to files with both set.
+		 */
+		hists__inc_nr_events(&evsel->hists, event->header.type);
+	}
+
 	switch (event->header.type) {
 	case PERF_RECORD_SAMPLE:
 		dump_sample(session, event, sample);
-		evsel = perf_evlist__id2evsel(session->evlist, sample->id);
 		if (evsel == NULL) {
 			++session->hists.stats.nr_unknown_id;
 			return -1;
@@ -874,11 +900,11 @@
 					    const struct perf_event_ops *ops)
 {
 	if (ops->lost == perf_event__process_lost &&
-	    session->hists.stats.total_lost != 0) {
-		ui__warning("Processed %" PRIu64 " events and LOST %" PRIu64
-			    "!\n\nCheck IO/CPU overload!\n\n",
-			    session->hists.stats.total_period,
-			    session->hists.stats.total_lost);
+	    session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
+		ui__warning("Processed %d events and lost %d chunks!\n\n"
+			    "Check IO/CPU overload!\n\n",
+			    session->hists.stats.nr_events[0],
+			    session->hists.stats.nr_events[PERF_RECORD_LOST]);
 	}
 
 	if (session->hists.stats.nr_unknown_events != 0) {
@@ -1012,7 +1038,6 @@
 {
 	u64 head, page_offset, file_offset, file_pos, progress_next;
 	int err, mmap_prot, mmap_flags, map_idx = 0;
-	struct ui_progress *progress;
 	size_t	page_size, mmap_size;
 	char *buf, *mmaps[8];
 	union perf_event *event;
@@ -1030,9 +1055,6 @@
 		file_size = data_offset + data_size;
 
 	progress_next = file_size / 16;
-	progress = ui_progress__new("Processing events...", file_size);
-	if (progress == NULL)
-		return -1;
 
 	mmap_size = session->mmap_window;
 	if (mmap_size > file_size)
@@ -1095,7 +1117,8 @@
 
 	if (file_pos >= progress_next) {
 		progress_next += file_size / 16;
-		ui_progress__update(progress, file_pos);
+		ui_progress__update(file_pos, file_size,
+				    "Processing events...");
 	}
 
 	if (file_pos < file_size)
@@ -1106,7 +1129,6 @@
 	session->ordered_samples.next_flush = ULLONG_MAX;
 	flush_sample_queue(session, ops);
 out_err:
-	ui_progress__delete(progress);
 	perf_session__warn_about_errors(session, ops);
 	perf_session_free_sample_buffers(session);
 	return err;
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 514b06d..6e393c9 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -23,6 +23,7 @@
 	struct sample_queue	*sample_buffer;
 	struct sample_queue	*last_sample;
 	int			sample_buffer_idx;
+	unsigned int		nr_samples;
 };
 
 struct perf_session {
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index 01d1057..3996509 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -19,7 +19,6 @@
 	u64		   kernel_samples, us_samples;
 	u64		   exact_samples;
 	u64		   guest_us_samples, guest_kernel_samples;
-	u64		   total_lost_warned;
 	int		   print_entries, count_filter, delay_secs;
 	int		   freq;
 	pid_t		   target_pid, target_tid;
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 2d530cf..d2655f0 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -80,7 +80,7 @@
 	int ret = errno;
 
 	if (errno)
-		perror("trace-cmd");
+		perror("perf");
 	else
 		ret = -1;
 
diff --git a/tools/perf/util/ui/browser.c b/tools/perf/util/ui/browser.c
index 5359f37..5568291 100644
--- a/tools/perf/util/ui/browser.c
+++ b/tools/perf/util/ui/browser.c
@@ -4,6 +4,7 @@
 #include "libslang.h"
 #include <newt.h>
 #include "ui.h"
+#include "util.h"
 #include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
@@ -168,6 +169,59 @@
 	self->x = 0;
 }
 
+void ui_browser__handle_resize(struct ui_browser *browser)
+{
+	ui__refresh_dimensions(false);
+	ui_browser__show(browser, browser->title, ui_helpline__current);
+	ui_browser__refresh(browser);
+}
+
+int ui_browser__warning(struct ui_browser *browser, int timeout,
+			const char *format, ...)
+{
+	va_list args;
+	char *text;
+	int key = 0, err;
+
+	va_start(args, format);
+	err = vasprintf(&text, format, args);
+	va_end(args);
+
+	if (err < 0) {
+		va_start(args, format);
+		ui_helpline__vpush(format, args);
+		va_end(args);
+	} else {
+		while ((key = ui__question_window("Warning!", text,
+						   "Press any key...",
+						   timeout)) == K_RESIZE)
+			ui_browser__handle_resize(browser);
+		free(text);
+	}
+
+	return key;
+}
+
+int ui_browser__help_window(struct ui_browser *browser, const char *text)
+{
+	int key;
+
+	while ((key = ui__help_window(text)) == K_RESIZE)
+		ui_browser__handle_resize(browser);
+
+	return key;
+}
+
+bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text)
+{
+	int key;
+
+	while ((key = ui__dialog_yesno(text)) == K_RESIZE)
+		ui_browser__handle_resize(browser);
+
+	return key == K_ENTER || toupper(key) == 'Y';
+}
+
 void ui_browser__reset_index(struct ui_browser *self)
 {
 	self->index = self->top_idx = 0;
@@ -230,13 +284,15 @@
 		       (browser->nr_entries - 1));
 	}
 
+	SLsmg_set_char_set(1);
+
 	while (h < height) {
 	        ui_browser__gotorc(browser, row++, col);
-		SLsmg_set_char_set(1);
-		SLsmg_write_char(h == pct ? SLSMG_DIAMOND_CHAR : SLSMG_BOARD_CHAR);
-		SLsmg_set_char_set(0);
+		SLsmg_write_char(h == pct ? SLSMG_DIAMOND_CHAR : SLSMG_CKBRD_CHAR);
 		++h;
 	}
+
+	SLsmg_set_char_set(0);
 }
 
 static int __ui_browser__refresh(struct ui_browser *browser)
@@ -291,53 +347,10 @@
 	browser->seek(browser, browser->top_idx, SEEK_SET);
 }
 
-static int ui__getch(int delay_secs)
-{
-	struct timeval timeout, *ptimeout = delay_secs ? &timeout : NULL;
-	fd_set read_set;
-	int err, key;
-
-	FD_ZERO(&read_set);
-	FD_SET(0, &read_set);
-
-	if (delay_secs) {
-		timeout.tv_sec = delay_secs;
-		timeout.tv_usec = 0;
-	}
-
-        err = select(1, &read_set, NULL, NULL, ptimeout);
-
-	if (err == 0)
-		return K_TIMER;
-
-	if (err == -1) {
-		if (errno == EINTR)
-			return K_RESIZE;
-		return K_ERROR;
-	}
-
-	key = SLang_getkey();
-	if (key != K_ESC)
-		return key;
-
-	FD_ZERO(&read_set);
-	FD_SET(0, &read_set);
-	timeout.tv_sec = 0;
-	timeout.tv_usec = 20;
-        err = select(1, &read_set, NULL, NULL, &timeout);
-	if (err == 0)
-		return K_ESC;
-
-	SLang_ungetkey(key);
-	return SLkp_getkey();
-}
-
 int ui_browser__run(struct ui_browser *self, int delay_secs)
 {
 	int err, key;
 
-	pthread__unblock_sigwinch();
-
 	while (1) {
 		off_t offset;
 
@@ -351,10 +364,7 @@
 		key = ui__getch(delay_secs);
 
 		if (key == K_RESIZE) {
-			pthread_mutex_lock(&ui__lock);
-			SLtt_get_screen_size();
-			SLsmg_reinit_smg();
-			pthread_mutex_unlock(&ui__lock);
+			ui__refresh_dimensions(false);
 			ui_browser__refresh_dimensions(self);
 			__ui_browser__show_title(self, self->title);
 			ui_helpline__puts(self->helpline);
@@ -533,6 +543,47 @@
 	return -1;
 }
 
+void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence)
+{
+	switch (whence) {
+	case SEEK_SET:
+		browser->top = browser->entries;
+		break;
+	case SEEK_CUR:
+		browser->top = browser->top + browser->top_idx + offset;
+		break;
+	case SEEK_END:
+		browser->top = browser->top + browser->nr_entries + offset;
+		break;
+	default:
+		return;
+	}
+}
+
+unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
+{
+	unsigned int row = 0, idx = browser->top_idx;
+	char **pos;
+
+	if (browser->top == NULL)
+		browser->top = browser->entries;
+
+	pos = (char **)browser->top;
+	while (idx < browser->nr_entries) {
+		if (!browser->filter || !browser->filter(browser, *pos)) {
+			ui_browser__gotorc(browser, row, 0);
+			browser->write(browser, pos, row);
+			if (++row == browser->height)
+				break;
+		}
+
+		++idx;
+		++pos;
+	}
+
+	return row;
+}
+
 void ui_browser__init(void)
 {
 	int i = 0;
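A hedged sketch of a consumer of the new argv helpers, wiring a plain string
array into a browser; the function names are invented and error handling is
trimmed:

static void example_argv_write(struct ui_browser *browser, void *entry, int row)
{
	slsmg_write_nstring(*(char **)entry, browser->width);
}

static int example_argv_menu(char **argv, int argc)
{
	struct ui_browser menu = {
		.entries    = argv,
		.nr_entries = argc,
		.refresh    = ui_browser__argv_refresh,
		.seek	    = ui_browser__argv_seek,
		.write	    = example_argv_write,
	};

	if (ui_browser__show(&menu, "Pick one", "Press 'q' to exit") < 0)
		return -1;

	return ui_browser__run(&menu, 0);
}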
diff --git a/tools/perf/util/ui/browser.h b/tools/perf/util/ui/browser.h
index a2c707d..84d761b 100644
--- a/tools/perf/util/ui/browser.h
+++ b/tools/perf/util/ui/browser.h
@@ -43,6 +43,15 @@
 int ui_browser__refresh(struct ui_browser *self);
 int ui_browser__run(struct ui_browser *browser, int delay_secs);
 void ui_browser__update_nr_entries(struct ui_browser *browser, u32 nr_entries);
+void ui_browser__handle_resize(struct ui_browser *browser);
+
+int ui_browser__warning(struct ui_browser *browser, int timeout,
+			const char *format, ...);
+int ui_browser__help_window(struct ui_browser *browser, const char *text);
+bool ui_browser__dialog_yesno(struct ui_browser *browser, const char *text);
+
+void ui_browser__argv_seek(struct ui_browser *browser, off_t offset, int whence);
+unsigned int ui_browser__argv_refresh(struct ui_browser *browser);
 
 void ui_browser__rb_tree_seek(struct ui_browser *self, off_t offset, int whence);
 unsigned int ui_browser__rb_tree_refresh(struct ui_browser *self);
diff --git a/tools/perf/util/ui/browsers/annotate.c b/tools/perf/util/ui/browsers/annotate.c
index 4e0cb7fe..0575905 100644
--- a/tools/perf/util/ui/browsers/annotate.c
+++ b/tools/perf/util/ui/browsers/annotate.c
@@ -1,6 +1,9 @@
+#include "../../util.h"
 #include "../browser.h"
 #include "../helpline.h"
 #include "../libslang.h"
+#include "../ui.h"
+#include "../util.h"
 #include "../../annotate.h"
 #include "../../hist.h"
 #include "../../sort.h"
@@ -8,15 +11,6 @@
 #include <pthread.h>
 #include <newt.h>
 
-static void ui__error_window(const char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	newtWinMessagev((char *)"Error", (char *)"Ok", (char *)fmt, ap);
-	va_end(ap);
-}
-
 struct annotate_browser {
 	struct ui_browser b;
 	struct rb_root	  entries;
@@ -400,7 +394,7 @@
 		return -1;
 
 	if (symbol__annotate(sym, map, sizeof(struct objdump_line_rb_node)) < 0) {
-		ui__error_window(ui_helpline__last_msg);
+		ui__error("%s", ui_helpline__last_msg);
 		return -1;
 	}
 
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index 4663dcb..d0c94b4 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -17,6 +17,7 @@
 #include "../browser.h"
 #include "../helpline.h"
 #include "../util.h"
+#include "../ui.h"
 #include "map.h"
 
 struct hist_browser {
@@ -294,6 +295,15 @@
 	ui_browser__reset_index(&self->b);
 }
 
+static void ui_browser__warn_lost_events(struct ui_browser *browser)
+{
+	ui_browser__warning(browser, 4,
+		"Events are being lost, check IO/CPU overload!\n\n"
+		"You may want to run 'perf' using a RT scheduler policy:\n\n"
+		" perf top -r 80\n\n"
+		"Or reduce the sampling frequency.");
+}
+
 static int hist_browser__run(struct hist_browser *self, const char *ev_name,
 			     void(*timer)(void *arg), void *arg, int delay_secs)
 {
@@ -314,12 +324,18 @@
 		key = ui_browser__run(&self->b, delay_secs);
 
 		switch (key) {
-		case -1:
-			/* FIXME we need to check if it was es.reason == NEWT_EXIT_TIMER */
+		case K_TIMER:
 			timer(arg);
 			ui_browser__update_nr_entries(&self->b, self->hists->nr_entries);
-			hists__browser_title(self->hists, title, sizeof(title),
-					     ev_name);
+
+			if (self->hists->stats.nr_lost_warned !=
+			    self->hists->stats.nr_events[PERF_RECORD_LOST]) {
+				self->hists->stats.nr_lost_warned =
+					self->hists->stats.nr_events[PERF_RECORD_LOST];
+				ui_browser__warn_lost_events(&self->b);
+			}
+
+			hists__browser_title(self->hists, title, sizeof(title), ev_name);
 			ui_browser__show_title(&self->b, title);
 			continue;
 		case 'D': { /* Debug */
@@ -883,7 +899,7 @@
 			goto out_free_stack;
 		case 'a':
 			if (!browser->has_symbols) {
-				ui__warning(
+				ui_browser__warning(&browser->b, delay_secs * 2,
 			"Annotation is only available for symbolic views, "
 			"include \"sym\" in --sort to use it.");
 				continue;
@@ -901,7 +917,8 @@
 		case K_F1:
 		case 'h':
 		case '?':
-			ui__help_window("h/?/F1        Show this window\n"
+			ui_browser__help_window(&browser->b,
+					"h/?/F1        Show this window\n"
 					"UP/DOWN/PGUP\n"
 					"PGDN/SPACE    Navigate\n"
 					"q/ESC/CTRL+C  Exit browser\n\n"
@@ -914,7 +931,7 @@
 					"C             Collapse all callchains\n"
 					"E             Expand all callchains\n"
 					"d             Zoom into current DSO\n"
-					"t             Zoom into current Thread\n");
+					"t             Zoom into current Thread");
 			continue;
 		case K_ENTER:
 		case K_RIGHT:
@@ -940,7 +957,8 @@
 		}
 		case K_ESC:
 			if (!left_exits &&
-			    !ui__dialog_yesno("Do you really want to exit?"))
+			    !ui_browser__dialog_yesno(&browser->b,
+					       "Do you really want to exit?"))
 				continue;
 			/* Fall thru */
 		case 'q':
@@ -993,6 +1011,7 @@
 
 		if (choice == annotate) {
 			struct hist_entry *he;
+			int err;
 do_annotate:
 			he = hist_browser__selected_entry(browser);
 			if (he == NULL)
@@ -1001,10 +1020,12 @@
 			 * Don't let this be freed, say, by hists__decay_entry.
 			 */
 			he->used = true;
-			hist_entry__tui_annotate(he, evsel->idx, nr_events,
-						 timer, arg, delay_secs);
+			err = hist_entry__tui_annotate(he, evsel->idx, nr_events,
+						       timer, arg, delay_secs);
 			he->used = false;
 			ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
+			if (err)
+				ui_browser__handle_resize(&browser->b);
 		} else if (choice == browse_map)
 			map__browse(browser->selection->map);
 		else if (choice == zoom_dso) {
@@ -1056,6 +1077,7 @@
 struct perf_evsel_menu {
 	struct ui_browser b;
 	struct perf_evsel *selection;
+	bool lost_events, lost_events_warned;
 };
 
 static void perf_evsel_menu__write(struct ui_browser *browser,
@@ -1068,14 +1090,29 @@
 	unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE];
 	const char *ev_name = event_name(evsel);
 	char bf[256], unit;
+	const char *warn = " ";
+	size_t printed;
 
 	ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
 						       HE_COLORSET_NORMAL);
 
 	nr_events = convert_unit(nr_events, &unit);
-	snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
-		 unit, unit == ' ' ? "" : " ", ev_name);
-	slsmg_write_nstring(bf, browser->width);
+	printed = snprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events,
+			   unit, unit == ' ' ? "" : " ", ev_name);
+	slsmg_printf("%s", bf);
+
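+	/* Append a highlighted LOST-chunks warning after the event name. */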
+	nr_events = evsel->hists.stats.nr_events[PERF_RECORD_LOST];
+	if (nr_events != 0) {
+		menu->lost_events = true;
+		if (!current_entry)
+			ui_browser__set_color(browser, HE_COLORSET_TOP);
+		nr_events = convert_unit(nr_events, &unit);
+		snprintf(bf, sizeof(bf), ": %ld%c%schunks LOST!", nr_events,
+			 unit, unit == ' ' ? "" : " ");
+		warn = bf;
+	}
+
+	slsmg_write_nstring(warn, browser->width - printed);
 
 	if (current_entry)
 		menu->selection = evsel;
@@ -1100,6 +1137,11 @@
 		switch (key) {
 		case K_TIMER:
 			timer(arg);
+
+			if (!menu->lost_events_warned && menu->lost_events) {
+				ui_browser__warn_lost_events(&menu->b);
+				menu->lost_events_warned = true;
+			}
 			continue;
 		case K_RIGHT:
 		case K_ENTER:
@@ -1133,7 +1175,8 @@
 					pos = list_entry(pos->node.prev, struct perf_evsel, node);
 				goto browse_hists;
 			case K_ESC:
-				if (!ui__dialog_yesno("Do you really want to exit?"))
+				if (!ui_browser__dialog_yesno(&menu->b,
+						"Do you really want to exit?"))
 					continue;
 				/* Fall thru */
 			case 'q':
@@ -1145,7 +1188,8 @@
 		case K_LEFT:
 			continue;
 		case K_ESC:
-			if (!ui__dialog_yesno("Do you really want to exit?"))
+			if (!ui_browser__dialog_yesno(&menu->b,
+					       "Do you really want to exit?"))
 				continue;
 			/* Fall thru */
 		case 'q':
diff --git a/tools/perf/util/ui/helpline.c b/tools/perf/util/ui/helpline.c
index f36d2ff5..6ef3c56 100644
--- a/tools/perf/util/ui/helpline.c
+++ b/tools/perf/util/ui/helpline.c
@@ -1,20 +1,28 @@
 #define _GNU_SOURCE
 #include <stdio.h>
 #include <stdlib.h>
-#include <newt.h>
+#include <string.h>
 
 #include "../debug.h"
 #include "helpline.h"
 #include "ui.h"
+#include "libslang.h"
 
 void ui_helpline__pop(void)
 {
-	newtPopHelpLine();
 }
 
+char ui_helpline__current[512];
+
 void ui_helpline__push(const char *msg)
 {
-	newtPushHelpLine(msg);
+	const size_t sz = sizeof(ui_helpline__current);
+
+	SLsmg_gotorc(SLtt_Screen_Rows - 1, 0);
+	SLsmg_set_color(0);
+	SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols);
+	SLsmg_refresh();
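+	/* strncpy() returns its destination, so index it to guarantee NUL termination. */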
+	strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0';
 }
 
 void ui_helpline__vpush(const char *fmt, va_list ap)
@@ -63,7 +71,7 @@
 
 	if (ui_helpline__last_msg[backlog - 1] == '\n') {
 		ui_helpline__puts(ui_helpline__last_msg);
-		newtRefresh();
+		SLsmg_refresh();
 		backlog = 0;
 	}
 	pthread_mutex_unlock(&ui__lock);
diff --git a/tools/perf/util/ui/helpline.h b/tools/perf/util/ui/helpline.h
index fdcbc02..7bab6b3 100644
--- a/tools/perf/util/ui/helpline.h
+++ b/tools/perf/util/ui/helpline.h
@@ -11,4 +11,6 @@
 void ui_helpline__fpush(const char *fmt, ...);
 void ui_helpline__puts(const char *msg);
 
+extern char ui_helpline__current[];
+
 #endif /* _PERF_UI_HELPLINE_H_ */
diff --git a/tools/perf/util/ui/progress.c b/tools/perf/util/ui/progress.c
index d7fc399..295e366 100644
--- a/tools/perf/util/ui/progress.c
+++ b/tools/perf/util/ui/progress.c
@@ -1,60 +1,29 @@
-#include <stdlib.h>
-#include <newt.h>
 #include "../cache.h"
 #include "progress.h"
+#include "libslang.h"
+#include "ui.h"
+#include "browser.h"
 
-struct ui_progress {
-	newtComponent form, scale;
-};
-
-struct ui_progress *ui_progress__new(const char *title, u64 total)
+void ui_progress__update(u64 curr, u64 total, const char *title)
 {
-	struct ui_progress *self = malloc(sizeof(*self));
-
-	if (self != NULL) {
-		int cols;
-
-		if (use_browser <= 0)
-			return self;
-		newtGetScreenSize(&cols, NULL);
-		cols -= 4;
-		newtCenteredWindow(cols, 1, title);
-		self->form  = newtForm(NULL, NULL, 0);
-		if (self->form == NULL)
-			goto out_free_self;
-		self->scale = newtScale(0, 0, cols, total);
-		if (self->scale == NULL)
-			goto out_free_form;
-		newtFormAddComponent(self->form, self->scale);
-		newtRefresh();
-	}
-
-	return self;
-
-out_free_form:
-	newtFormDestroy(self->form);
-out_free_self:
-	free(self);
-	return NULL;
-}
-
-void ui_progress__update(struct ui_progress *self, u64 curr)
-{
+	int bar, y;
 	/*
 	 * FIXME: We should have a per UI backend way of showing progress,
 	 * stdio will just show a percentage as NN%, etc.
 	 */
 	if (use_browser <= 0)
 		return;
-	newtScaleSet(self->scale, curr);
-	newtRefresh();
-}
 
-void ui_progress__delete(struct ui_progress *self)
-{
-	if (use_browser > 0) {
-		newtFormDestroy(self->form);
-		newtPopWindow();
-	}
-	free(self);
+	ui__refresh_dimensions(true);
+	pthread_mutex_lock(&ui__lock);
+	y = SLtt_Screen_Rows / 2 - 2;
+	SLsmg_set_color(0);
+	SLsmg_draw_box(y, 0, 3, SLtt_Screen_Cols);
+	SLsmg_gotorc(y++, 1);
+	SLsmg_write_string((char *)title);
+	SLsmg_set_color(HE_COLORSET_SELECTED);
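+	/* Scale progress to the width available inside the box borders. */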
+	bar = ((SLtt_Screen_Cols - 2) * curr) / total;
+	SLsmg_fill_region(y, 1, 1, bar, ' ');
+	SLsmg_refresh();
+	pthread_mutex_unlock(&ui__lock);
 }
diff --git a/tools/perf/util/ui/progress.h b/tools/perf/util/ui/progress.h
index a3820a0..d9c205b 100644
--- a/tools/perf/util/ui/progress.h
+++ b/tools/perf/util/ui/progress.h
@@ -1,11 +1,8 @@
 #ifndef _PERF_UI_PROGRESS_H_
 #define _PERF_UI_PROGRESS_H_ 1
 
-struct ui_progress;
+#include <../types.h>
 
-struct ui_progress *ui_progress__new(const char *title, u64 total);
-void ui_progress__delete(struct ui_progress *self);
-
-void ui_progress__update(struct ui_progress *self, u64 curr);
+void ui_progress__update(u64 curr, u64 total, const char *title);
 
 #endif
diff --git a/tools/perf/util/ui/setup.c b/tools/perf/util/ui/setup.c
index 1e6ba06..85a69fa 100644
--- a/tools/perf/util/ui/setup.c
+++ b/tools/perf/util/ui/setup.c
@@ -7,10 +7,85 @@
 #include "browser.h"
 #include "helpline.h"
 #include "ui.h"
+#include "util.h"
 #include "libslang.h"
+#include "keysyms.h"
 
 pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
 
+static volatile int ui__need_resize;
+
+void ui__refresh_dimensions(bool force)
+{
+	if (force || ui__need_resize) {
+		ui__need_resize = 0;
+		pthread_mutex_lock(&ui__lock);
+		SLtt_get_screen_size();
+		SLsmg_reinit_smg();
+		pthread_mutex_unlock(&ui__lock);
+	}
+}
+
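+/*
+ * Signal handler: just record that a resize is pending; the actual screen
+ * reinit happens later in ui__refresh_dimensions().
+ */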
+static void ui__sigwinch(int sig __used)
+{
+	ui__need_resize = 1;
+}
+
+static void ui__setup_sigwinch(void)
+{
+	static bool done;
+
+	if (done)
+		return;
+
+	done = true;
+	pthread__unblock_sigwinch();
+	signal(SIGWINCH, ui__sigwinch);
+}
+
+int ui__getch(int delay_secs)
+{
+	struct timeval timeout, *ptimeout = delay_secs ? &timeout : NULL;
+	fd_set read_set;
+	int err, key;
+
+	ui__setup_sigwinch();
+
+	FD_ZERO(&read_set);
+	FD_SET(0, &read_set);
+
+	if (delay_secs) {
+		timeout.tv_sec = delay_secs;
+		timeout.tv_usec = 0;
+	}
+
+	err = select(1, &read_set, NULL, NULL, ptimeout);
+
+	if (err == 0)
+		return K_TIMER;
+
+	if (err == -1) {
+		if (errno == EINTR)
+			return K_RESIZE;
+		return K_ERROR;
+	}
+
+	key = SLang_getkey();
+	if (key != K_ESC)
+		return key;
+
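+	/* Got ESC: poll briefly to see whether it starts an escape sequence. */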
+	FD_ZERO(&read_set);
+	FD_SET(0, &read_set);
+	timeout.tv_sec = 0;
+	timeout.tv_usec = 20;
+	err = select(1, &read_set, NULL, NULL, &timeout);
+	if (err == 0)
+		return K_ESC;
+
+	SLang_ungetkey(key);
+	return SLkp_getkey();
+}
+
 static void newt_suspend(void *d __used)
 {
 	newtSuspend();
@@ -71,10 +146,10 @@
 void exit_browser(bool wait_for_ok)
 {
 	if (use_browser > 0) {
-		if (wait_for_ok) {
-			char title[] = "Fatal Error", ok[] = "Ok";
-			newtWinMessage(title, ok, ui_helpline__last_msg);
-		}
+		if (wait_for_ok)
+			ui__question_window("Fatal Error",
+					    ui_helpline__last_msg,
+					    "Press any key...", 0);
 		ui__exit();
 	}
 }
diff --git a/tools/perf/util/ui/ui.h b/tools/perf/util/ui/ui.h
index d264e05..7b67045 100644
--- a/tools/perf/util/ui/ui.h
+++ b/tools/perf/util/ui/ui.h
@@ -2,7 +2,10 @@
 #define _PERF_UI_H_ 1
 
 #include <pthread.h>
+#include <stdbool.h>
 
 extern pthread_mutex_t ui__lock;
 
+void ui__refresh_dimensions(bool force);
+
 #endif /* _PERF_UI_H_ */
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index fdf1fc8..45daa7c 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -1,6 +1,5 @@
-#include <newt.h>
+#include "../util.h"
 #include <signal.h>
-#include <stdio.h>
 #include <stdbool.h>
 #include <string.h>
 #include <sys/ttydefaults.h>
@@ -8,72 +7,75 @@
 #include "../cache.h"
 #include "../debug.h"
 #include "browser.h"
+#include "keysyms.h"
 #include "helpline.h"
 #include "ui.h"
 #include "util.h"
+#include "libslang.h"
 
-static void newt_form__set_exit_keys(newtComponent self)
+static void ui_browser__argv_write(struct ui_browser *browser,
+				   void *entry, int row)
 {
-	newtFormAddHotKey(self, NEWT_KEY_LEFT);
-	newtFormAddHotKey(self, NEWT_KEY_ESCAPE);
-	newtFormAddHotKey(self, 'Q');
-	newtFormAddHotKey(self, 'q');
-	newtFormAddHotKey(self, CTRL('c'));
+	char **arg = entry;
+	bool current_entry = ui_browser__is_current_entry(browser, row);
+
+	ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
+						       HE_COLORSET_NORMAL);
+	slsmg_write_nstring(*arg, browser->width);
 }
 
-static newtComponent newt_form__new(void)
+static int popup_menu__run(struct ui_browser *menu)
 {
-	newtComponent self = newtForm(NULL, NULL, 0);
-	if (self)
-		newt_form__set_exit_keys(self);
-	return self;
+	int key;
+
+	if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
+		return -1;
+
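+	/* Loop until the user selects an entry or dismisses the menu. */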
+	while (1) {
+		key = ui_browser__run(menu, 0);
+
+		switch (key) {
+		case K_RIGHT:
+		case K_ENTER:
+			key = menu->index;
+			break;
+		case K_LEFT:
+		case K_ESC:
+		case 'q':
+		case CTRL('c'):
+			key = -1;
+			break;
+		default:
+			continue;
+		}
+
+		break;
+	}
+
+	ui_browser__hide(menu);
+	return key;
 }
 
 int ui__popup_menu(int argc, char * const argv[])
 {
-	struct newtExitStruct es;
-	int i, rc = -1, max_len = 5;
-	newtComponent listbox, form = newt_form__new();
+	struct ui_browser menu = {
+		.entries    = (void *)argv,
+		.refresh    = ui_browser__argv_refresh,
+		.seek	    = ui_browser__argv_seek,
+		.write	    = ui_browser__argv_write,
+		.nr_entries = argc,
+	};
 
-	if (form == NULL)
-		return -1;
-
-	listbox = newtListbox(0, 0, argc, NEWT_FLAG_RETURNEXIT);
-	if (listbox == NULL)
-		goto out_destroy_form;
-
-	newtFormAddComponent(form, listbox);
-
-	for (i = 0; i < argc; ++i) {
-		int len = strlen(argv[i]);
-		if (len > max_len)
-			max_len = len;
-		if (newtListboxAddEntry(listbox, argv[i], (void *)(long)i))
-			goto out_destroy_form;
-	}
-
-	newtCenteredWindow(max_len, argc, NULL);
-	newtFormRun(form, &es);
-	rc = newtListboxGetCurrent(listbox) - NULL;
-	if (es.reason == NEWT_EXIT_HOTKEY)
-		rc = -1;
-	newtPopWindow();
-out_destroy_form:
-	newtFormDestroy(form);
-	return rc;
+	return popup_menu__run(&menu);
 }
 
-int ui__help_window(const char *text)
+int ui__question_window(const char *title, const char *text,
+			const char *exit_msg, int delay_secs)
 {
-	struct newtExitStruct es;
-	newtComponent tb, form = newt_form__new();
-	int rc = -1;
+	int x, y;
 	int max_len = 0, nr_lines = 0;
 	const char *t;
 
-	if (form == NULL)
-		return -1;
-
 	t = text;
 	while (1) {
 		const char *sep = strchr(t, '\n');
@@ -90,41 +92,77 @@
 		t = sep + 1;
 	}
 
-	tb = newtTextbox(0, 0, max_len, nr_lines, 0);
-	if (tb == NULL)
-		goto out_destroy_form;
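+	/* Grow the window for the border plus a spacer line and the exit message. */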
+	max_len += 2;
+	nr_lines += 4;
+	y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+	x = SLtt_Screen_Cols / 2 - max_len / 2;
 
-	newtTextboxSetText(tb, text);
-	newtFormAddComponent(form, tb);
-	newtCenteredWindow(max_len, nr_lines, NULL);
-	newtFormRun(form, &es);
-	newtPopWindow();
-	rc = 0;
-out_destroy_form:
-	newtFormDestroy(form);
-	return rc;
+	SLsmg_set_color(0);
+	SLsmg_draw_box(y, x++, nr_lines, max_len);
+	if (title) {
+		SLsmg_gotorc(y, x + 1);
+		SLsmg_write_string((char *)title);
+	}
+	SLsmg_gotorc(++y, x);
+	nr_lines -= 2;
+	max_len -= 2;
+	SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+				   nr_lines, max_len, 1);
+	SLsmg_gotorc(y + nr_lines - 2, x);
+	SLsmg_write_nstring((char *)" ", max_len);
+	SLsmg_gotorc(y + nr_lines - 1, x);
+	SLsmg_write_nstring((char *)exit_msg, max_len);
+	SLsmg_refresh();
+	return ui__getch(delay_secs);
 }
 
-static const char yes[] = "Yes", no[] = "No",
-		  warning_str[] = "Warning!", ok[] = "Ok";
-
-bool ui__dialog_yesno(const char *msg)
+int ui__help_window(const char *text)
 {
-	/* newtWinChoice should really be accepting const char pointers... */
-	return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1;
+	return ui__question_window("Help", text, "Press any key...", 0);
 }
 
-void ui__warning(const char *format, ...)
+int ui__dialog_yesno(const char *msg)
 {
+	return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
+}
+
+int __ui__warning(const char *title, const char *format, va_list args)
+{
+	char *s;
+
+	if (use_browser > 0 && vasprintf(&s, format, args) > 0) {
+		int key;
+
+		pthread_mutex_lock(&ui__lock);
+		key = ui__question_window(title, s, "Press any key...", 0);
+		pthread_mutex_unlock(&ui__lock);
+		free(s);
+		return key;
+	}
+
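+	/* No TUI (or vasprintf() failed): fall back to stderr. */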
+	fprintf(stderr, "%s:\n", title);
+	vfprintf(stderr, format, args);
+	return K_ESC;
+}
+
+int ui__warning(const char *format, ...)
+{
+	int key;
 	va_list args;
 
 	va_start(args, format);
-	if (use_browser > 0) {
-		pthread_mutex_lock(&ui__lock);
-		newtWinMessagev((char *)warning_str, (char *)ok,
-				(char *)format, args);
-		pthread_mutex_unlock(&ui__lock);
-	} else
-		vfprintf(stderr, format, args);
+	key = __ui__warning("Warning", format, args);
 	va_end(args);
+	return key;
+}
+
+int ui__error(const char *format, ...)
+{
+	int key;
+	va_list args;
+
+	va_start(args, format);
+	key = __ui__warning("Error", format, args);
+	va_end(args);
+	return key;
 }
diff --git a/tools/perf/util/ui/util.h b/tools/perf/util/ui/util.h
index afcbc1d..2d1738b 100644
--- a/tools/perf/util/ui/util.h
+++ b/tools/perf/util/ui/util.h
@@ -1,10 +1,14 @@
 #ifndef _PERF_UI_UTIL_H_
 #define _PERF_UI_UTIL_H_ 1
 
-#include <stdbool.h>
+#include <stdarg.h>
 
+int ui__getch(int delay_secs);
 int ui__popup_menu(int argc, char * const argv[]);
 int ui__help_window(const char *text);
-bool ui__dialog_yesno(const char *msg);
+int ui__dialog_yesno(const char *msg);
+int ui__question_window(const char *title, const char *text,
+			const char *exit_msg, int delay_secs);
+int __ui__warning(const char *title, const char *format, va_list args);
 
 #endif /* _PERF_UI_UTIL_H_ */
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 8b2d37b..3c6f780 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -162,19 +162,21 @@
 
 void dump_cnt(struct counters *cnt)
 {
-	fprintf(stderr, "package: %d ", cnt->pkg);
-	fprintf(stderr, "core:: %d ", cnt->core);
-	fprintf(stderr, "CPU: %d ", cnt->cpu);
-	fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
-	fprintf(stderr, "c3: %016llX\n", cnt->c3);
-	fprintf(stderr, "c6: %016llX\n", cnt->c6);
-	fprintf(stderr, "c7: %016llX\n", cnt->c7);
-	fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
-	fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
-	fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
-	fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
-	fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
-	fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
+	if (!cnt)
+		return;
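+	/* Print only the non-zero counters to keep the dump compact. */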
+	if (cnt->pkg) fprintf(stderr, "package: %d ", cnt->pkg);
+	if (cnt->core) fprintf(stderr, "core:: %d ", cnt->core);
+	if (cnt->cpu) fprintf(stderr, "CPU: %d ", cnt->cpu);
+	if (cnt->tsc) fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
+	if (cnt->c3) fprintf(stderr, "c3: %016llX\n", cnt->c3);
+	if (cnt->c6) fprintf(stderr, "c6: %016llX\n", cnt->c6);
+	if (cnt->c7) fprintf(stderr, "c7: %016llX\n", cnt->c7);
+	if (cnt->aperf) fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
+	if (cnt->pc2) fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
+	if (cnt->pc3) fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
+	if (cnt->pc6) fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
+	if (cnt->pc7) fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
+	if (cnt->extra_msr) fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
 }
 
 void dump_list(struct counters *cnt)