// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0};

static struct hash_pte __initdata *Hash = (struct hash_pte *)early_hash;
static unsigned long __initdata Hash_size, Hash_mask;
static unsigned int __initdata hash_mb, hash_mb2;
unsigned long __initdata _SDR1;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */

static struct batrange {	/* stores address ranges mapped by BATs */
	unsigned long start;
	unsigned long limit;
	phys_addr_t phys;
} bat_addrs[8];

#ifdef CONFIG_SMP
unsigned long mmu_hash_lock;
#endif

/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	int b;
	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}

/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	int b;
	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (pa >= bat_addrs[b].phys &&
		    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}

int __init find_free_bat(void)
{
	int b;
	int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

	for (b = 0; b < n; b++) {
		struct ppc_bat *bat = BATS[b];

		if (!(bat[1].batu & 3))
			return b;
	}
	return -1;
}

/*
 * This function calculates the size of the largest block usable to map the
 * beginning of an area based on the start address and size of that area:
 * - max block size is 256 Mbytes on 6xx.
 * - base address must be aligned to the block size. So the maximum block size
 *   is identified by the lowest bit set to 1 in the base address (for instance
 *   if base is 0x16000000, max size is 0x02000000).
 * - block size has to be a power of two. This is calculated by finding the
 *   highest bit set to 1.
 */
unsigned int bat_block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = SZ_256M;
	unsigned int base_shift = (ffs(base) - 1) & 31;
	unsigned int block_shift = (fls(top - base) - 1) & 31;

	return min3(max_size, 1U << base_shift, 1U << block_shift);
}
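
/*
 * Worked example: with base = 0x16000000 and top = 0x20000000, the
 * alignment constraint gives 1 << base_shift = 0x02000000 and the size
 * constraint gives 1 << block_shift = 0x08000000, so the block used is
 * min3(0x10000000, 0x02000000, 0x08000000) = 0x02000000 (32 Mbytes).
 */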

/*
 * Set up one of the IBAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
static void setibat(int index, unsigned long virt, phys_addr_t phys,
		    unsigned int size, pgprot_t prot)
{
	unsigned int bl = (size >> 17) - 1;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
		flags &= ~_PAGE_COHERENT;

	wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
	bat[0].batu = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
	bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[0].batu |= 1;		/* Vp = 1 */
}
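
/*
 * Note: setibat() programs only the IBAT half of the pair (bat[0]);
 * the DBAT half (bat[1]) is programmed by setbat() below.
 */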

static void clearibat(int index)
{
	struct ppc_bat *bat = BATS[index];

	bat[0].batu = 0;
	bat[0].batl = 0;
}

static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
{
	int idx;

	while ((idx = find_free_bat()) != -1 && base != top) {
		unsigned int size = bat_block_size(base, top);

		if (size < 128 << 10)
			break;
		setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
		base += size;
	}

	return base;
}

unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long done;
	unsigned long border = (unsigned long)__srwx_boundary - PAGE_OFFSET;
	unsigned long size;

	size = roundup_pow_of_two((unsigned long)_einittext - PAGE_OFFSET);
	setibat(0, PAGE_OFFSET, 0, size, PAGE_KERNEL_X);

	if (debug_pagealloc_enabled_or_kfence()) {
		pr_debug_once("Read-Write memory mapped without BATs\n");
		if (base >= border)
			return base;
		if (top >= border)
			top = border;
	}

	if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
		return __mmu_mapin_ram(base, top);

	done = __mmu_mapin_ram(base, border);
	if (done != border)
		return done;

	return __mmu_mapin_ram(border, top);
}
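
/*
 * Mapping the regions below and above 'border' (the strict RWX boundary)
 * with separate calls keeps any single BAT from spanning that boundary,
 * so mmu_mark_rodata_ro() can later tighten the protection of the
 * text/rodata BATs without affecting read-write data.
 */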

static bool is_module_segment(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_MODULES))
		return false;
	if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
		return false;
	if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
		return false;
	return true;
}

void mmu_mark_initmem_nx(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;
	unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
	unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
	unsigned long size;

	for (i = 0; i < nb - 1 && base < top;) {
		size = bat_block_size(base, top);
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	if (base < top) {
		size = bat_block_size(base, top);
		if ((top - base) > size) {
			size <<= 1;
			if (strict_kernel_rwx_enabled() && base + size > border)
				pr_warn("Some RW data is getting mapped X. "
					"Adjust CONFIG_DATA_SHIFT to avoid that.\n");
		}
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
		base += size;
	}
	for (; i < nb; i++)
		clearibat(i);

	update_bats();

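	/*
	 * 0x10000000 is the no-execute (N) bit of the segment register;
	 * it is set for every kernel-space segment except those holding
	 * module space, so that module mappings stay executable.
	 */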
	for (i = TASK_SIZE >> 28; i < 16; i++) {
		/* Do not set NX on VM space for modules */
		if (is_module_segment(i << 28))
			continue;

		mtsr(mfsr(i << 28) | 0x10000000, i << 28);
	}
}

void mmu_mark_rodata_ro(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;

	for (i = 0; i < nb; i++) {
		struct ppc_bat *bat = BATS[i];

		if (bat_addrs[i].start < (unsigned long)__end_rodata)
			bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
	}

	update_bats();
}

/*
 * Set up one of the DBAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat;
	unsigned long flags = pgprot_val(prot);

	if (index == -1)
		index = find_free_bat();
	if (index == -1) {
		pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
		       (unsigned long long)phys);
		return;
	}
	bat = BATS[index];

	if ((flags & _PAGE_NO_CACHE) ||
	    (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
		flags &= ~_PAGE_COHERENT;

	bl = (size >> 17) - 1;
	/* Do DBAT first */
	wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
			   | _PAGE_COHERENT | _PAGE_GUARDED);
	wimgxpp |= (flags & _PAGE_RW) ? BPP_RW : BPP_RX;
	bat[1].batu = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
	bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
	if (flags & _PAGE_USER)
		bat[1].batu |= 1;		/* Vp = 1 */
	if (flags & _PAGE_GUARDED) {
		/* G bit must be zero in IBATs */
		flags &= ~_PAGE_EXEC;
	}

	bat_addrs[index].start = virt;
	bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
	bat_addrs[index].phys = phys;
}
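
/*
 * For example, a 256 Mbyte block at virt 0xc0000000 / phys 0 gives
 * bl = (0x10000000 >> 17) - 1 = 0x7ff, so bat[1].batu = 0xc0001ffe
 * (virt | bl << 2 | Vs) while bat[1].batl holds the physical address
 * together with the WIMG and protection bits.
 */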

/*
 * Preload a translation in the hash table
 */
static void hash_preload(struct mm_struct *mm, unsigned long ea)
{
	pmd_t *pmd;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	pmd = pmd_off(mm, ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
			pte_t *ptep)
{
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We have to test for regs NULL since init will get here first thing at boot */
	if (!current->thread.regs)
		return;

	/* We also avoid filling the hash if not coming from a fault (0x300 DSI or 0x400 ISI) */
	if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
		return;

	hash_preload(vma->vm_mm, address);
}

/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
	unsigned int n_hpteg, lg_n_hpteg;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6		/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024		/* min 64kB hash table */

	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;		/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;
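
	/*
	 * For example, with 256 Mbytes of RAM and 4 kbyte pages this gives
	 * n_hpteg = 0x10000000 / (0x1000 * 8) = 8192 HPTE groups, i.e. a
	 * 512 kbyte hash table (8192 << 6), allocated aligned to its size.
	 */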

	/*
	 * Find some memory for the hash table.
	 */
	if (ppc_md.progress)
		ppc_md.progress("hash:find piece", 0x322);
	Hash = memblock_alloc(Hash_size, Hash_size);
	if (!Hash)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, Hash_size, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

	pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
		(unsigned long long)(total_memory >> 20), Hash_size >> 10);

	Hash_mask = n_hpteg - 1;
	hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
	if (lg_n_hpteg > 16)
		hash_mb2 = 16 - LG_HPTEG_SIZE;
}

void __init MMU_init_hw_patch(void)
{
	unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
	unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:patch", 0x345);
	if (ppc_md.progress)
		ppc_md.progress("hash:done", 0x205);

	/* WARNING: Make sure nothing can trigger a KASAN check past this point */

	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 */
	modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
	modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);

	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
	modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
	modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
	modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
	modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M));
}

void __init print_system_hash_info(void)
{
	pr_info("Hash_size  = 0x%lx\n", Hash_size);
	if (Hash_mask)
		pr_info("Hash_mask  = 0x%lx\n", Hash_mask);
}

void __init early_init_mmu(void)
{
}