Mirror only - Please move to https://github.com/immortalwrt/immortalwrt
Révision | 64f4b111aa321657afa57545f9890e309db90711 (tree) |
---|---|
l'heure | 2022-01-15 20:29:20 |
Auteur | Tianling Shen <i@cnsz...> |
Committer | Tianling Shen |
Merge Official Source
Signed-off-by: Tianling Shen <i@cnsztl.eu.org>
@@ -0,0 +1,2 @@ | ||
1 | +LINUX_VERSION-5.10 = .90 | |
2 | +LINUX_KERNEL_HASH-5.10.90 = 945e4264c014a3d9dfc0a4639309dd1ec2fb545416556421f931b95da78c2725 |
@@ -0,0 +1,2 @@ | ||
1 | +LINUX_VERSION-5.4 = .170 | |
2 | +LINUX_KERNEL_HASH-5.4.170 = b09f74e0cf5fc7cf5de6aa932fe654c962cb10118bdbbdddb397022c6e6d382c |
@@ -6,11 +6,12 @@ ifdef CONFIG_TESTING_KERNEL | ||
6 | 6 | KERNEL_PATCHVER:=$(KERNEL_TESTING_PATCHVER) |
7 | 7 | endif |
8 | 8 | |
9 | -LINUX_VERSION-5.4 = .170 | |
10 | -LINUX_VERSION-5.10 = .90 | |
9 | +KERNEL_DETAILS_FILE=$(INCLUDE_DIR)/kernel-$(KERNEL_PATCHVER) | |
10 | +ifeq ($(wildcard $(KERNEL_DETAILS_FILE)),) | |
11 | + $(error Missing kernel version/hash file for $(KERNEL_PATCHVER). Please create $(KERNEL_DETAILS_FILE)) | |
12 | +endif | |
11 | 13 | |
12 | -LINUX_KERNEL_HASH-5.4.170 = b09f74e0cf5fc7cf5de6aa932fe654c962cb10118bdbbdddb397022c6e6d382c | |
13 | -LINUX_KERNEL_HASH-5.10.90 = 945e4264c014a3d9dfc0a4639309dd1ec2fb545416556421f931b95da78c2725 | |
14 | +include $(KERNEL_DETAILS_FILE) | |
14 | 15 | |
15 | 16 | remove_uri_prefix=$(subst git://,,$(subst http://,,$(subst https://,,$(1)))) |
16 | 17 | sanitize_uri=$(call qstrip,$(subst @,_,$(subst :,_,$(subst .,_,$(subst -,_,$(subst /,_,$(1))))))) |
@@ -5,9 +5,9 @@ PKG_RELEASE:=1 | ||
5 | 5 | |
6 | 6 | PKG_SOURCE_PROTO:=git |
7 | 7 | PKG_SOURCE_URL=$(PROJECT_GIT)/project/netifd.git |
8 | -PKG_SOURCE_DATE:=2021-12-02 | |
9 | -PKG_SOURCE_VERSION:=5ca5e0b4d058a47d72ba4102acdcec826e203c41 | |
10 | -PKG_MIRROR_HASH:=8a1fd6b634a0390a5ee512483c924b3ea8fa3ea9fa863d0434e3e66949faccab | |
8 | +PKG_SOURCE_DATE:=2022-01-14 | |
9 | +PKG_SOURCE_VERSION:=3043206e94da412eb19dd72ea68edcaca545d84c | |
10 | +PKG_MIRROR_HASH:=2bf5a59e93968b00f69b8b7ebbdfd28353c36bc5e7f72225d725c24d0ac7265f | |
11 | 11 | PKG_MAINTAINER:=Felix Fietkau <nbd@nbd.name> |
12 | 12 | |
13 | 13 | PKG_LICENSE:=GPL-2.0 |
@@ -411,9 +411,15 @@ hostapd_bss_get_status(struct ubus_context *ctx, struct ubus_object *obj, | ||
411 | 411 | char ssid[SSID_MAX_LEN + 1]; |
412 | 412 | char phy_name[17]; |
413 | 413 | size_t ssid_len = SSID_MAX_LEN; |
414 | + u8 channel = 0, op_class = 0; | |
414 | 415 | |
415 | 416 | if (hapd->conf->ssid.ssid_len < SSID_MAX_LEN) |
416 | 417 | ssid_len = hapd->conf->ssid.ssid_len; |
418 | + | |
419 | + ieee80211_freq_to_channel_ext(hapd->iface->freq, | |
420 | + hapd->iconf->secondary_channel, | |
421 | + hostapd_get_oper_chwidth(hapd->iconf), | |
422 | + &op_class, &channel); | |
417 | 423 | |
418 | 424 | blob_buf_init(&b, 0); |
419 | 425 | blobmsg_add_string(&b, "status", hostapd_state_text(hapd->iface->state)); |
@@ -424,7 +430,8 @@ hostapd_bss_get_status(struct ubus_context *ctx, struct ubus_object *obj, | ||
424 | 430 | blobmsg_add_string(&b, "ssid", ssid); |
425 | 431 | |
426 | 432 | blobmsg_add_u32(&b, "freq", hapd->iface->freq); |
427 | - blobmsg_add_u32(&b, "channel", ieee80211_frequency_to_channel(hapd->iface->freq)); | |
433 | + blobmsg_add_u32(&b, "channel", channel); | |
434 | + blobmsg_add_u32(&b, "op_class", op_class); | |
428 | 435 | blobmsg_add_u32(&b, "beacon_interval", hapd->iconf->beacon_int); |
429 | 436 | |
430 | 437 | snprintf(phy_name, 17, "%s", hapd->iface->phy); |
@@ -1930,4 +1937,4 @@ int hostapd_ubus_notify_bss_transition_query( | ||
1930 | 1937 | |
1931 | 1938 | return ureq.resp; |
1932 | 1939 | #endif |
1933 | -} | |
\ No newline at end of file | ||
1940 | +} |
@@ -10,7 +10,7 @@ include $(INCLUDE_DIR)/kernel.mk | ||
10 | 10 | |
11 | 11 | PKG_NAME:=iptables |
12 | 12 | PKG_VERSION:=1.8.7 |
13 | -PKG_RELEASE:=1 | |
13 | +PKG_RELEASE:=2 | |
14 | 14 | |
15 | 15 | PKG_SOURCE_URL:=https://netfilter.org/projects/iptables/files |
16 | 16 | PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2 |
@@ -60,7 +60,7 @@ define Package/iptables/config | ||
60 | 60 | |
61 | 61 | config IPTABLES_NFTABLES |
62 | 62 | bool "Enable Nftables support" |
63 | - default n | |
63 | + default y | |
64 | 64 | help |
65 | 65 | This enable nftables support in iptables. |
66 | 66 | endef |
@@ -12,9 +12,9 @@ PKG_RELEASE:=$(AUTORELEASE) | ||
12 | 12 | |
13 | 13 | PKG_SOURCE_PROTO:=git |
14 | 14 | PKG_SOURCE_URL=$(PROJECT_GIT)/project/procd.git |
15 | -PKG_MIRROR_HASH:=3ce6f5e2c12ae276af3ffe1755a495415f77184974206a44a3b083420aba52a8 | |
16 | -PKG_SOURCE_DATE:=2021-12-20 | |
17 | -PKG_SOURCE_VERSION:=129d050b9f5725ea4c54e1d906aba43eca95b860 | |
15 | +PKG_MIRROR_HASH:=0e506062a992f77979bd59eb51fb5d500e45197cc7bfba3a9302415d754dbfd1 | |
16 | +PKG_SOURCE_DATE:=2022-01-11 | |
17 | +PKG_SOURCE_VERSION:=ac2b8b365bdbcbf292f77409d180ec3c0963faf3 | |
18 | 18 | CMAKE_INSTALL:=1 |
19 | 19 | |
20 | 20 | PKG_LICENSE:=GPL-2.0 |
@@ -127,6 +127,10 @@ | ||
127 | 127 | }; |
128 | 128 | }; |
129 | 129 | |
130 | +&ref { | |
131 | + clock-frequency = <25000000>; | |
132 | +}; | |
133 | + | |
130 | 134 | &spi { |
131 | 135 | status = "okay"; |
132 | 136 |
@@ -194,7 +198,6 @@ | ||
194 | 198 | gmac-config { |
195 | 199 | device = <&gmac>; |
196 | 200 | rgmii-gmac0 = <1>; |
197 | - rgmii-enabled = <1>; | |
198 | 201 | rxd-delay = <1>; |
199 | 202 | txd-delay = <1>; |
200 | 203 | }; |
@@ -99,7 +99,6 @@ | ||
99 | 99 | partition@0 { |
100 | 100 | label = "RouterBoot"; |
101 | 101 | reg = <0x0 0x20000>; |
102 | - read-only; | |
103 | 102 | compatible = "mikrotik,routerboot-partitions"; |
104 | 103 | #address-cells = <1>; |
105 | 104 | #size-cells = <1>; |
@@ -38,7 +38,6 @@ | ||
38 | 38 | partition@0 { |
39 | 39 | label = "RouterBoot"; |
40 | 40 | reg = <0x0 0x20000>; |
41 | - read-only; | |
42 | 41 | compatible = "mikrotik,routerboot-partitions"; |
43 | 42 | #address-cells = <1>; |
44 | 43 | #size-cells = <1>; |
@@ -73,7 +73,6 @@ | ||
73 | 73 | partition@0 { |
74 | 74 | label = "RouterBoot"; |
75 | 75 | reg = <0x0 0x20000>; |
76 | - read-only; | |
77 | 76 | compatible = "mikrotik,routerboot-partitions"; |
78 | 77 | #address-cells = <1>; |
79 | 78 | #size-cells = <1>; |
@@ -112,8 +112,7 @@ | ||
112 | 112 | }; |
113 | 113 | |
114 | 114 | pll: pll-controller@18050000 { |
115 | - compatible = "qca,qca9550-pll", | |
116 | - "qca,qca9550-pll", "syscon"; | |
115 | + compatible = "qca,qca9550-pll", "syscon"; | |
117 | 116 | reg = <0x18050000 0x50>; |
118 | 117 | |
119 | 118 | #clock-cells = <1>; |
@@ -19,6 +19,7 @@ CONFIG_MTD_NAND_RB91X=y | ||
19 | 19 | CONFIG_MTD_RAW_NAND=y |
20 | 20 | CONFIG_MTD_ROUTERBOOT_PARTS=y |
21 | 21 | CONFIG_MTD_SPI_NAND=y |
22 | +CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE=y | |
22 | 23 | CONFIG_MTD_SPLIT_MINOR_FW=y |
23 | 24 | CONFIG_MTD_UBI=y |
24 | 25 | CONFIG_MTD_UBI_BLOCK=y |
@@ -3661,6 +3661,7 @@ CONFIG_MTD_ROOTFS_ROOT_DEV=y | ||
3661 | 3661 | # CONFIG_MTD_SPI_NOR is not set |
3662 | 3662 | # CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set |
3663 | 3663 | CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT=4096 |
3664 | +# CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE is not set | |
3664 | 3665 | CONFIG_MTD_SPLIT=y |
3665 | 3666 | # CONFIG_MTD_SPLIT_BCM63XX_FW is not set |
3666 | 3667 | # CONFIG_MTD_SPLIT_BCM_WFI_FW is not set |
@@ -3688,6 +3689,7 @@ CONFIG_MTD_SPLIT_SUPPORT=y | ||
3688 | 3689 | # CONFIG_MTD_UBI_GLUEBI is not set |
3689 | 3690 | # CONFIG_MTD_UIMAGE_SPLIT is not set |
3690 | 3691 | # CONFIG_MTD_VIRT_CONCAT is not set |
3692 | +# CONFIG_MTD_NAND_MTK_BMT is not set | |
3691 | 3693 | # CONFIG_MTK_MMC is not set |
3692 | 3694 | # CONFIG_MTK_MMSYS is not set |
3693 | 3695 | CONFIG_MULTIUSER=y |
@@ -0,0 +1,1201 @@ | ||
1 | +/* | |
2 | + * Copyright (c) 2017 MediaTek Inc. | |
3 | + * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com> | |
4 | + * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name> | |
5 | + * | |
6 | + * This program is free software; you can redistribute it and/or modify | |
7 | + * it under the terms of the GNU General Public License version 2 as | |
8 | + * published by the Free Software Foundation. | |
9 | + * | |
10 | + * This program is distributed in the hope that it will be useful, | |
11 | + * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | + * GNU General Public License for more details. | |
14 | + */ | |
15 | + | |
16 | +#include <linux/slab.h> | |
17 | +#include <linux/gfp.h> | |
18 | +#include <linux/kernel.h> | |
19 | +#include <linux/of.h> | |
20 | +#include <linux/mtd/mtd.h> | |
21 | +#include <linux/mtd/partitions.h> | |
22 | +#include <linux/mtd/mtk_bmt.h> | |
23 | +#include <linux/module.h> | |
24 | +#include <linux/debugfs.h> | |
25 | +#include <linux/bits.h> | |
26 | + | |
27 | +#define MAIN_SIGNATURE_OFFSET 0 | |
28 | +#define OOB_SIGNATURE_OFFSET 1 | |
29 | +#define BBPOOL_RATIO 2 | |
30 | + | |
31 | +#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__) | |
32 | + | |
33 | +/* Maximum 8k blocks */ | |
34 | +#define BB_TABLE_MAX bmtd.table_size | |
35 | +#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100) | |
36 | +#define BMT_TBL_DEF_VAL 0x0 | |
37 | + | |
38 | +struct mtk_bmt_ops { | |
39 | + char *sig; | |
40 | + unsigned int sig_len; | |
41 | + int (*init)(struct device_node *np); | |
42 | + bool (*remap_block)(u16 block, u16 mapped_block, int copy_len); | |
43 | + void (*unmap_block)(u16 block); | |
44 | + u16 (*get_mapping_block)(int block); | |
45 | + int (*debug)(void *data, u64 val); | |
46 | +}; | |
47 | + | |
48 | +struct bbbt { | |
49 | + char signature[3]; | |
50 | + /* This version is used to distinguish the legacy and new algorithm */ | |
51 | +#define BBMT_VERSION 2 | |
52 | + unsigned char version; | |
53 | + /* Below 2 tables will be written in SLC */ | |
54 | + u16 bb_tbl[]; | |
55 | +}; | |
56 | + | |
57 | +struct bbmt { | |
58 | + u16 block; | |
59 | +#define NO_MAPPED 0 | |
60 | +#define NORMAL_MAPPED 1 | |
61 | +#define BMT_MAPPED 2 | |
62 | + u16 mapped; | |
63 | +}; | |
64 | + | |
65 | +static struct bmt_desc { | |
66 | + struct mtd_info *mtd; | |
67 | + | |
68 | + int (*_read_oob) (struct mtd_info *mtd, loff_t from, | |
69 | + struct mtd_oob_ops *ops); | |
70 | + int (*_write_oob) (struct mtd_info *mtd, loff_t to, | |
71 | + struct mtd_oob_ops *ops); | |
72 | + int (*_erase) (struct mtd_info *mtd, struct erase_info *instr); | |
73 | + int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); | |
74 | + int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); | |
75 | + | |
76 | + const struct mtk_bmt_ops *ops; | |
77 | + | |
78 | + struct bbbt *bbt; | |
79 | + | |
80 | + struct dentry *debugfs_dir; | |
81 | + | |
82 | + u32 table_size; | |
83 | + u32 pg_size; | |
84 | + u32 blk_size; | |
85 | + u16 pg_shift; | |
86 | + u16 blk_shift; | |
87 | + /* bbt logical address */ | |
88 | + u16 pool_lba; | |
89 | + /* bbt physical address */ | |
90 | + u16 pool_pba; | |
91 | + /* Maximum count of bad blocks that the vendor guaranteed */ | |
92 | + u16 bb_max; | |
93 | + /* Total blocks of the Nand Chip */ | |
94 | + u16 total_blks; | |
95 | + /* The block(n) BMT is located at (bmt_tbl[n]) */ | |
96 | + u16 bmt_blk_idx; | |
97 | + /* How many pages needs to store 'struct bbbt' */ | |
98 | + u32 bmt_pgs; | |
99 | + | |
100 | + const __be32 *remap_range; | |
101 | + int remap_range_len; | |
102 | + | |
103 | + /* to compensate for driver level remapping */ | |
104 | + u8 oob_offset; | |
105 | +} bmtd = {0}; | |
106 | + | |
107 | +static unsigned char *nand_bbt_buf; | |
108 | +static unsigned char *nand_data_buf; | |
109 | + | |
110 | +/* -------- Unit conversions -------- */ | |
111 | +static inline u32 blk_pg(u16 block) | |
112 | +{ | |
113 | + return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift)); | |
114 | +} | |
115 | + | |
/* -------- Nand operations wrapper -------- */
/* Read @dat_len data bytes starting at @page plus @fdm_len OOB/FDM bytes,
 * using the saved low-level read_oob hook (bypasses BMT remapping).
 * Returns the driver status: negative on hard error, >= 0 otherwise. */
static inline int
bbt_nand_read(u32 page, unsigned char *dat, int dat_len,
	      unsigned char *fdm, int fdm_len)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,
		/* compensate for driver-level OOB remapping */
		.ooboffs = bmtd.oob_offset,
		.oobbuf = fdm,
		.ooblen = fdm_len,
		.datbuf = dat,
		.len = dat_len,
	};

	return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops);
}
132 | + | |
133 | +static inline int bbt_nand_erase(u16 block) | |
134 | +{ | |
135 | + struct mtd_info *mtd = bmtd.mtd; | |
136 | + struct erase_info instr = { | |
137 | + .addr = (loff_t)block << bmtd.blk_shift, | |
138 | + .len = bmtd.blk_size, | |
139 | + }; | |
140 | + | |
141 | + return bmtd._erase(mtd, &instr); | |
142 | +} | |
143 | + | |
144 | +static inline int bbt_nand_copy(u16 dest_blk, u16 src_blk, loff_t max_offset) | |
145 | +{ | |
146 | + int pages = bmtd.blk_size >> bmtd.pg_shift; | |
147 | + loff_t src = (loff_t)src_blk << bmtd.blk_shift; | |
148 | + loff_t dest = (loff_t)dest_blk << bmtd.blk_shift; | |
149 | + loff_t offset = 0; | |
150 | + uint8_t oob[64]; | |
151 | + int i, ret; | |
152 | + | |
153 | + for (i = 0; i < pages; i++) { | |
154 | + struct mtd_oob_ops rd_ops = { | |
155 | + .mode = MTD_OPS_PLACE_OOB, | |
156 | + .oobbuf = oob, | |
157 | + .ooblen = min_t(int, bmtd.mtd->oobsize / pages, sizeof(oob)), | |
158 | + .datbuf = nand_data_buf, | |
159 | + .len = bmtd.pg_size, | |
160 | + }; | |
161 | + struct mtd_oob_ops wr_ops = { | |
162 | + .mode = MTD_OPS_PLACE_OOB, | |
163 | + .oobbuf = oob, | |
164 | + .datbuf = nand_data_buf, | |
165 | + .len = bmtd.pg_size, | |
166 | + }; | |
167 | + | |
168 | + if (offset >= max_offset) | |
169 | + break; | |
170 | + | |
171 | + ret = bmtd._read_oob(bmtd.mtd, src + offset, &rd_ops); | |
172 | + if (ret < 0 && !mtd_is_bitflip(ret)) | |
173 | + return ret; | |
174 | + | |
175 | + if (!rd_ops.retlen) | |
176 | + break; | |
177 | + | |
178 | + ret = bmtd._write_oob(bmtd.mtd, dest + offset, &wr_ops); | |
179 | + if (ret < 0) | |
180 | + return ret; | |
181 | + | |
182 | + wr_ops.ooblen = rd_ops.oobretlen; | |
183 | + offset += rd_ops.retlen; | |
184 | + } | |
185 | + | |
186 | + return 0; | |
187 | +} | |
188 | + | |
/* -------- Bad Blocks Management -------- */
/* The runtime pool table (struct bbmt entries) is stored immediately after
 * the bb_tbl[table_size] array inside the same struct bbbt allocation. */
static inline struct bbmt *bmt_tbl(struct bbbt *bbbt)
{
	return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size];
}
194 | + | |
/* Read the pages holding the on-flash 'struct bbbt' from @block into @dat,
 * and its OOB signature bytes into @fdm. Returns 0 on success. */
static int
read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len)
{
	u32 len = bmtd.bmt_pgs << bmtd.pg_shift;

	return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len);
}
202 | + | |
/* Write the serialized BMT image @dat to @block, placing the backend's OOB
 * signature at OOB_SIGNATURE_OFFSET so scan_bmt() can find it later. */
static int write_bmt(u16 block, unsigned char *dat)
{
	struct mtd_oob_ops ops = {
		.mode = MTD_OPS_PLACE_OOB,
		.ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset,
		.oobbuf = bmtd.ops->sig,
		.ooblen = bmtd.ops->sig_len,
		.datbuf = dat,
		.len = bmtd.bmt_pgs << bmtd.pg_shift,
	};
	loff_t addr = (loff_t)block << bmtd.blk_shift;

	return bmtd._write_oob(bmtd.mtd, addr, &ops);
}
217 | + | |
/* Return the first good block at or after @block, skipping factory-marked
 * bad blocks. Returns 0 when the chip end or the vendor bad-block budget
 * (bmtd.bb_max) is exceeded — block 0 itself is guaranteed good. */
static u16 find_valid_block(u16 block)
{
	u8 fdm[4];
	int ret;
	int loop = 0;

retry:
	if (block >= bmtd.total_blks)
		return 0;

	ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size,
			    fdm, sizeof(fdm));
	/* Read the 1st byte of FDM to judge whether it's a bad
	 * or not
	 */
	if (ret || fdm[0] != 0xff) {
		pr_info("nand: found bad block 0x%x\n", block);
		if (loop >= bmtd.bb_max) {
			pr_info("nand: FATAL ERR: too many bad blocks!!\n");
			return 0;
		}

		loop++;
		block++;
		goto retry;
	}

	return block;
}
247 | + | |
248 | +/* Find out all bad blocks, and fill in the mapping table */ | |
249 | +static int scan_bad_blocks(struct bbbt *bbt) | |
250 | +{ | |
251 | + int i; | |
252 | + u16 block = 0; | |
253 | + | |
254 | + /* First time download, the block0 MUST NOT be a bad block, | |
255 | + * this is guaranteed by vendor | |
256 | + */ | |
257 | + bbt->bb_tbl[0] = 0; | |
258 | + | |
259 | + /* | |
260 | + * Construct the mapping table of Normal data area(non-PMT/BMTPOOL) | |
261 | + * G - Good block; B - Bad block | |
262 | + * --------------------------- | |
263 | + * physical |G|G|B|G|B|B|G|G|G|G|B|G|B| | |
264 | + * --------------------------- | |
265 | + * What bb_tbl[i] looks like: | |
266 | + * physical block(i): | |
267 | + * 0 1 2 3 4 5 6 7 8 9 a b c | |
268 | + * mapped block(bb_tbl[i]): | |
269 | + * 0 1 3 6 7 8 9 b ...... | |
270 | + * ATTENTION: | |
271 | + * If new bad block ocurred(n), search bmt_tbl to find | |
272 | + * a available block(x), and fill in the bb_tbl[n] = x; | |
273 | + */ | |
274 | + for (i = 1; i < bmtd.pool_lba; i++) { | |
275 | + bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1); | |
276 | + BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]); | |
277 | + if (bbt->bb_tbl[i] == 0) | |
278 | + return -1; | |
279 | + } | |
280 | + | |
281 | + /* Physical Block start Address of BMT pool */ | |
282 | + bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1; | |
283 | + if (bmtd.pool_pba >= bmtd.total_blks - 2) { | |
284 | + pr_info("nand: FATAL ERR: Too many bad blocks!!\n"); | |
285 | + return -1; | |
286 | + } | |
287 | + | |
288 | + BBT_LOG("pool_pba=0x%x", bmtd.pool_pba); | |
289 | + i = 0; | |
290 | + block = bmtd.pool_pba; | |
291 | + /* | |
292 | + * The bmt table is used for runtime bad block mapping | |
293 | + * G - Good block; B - Bad block | |
294 | + * --------------------------- | |
295 | + * physical |G|G|B|G|B|B|G|G|G|G|B|G|B| | |
296 | + * --------------------------- | |
297 | + * block: 0 1 2 3 4 5 6 7 8 9 a b c | |
298 | + * What bmt_tbl[i] looks like in initial state: | |
299 | + * i: | |
300 | + * 0 1 2 3 4 5 6 7 | |
301 | + * bmt_tbl[i].block: | |
302 | + * 0 1 3 6 7 8 9 b | |
303 | + * bmt_tbl[i].mapped: | |
304 | + * N N N N N N N B | |
305 | + * N - Not mapped(Available) | |
306 | + * M - Mapped | |
307 | + * B - BMT | |
308 | + * ATTENTION: | |
309 | + * BMT always in the last valid block in pool | |
310 | + */ | |
311 | + while ((block = find_valid_block(block)) != 0) { | |
312 | + bmt_tbl(bbt)[i].block = block; | |
313 | + bmt_tbl(bbt)[i].mapped = NO_MAPPED; | |
314 | + BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block); | |
315 | + block++; | |
316 | + i++; | |
317 | + } | |
318 | + | |
319 | + /* i - How many available blocks in pool, which is the length of bmt_tbl[] | |
320 | + * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block | |
321 | + */ | |
322 | + bmtd.bmt_blk_idx = i - 1; | |
323 | + bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED; | |
324 | + | |
325 | + if (i < 1) { | |
326 | + pr_info("nand: FATAL ERR: no space to store BMT!!\n"); | |
327 | + return -1; | |
328 | + } | |
329 | + | |
330 | + pr_info("[BBT] %d available blocks in BMT pool\n", i); | |
331 | + | |
332 | + return 0; | |
333 | +} | |
334 | + | |
/* Check whether @buf holds a valid v2 BMT: "BMT" signature in the data,
 * "bmt" signature in the OOB bytes, and a matching version field. */
static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm)
{
	struct bbbt *bbt = (struct bbbt *)buf;
	u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET;


	if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 &&
	    memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) {
		if (bbt->version == BBMT_VERSION)
			return true;
	}
	/* NOTE(review): log prints fdm[1..3], i.e. starting at
	 * OOB_SIGNATURE_OFFSET — intentional, matches the memcmp above. */
	BBT_LOG("[BBT] BMT Version not match,upgrage preloader and uboot please! sig=%02x%02x%02x, fdm=%02x%02x%02x",
		sig[0], sig[1], sig[2],
		fdm[1], fdm[2], fdm[3]);
	return false;
}
351 | + | |
352 | +static u16 get_bmt_index(struct bbmt *bmt) | |
353 | +{ | |
354 | + int i = 0; | |
355 | + | |
356 | + while (bmt[i].block != BMT_TBL_DEF_VAL) { | |
357 | + if (bmt[i].mapped == BMT_MAPPED) | |
358 | + return i; | |
359 | + i++; | |
360 | + } | |
361 | + return 0; | |
362 | +} | |
363 | + | |
364 | +static struct bbbt *scan_bmt(u16 block) | |
365 | +{ | |
366 | + u8 fdm[4]; | |
367 | + | |
368 | + if (block < bmtd.pool_lba) | |
369 | + return NULL; | |
370 | + | |
371 | + if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm))) | |
372 | + return scan_bmt(block - 1); | |
373 | + | |
374 | + if (is_valid_bmt(nand_bbt_buf, fdm)) { | |
375 | + bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf)); | |
376 | + if (bmtd.bmt_blk_idx == 0) { | |
377 | + pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n"); | |
378 | + return NULL; | |
379 | + } | |
380 | + pr_info("[BBT] BMT.v2 is found at 0x%x\n", block); | |
381 | + return (struct bbbt *)nand_bbt_buf; | |
382 | + } else | |
383 | + return scan_bmt(block - 1); | |
384 | +} | |
385 | + | |
/* Write the Burner Bad Block Table to Nand Flash
 * n - write BMT to bmt_tbl[n]
 * Walks backwards through the pool on erase/write failures, zeroing out
 * failed entries. Returns the pool index actually used, or (u16)-1 when
 * the pool is exhausted. */
static u16 upload_bmt(struct bbbt *bbt, int n)
{
	u16 block;

retry:
	/* Stop when we run off the front of the pool or hit a block already
	 * used for data remapping. */
	if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) {
		pr_info("nand: FATAL ERR: no space to store BMT!\n");
		return (u16)-1;
	}

	block = bmt_tbl(bbt)[n].block;
	BBT_LOG("n = 0x%x, block = 0x%x", n, block);
	if (bbt_nand_erase(block)) {
		bmt_tbl(bbt)[n].block = 0;
		/* erase failed, try the previous block: bmt_tbl[n - 1].block */
		n--;
		goto retry;
	}

	/* The signature offset is fixed set to 0,
	 * oob signature offset is fixed set to 1
	 */
	memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3);
	bbt->version = BBMT_VERSION;

	if (write_bmt(block, (unsigned char *)bbt)) {
		bmt_tbl(bbt)[n].block = 0;

		/* write failed, try the previous block in bmt_tbl[n - 1] */
		n--;
		goto retry;
	}

	/* Return the current index(n) of BMT pool (bmt_tbl[n]) */
	return n;
}
425 | + | |
426 | +static u16 find_valid_block_in_pool(struct bbbt *bbt) | |
427 | +{ | |
428 | + int i; | |
429 | + | |
430 | + if (bmtd.bmt_blk_idx == 0) | |
431 | + goto error; | |
432 | + | |
433 | + for (i = 0; i < bmtd.bmt_blk_idx; i++) { | |
434 | + if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) { | |
435 | + bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED; | |
436 | + return bmt_tbl(bbt)[i].block; | |
437 | + } | |
438 | + } | |
439 | + | |
440 | +error: | |
441 | + pr_info("nand: FATAL ERR: BMT pool is run out!\n"); | |
442 | + return 0; | |
443 | +} | |
444 | + | |
/* We met a bad block, mark it as bad and map it to a valid block in pool,
 * if it's a write failure, we need to write the data to mapped block
 * (@copy_len > 0 requests copying that many bytes from the old block).
 * Returns false when no replacement block is available. */
static bool remap_block_v2(u16 block, u16 mapped_block, int copy_len)
{
	u16 mapped_blk;
	struct bbbt *bbt;

	bbt = bmtd.bbt;
	mapped_blk = find_valid_block_in_pool(bbt);
	if (mapped_blk == 0)
		return false;

	/* Map new bad block to available block in pool */
	bbt->bb_tbl[block] = mapped_blk;

	/* Erase new block */
	bbt_nand_erase(mapped_blk);
	if (copy_len > 0)
		bbt_nand_copy(mapped_blk, block, copy_len);

	/* Persist the updated table to flash. */
	bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx);

	return true;
}
470 | + | |
/* Return true when @block falls inside a DT-declared remap range (pairs of
 * big-endian byte addresses), or when no ranges are configured at all.
 * NOTE(review): on the no-ranges path *end is a block count while the
 * ranged path stores byte addresses — callers currently only use the
 * boolean result, so this inconsistency is latent; confirm before relying
 * on *start/*end. */
static bool
mapping_block_in_range(int block, int *start, int *end)
{
	const __be32 *cur = bmtd.remap_range;
	u32 addr = block << bmtd.blk_shift;
	int i;

	if (!cur || !bmtd.remap_range_len) {
		*start = 0;
		*end = bmtd.total_blks;
		return true;
	}

	for (i = 0; i < bmtd.remap_range_len; i++, cur += 2) {
		if (addr < be32_to_cpu(cur[0]) || addr >= be32_to_cpu(cur[1]))
			continue;

		*start = be32_to_cpu(cur[0]);
		*end = be32_to_cpu(cur[1]);
		return true;
	}

	return false;
}
495 | + | |
/* v2 logical-to-physical lookup: blocks at or beyond the pool, and blocks
 * outside every remap range, map to themselves; everything else goes
 * through bb_tbl[]. */
static u16
get_mapping_block_index_v2(int block)
{
	int start, end;

	if (block >= bmtd.pool_lba)
		return block;

	if (!mapping_block_in_range(block, &start, &end))
		return block;

	return bmtd.bbt->bb_tbl[block];
}
509 | + | |
/* Replacement for mtd->_read_oob: translate each block through the BMT,
 * read via the saved hook, and remap + retry (up to 10x per block) on hard
 * errors or excessive bitflips. Returns the worst bitflip count, or the
 * last hard error. */
static int
mtk_bmt_read(struct mtd_info *mtd, loff_t from,
	     struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_from;
	int ret = 0;
	int max_bitflips = 0;
	int start, end;

	ops->retlen = 0;
	ops->oobretlen = 0;

	/* Process one erase block per iteration. */
	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		int cur_ret;

		u32 offset = from & (bmtd.blk_size - 1);
		u32 block = from >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, mtd->erasesize - offset,
				    ops->len - ops->retlen);
		cur_ret = bmtd._read_oob(mtd, cur_from, &cur_ops);
		if (cur_ret < 0)
			ret = cur_ret;
		else
			max_bitflips = max_t(int, max_bitflips, cur_ret);
		if (cur_ret < 0 && !mtd_is_bitflip(cur_ret)) {
			/* Hard read error: remap (copying the whole block)
			 * and retry the same logical block. */
			bmtd.ops->remap_block(block, cur_block, mtd->erasesize);
			if (retry_count++ < 10)
				continue;

			goto out;
		}

		/* Too many correctable bitflips: proactively migrate the
		 * block if it is eligible for remapping. */
		if (cur_ret >= mtd->bitflip_threshold &&
		    mapping_block_in_range(block, &start, &end))
			bmtd.ops->remap_block(block, cur_block, mtd->erasesize);

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		/* Advance the cursors into the caller's buffers. */
		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		/* OOB-only request: still step forward one block. */
		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		from += cur_ops.len;
		retry_count = 0;
	}

out:
	if (ret < 0)
		return ret;

	return max_bitflips;
}
576 | + | |
/* Replacement for mtd->_write_oob: translate each block through the BMT,
 * write via the saved hook, and on failure remap the block (copying the
 * bytes already written, i.e. up to @offset) and retry up to 10 times. */
static int
mtk_bmt_write(struct mtd_info *mtd, loff_t to,
	      struct mtd_oob_ops *ops)
{
	struct mtd_oob_ops cur_ops = *ops;
	int retry_count = 0;
	loff_t cur_to;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	/* Process one erase block per iteration. */
	while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
		u32 offset = to & (bmtd.blk_size - 1);
		u32 block = to >> bmtd.blk_shift;
		u32 cur_block;

		cur_block = bmtd.ops->get_mapping_block(block);
		cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset;

		cur_ops.oobretlen = 0;
		cur_ops.retlen = 0;
		cur_ops.len = min_t(u32, bmtd.blk_size - offset,
				    ops->len - ops->retlen);
		ret = bmtd._write_oob(mtd, cur_to, &cur_ops);
		if (ret < 0) {
			bmtd.ops->remap_block(block, cur_block, offset);
			if (retry_count++ < 10)
				continue;

			return ret;
		}

		ops->retlen += cur_ops.retlen;
		ops->oobretlen += cur_ops.oobretlen;

		/* Advance the cursors into the caller's buffers. */
		cur_ops.ooboffs = 0;
		cur_ops.datbuf += cur_ops.retlen;
		cur_ops.oobbuf += cur_ops.oobretlen;
		cur_ops.ooblen -= cur_ops.oobretlen;

		/* OOB-only request: still step forward one block. */
		if (!cur_ops.len)
			cur_ops.len = mtd->erasesize - offset;

		to += cur_ops.len;
		retry_count = 0;
	}

	return 0;
}
627 | + | |
628 | +static int | |
629 | +mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr) | |
630 | +{ | |
631 | + struct erase_info mapped_instr = { | |
632 | + .len = bmtd.blk_size, | |
633 | + }; | |
634 | + int retry_count = 0; | |
635 | + u64 start_addr, end_addr; | |
636 | + int ret; | |
637 | + u16 orig_block, block; | |
638 | + | |
639 | + start_addr = instr->addr & (~mtd->erasesize_mask); | |
640 | + end_addr = instr->addr + instr->len; | |
641 | + | |
642 | + while (start_addr < end_addr) { | |
643 | + orig_block = start_addr >> bmtd.blk_shift; | |
644 | + block = bmtd.ops->get_mapping_block(orig_block); | |
645 | + mapped_instr.addr = (loff_t)block << bmtd.blk_shift; | |
646 | + ret = bmtd._erase(mtd, &mapped_instr); | |
647 | + if (ret) { | |
648 | + bmtd.ops->remap_block(orig_block, block, 0); | |
649 | + if (retry_count++ < 10) | |
650 | + continue; | |
651 | + instr->fail_addr = start_addr; | |
652 | + break; | |
653 | + } | |
654 | + start_addr += mtd->erasesize; | |
655 | + retry_count = 0; | |
656 | + } | |
657 | + | |
658 | + return ret; | |
659 | +} | |

/* Replacement for mtd->_block_isbad: check the mapped block; if it is bad,
 * remap (copying the block) and re-check, up to 10 times. */
static int
mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	int retry_count = 0;
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block;
	int ret;

retry:
	block = bmtd.ops->get_mapping_block(orig_block);
	ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift);
	if (ret) {
		bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);
		if (retry_count++ < 10)
			goto retry;
	}
	return ret;
}
678 | + | |
/* Replacement for mtd->_block_markbad: remap the logical block to a spare
 * (preserving its contents), then mark the old physical block bad. */
static int
mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	u16 orig_block = ofs >> bmtd.blk_shift;
	u16 block = bmtd.ops->get_mapping_block(orig_block);

	bmtd.ops->remap_block(orig_block, block, bmtd.blk_size);

	return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift);
}
689 | + | |
/* Save the device's original mtd hooks into bmtd and install the BMT
 * remapping wrappers in their place. */
static void
mtk_bmt_replace_ops(struct mtd_info *mtd)
{
	bmtd._read_oob = mtd->_read_oob;
	bmtd._write_oob = mtd->_write_oob;
	bmtd._erase = mtd->_erase;
	bmtd._block_isbad = mtd->_block_isbad;
	bmtd._block_markbad = mtd->_block_markbad;

	mtd->_read_oob = mtk_bmt_read;
	mtd->_write_oob = mtk_bmt_write;
	mtd->_erase = mtk_bmt_mtd_erase;
	mtd->_block_isbad = mtk_bmt_block_isbad;
	mtd->_block_markbad = mtk_bmt_block_markbad;
}
705 | + | |
/* Drop the v2 remapping for @block (map it back to itself) and persist the
 * updated table. Note: the previously-claimed pool block stays marked
 * NORMAL_MAPPED and is not reclaimed. */
static void
unmap_block_v2(u16 block)
{
	bmtd.bbt->bb_tbl[block] = block;
	bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx);
}
712 | + | |
/* debugfs hook: force the block containing byte offset @val back to its
 * identity mapping. */
static int mtk_bmt_debug_mark_good(void *data, u64 val)
{
	bmtd.ops->unmap_block(val >> bmtd.blk_shift);

	return 0;
}
719 | + | |
720 | +static int mtk_bmt_debug_mark_bad(void *data, u64 val) | |
721 | +{ | |
722 | + u32 block = val >> bmtd.blk_shift; | |
723 | + u16 cur_block = bmtd.ops->get_mapping_block(block); | |
724 | + | |
725 | + bmtd.ops->remap_block(block, cur_block, bmtd.blk_size); | |
726 | + | |
727 | + return 0; | |
728 | +} | |
729 | + | |
730 | +static unsigned long * | |
731 | +mtk_bmt_get_mapping_mask(void) | |
732 | +{ | |
733 | + struct bbmt *bbmt = bmt_tbl(bmtd.bbt); | |
734 | + int main_blocks = bmtd.mtd->size >> bmtd.blk_shift; | |
735 | + unsigned long *used; | |
736 | + int i, k; | |
737 | + | |
738 | + used = kcalloc(sizeof(unsigned long), BIT_WORD(bmtd.bmt_blk_idx) + 1, GFP_KERNEL); | |
739 | + if (!used) | |
740 | + return NULL; | |
741 | + | |
742 | + for (i = 1; i < main_blocks; i++) { | |
743 | + if (bmtd.bbt->bb_tbl[i] == i) | |
744 | + continue; | |
745 | + | |
746 | + for (k = 0; k < bmtd.bmt_blk_idx; k++) { | |
747 | + if (bmtd.bbt->bb_tbl[i] != bbmt[k].block) | |
748 | + continue; | |
749 | + | |
750 | + set_bit(k, used); | |
751 | + break; | |
752 | + } | |
753 | + } | |
754 | + | |
755 | + return used; | |
756 | +} | |
757 | + | |
758 | +static int mtk_bmt_debug_v2(void *data, u64 val) | |
759 | +{ | |
760 | + struct bbmt *bbmt = bmt_tbl(bmtd.bbt); | |
761 | + struct mtd_info *mtd = bmtd.mtd; | |
762 | + unsigned long *used; | |
763 | + int main_blocks = mtd->size >> bmtd.blk_shift; | |
764 | + int n_remap = 0; | |
765 | + int i; | |
766 | + | |
767 | + used = mtk_bmt_get_mapping_mask(); | |
768 | + if (!used) | |
769 | + return -ENOMEM; | |
770 | + | |
771 | + switch (val) { | |
772 | + case 0: | |
773 | + for (i = 1; i < main_blocks; i++) { | |
774 | + if (bmtd.bbt->bb_tbl[i] == i) | |
775 | + continue; | |
776 | + | |
777 | + printk("remap [%x->%x]\n", i, bmtd.bbt->bb_tbl[i]); | |
778 | + n_remap++; | |
779 | + } | |
780 | + for (i = 0; i <= bmtd.bmt_blk_idx; i++) { | |
781 | + char c; | |
782 | + | |
783 | + switch (bbmt[i].mapped) { | |
784 | + case NO_MAPPED: | |
785 | + continue; | |
786 | + case NORMAL_MAPPED: | |
787 | + c = 'm'; | |
788 | + if (test_bit(i, used)) | |
789 | + c = 'M'; | |
790 | + break; | |
791 | + case BMT_MAPPED: | |
792 | + c = 'B'; | |
793 | + break; | |
794 | + default: | |
795 | + c = 'X'; | |
796 | + break; | |
797 | + } | |
798 | + printk("[%x:%c] = 0x%x\n", i, c, bbmt[i].block); | |
799 | + } | |
800 | + break; | |
801 | + case 100: | |
802 | + for (i = 0; i <= bmtd.bmt_blk_idx; i++) { | |
803 | + if (bbmt[i].mapped != NORMAL_MAPPED) | |
804 | + continue; | |
805 | + | |
806 | + if (test_bit(i, used)) | |
807 | + continue; | |
808 | + | |
809 | + n_remap++; | |
810 | + bbmt[i].mapped = NO_MAPPED; | |
811 | + printk("free block [%d:%x]\n", i, bbmt[i].block); | |
812 | + } | |
813 | + if (n_remap) | |
814 | + bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx); | |
815 | + break; | |
816 | + } | |
817 | + | |
818 | + kfree(used); | |
819 | + | |
820 | + return 0; | |
821 | +} | |
822 | + | |
823 | +static int mtk_bmt_debug(void *data, u64 val) | |
824 | +{ | |
825 | + return bmtd.ops->debug(data, val); | |
826 | +} | |
827 | + | |
828 | + | |
829 | +DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n"); | |
830 | +DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n"); | |
831 | +DEFINE_DEBUGFS_ATTRIBUTE(fops_debug, NULL, mtk_bmt_debug, "%llu\n"); | |
832 | + | |
833 | +static void | |
834 | +mtk_bmt_add_debugfs(void) | |
835 | +{ | |
836 | + struct dentry *dir; | |
837 | + | |
838 | + dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL); | |
839 | + if (!dir) | |
840 | + return; | |
841 | + | |
842 | + debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good); | |
843 | + debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad); | |
844 | + debugfs_create_file_unsafe("debug", S_IWUSR, dir, NULL, &fops_debug); | |
845 | +} | |
846 | + | |
847 | +void mtk_bmt_detach(struct mtd_info *mtd) | |
848 | +{ | |
849 | + if (bmtd.mtd != mtd) | |
850 | + return; | |
851 | + | |
852 | + if (bmtd.debugfs_dir) | |
853 | + debugfs_remove_recursive(bmtd.debugfs_dir); | |
854 | + bmtd.debugfs_dir = NULL; | |
855 | + | |
856 | + kfree(nand_bbt_buf); | |
857 | + kfree(nand_data_buf); | |
858 | + | |
859 | + mtd->_read_oob = bmtd._read_oob; | |
860 | + mtd->_write_oob = bmtd._write_oob; | |
861 | + mtd->_erase = bmtd._erase; | |
862 | + mtd->_block_isbad = bmtd._block_isbad; | |
863 | + mtd->_block_markbad = bmtd._block_markbad; | |
864 | + mtd->size = bmtd.total_blks << bmtd.blk_shift; | |
865 | + | |
866 | + memset(&bmtd, 0, sizeof(bmtd)); | |
867 | +} | |
868 | + | |
869 | +static int mtk_bmt_init_v2(struct device_node *np) | |
870 | +{ | |
871 | + u32 bmt_pool_size, bmt_table_size; | |
872 | + u32 bufsz, block; | |
873 | + u16 pmt_block; | |
874 | + | |
875 | + if (of_property_read_u32(np, "mediatek,bmt-pool-size", | |
876 | + &bmt_pool_size) != 0) | |
877 | + bmt_pool_size = 80; | |
878 | + | |
879 | + if (of_property_read_u8(np, "mediatek,bmt-oob-offset", | |
880 | + &bmtd.oob_offset) != 0) | |
881 | + bmtd.oob_offset = 0; | |
882 | + | |
883 | + if (of_property_read_u32(np, "mediatek,bmt-table-size", | |
884 | + &bmt_table_size) != 0) | |
885 | + bmt_table_size = 0x2000U; | |
886 | + | |
887 | + bmtd.table_size = bmt_table_size; | |
888 | + | |
889 | + pmt_block = bmtd.total_blks - bmt_pool_size - 2; | |
890 | + | |
891 | + bmtd.mtd->size = pmt_block << bmtd.blk_shift; | |
892 | + | |
893 | + /* | |
894 | + * --------------------------------------- | |
895 | + * | PMT(2blks) | BMT POOL(totalblks * 2%) | | |
896 | + * --------------------------------------- | |
897 | + * ^ ^ | |
898 | + * | | | |
899 | + * pmt_block pmt_block + 2blocks(pool_lba) | |
900 | + * | |
901 | + * ATTENTION!!!!!! | |
902 | + * The blocks ahead of the boundary block are stored in bb_tbl | |
903 | + * and blocks behind are stored in bmt_tbl | |
904 | + */ | |
905 | + | |
906 | + bmtd.pool_lba = (u16)(pmt_block + 2); | |
907 | + bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100; | |
908 | + | |
909 | + bufsz = round_up(sizeof(struct bbbt) + | |
910 | + bmt_table_size * sizeof(struct bbmt), bmtd.pg_size); | |
911 | + bmtd.bmt_pgs = bufsz >> bmtd.pg_shift; | |
912 | + | |
913 | + nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL); | |
914 | + if (!nand_bbt_buf) | |
915 | + return -ENOMEM; | |
916 | + | |
917 | + memset(nand_bbt_buf, 0xff, bufsz); | |
918 | + | |
919 | + /* Scanning start from the first page of the last block | |
920 | + * of whole flash | |
921 | + */ | |
922 | + bmtd.bbt = scan_bmt(bmtd.total_blks - 1); | |
923 | + if (!bmtd.bbt) { | |
924 | + /* BMT not found */ | |
925 | + if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) { | |
926 | + pr_info("nand: FATAL: Too many blocks, can not support!\n"); | |
927 | + return -1; | |
928 | + } | |
929 | + | |
930 | + bmtd.bbt = (struct bbbt *)nand_bbt_buf; | |
931 | + memset(bmt_tbl(bmtd.bbt), BMT_TBL_DEF_VAL, | |
932 | + bmtd.table_size * sizeof(struct bbmt)); | |
933 | + | |
934 | + if (scan_bad_blocks(bmtd.bbt)) | |
935 | + return -1; | |
936 | + | |
937 | + /* BMT always in the last valid block in pool */ | |
938 | + bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx); | |
939 | + block = bmt_tbl(bmtd.bbt)[bmtd.bmt_blk_idx].block; | |
940 | + pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block); | |
941 | + | |
942 | + if (bmtd.bmt_blk_idx == 0) | |
943 | + pr_info("nand: Warning: no available block in BMT pool!\n"); | |
944 | + else if (bmtd.bmt_blk_idx == (u16)-1) | |
945 | + return -1; | |
946 | + } | |
947 | + | |
948 | + return 0; | |
949 | +} | |
950 | + | |
951 | +static bool | |
952 | +bbt_block_is_bad(u16 block) | |
953 | +{ | |
954 | + u8 cur = nand_bbt_buf[block / 4]; | |
955 | + | |
956 | + return cur & (3 << ((block % 4) * 2)); | |
957 | +} | |
958 | + | |
959 | +static void | |
960 | +bbt_set_block_state(u16 block, bool bad) | |
961 | +{ | |
962 | + u8 mask = (3 << ((block % 4) * 2)); | |
963 | + | |
964 | + if (bad) | |
965 | + nand_bbt_buf[block / 4] |= mask; | |
966 | + else | |
967 | + nand_bbt_buf[block / 4] &= ~mask; | |
968 | + | |
969 | + bbt_nand_erase(bmtd.bmt_blk_idx); | |
970 | + write_bmt(bmtd.bmt_blk_idx, nand_bbt_buf); | |
971 | +} | |
972 | + | |
973 | +static u16 | |
974 | +get_mapping_block_index_bbt(int block) | |
975 | +{ | |
976 | + int start, end, ofs; | |
977 | + int bad_blocks = 0; | |
978 | + int i; | |
979 | + | |
980 | + if (!mapping_block_in_range(block, &start, &end)) | |
981 | + return block; | |
982 | + | |
983 | + start >>= bmtd.blk_shift; | |
984 | + end >>= bmtd.blk_shift; | |
985 | + /* skip bad blocks within the mapping range */ | |
986 | + ofs = block - start; | |
987 | + for (i = start; i < end; i++) { | |
988 | + if (bbt_block_is_bad(i)) | |
989 | + bad_blocks++; | |
990 | + else if (ofs) | |
991 | + ofs--; | |
992 | + else | |
993 | + break; | |
994 | + } | |
995 | + | |
996 | + if (i < end) | |
997 | + return i; | |
998 | + | |
999 | + /* when overflowing, remap remaining blocks to bad ones */ | |
1000 | + for (i = end - 1; bad_blocks > 0; i--) { | |
1001 | + if (!bbt_block_is_bad(i)) | |
1002 | + continue; | |
1003 | + | |
1004 | + bad_blocks--; | |
1005 | + if (bad_blocks <= ofs) | |
1006 | + return i; | |
1007 | + } | |
1008 | + | |
1009 | + return block; | |
1010 | +} | |
1011 | + | |
1012 | +static bool remap_block_bbt(u16 block, u16 mapped_blk, int copy_len) | |
1013 | +{ | |
1014 | + int start, end; | |
1015 | + u16 new_blk; | |
1016 | + | |
1017 | + if (!mapping_block_in_range(block, &start, &end)) | |
1018 | + return false; | |
1019 | + | |
1020 | + bbt_set_block_state(mapped_blk, true); | |
1021 | + | |
1022 | + new_blk = get_mapping_block_index_bbt(block); | |
1023 | + bbt_nand_erase(new_blk); | |
1024 | + if (copy_len > 0) | |
1025 | + bbt_nand_copy(new_blk, mapped_blk, copy_len); | |
1026 | + | |
1027 | + return false; | |
1028 | +} | |
1029 | + | |
1030 | +static void | |
1031 | +unmap_block_bbt(u16 block) | |
1032 | +{ | |
1033 | + bbt_set_block_state(block, false); | |
1034 | +} | |
1035 | + | |
1036 | +static int | |
1037 | +mtk_bmt_read_bbt(void) | |
1038 | +{ | |
1039 | + u8 oob_buf[8]; | |
1040 | + int i; | |
1041 | + | |
1042 | + for (i = bmtd.total_blks - 1; i >= bmtd.total_blks - 5; i--) { | |
1043 | + u32 page = i << (bmtd.blk_shift - bmtd.pg_shift); | |
1044 | + | |
1045 | + if (bbt_nand_read(page, nand_bbt_buf, bmtd.pg_size, | |
1046 | + oob_buf, sizeof(oob_buf))) { | |
1047 | + pr_info("read_bbt: could not read block %d\n", i); | |
1048 | + continue; | |
1049 | + } | |
1050 | + | |
1051 | + if (oob_buf[0] != 0xff) { | |
1052 | + pr_info("read_bbt: bad block at %d\n", i); | |
1053 | + continue; | |
1054 | + } | |
1055 | + | |
1056 | + if (memcmp(&oob_buf[1], "mtknand", 7) != 0) { | |
1057 | + pr_info("read_bbt: signature mismatch in block %d\n", i); | |
1058 | + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, oob_buf, 8, 1); | |
1059 | + continue; | |
1060 | + } | |
1061 | + | |
1062 | + pr_info("read_bbt: found bbt at block %d\n", i); | |
1063 | + bmtd.bmt_blk_idx = i; | |
1064 | + return 0; | |
1065 | + } | |
1066 | + | |
1067 | + return -EIO; | |
1068 | +} | |
1069 | + | |
1070 | + | |
1071 | +static int | |
1072 | +mtk_bmt_init_bbt(struct device_node *np) | |
1073 | +{ | |
1074 | + int buf_size = round_up(bmtd.total_blks >> 2, bmtd.blk_size); | |
1075 | + int ret; | |
1076 | + | |
1077 | + nand_bbt_buf = kmalloc(buf_size, GFP_KERNEL); | |
1078 | + if (!nand_bbt_buf) | |
1079 | + return -ENOMEM; | |
1080 | + | |
1081 | + memset(nand_bbt_buf, 0xff, buf_size); | |
1082 | + bmtd.mtd->size -= 4 * bmtd.mtd->erasesize; | |
1083 | + | |
1084 | + ret = mtk_bmt_read_bbt(); | |
1085 | + if (ret) | |
1086 | + return ret; | |
1087 | + | |
1088 | + bmtd.bmt_pgs = buf_size / bmtd.pg_size; | |
1089 | + | |
1090 | + return 0; | |
1091 | +} | |
1092 | + | |
1093 | +static int mtk_bmt_debug_bbt(void *data, u64 val) | |
1094 | +{ | |
1095 | + char buf[5]; | |
1096 | + int i, k; | |
1097 | + | |
1098 | + switch (val) { | |
1099 | + case 0: | |
1100 | + for (i = 0; i < bmtd.total_blks; i += 4) { | |
1101 | + u8 cur = nand_bbt_buf[i / 4]; | |
1102 | + | |
1103 | + for (k = 0; k < 4; k++, cur >>= 2) | |
1104 | + buf[k] = (cur & 3) ? 'B' : '.'; | |
1105 | + | |
1106 | + buf[4] = 0; | |
1107 | + printk("[%06x] %s\n", i * bmtd.blk_size, buf); | |
1108 | + } | |
1109 | + break; | |
1110 | + case 100: | |
1111 | +#if 0 | |
1112 | + for (i = bmtd.bmt_blk_idx; i < bmtd.total_blks - 1; i++) | |
1113 | + bbt_nand_erase(bmtd.bmt_blk_idx); | |
1114 | +#endif | |
1115 | + | |
1116 | + bmtd.bmt_blk_idx = bmtd.total_blks - 1; | |
1117 | + bbt_nand_erase(bmtd.bmt_blk_idx); | |
1118 | + write_bmt(bmtd.bmt_blk_idx, nand_bbt_buf); | |
1119 | + break; | |
1120 | + default: | |
1121 | + break; | |
1122 | + } | |
1123 | + return 0; | |
1124 | +} | |
1125 | + | |
1126 | +int mtk_bmt_attach(struct mtd_info *mtd) | |
1127 | +{ | |
1128 | + static const struct mtk_bmt_ops v2_ops = { | |
1129 | + .sig = "bmt", | |
1130 | + .sig_len = 3, | |
1131 | + .init = mtk_bmt_init_v2, | |
1132 | + .remap_block = remap_block_v2, | |
1133 | + .unmap_block = unmap_block_v2, | |
1134 | + .get_mapping_block = get_mapping_block_index_v2, | |
1135 | + .debug = mtk_bmt_debug_v2, | |
1136 | + }; | |
1137 | + static const struct mtk_bmt_ops bbt_ops = { | |
1138 | + .sig = "mtknand", | |
1139 | + .sig_len = 7, | |
1140 | + .init = mtk_bmt_init_bbt, | |
1141 | + .remap_block = remap_block_bbt, | |
1142 | + .unmap_block = unmap_block_bbt, | |
1143 | + .get_mapping_block = get_mapping_block_index_bbt, | |
1144 | + .debug = mtk_bmt_debug_bbt, | |
1145 | + }; | |
1146 | + struct device_node *np; | |
1147 | + int ret = 0; | |
1148 | + | |
1149 | + if (bmtd.mtd) | |
1150 | + return -ENOSPC; | |
1151 | + | |
1152 | + np = mtd_get_of_node(mtd); | |
1153 | + if (!np) | |
1154 | + return 0; | |
1155 | + | |
1156 | + if (of_property_read_bool(np, "mediatek,bmt-v2")) | |
1157 | + bmtd.ops = &v2_ops; | |
1158 | + else if (of_property_read_bool(np, "mediatek,bbt")) | |
1159 | + bmtd.ops = &bbt_ops; | |
1160 | + else | |
1161 | + return 0; | |
1162 | + | |
1163 | + bmtd.remap_range = of_get_property(np, "mediatek,bmt-remap-range", | |
1164 | + &bmtd.remap_range_len); | |
1165 | + bmtd.remap_range_len /= 8; | |
1166 | + | |
1167 | + bmtd.mtd = mtd; | |
1168 | + mtk_bmt_replace_ops(mtd); | |
1169 | + | |
1170 | + bmtd.blk_size = mtd->erasesize; | |
1171 | + bmtd.blk_shift = ffs(bmtd.blk_size) - 1; | |
1172 | + bmtd.pg_size = mtd->writesize; | |
1173 | + bmtd.pg_shift = ffs(bmtd.pg_size) - 1; | |
1174 | + bmtd.total_blks = mtd->size >> bmtd.blk_shift; | |
1175 | + | |
1176 | + nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL); | |
1177 | + if (!nand_data_buf) { | |
1178 | + pr_info("nand: FATAL ERR: allocate buffer failed!\n"); | |
1179 | + ret = -1; | |
1180 | + goto error; | |
1181 | + } | |
1182 | + | |
1183 | + memset(nand_data_buf, 0xff, bmtd.pg_size); | |
1184 | + | |
1185 | + ret = bmtd.ops->init(np); | |
1186 | + if (ret) | |
1187 | + goto error; | |
1188 | + | |
1189 | + mtk_bmt_add_debugfs(); | |
1190 | + return 0; | |
1191 | + | |
1192 | +error: | |
1193 | + mtk_bmt_detach(mtd); | |
1194 | + return ret; | |
1195 | +} | |
1196 | + | |
1197 | + | |
1198 | +MODULE_LICENSE("GPL"); | |
1199 | +MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>"); | |
1200 | +MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver"); | |
1201 | + |
@@ -39,7 +39,7 @@ | ||
39 | 39 | |
40 | 40 | #include "routerboot.h" |
41 | 41 | |
42 | -#define RB_HARDCONFIG_VER "0.06" | |
42 | +#define RB_HARDCONFIG_VER "0.07" | |
43 | 43 | #define RB_HC_PR_PFX "[rb_hardconfig] " |
44 | 44 | |
45 | 45 | /* ID values for hardware settings */ |
@@ -676,10 +676,9 @@ static ssize_t hc_wlan_data_bin_read(struct file *filp, struct kobject *kobj, | ||
676 | 676 | return count; |
677 | 677 | } |
678 | 678 | |
679 | -int __init rb_hardconfig_init(struct kobject *rb_kobj) | |
679 | +int rb_hardconfig_init(struct kobject *rb_kobj, struct mtd_info *mtd) | |
680 | 680 | { |
681 | 681 | struct kobject *hc_wlan_kobj; |
682 | - struct mtd_info *mtd; | |
683 | 682 | size_t bytes_read, buflen, outlen; |
684 | 683 | const u8 *buf; |
685 | 684 | void *outbuf; |
@@ -690,20 +689,19 @@ int __init rb_hardconfig_init(struct kobject *rb_kobj) | ||
690 | 689 | hc_kobj = NULL; |
691 | 690 | hc_wlan_kobj = NULL; |
692 | 691 | |
693 | - // TODO allow override | |
694 | - mtd = get_mtd_device_nm(RB_MTD_HARD_CONFIG); | |
695 | - if (IS_ERR(mtd)) | |
692 | + ret = __get_mtd_device(mtd); | |
693 | + if (ret) | |
696 | 694 | return -ENODEV; |
697 | 695 | |
698 | 696 | hc_buflen = mtd->size; |
699 | 697 | hc_buf = kmalloc(hc_buflen, GFP_KERNEL); |
700 | 698 | if (!hc_buf) { |
701 | - put_mtd_device(mtd); | |
699 | + __put_mtd_device(mtd); | |
702 | 700 | return -ENOMEM; |
703 | 701 | } |
704 | 702 | |
705 | 703 | ret = mtd_read(mtd, 0, hc_buflen, &bytes_read, hc_buf); |
706 | - put_mtd_device(mtd); | |
704 | + __put_mtd_device(mtd); | |
707 | 705 | |
708 | 706 | if (ret) |
709 | 707 | goto fail; |
@@ -818,8 +816,10 @@ fail: | ||
818 | 816 | return ret; |
819 | 817 | } |
820 | 818 | |
821 | -void __exit rb_hardconfig_exit(void) | |
819 | +void rb_hardconfig_exit(void) | |
822 | 820 | { |
823 | 821 | kobject_put(hc_kobj); |
822 | + hc_kobj = NULL; | |
824 | 823 | kfree(hc_buf); |
824 | + hc_buf = NULL; | |
825 | 825 | } |
@@ -56,23 +56,12 @@ | ||
56 | 56 | |
57 | 57 | #include "routerboot.h" |
58 | 58 | |
59 | -#define RB_SOFTCONFIG_VER "0.03" | |
59 | +#define RB_SOFTCONFIG_VER "0.05" | |
60 | 60 | #define RB_SC_PR_PFX "[rb_softconfig] " |
61 | 61 | |
62 | -/* | |
63 | - * mtd operations before 4.17 are asynchronous, not handled by this code | |
64 | - * Also make the driver act read-only if 4K_SECTORS are not enabled, since they | |
65 | - * are require to handle partial erasing of the small soft_config partition. | |
66 | - */ | |
67 | -#if defined(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) | |
68 | - #define RB_SC_HAS_WRITE_SUPPORT true | |
69 | - #define RB_SC_WMODE S_IWUSR | |
70 | - #define RB_SC_RMODE S_IRUSR | |
71 | -#else | |
72 | - #define RB_SC_HAS_WRITE_SUPPORT false | |
73 | - #define RB_SC_WMODE 0 | |
74 | - #define RB_SC_RMODE S_IRUSR | |
75 | -#endif | |
62 | +#define RB_SC_HAS_WRITE_SUPPORT true | |
63 | +#define RB_SC_WMODE S_IWUSR | |
64 | +#define RB_SC_RMODE S_IRUSR | |
76 | 65 | |
77 | 66 | /* ID values for software settings */ |
78 | 67 | #define RB_SCID_UART_SPEED 0x01 // u32*1 |
@@ -705,9 +694,8 @@ mtdfail: | ||
705 | 694 | |
706 | 695 | static struct kobj_attribute sc_kattrcommit = __ATTR(commit, RB_SC_RMODE|RB_SC_WMODE, sc_commit_show, sc_commit_store); |
707 | 696 | |
708 | -int __init rb_softconfig_init(struct kobject *rb_kobj) | |
697 | +int rb_softconfig_init(struct kobject *rb_kobj, struct mtd_info *mtd) | |
709 | 698 | { |
710 | - struct mtd_info *mtd; | |
711 | 699 | size_t bytes_read, buflen; |
712 | 700 | const u8 *buf; |
713 | 701 | int i, ret; |
@@ -716,20 +704,19 @@ int __init rb_softconfig_init(struct kobject *rb_kobj) | ||
716 | 704 | sc_buf = NULL; |
717 | 705 | sc_kobj = NULL; |
718 | 706 | |
719 | - // TODO allow override | |
720 | - mtd = get_mtd_device_nm(RB_MTD_SOFT_CONFIG); | |
721 | - if (IS_ERR(mtd)) | |
707 | + ret = __get_mtd_device(mtd); | |
708 | + if (ret) | |
722 | 709 | return -ENODEV; |
723 | 710 | |
724 | 711 | sc_buflen = mtd->size; |
725 | 712 | sc_buf = kmalloc(sc_buflen, GFP_KERNEL); |
726 | 713 | if (!sc_buf) { |
727 | - put_mtd_device(mtd); | |
714 | + __put_mtd_device(mtd); | |
728 | 715 | return -ENOMEM; |
729 | 716 | } |
730 | 717 | |
731 | 718 | ret = mtd_read(mtd, 0, sc_buflen, &bytes_read, sc_buf); |
732 | - put_mtd_device(mtd); | |
719 | + __put_mtd_device(mtd); | |
733 | 720 | |
734 | 721 | if (ret) |
735 | 722 | goto fail; |
@@ -799,8 +786,10 @@ fail: | ||
799 | 786 | return ret; |
800 | 787 | } |
801 | 788 | |
802 | -void __exit rb_softconfig_exit(void) | |
789 | +void rb_softconfig_exit(void) | |
803 | 790 | { |
804 | 791 | kobject_put(sc_kobj); |
792 | + sc_kobj = NULL; | |
805 | 793 | kfree(sc_buf); |
794 | + sc_buf = NULL; | |
806 | 795 | } |
@@ -13,6 +13,7 @@ | ||
13 | 13 | #include <linux/module.h> |
14 | 14 | #include <linux/kernel.h> |
15 | 15 | #include <linux/sysfs.h> |
16 | +#include <linux/mtd/mtd.h> | |
16 | 17 | |
17 | 18 | #include "routerboot.h" |
18 | 19 |
@@ -160,25 +161,57 @@ fail: | ||
160 | 161 | return ret; |
161 | 162 | } |
162 | 163 | |
163 | -static int __init routerboot_init(void) | |
164 | +static void routerboot_mtd_notifier_add(struct mtd_info *mtd) | |
164 | 165 | { |
165 | - rb_kobj = kobject_create_and_add("mikrotik", firmware_kobj); | |
166 | - if (!rb_kobj) | |
167 | - return -ENOMEM; | |
166 | + /* Currently routerboot is only known to live on NOR flash */ | |
167 | + if (mtd->type != MTD_NORFLASH) | |
168 | + return; | |
168 | 169 | |
169 | 170 | /* |
170 | 171 | * We ignore the following return values and always register. |
171 | 172 | * These init() routines are designed so that their failed state is |
172 | 173 | * always manageable by the corresponding exit() calls. |
174 | + * Notifier is called with MTD mutex held: use __get/__put variants. | |
175 | + * TODO: allow partition names override | |
173 | 176 | */ |
174 | - rb_hardconfig_init(rb_kobj); | |
175 | - rb_softconfig_init(rb_kobj); | |
177 | + if (!strcmp(mtd->name, RB_MTD_HARD_CONFIG)) | |
178 | + rb_hardconfig_init(rb_kobj, mtd); | |
179 | + else if (!strcmp(mtd->name, RB_MTD_SOFT_CONFIG)) | |
180 | + rb_softconfig_init(rb_kobj, mtd); | |
181 | +} | |
182 | + | |
183 | +static void routerboot_mtd_notifier_remove(struct mtd_info *mtd) | |
184 | +{ | |
185 | + if (mtd->type != MTD_NORFLASH) | |
186 | + return; | |
187 | + | |
188 | + if (!strcmp(mtd->name, RB_MTD_HARD_CONFIG)) | |
189 | + rb_hardconfig_exit(); | |
190 | + else if (!strcmp(mtd->name, RB_MTD_SOFT_CONFIG)) | |
191 | + rb_softconfig_exit(); | |
192 | +} | |
193 | + | |
194 | +/* Note: using a notifier prevents qualifying init()/exit() functions with __init/__exit */ | |
195 | +static struct mtd_notifier routerboot_mtd_notifier = { | |
196 | + .add = routerboot_mtd_notifier_add, | |
197 | + .remove = routerboot_mtd_notifier_remove, | |
198 | +}; | |
199 | + | |
200 | +static int __init routerboot_init(void) | |
201 | +{ | |
202 | + rb_kobj = kobject_create_and_add("mikrotik", firmware_kobj); | |
203 | + if (!rb_kobj) | |
204 | + return -ENOMEM; | |
205 | + | |
206 | + register_mtd_user(&routerboot_mtd_notifier); | |
176 | 207 | |
177 | 208 | return 0; |
178 | 209 | } |
179 | 210 | |
180 | 211 | static void __exit routerboot_exit(void) |
181 | 212 | { |
213 | + unregister_mtd_user(&routerboot_mtd_notifier); | |
214 | + /* Exit routines are idempotent */ | |
182 | 215 | rb_softconfig_exit(); |
183 | 216 | rb_hardconfig_exit(); |
184 | 217 | kobject_put(rb_kobj); // recursive afaict |
@@ -25,11 +25,11 @@ | ||
25 | 25 | int routerboot_tag_find(const u8 *bufhead, const size_t buflen, const u16 tag_id, u16 *pld_ofs, u16 *pld_len); |
26 | 26 | int routerboot_rle_decode(const u8 *in, size_t inlen, u8 *out, size_t *outlen); |
27 | 27 | |
28 | -int __init rb_hardconfig_init(struct kobject *rb_kobj); | |
29 | -void __exit rb_hardconfig_exit(void); | |
28 | +int rb_hardconfig_init(struct kobject *rb_kobj, struct mtd_info *mtd); | |
29 | +void rb_hardconfig_exit(void); | |
30 | 30 | |
31 | -int __init rb_softconfig_init(struct kobject *rb_kobj); | |
32 | -void __exit rb_softconfig_exit(void); | |
31 | +int rb_softconfig_init(struct kobject *rb_kobj, struct mtd_info *mtd); | |
32 | +void rb_softconfig_exit(void); | |
33 | 33 | |
34 | 34 | ssize_t routerboot_tag_show_string(const u8 *pld, u16 pld_len, char *buf); |
35 | 35 | ssize_t routerboot_tag_show_u32s(const u8 *pld, u16 pld_len, char *buf); |
@@ -0,0 +1,18 @@ | ||
1 | +#ifndef __MTK_BMT_H | |
2 | +#define __MTK_BMT_H | |
3 | + | |
4 | +#ifdef CONFIG_MTD_NAND_MTK_BMT | |
5 | +int mtk_bmt_attach(struct mtd_info *mtd); | |
6 | +void mtk_bmt_detach(struct mtd_info *mtd); | |
7 | +#else | |
8 | +static inline int mtk_bmt_attach(struct mtd_info *mtd) | |
9 | +{ | |
10 | + return 0; | |
11 | +} | |
12 | + | |
13 | +static inline void mtk_bmt_detach(struct mtd_info *mtd) | |
14 | +{ | |
15 | +} | |
16 | +#endif | |
17 | + | |
18 | +#endif |
@@ -0,0 +1,23 @@ | ||
1 | +--- a/drivers/mtd/nand/Kconfig | |
2 | ++++ b/drivers/mtd/nand/Kconfig | |
3 | +@@ -15,6 +15,10 @@ config MTD_NAND_ECC | |
4 | + bool | |
5 | + depends on MTD_NAND_CORE | |
6 | + | |
7 | ++config MTD_NAND_MTK_BMT | |
8 | ++ bool "Support MediaTek NAND Bad-block Management Table" | |
9 | ++ default n | |
10 | ++ | |
11 | + endmenu | |
12 | + | |
13 | + endmenu | |
14 | +--- a/drivers/mtd/nand/Makefile | |
15 | ++++ b/drivers/mtd/nand/Makefile | |
16 | +@@ -2,6 +2,7 @@ | |
17 | + | |
18 | + nandcore-objs := core.o bbt.o | |
19 | + obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o | |
20 | ++obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o | |
21 | + | |
22 | + obj-y += onenand/ | |
23 | + obj-y += raw/ |
@@ -0,0 +1,397 @@ | ||
1 | +From patchwork Tue Jun 8 04:07:19 2021 | |
2 | +Content-Type: text/plain; charset="utf-8" | |
3 | +MIME-Version: 1.0 | |
4 | +Content-Transfer-Encoding: 7bit | |
5 | +X-Patchwork-Submitter: John Thomson <git@johnthomson.fastmail.com.au> | |
6 | +X-Patchwork-Id: 1489105 | |
7 | +X-Patchwork-Delegate: tudor.ambarus@gmail.com | |
8 | +Return-Path: | |
9 | + <linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org> | |
10 | +X-Original-To: incoming@patchwork.ozlabs.org | |
11 | +Delivered-To: patchwork-incoming@bilbo.ozlabs.org | |
12 | +Authentication-Results: ozlabs.org; | |
13 | + spf=none (no SPF record) smtp.mailfrom=lists.infradead.org | |
14 | + (client-ip=2607:7c80:54:e::133; helo=bombadil.infradead.org; | |
15 | + envelope-from=linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org; | |
16 | + receiver=<UNKNOWN>) | |
17 | +Authentication-Results: ozlabs.org; | |
18 | + dkim=pass (2048-bit key; | |
19 | + secure) header.d=lists.infradead.org header.i=@lists.infradead.org | |
20 | + header.a=rsa-sha256 header.s=bombadil.20210309 header.b=EMabhVoR; | |
21 | + dkim=fail reason="signature verification failed" (2048-bit key; | |
22 | + unprotected) header.d=fastmail.com.au header.i=@fastmail.com.au | |
23 | + header.a=rsa-sha256 header.s=fm3 header.b=dLzuZ6dB; | |
24 | + dkim=fail reason="signature verification failed" (2048-bit key; | |
25 | + unprotected) header.d=messagingengine.com header.i=@messagingengine.com | |
26 | + header.a=rsa-sha256 header.s=fm3 header.b=nSRGsW+C; | |
27 | + dkim-atps=neutral | |
28 | +Received: from bombadil.infradead.org (bombadil.infradead.org | |
29 | + [IPv6:2607:7c80:54:e::133]) | |
30 | + (using TLSv1.3 with cipher TLS_AES_256_GCM_SHA384 (256/256 bits) | |
31 | + key-exchange X25519 server-signature RSA-PSS (4096 bits) server-digest | |
32 | + SHA256) | |
33 | + (No client certificate requested) | |
34 | + by ozlabs.org (Postfix) with ESMTPS id 4FzcFN1j1nz9sW8 | |
35 | + for <incoming@patchwork.ozlabs.org>; Tue, 8 Jun 2021 14:09:28 +1000 (AEST) | |
36 | +DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; | |
37 | + d=lists.infradead.org; s=bombadil.20210309; h=Sender: | |
38 | + Content-Transfer-Encoding:Content-Type:List-Subscribe:List-Help:List-Post: | |
39 | + List-Archive:List-Unsubscribe:List-Id:MIME-Version:Message-Id:Date:Subject:Cc | |
40 | + :To:From:Reply-To:Content-ID:Content-Description:Resent-Date:Resent-From: | |
41 | + Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:In-Reply-To:References: | |
42 | + List-Owner; bh=6mUWQd71FwsINycGYY1qOhKz+ecWJVNtwDkTebG3XkA=; b=EMabhVoRE3ad89 | |
43 | + o3L2AgyKrs+blSofUC3hoSsQe7gi3m4si8S9HW8Z+8SsS5TufUsvGwDl80qSYGlQOytQF+1yRUWvE | |
44 | + 6FJ/+bqv+TwjqZFibgJ6+9OVsQN9dZ/no1R0bBXIpmrf8ORUmv58QK4ZQquaFKbyXKpFeWOC2MSv4 | |
45 | + H2MAhyhTU8a3gtooH6G8+KvsJEfVgh6C+aDbwxyh2UY3chHKuw1kvL6AktbfUE2xl4zxi3x3kc70B | |
46 | + Wi3LiJBFokxVdgnROXxTU5tI0XboWYkQV64gLuQNV4XKClcuhVpzloDK8Iok6NTd7b32a7TdEFlCS | |
47 | + lGKsEKmxtUlW2FpfoduA==; | |
48 | +Received: from localhost ([::1] helo=bombadil.infradead.org) | |
49 | + by bombadil.infradead.org with esmtp (Exim 4.94.2 #2 (Red Hat Linux)) | |
50 | + id 1lqT1r-006OAW-DX; Tue, 08 Jun 2021 04:07:51 +0000 | |
51 | +Received: from new1-smtp.messagingengine.com ([66.111.4.221]) | |
52 | + by bombadil.infradead.org with esmtps (Exim 4.94.2 #2 (Red Hat Linux)) | |
53 | + id 1lqT1l-006O9b-Fq | |
54 | + for linux-mtd@lists.infradead.org; Tue, 08 Jun 2021 04:07:50 +0000 | |
55 | +Received: from compute2.internal (compute2.nyi.internal [10.202.2.42]) | |
56 | + by mailnew.nyi.internal (Postfix) with ESMTP id 4456B580622; | |
57 | + Tue, 8 Jun 2021 00:07:42 -0400 (EDT) | |
58 | +Received: from mailfrontend2 ([10.202.2.163]) | |
59 | + by compute2.internal (MEProxy); Tue, 08 Jun 2021 00:07:42 -0400 | |
60 | +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=fastmail.com.au; | |
61 | + h=from:to:cc:subject:date:message-id:mime-version | |
62 | + :content-transfer-encoding; s=fm3; bh=ZXRH+YluM1mHCS1EWUiCY/Sg8O | |
63 | + LccfHe1oW5iAay6y8=; b=dLzuZ6dBYf7ZA8tWLOBFZYLi7ERsGe/4vnMXG+ovvb | |
64 | + dNBO0+SaFGwoqYSFrfq/TeyHfKyvxrA7+LCdopIuT4abpLHxtRwtRiafQcDYCPat | |
65 | + qJIqOZO+wCZC5S9Jc1OP7+t1FviGpgevqIMotci37P+RWc5u3AweMzFljZk90E8C | |
66 | + uorV6rXagD+OssJQzllRnAIK88+rOAC9ZyXv2gWxy4d1HSCwSWgzx2vnV9CNp918 | |
67 | + YC/3tiHas9krbrPIaAsdBROr7Bvoe/ShRRzruKRuvZVgg5NN90vX+/5ZjI8u04GM | |
68 | + p2bWCbC62CP6wlcgDaz+c/Sgr5ITd2GPENJsHfqmLRBA== | |
69 | +DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d= | |
70 | + messagingengine.com; h=cc:content-transfer-encoding:date:from | |
71 | + :message-id:mime-version:subject:to:x-me-proxy:x-me-proxy | |
72 | + :x-me-sender:x-me-sender:x-sasl-enc; s=fm3; bh=ZXRH+YluM1mHCS1EW | |
73 | + UiCY/Sg8OLccfHe1oW5iAay6y8=; b=nSRGsW+CQ2Zx1RVpIUu8W/VD/k5P+32BW | |
74 | + 5k2ltd+UhI3dfldBPzHrYiOP/IJqGkNW+V+rHASacW/vFygnaZoxNjRYKnOsu+26 | |
75 | + wb2yK3jpl6lsNTg3N1Z4XJrYY2lf9H29DMFbhC67l0PTc050rcZk4XsKTLAlv14Q | |
76 | + VA4WREYSaX/4IN4O+ES4TMq0a/3gKZh6nvbbJXbsXfK0WlSHTGZtZmW3fyrqvbXa | |
77 | + t+R7L8vvqWvwls0pV+Sn8LeQqb7+A69w0UOnuznjkcA3sCc2YehcHbxcUEnMH+9N | |
78 | + bxOjmIDeg9/4X/829tUWUJiLhE5SFmQZ1P6oFtmbWoLrDz0ZJIVBw== | |
79 | +X-ME-Sender: <xms:C-2-YD2uka4HsA6gcdsV2Ia7vebY4Yjp9E8q7KBMb54jnAzGL7-67Q> | |
80 | + <xme:C-2-YCEaxASy5VlcrvNO_jLFpMDGkFCRsuVNuZGEQsiRZygk8jPHWq7unPjeT6uYS | |
81 | + 2pUP6PrTQ2rggjEIg> | |
82 | +X-ME-Received: | |
83 | + <xmr:C-2-YD4exeK49N_YZWWf2BWDhVyCbCY3wwvjTyDOFxeugx7Jg08pzMUToo9oJjrBpcVTaA3kbfk> | |
84 | +X-ME-Proxy-Cause: | |
85 | + gggruggvucftvghtrhhoucdtuddrgeduledrfedtkedgjeduucetufdoteggodetrfdotf | |
86 | + fvucfrrhhofhhilhgvmecuhfgrshhtofgrihhlpdfqfgfvpdfurfetoffkrfgpnffqhgen | |
87 | + uceurghilhhouhhtmecufedttdenucesvcftvggtihhpihgvnhhtshculddquddttddmne | |
88 | + cujfgurhephffvufffkffoggfgsedtkeertdertddtnecuhfhrohhmpeflohhhnhcuvfhh | |
89 | + ohhmshhonhcuoehgihhtsehjohhhnhhthhhomhhsohhnrdhfrghsthhmrghilhdrtghomh | |
90 | + drrghuqeenucggtffrrghtthgvrhhnpefffeeihfdukedtuedufeetieeuudfhhefhkefh | |
91 | + tefgtdeuffekffelleetveduieenucevlhhushhtvghrufhiiigvpedtnecurfgrrhgrmh | |
92 | + epmhgrihhlfhhrohhmpehgihhtsehjohhhnhhthhhomhhsohhnrdhfrghsthhmrghilhdr | |
93 | + tghomhdrrghu | |
94 | +X-ME-Proxy: <xmx:C-2-YI0AJZGjcB3wIbI9BoC9X8VNl4i9A7cQnBkvwZ25czWJlkKCLw> | |
95 | + <xmx:C-2-YGGufw99T-O81-FeiSyEruv6_Pr0IHFhspQdxjv5k1VFTZ0lzQ> | |
96 | + <xmx:C-2-YJ8BW7DhSDSCEAPSJWrwh_hHP79qreTZtWh_kOUwSh1c0MMlAg> | |
97 | + <xmx:Du2-YJBeX2Fg9oFZVXGwEJ1ZrZnXHiAqNON8tbpzquYgcm2o_LM48g> | |
98 | +Received: by mail.messagingengine.com (Postfix) with ESMTPA; Tue, | |
99 | + 8 Jun 2021 00:07:35 -0400 (EDT) | |
100 | +From: John Thomson <git@johnthomson.fastmail.com.au> | |
101 | +To: Miquel Raynal <miquel.raynal@bootlin.com>, | |
102 | + Richard Weinberger <richard@nod.at>, Vignesh Raghavendra <vigneshr@ti.com>, | |
103 | + Tudor Ambarus <tudor.ambarus@microchip.com>, | |
104 | + Michael Walle <michael@walle.cc>, Pratyush Yadav <p.yadav@ti.com>, | |
105 | + linux-mtd@lists.infradead.org | |
106 | +Cc: linux-kernel@vger.kernel.org, | |
107 | + John Thomson <git@johnthomson.fastmail.com.au>, | |
108 | + kernel test robot <lkp@intel.com>, Dan Carpenter <dan.carpenter@oracle.com> | |
109 | +Subject: [PATCH] mtd: spi-nor: write support for minor aligned partitions | |
110 | +Date: Tue, 8 Jun 2021 14:07:19 +1000 | |
111 | +Message-Id: <20210608040719.14431-1-git@johnthomson.fastmail.com.au> | |
112 | +X-Mailer: git-send-email 2.31.1 | |
113 | +MIME-Version: 1.0 | |
114 | +X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3 | |
115 | +X-CRM114-CacheID: sfid-20210607_210745_712053_67A7D864 | |
116 | +X-CRM114-Status: GOOD ( 26.99 ) | |
117 | +X-Spam-Score: -0.8 (/) | |
118 | +X-Spam-Report: Spam detection software, | |
119 | + running on the system "bombadil.infradead.org", | |
120 | + has NOT identified this incoming email as spam. The original | |
121 | + message has been attached to this so you can view it or label | |
122 | + similar future email. If you have any questions, see | |
123 | + the administrator of that system for details. | |
124 | + Content preview: Do not prevent writing to mtd partitions where a partition | |
125 | + boundary sits on a minor erasesize boundary. This addresses a FIXME that | |
126 | + has been present since the start of the linux git history: /* Doesn' [...] | |
127 | + Content analysis details: (-0.8 points, 5.0 required) | |
128 | + pts rule name description | |
129 | + ---- ---------------------- | |
130 | + -------------------------------------------------- | |
131 | + -0.7 RCVD_IN_DNSWL_LOW RBL: Sender listed at https://www.dnswl.org/, | |
132 | + low trust [66.111.4.221 listed in list.dnswl.org] | |
133 | + -0.0 SPF_PASS SPF: sender matches SPF record | |
134 | + -0.0 SPF_HELO_PASS SPF: HELO matches SPF record | |
135 | + 0.0 RCVD_IN_MSPIKE_H3 RBL: Good reputation (+3) | |
136 | + [66.111.4.221 listed in wl.mailspike.net] | |
137 | + -0.1 DKIM_VALID Message has at least one valid DKIM or DK signature | |
138 | + 0.1 DKIM_SIGNED Message has a DKIM or DK signature, | |
139 | + not necessarily | |
140 | + valid | |
141 | + -0.1 DKIM_VALID_EF Message has a valid DKIM or DK signature from | |
142 | + envelope-from domain | |
143 | + 0.0 RCVD_IN_MSPIKE_WL Mailspike good senders | |
144 | +X-BeenThere: linux-mtd@lists.infradead.org | |
145 | +X-Mailman-Version: 2.1.34 | |
146 | +Precedence: list | |
147 | +List-Id: Linux MTD discussion mailing list <linux-mtd.lists.infradead.org> | |
148 | +List-Unsubscribe: <http://lists.infradead.org/mailman/options/linux-mtd>, | |
149 | + <mailto:linux-mtd-request@lists.infradead.org?subject=unsubscribe> | |
150 | +List-Archive: <http://lists.infradead.org/pipermail/linux-mtd/> | |
151 | +List-Post: <mailto:linux-mtd@lists.infradead.org> | |
152 | +List-Help: <mailto:linux-mtd-request@lists.infradead.org?subject=help> | |
153 | +List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mtd>, | |
154 | + <mailto:linux-mtd-request@lists.infradead.org?subject=subscribe> | |
155 | +Sender: "linux-mtd" <linux-mtd-bounces@lists.infradead.org> | |
156 | +Errors-To: linux-mtd-bounces+incoming=patchwork.ozlabs.org@lists.infradead.org | |
157 | + | |
158 | +Do not prevent writing to mtd partitions where a partition boundary sits | |
159 | +on a minor erasesize boundary. | |
160 | +This addresses a FIXME that has been present since the start of the | |
161 | +linux git history: | |
162 | +/* Doesn't start on a boundary of major erase size */ | |
163 | +/* FIXME: Let it be writable if it is on a boundary of | |
164 | + * _minor_ erase size though */ | |
165 | + | |
166 | +Allow a uniform erase region spi-nor device to be configured | |
167 | +to use the non-uniform erase regions code path for an erase with: | |
168 | +CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE=y | |
169 | + | |
170 | +On supporting hardware (SECT_4K: majority of current SPI-NOR device) | |
171 | +provide the facility for an erase to use the least number | |
172 | +of SPI-NOR operations, as well as access to 4K erase without | |
173 | +requiring CONFIG_MTD_SPI_NOR_USE_4K_SECTORS | |
174 | + | |
175 | +Introduce erasesize_minor to the mtd struct, | |
176 | +the smallest erasesize supported by the device | |
177 | + | |
178 | +On existing devices, this is useful where write support is wanted | |
179 | +for data on a 4K partition, such as some u-boot-env partitions, | |
180 | +or RouterBoot soft_config, while still netting the performance | |
181 | +benefits of using 64K sectors | |
182 | + | |
183 | +Performance: | |
184 | +time mtd erase firmware | |
185 | +OpenWrt 5.10 ramips MT7621 w25q128jv 0xfc0000 partition length | |
186 | + | |
187 | +Without this patch | |
188 | +MTD_SPI_NOR_USE_4K_SECTORS=y |n | |
189 | +real 2m 11.66s |0m 50.86s | |
190 | +user 0m 0.00s |0m 0.00s | |
191 | +sys 1m 56.20s |0m 50.80s | |
192 | + | |
193 | +With this patch | |
194 | +MTD_SPI_NOR_USE_VARIABLE_ERASE=n|y |4K_SECTORS=y | |
195 | +real 0m 51.68s |0m 50.85s |2m 12.89s | |
196 | +user 0m 0.00s |0m 0.00s |0m 0.01s | |
197 | +sys 0m 46.94s |0m 50.38s |2m 12.46s | |
198 | + | |
199 | +Signed-off-by: John Thomson <git@johnthomson.fastmail.com.au> | |
200 | +--- | |
201 | +Have not tested on variable erase regions device. | |
202 | + | |
203 | +checkpatch does not like the printk(KERN_WARNING | |
204 | +these should be changed separately beforehand? | |
205 | + | |
206 | +Changes RFC -> v1: | |
207 | +Fix uninitialized variable smatch warning | |
208 | +Reported-by: kernel test robot <lkp@intel.com> | |
209 | +Reported-by: Dan Carpenter <dan.carpenter@oracle.com> | |
210 | +--- | |
211 | + drivers/mtd/mtdpart.c | 52 ++++++++++++++++++++++++++++--------- | |
212 | + drivers/mtd/spi-nor/Kconfig | 10 +++++++ | |
213 | + drivers/mtd/spi-nor/core.c | 10 +++++-- | |
214 | + include/linux/mtd/mtd.h | 2 ++ | |
215 | + 4 files changed, 60 insertions(+), 14 deletions(-) | |
216 | + | |
217 | +diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c | |
218 | +index 665fd9020b76..fe7626b5020e 100644 | |
219 | +--- a/drivers/mtd/mtdpart.c | |
220 | ++++ b/drivers/mtd/mtdpart.c | |
221 | +@@ -38,10 +38,11 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, | |
222 | + struct mtd_info *master = mtd_get_master(parent); | |
223 | + int wr_alignment = (parent->flags & MTD_NO_ERASE) ? | |
224 | + master->writesize : master->erasesize; | |
225 | ++ int wr_alignment_minor = 0; | |
226 | + u64 parent_size = mtd_is_partition(parent) ? | |
227 | + parent->part.size : parent->size; | |
228 | + struct mtd_info *child; | |
229 | +- u32 remainder; | |
230 | ++ u32 remainder, remainder_minor; | |
231 | + char *name; | |
232 | + u64 tmp; | |
233 | + | |
234 | +@@ -143,6 +144,7 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, | |
235 | + int i, max = parent->numeraseregions; | |
236 | + u64 end = child->part.offset + child->part.size; | |
237 | + struct mtd_erase_region_info *regions = parent->eraseregions; | |
238 | ++ uint32_t erasesize_minor = child->erasesize; | |
239 | + | |
240 | + /* Find the first erase regions which is part of this | |
241 | + * partition. */ | |
242 | +@@ -153,15 +155,24 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, | |
243 | + if (i > 0) | |
244 | + i--; | |
245 | + | |
246 | +- /* Pick biggest erasesize */ | |
247 | + for (; i < max && regions[i].offset < end; i++) { | |
248 | ++ /* Pick biggest erasesize */ | |
249 | + if (child->erasesize < regions[i].erasesize) | |
250 | + child->erasesize = regions[i].erasesize; | |
251 | ++ /* Pick smallest non-zero erasesize */ | |
252 | ++ if ((erasesize_minor > regions[i].erasesize) && (regions[i].erasesize > 0)) | |
253 | ++ erasesize_minor = regions[i].erasesize; | |
254 | + } | |
255 | ++ | |
256 | ++ if (erasesize_minor < child->erasesize) | |
257 | ++ child->erasesize_minor = erasesize_minor; | |
258 | ++ | |
259 | + BUG_ON(child->erasesize == 0); | |
260 | + } else { | |
261 | + /* Single erase size */ | |
262 | + child->erasesize = master->erasesize; | |
263 | ++ if (master->erasesize_minor) | |
264 | ++ child->erasesize_minor = master->erasesize_minor; | |
265 | + } | |
266 | + | |
267 | + /* | |
268 | +@@ -169,26 +180,43 @@ static struct mtd_info *allocate_partition(struct mtd_info *parent, | |
269 | + * exposes several regions with different erasesize. Adjust | |
270 | + * wr_alignment accordingly. | |
271 | + */ | |
272 | +- if (!(child->flags & MTD_NO_ERASE)) | |
273 | ++ if (!(child->flags & MTD_NO_ERASE)) { | |
274 | + wr_alignment = child->erasesize; | |
275 | ++ if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE) && child->erasesize_minor) | |
276 | ++ wr_alignment_minor = child->erasesize_minor; | |
277 | ++ } | |
278 | + | |
279 | + tmp = mtd_get_master_ofs(child, 0); | |
280 | + remainder = do_div(tmp, wr_alignment); | |
281 | + if ((child->flags & MTD_WRITEABLE) && remainder) { | |
282 | +- /* Doesn't start on a boundary of major erase size */ | |
283 | +- /* FIXME: Let it be writable if it is on a boundary of | |
284 | +- * _minor_ erase size though */ | |
285 | +- child->flags &= ~MTD_WRITEABLE; | |
286 | +- printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n", | |
287 | +- part->name); | |
288 | ++ if (wr_alignment_minor) { | |
289 | ++ tmp = mtd_get_master_ofs(child, 0); | |
290 | ++ remainder_minor = do_div(tmp, wr_alignment_minor); | |
291 | ++ if (remainder_minor == 0) | |
292 | ++ child->erasesize = child->erasesize_minor; | |
293 | ++ } | |
294 | ++ | |
295 | ++ if ((!wr_alignment_minor) || (wr_alignment_minor && remainder_minor != 0)) { | |
296 | ++ child->flags &= ~MTD_WRITEABLE; | |
297 | ++ printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n", | |
298 | ++ part->name); | |
299 | ++ } | |
300 | + } | |
301 | + | |
302 | + tmp = mtd_get_master_ofs(child, 0) + child->part.size; | |
303 | + remainder = do_div(tmp, wr_alignment); | |
304 | + if ((child->flags & MTD_WRITEABLE) && remainder) { | |
305 | +- child->flags &= ~MTD_WRITEABLE; | |
306 | +- printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n", | |
307 | +- part->name); | |
308 | ++ if (wr_alignment_minor) { | |
309 | ++ tmp = mtd_get_master_ofs(child, 0) + child->part.size; | |
310 | ++ remainder_minor = do_div(tmp, wr_alignment_minor); | |
311 | ++ if (remainder_minor == 0) | |
312 | ++ child->erasesize = child->erasesize_minor; | |
313 | ++ } | |
314 | ++ if ((!wr_alignment_minor) || (wr_alignment_minor && remainder_minor != 0)) { | |
315 | ++ child->flags &= ~MTD_WRITEABLE; | |
316 | ++ printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n", | |
317 | ++ part->name); | |
318 | ++ } | |
319 | + } | |
320 | + | |
321 | + child->size = child->part.size; | |
322 | +diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig | |
323 | +index 24cd25de2b8b..09df9f1a8127 100644 | |
324 | +--- a/drivers/mtd/spi-nor/Kconfig | |
325 | ++++ b/drivers/mtd/spi-nor/Kconfig | |
326 | +@@ -10,6 +10,16 @@ menuconfig MTD_SPI_NOR | |
327 | + | |
328 | + if MTD_SPI_NOR | |
329 | + | |
330 | ++config MTD_SPI_NOR_USE_VARIABLE_ERASE | |
331 | ++ bool "Disable uniform_erase to allow use of all hardware supported erasesizes" | |
332 | ++ depends on !MTD_SPI_NOR_USE_4K_SECTORS | |
333 | ++ default n | |
334 | ++ help | |
335 | ++ Allow mixed use of all hardware supported erasesizes, | |
336 | ++ by forcing spi_nor to use the multiple eraseregions code path. | |
337 | ++ For example: A 68K erase will use one 64K erase, and one 4K erase | |
338 | ++ on supporting hardware. | |
339 | ++ | |
340 | + config MTD_SPI_NOR_USE_4K_SECTORS | |
341 | + bool "Use small 4096 B erase sectors" | |
342 | + default y | |
343 | +diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c | |
344 | +index bd2c7717eb10..43d9b54e7edd 100644 | |
345 | +--- a/drivers/mtd/spi-nor/core.c | |
346 | ++++ b/drivers/mtd/spi-nor/core.c | |
347 | +@@ -1262,6 +1262,8 @@ static u8 spi_nor_convert_3to4_erase(u8 opcode) | |
348 | + | |
349 | + static bool spi_nor_has_uniform_erase(const struct spi_nor *nor) | |
350 | + { | |
351 | ++ if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE)) | |
352 | ++ return false; | |
353 | + return !!nor->params->erase_map.uniform_erase_type; | |
354 | + } | |
355 | + | |
356 | +@@ -2381,6 +2383,7 @@ static int spi_nor_select_erase(struct spi_nor *nor) | |
357 | + { | |
358 | + struct spi_nor_erase_map *map = &nor->params->erase_map; | |
359 | + const struct spi_nor_erase_type *erase = NULL; | |
360 | ++ const struct spi_nor_erase_type *erase_minor = NULL; | |
361 | + struct mtd_info *mtd = &nor->mtd; | |
362 | + u32 wanted_size = nor->info->sector_size; | |
363 | + int i; | |
364 | +@@ -2413,8 +2416,9 @@ static int spi_nor_select_erase(struct spi_nor *nor) | |
365 | + */ | |
366 | + for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) { | |
367 | + if (map->erase_type[i].size) { | |
368 | +- erase = &map->erase_type[i]; | |
369 | +- break; | |
370 | ++ if (!erase) | |
371 | ++ erase = &map->erase_type[i]; | |
372 | ++ erase_minor = &map->erase_type[i]; | |
373 | + } | |
374 | + } | |
375 | + | |
376 | +@@ -2422,6 +2426,8 @@ static int spi_nor_select_erase(struct spi_nor *nor) | |
377 | + return -EINVAL; | |
378 | + | |
379 | + mtd->erasesize = erase->size; | |
380 | ++ if (erase_minor && erase_minor->size < erase->size) | |
381 | ++ mtd->erasesize_minor = erase_minor->size; | |
382 | + return 0; | |
383 | + } | |
384 | + | |
385 | +diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h | |
386 | +index a89955f3cbc8..33eafa27da50 100644 | |
387 | +--- a/include/linux/mtd/mtd.h | |
388 | ++++ b/include/linux/mtd/mtd.h | |
389 | +@@ -243,6 +243,8 @@ struct mtd_info { | |
390 | + * information below if they desire | |
391 | + */ | |
392 | + uint32_t erasesize; | |
393 | ++ /* "Minor" (smallest) erase size supported by the whole device */ | |
394 | ++ uint32_t erasesize_minor; | |
395 | + /* Minimal writable flash unit size. In case of NOR flash it is 1 (even | |
396 | + * though individual bits can be cleared), in case of NAND flash it is | |
397 | + * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR |
@@ -2,3 +2,4 @@ CONFIG_MIKROTIK=y | ||
2 | 2 | CONFIG_MIKROTIK_RB_SYSFS=y |
3 | 3 | CONFIG_MTD_ROUTERBOOT_PARTS=y |
4 | 4 | CONFIG_MTD_SPLIT_MINOR_FW=y |
5 | +CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE=y |
@@ -33,13 +33,13 @@ | ||
33 | 33 | |
34 | 34 | dect { |
35 | 35 | label = "dect"; |
36 | - gpios = <&gpio 1 GPIO_ACTIVE_HIGH>; | |
36 | + gpios = <&gpio 1 GPIO_ACTIVE_LOW>; | |
37 | 37 | linux,code = <KEY_PHONE>; |
38 | 38 | }; |
39 | 39 | |
40 | 40 | wifi { |
41 | 41 | label = "wifi"; |
42 | - gpios = <&gpio 29 GPIO_ACTIVE_HIGH>; | |
42 | + gpios = <&gpio 29 GPIO_ACTIVE_LOW>; | |
43 | 43 | linux,code = <KEY_RFKILL>; |
44 | 44 | }; |
45 | 45 | }; |
@@ -23,10 +23,9 @@ case "$FIRMWARE" in | ||
23 | 23 | avm,fritz7360-v2) |
24 | 24 | caldata_extract "urlader" 0x985 0x1000 |
25 | 25 | ;; |
26 | - avm,fritz7412) | |
27 | - /usr/bin/fritz_cal_extract -i 1 -s 0x1e000 -e 0x207 -l 4096 -o /lib/firmware/$FIRMWARE $(find_mtd_chardev "urlader") | |
28 | - ;; | |
26 | + avm,fritz7412|\ | |
29 | 27 | avm,fritz7430) |
28 | + /usr/bin/fritz_cal_extract -i 1 -s 0x1e000 -e 0x207 -l 4096 -o /lib/firmware/$FIRMWARE $(find_mtd_chardev "urlader") || \ | |
30 | 29 | /usr/bin/fritz_cal_extract -i 1 -s 0x1e800 -e 0x207 -l 4096 -o /lib/firmware/$FIRMWARE $(find_mtd_chardev "urlader") |
31 | 30 | ;; |
32 | 31 | bt,homehub-v5a) |
@@ -3,10 +3,13 @@ | ||
3 | 3 | #include "mt7622-rfb1.dts" |
4 | 4 | / { |
5 | 5 | model = "MT7622_MT7531 RFB (UBI)"; |
6 | - compatible = "mediatek,mt7622,ubi"; | |
6 | + compatible = "mediatek,mt7622-rfb1-ubi"; | |
7 | 7 | }; |
8 | 8 | |
9 | 9 | &snand { |
10 | + mediatek,bmt-v2; | |
11 | + mediatek,bmt-remap-range = <0x0 0x6c0000>; | |
12 | + | |
10 | 13 | partitions { |
11 | 14 | compatible = "fixed-partitions"; |
12 | 15 | #address-cells = <1>; |
@@ -35,22 +38,19 @@ | ||
35 | 38 | |
36 | 39 | factory: partition@1c0000 { |
37 | 40 | label = "Factory"; |
38 | - reg = <0x1c0000 0x0040000>; | |
41 | + reg = <0x1c0000 0x0100000>; | |
39 | 42 | }; |
40 | 43 | |
41 | 44 | partition@200000 { |
42 | 45 | label = "kernel"; |
43 | - reg = <0x200000 0x400000>; | |
46 | + reg = <0x2c0000 0x400000>; | |
44 | 47 | }; |
45 | 48 | |
46 | - partition@600000 { | |
49 | + partition@6c0000 { | |
47 | 50 | label = "ubi"; |
48 | - reg = <0x600000 0x1C00000>; | |
51 | + reg = <0x6c0000 0x6f00000>; | |
49 | 52 | }; |
50 | 53 | |
51 | - partition@2200000 { | |
52 | - label = "User_data"; | |
53 | - reg = <0x2200000 0x4000000>; | |
54 | - }; | |
54 | + /delete-node/ partition@2200000; | |
55 | 55 | }; |
56 | 56 | }; |
@@ -328,6 +328,10 @@ | ||
328 | 328 | pinctrl-0 = <&pcie_default>; |
329 | 329 | status = "okay"; |
330 | 330 | |
331 | + pcie@0,0 { | |
332 | + status = "okay"; | |
333 | + }; | |
334 | + | |
331 | 335 | pcie@1,0 { |
332 | 336 | status = "okay"; |
333 | 337 | }; |
@@ -337,6 +341,10 @@ | ||
337 | 341 | }; |
338 | 342 | }; |
339 | 343 | |
344 | +&pcie0_phy { | |
345 | + status = "okay"; | |
346 | +}; | |
347 | + | |
340 | 348 | &pcie1_phy { |
341 | 349 | status = "okay"; |
342 | 350 | }; |
@@ -190,6 +190,7 @@ define Device/mediatek_mt7622-rfb1-ubi | ||
190 | 190 | DEVICE_DTS := mt7622-rfb1-ubi |
191 | 191 | DEVICE_DTS_DIR := ../dts |
192 | 192 | DEVICE_PACKAGES := kmod-ata-ahci-mtk kmod-btmtkuart kmod-usb3 |
193 | + BOARD_NAME := mediatek,mt7622-rfb1-ubi | |
193 | 194 | UBINIZE_OPTS := -E 5 |
194 | 195 | BLOCKSIZE := 128k |
195 | 196 | PAGESIZE := 2048 |
@@ -11,7 +11,8 @@ mediatek_setup_interfaces() | ||
11 | 11 | bananapi,bpi-r64|\ |
12 | 12 | linksys,e8450|\ |
13 | 13 | linksys,e8450-ubi|\ |
14 | - mediatek,mt7622-rfb1) | |
14 | + mediatek,mt7622-rfb1|\ | |
15 | + mediatek,mt7622-rfb1-ubi) | |
15 | 16 | ucidef_set_interfaces_lan_wan "lan1 lan2 lan3 lan4" wan |
16 | 17 | ;; |
17 | 18 | buffalo,wsr-2533dhp2) |
@@ -34,8 +34,7 @@ platform_do_upgrade() { | ||
34 | 34 | nand_do_upgrade "$1" |
35 | 35 | fi |
36 | 36 | ;; |
37 | - linksys,e8450-ubi|\ | |
38 | - mediatek,mt7622,ubi) | |
37 | + linksys,e8450-ubi) | |
39 | 38 | CI_KERNPART="fit" |
40 | 39 | nand_do_upgrade "$1" |
41 | 40 | ;; |
@@ -47,6 +46,7 @@ platform_do_upgrade() { | ||
47 | 46 | fi |
48 | 47 | default_do_upgrade "$1" |
49 | 48 | ;; |
49 | + mediatek,mt7622-rfb1-ubi|\ | |
50 | 50 | totolink,a8000ru) |
51 | 51 | nand_do_upgrade "$1" |
52 | 52 | ;; |
@@ -68,6 +68,7 @@ platform_check_image() { | ||
68 | 68 | buffalo,wsr-2533dhp2) |
69 | 69 | buffalo_check_image "$board" "$magic" "$1" || return 1 |
70 | 70 | ;; |
71 | + mediatek,mt7622-rfb1-ubi|\ | |
71 | 72 | totolink,a8000ru) |
72 | 73 | nand_do_platform_check "$board" "$1" |
73 | 74 | ;; |
@@ -1,871 +0,0 @@ | ||
1 | ---- a/drivers/mtd/nand/Kconfig | |
2 | -+++ b/drivers/mtd/nand/Kconfig | |
3 | -@@ -15,6 +15,10 @@ config MTD_NAND_ECC | |
4 | - bool | |
5 | - depends on MTD_NAND_CORE | |
6 | - | |
7 | -+config MTD_NAND_MTK_BMT | |
8 | -+ bool "Support MediaTek NAND Bad-block Management Table" | |
9 | -+ default n | |
10 | -+ | |
11 | - endmenu | |
12 | - | |
13 | - endmenu | |
14 | ---- a/drivers/mtd/nand/Makefile | |
15 | -+++ b/drivers/mtd/nand/Makefile | |
16 | -@@ -2,6 +2,7 @@ | |
17 | - | |
18 | - nandcore-objs := core.o bbt.o | |
19 | - obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o | |
20 | -+obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o | |
21 | - | |
22 | - obj-y += onenand/ | |
23 | - obj-y += raw/ | |
24 | ---- /dev/null | |
25 | -+++ b/drivers/mtd/nand/mtk_bmt.c | |
26 | -@@ -0,0 +1,788 @@ | |
27 | -+/* | |
28 | -+ * Copyright (c) 2017 MediaTek Inc. | |
29 | -+ * Author: Xiangsheng Hou <xiangsheng.hou@mediatek.com> | |
30 | -+ * Copyright (c) 2020 Felix Fietkau <nbd@nbd.name> | |
31 | -+ * | |
32 | -+ * This program is free software; you can redistribute it and/or modify | |
33 | -+ * it under the terms of the GNU General Public License version 2 as | |
34 | -+ * published by the Free Software Foundation. | |
35 | -+ * | |
36 | -+ * This program is distributed in the hope that it will be useful, | |
37 | -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
38 | -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
39 | -+ * GNU General Public License for more details. | |
40 | -+ */ | |
41 | -+ | |
42 | -+#include <linux/slab.h> | |
43 | -+#include <linux/gfp.h> | |
44 | -+#include <linux/kernel.h> | |
45 | -+#include <linux/of.h> | |
46 | -+#include <linux/mtd/mtd.h> | |
47 | -+#include <linux/mtd/partitions.h> | |
48 | -+#include <linux/mtd/mtk_bmt.h> | |
49 | -+#include <linux/module.h> | |
50 | -+#include <linux/debugfs.h> | |
51 | -+ | |
52 | -+#define MAIN_SIGNATURE_OFFSET 0 | |
53 | -+#define OOB_SIGNATURE_OFFSET 1 | |
54 | -+#define BBPOOL_RATIO 2 | |
55 | -+ | |
56 | -+#define BBT_LOG(fmt, ...) pr_debug("[BBT][%s|%d] "fmt"\n", __func__, __LINE__, ##__VA_ARGS__) | |
57 | -+ | |
58 | -+/* Maximum 8k blocks */ | |
59 | -+#define BB_TABLE_MAX bmtd.table_size | |
60 | -+#define BMT_TABLE_MAX (BB_TABLE_MAX * BBPOOL_RATIO / 100) | |
61 | -+#define BMT_TBL_DEF_VAL 0x0 | |
62 | -+ | |
63 | -+/* | |
64 | -+ * Burner Bad Block Table | |
65 | -+ * --------- Only support SLC Nand Chips!!!!!!!!!!! ---------- | |
66 | -+ */ | |
67 | -+ | |
68 | -+struct bbbt { | |
69 | -+ char signature[3]; | |
70 | -+ /* This version is used to distinguish the legacy and new algorithm */ | |
71 | -+#define BBMT_VERSION 2 | |
72 | -+ unsigned char version; | |
73 | -+ /* Below 2 tables will be written in SLC */ | |
74 | -+ u16 bb_tbl[]; | |
75 | -+}; | |
76 | -+ | |
77 | -+struct bbmt { | |
78 | -+ u16 block; | |
79 | -+#define NO_MAPPED 0 | |
80 | -+#define NORMAL_MAPPED 1 | |
81 | -+#define BMT_MAPPED 2 | |
82 | -+ u16 mapped; | |
83 | -+}; | |
84 | -+ | |
85 | -+static struct bmt_desc { | |
86 | -+ struct mtd_info *mtd; | |
87 | -+ | |
88 | -+ int (*_read_oob) (struct mtd_info *mtd, loff_t from, | |
89 | -+ struct mtd_oob_ops *ops); | |
90 | -+ int (*_write_oob) (struct mtd_info *mtd, loff_t to, | |
91 | -+ struct mtd_oob_ops *ops); | |
92 | -+ int (*_erase) (struct mtd_info *mtd, struct erase_info *instr); | |
93 | -+ int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); | |
94 | -+ int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); | |
95 | -+ | |
96 | -+ struct bbbt *bbt; | |
97 | -+ | |
98 | -+ struct dentry *debugfs_dir; | |
99 | -+ | |
100 | -+ u32 table_size; | |
101 | -+ u32 pg_size; | |
102 | -+ u32 blk_size; | |
103 | -+ u16 pg_shift; | |
104 | -+ u16 blk_shift; | |
105 | -+ /* bbt logical address */ | |
106 | -+ u16 pool_lba; | |
107 | -+ /* bbt physical address */ | |
108 | -+ u16 pool_pba; | |
109 | -+ /* Maximum count of bad blocks that the vendor guaranteed */ | |
110 | -+ u16 bb_max; | |
111 | -+ /* Total blocks of the Nand Chip */ | |
112 | -+ u16 total_blks; | |
113 | -+ /* The block(n) BMT is located at (bmt_tbl[n]) */ | |
114 | -+ u16 bmt_blk_idx; | |
115 | -+ /* How many pages needs to store 'struct bbbt' */ | |
116 | -+ u32 bmt_pgs; | |
117 | -+ | |
118 | -+ /* to compensate for driver level remapping */ | |
119 | -+ u8 oob_offset; | |
120 | -+} bmtd = {0}; | |
121 | -+ | |
122 | -+static unsigned char *nand_bbt_buf; | |
123 | -+static unsigned char *nand_data_buf; | |
124 | -+ | |
125 | -+/* -------- Unit conversions -------- */ | |
126 | -+static inline u32 blk_pg(u16 block) | |
127 | -+{ | |
128 | -+ return (u32)(block << (bmtd.blk_shift - bmtd.pg_shift)); | |
129 | -+} | |
130 | -+ | |
131 | -+/* -------- Nand operations wrapper -------- */ | |
132 | -+static inline int | |
133 | -+bbt_nand_read(u32 page, unsigned char *dat, int dat_len, | |
134 | -+ unsigned char *fdm, int fdm_len) | |
135 | -+{ | |
136 | -+ struct mtd_oob_ops ops = { | |
137 | -+ .mode = MTD_OPS_PLACE_OOB, | |
138 | -+ .ooboffs = bmtd.oob_offset, | |
139 | -+ .oobbuf = fdm, | |
140 | -+ .ooblen = fdm_len, | |
141 | -+ .datbuf = dat, | |
142 | -+ .len = dat_len, | |
143 | -+ }; | |
144 | -+ | |
145 | -+ return bmtd._read_oob(bmtd.mtd, page << bmtd.pg_shift, &ops); | |
146 | -+} | |
147 | -+ | |
148 | -+static inline int bbt_nand_erase(u16 block) | |
149 | -+{ | |
150 | -+ struct mtd_info *mtd = bmtd.mtd; | |
151 | -+ struct erase_info instr = { | |
152 | -+ .addr = (loff_t)block << bmtd.blk_shift, | |
153 | -+ .len = bmtd.blk_size, | |
154 | -+ }; | |
155 | -+ | |
156 | -+ return bmtd._erase(mtd, &instr); | |
157 | -+} | |
158 | -+ | |
159 | -+/* -------- Bad Blocks Management -------- */ | |
160 | -+static inline struct bbmt *bmt_tbl(struct bbbt *bbbt) | |
161 | -+{ | |
162 | -+ return (struct bbmt *)&bbbt->bb_tbl[bmtd.table_size]; | |
163 | -+} | |
164 | -+ | |
165 | -+static int | |
166 | -+read_bmt(u16 block, unsigned char *dat, unsigned char *fdm, int fdm_len) | |
167 | -+{ | |
168 | -+ u32 len = bmtd.bmt_pgs << bmtd.pg_shift; | |
169 | -+ | |
170 | -+ return bbt_nand_read(blk_pg(block), dat, len, fdm, fdm_len); | |
171 | -+} | |
172 | -+ | |
173 | -+static int write_bmt(u16 block, unsigned char *dat) | |
174 | -+{ | |
175 | -+ struct mtd_oob_ops ops = { | |
176 | -+ .mode = MTD_OPS_PLACE_OOB, | |
177 | -+ .ooboffs = OOB_SIGNATURE_OFFSET + bmtd.oob_offset, | |
178 | -+ .oobbuf = "bmt", | |
179 | -+ .ooblen = 3, | |
180 | -+ .datbuf = dat, | |
181 | -+ .len = bmtd.bmt_pgs << bmtd.pg_shift, | |
182 | -+ }; | |
183 | -+ loff_t addr = (loff_t)block << bmtd.blk_shift; | |
184 | -+ | |
185 | -+ return bmtd._write_oob(bmtd.mtd, addr, &ops); | |
186 | -+} | |
187 | -+ | |
188 | -+static u16 find_valid_block(u16 block) | |
189 | -+{ | |
190 | -+ u8 fdm[4]; | |
191 | -+ int ret; | |
192 | -+ int loop = 0; | |
193 | -+ | |
194 | -+retry: | |
195 | -+ if (block >= bmtd.total_blks) | |
196 | -+ return 0; | |
197 | -+ | |
198 | -+ ret = bbt_nand_read(blk_pg(block), nand_data_buf, bmtd.pg_size, | |
199 | -+ fdm, sizeof(fdm)); | |
200 | -+ /* Read the 1st byte of FDM to judge whether it's a bad | |
201 | -+ * or not | |
202 | -+ */ | |
203 | -+ if (ret || fdm[0] != 0xff) { | |
204 | -+ pr_info("nand: found bad block 0x%x\n", block); | |
205 | -+ if (loop >= bmtd.bb_max) { | |
206 | -+ pr_info("nand: FATAL ERR: too many bad blocks!!\n"); | |
207 | -+ return 0; | |
208 | -+ } | |
209 | -+ | |
210 | -+ loop++; | |
211 | -+ block++; | |
212 | -+ goto retry; | |
213 | -+ } | |
214 | -+ | |
215 | -+ return block; | |
216 | -+} | |
217 | -+ | |
218 | -+/* Find out all bad blocks, and fill in the mapping table */ | |
219 | -+static int scan_bad_blocks(struct bbbt *bbt) | |
220 | -+{ | |
221 | -+ int i; | |
222 | -+ u16 block = 0; | |
223 | -+ | |
224 | -+ /* First time download, the block0 MUST NOT be a bad block, | |
225 | -+ * this is guaranteed by vendor | |
226 | -+ */ | |
227 | -+ bbt->bb_tbl[0] = 0; | |
228 | -+ | |
229 | -+ /* | |
230 | -+ * Construct the mapping table of Normal data area(non-PMT/BMTPOOL) | |
231 | -+ * G - Good block; B - Bad block | |
232 | -+ * --------------------------- | |
233 | -+ * physical |G|G|B|G|B|B|G|G|G|G|B|G|B| | |
234 | -+ * --------------------------- | |
235 | -+ * What bb_tbl[i] looks like: | |
236 | -+ * physical block(i): | |
237 | -+ * 0 1 2 3 4 5 6 7 8 9 a b c | |
238 | -+ * mapped block(bb_tbl[i]): | |
239 | -+ * 0 1 3 6 7 8 9 b ...... | |
240 | -+ * ATTENTION: | |
241 | -+ * If new bad block ocurred(n), search bmt_tbl to find | |
242 | -+ * a available block(x), and fill in the bb_tbl[n] = x; | |
243 | -+ */ | |
244 | -+ for (i = 1; i < bmtd.pool_lba; i++) { | |
245 | -+ bbt->bb_tbl[i] = find_valid_block(bbt->bb_tbl[i - 1] + 1); | |
246 | -+ BBT_LOG("bb_tbl[0x%x] = 0x%x", i, bbt->bb_tbl[i]); | |
247 | -+ if (bbt->bb_tbl[i] == 0) | |
248 | -+ return -1; | |
249 | -+ } | |
250 | -+ | |
251 | -+ /* Physical Block start Address of BMT pool */ | |
252 | -+ bmtd.pool_pba = bbt->bb_tbl[i - 1] + 1; | |
253 | -+ if (bmtd.pool_pba >= bmtd.total_blks - 2) { | |
254 | -+ pr_info("nand: FATAL ERR: Too many bad blocks!!\n"); | |
255 | -+ return -1; | |
256 | -+ } | |
257 | -+ | |
258 | -+ BBT_LOG("pool_pba=0x%x", bmtd.pool_pba); | |
259 | -+ i = 0; | |
260 | -+ block = bmtd.pool_pba; | |
261 | -+ /* | |
262 | -+ * The bmt table is used for runtime bad block mapping | |
263 | -+ * G - Good block; B - Bad block | |
264 | -+ * --------------------------- | |
265 | -+ * physical |G|G|B|G|B|B|G|G|G|G|B|G|B| | |
266 | -+ * --------------------------- | |
267 | -+ * block: 0 1 2 3 4 5 6 7 8 9 a b c | |
268 | -+ * What bmt_tbl[i] looks like in initial state: | |
269 | -+ * i: | |
270 | -+ * 0 1 2 3 4 5 6 7 | |
271 | -+ * bmt_tbl[i].block: | |
272 | -+ * 0 1 3 6 7 8 9 b | |
273 | -+ * bmt_tbl[i].mapped: | |
274 | -+ * N N N N N N N B | |
275 | -+ * N - Not mapped(Available) | |
276 | -+ * M - Mapped | |
277 | -+ * B - BMT | |
278 | -+ * ATTENTION: | |
279 | -+ * BMT always in the last valid block in pool | |
280 | -+ */ | |
281 | -+ while ((block = find_valid_block(block)) != 0) { | |
282 | -+ bmt_tbl(bbt)[i].block = block; | |
283 | -+ bmt_tbl(bbt)[i].mapped = NO_MAPPED; | |
284 | -+ BBT_LOG("bmt_tbl[%d].block = 0x%x", i, block); | |
285 | -+ block++; | |
286 | -+ i++; | |
287 | -+ } | |
288 | -+ | |
289 | -+ /* i - How many available blocks in pool, which is the length of bmt_tbl[] | |
290 | -+ * bmtd.bmt_blk_idx - bmt_tbl[bmtd.bmt_blk_idx].block => the BMT block | |
291 | -+ */ | |
292 | -+ bmtd.bmt_blk_idx = i - 1; | |
293 | -+ bmt_tbl(bbt)[bmtd.bmt_blk_idx].mapped = BMT_MAPPED; | |
294 | -+ | |
295 | -+ if (i < 1) { | |
296 | -+ pr_info("nand: FATAL ERR: no space to store BMT!!\n"); | |
297 | -+ return -1; | |
298 | -+ } | |
299 | -+ | |
300 | -+ pr_info("[BBT] %d available blocks in BMT pool\n", i); | |
301 | -+ | |
302 | -+ return 0; | |
303 | -+} | |
304 | -+ | |
305 | -+static bool is_valid_bmt(unsigned char *buf, unsigned char *fdm) | |
306 | -+{ | |
307 | -+ struct bbbt *bbt = (struct bbbt *)buf; | |
308 | -+ u8 *sig = (u8*)bbt->signature + MAIN_SIGNATURE_OFFSET; | |
309 | -+ | |
310 | -+ | |
311 | -+ if (memcmp(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3) == 0 && | |
312 | -+ memcmp(fdm + OOB_SIGNATURE_OFFSET, "bmt", 3) == 0) { | |
313 | -+ if (bbt->version == BBMT_VERSION) | |
314 | -+ return true; | |
315 | -+ } | |
316 | -+ BBT_LOG("[BBT] BMT Version not match,upgrage preloader and uboot please! sig=%02x%02x%02x, fdm=%02x%02x%02x", | |
317 | -+ sig[0], sig[1], sig[2], | |
318 | -+ fdm[1], fdm[2], fdm[3]); | |
319 | -+ return false; | |
320 | -+} | |
321 | -+ | |
322 | -+static u16 get_bmt_index(struct bbmt *bmt) | |
323 | -+{ | |
324 | -+ int i = 0; | |
325 | -+ | |
326 | -+ while (bmt[i].block != BMT_TBL_DEF_VAL) { | |
327 | -+ if (bmt[i].mapped == BMT_MAPPED) | |
328 | -+ return i; | |
329 | -+ i++; | |
330 | -+ } | |
331 | -+ return 0; | |
332 | -+} | |
333 | -+ | |
334 | -+static struct bbbt *scan_bmt(u16 block) | |
335 | -+{ | |
336 | -+ u8 fdm[4]; | |
337 | -+ | |
338 | -+ if (block < bmtd.pool_lba) | |
339 | -+ return NULL; | |
340 | -+ | |
341 | -+ if (read_bmt(block, nand_bbt_buf, fdm, sizeof(fdm))) | |
342 | -+ return scan_bmt(block - 1); | |
343 | -+ | |
344 | -+ if (is_valid_bmt(nand_bbt_buf, fdm)) { | |
345 | -+ bmtd.bmt_blk_idx = get_bmt_index(bmt_tbl((struct bbbt *)nand_bbt_buf)); | |
346 | -+ if (bmtd.bmt_blk_idx == 0) { | |
347 | -+ pr_info("[BBT] FATAL ERR: bmt block index is wrong!\n"); | |
348 | -+ return NULL; | |
349 | -+ } | |
350 | -+ pr_info("[BBT] BMT.v2 is found at 0x%x\n", block); | |
351 | -+ return (struct bbbt *)nand_bbt_buf; | |
352 | -+ } else | |
353 | -+ return scan_bmt(block - 1); | |
354 | -+} | |
355 | -+ | |
356 | -+/* Write the Burner Bad Block Table to Nand Flash | |
357 | -+ * n - write BMT to bmt_tbl[n] | |
358 | -+ */ | |
359 | -+static u16 upload_bmt(struct bbbt *bbt, int n) | |
360 | -+{ | |
361 | -+ u16 block; | |
362 | -+ | |
363 | -+retry: | |
364 | -+ if (n < 0 || bmt_tbl(bbt)[n].mapped == NORMAL_MAPPED) { | |
365 | -+ pr_info("nand: FATAL ERR: no space to store BMT!\n"); | |
366 | -+ return (u16)-1; | |
367 | -+ } | |
368 | -+ | |
369 | -+ block = bmt_tbl(bbt)[n].block; | |
370 | -+ BBT_LOG("n = 0x%x, block = 0x%x", n, block); | |
371 | -+ if (bbt_nand_erase(block)) { | |
372 | -+ bmt_tbl(bbt)[n].block = 0; | |
373 | -+ /* erase failed, try the previous block: bmt_tbl[n - 1].block */ | |
374 | -+ n--; | |
375 | -+ goto retry; | |
376 | -+ } | |
377 | -+ | |
378 | -+ /* The signature offset is fixed set to 0, | |
379 | -+ * oob signature offset is fixed set to 1 | |
380 | -+ */ | |
381 | -+ memcpy(bbt->signature + MAIN_SIGNATURE_OFFSET, "BMT", 3); | |
382 | -+ bbt->version = BBMT_VERSION; | |
383 | -+ | |
384 | -+ if (write_bmt(block, (unsigned char *)bbt)) { | |
385 | -+ bmt_tbl(bbt)[n].block = 0; | |
386 | -+ | |
387 | -+ /* write failed, try the previous block in bmt_tbl[n - 1] */ | |
388 | -+ n--; | |
389 | -+ goto retry; | |
390 | -+ } | |
391 | -+ | |
392 | -+ /* Return the current index(n) of BMT pool (bmt_tbl[n]) */ | |
393 | -+ return n; | |
394 | -+} | |
395 | -+ | |
396 | -+static u16 find_valid_block_in_pool(struct bbbt *bbt) | |
397 | -+{ | |
398 | -+ int i; | |
399 | -+ | |
400 | -+ if (bmtd.bmt_blk_idx == 0) | |
401 | -+ goto error; | |
402 | -+ | |
403 | -+ for (i = 0; i < bmtd.bmt_blk_idx; i++) { | |
404 | -+ if (bmt_tbl(bbt)[i].block != 0 && bmt_tbl(bbt)[i].mapped == NO_MAPPED) { | |
405 | -+ bmt_tbl(bbt)[i].mapped = NORMAL_MAPPED; | |
406 | -+ return bmt_tbl(bbt)[i].block; | |
407 | -+ } | |
408 | -+ } | |
409 | -+ | |
410 | -+error: | |
411 | -+ pr_info("nand: FATAL ERR: BMT pool is run out!\n"); | |
412 | -+ return 0; | |
413 | -+} | |
414 | -+ | |
415 | -+/* We met a bad block, mark it as bad and map it to a valid block in pool, | |
416 | -+ * if it's a write failure, we need to write the data to mapped block | |
417 | -+ */ | |
418 | -+static bool update_bmt(u16 block) | |
419 | -+{ | |
420 | -+ u16 mapped_blk; | |
421 | -+ struct bbbt *bbt; | |
422 | -+ | |
423 | -+ bbt = bmtd.bbt; | |
424 | -+ mapped_blk = find_valid_block_in_pool(bbt); | |
425 | -+ if (mapped_blk == 0) | |
426 | -+ return false; | |
427 | -+ | |
428 | -+ /* Map new bad block to available block in pool */ | |
429 | -+ bbt->bb_tbl[block] = mapped_blk; | |
430 | -+ bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx); | |
431 | -+ | |
432 | -+ return true; | |
433 | -+} | |
434 | -+ | |
435 | -+u16 get_mapping_block_index(int block) | |
436 | -+{ | |
437 | -+ int mapping_block; | |
438 | -+ | |
439 | -+ if (block < bmtd.pool_lba) | |
440 | -+ mapping_block = bmtd.bbt->bb_tbl[block]; | |
441 | -+ else | |
442 | -+ mapping_block = block; | |
443 | -+ BBT_LOG("0x%x mapped to 0x%x", block, mapping_block); | |
444 | -+ | |
445 | -+ return mapping_block; | |
446 | -+} | |
447 | -+ | |
448 | -+static int | |
449 | -+mtk_bmt_read(struct mtd_info *mtd, loff_t from, | |
450 | -+ struct mtd_oob_ops *ops) | |
451 | -+{ | |
452 | -+ struct mtd_oob_ops cur_ops = *ops; | |
453 | -+ int retry_count = 0; | |
454 | -+ loff_t cur_from; | |
455 | -+ int ret; | |
456 | -+ | |
457 | -+ ops->retlen = 0; | |
458 | -+ ops->oobretlen = 0; | |
459 | -+ | |
460 | -+ while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) { | |
461 | -+ u32 offset = from & (bmtd.blk_size - 1); | |
462 | -+ u32 block = from >> bmtd.blk_shift; | |
463 | -+ u32 cur_block; | |
464 | -+ | |
465 | -+ cur_block = get_mapping_block_index(block); | |
466 | -+ cur_from = ((loff_t)cur_block << bmtd.blk_shift) + offset; | |
467 | -+ | |
468 | -+ cur_ops.oobretlen = 0; | |
469 | -+ cur_ops.retlen = 0; | |
470 | -+ cur_ops.len = min_t(u32, mtd->erasesize - offset, | |
471 | -+ ops->len - ops->retlen); | |
472 | -+ ret = bmtd._read_oob(mtd, cur_from, &cur_ops); | |
473 | -+ if (ret < 0) { | |
474 | -+ update_bmt(block); | |
475 | -+ if (retry_count++ < 10) | |
476 | -+ continue; | |
477 | -+ | |
478 | -+ return ret; | |
479 | -+ } | |
480 | -+ | |
481 | -+ ops->retlen += cur_ops.retlen; | |
482 | -+ ops->oobretlen += cur_ops.oobretlen; | |
483 | -+ | |
484 | -+ cur_ops.ooboffs = 0; | |
485 | -+ cur_ops.datbuf += cur_ops.retlen; | |
486 | -+ cur_ops.oobbuf += cur_ops.oobretlen; | |
487 | -+ cur_ops.ooblen -= cur_ops.oobretlen; | |
488 | -+ | |
489 | -+ if (!cur_ops.len) | |
490 | -+ cur_ops.len = mtd->erasesize - offset; | |
491 | -+ | |
492 | -+ from += cur_ops.len; | |
493 | -+ retry_count = 0; | |
494 | -+ } | |
495 | -+ | |
496 | -+ return 0; | |
497 | -+} | |
498 | -+ | |
499 | -+static int | |
500 | -+mtk_bmt_write(struct mtd_info *mtd, loff_t to, | |
501 | -+ struct mtd_oob_ops *ops) | |
502 | -+{ | |
503 | -+ struct mtd_oob_ops cur_ops = *ops; | |
504 | -+ int retry_count = 0; | |
505 | -+ loff_t cur_to; | |
506 | -+ int ret; | |
507 | -+ | |
508 | -+ ops->retlen = 0; | |
509 | -+ ops->oobretlen = 0; | |
510 | -+ | |
511 | -+ while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) { | |
512 | -+ u32 offset = to & (bmtd.blk_size - 1); | |
513 | -+ u32 block = to >> bmtd.blk_shift; | |
514 | -+ u32 cur_block; | |
515 | -+ | |
516 | -+ cur_block = get_mapping_block_index(block); | |
517 | -+ cur_to = ((loff_t)cur_block << bmtd.blk_shift) + offset; | |
518 | -+ | |
519 | -+ cur_ops.oobretlen = 0; | |
520 | -+ cur_ops.retlen = 0; | |
521 | -+ cur_ops.len = min_t(u32, bmtd.blk_size - offset, | |
522 | -+ ops->len - ops->retlen); | |
523 | -+ ret = bmtd._write_oob(mtd, cur_to, &cur_ops); | |
524 | -+ if (ret < 0) { | |
525 | -+ update_bmt(block); | |
526 | -+ if (retry_count++ < 10) | |
527 | -+ continue; | |
528 | -+ | |
529 | -+ return ret; | |
530 | -+ } | |
531 | -+ | |
532 | -+ ops->retlen += cur_ops.retlen; | |
533 | -+ ops->oobretlen += cur_ops.oobretlen; | |
534 | -+ | |
535 | -+ cur_ops.ooboffs = 0; | |
536 | -+ cur_ops.datbuf += cur_ops.retlen; | |
537 | -+ cur_ops.oobbuf += cur_ops.oobretlen; | |
538 | -+ cur_ops.ooblen -= cur_ops.oobretlen; | |
539 | -+ | |
540 | -+ if (!cur_ops.len) | |
541 | -+ cur_ops.len = mtd->erasesize - offset; | |
542 | -+ | |
543 | -+ to += cur_ops.len; | |
544 | -+ retry_count = 0; | |
545 | -+ } | |
546 | -+ | |
547 | -+ return 0; | |
548 | -+} | |
549 | -+ | |
550 | -+static int | |
551 | -+mtk_bmt_mtd_erase(struct mtd_info *mtd, struct erase_info *instr) | |
552 | -+{ | |
553 | -+ struct erase_info mapped_instr = { | |
554 | -+ .len = bmtd.blk_size, | |
555 | -+ }; | |
556 | -+ int retry_count = 0; | |
557 | -+ u64 start_addr, end_addr; | |
558 | -+ int ret; | |
559 | -+ u16 orig_block, block; | |
560 | -+ | |
561 | -+ start_addr = instr->addr & (~mtd->erasesize_mask); | |
562 | -+ end_addr = instr->addr + instr->len; | |
563 | -+ | |
564 | -+ while (start_addr < end_addr) { | |
565 | -+ orig_block = start_addr >> bmtd.blk_shift; | |
566 | -+ block = get_mapping_block_index(orig_block); | |
567 | -+ mapped_instr.addr = (loff_t)block << bmtd.blk_shift; | |
568 | -+ ret = bmtd._erase(mtd, &mapped_instr); | |
569 | -+ if (ret) { | |
570 | -+ update_bmt(orig_block); | |
571 | -+ if (retry_count++ < 10) | |
572 | -+ continue; | |
573 | -+ instr->fail_addr = start_addr; | |
574 | -+ break; | |
575 | -+ } | |
576 | -+ start_addr += mtd->erasesize; | |
577 | -+ retry_count = 0; | |
578 | -+ } | |
579 | -+ | |
580 | -+ return ret; | |
581 | -+} | |
582 | -+static int | |
583 | -+mtk_bmt_block_isbad(struct mtd_info *mtd, loff_t ofs) | |
584 | -+{ | |
585 | -+ int retry_count = 0; | |
586 | -+ u16 orig_block = ofs >> bmtd.blk_shift; | |
587 | -+ u16 block; | |
588 | -+ int ret; | |
589 | -+ | |
590 | -+retry: | |
591 | -+ block = get_mapping_block_index(orig_block); | |
592 | -+ ret = bmtd._block_isbad(mtd, (loff_t)block << bmtd.blk_shift); | |
593 | -+ if (ret) { | |
594 | -+ update_bmt(orig_block); | |
595 | -+ if (retry_count++ < 10) | |
596 | -+ goto retry; | |
597 | -+ } | |
598 | -+ return ret; | |
599 | -+} | |
600 | -+ | |
601 | -+static int | |
602 | -+mtk_bmt_block_markbad(struct mtd_info *mtd, loff_t ofs) | |
603 | -+{ | |
604 | -+ u16 orig_block = ofs >> bmtd.blk_shift; | |
605 | -+ u16 block = get_mapping_block_index(orig_block); | |
606 | -+ update_bmt(orig_block); | |
607 | -+ return bmtd._block_markbad(mtd, (loff_t)block << bmtd.blk_shift); | |
608 | -+} | |
609 | -+ | |
610 | -+static void | |
611 | -+mtk_bmt_replace_ops(struct mtd_info *mtd) | |
612 | -+{ | |
613 | -+ bmtd._read_oob = mtd->_read_oob; | |
614 | -+ bmtd._write_oob = mtd->_write_oob; | |
615 | -+ bmtd._erase = mtd->_erase; | |
616 | -+ bmtd._block_isbad = mtd->_block_isbad; | |
617 | -+ bmtd._block_markbad = mtd->_block_markbad; | |
618 | -+ | |
619 | -+ mtd->_read_oob = mtk_bmt_read; | |
620 | -+ mtd->_write_oob = mtk_bmt_write; | |
621 | -+ mtd->_erase = mtk_bmt_mtd_erase; | |
622 | -+ mtd->_block_isbad = mtk_bmt_block_isbad; | |
623 | -+ mtd->_block_markbad = mtk_bmt_block_markbad; | |
624 | -+} | |
625 | -+ | |
626 | -+static int mtk_bmt_debug_mark_good(void *data, u64 val) | |
627 | -+{ | |
628 | -+ u32 block = val >> bmtd.blk_shift; | |
629 | -+ | |
630 | -+ bmtd.bbt->bb_tbl[block] = block; | |
631 | -+ bmtd.bmt_blk_idx = upload_bmt(bmtd.bbt, bmtd.bmt_blk_idx); | |
632 | -+ | |
633 | -+ return 0; | |
634 | -+} | |
635 | -+ | |
636 | -+static int mtk_bmt_debug_mark_bad(void *data, u64 val) | |
637 | -+{ | |
638 | -+ u32 block = val >> bmtd.blk_shift; | |
639 | -+ | |
640 | -+ update_bmt(block); | |
641 | -+ | |
642 | -+ return 0; | |
643 | -+} | |
644 | -+ | |
645 | -+DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_good, NULL, mtk_bmt_debug_mark_good, "%llu\n"); | |
646 | -+DEFINE_DEBUGFS_ATTRIBUTE(fops_mark_bad, NULL, mtk_bmt_debug_mark_bad, "%llu\n"); | |
647 | -+ | |
648 | -+static void | |
649 | -+mtk_bmt_add_debugfs(void) | |
650 | -+{ | |
651 | -+ struct dentry *dir; | |
652 | -+ | |
653 | -+ dir = bmtd.debugfs_dir = debugfs_create_dir("mtk-bmt", NULL); | |
654 | -+ if (!dir) | |
655 | -+ return; | |
656 | -+ | |
657 | -+ debugfs_create_file_unsafe("mark_good", S_IWUSR, dir, NULL, &fops_mark_good); | |
658 | -+ debugfs_create_file_unsafe("mark_bad", S_IWUSR, dir, NULL, &fops_mark_bad); | |
659 | -+} | |
660 | -+ | |
661 | -+void mtk_bmt_detach(struct mtd_info *mtd) | |
662 | -+{ | |
663 | -+ if (bmtd.mtd != mtd) | |
664 | -+ return; | |
665 | -+ | |
666 | -+ if (bmtd.debugfs_dir) | |
667 | -+ debugfs_remove_recursive(bmtd.debugfs_dir); | |
668 | -+ bmtd.debugfs_dir = NULL; | |
669 | -+ | |
670 | -+ kfree(nand_bbt_buf); | |
671 | -+ kfree(nand_data_buf); | |
672 | -+ | |
673 | -+ mtd->_read_oob = bmtd._read_oob; | |
674 | -+ mtd->_write_oob = bmtd._write_oob; | |
675 | -+ mtd->_erase = bmtd._erase; | |
676 | -+ mtd->_block_isbad = bmtd._block_isbad; | |
677 | -+ mtd->_block_markbad = bmtd._block_markbad; | |
678 | -+ mtd->size = bmtd.total_blks << bmtd.blk_shift; | |
679 | -+ | |
680 | -+ memset(&bmtd, 0, sizeof(bmtd)); | |
681 | -+} | |
682 | -+ | |
683 | -+/* total_blocks - The total count of blocks that the Nand Chip has */ | |
684 | -+int mtk_bmt_attach(struct mtd_info *mtd) | |
685 | -+{ | |
686 | -+ struct device_node *np; | |
687 | -+ struct bbbt *bbt; | |
688 | -+ u32 bufsz; | |
689 | -+ u32 block; | |
690 | -+ u16 total_blocks, pmt_block; | |
691 | -+ int ret = 0; | |
692 | -+ u32 bmt_pool_size, bmt_table_size; | |
693 | -+ | |
694 | -+ if (bmtd.mtd) | |
695 | -+ return -ENOSPC; | |
696 | -+ | |
697 | -+ np = mtd_get_of_node(mtd); | |
698 | -+ if (!np) | |
699 | -+ return 0; | |
700 | -+ | |
701 | -+ if (!of_property_read_bool(np, "mediatek,bmt-v2")) | |
702 | -+ return 0; | |
703 | -+ | |
704 | -+ if (of_property_read_u32(np, "mediatek,bmt-pool-size", | |
705 | -+ &bmt_pool_size) != 0) | |
706 | -+ bmt_pool_size = 80; | |
707 | -+ | |
708 | -+ if (of_property_read_u8(np, "mediatek,bmt-oob-offset", | |
709 | -+ &bmtd.oob_offset) != 0) | |
710 | -+ bmtd.oob_offset = 0; | |
711 | -+ | |
712 | -+ if (of_property_read_u32(np, "mediatek,bmt-table-size", | |
713 | -+ &bmt_table_size) != 0) | |
714 | -+ bmt_table_size = 0x2000U; | |
715 | -+ | |
716 | -+ bmtd.mtd = mtd; | |
717 | -+ mtk_bmt_replace_ops(mtd); | |
718 | -+ | |
719 | -+ bmtd.table_size = bmt_table_size; | |
720 | -+ bmtd.blk_size = mtd->erasesize; | |
721 | -+ bmtd.blk_shift = ffs(bmtd.blk_size) - 1; | |
722 | -+ bmtd.pg_size = mtd->writesize; | |
723 | -+ bmtd.pg_shift = ffs(bmtd.pg_size) - 1; | |
724 | -+ total_blocks = mtd->size >> bmtd.blk_shift; | |
725 | -+ pmt_block = total_blocks - bmt_pool_size - 2; | |
726 | -+ | |
727 | -+ mtd->size = pmt_block << bmtd.blk_shift; | |
728 | -+ | |
729 | -+ /* | |
730 | -+ * --------------------------------------- | |
731 | -+ * | PMT(2blks) | BMT POOL(totalblks * 2%) | | |
732 | -+ * --------------------------------------- | |
733 | -+ * ^ ^ | |
734 | -+ * | | | |
735 | -+ * pmt_block pmt_block + 2blocks(pool_lba) | |
736 | -+ * | |
737 | -+ * ATTETION!!!!!! | |
738 | -+ * The blocks ahead of the boundary block are stored in bb_tbl | |
739 | -+ * and blocks behind are stored in bmt_tbl | |
740 | -+ */ | |
741 | -+ | |
742 | -+ bmtd.pool_lba = (u16)(pmt_block + 2); | |
743 | -+ bmtd.total_blks = total_blocks; | |
744 | -+ bmtd.bb_max = bmtd.total_blks * BBPOOL_RATIO / 100; | |
745 | -+ | |
746 | -+ /* 3 buffers we need */ | |
747 | -+ bufsz = round_up(sizeof(struct bbbt) + | |
748 | -+ bmt_table_size * sizeof(struct bbmt), bmtd.pg_size); | |
749 | -+ bmtd.bmt_pgs = bufsz >> bmtd.pg_shift; | |
750 | -+ | |
751 | -+ nand_bbt_buf = kzalloc(bufsz, GFP_KERNEL); | |
752 | -+ nand_data_buf = kzalloc(bmtd.pg_size, GFP_KERNEL); | |
753 | -+ | |
754 | -+ if (!nand_bbt_buf || !nand_data_buf) { | |
755 | -+ pr_info("nand: FATAL ERR: allocate buffer failed!\n"); | |
756 | -+ ret = -1; | |
757 | -+ goto error; | |
758 | -+ } | |
759 | -+ | |
760 | -+ memset(nand_bbt_buf, 0xff, bufsz); | |
761 | -+ memset(nand_data_buf, 0xff, bmtd.pg_size); | |
762 | -+ | |
763 | -+ BBT_LOG("bbtbuf=0x%p(0x%x) dat=0x%p(0x%x)", | |
764 | -+ nand_bbt_buf, bufsz, nand_data_buf, bmtd.pg_size); | |
765 | -+ BBT_LOG("pool_lba=0x%x total_blks=0x%x bb_max=0x%x", | |
766 | -+ bmtd.pool_lba, bmtd.total_blks, bmtd.bb_max); | |
767 | -+ | |
768 | -+ /* Scanning start from the first page of the last block | |
769 | -+ * of whole flash | |
770 | -+ */ | |
771 | -+ bbt = scan_bmt(bmtd.total_blks - 1); | |
772 | -+ if (!bbt) { | |
773 | -+ /* BMT not found */ | |
774 | -+ if (bmtd.total_blks > BB_TABLE_MAX + BMT_TABLE_MAX) { | |
775 | -+ pr_info("nand: FATAL: Too many blocks, can not support!\n"); | |
776 | -+ ret = -1; | |
777 | -+ goto error; | |
778 | -+ } | |
779 | -+ | |
780 | -+ bbt = (struct bbbt *)nand_bbt_buf; | |
781 | -+ memset(bmt_tbl(bbt), BMT_TBL_DEF_VAL, bmtd.table_size * sizeof(struct bbmt)); | |
782 | -+ | |
783 | -+ if (scan_bad_blocks(bbt)) { | |
784 | -+ ret = -1; | |
785 | -+ goto error; | |
786 | -+ } | |
787 | -+ | |
788 | -+ /* BMT always in the last valid block in pool */ | |
789 | -+ bmtd.bmt_blk_idx = upload_bmt(bbt, bmtd.bmt_blk_idx); | |
790 | -+ block = bmt_tbl(bbt)[bmtd.bmt_blk_idx].block; | |
791 | -+ pr_notice("[BBT] BMT.v2 is written into PBA:0x%x\n", block); | |
792 | -+ | |
793 | -+ if (bmtd.bmt_blk_idx == 0) | |
794 | -+ pr_info("nand: Warning: no available block in BMT pool!\n"); | |
795 | -+ else if (bmtd.bmt_blk_idx == (u16)-1) { | |
796 | -+ ret = -1; | |
797 | -+ goto error; | |
798 | -+ } | |
799 | -+ } | |
800 | -+ mtk_bmt_add_debugfs(); | |
801 | -+ | |
802 | -+ bmtd.bbt = bbt; | |
803 | -+ return 0; | |
804 | -+ | |
805 | -+error: | |
806 | -+ mtk_bmt_detach(mtd); | |
807 | -+ return ret; | |
808 | -+} | |
809 | -+ | |
810 | -+ | |
811 | -+MODULE_LICENSE("GPL"); | |
812 | -+MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>, Felix Fietkau <nbd@nbd.name>"); | |
813 | -+MODULE_DESCRIPTION("Bad Block mapping management v2 for MediaTek NAND Flash Driver"); | |
814 | -+ | |
815 | ---- /dev/null | |
816 | -+++ b/include/linux/mtd/mtk_bmt.h | |
817 | -@@ -0,0 +1,18 @@ | |
818 | -+#ifndef __MTK_BMT_H | |
819 | -+#define __MTK_BMT_H | |
820 | -+ | |
821 | -+#ifdef CONFIG_MTD_NAND_MTK_BMT | |
822 | -+int mtk_bmt_attach(struct mtd_info *mtd); | |
823 | -+void mtk_bmt_detach(struct mtd_info *mtd); | |
824 | -+#else | |
825 | -+static inline int mtk_bmt_attach(struct mtd_info *mtd) | |
826 | -+{ | |
827 | -+ return 0; | |
828 | -+} | |
829 | -+ | |
830 | -+static inline void mtk_bmt_detach(struct mtd_info *mtd) | |
831 | -+{ | |
832 | -+} | |
833 | -+#endif | |
834 | -+ | |
835 | -+#endif | |
836 | ---- a/drivers/mtd/mtk-snand/mtk-snand-mtd.c | |
837 | -+++ b/drivers/mtd/mtk-snand/mtk-snand-mtd.c | |
838 | -@@ -16,6 +16,7 @@ | |
839 | - #include <linux/dma-mapping.h> | |
840 | - #include <linux/wait.h> | |
841 | - #include <linux/mtd/mtd.h> | |
842 | -+#include <linux/mtd/mtk_bmt.h> | |
843 | - #include <linux/mtd/partitions.h> | |
844 | - #include <linux/of_platform.h> | |
845 | - | |
846 | -@@ -612,6 +613,8 @@ static int mtk_snand_probe(struct platfo | |
847 | - mtd->_block_isbad = mtk_snand_mtd_block_isbad; | |
848 | - mtd->_block_markbad = mtk_snand_mtd_block_markbad; | |
849 | - | |
850 | -+ mtk_bmt_attach(mtd); | |
851 | -+ | |
852 | - ret = mtd_device_register(mtd, NULL, 0); | |
853 | - if (ret) { | |
854 | - dev_err(msm->pdev.dev, "failed to register mtd partition\n"); | |
855 | -@@ -623,6 +626,7 @@ static int mtk_snand_probe(struct platfo | |
856 | - return 0; | |
857 | - | |
858 | - errout4: | |
859 | -+ mtk_bmt_detach(mtd); | |
860 | - devm_kfree(msm->pdev.dev, msm->page_cache); | |
861 | - | |
862 | - errout3: | |
863 | -@@ -650,6 +654,8 @@ static int mtk_snand_remove(struct platf | |
864 | - if (ret) | |
865 | - return ret; | |
866 | - | |
867 | -+ mtk_bmt_detach(mtd); | |
868 | -+ | |
869 | - mtk_snand_cleanup(msm->snf); | |
870 | - | |
871 | - if (msm->irq >= 0) |
@@ -0,0 +1,36 @@ | ||
1 | +--- a/drivers/mtd/mtk-snand/mtk-snand-mtd.c | |
2 | ++++ b/drivers/mtd/mtk-snand/mtk-snand-mtd.c | |
3 | +@@ -16,6 +16,7 @@ | |
4 | + #include <linux/dma-mapping.h> | |
5 | + #include <linux/wait.h> | |
6 | + #include <linux/mtd/mtd.h> | |
7 | ++#include <linux/mtd/mtk_bmt.h> | |
8 | + #include <linux/mtd/partitions.h> | |
9 | + #include <linux/of_platform.h> | |
10 | + | |
11 | +@@ -612,6 +613,8 @@ static int mtk_snand_probe(struct platfo | |
12 | + mtd->_block_isbad = mtk_snand_mtd_block_isbad; | |
13 | + mtd->_block_markbad = mtk_snand_mtd_block_markbad; | |
14 | + | |
15 | ++ mtk_bmt_attach(mtd); | |
16 | ++ | |
17 | + ret = mtd_device_register(mtd, NULL, 0); | |
18 | + if (ret) { | |
19 | + dev_err(msm->pdev.dev, "failed to register mtd partition\n"); | |
20 | +@@ -623,6 +626,7 @@ static int mtk_snand_probe(struct platfo | |
21 | + return 0; | |
22 | + | |
23 | + errout4: | |
24 | ++ mtk_bmt_detach(mtd); | |
25 | + devm_kfree(msm->pdev.dev, msm->page_cache); | |
26 | + | |
27 | + errout3: | |
28 | +@@ -650,6 +654,8 @@ static int mtk_snand_remove(struct platf | |
29 | + if (ret) | |
30 | + return ret; | |
31 | + | |
32 | ++ mtk_bmt_detach(mtd); | |
33 | ++ | |
34 | + mtk_snand_cleanup(msm->snf); | |
35 | + | |
36 | + if (msm->irq >= 0) |
@@ -76,6 +76,11 @@ | ||
76 | 76 | &nand { |
77 | 77 | status = "okay"; |
78 | 78 | |
79 | + mediatek,bbt; | |
80 | + mediatek,bmt-remap-range = | |
81 | + <0x180000 0x580000>, | |
82 | + <0x2980000 0x2d80000>; | |
83 | + | |
79 | 84 | partitions { |
80 | 85 | compatible = "fixed-partitions"; |
81 | 86 | #address-cells = <1>; |
@@ -37,7 +37,6 @@ | ||
37 | 37 | partition@0 { |
38 | 38 | label = "RouterBoot"; |
39 | 39 | reg = <0x0 0x40000>; |
40 | - read-only; | |
41 | 40 | compatible = "mikrotik,routerboot-partitions"; |
42 | 41 | #address-cells = <1>; |
43 | 42 | #size-cells = <1>; |
@@ -154,10 +154,12 @@ CONFIG_MTD_NAND_CORE=y | ||
154 | 154 | CONFIG_MTD_NAND_ECC=y |
155 | 155 | CONFIG_MTD_NAND_ECC_SW_HAMMING=y |
156 | 156 | CONFIG_MTD_NAND_MT7621=y |
157 | +CONFIG_MTD_NAND_MTK_BMT=y | |
157 | 158 | CONFIG_MTD_PHYSMAP=y |
158 | 159 | CONFIG_MTD_RAW_NAND=y |
159 | 160 | CONFIG_MTD_ROUTERBOOT_PARTS=y |
160 | 161 | CONFIG_MTD_SPI_NOR=y |
162 | +CONFIG_MTD_SPI_NOR_USE_VARIABLE_ERASE=y | |
161 | 163 | CONFIG_MTD_SPLIT_FIT_FW=y |
162 | 164 | CONFIG_MTD_SPLIT_MINOR_FW=y |
163 | 165 | CONFIG_MTD_SPLIT_SEAMA_FW=y |
@@ -1022,7 +1022,7 @@ Signed-off-by: Weijie Gao <weijie.gao@mediatek.com> | ||
1022 | 1022 | + vall |= (u32)oobptr[j] << (j * 8); |
1023 | 1023 | + |
1024 | 1024 | + for (j = 0; j < 4; j++) |
1025 | -+ valm |= (u32)oobptr[j + 4] << ((j - 4) * 8); | |
1025 | ++ valm |= (u32)oobptr[j + 4] << (j * 8); | |
1026 | 1026 | + |
1027 | 1027 | + nfi_write32(nfc, NFI_FDML(i), vall); |
1028 | 1028 | + nfi_write32(nfc, NFI_FDMM(i), valm); |
@@ -11,9 +11,9 @@ PKG_RELEASE:=1 | ||
11 | 11 | |
12 | 12 | PKG_SOURCE_PROTO:=git |
13 | 13 | PKG_SOURCE_URL=$(PROJECT_GIT)/project/firmware-utils.git |
14 | -PKG_SOURCE_DATE:=2021-12-02 | |
15 | -PKG_SOURCE_VERSION:=56e8e19151743c923f48604c457850cf8eb52076 | |
16 | -PKG_MIRROR_HASH:=91f1bac08af2d856e93e651dc7c1ce6745f0b32b6fe4fb1917660494d7518057 | |
14 | +PKG_SOURCE_DATE:=2022-01-14 | |
15 | +PKG_SOURCE_VERSION:=84dbf8ee49f522d3a4528763c9473cf3dd7c8c52 | |
16 | +PKG_MIRROR_HASH:=aabf70dbc155d10175ee3b25f375953bef31bf7e25a2391fda8687aa93ea79e6 | |
17 | 17 | |
18 | 18 | include $(INCLUDE_DIR)/host-build.mk |
19 | 19 | include $(INCLUDE_DIR)/cmake.mk |