2019-03-14 08:10:02 +00:00
|
|
|
/*
|
|
|
|
|
* (C) Copyright 2019 Rockchip Electronics Co., Ltd
|
|
|
|
|
*
|
|
|
|
|
* SPDX-License-Identifier: GPL-2.0+
|
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
#include <common.h>
|
2019-06-24 06:35:39 +00:00
|
|
|
#include <blk.h>
|
|
|
|
|
#include <boot_rkimg.h>
|
2019-03-14 08:10:02 +00:00
|
|
|
#include <dm.h>
|
|
|
|
|
#include <errno.h>
|
2019-06-24 06:35:39 +00:00
|
|
|
#include <malloc.h>
|
2019-03-14 08:10:02 +00:00
|
|
|
#include <nand.h>
|
2019-06-24 06:35:39 +00:00
|
|
|
#include <part.h>
|
2019-03-14 08:10:02 +00:00
|
|
|
#include <dm/device-internal.h>
|
|
|
|
|
|
2019-06-24 06:35:39 +00:00
|
|
|
/* Prefix and buffer sizing for the "mtdparts=" bootargs string built
 * by mtd_part_parse().
 */
#define MTD_PART_NAND_HEAD "mtdparts="
#define MTD_PART_INFO_MAX_SIZE 512
#define MTD_SINGLE_PART_INFO_MAX_SIZE 40

/* Sentinel values stored in mtd_map_blk_table[]:
 * BLOCK_UNKNOWN: entry has not been initialized by mtd_blk_map_table_init()
 * BLOCK_SHIFT:   logical block could not be backed by a good physical block
 */
#define MTD_BLK_TABLE_BLOCK_UNKNOWN (-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT (-1)

/* Lazily-allocated logical->physical eraseblock map used to transparently
 * skip bad blocks on NAND/SPI-NAND reads; one int entry per eraseblock.
 */
static int *mtd_map_blk_table;
|
|
|
|
|
|
|
|
|
|
int mtd_blk_map_table_init(struct blk_desc *desc,
|
|
|
|
|
loff_t offset,
|
|
|
|
|
size_t length)
|
|
|
|
|
{
|
|
|
|
|
u32 blk_total, blk_begin, blk_cnt;
|
|
|
|
|
struct mtd_info *mtd = NULL;
|
|
|
|
|
int i, j;
|
|
|
|
|
|
|
|
|
|
if (!desc)
|
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
|
|
if (desc->devnum == BLK_MTD_NAND) {
|
|
|
|
|
#if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
|
|
|
|
|
mtd = dev_get_priv(desc->bdev->parent);
|
|
|
|
|
#endif
|
|
|
|
|
} else if (desc->devnum == BLK_MTD_SPI_NAND) {
|
|
|
|
|
#if defined(CONFIG_MTD_SPI_NAND) && !defined(CONFIG_SPL_BUILD)
|
|
|
|
|
mtd = desc->bdev->priv;
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (!mtd) {
|
|
|
|
|
return -ENODEV;
|
|
|
|
|
} else {
|
2020-05-12 08:52:21 +00:00
|
|
|
blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
|
2020-04-03 05:02:52 +00:00
|
|
|
if (!mtd_map_blk_table) {
|
|
|
|
|
mtd_map_blk_table = (int *)malloc(blk_total * 4);
|
2020-04-26 01:56:58 +00:00
|
|
|
memset(mtd_map_blk_table, MTD_BLK_TABLE_BLOCK_UNKNOWN,
|
|
|
|
|
blk_total * 4);
|
2020-04-03 05:02:52 +00:00
|
|
|
}
|
|
|
|
|
|
2020-05-12 08:52:21 +00:00
|
|
|
blk_begin = (u32)offset >> mtd->erasesize_shift;
|
|
|
|
|
blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length) >> mtd->erasesize_shift);
|
2020-04-24 01:42:48 +00:00
|
|
|
if ((blk_begin + blk_cnt) > blk_total)
|
|
|
|
|
blk_cnt = blk_total - blk_begin;
|
2020-04-26 01:56:58 +00:00
|
|
|
|
|
|
|
|
if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
|
|
|
|
|
return 0;
|
|
|
|
|
|
2020-04-03 05:02:52 +00:00
|
|
|
j = 0;
|
|
|
|
|
/* should not across blk_cnt */
|
|
|
|
|
for (i = 0; i < blk_cnt; i++) {
|
|
|
|
|
if (j >= blk_cnt)
|
2020-04-26 01:56:58 +00:00
|
|
|
mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
|
2020-04-03 05:02:52 +00:00
|
|
|
for (; j < blk_cnt; j++) {
|
2020-05-12 08:52:21 +00:00
|
|
|
if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
|
2020-04-03 05:02:52 +00:00
|
|
|
mtd_map_blk_table[blk_begin + i] = blk_begin + j;
|
|
|
|
|
j++;
|
|
|
|
|
if (j == blk_cnt)
|
|
|
|
|
j++;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-05-24 15:34:36 +00:00
|
|
|
/*
 * get_mtd_blk_map_address() - translate a logical MTD offset to its
 * remapped physical offset via mtd_map_blk_table.
 *
 * On success *off is rewritten to the physical offset (same offset
 * within the substituted eraseblock) and true is returned. Returns
 * false, leaving *off untouched, when no table exists or the entry
 * for this block is unknown/invalid.
 */
static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	loff_t offset = *off;
	u64 blk_idx = (u64)offset >> mtd->erasesize_shift;
	size_t blk_off = offset & (mtd->erasesize - 1);
	int entry;

	if (!mtd_map_blk_table)
		return false;

	entry = mtd_map_blk_table[blk_idx];
	if (entry == MTD_BLK_TABLE_BLOCK_UNKNOWN || entry == 0xffffffff)
		return false;

	*off = (loff_t)(((u32)entry << mtd->erasesize_shift) + blk_off);

	return true;
}
|
|
|
|
|
|
2020-05-22 03:58:49 +00:00
|
|
|
void mtd_blk_map_partitions(struct blk_desc *desc)
|
|
|
|
|
{
|
|
|
|
|
disk_partition_t info;
|
|
|
|
|
int i, ret;
|
|
|
|
|
|
|
|
|
|
if (!desc)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
if (desc->if_type != IF_TYPE_MTD)
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
|
|
for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
|
|
|
|
|
ret = part_get_info(desc, i, &info);
|
|
|
|
|
if (ret != 0)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (mtd_blk_map_table_init(desc,
|
|
|
|
|
info.start << 9,
|
|
|
|
|
info.size << 9)) {
|
|
|
|
|
printf("mtd block map table fail\n");
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2020-04-03 05:02:52 +00:00
|
|
|
/*
 * mtd_map_read() - read @*length bytes from @offset, transparently
 * skipping bad eraseblocks.
 *
 * Blocks covered by mtd_map_blk_table are redirected to their remapped
 * physical location; otherwise bad blocks are skipped in place. On a
 * fatal read error *length is reduced to the bytes actually consumed
 * and the mtd_read() error is returned. ECC-corrected reads (-EUCLEAN)
 * are treated as success. @actual and @lim are accepted for interface
 * compatibility but unused.
 */
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t remaining = *length;
	u_char *dst = buffer;
	int rval;

	while (remaining > 0) {
		size_t blk_off = offset & (mtd->erasesize - 1);
		size_t chunk;
		loff_t phys;

		/* Ran off the end of the device: report what was read */
		if (offset >= mtd->size)
			return 0;

		phys = offset;
		/* No remap entry: skip bad blocks the classic way */
		if (!get_mtd_blk_map_address(mtd, &phys) &&
		    mtd_block_isbad(mtd, phys & ~(mtd->erasesize - 1))) {
			printf("Skipping bad block 0x%08llx\n",
			       offset & ~(mtd->erasesize - 1));
			offset += mtd->erasesize - blk_off;
			continue;
		}

		/* Read at most to the end of the current eraseblock */
		chunk = mtd->erasesize - blk_off;
		if (remaining < chunk)
			chunk = remaining;

		rval = mtd_read(mtd, phys, chunk, &chunk, dst);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= remaining;
			return rval;
		}

		remaining -= chunk;
		offset += chunk;
		dst += chunk;
	}

	return 0;
}
|
|
|
|
|
|
2019-06-24 06:35:39 +00:00
|
|
|
/**
 * mtd_part_parse() - build a kernel-style "mtdparts=" string from the
 * partition table of the boot MTD device.
 *
 * The result has the form:
 *   mtdparts=<product>:<size>@<start>(<name>),<size>@<start>(<name>),...
 * where size/start are byte values (sector counts shifted left by 9).
 * The last partition's size is shrunk by up to one eraseblock so user
 * programs erasing it cannot clobber the backup GPT at the device end.
 *
 * Return: heap-allocated string the caller must free(), or NULL on error.
 */
char *mtd_part_parse(void)
{
	/* Scratch copy of one partition entry, used only to measure its
	 * length so the write pointer can be advanced past it.
	 */
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Fail to malloc!", __func__);
		return NULL;
	}

	/* Emit the "mtdparts=<product>:" header and advance past it */
	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		/* Write the entry in place, and again into the scratch
		 * buffer purely to learn its length.
		 */
		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		/* Separator after the entry just written; overwritten below
		 * if this turns out to be the last partition.
		 */
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* Nand flash is erased by block and gpt table just
			 * reserve 33 sectors for the last partition. This
			 * will erase the backup gpt table by user program,
			 * so reserve one block.
			 */
			snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
				 (int)(size_t)(info.size -
				 (info.size - 1) %
				 (mtd->erasesize >> 9) - 1) << 9,
				 (int)(size_t)info.start << 9,
				 info.name);
			break;
		}
		/* NOTE(review): data_len is reduced by the entry length but
		 * not the comma, while the write pointer advances by
		 * length + 1 — the remaining-space accounting drifts by one
		 * byte per partition; verify against MTD_PART_INFO_MAX_SIZE.
		 */
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
|
|
|
|
|
|
2019-03-14 08:10:02 +00:00
|
|
|
/**
 * mtd_dread() - block-layer read callback for the MTD blk driver.
 *
 * Dispatches on desc->devnum: raw NAND uses nand_read_skip_bad() (or
 * mtd_map_read() in SPL), SPI-NAND uses mtd_map_read(), SPI-NOR reads
 * directly with mtd_read() since NOR has no bad blocks.
 *
 * @udev:   block udevice
 * @start:  first 512-byte sector to read
 * @blkcnt: number of sectors
 * @dst:    destination buffer
 *
 * Return: @blkcnt on success, 0 on any failure (block-ops convention).
 */
ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	/* Convert sector units to byte offset/length */
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
		/* Raw NAND keeps its mtd_info in the parent's priv */
		mtd = dev_get_priv(udev->parent);
		if (!mtd)
			return 0;

		ret = nand_read_skip_bad(mtd, off, &rwsize,
					 NULL, mtd->size,
					 (u_char *)(dst));
#else
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
#endif
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		size_t retlen_nor;

		/* NOTE(review): mtd_read()'s return code is ignored here;
		 * success is judged solely by retlen_nor == rwsize.
		 */
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}
|
|
|
|
|
|
|
|
|
|
/*
 * mtd_dwrite() - block-layer write callback. Writing through the MTD
 * block wrapper is unsupported; report zero sectors written so callers
 * treat every write as failed.
 */
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	/* Not implemented */
	return 0;
}
|
|
|
|
|
|
|
|
|
|
/*
 * mtd_derase() - block-layer erase callback. Erasing through the MTD
 * block wrapper is unsupported; report zero sectors erased so callers
 * treat every erase as failed.
 */
ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	/* Not implemented */
	return 0;
}
|
|
|
|
|
|
|
|
|
|
static int mtd_blk_probe(struct udevice *udev)
|
|
|
|
|
{
|
2019-08-13 02:12:04 +00:00
|
|
|
struct mtd_info *mtd = dev_get_uclass_priv(udev->parent);
|
2019-03-14 08:10:02 +00:00
|
|
|
struct blk_desc *desc = dev_get_uclass_platdata(udev);
|
2019-10-15 01:21:11 +00:00
|
|
|
int ret, i;
|
2019-03-14 08:10:02 +00:00
|
|
|
|
2019-06-26 12:40:05 +00:00
|
|
|
desc->bdev->priv = mtd;
|
2019-03-14 08:10:02 +00:00
|
|
|
sprintf(desc->vendor, "0x%.4x", 0x2207);
|
2019-06-21 02:21:57 +00:00
|
|
|
memcpy(desc->product, mtd->name, strlen(mtd->name));
|
2019-03-14 08:10:02 +00:00
|
|
|
memcpy(desc->revision, "V1.00", sizeof("V1.00"));
|
2019-06-18 09:24:58 +00:00
|
|
|
if (mtd->type == MTD_NANDFLASH) {
|
2019-10-30 08:37:54 +00:00
|
|
|
if (desc->devnum == BLK_MTD_NAND)
|
|
|
|
|
mtd = dev_get_priv(udev->parent);
|
2019-10-15 01:21:11 +00:00
|
|
|
/*
|
|
|
|
|
* Find the first useful block in the end,
|
|
|
|
|
* and it is the end lba of the nand storage.
|
|
|
|
|
*/
|
|
|
|
|
for (i = 0; i < (mtd->size / mtd->erasesize); i++) {
|
|
|
|
|
ret = mtd_block_isbad(mtd,
|
|
|
|
|
mtd->size - mtd->erasesize * (i + 1));
|
|
|
|
|
if (!ret) {
|
|
|
|
|
desc->lba = (mtd->size >> 9) -
|
|
|
|
|
(mtd->erasesize >> 9) * i;
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
}
|
2019-06-18 09:24:58 +00:00
|
|
|
} else {
|
|
|
|
|
desc->lba = mtd->size >> 9;
|
|
|
|
|
}
|
2019-03-14 08:10:02 +00:00
|
|
|
|
2019-10-15 01:21:11 +00:00
|
|
|
debug("MTD: desc->lba is %lx\n", desc->lba);
|
|
|
|
|
|
2019-03-14 08:10:02 +00:00
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Block operations for the MTD blk driver. Write and erase are stub
 * implementations and are only registered in the full U-Boot build.
 */
static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#ifndef CONFIG_SPL_BUILD
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};
|
|
|
|
|
|
|
|
|
|
/* Register the MTD block driver with the driver-model BLK uclass. */
U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
|