#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
+#include <linux/idr.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
/*
* We've only got one major, so the number of mmcblk devices is
* limited to (1 << 20) / number of minors per device. It is also
- * currently limited by the size of the static bitmaps below.
+ * limited by MAX_DEVICES below.
*/
static int max_devices;
#define MAX_DEVICES 256
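+/*
+ * For example, with the default CONFIG_MMC_BLOCK_MINORS of 8, the
+ * minor space alone would allow (1 << 20) / 8 = 131072 devices, so
+ * MAX_DEVICES is the limit that matters in practice.
+ */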
-/* TODO: Replace these with struct ida */
-static DECLARE_BITMAP(dev_use, MAX_DEVICES);
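+/*
+ * Device name indices come from an ida. ida_get_new() and
+ * ida_remove() require external serialization, provided by
+ * mmc_blk_lock.
+ */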
+static DEFINE_IDA(mmc_blk_ida);
+static DEFINE_SPINLOCK(mmc_blk_lock);
/*
* There is one mmc_blk_data per slot.
int devidx = mmc_get_devidx(md->disk);
blk_cleanup_queue(md->queue.queue);
- __clear_bit(devidx, dev_use);
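+ /* Return the device index to the ida so it can be reused */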
+ spin_lock(&mmc_blk_lock);
+ ida_remove(&mmc_blk_ida, devidx);
+ spin_unlock(&mmc_blk_lock);
put_disk(md->disk);
kfree(md);
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
+ /*
+  * Always switch back to the main area after RPMB access: RPMB is
+  * reachable only via this ioctl path, so block I/O to the main
+  * area would fail if the card remained switched to RPMB.
+  */
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
mmc_put_card(card);
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
for (i = 0; i < num_of_cmds && !ioc_err; i++)
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+ /* Always switch back to main area after RPMB access */
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ mmc_blk_part_switch(card, dev_get_drvdata(&card->dev));
+
mmc_put_card(card);
/* copy to user if data and response */
if (mmc_card_mmc(card)) {
u8 part_config = card->ext_csd.part_config;
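+
+ /*
+  * Re-tuning is not possible while the RPMB partition is selected,
+  * since RPMB allows only its own protocol accesses. Pause re-tuning
+  * when switching to RPMB and resume it when switching away (or if
+  * the switch fails).
+  */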
+ if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ mmc_retune_pause(card->host);
+
part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
part_config |= md->part_type;
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_PART_CONFIG, part_config,
card->ext_csd.part_time);
- if (ret)
+ if (ret) {
+ if (md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ mmc_retune_unpause(card->host);
return ret;
+ }
card->ext_csd.part_config = part_config;
+
+ if (main_md->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ mmc_retune_unpause(card->host);
}
main_md->part_curr = md->part_type;
req->rq_disk->disk_name, "timed out", name, status);
/* If the status cmd initially failed, retry the r/w cmd */
- if (!status_valid)
+ if (!status_valid) {
+ pr_err("%s: status not valid, retrying timeout\n",
+ req->rq_disk->disk_name);
return ERR_RETRY;
+ }
/*
* If it was a r/w cmd crc error, or illegal command
* (eg, issued in wrong state) then retry - we should
* have corrected the state problem above.
*/
- if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+ pr_err("%s: command error, retrying timeout\n",
+ req->rq_disk->disk_name);
return ERR_RETRY;
+ }
/* Otherwise abort the command */
return ERR_ABORT;
!IS_ALIGNED(blk_rq_sectors(next), 8))
break;
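+ /* Discard and flush requests cannot be packed with r/w requests */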
- if (next->cmd_flags & REQ_DISCARD ||
- next->cmd_flags & REQ_FLUSH)
+ if (req_op(next) == REQ_OP_DISCARD || next->cmd_flags & REQ_FLUSH)
break;
if (rq_data_dir(cur) != rq_data_dir(next))
}
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
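+ /* req is NULL when called only to complete a previous async request */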
- if (cmd_flags & REQ_DISCARD) {
+ if (req && req_op(req) == REQ_OP_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
out:
if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
- (cmd_flags & MMC_REQ_SPECIAL_MASK))
+ mmc_req_is_special(req))
/*
* Release host when there are no more requests
* and after special request (discard, flush) is done.
struct mmc_blk_data *md;
int devidx, ret;
- devidx = find_first_zero_bit(dev_use, max_devices);
- if (devidx >= max_devices)
- return ERR_PTR(-ENOSPC);
- __set_bit(devidx, dev_use);
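+ /*
+  * ida_pre_get() preallocates memory for the ida; ida_get_new() can
+  * still return -EAGAIN if that preallocation was consumed by a
+  * concurrent allocation, in which case preallocate and try again.
+  */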
+again:
+ if (!ida_pre_get(&mmc_blk_ida, GFP_KERNEL))
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&mmc_blk_lock);
+ ret = ida_get_new(&mmc_blk_ida, &devidx);
+ spin_unlock(&mmc_blk_lock);
+
+ if (ret == -EAGAIN)
+ goto again;
+ else if (ret)
+ return ERR_PTR(ret);
+
+ if (devidx >= max_devices) {
+ ret = -ENOSPC;
+ goto out;
+ }
md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
if (!md) {
((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
card->ext_csd.rel_sectors)) {
md->flags |= MMC_BLK_REL_WR;
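+ /*
+  * blk_queue_write_cache() supersedes blk_queue_flush(); the two
+  * bools enable flushing of the volatile write cache and FUA.
+  */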
- blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+ blk_queue_write_cache(md->queue.queue, true, true);
}
if (mmc_card_mmc(card) &&
err_kfree:
kfree(md);
out:
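+ /* Release the device index allocated earlier in this function */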
+ spin_lock(&mmc_blk_lock);
+ ida_remove(&mmc_blk_ida, devidx);
+ spin_unlock(&mmc_blk_lock);
return ERR_PTR(ret);
}
MMC_QUIRK_BLK_NO_CMD23),
/*
- * Some Micron MMC cards needs longer data read timeout than
- * indicated in CSD.
+ * Some MMC cards need longer data read timeout than indicated in CSD.
*/
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
MMC_QUIRK_LONG_READ_TIME),
+ MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
/*
* On these Samsung MoviNAND parts, performing secure erase or