/*
 *  linux/drivers/mmc/core/mmc.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
/* CSD TRAN_SPEED frequency-unit table; indices 4-7 are reserved (0). */
static const unsigned int tran_exp[] = {
    10000,      100000,     1000000,    10000000,
    0,      0,      0,      0
};
/* CSD TRAN_SPEED multiplier table, in tenths (e.g. 12 means x1.2). */
static const unsigned char tran_mant[] = {
    0,  10, 12, 13, 15, 20, 25, 30,
    35, 40, 45, 50, 55, 60, 70, 80,
};
/* CSD TAAC time-unit table, in nanoseconds per step. */
static const unsigned int tacc_exp[] = {
    1,  10, 100,    1000,   10000,  100000, 1000000, 10000000,
};
/* CSD TAAC multiplier table, in tenths (e.g. 12 means x1.2). */
static const unsigned int tacc_mant[] = {
    0,  10, 12, 13, 15, 20, 25, 30,
    35, 40, 45, 50, 55, 60, 70, 80,
};
/*
 * Extract a bit field of @size bits starting at bit @start from a
 * 128-bit response stored as four big-endian-ordered u32 words
 * (resp[0] holds bits 127:96). Uses a GCC statement expression so
 * the result can be used as a value.
 */
#define UNSTUFF_BITS(resp,start,size)                   \
    ({                              \
        const int __size = size;                \
        const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
        const int __off = 3 - ((start) / 32);           \
        const int __shft = (start) & 31;            \
        u32 __res;                      \
                                    \
        __res = resp[__off] >> __shft;              \
        if (__size + __shft > 32)               \
            __res |= resp[__off-1] << ((32 - __shft) % 32); \
        __res & __mask;                     \
    })
61 * Given the decoded CSD structure, decode the raw CID to our CID structure.
63 static int mmc_decode_cid(struct mmc_card *card)
65 u32 *resp = card->raw_cid;
68 * The selection of the format here is based upon published
69 * specs from sandisk and from what people have reported.
71 switch (card->csd.mmca_vsn) {
72 case 0: /* MMC v1.0 - v1.2 */
73 case 1: /* MMC v1.4 */
74 card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
75 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
76 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
77 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
78 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
79 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
80 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
81 card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
82 card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
83 card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
84 card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
85 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
86 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
89 case 2: /* MMC v2.0 - v2.2 */
90 case 3: /* MMC v3.1 - v3.3 */
92 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
93 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
94 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
95 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
96 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
97 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
98 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
99 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
100 card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
101 card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
102 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
103 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
107 pr_err("%s: card has unknown MMCA version %d\n",
108 mmc_hostname(card->host), card->csd.mmca_vsn);
115 static void mmc_set_erase_size(struct mmc_card *card)
117 if (card->ext_csd.erase_group_def & 1)
118 card->erase_size = card->ext_csd.hc_erase_size;
120 card->erase_size = card->csd.erase_size;
122 mmc_init_erase(card);
126 * Given a 128-bit response, decode to our card CSD structure.
128 static int mmc_decode_csd(struct mmc_card *card)
130 struct mmc_csd *csd = &card->csd;
131 unsigned int e, m, a, b;
132 u32 *resp = card->raw_csd;
135 * We only understand CSD structure v1.1 and v1.2.
136 * v1.2 has extra information in bits 15, 11 and 10.
137 * We also support eMMC v4.4 & v4.41.
139 csd->structure = UNSTUFF_BITS(resp, 126, 2);
140 if (csd->structure == 0) {
141 pr_err("%s: unrecognised CSD structure version %d\n",
142 mmc_hostname(card->host), csd->structure);
146 csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
147 m = UNSTUFF_BITS(resp, 115, 4);
148 e = UNSTUFF_BITS(resp, 112, 3);
149 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
150 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
152 m = UNSTUFF_BITS(resp, 99, 4);
153 e = UNSTUFF_BITS(resp, 96, 3);
154 csd->max_dtr = tran_exp[e] * tran_mant[m];
155 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
157 e = UNSTUFF_BITS(resp, 47, 3);
158 m = UNSTUFF_BITS(resp, 62, 12);
159 csd->capacity = (1 + m) << (e + 2);
161 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
162 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
163 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
164 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
165 csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
166 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
167 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
168 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
170 if (csd->write_blkbits >= 9) {
171 a = UNSTUFF_BITS(resp, 42, 5);
172 b = UNSTUFF_BITS(resp, 37, 5);
173 csd->erase_size = (a + 1) * (b + 1);
174 csd->erase_size <<= csd->write_blkbits - 9;
183 static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
189 BUG_ON(!new_ext_csd);
193 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
197 * As the ext_csd is so large and mostly unused, we don't store the
198 * raw block in mmc_card.
200 ext_csd = kmalloc(512, GFP_KERNEL);
204 err = mmc_send_ext_csd(card, ext_csd);
209 /* If the host or the card can't do the switch,
210 * fail more gracefully. */
217 * High capacity cards should have this "magic" size
218 * stored in their CSD.
220 if (card->csd.capacity == (4096 * 512)) {
221 pr_err("%s: unable to read EXT_CSD "
222 "on a possible high capacity card. "
223 "Card will be ignored.\n",
224 mmc_hostname(card->host));
226 pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
227 mmc_hostname(card->host));
231 *new_ext_csd = ext_csd;
236 static void mmc_select_card_type(struct mmc_card *card)
238 struct mmc_host *host = card->host;
239 u8 card_type = card->ext_csd.raw_card_type;
240 u32 caps = host->caps, caps2 = host->caps2;
241 unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
242 unsigned int avail_type = 0;
244 if (caps & MMC_CAP_MMC_HIGHSPEED &&
245 card_type & EXT_CSD_CARD_TYPE_HS_26) {
246 hs_max_dtr = MMC_HIGH_26_MAX_DTR;
247 avail_type |= EXT_CSD_CARD_TYPE_HS_26;
250 if (caps & MMC_CAP_MMC_HIGHSPEED &&
251 card_type & EXT_CSD_CARD_TYPE_HS_52) {
252 hs_max_dtr = MMC_HIGH_52_MAX_DTR;
253 avail_type |= EXT_CSD_CARD_TYPE_HS_52;
256 if (caps & MMC_CAP_1_8V_DDR &&
257 card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
258 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
259 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
262 if (caps & MMC_CAP_1_2V_DDR &&
263 card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
264 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
265 avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
268 if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
269 card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
270 hs200_max_dtr = MMC_HS200_MAX_DTR;
271 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
274 if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
275 card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
276 hs200_max_dtr = MMC_HS200_MAX_DTR;
277 avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
280 if (caps2 & MMC_CAP2_HS400_1_8V &&
281 card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
282 hs200_max_dtr = MMC_HS200_MAX_DTR;
283 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
286 if (caps2 & MMC_CAP2_HS400_1_2V &&
287 card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
288 hs200_max_dtr = MMC_HS200_MAX_DTR;
289 avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
292 card->ext_csd.hs_max_dtr = hs_max_dtr;
293 card->ext_csd.hs200_max_dtr = hs200_max_dtr;
294 card->mmc_avail_type = avail_type;
297 static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
299 u8 hc_erase_grp_sz, hc_wp_grp_sz;
302 * Disable these attributes by default
304 card->ext_csd.enhanced_area_offset = -EINVAL;
305 card->ext_csd.enhanced_area_size = -EINVAL;
308 * Enhanced area feature support -- check whether the eMMC
309 * card has the Enhanced area enabled. If so, export enhanced
310 * area offset and size to user by adding sysfs interface.
312 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
313 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
314 if (card->ext_csd.partition_setting_completed) {
316 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
318 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
321 * calculate the enhanced data area offset, in bytes
323 card->ext_csd.enhanced_area_offset =
324 (ext_csd[139] << 24) + (ext_csd[138] << 16) +
325 (ext_csd[137] << 8) + ext_csd[136];
326 if (mmc_card_blockaddr(card))
327 card->ext_csd.enhanced_area_offset <<= 9;
329 * calculate the enhanced data area size, in kilobytes
331 card->ext_csd.enhanced_area_size =
332 (ext_csd[142] << 16) + (ext_csd[141] << 8) +
334 card->ext_csd.enhanced_area_size *=
335 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
336 card->ext_csd.enhanced_area_size <<= 9;
338 pr_warn("%s: defines enhanced area without partition setting complete\n",
339 mmc_hostname(card->host));
344 static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
347 u8 hc_erase_grp_sz, hc_wp_grp_sz;
348 unsigned int part_size;
351 * General purpose partition feature support --
352 * If ext_csd has the size of general purpose partitions,
353 * set size, part_cfg, partition name in mmc_part.
355 if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
356 EXT_CSD_PART_SUPPORT_PART_EN) {
358 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
360 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
362 for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
363 if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
364 !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
365 !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
367 if (card->ext_csd.partition_setting_completed == 0) {
368 pr_warn("%s: has partition size defined without partition complete\n",
369 mmc_hostname(card->host));
373 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
375 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
377 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
378 part_size *= (size_t)(hc_erase_grp_sz *
380 mmc_part_add(card, part_size << 19,
381 EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
383 MMC_BLK_DATA_AREA_GP);
389 * Decode extended CSD.
391 static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
394 unsigned int part_size;
401 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
402 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
403 if (card->csd.structure == 3) {
404 if (card->ext_csd.raw_ext_csd_structure > 2) {
405 pr_err("%s: unrecognised EXT_CSD structure "
406 "version %d\n", mmc_hostname(card->host),
407 card->ext_csd.raw_ext_csd_structure);
414 * The EXT_CSD format is meant to be forward compatible. As long
415 * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
416 * are authorized, see JEDEC JESD84-B50 section B.8.
418 card->ext_csd.rev = ext_csd[EXT_CSD_REV];
420 card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
421 card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
422 card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
423 card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
424 if (card->ext_csd.rev >= 2) {
425 card->ext_csd.sectors =
426 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
427 ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
428 ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
429 ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
431 /* Cards with density > 2GiB are sector addressed */
432 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
433 mmc_card_set_blockaddr(card);
436 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
437 mmc_select_card_type(card);
439 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
440 card->ext_csd.raw_erase_timeout_mult =
441 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
442 card->ext_csd.raw_hc_erase_grp_size =
443 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
444 if (card->ext_csd.rev >= 3) {
445 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
446 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
448 /* EXT_CSD value is in units of 10ms, but we store in ms */
449 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
451 /* Sleep / awake timeout in 100ns units */
452 if (sa_shift > 0 && sa_shift <= 0x17)
453 card->ext_csd.sa_timeout =
454 1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
455 card->ext_csd.erase_group_def =
456 ext_csd[EXT_CSD_ERASE_GROUP_DEF];
457 card->ext_csd.hc_erase_timeout = 300 *
458 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
459 card->ext_csd.hc_erase_size =
460 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
462 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
465 * There are two boot regions of equal size, defined in
468 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
469 for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
470 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
471 mmc_part_add(card, part_size,
472 EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
474 MMC_BLK_DATA_AREA_BOOT);
479 card->ext_csd.raw_hc_erase_gap_size =
480 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
481 card->ext_csd.raw_sec_trim_mult =
482 ext_csd[EXT_CSD_SEC_TRIM_MULT];
483 card->ext_csd.raw_sec_erase_mult =
484 ext_csd[EXT_CSD_SEC_ERASE_MULT];
485 card->ext_csd.raw_sec_feature_support =
486 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
487 card->ext_csd.raw_trim_mult =
488 ext_csd[EXT_CSD_TRIM_MULT];
489 card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
490 if (card->ext_csd.rev >= 4) {
491 if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
492 EXT_CSD_PART_SETTING_COMPLETED)
493 card->ext_csd.partition_setting_completed = 1;
495 card->ext_csd.partition_setting_completed = 0;
497 mmc_manage_enhanced_area(card, ext_csd);
499 mmc_manage_gp_partitions(card, ext_csd);
501 card->ext_csd.sec_trim_mult =
502 ext_csd[EXT_CSD_SEC_TRIM_MULT];
503 card->ext_csd.sec_erase_mult =
504 ext_csd[EXT_CSD_SEC_ERASE_MULT];
505 card->ext_csd.sec_feature_support =
506 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
507 card->ext_csd.trim_timeout = 300 *
508 ext_csd[EXT_CSD_TRIM_MULT];
511 * Note that the call to mmc_part_add above defaults to read
512 * only. If this default assumption is changed, the call must
513 * take into account the value of boot_locked below.
515 card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
516 card->ext_csd.boot_ro_lockable = true;
518 /* Save power class values */
519 card->ext_csd.raw_pwr_cl_52_195 =
520 ext_csd[EXT_CSD_PWR_CL_52_195];
521 card->ext_csd.raw_pwr_cl_26_195 =
522 ext_csd[EXT_CSD_PWR_CL_26_195];
523 card->ext_csd.raw_pwr_cl_52_360 =
524 ext_csd[EXT_CSD_PWR_CL_52_360];
525 card->ext_csd.raw_pwr_cl_26_360 =
526 ext_csd[EXT_CSD_PWR_CL_26_360];
527 card->ext_csd.raw_pwr_cl_200_195 =
528 ext_csd[EXT_CSD_PWR_CL_200_195];
529 card->ext_csd.raw_pwr_cl_200_360 =
530 ext_csd[EXT_CSD_PWR_CL_200_360];
531 card->ext_csd.raw_pwr_cl_ddr_52_195 =
532 ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
533 card->ext_csd.raw_pwr_cl_ddr_52_360 =
534 ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
535 card->ext_csd.raw_pwr_cl_ddr_200_360 =
536 ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
539 if (card->ext_csd.rev >= 5) {
540 /* Adjust production date as per JEDEC JESD84-B451 */
541 if (card->cid.year < 2010)
542 card->cid.year += 16;
544 /* check whether the eMMC card supports BKOPS */
545 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
546 card->ext_csd.bkops = 1;
547 card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
548 card->ext_csd.raw_bkops_status =
549 ext_csd[EXT_CSD_BKOPS_STATUS];
550 if (!card->ext_csd.bkops_en)
551 pr_info("%s: BKOPS_EN bit is not set\n",
552 mmc_hostname(card->host));
555 /* check whether the eMMC card supports HPI */
556 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
557 card->ext_csd.hpi = 1;
558 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
559 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
561 card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
563 * Indicate the maximum timeout to close
564 * a command interrupted by HPI
566 card->ext_csd.out_of_int_time =
567 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
570 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
571 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
574 * RPMB regions are defined in multiples of 128K.
576 card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
577 if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
578 mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
579 EXT_CSD_PART_CONFIG_ACC_RPMB,
581 MMC_BLK_DATA_AREA_RPMB);
585 card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
586 if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
587 card->erased_byte = 0xFF;
589 card->erased_byte = 0x0;
591 /* eMMC v4.5 or later */
592 if (card->ext_csd.rev >= 6) {
593 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
595 card->ext_csd.generic_cmd6_time = 10 *
596 ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
597 card->ext_csd.power_off_longtime = 10 *
598 ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
600 card->ext_csd.cache_size =
601 ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
602 ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
603 ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
604 ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
606 if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
607 card->ext_csd.data_sector_size = 4096;
609 card->ext_csd.data_sector_size = 512;
611 if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
612 (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
613 card->ext_csd.data_tag_unit_size =
614 ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
615 (card->ext_csd.data_sector_size);
617 card->ext_csd.data_tag_unit_size = 0;
620 card->ext_csd.max_packed_writes =
621 ext_csd[EXT_CSD_MAX_PACKED_WRITES];
622 card->ext_csd.max_packed_reads =
623 ext_csd[EXT_CSD_MAX_PACKED_READS];
625 card->ext_csd.data_sector_size = 512;
628 /* eMMC v5 or later */
629 if (card->ext_csd.rev >= 7) {
630 memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
632 card->ext_csd.ffu_capable =
633 (ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
634 !(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
640 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
645 if (bus_width == MMC_BUS_WIDTH_1)
648 err = mmc_get_ext_csd(card, &bw_ext_csd);
650 if (err || bw_ext_csd == NULL) {
655 /* only compare read only fields */
656 err = !((card->ext_csd.raw_partition_support ==
657 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
658 (card->ext_csd.raw_erased_mem_count ==
659 bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
660 (card->ext_csd.rev ==
661 bw_ext_csd[EXT_CSD_REV]) &&
662 (card->ext_csd.raw_ext_csd_structure ==
663 bw_ext_csd[EXT_CSD_STRUCTURE]) &&
664 (card->ext_csd.raw_card_type ==
665 bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
666 (card->ext_csd.raw_s_a_timeout ==
667 bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
668 (card->ext_csd.raw_hc_erase_gap_size ==
669 bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
670 (card->ext_csd.raw_erase_timeout_mult ==
671 bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
672 (card->ext_csd.raw_hc_erase_grp_size ==
673 bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
674 (card->ext_csd.raw_sec_trim_mult ==
675 bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
676 (card->ext_csd.raw_sec_erase_mult ==
677 bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
678 (card->ext_csd.raw_sec_feature_support ==
679 bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
680 (card->ext_csd.raw_trim_mult ==
681 bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
682 (card->ext_csd.raw_sectors[0] ==
683 bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
684 (card->ext_csd.raw_sectors[1] ==
685 bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
686 (card->ext_csd.raw_sectors[2] ==
687 bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
688 (card->ext_csd.raw_sectors[3] ==
689 bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
690 (card->ext_csd.raw_pwr_cl_52_195 ==
691 bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
692 (card->ext_csd.raw_pwr_cl_26_195 ==
693 bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
694 (card->ext_csd.raw_pwr_cl_52_360 ==
695 bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
696 (card->ext_csd.raw_pwr_cl_26_360 ==
697 bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
698 (card->ext_csd.raw_pwr_cl_200_195 ==
699 bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
700 (card->ext_csd.raw_pwr_cl_200_360 ==
701 bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
702 (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
703 bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
704 (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
705 bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
706 (card->ext_csd.raw_pwr_cl_ddr_200_360 ==
707 bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));
717 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
718 card->raw_cid[2], card->raw_cid[3]);
719 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
720 card->raw_csd[2], card->raw_csd[3]);
721 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
722 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
723 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
724 MMC_DEV_ATTR(ffu_capable, "%d\n", card->ext_csd.ffu_capable);
725 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
726 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
727 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
728 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
729 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
730 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
731 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
732 card->ext_csd.enhanced_area_offset);
733 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
734 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
735 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
737 static ssize_t mmc_fwrev_show(struct device *dev,
738 struct device_attribute *attr,
741 struct mmc_card *card = mmc_dev_to_card(dev);
743 if (card->ext_csd.rev < 7) {
744 return sprintf(buf, "0x%x\n", card->cid.fwrev);
746 return sprintf(buf, "0x%*phN\n", MMC_FIRMWARE_LEN,
747 card->ext_csd.fwrev);
751 static DEVICE_ATTR(fwrev, S_IRUGO, mmc_fwrev_show, NULL);
753 static struct attribute *mmc_std_attrs[] = {
757 &dev_attr_erase_size.attr,
758 &dev_attr_preferred_erase_size.attr,
759 &dev_attr_fwrev.attr,
760 &dev_attr_ffu_capable.attr,
761 &dev_attr_hwrev.attr,
762 &dev_attr_manfid.attr,
764 &dev_attr_oemid.attr,
766 &dev_attr_serial.attr,
767 &dev_attr_enhanced_area_offset.attr,
768 &dev_attr_enhanced_area_size.attr,
769 &dev_attr_raw_rpmb_size_mult.attr,
770 &dev_attr_rel_sectors.attr,
773 ATTRIBUTE_GROUPS(mmc_std);
775 static struct device_type mmc_type = {
776 .groups = mmc_std_groups,
780 * Select the PowerClass for the current bus width
781 * If power class is defined for 4/8 bit bus in the
782 * extended CSD register, select it by executing the
783 * mmc_switch command.
785 static int __mmc_select_powerclass(struct mmc_card *card,
786 unsigned int bus_width)
788 struct mmc_host *host = card->host;
789 struct mmc_ext_csd *ext_csd = &card->ext_csd;
790 unsigned int pwrclass_val = 0;
793 switch (1 << host->ios.vdd) {
794 case MMC_VDD_165_195:
795 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
796 pwrclass_val = ext_csd->raw_pwr_cl_26_195;
797 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
798 pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
799 ext_csd->raw_pwr_cl_52_195 :
800 ext_csd->raw_pwr_cl_ddr_52_195;
801 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
802 pwrclass_val = ext_csd->raw_pwr_cl_200_195;
813 if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
814 pwrclass_val = ext_csd->raw_pwr_cl_26_360;
815 else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
816 pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
817 ext_csd->raw_pwr_cl_52_360 :
818 ext_csd->raw_pwr_cl_ddr_52_360;
819 else if (host->ios.clock <= MMC_HS200_MAX_DTR)
820 pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
821 ext_csd->raw_pwr_cl_ddr_200_360 :
822 ext_csd->raw_pwr_cl_200_360;
825 pr_warn("%s: Voltage range not supported for power class\n",
830 if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
831 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
832 EXT_CSD_PWR_CL_8BIT_SHIFT;
834 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
835 EXT_CSD_PWR_CL_4BIT_SHIFT;
837 /* If the power class is different from the default value */
838 if (pwrclass_val > 0) {
839 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
842 card->ext_csd.generic_cmd6_time);
848 static int mmc_select_powerclass(struct mmc_card *card)
850 struct mmc_host *host = card->host;
851 u32 bus_width, ext_csd_bits;
854 /* Power class selection is supported for versions >= 4.0 */
855 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
858 bus_width = host->ios.bus_width;
859 /* Power class values are defined only for 4/8 bit bus */
860 if (bus_width == MMC_BUS_WIDTH_1)
863 ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
865 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
866 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
868 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
869 EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
871 err = __mmc_select_powerclass(card, ext_csd_bits);
873 pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
874 mmc_hostname(host), 1 << bus_width, ddr);
880 * Set the bus speed for the selected speed mode.
882 static void mmc_set_bus_speed(struct mmc_card *card)
884 unsigned int max_dtr = (unsigned int)-1;
886 if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
887 max_dtr > card->ext_csd.hs200_max_dtr)
888 max_dtr = card->ext_csd.hs200_max_dtr;
889 else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
890 max_dtr = card->ext_csd.hs_max_dtr;
891 else if (max_dtr > card->csd.max_dtr)
892 max_dtr = card->csd.max_dtr;
894 mmc_set_clock(card->host, max_dtr);
898 * Select the bus width amoung 4-bit and 8-bit(SDR).
899 * If the bus width is changed successfully, return the selected width value.
900 * Zero is returned instead of error value if the wide width is not supported.
902 static int mmc_select_bus_width(struct mmc_card *card)
904 static unsigned ext_csd_bits[] = {
908 static unsigned bus_widths[] = {
912 struct mmc_host *host = card->host;
913 unsigned idx, bus_width = 0;
916 if ((card->csd.mmca_vsn < CSD_SPEC_VER_4) &&
917 !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
920 idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;
923 * Unlike SD, MMC cards dont have a configuration register to notify
924 * supported bus width. So bus test command should be run to identify
925 * the supported bus width or compare the ext csd values of current
926 * bus width and ext csd values of 1 bit mode read earlier.
928 for (; idx < ARRAY_SIZE(bus_widths); idx++) {
930 * Host is capable of 8bit transfer, then switch
931 * the device to work in 8bit transfer mode. If the
932 * mmc switch command returns error then switch to
933 * 4bit transfer mode. On success set the corresponding
934 * bus width on the host.
936 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
939 card->ext_csd.generic_cmd6_time);
943 bus_width = bus_widths[idx];
944 mmc_set_bus_width(host, bus_width);
947 * If controller can't handle bus width test,
948 * compare ext_csd previously read in 1 bit mode
949 * against ext_csd at new bus width
951 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
952 err = mmc_compare_ext_csds(card, bus_width);
954 err = mmc_bus_test(card, bus_width);
960 pr_warn("%s: switch to bus width %d failed\n",
961 mmc_hostname(host), ext_csd_bits[idx]);
969 * Switch to the high-speed mode
971 static int mmc_select_hs(struct mmc_card *card)
975 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
976 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
977 card->ext_csd.generic_cmd6_time,
980 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
986 * Activate wide bus and DDR if supported.
988 static int mmc_select_hs_ddr(struct mmc_card *card)
990 struct mmc_host *host = card->host;
991 u32 bus_width, ext_csd_bits;
994 if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
997 bus_width = host->ios.bus_width;
998 if (bus_width == MMC_BUS_WIDTH_1)
1001 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
1002 EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
1004 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1007 card->ext_csd.generic_cmd6_time);
1009 pr_err("%s: switch to bus width %d ddr failed\n",
1010 mmc_hostname(host), 1 << bus_width);
1015 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
1018 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
1020 * 1.8V vccq at 3.3V core voltage (vcc) is not required
1021 * in the JEDEC spec for DDR.
1023 * Even (e)MMC card can support 3.3v to 1.2v vccq, but not all
1024 * host controller can support this, like some of the SDHCI
1025 * controller which connect to an eMMC device. Some of these
1026 * host controller still needs to use 1.8v vccq for supporting
1029 * So the sequence will be:
1030 * if (host and device can both support 1.2v IO)
1032 * else if (host and device can both support 1.8v IO)
1034 * so if host and device can only support 3.3v IO, this is the
1037 * WARNING: eMMC rules are NOT the same as SD DDR
1040 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
1041 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1043 if (err && (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
1044 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1046 /* make sure vccq is 3.3v after switching disaster */
1048 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
1051 mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
1056 static int mmc_select_hs400(struct mmc_card *card)
1058 struct mmc_host *host = card->host;
1062 * HS400 mode requires 8-bit bus width
1064 if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1065 host->ios.bus_width == MMC_BUS_WIDTH_8))
1069 * Before switching to dual data rate operation for HS400,
1070 * it is required to convert from HS200 mode to HS mode.
1072 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1073 mmc_set_bus_speed(card);
1075 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1076 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
1077 card->ext_csd.generic_cmd6_time,
1080 pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
1081 mmc_hostname(host), err);
1085 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1087 EXT_CSD_DDR_BUS_WIDTH_8,
1088 card->ext_csd.generic_cmd6_time);
1090 pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
1091 mmc_hostname(host), err);
1095 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1096 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400,
1097 card->ext_csd.generic_cmd6_time,
1100 pr_err("%s: switch to hs400 failed, err:%d\n",
1101 mmc_hostname(host), err);
1105 mmc_set_timing(host, MMC_TIMING_MMC_HS400);
1106 mmc_set_bus_speed(card);
1112 * For device supporting HS200 mode, the following sequence
1113 * should be done before executing the tuning process.
1114 * 1. set the desired bus width(4-bit or 8-bit, 1-bit is not supported)
1115 * 2. switch to HS200 mode
1116 * 3. set the clock to > 52Mhz and <=200MHz
1118 static int mmc_select_hs200(struct mmc_card *card)
1120 struct mmc_host *host = card->host;
1123 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1124 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1126 if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1127 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1129 /* If fails try again during next card power cycle */
1134 * Set the bus width(4 or 8) with host's support and
1135 * switch to HS200 mode if bus width is set successfully.
1137 err = mmc_select_bus_width(card);
1138 if (!IS_ERR_VALUE(err)) {
1139 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1140 EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200,
1141 card->ext_csd.generic_cmd6_time,
1144 mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1151 * Activate High Speed or HS200 mode if supported.
/*
 * Pick the fastest supported timing: HS200 when available, otherwise
 * plain High Speed; legacy cards (pre CSD spec v4) keep backward-
 * compatible timing.
 */
1153 static int mmc_select_timing(struct mmc_card *card)
1157 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
1160 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
1161 err = mmc_select_hs200(card);
1162 else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
1163 err = mmc_select_hs(card);
/* -EBADMSG (CRC error on the switch status) is tolerated here;
 * any other error is fatal for timing selection. */
1165 if (err && err != -EBADMSG)
1169 pr_warn("%s: switch to %s failed\n",
1170 mmc_card_hs(card) ? "high-speed" :
1171 (mmc_card_hs200(card) ? "hs200" : ""),
1172 mmc_hostname(card->host))
1178 * Set the bus speed to the selected bus timing.
1179 * If timing is not selected, backward compatible is the default.
1181 mmc_set_bus_speed(card);
/*
 * Fixed tuning block pattern sent by the card in response to CMD21
 * on a 4-bit bus (see MMC_SEND_TUNING_BLOCK_HS200); hosts compare
 * the received data against this table during tuning.
 */
1185 const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE] = {
1186 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
1187 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
1188 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
1189 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
1190 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
1191 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
1192 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
1193 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
1195 EXPORT_SYMBOL(tuning_blk_pattern_4bit);
/*
 * Fixed tuning block pattern for an 8-bit bus, used the same way as
 * tuning_blk_pattern_4bit but twice as long.
 */
1197 const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE] = {
1198 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
1199 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
1200 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
1201 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
1202 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
1203 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
1204 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
1205 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
1206 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
1207 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
1208 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
1209 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
1210 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
1211 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
1212 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
1213 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
1215 EXPORT_SYMBOL(tuning_blk_pattern_8bit);
1218 * Execute tuning sequence to seek the proper bus operating
1219 * conditions for HS200 and HS400, which sends CMD21 to the device.
1221 static int mmc_hs200_tuning(struct mmc_card *card)
1223 struct mmc_host *host = card->host;
1227 * Timing should be adjusted to the HS400 target
1228 * operation frequency for tuning process
/* Let the host driver pre-adjust for HS400 only when the card supports
 * HS400 and the bus is already 8-bit wide; the hook is optional. */
1230 if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1231 host->ios.bus_width == MMC_BUS_WIDTH_8)
1232 if (host->ops->prepare_hs400_tuning)
1233 host->ops->prepare_hs400_tuning(host, &host->ios);
/* Tuning itself (CMD21) is delegated to the host driver; the clock is
 * held so it cannot be gated while tuning runs. */
1235 if (host->ops->execute_tuning) {
1236 mmc_host_clk_hold(host);
1237 err = host->ops->execute_tuning(host,
1238 MMC_SEND_TUNING_BLOCK_HS200);
1239 mmc_host_clk_release(host);
1242 pr_err("%s: tuning execution failed\n",
1243 mmc_hostname(host));
1250 * Handle the detection and initialisation of a card.
1252 * In the case of a resume, "oldcard" will contain the card
1253 * we're trying to reinitialise.
/*
 * Handle the detection and initialisation of a card.
 *
 * Brings a (e)MMC card from idle to fully initialised: OCR negotiation,
 * CID/CSD/EXT_CSD readout, timing/bus-width selection and optional
 * feature enablement (ERASE_GRP_DEF, power-off notify, HPI, cache,
 * packed commands). On resume, @oldcard is the card being
 * reinitialised and its CID must match the one read back.
 */
1255 static int mmc_init_card(struct mmc_host *host, u32 ocr,
1256 struct mmc_card *oldcard)
1258 struct mmc_card *card;
1265 WARN_ON(!host->claimed);
1267 /* Set correct bus mode for MMC before attempting init */
1268 if (!mmc_host_is_spi(host))
1269 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1272 * Since we're changing the OCR value, we seem to
1273 * need to tell some cards to go back to the idle
1274 * state. We wait 1ms to give cards time to
1276 * mmc_go_idle is needed for eMMC that are asleep
1280 /* The extra bit indicates that we support high capacity */
1281 err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
1286 * For SPI, enable CRC as appropriate.
1288 if (mmc_host_is_spi(host)) {
1289 err = mmc_spi_set_crc(host, use_spi_crc);
1295 * Fetch CID from card.
1297 if (mmc_host_is_spi(host))
1298 err = mmc_send_cid(host, cid);
1300 err = mmc_all_send_cid(host, cid);
/* On resume the CID must match the card we suspended with. */
1305 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
1313 * Allocate card structure.
1315 card = mmc_alloc_card(host, &mmc_type);
1317 err = PTR_ERR(card);
1322 card->type = MMC_TYPE_MMC;
1324 memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
1328 * For native busses: set card RCA and quit open drain mode.
1330 if (!mmc_host_is_spi(host)) {
1331 err = mmc_set_relative_addr(card);
1335 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
1340 * Fetch CSD from card.
1342 err = mmc_send_csd(card, card->raw_csd);
1346 err = mmc_decode_csd(card);
1349 err = mmc_decode_cid(card);
1355 * handling only for cards supporting DSR and hosts requesting
1358 if (card->csd.dsr_imp && host->dsr_req)
1362 * Select card, as all following commands rely on that.
1364 if (!mmc_host_is_spi(host)) {
1365 err = mmc_select_card(card);
1372 * Fetch and process extended CSD.
1375 err = mmc_get_ext_csd(card, &ext_csd);
1378 err = mmc_read_ext_csd(card, ext_csd);
1382 /* If doing byte addressing, check if required to do sector
1383 * addressing. Handle the case of <2GB cards needing sector
1384 * addressing. See section 8.1 JEDEC Standard JED84-A441;
1385 * ocr register has bit 30 set for sector addressing.
1387 if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
1388 mmc_card_set_blockaddr(card);
1390 /* Erase size depends on CSD and Extended CSD */
1391 mmc_set_erase_size(card);
1395 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
1396 * bit. This bit will be lost every time after a reset or power off.
1398 if (card->ext_csd.partition_setting_completed ||
1399 (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
1400 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1401 EXT_CSD_ERASE_GROUP_DEF, 1,
1402 card->ext_csd.generic_cmd6_time);
1404 if (err && err != -EBADMSG)
1410 * Just disable enhanced area off & sz
1411 * will try to enable ERASE_GROUP_DEF
1412 * during next time reinit
1414 card->ext_csd.enhanced_area_offset = -EINVAL;
1415 card->ext_csd.enhanced_area_size = -EINVAL;
1417 card->ext_csd.erase_group_def = 1;
1419 * enable ERASE_GRP_DEF successfully.
1420 * This will affect the erase size, so
1421 * here need to reset erase size
1423 mmc_set_erase_size(card);
1428 * Ensure eMMC user default partition is enabled
1430 if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
1431 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
1432 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
1433 card->ext_csd.part_config,
1434 card->ext_csd.part_time);
1435 if (err && err != -EBADMSG)
1440 * Enable power_off_notification byte in the ext_csd register
1442 if (card->ext_csd.rev >= 6) {
1443 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1444 EXT_CSD_POWER_OFF_NOTIFICATION,
1446 card->ext_csd.generic_cmd6_time);
1447 if (err && err != -EBADMSG)
1451 * The err can be -EBADMSG or 0,
1452 * so check for success and update the flag
1455 card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
1459 * Select timing interface
1461 err = mmc_select_timing(card);
/* HS200 needs tuning first; HS400 is entered from a tuned HS200 state. */
1465 if (mmc_card_hs200(card)) {
1466 err = mmc_hs200_tuning(card);
1470 err = mmc_select_hs400(card);
1473 } else if (mmc_card_hs(card)) {
1474 /* Select the desired bus width optionally */
1475 err = mmc_select_bus_width(card);
1476 if (!IS_ERR_VALUE(err)) {
1477 err = mmc_select_hs_ddr(card);
1484 * Choose the power class with selected bus interface
1486 mmc_select_powerclass(card);
1489 * Enable HPI feature (if supported)
1491 if (card->ext_csd.hpi) {
1492 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1493 EXT_CSD_HPI_MGMT, 1,
1494 card->ext_csd.generic_cmd6_time);
1495 if (err && err != -EBADMSG)
1498 pr_warn("%s: Enabling HPI failed\n",
1499 mmc_hostname(card->host));
1502 card->ext_csd.hpi_en = 1;
1506 * If cache size is higher than 0, this indicates
1507 * the existence of cache and it can be turned on.
1509 if (card->ext_csd.cache_size > 0) {
1510 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1511 EXT_CSD_CACHE_CTRL, 1,
1512 card->ext_csd.generic_cmd6_time);
1513 if (err && err != -EBADMSG)
1517 * Only if no error, cache is turned on successfully.
1520 pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
1521 mmc_hostname(card->host), err);
1522 card->ext_csd.cache_ctrl = 0;
1525 card->ext_csd.cache_ctrl = 1;
1530 * The mandatory minimum values are defined for packed command.
1533 if (card->ext_csd.max_packed_writes >= 3 &&
1534 card->ext_csd.max_packed_reads >= 5 &&
1535 host->caps2 & MMC_CAP2_PACKED_CMD) {
1536 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1537 EXT_CSD_EXP_EVENTS_CTRL,
1538 EXT_CSD_PACKED_EVENT_EN,
1539 card->ext_csd.generic_cmd6_time);
1540 if (err && err != -EBADMSG)
1543 pr_warn("%s: Enabling packed event failed\n",
1544 mmc_hostname(card->host));
1545 card->ext_csd.packed_event_en = 0;
1548 card->ext_csd.packed_event_en = 1;
1560 mmc_remove_card(card);
/* Sleep (CMD5) requires EXT_CSD revision 3 (MMC v4.3) or later. */
1567 static int mmc_can_sleep(struct mmc_card *card)
1569 return (card && card->ext_csd.rev >= 3);
/*
 * Put the card into Sleep state with CMD5 (SLEEP_AWAKE) after
 * deselecting it. The sleep timeout comes from the card's S_A_TIMEOUT
 * field (units of 100ns, hence the /10000 to get ms).
 */
1572 static int mmc_sleep(struct mmc_host *host)
1574 struct mmc_command cmd = {0};
1575 struct mmc_card *card = host->card;
1576 unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
1579 err = mmc_deselect_cards(host);
1583 cmd.opcode = MMC_SLEEP_AWAKE;
1584 cmd.arg = card->rca << 16;
1588 * If the max_busy_timeout of the host is specified, validate it against
1589 * the sleep cmd timeout. A failure means we need to prevent the host
1590 * from doing hw busy detection, which is done by converting to a R1
1591 * response instead of a R1B.
1593 if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
1594 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1596 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1597 cmd.busy_timeout = timeout_ms;
1600 err = mmc_wait_for_cmd(host, &cmd, 0);
1605 * If the host does not wait while the card signals busy, then we
1606 * will have to wait the sleep/awake timeout. Note, we cannot use the
1607 * SEND_STATUS command to poll the status because that command (and most
1608 * others) is invalid while the card sleeps.
1610 if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
1611 mmc_delay(timeout_ms);
/* True when the card has power-off notification enabled (POWER_ON set). */
1616 static int mmc_can_poweroff_notify(const struct mmc_card *card)
1619 mmc_card_mmc(card) &&
1620 (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
/*
 * Send POWER_OFF_NOTIFICATION via CMD6. A long notification uses the
 * card's POWER_OFF_LONG_TIME timeout instead of the generic CMD6 one.
 */
1623 static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
1625 unsigned int timeout = card->ext_csd.generic_cmd6_time;
1628 /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
1629 if (notify_type == EXT_CSD_POWER_OFF_LONG)
1630 timeout = card->ext_csd.power_off_longtime;
1632 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1633 EXT_CSD_POWER_OFF_NOTIFICATION,
1634 notify_type, timeout, true, false, false);
1636 pr_err("%s: Power Off Notification timed out, %u\n",
1637 mmc_hostname(card->host), timeout);
1639 /* Disable the power off notification after the switch operation. */
1640 card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
1646 * Host is being removed. Free up the current card.
/* Host is being removed: free the attached card. */
1648 static void mmc_remove(struct mmc_host *host)
1651 BUG_ON(!host->card);
1653 mmc_remove_card(host->card);
1658 * Card detection - card is alive.
/* Card-alive check: a successful SEND_STATUS means the card is present. */
1660 static int mmc_alive(struct mmc_host *host)
1662 return mmc_send_status(host->card, NULL);
1666 * Card detection callback from host.
/*
 * Card detection callback: probe whether the card is still present and,
 * if it has been removed, tear down the bus and power off the host.
 */
1668 static void mmc_detect(struct mmc_host *host)
1673 BUG_ON(!host->card);
1675 mmc_get_card(host->card);
1678 * Just check if our card has been removed.
1680 err = _mmc_detect_card_removed(host);
1682 mmc_put_card(host->card);
/* Removal path: detach the bus and power down under the host claim. */
1687 mmc_claim_host(host);
1688 mmc_detach_bus(host);
1689 mmc_power_off(host);
1690 mmc_release_host(host);
/*
 * Common suspend path for system suspend (@is_suspend == true) and
 * shutdown. Stops BKOPS, flushes the cache, then notifies/sleeps/
 * deselects the card before powering off and marking it suspended.
 */
1694 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
1697 unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
1698 EXT_CSD_POWER_OFF_LONG;
1701 BUG_ON(!host->card);
1703 mmc_claim_host(host);
1705 if (mmc_card_suspended(host->card))
1708 if (mmc_card_doing_bkops(host->card)) {
1709 err = mmc_stop_bkops(host->card);
1714 err = mmc_flush_cache(host->card);
/* Prefer power-off notify (needs a full power cycle for suspend),
 * then sleep, and as a last resort just deselect (native bus only). */
1718 if (mmc_can_poweroff_notify(host->card) &&
1719 ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
1720 err = mmc_poweroff_notify(host->card, notify_type);
1721 else if (mmc_can_sleep(host->card))
1722 err = mmc_sleep(host);
1723 else if (!mmc_host_is_spi(host))
1724 err = mmc_deselect_cards(host);
1727 mmc_power_off(host);
1728 mmc_card_set_suspended(host->card);
1731 mmc_release_host(host);
/*
 * System-suspend callback: suspend the card and disable runtime PM
 * for its device on success.
 */
1738 static int mmc_suspend(struct mmc_host *host)
1742 err = _mmc_suspend(host, true);
1744 pm_runtime_disable(&host->card->dev);
1745 pm_runtime_set_suspended(&host->card->dev);
1752 * This function tries to determine if the same card is still present
1753 * and, if so, restore all state to it.
/*
 * Common resume path: power the host back up and fully reinitialise the
 * (same) card via mmc_init_card(), then clear the suspended flag.
 */
1755 static int _mmc_resume(struct mmc_host *host)
1760 BUG_ON(!host->card);
1762 mmc_claim_host(host);
1764 if (!mmc_card_suspended(host->card))
1767 mmc_power_up(host, host->card->ocr);
1768 err = mmc_init_card(host, host->card->ocr, host->card);
1769 mmc_card_clr_suspended(host->card);
1772 mmc_release_host(host);
/*
 * Shutdown callback: issue a long power-off notification. If the card
 * was runtime-suspended without a full power cycle, resume it first so
 * the notification can be sent.
 */
1779 static int mmc_shutdown(struct mmc_host *host)
1784 * In a specific case for poweroff notify, we need to resume the card
1785 * before we can shutdown it properly.
1787 if (mmc_can_poweroff_notify(host->card) &&
1788 !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
1789 err = _mmc_resume(host);
1792 err = _mmc_suspend(host, false);
1798 * Callback for resume.
/*
 * System-resume callback. When MMC_CAP_RUNTIME_RESUME is set the actual
 * resume is deferred to the runtime-resume path; otherwise resume now
 * and mark the device active. Runtime PM is re-enabled in both cases.
 */
1800 static int mmc_resume(struct mmc_host *host)
1804 if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
1805 err = _mmc_resume(host);
1806 pm_runtime_set_active(&host->card->dev);
1807 pm_runtime_mark_last_busy(&host->card->dev);
1809 pm_runtime_enable(&host->card->dev);
1815 * Callback for runtime_suspend.
1817 static int mmc_runtime_suspend(struct mmc_host *host)
1821 if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
1824 err = _mmc_suspend(host, true);
1826 pr_err("%s: error %d doing aggessive suspend\n",
1827 mmc_hostname(host), err);
1833 * Callback for runtime_resume.
1835 static int mmc_runtime_resume(struct mmc_host *host)
1839 if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
1842 err = _mmc_resume(host);
1844 pr_err("%s: error %d doing aggessive resume\n",
1845 mmc_hostname(host), err);
/* Power-restore callback: reinitialise the existing card under claim. */
1850 static int mmc_power_restore(struct mmc_host *host)
1854 mmc_claim_host(host);
1855 ret = mmc_init_card(host, host->card->ocr, host->card);
1856 mmc_release_host(host);
/* Bus operations for native (e)MMC cards, registered via mmc_attach_bus(). */
1861 static const struct mmc_bus_ops mmc_ops = {
1862 .remove = mmc_remove,
1863 .detect = mmc_detect,
1864 .suspend = mmc_suspend,
1865 .resume = mmc_resume,
1866 .runtime_suspend = mmc_runtime_suspend,
1867 .runtime_resume = mmc_runtime_resume,
1868 .power_restore = mmc_power_restore,
1870 .shutdown = mmc_shutdown,
1874 * Starting point for MMC card init.
1876 int mmc_attach_mmc(struct mmc_host *host)
1882 WARN_ON(!host->claimed);
1884 /* Set correct bus mode for MMC before attempting attach */
1885 if (!mmc_host_is_spi(host))
1886 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1888 err = mmc_send_op_cond(host, 0, &ocr);
1892 mmc_attach_bus(host, &mmc_ops);
1893 if (host->ocr_avail_mmc)
1894 host->ocr_avail = host->ocr_avail_mmc;
1897 * We need to get OCR a different way for SPI.
1899 if (mmc_host_is_spi(host)) {
1900 err = mmc_spi_read_ocr(host, 1, &ocr);
1905 rocr = mmc_select_voltage(host, ocr);
1908 * Can we support the voltage of the card?
1916 * Detect and init the card.
1918 err = mmc_init_card(host, rocr, NULL);
1922 mmc_release_host(host);
1923 err = mmc_add_card(host->card);
1924 mmc_claim_host(host);
1931 mmc_release_host(host);
1932 mmc_remove_card(host->card);
1933 mmc_claim_host(host);
1936 mmc_detach_bus(host);
1938 pr_err("%s: error %d whilst initialising MMC card\n",
1939 mmc_hostname(host), err);