/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/delay.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
36 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
37 unsigned int mask, unsigned int val,
38 bool *change, bool force_write);
40 static int _regmap_bus_reg_read(void *context, unsigned int reg,
42 static int _regmap_bus_read(void *context, unsigned int reg,
44 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
46 static int _regmap_bus_reg_write(void *context, unsigned int reg,
48 static int _regmap_bus_raw_write(void *context, unsigned int reg,
51 bool regmap_reg_in_ranges(unsigned int reg,
52 const struct regmap_range *ranges,
55 const struct regmap_range *r;
58 for (i = 0, r = ranges; i < nranges; i++, r++)
59 if (regmap_reg_in_range(reg, r))
63 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
65 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
66 const struct regmap_access_table *table)
68 /* Check "no ranges" first */
69 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
72 /* In case zero "yes ranges" are supplied, any reg is OK */
73 if (!table->n_yes_ranges)
76 return regmap_reg_in_ranges(reg, table->yes_ranges,
79 EXPORT_SYMBOL_GPL(regmap_check_range_table);
81 bool regmap_writeable(struct regmap *map, unsigned int reg)
83 if (map->max_register && reg > map->max_register)
86 if (map->writeable_reg)
87 return map->writeable_reg(map->dev, reg);
90 return regmap_check_range_table(map, reg, map->wr_table);
95 bool regmap_readable(struct regmap *map, unsigned int reg)
100 if (map->max_register && reg > map->max_register)
103 if (map->format.format_write)
106 if (map->readable_reg)
107 return map->readable_reg(map->dev, reg);
110 return regmap_check_range_table(map, reg, map->rd_table);
115 bool regmap_volatile(struct regmap *map, unsigned int reg)
117 if (!map->format.format_write && !regmap_readable(map, reg))
120 if (map->volatile_reg)
121 return map->volatile_reg(map->dev, reg);
123 if (map->volatile_table)
124 return regmap_check_range_table(map, reg, map->volatile_table);
132 bool regmap_precious(struct regmap *map, unsigned int reg)
134 if (!regmap_readable(map, reg))
137 if (map->precious_reg)
138 return map->precious_reg(map->dev, reg);
140 if (map->precious_table)
141 return regmap_check_range_table(map, reg, map->precious_table);
146 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
151 for (i = 0; i < num; i++)
152 if (!regmap_volatile(map, reg + i))
158 static void regmap_format_2_6_write(struct regmap *map,
159 unsigned int reg, unsigned int val)
161 u8 *out = map->work_buf;
163 *out = (reg << 6) | val;
166 static void regmap_format_4_12_write(struct regmap *map,
167 unsigned int reg, unsigned int val)
169 __be16 *out = map->work_buf;
170 *out = cpu_to_be16((reg << 12) | val);
173 static void regmap_format_7_9_write(struct regmap *map,
174 unsigned int reg, unsigned int val)
176 __be16 *out = map->work_buf;
177 *out = cpu_to_be16((reg << 9) | val);
180 static void regmap_format_10_14_write(struct regmap *map,
181 unsigned int reg, unsigned int val)
183 u8 *out = map->work_buf;
186 out[1] = (val >> 8) | (reg << 6);
190 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
197 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
201 b[0] = cpu_to_be16(val << shift);
204 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
208 b[0] = cpu_to_le16(val << shift);
211 static void regmap_format_16_native(void *buf, unsigned int val,
214 *(u16 *)buf = val << shift;
217 static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
228 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
232 b[0] = cpu_to_be32(val << shift);
235 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
239 b[0] = cpu_to_le32(val << shift);
242 static void regmap_format_32_native(void *buf, unsigned int val,
245 *(u32 *)buf = val << shift;
248 static void regmap_parse_inplace_noop(void *buf)
252 static unsigned int regmap_parse_8(const void *buf)
259 static unsigned int regmap_parse_16_be(const void *buf)
261 const __be16 *b = buf;
263 return be16_to_cpu(b[0]);
266 static unsigned int regmap_parse_16_le(const void *buf)
268 const __le16 *b = buf;
270 return le16_to_cpu(b[0]);
273 static void regmap_parse_16_be_inplace(void *buf)
277 b[0] = be16_to_cpu(b[0]);
280 static void regmap_parse_16_le_inplace(void *buf)
284 b[0] = le16_to_cpu(b[0]);
287 static unsigned int regmap_parse_16_native(const void *buf)
292 static unsigned int regmap_parse_24(const void *buf)
295 unsigned int ret = b[2];
296 ret |= ((unsigned int)b[1]) << 8;
297 ret |= ((unsigned int)b[0]) << 16;
302 static unsigned int regmap_parse_32_be(const void *buf)
304 const __be32 *b = buf;
306 return be32_to_cpu(b[0]);
309 static unsigned int regmap_parse_32_le(const void *buf)
311 const __le32 *b = buf;
313 return le32_to_cpu(b[0]);
316 static void regmap_parse_32_be_inplace(void *buf)
320 b[0] = be32_to_cpu(b[0]);
323 static void regmap_parse_32_le_inplace(void *buf)
327 b[0] = le32_to_cpu(b[0]);
330 static unsigned int regmap_parse_32_native(const void *buf)
335 static void regmap_lock_mutex(void *__map)
337 struct regmap *map = __map;
338 mutex_lock(&map->mutex);
341 static void regmap_unlock_mutex(void *__map)
343 struct regmap *map = __map;
344 mutex_unlock(&map->mutex);
347 static void regmap_lock_spinlock(void *__map)
348 __acquires(&map->spinlock)
350 struct regmap *map = __map;
353 spin_lock_irqsave(&map->spinlock, flags);
354 map->spinlock_flags = flags;
357 static void regmap_unlock_spinlock(void *__map)
358 __releases(&map->spinlock)
360 struct regmap *map = __map;
361 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
static void dev_get_regmap_release(struct device *dev, void *res)
{
        /*
         * We don't actually have anything to do here; the goal here
         * is not to manage the regmap but to provide a simple way to
         * get the regmap back given a struct device.
         */
}
373 static bool _regmap_range_add(struct regmap *map,
374 struct regmap_range_node *data)
376 struct rb_root *root = &map->range_tree;
377 struct rb_node **new = &(root->rb_node), *parent = NULL;
380 struct regmap_range_node *this =
381 container_of(*new, struct regmap_range_node, node);
384 if (data->range_max < this->range_min)
385 new = &((*new)->rb_left);
386 else if (data->range_min > this->range_max)
387 new = &((*new)->rb_right);
392 rb_link_node(&data->node, parent, new);
393 rb_insert_color(&data->node, root);
398 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
401 struct rb_node *node = map->range_tree.rb_node;
404 struct regmap_range_node *this =
405 container_of(node, struct regmap_range_node, node);
407 if (reg < this->range_min)
408 node = node->rb_left;
409 else if (reg > this->range_max)
410 node = node->rb_right;
418 static void regmap_range_exit(struct regmap *map)
420 struct rb_node *next;
421 struct regmap_range_node *range_node;
423 next = rb_first(&map->range_tree);
425 range_node = rb_entry(next, struct regmap_range_node, node);
426 next = rb_next(&range_node->node);
427 rb_erase(&range_node->node, &map->range_tree);
431 kfree(map->selector_work_buf);
434 int regmap_attach_dev(struct device *dev, struct regmap *map,
435 const struct regmap_config *config)
441 regmap_debugfs_init(map, config->name);
443 /* Add a devres resource for dev_get_regmap() */
444 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
446 regmap_debugfs_exit(map);
454 EXPORT_SYMBOL_GPL(regmap_attach_dev);
456 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
457 const struct regmap_config *config)
459 enum regmap_endian endian;
461 /* Retrieve the endianness specification from the regmap config */
462 endian = config->reg_format_endian;
464 /* If the regmap config specified a non-default value, use that */
465 if (endian != REGMAP_ENDIAN_DEFAULT)
468 /* Retrieve the endianness specification from the bus config */
469 if (bus && bus->reg_format_endian_default)
470 endian = bus->reg_format_endian_default;
472 /* If the bus specified a non-default value, use that */
473 if (endian != REGMAP_ENDIAN_DEFAULT)
476 /* Use this if no other value was found */
477 return REGMAP_ENDIAN_BIG;
480 enum regmap_endian regmap_get_val_endian(struct device *dev,
481 const struct regmap_bus *bus,
482 const struct regmap_config *config)
484 struct device_node *np;
485 enum regmap_endian endian;
487 /* Retrieve the endianness specification from the regmap config */
488 endian = config->val_format_endian;
490 /* If the regmap config specified a non-default value, use that */
491 if (endian != REGMAP_ENDIAN_DEFAULT)
494 /* If the dev and dev->of_node exist try to get endianness from DT */
495 if (dev && dev->of_node) {
498 /* Parse the device's DT node for an endianness specification */
499 if (of_property_read_bool(np, "big-endian"))
500 endian = REGMAP_ENDIAN_BIG;
501 else if (of_property_read_bool(np, "little-endian"))
502 endian = REGMAP_ENDIAN_LITTLE;
504 /* If the endianness was specified in DT, use that */
505 if (endian != REGMAP_ENDIAN_DEFAULT)
509 /* Retrieve the endianness specification from the bus config */
510 if (bus && bus->val_format_endian_default)
511 endian = bus->val_format_endian_default;
513 /* If the bus specified a non-default value, use that */
514 if (endian != REGMAP_ENDIAN_DEFAULT)
517 /* Use this if no other value was found */
518 return REGMAP_ENDIAN_BIG;
520 EXPORT_SYMBOL_GPL(regmap_get_val_endian);
522 struct regmap *__regmap_init(struct device *dev,
523 const struct regmap_bus *bus,
525 const struct regmap_config *config,
526 struct lock_class_key *lock_key,
527 const char *lock_name)
531 enum regmap_endian reg_endian, val_endian;
537 map = kzalloc(sizeof(*map), GFP_KERNEL);
543 if (config->lock && config->unlock) {
544 map->lock = config->lock;
545 map->unlock = config->unlock;
546 map->lock_arg = config->lock_arg;
548 if ((bus && bus->fast_io) ||
550 spin_lock_init(&map->spinlock);
551 map->lock = regmap_lock_spinlock;
552 map->unlock = regmap_unlock_spinlock;
553 lockdep_set_class_and_name(&map->spinlock,
554 lock_key, lock_name);
556 mutex_init(&map->mutex);
557 map->lock = regmap_lock_mutex;
558 map->unlock = regmap_unlock_mutex;
559 lockdep_set_class_and_name(&map->mutex,
560 lock_key, lock_name);
564 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
565 map->format.pad_bytes = config->pad_bits / 8;
566 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
567 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
568 config->val_bits + config->pad_bits, 8);
569 map->reg_shift = config->pad_bits % 8;
570 if (config->reg_stride)
571 map->reg_stride = config->reg_stride;
574 map->use_single_read = config->use_single_rw || !bus || !bus->read;
575 map->use_single_write = config->use_single_rw || !bus || !bus->write;
576 map->can_multi_write = config->can_multi_write && bus && bus->write;
578 map->max_raw_read = bus->max_raw_read;
579 map->max_raw_write = bus->max_raw_write;
583 map->bus_context = bus_context;
584 map->max_register = config->max_register;
585 map->wr_table = config->wr_table;
586 map->rd_table = config->rd_table;
587 map->volatile_table = config->volatile_table;
588 map->precious_table = config->precious_table;
589 map->writeable_reg = config->writeable_reg;
590 map->readable_reg = config->readable_reg;
591 map->volatile_reg = config->volatile_reg;
592 map->precious_reg = config->precious_reg;
593 map->cache_type = config->cache_type;
594 map->name = config->name;
596 spin_lock_init(&map->async_lock);
597 INIT_LIST_HEAD(&map->async_list);
598 INIT_LIST_HEAD(&map->async_free);
599 init_waitqueue_head(&map->async_waitq);
601 if (config->read_flag_mask || config->write_flag_mask) {
602 map->read_flag_mask = config->read_flag_mask;
603 map->write_flag_mask = config->write_flag_mask;
605 map->read_flag_mask = bus->read_flag_mask;
609 map->reg_read = config->reg_read;
610 map->reg_write = config->reg_write;
612 map->defer_caching = false;
613 goto skip_format_initialization;
614 } else if (!bus->read || !bus->write) {
615 map->reg_read = _regmap_bus_reg_read;
616 map->reg_write = _regmap_bus_reg_write;
618 map->defer_caching = false;
619 goto skip_format_initialization;
621 map->reg_read = _regmap_bus_read;
622 map->reg_update_bits = bus->reg_update_bits;
625 reg_endian = regmap_get_reg_endian(bus, config);
626 val_endian = regmap_get_val_endian(dev, bus, config);
628 switch (config->reg_bits + map->reg_shift) {
630 switch (config->val_bits) {
632 map->format.format_write = regmap_format_2_6_write;
640 switch (config->val_bits) {
642 map->format.format_write = regmap_format_4_12_write;
650 switch (config->val_bits) {
652 map->format.format_write = regmap_format_7_9_write;
660 switch (config->val_bits) {
662 map->format.format_write = regmap_format_10_14_write;
670 map->format.format_reg = regmap_format_8;
674 switch (reg_endian) {
675 case REGMAP_ENDIAN_BIG:
676 map->format.format_reg = regmap_format_16_be;
678 case REGMAP_ENDIAN_NATIVE:
679 map->format.format_reg = regmap_format_16_native;
687 if (reg_endian != REGMAP_ENDIAN_BIG)
689 map->format.format_reg = regmap_format_24;
693 switch (reg_endian) {
694 case REGMAP_ENDIAN_BIG:
695 map->format.format_reg = regmap_format_32_be;
697 case REGMAP_ENDIAN_NATIVE:
698 map->format.format_reg = regmap_format_32_native;
709 if (val_endian == REGMAP_ENDIAN_NATIVE)
710 map->format.parse_inplace = regmap_parse_inplace_noop;
712 switch (config->val_bits) {
714 map->format.format_val = regmap_format_8;
715 map->format.parse_val = regmap_parse_8;
716 map->format.parse_inplace = regmap_parse_inplace_noop;
719 switch (val_endian) {
720 case REGMAP_ENDIAN_BIG:
721 map->format.format_val = regmap_format_16_be;
722 map->format.parse_val = regmap_parse_16_be;
723 map->format.parse_inplace = regmap_parse_16_be_inplace;
725 case REGMAP_ENDIAN_LITTLE:
726 map->format.format_val = regmap_format_16_le;
727 map->format.parse_val = regmap_parse_16_le;
728 map->format.parse_inplace = regmap_parse_16_le_inplace;
730 case REGMAP_ENDIAN_NATIVE:
731 map->format.format_val = regmap_format_16_native;
732 map->format.parse_val = regmap_parse_16_native;
739 if (val_endian != REGMAP_ENDIAN_BIG)
741 map->format.format_val = regmap_format_24;
742 map->format.parse_val = regmap_parse_24;
745 switch (val_endian) {
746 case REGMAP_ENDIAN_BIG:
747 map->format.format_val = regmap_format_32_be;
748 map->format.parse_val = regmap_parse_32_be;
749 map->format.parse_inplace = regmap_parse_32_be_inplace;
751 case REGMAP_ENDIAN_LITTLE:
752 map->format.format_val = regmap_format_32_le;
753 map->format.parse_val = regmap_parse_32_le;
754 map->format.parse_inplace = regmap_parse_32_le_inplace;
756 case REGMAP_ENDIAN_NATIVE:
757 map->format.format_val = regmap_format_32_native;
758 map->format.parse_val = regmap_parse_32_native;
766 if (map->format.format_write) {
767 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
768 (val_endian != REGMAP_ENDIAN_BIG))
770 map->use_single_write = true;
773 if (!map->format.format_write &&
774 !(map->format.format_reg && map->format.format_val))
777 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
778 if (map->work_buf == NULL) {
783 if (map->format.format_write) {
784 map->defer_caching = false;
785 map->reg_write = _regmap_bus_formatted_write;
786 } else if (map->format.format_val) {
787 map->defer_caching = true;
788 map->reg_write = _regmap_bus_raw_write;
791 skip_format_initialization:
793 map->range_tree = RB_ROOT;
794 for (i = 0; i < config->num_ranges; i++) {
795 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
796 struct regmap_range_node *new;
799 if (range_cfg->range_max < range_cfg->range_min) {
800 dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
801 range_cfg->range_max, range_cfg->range_min);
805 if (range_cfg->range_max > map->max_register) {
806 dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
807 range_cfg->range_max, map->max_register);
811 if (range_cfg->selector_reg > map->max_register) {
813 "Invalid range %d: selector out of map\n", i);
817 if (range_cfg->window_len == 0) {
818 dev_err(map->dev, "Invalid range %d: window_len 0\n",
823 /* Make sure, that this register range has no selector
824 or data window within its boundary */
825 for (j = 0; j < config->num_ranges; j++) {
826 unsigned sel_reg = config->ranges[j].selector_reg;
827 unsigned win_min = config->ranges[j].window_start;
828 unsigned win_max = win_min +
829 config->ranges[j].window_len - 1;
831 /* Allow data window inside its own virtual range */
835 if (range_cfg->range_min <= sel_reg &&
836 sel_reg <= range_cfg->range_max) {
838 "Range %d: selector for %d in window\n",
843 if (!(win_max < range_cfg->range_min ||
844 win_min > range_cfg->range_max)) {
846 "Range %d: window for %d in window\n",
852 new = kzalloc(sizeof(*new), GFP_KERNEL);
859 new->name = range_cfg->name;
860 new->range_min = range_cfg->range_min;
861 new->range_max = range_cfg->range_max;
862 new->selector_reg = range_cfg->selector_reg;
863 new->selector_mask = range_cfg->selector_mask;
864 new->selector_shift = range_cfg->selector_shift;
865 new->window_start = range_cfg->window_start;
866 new->window_len = range_cfg->window_len;
868 if (!_regmap_range_add(map, new)) {
869 dev_err(map->dev, "Failed to add range %d\n", i);
874 if (map->selector_work_buf == NULL) {
875 map->selector_work_buf =
876 kzalloc(map->format.buf_size, GFP_KERNEL);
877 if (map->selector_work_buf == NULL) {
884 ret = regcache_init(map, config);
889 ret = regmap_attach_dev(dev, map, config);
899 regmap_range_exit(map);
900 kfree(map->work_buf);
906 EXPORT_SYMBOL_GPL(__regmap_init);
908 static void devm_regmap_release(struct device *dev, void *res)
910 regmap_exit(*(struct regmap **)res);
913 struct regmap *__devm_regmap_init(struct device *dev,
914 const struct regmap_bus *bus,
916 const struct regmap_config *config,
917 struct lock_class_key *lock_key,
918 const char *lock_name)
920 struct regmap **ptr, *regmap;
922 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
924 return ERR_PTR(-ENOMEM);
926 regmap = __regmap_init(dev, bus, bus_context, config,
927 lock_key, lock_name);
928 if (!IS_ERR(regmap)) {
930 devres_add(dev, ptr);
937 EXPORT_SYMBOL_GPL(__devm_regmap_init);
939 static void regmap_field_init(struct regmap_field *rm_field,
940 struct regmap *regmap, struct reg_field reg_field)
942 rm_field->regmap = regmap;
943 rm_field->reg = reg_field.reg;
944 rm_field->shift = reg_field.lsb;
945 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
946 rm_field->id_size = reg_field.id_size;
947 rm_field->id_offset = reg_field.id_offset;
951 * devm_regmap_field_alloc(): Allocate and initialise a register field
954 * @dev: Device that will be interacted with
955 * @regmap: regmap bank in which this register field is located.
956 * @reg_field: Register field with in the bank.
958 * The return value will be an ERR_PTR() on error or a valid pointer
959 * to a struct regmap_field. The regmap_field will be automatically freed
960 * by the device management code.
962 struct regmap_field *devm_regmap_field_alloc(struct device *dev,
963 struct regmap *regmap, struct reg_field reg_field)
965 struct regmap_field *rm_field = devm_kzalloc(dev,
966 sizeof(*rm_field), GFP_KERNEL);
968 return ERR_PTR(-ENOMEM);
970 regmap_field_init(rm_field, regmap, reg_field);
975 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
/**
 * devm_regmap_field_free(): Free register field allocated using
 * devm_regmap_field_alloc. Usually drivers need not call this function,
 * as the memory allocated via devm will be freed as per device-driver
 * life-cycle.
 *
 * @dev: Device that will be interacted with
 * @field: regmap field which should be freed.
 */
void devm_regmap_field_free(struct device *dev,
        struct regmap_field *field)
{
        devm_kfree(dev, field);
}
EXPORT_SYMBOL_GPL(devm_regmap_field_free);
994 * regmap_field_alloc(): Allocate and initialise a register field
997 * @regmap: regmap bank in which this register field is located.
998 * @reg_field: Register field with in the bank.
1000 * The return value will be an ERR_PTR() on error or a valid pointer
1001 * to a struct regmap_field. The regmap_field should be freed by the
1002 * user once its finished working with it using regmap_field_free().
1004 struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1005 struct reg_field reg_field)
1007 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1010 return ERR_PTR(-ENOMEM);
1012 regmap_field_init(rm_field, regmap, reg_field);
1016 EXPORT_SYMBOL_GPL(regmap_field_alloc);
/**
 * regmap_field_free(): Free register field allocated using
 * regmap_field_alloc.
 *
 * @field: regmap field which should be freed.
 */
void regmap_field_free(struct regmap_field *field)
{
        kfree(field);
}
EXPORT_SYMBOL_GPL(regmap_field_free);
1030 * regmap_reinit_cache(): Reinitialise the current register cache
1032 * @map: Register map to operate on.
1033 * @config: New configuration. Only the cache data will be used.
1035 * Discard any existing register cache for the map and initialize a
1036 * new cache. This can be used to restore the cache to defaults or to
1037 * update the cache configuration to reflect runtime discovery of the
1040 * No explicit locking is done here, the user needs to ensure that
1041 * this function will not race with other calls to regmap.
1043 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
1046 regmap_debugfs_exit(map);
1048 map->max_register = config->max_register;
1049 map->writeable_reg = config->writeable_reg;
1050 map->readable_reg = config->readable_reg;
1051 map->volatile_reg = config->volatile_reg;
1052 map->precious_reg = config->precious_reg;
1053 map->cache_type = config->cache_type;
1055 regmap_debugfs_init(map, config->name);
1057 map->cache_bypass = false;
1058 map->cache_only = false;
1060 return regcache_init(map, config);
1062 EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1065 * regmap_exit(): Free a previously allocated register map
1067 void regmap_exit(struct regmap *map)
1069 struct regmap_async *async;
1072 regmap_debugfs_exit(map);
1073 regmap_range_exit(map);
1074 if (map->bus && map->bus->free_context)
1075 map->bus->free_context(map->bus_context);
1076 kfree(map->work_buf);
1077 while (!list_empty(&map->async_free)) {
1078 async = list_first_entry_or_null(&map->async_free,
1079 struct regmap_async,
1081 list_del(&async->list);
1082 kfree(async->work_buf);
1087 EXPORT_SYMBOL_GPL(regmap_exit);
1089 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1091 struct regmap **r = res;
1097 /* If the user didn't specify a name match any */
1099 return (*r)->name == data;
1105 * dev_get_regmap(): Obtain the regmap (if any) for a device
1107 * @dev: Device to retrieve the map for
1108 * @name: Optional name for the register map, usually NULL.
1110 * Returns the regmap for the device if one is present, or NULL. If
1111 * name is specified then it must match the name specified when
1112 * registering the device, if it is NULL then the first regmap found
1113 * will be used. Devices with multiple register maps are very rare,
1114 * generic code should normally not need to specify a name.
1116 struct regmap *dev_get_regmap(struct device *dev, const char *name)
1118 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1119 dev_get_regmap_match, (void *)name);
1125 EXPORT_SYMBOL_GPL(dev_get_regmap);
1128 * regmap_get_device(): Obtain the device from a regmap
1130 * @map: Register map to operate on.
1132 * Returns the underlying device that the regmap has been created for.
1134 struct device *regmap_get_device(struct regmap *map)
1138 EXPORT_SYMBOL_GPL(regmap_get_device);
1140 static int _regmap_select_page(struct regmap *map, unsigned int *reg,
1141 struct regmap_range_node *range,
1142 unsigned int val_num)
1144 void *orig_work_buf;
1145 unsigned int win_offset;
1146 unsigned int win_page;
1150 win_offset = (*reg - range->range_min) % range->window_len;
1151 win_page = (*reg - range->range_min) / range->window_len;
1154 /* Bulk write shouldn't cross range boundary */
1155 if (*reg + val_num - 1 > range->range_max)
1158 /* ... or single page boundary */
1159 if (val_num > range->window_len - win_offset)
1163 /* It is possible to have selector register inside data window.
1164 In that case, selector register is located on every page and
1165 it needs no page switching, when accessed alone. */
1167 range->window_start + win_offset != range->selector_reg) {
1168 /* Use separate work_buf during page switching */
1169 orig_work_buf = map->work_buf;
1170 map->work_buf = map->selector_work_buf;
1172 ret = _regmap_update_bits(map, range->selector_reg,
1173 range->selector_mask,
1174 win_page << range->selector_shift,
1177 map->work_buf = orig_work_buf;
1183 *reg = range->window_start + win_offset;
1188 int _regmap_raw_write(struct regmap *map, unsigned int reg,
1189 const void *val, size_t val_len)
1191 struct regmap_range_node *range;
1192 unsigned long flags;
1193 u8 *u8 = map->work_buf;
1194 void *work_val = map->work_buf + map->format.reg_bytes +
1195 map->format.pad_bytes;
1197 int ret = -ENOTSUPP;
1203 /* Check for unwritable registers before we start */
1204 if (map->writeable_reg)
1205 for (i = 0; i < val_len / map->format.val_bytes; i++)
1206 if (!map->writeable_reg(map->dev,
1207 reg + (i * map->reg_stride)))
1210 if (!map->cache_bypass && map->format.parse_val) {
1212 int val_bytes = map->format.val_bytes;
1213 for (i = 0; i < val_len / val_bytes; i++) {
1214 ival = map->format.parse_val(val + (i * val_bytes));
1215 ret = regcache_write(map, reg + (i * map->reg_stride),
1219 "Error in caching of register: %x ret: %d\n",
1224 if (map->cache_only) {
1225 map->cache_dirty = true;
1230 range = _regmap_range_lookup(map, reg);
1232 int val_num = val_len / map->format.val_bytes;
1233 int win_offset = (reg - range->range_min) % range->window_len;
1234 int win_residue = range->window_len - win_offset;
1236 /* If the write goes beyond the end of the window split it */
1237 while (val_num > win_residue) {
1238 dev_dbg(map->dev, "Writing window %d/%zu\n",
1239 win_residue, val_len / map->format.val_bytes);
1240 ret = _regmap_raw_write(map, reg, val, win_residue *
1241 map->format.val_bytes);
1246 val_num -= win_residue;
1247 val += win_residue * map->format.val_bytes;
1248 val_len -= win_residue * map->format.val_bytes;
1250 win_offset = (reg - range->range_min) %
1252 win_residue = range->window_len - win_offset;
1255 ret = _regmap_select_page(map, ®, range, val_num);
1260 map->format.format_reg(map->work_buf, reg, map->reg_shift);
1262 u8[0] |= map->write_flag_mask;
1265 * Essentially all I/O mechanisms will be faster with a single
1266 * buffer to write. Since register syncs often generate raw
1267 * writes of single registers optimise that case.
1269 if (val != work_val && val_len == map->format.val_bytes) {
1270 memcpy(work_val, val, map->format.val_bytes);
1274 if (map->async && map->bus->async_write) {
1275 struct regmap_async *async;
1277 trace_regmap_async_write_start(map, reg, val_len);
1279 spin_lock_irqsave(&map->async_lock, flags);
1280 async = list_first_entry_or_null(&map->async_free,
1281 struct regmap_async,
1284 list_del(&async->list);
1285 spin_unlock_irqrestore(&map->async_lock, flags);
1288 async = map->bus->async_alloc();
1292 async->work_buf = kzalloc(map->format.buf_size,
1293 GFP_KERNEL | GFP_DMA);
1294 if (!async->work_buf) {
1302 /* If the caller supplied the value we can use it safely. */
1303 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1304 map->format.reg_bytes + map->format.val_bytes);
1306 spin_lock_irqsave(&map->async_lock, flags);
1307 list_add_tail(&async->list, &map->async_list);
1308 spin_unlock_irqrestore(&map->async_lock, flags);
1310 if (val != work_val)
1311 ret = map->bus->async_write(map->bus_context,
1313 map->format.reg_bytes +
1314 map->format.pad_bytes,
1315 val, val_len, async);
1317 ret = map->bus->async_write(map->bus_context,
1319 map->format.reg_bytes +
1320 map->format.pad_bytes +
1321 val_len, NULL, 0, async);
1324 dev_err(map->dev, "Failed to schedule write: %d\n",
1327 spin_lock_irqsave(&map->async_lock, flags);
1328 list_move(&async->list, &map->async_free);
1329 spin_unlock_irqrestore(&map->async_lock, flags);
1335 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1337 /* If we're doing a single register write we can probably just
1338 * send the work_buf directly, otherwise try to do a gather
1341 if (val == work_val)
1342 ret = map->bus->write(map->bus_context, map->work_buf,
1343 map->format.reg_bytes +
1344 map->format.pad_bytes +
1346 else if (map->bus->gather_write)
1347 ret = map->bus->gather_write(map->bus_context, map->work_buf,
1348 map->format.reg_bytes +
1349 map->format.pad_bytes,
1352 /* If that didn't work fall back on linearising by hand. */
1353 if (ret == -ENOTSUPP) {
1354 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1355 buf = kzalloc(len, GFP_KERNEL);
1359 memcpy(buf, map->work_buf, map->format.reg_bytes);
1360 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1362 ret = map->bus->write(map->bus_context, buf, len);
1367 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1373 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1375 * @map: Map to check.
1377 bool regmap_can_raw_write(struct regmap *map)
1379 return map->bus && map->bus->write && map->format.format_val &&
1380 map->format.format_reg;
1382 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1385 * regmap_get_raw_read_max - Get the maximum size we can read
1387 * @map: Map to check.
1389 size_t regmap_get_raw_read_max(struct regmap *map)
1391 return map->max_raw_read;
1393 EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1396 * regmap_get_raw_write_max - Get the maximum size we can read
1398 * @map: Map to check.
1400 size_t regmap_get_raw_write_max(struct regmap *map)
1402 return map->max_raw_write;
1404 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1406 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1410 struct regmap_range_node *range;
1411 struct regmap *map = context;
1413 WARN_ON(!map->bus || !map->format.format_write);
1415 range = _regmap_range_lookup(map, reg);
1417 ret = _regmap_select_page(map, ®, range, 1);
1422 map->format.format_write(map, reg, val);
1424 trace_regmap_hw_write_start(map, reg, 1);
1426 ret = map->bus->write(map->bus_context, map->work_buf,
1427 map->format.buf_size);
1429 trace_regmap_hw_write_done(map, reg, 1);
1434 static int _regmap_bus_reg_write(void *context, unsigned int reg,
1437 struct regmap *map = context;
1439 return map->bus->reg_write(map->bus_context, reg, val);
1442 static int _regmap_bus_raw_write(void *context, unsigned int reg,
1445 struct regmap *map = context;
1447 WARN_ON(!map->bus || !map->format.format_val);
1449 map->format.format_val(map->work_buf + map->format.reg_bytes
1450 + map->format.pad_bytes, val, 0);
1451 return _regmap_raw_write(map, reg,
1453 map->format.reg_bytes +
1454 map->format.pad_bytes,
1455 map->format.val_bytes);
1458 static inline void *_regmap_map_get_context(struct regmap *map)
1460 return (map->bus) ? map : map->bus_context;
/*
 * Core single-register write path; called with the map's lock held.
 *
 * Goes through the register cache first (unless bypassed or deferred);
 * in cache-only mode the value is recorded, the cache marked dirty and
 * the hardware is not touched.  Returns 0 on success or a negative errno.
 */
int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!map->cache_bypass && !map->defer_caching) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		/* Cache-only: remember the write for a later sync. */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	/* printk-based debug aid for very early init when tracing is
	 * not yet available (see comment at the top of the file).
	 */
	if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map, reg, val);

	return map->reg_write(context, reg, val);
}
1493 * regmap_write(): Write a value to a single register
1495 * @map: Register map to write to
1496 * @reg: Register to write to
1497 * @val: Value to be written
1499 * A value of zero will be returned on success, a negative errno will
1500 * be returned in error cases.
1502 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1506 if (reg % map->reg_stride)
1509 map->lock(map->lock_arg);
1511 ret = _regmap_write(map, reg, val);
1513 map->unlock(map->lock_arg);
1517 EXPORT_SYMBOL_GPL(regmap_write);
1520 * regmap_write_async(): Write a value to a single register asynchronously
1522 * @map: Register map to write to
1523 * @reg: Register to write to
1524 * @val: Value to be written
1526 * A value of zero will be returned on success, a negative errno will
1527 * be returned in error cases.
1529 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1533 if (reg % map->reg_stride)
1536 map->lock(map->lock_arg);
1540 ret = _regmap_write(map, reg, val);
1544 map->unlock(map->lock_arg);
1548 EXPORT_SYMBOL_GPL(regmap_write_async);
1551 * regmap_raw_write(): Write raw values to one or more registers
1553 * @map: Register map to write to
1554 * @reg: Initial register to write to
1555 * @val: Block of data to be written, laid out for direct transmission to the
1557 * @val_len: Length of data pointed to by val.
1559 * This function is intended to be used for things like firmware
1560 * download where a large block of data needs to be transferred to the
1561 * device. No formatting will be done on the data provided.
1563 * A value of zero will be returned on success, a negative errno will
1564 * be returned in error cases.
1566 int regmap_raw_write(struct regmap *map, unsigned int reg,
1567 const void *val, size_t val_len)
1571 if (!regmap_can_raw_write(map))
1573 if (val_len % map->format.val_bytes)
1575 if (map->max_raw_write && map->max_raw_write > val_len)
1578 map->lock(map->lock_arg);
1580 ret = _regmap_raw_write(map, reg, val, val_len);
1582 map->unlock(map->lock_arg);
1586 EXPORT_SYMBOL_GPL(regmap_raw_write);
1589 * regmap_field_write(): Write a value to a single register field
1591 * @field: Register field to write to
1592 * @val: Value to be written
1594 * A value of zero will be returned on success, a negative errno will
1595 * be returned in error cases.
1597 int regmap_field_write(struct regmap_field *field, unsigned int val)
1599 return regmap_update_bits(field->regmap, field->reg,
1600 field->mask, val << field->shift);
1602 EXPORT_SYMBOL_GPL(regmap_field_write);
1605 * regmap_field_update_bits(): Perform a read/modify/write cycle
1606 * on the register field
1608 * @field: Register field to write to
1609 * @mask: Bitmask to change
1610 * @val: Value to be written
1612 * A value of zero will be returned on success, a negative errno will
1613 * be returned in error cases.
1615 int regmap_field_update_bits(struct regmap_field *field, unsigned int mask, unsigned int val)
1617 mask = (mask << field->shift) & field->mask;
1619 return regmap_update_bits(field->regmap, field->reg,
1620 mask, val << field->shift);
1622 EXPORT_SYMBOL_GPL(regmap_field_update_bits);
1625 * regmap_fields_write(): Write a value to a single register field with port ID
1627 * @field: Register field to write to
1629 * @val: Value to be written
1631 * A value of zero will be returned on success, a negative errno will
1632 * be returned in error cases.
1634 int regmap_fields_write(struct regmap_field *field, unsigned int id,
1637 if (id >= field->id_size)
1640 return regmap_update_bits(field->regmap,
1641 field->reg + (field->id_offset * id),
1642 field->mask, val << field->shift);
1644 EXPORT_SYMBOL_GPL(regmap_fields_write);
/*
 * Like regmap_fields_write() but forces the hardware write even when the
 * new value matches the cached one (uses regmap_write_bits()).
 *
 * Returns 0 on success or a negative errno (-EINVAL for an out-of-range
 * port @id).
 */
int regmap_fields_force_write(struct regmap_field *field, unsigned int id,
			      unsigned int val)
{
	if (id >= field->id_size)
		return -EINVAL;

	return regmap_write_bits(field->regmap,
				 field->reg + (field->id_offset * id),
				 field->mask, val << field->shift);
}
EXPORT_SYMBOL_GPL(regmap_fields_force_write);
1659 * regmap_fields_update_bits(): Perform a read/modify/write cycle
1660 * on the register field
1662 * @field: Register field to write to
1664 * @mask: Bitmask to change
1665 * @val: Value to be written
1667 * A value of zero will be returned on success, a negative errno will
1668 * be returned in error cases.
1670 int regmap_fields_update_bits(struct regmap_field *field, unsigned int id,
1671 unsigned int mask, unsigned int val)
1673 if (id >= field->id_size)
1676 mask = (mask << field->shift) & field->mask;
1678 return regmap_update_bits(field->regmap,
1679 field->reg + (field->id_offset * id),
1680 mask, val << field->shift);
1682 EXPORT_SYMBOL_GPL(regmap_fields_update_bits);
/**
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be write from
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in single transfer or multiple transfer.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	size_t total_size = val_bytes * val_count;

	if (map->bus && !map->format.parse_inplace)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;

	/*
	 * Some devices don't support bulk write, for
	 * them we have a series of single write operations in the first two if
	 * blocks.
	 *
	 * The first if block is used for memory mapped io. It does not allow
	 * val_bytes of 3 for example.
	 * The second one is used for busses which do not have this limitation
	 * and can write arbitrary value lengths.
	 */
	if (!map->bus) {
		map->lock(map->lock_arg);
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			/* Pull each native-endian value out of the caller's
			 * buffer at its register width.
			 */
			switch (val_bytes) {
			case 1:
				ival = *(u8 *)(val + (i * val_bytes));
				break;
			case 2:
				ival = *(u16 *)(val + (i * val_bytes));
				break;
			case 4:
				ival = *(u32 *)(val + (i * val_bytes));
				break;
#ifdef CONFIG_64BIT
			case 8:
				ival = *(u64 *)(val + (i * val_bytes));
				break;
#endif
			default:
				ret = -EINVAL;
				goto out;
			}

			ret = _regmap_write(map, reg + (i * map->reg_stride),
					    ival);
			if (ret != 0)
				goto out;
		}
out:
		map->unlock(map->lock_arg);
	} else if (map->use_single_write ||
		   (map->max_raw_write && map->max_raw_write < total_size)) {
		/* Either the bus only does one register per transfer, or the
		 * whole block exceeds the raw-write limit: chop the transfer
		 * into chunks.
		 */
		int chunk_stride = map->reg_stride;
		size_t chunk_size = val_bytes;
		size_t chunk_count = val_count;

		if (!map->use_single_write) {
			/* Largest multiple of val_bytes that fits the limit. */
			chunk_size = map->max_raw_write;
			if (chunk_size % val_bytes)
				chunk_size -= chunk_size % val_bytes;
			chunk_count = total_size / chunk_size;
			chunk_stride *= chunk_size / val_bytes;
		}

		map->lock(map->lock_arg);
		/* Write as many bytes as possible with chunk_size */
		for (i = 0; i < chunk_count; i++) {
			ret = _regmap_raw_write(map,
						reg + (i * chunk_stride),
						val + (i * chunk_size),
						chunk_size);
			if (ret)
				break;
		}

		/* Write remaining bytes */
		if (!ret && chunk_size * i < total_size) {
			ret = _regmap_raw_write(map, reg + (i * chunk_stride),
						val + (i * chunk_size),
						total_size - i * chunk_size);
		}
		map->unlock(map->lock_arg);
	} else {
		void *wval;

		if (!val_count)
			return -EINVAL;

		/* Work on a copy so parse_inplace() does not clobber the
		 * caller's buffer while converting to bus endianness.
		 */
		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
		if (!wval) {
			dev_err(map->dev, "Error in memory allocation\n");
			return -ENOMEM;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(wval + i);

		map->lock(map->lock_arg);
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
		map->unlock(map->lock_arg);

		kfree(wval);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
/*
 * _regmap_raw_multi_reg_write()
 *
 * the (register,newvalue) pairs in regs have not been formatted, but
 * they are all in the same page and have been changed to being page
 * relative. The page register has been written if that was necessary.
 *
 * Linearises the pairs into one wire buffer of reg/pad/val frames and
 * sends it with a single bus write.  Returns 0 or a negative errno.
 */
static int _regmap_raw_multi_reg_write(struct regmap *map,
				       const struct reg_sequence *regs,
				       size_t num_regs)
{
	int ret;
	void *buf;
	int i;
	u8 *u8;
	size_t val_bytes = map->format.val_bytes;
	size_t reg_bytes = map->format.reg_bytes;
	size_t pad_bytes = map->format.pad_bytes;
	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
	size_t len = pair_size * num_regs;

	if (!len)
		return -EINVAL;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* We have to linearise by hand. */

	u8 = buf;

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		unsigned int val = regs[i].def;
		trace_regmap_hw_write_start(map, reg, 1);
		map->format.format_reg(u8, reg, map->reg_shift);
		u8 += reg_bytes + pad_bytes;
		map->format.format_val(u8, val, 0);
		u8 += val_bytes;
	}
	/* The write flag lives in the high bits of the very first
	 * register byte of the message.
	 */
	u8 = buf;
	*u8 |= map->write_flag_mask;

	ret = map->bus->write(map->bus_context, buf, len);

	kfree(buf);

	for (i = 0; i < num_regs; i++) {
		int reg = regs[i].reg;
		trace_regmap_hw_write_done(map, reg, 1);
	}
	return ret;
}
1863 static unsigned int _regmap_register_page(struct regmap *map,
1865 struct regmap_range_node *range)
1867 unsigned int win_page = (reg - range->range_min) / range->window_len;
/*
 * Write a sequence that may cross page boundaries and/or contain delays.
 *
 * the set of registers are not neccessarily in order, but
 * since the order of write must be preserved this algorithm
 * chops the set each time the page changes. This also applies
 * if there is a delay required at any point in the sequence.
 *
 * Returns 0 on success or a negative errno.
 */
static int _regmap_range_multi_paged_reg_write(struct regmap *map,
					       struct reg_sequence *regs,
					       size_t num_regs)
{
	int ret;
	int i, n;
	struct reg_sequence *base;
	unsigned int this_page = 0;
	unsigned int page_change = 0;

	base = regs;
	for (i = 0, n = 0; i < num_regs; i++, n++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		range = _regmap_range_lookup(map, reg);
		if (range) {
			unsigned int win_page = _regmap_register_page(map, reg,
								      range);

			/* Seed this_page from the first entry, then flag a
			 * chop whenever the page changes.
			 */
			if (i == 0)
				this_page = win_page;
			if (win_page != this_page) {
				this_page = win_page;
				page_change = 1;
			}
		}

		/* If we have both a page change and a delay make sure to
		 * write the regs and apply the delay before we change the
		 * page.
		 */

		if (page_change || regs[i].delay_us) {

			/* For situations where the first write requires
			 * a delay we need to make sure we don't call
			 * raw_multi_reg_write with n=0
			 * This can't occur with page breaks as we
			 * never write on the first iteration
			 */
			if (regs[i].delay_us && i == 0)
				n = 1;

			ret = _regmap_raw_multi_reg_write(map, base, n);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);

			/* Restart the pending run just after what we wrote. */
			base += n;
			n = 0;

			if (page_change) {
				ret = _regmap_select_page(map,
							  &base[n].reg,
							  range, 1);
				if (ret != 0)
					return ret;

				page_change = 0;
			}
		}
	}
	/* Flush whatever run is still pending at the end. */
	if (n > 0)
		return _regmap_raw_multi_reg_write(map, base, n);
	return 0;
}
/*
 * Core multi-register write; called with the map's lock held.
 *
 * Falls back to one _regmap_write() per entry when the bus cannot do
 * multi-register transfers, validates writeability/stride, populates the
 * cache (honouring cache-only mode), then dispatches either the paged
 * path or a single raw multi-reg write.  Returns 0 or a negative errno.
 */
static int _regmap_multi_reg_write(struct regmap *map,
				   const struct reg_sequence *regs,
				   size_t num_regs)
{
	int i;
	int ret;

	if (!map->can_multi_write) {
		/* Bus can't batch: write (and delay) entry by entry. */
		for (i = 0; i < num_regs; i++) {
			ret = _regmap_write(map, regs[i].reg, regs[i].def);
			if (ret != 0)
				return ret;

			if (regs[i].delay_us)
				udelay(regs[i].delay_us);
		}
		return 0;
	}

	if (!map->format.parse_inplace)
		return -EINVAL;

	if (map->writeable_reg)
		for (i = 0; i < num_regs; i++) {
			int reg = regs[i].reg;
			if (!map->writeable_reg(map->dev, reg))
				return -EINVAL;
			if (reg % map->reg_stride)
				return -EINVAL;
		}

	if (!map->cache_bypass) {
		for (i = 0; i < num_regs; i++) {
			unsigned int val = regs[i].def;
			unsigned int reg = regs[i].reg;
			ret = regcache_write(map, reg, val);
			if (ret) {
				dev_err(map->dev,
				"Error in caching of register: %x ret: %d\n",
								reg, ret);
				return ret;
			}
		}
		/* Cache-only: nothing goes to the hardware now. */
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	for (i = 0; i < num_regs; i++) {
		unsigned int reg = regs[i].reg;
		struct regmap_range_node *range;

		/* Coalesce all the writes between a page break or a delay
		 * in a sequence
		 */
		range = _regmap_range_lookup(map, reg);
		if (range || regs[i].delay_us) {
			size_t len = sizeof(struct reg_sequence)*num_regs;
			/* The paged writer rewrites registers to be page
			 * relative, so work on a copy of the sequence.
			 */
			struct reg_sequence *base = kmemdup(regs, len,
							    GFP_KERNEL);
			if (!base)
				return -ENOMEM;
			ret = _regmap_range_multi_paged_reg_write(map, base,
								  num_regs);
			kfree(base);

			return ret;
		}
	}
	return _regmap_raw_multi_reg_write(map, regs, num_regs);
}
2025 * regmap_multi_reg_write(): Write multiple registers to the device
2027 * where the set of register,value pairs are supplied in any order,
2028 * possibly not all in a single range.
2030 * @map: Register map to write to
2031 * @regs: Array of structures containing register,value to be written
2032 * @num_regs: Number of registers to write
2034 * The 'normal' block write mode will send ultimately send data on the
2035 * target bus as R,V1,V2,V3,..,Vn where successively higer registers are
2036 * addressed. However, this alternative block multi write mode will send
2037 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2038 * must of course support the mode.
2040 * A value of zero will be returned on success, a negative errno will be
2041 * returned in error cases.
2043 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2048 map->lock(map->lock_arg);
2050 ret = _regmap_multi_reg_write(map, regs, num_regs);
2052 map->unlock(map->lock_arg);
2056 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2059 * regmap_multi_reg_write_bypassed(): Write multiple registers to the
2060 * device but not the cache
2062 * where the set of register are supplied in any order
2064 * @map: Register map to write to
2065 * @regs: Array of structures containing register,value to be written
2066 * @num_regs: Number of registers to write
2068 * This function is intended to be used for writing a large block of data
2069 * atomically to the device in single transfer for those I2C client devices
2070 * that implement this alternative block write mode.
2072 * A value of zero will be returned on success, a negative errno will
2073 * be returned in error cases.
2075 int regmap_multi_reg_write_bypassed(struct regmap *map,
2076 const struct reg_sequence *regs,
2082 map->lock(map->lock_arg);
2084 bypass = map->cache_bypass;
2085 map->cache_bypass = true;
2087 ret = _regmap_multi_reg_write(map, regs, num_regs);
2089 map->cache_bypass = bypass;
2091 map->unlock(map->lock_arg);
2095 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2098 * regmap_raw_write_async(): Write raw values to one or more registers
2101 * @map: Register map to write to
2102 * @reg: Initial register to write to
2103 * @val: Block of data to be written, laid out for direct transmission to the
2104 * device. Must be valid until regmap_async_complete() is called.
2105 * @val_len: Length of data pointed to by val.
2107 * This function is intended to be used for things like firmware
2108 * download where a large block of data needs to be transferred to the
2109 * device. No formatting will be done on the data provided.
2111 * If supported by the underlying bus the write will be scheduled
2112 * asynchronously, helping maximise I/O speed on higher speed buses
2113 * like SPI. regmap_async_complete() can be called to ensure that all
2114 * asynchrnous writes have been completed.
2116 * A value of zero will be returned on success, a negative errno will
2117 * be returned in error cases.
2119 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2120 const void *val, size_t val_len)
2124 if (val_len % map->format.val_bytes)
2126 if (reg % map->reg_stride)
2129 map->lock(map->lock_arg);
2133 ret = _regmap_raw_write(map, reg, val, val_len);
2137 map->unlock(map->lock_arg);
2141 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2143 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2144 unsigned int val_len)
2146 struct regmap_range_node *range;
2147 u8 *u8 = map->work_buf;
2152 range = _regmap_range_lookup(map, reg);
2154 ret = _regmap_select_page(map, ®, range,
2155 val_len / map->format.val_bytes);
2160 map->format.format_reg(map->work_buf, reg, map->reg_shift);
2163 * Some buses or devices flag reads by setting the high bits in the
2164 * register address; since it's always the high bits for all
2165 * current formats we can do this here rather than in
2166 * formatting. This may break if we get interesting formats.
2168 u8[0] |= map->read_flag_mask;
2170 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2172 ret = map->bus->read(map->bus_context, map->work_buf,
2173 map->format.reg_bytes + map->format.pad_bytes,
2176 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2181 static int _regmap_bus_reg_read(void *context, unsigned int reg,
2184 struct regmap *map = context;
2186 return map->bus->reg_read(map->bus_context, reg, val);
2189 static int _regmap_bus_read(void *context, unsigned int reg,
2193 struct regmap *map = context;
2195 if (!map->format.parse_val)
2198 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
2200 *val = map->format.parse_val(map->work_buf);
/*
 * Core single-register read; called with the map's lock held.
 *
 * Serves the value from the register cache when possible; in cache-only
 * mode a cache miss fails with -EBUSY rather than touching the hardware.
 * Successful hardware reads are written back into the cache.
 */
static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;
	void *context = _regmap_map_get_context(map);

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (map->cache_only)
		return -EBUSY;

	if (!regmap_readable(map, reg))
		return -EIO;

	ret = map->reg_read(context, reg, val);
	if (ret == 0) {
#ifdef LOG_DEVICE
		/* printk-based debug aid for very early init when tracing
		 * is not yet available (see comment at top of file).
		 */
		if (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map, reg, *val);

		/* Populate the cache with what we just read. */
		if (!map->cache_bypass)
			regcache_write(map, reg, *val);
	}

	return ret;
}
2240 * regmap_read(): Read a value from a single register
2242 * @map: Register map to read from
2243 * @reg: Register to be read from
2244 * @val: Pointer to store read value
2246 * A value of zero will be returned on success, a negative errno will
2247 * be returned in error cases.
2249 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2253 if (reg % map->reg_stride)
2256 map->lock(map->lock_arg);
2258 ret = _regmap_read(map, reg, val);
2260 map->unlock(map->lock_arg);
2264 EXPORT_SYMBOL_GPL(regmap_read);
/**
 * regmap_raw_read(): Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (!map->bus)
		return -EINVAL;
	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;
	if (val_count == 0)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		if (!map->bus->read) {
			ret = -ENOTSUPP;
			goto out;
		}
		/* Honour the bus's raw read length limit. */
		if (map->max_raw_read && map->max_raw_read < val_len) {
			ret = -E2BIG;
			goto out;
		}

		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);

	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + (i * map->reg_stride),
					   &v);
			if (ret != 0)
				goto out;

			/* Format each cached value into the caller's raw
			 * buffer at its register width.
			 */
			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

 out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);
2332 * regmap_field_read(): Read a value to a single register field
2334 * @field: Register field to read from
2335 * @val: Pointer to store read value
2337 * A value of zero will be returned on success, a negative errno will
2338 * be returned in error cases.
2340 int regmap_field_read(struct regmap_field *field, unsigned int *val)
2343 unsigned int reg_val;
2344 ret = regmap_read(field->regmap, field->reg, ®_val);
2348 reg_val &= field->mask;
2349 reg_val >>= field->shift;
2354 EXPORT_SYMBOL_GPL(regmap_field_read);
2357 * regmap_fields_read(): Read a value to a single register field with port ID
2359 * @field: Register field to read from
2361 * @val: Pointer to store read value
2363 * A value of zero will be returned on success, a negative errno will
2364 * be returned in error cases.
2366 int regmap_fields_read(struct regmap_field *field, unsigned int id,
2370 unsigned int reg_val;
2372 if (id >= field->id_size)
2375 ret = regmap_read(field->regmap,
2376 field->reg + (field->id_offset * id),
2381 reg_val &= field->mask;
2382 reg_val >>= field->shift;
2387 EXPORT_SYMBOL_GPL(regmap_fields_read);
/**
 * regmap_bulk_read(): Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (reg % map->reg_stride)
		return -EINVAL;

	if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
		/*
		 * Some devices does not support bulk read, for
		 * them we have a series of single read operations.
		 */
		size_t total_size = val_bytes * val_count;

		if (!map->use_single_read &&
		    (!map->max_raw_read || map->max_raw_read > total_size)) {
			/* Whole block fits in one raw transfer. */
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		} else {
			/*
			 * Some devices do not support bulk read or do not
			 * support large bulk reads, for them we have a series
			 * of read operations.
			 */
			int chunk_stride = map->reg_stride;
			size_t chunk_size = val_bytes;
			size_t chunk_count = val_count;

			if (!map->use_single_read) {
				/* Largest multiple of val_bytes that fits
				 * the bus's raw read limit.
				 */
				chunk_size = map->max_raw_read;
				if (chunk_size % val_bytes)
					chunk_size -= chunk_size % val_bytes;
				chunk_count = total_size / chunk_size;
				chunk_stride *= chunk_size / val_bytes;
			}

			/* Read bytes that fit into a multiple of chunk_size */
			for (i = 0; i < chunk_count; i++) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      chunk_size);
				if (ret != 0)
					return ret;
			}

			/* Read remaining bytes */
			if (chunk_size * i < total_size) {
				ret = regmap_raw_read(map,
						      reg + (i * chunk_stride),
						      val + (i * chunk_size),
						      total_size - i * chunk_size);
				if (ret != 0)
					return ret;
			}
		}

		/* Convert the raw bus-endian data to native endianness. */
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_inplace(val + i);
	} else {
		for (i = 0; i < val_count; i++) {
			unsigned int ival;

			ret = regmap_read(map, reg + (i * map->reg_stride),
					  &ival);
			if (ret != 0)
				return ret;

			if (map->format.format_val) {
				map->format.format_val(val + (i * val_bytes), ival, 0);
			} else {
				/* Devices providing read and write
				 * operations can use the bulk I/O
				 * functions if they define a val_bytes,
				 * we assume that the values are native
				 * endian.
				 */
#ifdef CONFIG_64BIT
				u64 *u64 = val;
#endif
				u32 *u32 = val;
				u16 *u16 = val;
				u8 *u8 = val;

				switch (map->format.val_bytes) {
#ifdef CONFIG_64BIT
				case 8:
					u64[i] = ival;
					break;
#endif
				case 4:
					u32[i] = ival;
					break;
				case 2:
					u16[i] = ival;
					break;
				case 1:
					u8[i] = ival;
					break;
				default:
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
/*
 * Core read/modify/write; called with the map's lock held.
 *
 * When the bus provides a native reg_update_bits op it is used directly
 * and the cache is then fixed up by a local read/modify/write of the
 * cached value; otherwise the update is emulated with _regmap_read() and
 * _regmap_write().  @change, when non-NULL, reports whether a hardware
 * write happened; @force_write forces the write even when the value is
 * unchanged.
 *
 * NOTE(review): this variant passes @change/@force_write through to
 * map->reg_update_bits and performs a cache fixup afterwards — this
 * differs from mainline; confirm against the local regmap.h prototype.
 */
static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change, bool *force_write)
{
	int ret;
	unsigned int tmp, orig;

	if (map->reg_update_bits) {
		ret = map->reg_update_bits(map->bus_context, reg, mask, val,
					   change, force_write);
		if (ret != 0)
			return ret;

		/* Fix up the cache by read/modify/write */
		if (!map->cache_bypass && !map->defer_caching) {
			ret = regcache_read(map, reg, &orig);
			if (ret != 0)
				return ret;

			tmp = orig & ~mask;
			tmp |= val & mask;

			ret = regcache_write(map, reg, tmp);
			if (ret != 0)
				return ret;
			if (map->cache_only)
				map->cache_dirty = true;
		}
	} else {
		ret = _regmap_read(map, reg, &orig);
		if (ret != 0)
			return ret;

		tmp = orig & ~mask;
		tmp |= val & mask;

		if (force_write || (tmp != orig)) {
			ret = _regmap_write(map, reg, tmp);
			if (ret == 0 && change)
				*change = true;
		}
	}

	return ret;
}
2557 * regmap_update_bits: Perform a read/modify/write cycle on the register map
2559 * @map: Register map to update
2560 * @reg: Register to update
2561 * @mask: Bitmask to change
2562 * @val: New value for bitmask
2564 * Returns zero for success, a negative number on error.
2566 int regmap_update_bits(struct regmap *map, unsigned int reg,
2567 unsigned int mask, unsigned int val)
2571 map->lock(map->lock_arg);
2572 ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
2573 map->unlock(map->lock_arg);
2577 EXPORT_SYMBOL_GPL(regmap_update_bits);
2580 * regmap_write_bits: Perform a read/modify/write cycle on the register map
2582 * @map: Register map to update
2583 * @reg: Register to update
2584 * @mask: Bitmask to change
2585 * @val: New value for bitmask
2587 * Returns zero for success, a negative number on error.
2589 int regmap_write_bits(struct regmap *map, unsigned int reg,
2590 unsigned int mask, unsigned int val)
2594 map->lock(map->lock_arg);
2595 ret = _regmap_update_bits(map, reg, mask, val, NULL, true);
2596 map->unlock(map->lock_arg);
2600 EXPORT_SYMBOL_GPL(regmap_write_bits);
2603 * regmap_update_bits_async: Perform a read/modify/write cycle on the register
2604 * map asynchronously
2606 * @map: Register map to update
2607 * @reg: Register to update
2608 * @mask: Bitmask to change
2609 * @val: New value for bitmask
2611 * With most buses the read must be done synchronously so this is most
2612 * useful for devices with a cache which do not need to interact with
2613 * the hardware to determine the current register value.
2615 * Returns zero for success, a negative number on error.
2617 int regmap_update_bits_async(struct regmap *map, unsigned int reg,
2618 unsigned int mask, unsigned int val)
2622 map->lock(map->lock_arg);
2626 ret = _regmap_update_bits(map, reg, mask, val, NULL, false);
2630 map->unlock(map->lock_arg);
2634 EXPORT_SYMBOL_GPL(regmap_update_bits_async);
2637 * regmap_update_bits_check: Perform a read/modify/write cycle on the
2638 * register map and report if updated
2640 * @map: Register map to update
2641 * @reg: Register to update
2642 * @mask: Bitmask to change
2643 * @val: New value for bitmask
2644 * @change: Boolean indicating if a write was done
2646 * Returns zero for success, a negative number on error.
2648 int regmap_update_bits_check(struct regmap *map, unsigned int reg,
2649 unsigned int mask, unsigned int val,
2654 map->lock(map->lock_arg);
2655 ret = _regmap_update_bits(map, reg, mask, val, change, false);
2656 map->unlock(map->lock_arg);
2659 EXPORT_SYMBOL_GPL(regmap_update_bits_check);
2662 * regmap_update_bits_check_async: Perform a read/modify/write cycle on the
2663 * register map asynchronously and report if
2666 * @map: Register map to update
2667 * @reg: Register to update
2668 * @mask: Bitmask to change
2669 * @val: New value for bitmask
2670 * @change: Boolean indicating if a write was done
2672 * With most buses the read must be done synchronously so this is most
2673 * useful for devices with a cache which do not need to interact with
2674 * the hardware to determine the current register value.
2676 * Returns zero for success, a negative number on error.
2678 int regmap_update_bits_check_async(struct regmap *map, unsigned int reg,
2679 unsigned int mask, unsigned int val,
2684 map->lock(map->lock_arg);
2688 ret = _regmap_update_bits(map, reg, mask, val, change, false);
2692 map->unlock(map->lock_arg);
2696 EXPORT_SYMBOL_GPL(regmap_update_bits_check_async);
/*
 * Completion callback invoked by bus drivers when an async transfer
 * finishes.  Moves the async descriptor back to the free list, records
 * the first non-zero error and, once the pending list drains, wakes any
 * regmap_async_complete() waiters.  Uses spin_lock (not _irqsave) — the
 * callers' context must allow that.
 */
void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	trace_regmap_async_io_complete(map);

	spin_lock(&map->async_lock);
	list_move(&async->list, &map->async_free);
	/* Last outstanding transfer? */
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	if (wake)
		wake_up(&map->async_waitq);
}
EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
2719 static int regmap_async_is_done(struct regmap *map)
2721 unsigned long flags;
2724 spin_lock_irqsave(&map->async_lock, flags);
2725 ret = list_empty(&map->async_list);
2726 spin_unlock_irqrestore(&map->async_lock, flags);
2732 * regmap_async_complete: Ensure all asynchronous I/O has completed.
2734 * @map: Map to operate on.
2736 * Blocks until any pending asynchronous I/O has completed. Returns
2737 * an error code for any failed I/O operations.
2739 int regmap_async_complete(struct regmap *map)
2741 unsigned long flags;
2744 /* Nothing to do with no async support */
2745 if (!map->bus || !map->bus->async_write)
2748 trace_regmap_async_complete_start(map);
2750 wait_event(map->async_waitq, regmap_async_is_done(map));
2752 spin_lock_irqsave(&map->async_lock, flags);
2753 ret = map->async_ret;
2755 spin_unlock_irqrestore(&map->async_lock, flags);
2757 trace_regmap_async_complete_done(map);
2761 EXPORT_SYMBOL_GPL(regmap_async_complete);
/**
 * regmap_register_patch: Register and apply register updates to be applied
 *                        on device initialistion
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately. Typically this is used to apply
 * corrections to be applied to the device defaults on startup, such
 * as the updates some vendors provide to undocumented registers.
 *
 * The caller must ensure that this function cannot be called
 * concurrently with either itself or regcache_sync().
 */
int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
			  int num_regs)
{
	struct reg_sequence *p;
	int ret;
	bool bypass;

	if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
	    num_regs))
		return 0;

	/* Grow the stored patch so regcache_sync() can replay it later. */
	p = krealloc(map->patch,
		     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
		     GFP_KERNEL);
	if (p) {
		memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
		map->patch = p;
		map->patch_regs += num_regs;
	} else {
		return -ENOMEM;
	}

	map->lock(map->lock_arg);

	/* Apply the patch now, straight to the hardware (cache bypassed),
	 * restoring the caller's bypass setting afterwards.
	 */
	bypass = map->cache_bypass;

	map->cache_bypass = true;
	map->async = true;

	ret = _regmap_multi_reg_write(map, regs, num_regs);

	map->async = false;
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	regmap_async_complete(map);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
2823 * regmap_get_val_bytes(): Report the size of a register value
2825 * Report the size of a register value, mainly intended to for use by
2826 * generic infrastructure built on top of regmap.
2828 int regmap_get_val_bytes(struct regmap *map)
2830 if (map->format.format_write)
2833 return map->format.val_bytes;
2835 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
2838 * regmap_get_max_register(): Report the max register value
2840 * Report the max register value, mainly intended to for use by
2841 * generic infrastructure built on top of regmap.
2843 int regmap_get_max_register(struct regmap *map)
2845 return map->max_register ? map->max_register : -EINVAL;
2847 EXPORT_SYMBOL_GPL(regmap_get_max_register);
2850 * regmap_get_reg_stride(): Report the register address stride
2852 * Report the register address stride, mainly intended to for use by
2853 * generic infrastructure built on top of regmap.
2855 int regmap_get_reg_stride(struct regmap *map)
2857 return map->reg_stride;
2859 EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
2861 int regmap_parse_val(struct regmap *map, const void *buf,
2864 if (!map->format.parse_val)
2867 *val = map->format.parse_val(buf);
2871 EXPORT_SYMBOL_GPL(regmap_parse_val);
2873 static int __init regmap_initcall(void)
2875 regmap_debugfs_initcall();
2879 postcore_initcall(regmap_initcall);