/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
20 struct iio_map_internal {
21 struct iio_dev *indio_dev;
26 static LIST_HEAD(iio_map_list);
27 static DEFINE_MUTEX(iio_map_list_lock);
29 int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
32 struct iio_map_internal *mapi;
37 mutex_lock(&iio_map_list_lock);
38 while (maps[i].consumer_dev_name != NULL) {
39 mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
45 mapi->indio_dev = indio_dev;
46 list_add(&mapi->l, &iio_map_list);
50 mutex_unlock(&iio_map_list_lock);
54 EXPORT_SYMBOL_GPL(iio_map_array_register);
57 /* Assumes the exact same array (e.g. memory locations)
58 * used at unregistration as used at registration rather than
59 * more complex checking of contents.
61 int iio_map_array_unregister(struct iio_dev *indio_dev,
66 struct iio_map_internal *mapi;
71 mutex_lock(&iio_map_list_lock);
72 while (maps[i].consumer_dev_name != NULL) {
74 list_for_each_entry(mapi, &iio_map_list, l)
75 if (&maps[i] == mapi->map) {
81 if (found_it == false) {
87 mutex_unlock(&iio_map_list_lock);
91 EXPORT_SYMBOL_GPL(iio_map_array_unregister);
93 static const struct iio_chan_spec
94 *iio_chan_spec_from_name(const struct iio_dev *indio_dev,
98 const struct iio_chan_spec *chan = NULL;
100 for (i = 0; i < indio_dev->num_channels; i++)
101 if (indio_dev->channels[i].datasheet_name &&
102 strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
103 chan = &indio_dev->channels[i];
110 struct iio_channel *iio_st_channel_get(const char *name,
111 const char *channel_name)
113 struct iio_map_internal *c_i = NULL, *c = NULL;
114 struct iio_channel *channel;
116 if (name == NULL && channel_name == NULL)
117 return ERR_PTR(-ENODEV);
119 /* first find matching entry the channel map */
120 mutex_lock(&iio_map_list_lock);
121 list_for_each_entry(c_i, &iio_map_list, l) {
122 if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
124 strcmp(channel_name, c_i->map->consumer_channel) != 0))
127 get_device(&c->indio_dev->dev);
130 mutex_unlock(&iio_map_list_lock);
132 return ERR_PTR(-ENODEV);
134 channel = kmalloc(sizeof(*channel), GFP_KERNEL);
136 return ERR_PTR(-ENOMEM);
138 channel->indio_dev = c->indio_dev;
140 if (c->map->adc_channel_label)
142 iio_chan_spec_from_name(channel->indio_dev,
143 c->map->adc_channel_label);
147 EXPORT_SYMBOL_GPL(iio_st_channel_get);
149 void iio_st_channel_release(struct iio_channel *channel)
151 put_device(&channel->indio_dev->dev);
154 EXPORT_SYMBOL_GPL(iio_st_channel_release);
156 struct iio_channel *iio_st_channel_get_all(const char *name)
158 struct iio_channel *chans;
159 struct iio_map_internal *c = NULL;
165 return ERR_PTR(-EINVAL);
167 mutex_lock(&iio_map_list_lock);
168 /* first count the matching maps */
169 list_for_each_entry(c, &iio_map_list, l)
170 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
180 /* NULL terminated array to save passing size */
181 chans = kzalloc(sizeof(*chans)*(nummaps + 1), GFP_KERNEL);
187 /* for each map fill in the chans element */
188 list_for_each_entry(c, &iio_map_list, l) {
189 if (name && strcmp(name, c->map->consumer_dev_name) != 0)
191 chans[mapind].indio_dev = c->indio_dev;
192 chans[mapind].channel =
193 iio_chan_spec_from_name(chans[mapind].indio_dev,
194 c->map->adc_channel_label);
195 if (chans[mapind].channel == NULL) {
197 put_device(&chans[mapind].indio_dev->dev);
198 goto error_free_chans;
200 get_device(&chans[mapind].indio_dev->dev);
203 mutex_unlock(&iio_map_list_lock);
206 goto error_free_chans;
211 for (i = 0; i < nummaps; i++)
212 if (chans[i].indio_dev)
213 put_device(&chans[i].indio_dev->dev);
216 mutex_unlock(&iio_map_list_lock);
220 EXPORT_SYMBOL_GPL(iio_st_channel_get_all);
222 void iio_st_channel_release_all(struct iio_channel *channels)
224 struct iio_channel *chan = &channels[0];
226 while (chan->indio_dev) {
227 put_device(&chan->indio_dev->dev);
232 EXPORT_SYMBOL_GPL(iio_st_channel_release_all);
234 int iio_st_read_channel_raw(struct iio_channel *chan, int *val)
238 mutex_lock(&chan->indio_dev->info_exist_lock);
239 if (chan->indio_dev->info == NULL) {
244 ret = chan->indio_dev->info->read_raw(chan->indio_dev, chan->channel,
247 mutex_unlock(&chan->indio_dev->info_exist_lock);
251 EXPORT_SYMBOL_GPL(iio_st_read_channel_raw);
253 int iio_st_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
257 mutex_lock(&chan->indio_dev->info_exist_lock);
258 if (chan->indio_dev->info == NULL) {
263 ret = chan->indio_dev->info->read_raw(chan->indio_dev,
266 IIO_CHAN_INFO_SCALE);
268 mutex_unlock(&chan->indio_dev->info_exist_lock);
272 EXPORT_SYMBOL_GPL(iio_st_read_channel_scale);
274 int iio_st_get_channel_type(struct iio_channel *chan,
275 enum iio_chan_type *type)
278 /* Need to verify underlying driver has not gone away */
280 mutex_lock(&chan->indio_dev->info_exist_lock);
281 if (chan->indio_dev->info == NULL) {
286 *type = chan->channel->type;
288 mutex_unlock(&chan->indio_dev->info_exist_lock);
292 EXPORT_SYMBOL_GPL(iio_st_get_channel_type);