1 /* SPDX-License-Identifier: GPL-2.0-only */
2
3 /* The industrial I/O core
4 *
5 * Copyright (c) 2008 Jonathan Cameron
6 */
7 #ifndef _INDUSTRIAL_IO_H_
8 #define _INDUSTRIAL_IO_H_
9
10 #include <linux/device.h>
11 #include <linux/cdev.h>
12 #include <linux/cleanup.h>
13 #include <linux/compiler_types.h>
14 #include <linux/slab.h>
15 #include <linux/iio/types.h>
16 /* IIO TODO LIST */
17 /*
18 * Provide means of adjusting timer accuracy.
19 * Currently assumes nano seconds.
20 */
21
22 struct fwnode_reference_args;
23
/* Scope over which an extended channel info attribute is shared */
enum iio_shared_by {
	IIO_SEPARATE,		/* one attribute instance per channel */
	IIO_SHARED_BY_TYPE,	/* shared by all channels of the same type */
	IIO_SHARED_BY_DIR,	/* shared by all channels of the same direction */
	IIO_SHARED_BY_ALL	/* shared by every channel of the device */
};
30
/* Byte ordering of sample data */
enum iio_endian {
	IIO_CPU,	/* native CPU endianness */
	IIO_BE,		/* big endian */
	IIO_LE,		/* little endian */
};
36
37 struct iio_chan_spec;
38 struct iio_dev;
39
40 /**
41 * struct iio_chan_spec_ext_info - Extended channel info attribute
42 * @name: Info attribute name
43 * @shared: Whether this attribute is shared between all channels.
44 * @read: Read callback for this info attribute, may be NULL.
45 * @write: Write callback for this info attribute, may be NULL.
46 * @private: Data private to the driver.
47 */
struct iio_chan_spec_ext_info {
	const char *name;		/* sysfs attribute name */
	enum iio_shared_by shared;	/* sharing scope of the attribute */
	/* Either callback may be NULL for a write-only/read-only attribute */
	ssize_t (*read)(struct iio_dev *, uintptr_t private,
			struct iio_chan_spec const *, char *buf);
	ssize_t (*write)(struct iio_dev *, uintptr_t private,
			 struct iio_chan_spec const *, const char *buf,
			 size_t len);
	uintptr_t private;		/* driver value handed to read/write */
};
58
59 /**
60 * struct iio_enum - Enum channel info attribute
61 * @items: An array of strings.
62 * @num_items: Length of the item array.
63 * @set: Set callback function, may be NULL.
64 * @get: Get callback function, may be NULL.
65 *
66 * The iio_enum struct can be used to implement enum style channel attributes.
67 * Enum style attributes are those which have a set of strings which map to
68 * unsigned integer values. The IIO enum helper code takes care of mapping
69 * between value and string as well as generating a "_available" file which
70 * contains a list of all available items. The set callback will be called when
71 * the attribute is updated. The last parameter is the index to the newly
72 * activated item. The get callback will be used to query the currently active
73 * item and is supposed to return the index for it.
74 */
struct iio_enum {
	const char * const *items;	/* string for each selectable item */
	unsigned int num_items;		/* number of entries in @items */
	/* set() receives the index of the newly selected item */
	int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int);
	/* get() returns the index of the currently active item */
	int (*get)(struct iio_dev *, const struct iio_chan_spec *);
};
81
82 ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
83 uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
84 ssize_t iio_enum_read(struct iio_dev *indio_dev,
85 uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
86 ssize_t iio_enum_write(struct iio_dev *indio_dev,
87 uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
88 size_t len);
89
90 /**
91 * IIO_ENUM() - Initialize enum extended channel attribute
92 * @_name: Attribute name
93 * @_shared: Whether the attribute is shared between all channels
94 * @_e: Pointer to an iio_enum struct
95 *
96 * This should usually be used together with IIO_ENUM_AVAILABLE()
97 */
98 #define IIO_ENUM(_name, _shared, _e) \
99 { \
100 .name = (_name), \
101 .shared = (_shared), \
102 .read = iio_enum_read, \
103 .write = iio_enum_write, \
104 .private = (uintptr_t)(_e), \
105 }
106
107 /**
108 * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
109 * @_name: Attribute name ("_available" will be appended to the name)
110 * @_shared: Whether the attribute is shared between all channels
111 * @_e: Pointer to an iio_enum struct
112 *
113 * Creates a read only attribute which lists all the available enum items in a
114 * space separated list. This should usually be used together with IIO_ENUM()
115 */
116 #define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
117 { \
118 .name = (_name "_available"), \
119 .shared = _shared, \
120 .read = iio_enum_available_read, \
121 .private = (uintptr_t)(_e), \
122 }
123
124 /**
125 * struct iio_mount_matrix - iio mounting matrix
126 * @rotation: 3 dimensional space rotation matrix defining sensor alignment with
127 * main hardware
128 */
129 struct iio_mount_matrix {
130 const char *rotation[9];
131 };
132
133 ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
134 const struct iio_chan_spec *chan, char *buf);
135 int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);
136
137 typedef const struct iio_mount_matrix *
138 (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
139 const struct iio_chan_spec *chan);
140
141 /**
142 * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
143 * @_shared: Whether the attribute is shared between all channels
144 * @_get: Pointer to an iio_get_mount_matrix_t accessor
145 */
146 #define IIO_MOUNT_MATRIX(_shared, _get) \
147 { \
148 .name = "mount_matrix", \
149 .shared = (_shared), \
150 .read = iio_show_mount_matrix, \
151 .private = (uintptr_t)(_get), \
152 }
153
154 /**
155 * struct iio_event_spec - specification for a channel event
156 * @type: Type of the event
157 * @dir: Direction of the event
158 * @mask_separate: Bit mask of enum iio_event_info values. Attributes
159 * set in this mask will be registered per channel.
160 * @mask_shared_by_type: Bit mask of enum iio_event_info values. Attributes
161 * set in this mask will be shared by channel type.
162 * @mask_shared_by_dir: Bit mask of enum iio_event_info values. Attributes
163 * set in this mask will be shared by channel type and
164 * direction.
165 * @mask_shared_by_all: Bit mask of enum iio_event_info values. Attributes
166 * set in this mask will be shared by all channels.
167 */
struct iio_event_spec {
	enum iio_event_type type;
	enum iio_event_direction dir;
	/* Bit masks of enum iio_event_info values; sharing scope as named */
	unsigned long mask_separate;
	unsigned long mask_shared_by_type;
	unsigned long mask_shared_by_dir;
	unsigned long mask_shared_by_all;
};
176
177 /**
178 * struct iio_scan_type - specification for channel data format in buffer
179 * @sign: 's' or 'u' to specify signed or unsigned
180 * @realbits: Number of valid bits of data
181 * @storagebits: Realbits + padding
182 * @shift: Shift right by this before masking out realbits.
183 * @repeat: Number of times real/storage bits repeats. When the
184 * repeat element is more than 1, then the type element in
185 * sysfs will show a repeat value. Otherwise, the number
186 * of repetitions is omitted.
187 * @endianness: little or big endian
188 */
struct iio_scan_type {
	char sign;		/* 's' (signed) or 'u' (unsigned) */
	u8 realbits;		/* number of valid data bits */
	u8 storagebits;		/* realbits plus padding */
	u8 shift;		/* right shift applied before masking realbits */
	u8 repeat;		/* >1 adds a repeat count to the sysfs type */
	enum iio_endian endianness;
};
197
198 /**
199 * struct iio_chan_spec - specification of a single channel
200 * @type: What type of measurement is the channel making.
201 * @channel: What number do we wish to assign the channel.
202 * @channel2: If there is a second number for a differential
203 * channel then this is it. If modified is set then the
204 * value here specifies the modifier.
205 * @address: Driver specific identifier.
206 * @scan_index: Monotonic index to give ordering in scans when read
207 * from a buffer.
208 * @scan_type: struct describing the scan type - mutually exclusive
209 * with ext_scan_type.
210 * @ext_scan_type: Used in rare cases where there is more than one scan
211 * format for a channel. When this is used, the flag
212 * has_ext_scan_type must be set and the driver must
213 * implement get_current_scan_type in struct iio_info.
214 * @num_ext_scan_type: Number of elements in ext_scan_type.
215 * @info_mask_separate: What information is to be exported that is specific to
216 * this channel.
217 * @info_mask_separate_available: What availability information is to be
218 * exported that is specific to this channel.
219 * @info_mask_shared_by_type: What information is to be exported that is shared
220 * by all channels of the same type.
221 * @info_mask_shared_by_type_available: What availability information is to be
222 * exported that is shared by all channels of the same
223 * type.
224 * @info_mask_shared_by_dir: What information is to be exported that is shared
225 * by all channels of the same direction.
226 * @info_mask_shared_by_dir_available: What availability information is to be
227 * exported that is shared by all channels of the same
228 * direction.
229 * @info_mask_shared_by_all: What information is to be exported that is shared
230 * by all channels.
231 * @info_mask_shared_by_all_available: What availability information is to be
232 * exported that is shared by all channels.
233 * @event_spec: Array of events which should be registered for this
234 * channel.
235 * @num_event_specs: Size of the event_spec array.
236 * @ext_info: Array of extended info attributes for this channel.
237 * The array is NULL terminated, the last element should
238 * have its name field set to NULL.
239 * @extend_name: Allows labeling of channel attributes with an
240 * informative name. Note this has no effect codes etc,
241 * unlike modifiers.
242 * This field is deprecated in favour of providing
243 * iio_info->read_label() to override the label, which
244 * unlike @extend_name does not affect sysfs filenames.
245 * @datasheet_name: A name used in in-kernel mapping of channels. It should
246 * correspond to the first name that the channel is referred
247 * to by in the datasheet (e.g. IND), or the nearest
248 * possible compound name (e.g. IND-INC).
249 * @modified: Does a modifier apply to this channel. What these are
250 * depends on the channel type. Modifier is set in
251 * channel2. Examples are IIO_MOD_X for axial sensors about
252 * the 'x' axis.
253 * @indexed: Specify the channel has a numerical index. If not,
254 * the channel index number will be suppressed for sysfs
255 * attributes but not for event codes.
256 * @output: Channel is output.
257 * @differential: Channel is differential.
258 * @has_ext_scan_type: True if ext_scan_type is used instead of scan_type.
259 */
struct iio_chan_spec {
	enum iio_chan_type type;
	int channel;
	int channel2;		/* second channel of a pair, or the modifier */
	unsigned long address;	/* driver specific identifier */
	int scan_index;
	/*
	 * Exactly one member of the union is valid, selected by the
	 * has_ext_scan_type flag below.
	 */
	union {
		struct iio_scan_type scan_type;
		struct {
			const struct iio_scan_type *ext_scan_type;
			unsigned int num_ext_scan_type;
		};
	};
	long info_mask_separate;
	long info_mask_separate_available;
	long info_mask_shared_by_type;
	long info_mask_shared_by_type_available;
	long info_mask_shared_by_dir;
	long info_mask_shared_by_dir_available;
	long info_mask_shared_by_all;
	long info_mask_shared_by_all_available;
	const struct iio_event_spec *event_spec;
	unsigned int num_event_specs;
	const struct iio_chan_spec_ext_info *ext_info;	/* NULL-name terminated */
	const char *extend_name;	/* deprecated, see kernel-doc above */
	const char *datasheet_name;
	/* Single-bit flags, documented in the kernel-doc above */
	unsigned int modified:1;
	unsigned int indexed:1;
	unsigned int output:1;
	unsigned int differential:1;
	unsigned int has_ext_scan_type:1;
};
292
293
294 /**
295 * iio_channel_has_info() - Checks whether a channel supports a info attribute
296 * @chan: The channel to be queried
297 * @type: Type of the info attribute to be checked
298 *
299 * Returns true if the channels supports reporting values for the given info
300 * attribute type, false otherwise.
301 */
iio_channel_has_info(const struct iio_chan_spec * chan,enum iio_chan_info_enum type)302 static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
303 enum iio_chan_info_enum type)
304 {
305 return (chan->info_mask_separate & BIT(type)) |
306 (chan->info_mask_shared_by_type & BIT(type)) |
307 (chan->info_mask_shared_by_dir & BIT(type)) |
308 (chan->info_mask_shared_by_all & BIT(type));
309 }
310
311 /**
312 * iio_channel_has_available() - Checks if a channel has an available attribute
313 * @chan: The channel to be queried
314 * @type: Type of the available attribute to be checked
315 *
316 * Returns true if the channel supports reporting available values for the
317 * given attribute type, false otherwise.
318 */
iio_channel_has_available(const struct iio_chan_spec * chan,enum iio_chan_info_enum type)319 static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
320 enum iio_chan_info_enum type)
321 {
322 return (chan->info_mask_separate_available & BIT(type)) |
323 (chan->info_mask_shared_by_type_available & BIT(type)) |
324 (chan->info_mask_shared_by_dir_available & BIT(type)) |
325 (chan->info_mask_shared_by_all_available & BIT(type));
326 }
327
/*
 * IIO_CHAN_SOFT_TIMESTAMP() - initializer for a software timestamp channel
 * @_si: scan index to assign to the timestamp channel
 *
 * Timestamps are captured as signed 64-bit values; see iio_get_time_ns().
 */
#define IIO_CHAN_SOFT_TIMESTAMP(_si) { \
	.type = IIO_TIMESTAMP, \
	.channel = -1, \
	.scan_index = _si, \
	.scan_type = { \
		.sign = 's', \
		.realbits = 64, \
		.storagebits = 64, \
		}, \
}
338
339 s64 iio_get_time_ns(const struct iio_dev *indio_dev);
340
341 /*
342 * Device operating modes
343 * @INDIO_DIRECT_MODE: There is an access to either:
344 * a) The last single value available for devices that do not provide
345 * on-demand reads.
346 * b) A new value after performing an on-demand read otherwise.
347 * On most devices, this is a single-shot read. On some devices with data
348 * streams without an 'on-demand' function, this might also be the 'last value'
349 * feature. Above all, this mode internally means that we are not in any of the
350 * other modes, and sysfs reads should work.
351 * Device drivers should inform the core if they support this mode.
352 * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
353 * It indicates that an explicit trigger is required. This requests the core to
354 * attach a poll function when enabling the buffer, which is indicated by the
355 * _TRIGGERED suffix.
356 * The core will ensure this mode is set when registering a triggered buffer
357 * with iio_triggered_buffer_setup().
358 * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
359 * No poll function can be attached because there is no triggered infrastructure
360 * we can use to cause capture. There is a kfifo that the driver will fill, but
361 * not "only one scan at a time". Typically, hardware will have a buffer that
362 * can hold multiple scans. Software may read one or more scans at a single time
363 * and push the available data to a Kfifo. This means the core will not attach
364 * any poll function when enabling the buffer.
365 * The core will ensure this mode is set when registering a simple kfifo buffer
366 * with devm_iio_kfifo_buffer_setup().
367 * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
368 * Same as above but this time the buffer is not a kfifo where we have direct
369 * access to the data. Instead, the consumer driver must access the data through
370 * non software visible channels (or DMA when there is no demux possible in
371 * software)
372 * The core will ensure this mode is set when registering a dmaengine buffer
373 * with devm_iio_dmaengine_buffer_setup().
374 * @INDIO_EVENT_TRIGGERED: Very unusual mode.
375 * Triggers usually refer to an external event which will start data capture.
376 * Here it is kind of the opposite as, a particular state of the data might
377 * produce an event which can be considered as an event. We don't necessarily
378 * have access to the data itself, but to the event produced. For example, this
379 * can be a threshold detector. The internal path of this mode is very close to
380 * the INDIO_BUFFER_TRIGGERED mode.
381 * The core will ensure this mode is set when registering a triggered event.
382 * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
383 * Here, triggers can result in data capture and can be routed to multiple
384 * hardware components, which make them close to regular triggers in the way
385 * they must be managed by the core, but without the entire interrupts/poll
386 * functions burden. Interrupts are irrelevant as the data flow is hardware
387 * mediated and distributed.
388 */
/* Operating mode flags; each mode is described in the comment block above */
#define INDIO_DIRECT_MODE 0x01
#define INDIO_BUFFER_TRIGGERED 0x02
#define INDIO_BUFFER_SOFTWARE 0x04
#define INDIO_BUFFER_HARDWARE 0x08
#define INDIO_EVENT_TRIGGERED 0x10
#define INDIO_HARDWARE_TRIGGERED 0x20

#define INDIO_ALL_BUFFER_MODES \
	(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)

#define INDIO_ALL_TRIGGERED_MODES \
	(INDIO_BUFFER_TRIGGERED \
	| INDIO_EVENT_TRIGGERED \
	| INDIO_HARDWARE_TRIGGERED)

/* presumably bounds multi-element raw values (read_raw_multi) — TODO confirm */
#define INDIO_MAX_RAW_ELEMENTS 4
405
/**
 * struct iio_val_int_plus_micro - value split into integer and micro parts
 * @integer: integer part of the value
 * @micro: fractional part, in millionths
 */
struct iio_val_int_plus_micro {
	int integer;
	int micro;
};
410
411 struct iio_trigger; /* forward declaration */
412
413 /**
414 * struct iio_info - constant information about device
415 * @event_attrs: event control attributes
416 * @attrs: general purpose device attributes
417 * @read_raw: function to request a value from the device.
418 * mask specifies which value. Note 0 means a reading of
419 * the channel in question. Return value will specify the
420 * type of value returned by the device. val and val2 will
421 * contain the elements making up the returned value.
422 * @read_raw_multi: function to return values from the device.
423 * mask specifies which value. Note 0 means a reading of
424 * the channel in question. Return value will specify the
425 * type of value returned by the device. vals pointer
426 * contain the elements making up the returned value.
427 * max_len specifies maximum number of elements
428 * vals pointer can contain. val_len is used to return
429 * length of valid elements in vals.
430 * @read_avail: function to return the available values from the device.
431 * mask specifies which value. Note 0 means the available
432 * values for the channel in question. Return value
433 * specifies if a IIO_AVAIL_LIST or a IIO_AVAIL_RANGE is
434 * returned in vals. The type of the vals are returned in
435 * type and the number of vals is returned in length. For
436 * ranges, there are always three vals returned; min, step
437 * and max. For lists, all possible values are enumerated.
438 * @write_raw: function to write a value to the device.
439 * Parameters are the same as for read_raw.
440 * @read_label: function to request label name for a specified label,
441 * for better channel identification.
442 * @write_raw_get_fmt: callback function to query the expected
443 * format/precision. If not set by the driver, write_raw
444 * returns IIO_VAL_INT_PLUS_MICRO.
445 * @read_event_config: find out if the event is enabled.
446 * @write_event_config: set if the event is enabled.
447 * @read_event_value: read a configuration value associated with the event.
448 * @write_event_value: write a configuration value for the event.
449 * @read_event_label: function to request label name for a specified label,
450 * for better event identification.
451 * @validate_trigger: function to validate the trigger when the
452 * current trigger gets changed.
453 * @get_current_scan_type: must be implemented by drivers that use ext_scan_type
454 * in the channel spec to return the index of the currently
455 * active ext_scan type for a channel.
456 * @update_scan_mode: function to configure device and scan buffer when
457 * channels have changed
458 * @debugfs_reg_access: function to read or write register value of device
459 * @fwnode_xlate: fwnode based function pointer to obtain channel specifier index.
460 * @hwfifo_set_watermark: function pointer to set the current hardware
461 * fifo watermark level; see hwfifo_* entries in
462 * Documentation/ABI/testing/sysfs-bus-iio for details on
463 * how the hardware fifo operates
464 * @hwfifo_flush_to_buffer: function pointer to flush the samples stored
465 * in the hardware fifo to the device buffer. The driver
466 * should not flush more than count samples. The function
467 * must return the number of samples flushed, 0 if no
468 * samples were flushed or a negative integer if no samples
469 * were flushed and there was an error.
470 **/
struct iio_info {
	const struct attribute_group *event_attrs;
	const struct attribute_group *attrs;

	/* Raw/available value accessors; mask semantics in kernel-doc above */
	int (*read_raw)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val,
			int *val2,
			long mask);

	int (*read_raw_multi)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int max_len,
			int *vals,
			int *val_len,
			long mask);

	int (*read_avail)(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals,
			  int *type,
			  int *length,
			  long mask);

	int (*write_raw)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val,
			 int val2,
			 long mask);

	int (*read_label)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 char *label);

	int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 long mask);

	/* Event configuration callbacks */
	int (*read_event_config)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir);

	int (*write_event_config)(struct iio_dev *indio_dev,
				  const struct iio_chan_spec *chan,
				  enum iio_event_type type,
				  enum iio_event_direction dir,
				  bool state);

	int (*read_event_value)(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				enum iio_event_info info, int *val, int *val2);

	int (*write_event_value)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir,
				 enum iio_event_info info, int val, int val2);

	int (*read_event_label)(struct iio_dev *indio_dev,
				struct iio_chan_spec const *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				char *label);

	/* Buffer, trigger and plumbing callbacks */
	int (*validate_trigger)(struct iio_dev *indio_dev,
				struct iio_trigger *trig);
	int (*get_current_scan_type)(const struct iio_dev *indio_dev,
				     const struct iio_chan_spec *chan);
	int (*update_scan_mode)(struct iio_dev *indio_dev,
				const unsigned long *scan_mask);
	int (*debugfs_reg_access)(struct iio_dev *indio_dev,
				  unsigned int reg, unsigned int writeval,
				  unsigned int *readval);
	int (*fwnode_xlate)(struct iio_dev *indio_dev,
			    const struct fwnode_reference_args *iiospec);
	int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned int val);
	int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
				      unsigned int count);
};
553
554 /**
555 * struct iio_buffer_setup_ops - buffer setup related callbacks
556 * @preenable: [DRIVER] function to run prior to marking buffer enabled
557 * @postenable: [DRIVER] function to run after marking buffer enabled
558 * @predisable: [DRIVER] function to run prior to marking buffer
559 * disabled
560 * @postdisable: [DRIVER] function to run after marking buffer disabled
561 * @validate_scan_mask: [DRIVER] function callback to check whether a given
562 * scan mask is valid for the device.
563 */
struct iio_buffer_setup_ops {
	int (*preenable)(struct iio_dev *);	/* before buffer marked enabled */
	int (*postenable)(struct iio_dev *);	/* after buffer marked enabled */
	int (*predisable)(struct iio_dev *);	/* before buffer marked disabled */
	int (*postdisable)(struct iio_dev *);	/* after buffer marked disabled */
	/* Check whether a given scan mask is valid for the device */
	bool (*validate_scan_mask)(struct iio_dev *indio_dev,
				   const unsigned long *scan_mask);
};
572
573 /**
574 * struct iio_dev - industrial I/O device
575 * @modes: [DRIVER] bitmask listing all the operating modes
576 * supported by the IIO device. This list should be
577 * initialized before registering the IIO device. It can
578 * also be filed up by the IIO core, as a result of
579 * enabling particular features in the driver
580 * (see iio_triggered_event_setup()).
581 * @dev: [DRIVER] device structure, should be assigned a parent
582 * and owner
583 * @buffer: [DRIVER] any buffer present
584 * @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
585 * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
586 * array in order of preference, the most preferred
587 * masks first.
588 * @masklength: [INTERN] the length of the mask established from
589 * channels
590 * @active_scan_mask: [INTERN] union of all scan masks requested by buffers
591 * @scan_timestamp: [INTERN] set if any buffers have requested timestamp
592 * @trig: [INTERN] current device trigger (buffer modes)
593 * @pollfunc: [DRIVER] function run on trigger being received
594 * @pollfunc_event: [DRIVER] function run on events trigger being received
595 * @channels: [DRIVER] channel specification structure table
596 * @num_channels: [DRIVER] number of channels specified in @channels.
597 * @name: [DRIVER] name of the device.
598 * @label: [DRIVER] unique name to identify which device this is
599 * @info: [DRIVER] callbacks and constant info from driver
600 * @setup_ops: [DRIVER] callbacks to call before and after buffer
601 * enable/disable
602 * @priv: [DRIVER] reference to driver's private information
603 * **MUST** be accessed **ONLY** via iio_priv() helper
604 */
struct iio_dev {
	int modes;			/* bitmask of INDIO_* operating modes */
	struct device dev;

	struct iio_buffer *buffer;
	int scan_bytes;			/* [INTERN] bytes per scan fed to demux */

	const unsigned long *available_scan_masks;
	/* __private fields are core-internal; drivers must not touch them */
	unsigned int __private masklength;
	const unsigned long *active_scan_mask;
	bool __private scan_timestamp;
	struct iio_trigger *trig;
	struct iio_poll_func *pollfunc;
	struct iio_poll_func *pollfunc_event;

	struct iio_chan_spec const *channels;
	int num_channels;

	const char *name;
	const char *label;
	const struct iio_info *info;
	const struct iio_buffer_setup_ops *setup_ops;

	/* Driver private data; access only via iio_priv() */
	void *__private priv;
};
630
631 int iio_device_id(struct iio_dev *indio_dev);
632 int iio_device_get_current_mode(struct iio_dev *indio_dev);
633 bool iio_buffer_enabled(struct iio_dev *indio_dev);
634
635 const struct iio_chan_spec
636 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
637 /**
638 * iio_device_register() - register a device with the IIO subsystem
639 * @indio_dev: Device structure filled by the device driver
640 **/
/* Wrapper so the registering driver's module is recorded as owner */
#define iio_device_register(indio_dev) \
	__iio_device_register((indio_dev), THIS_MODULE)
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
644 void iio_device_unregister(struct iio_dev *indio_dev);
645 /**
646 * devm_iio_device_register - Resource-managed iio_device_register()
647 * @dev: Device to allocate iio_dev for
648 * @indio_dev: Device structure filled by the device driver
649 *
650 * Managed iio_device_register. The IIO device registered with this
651 * function is automatically unregistered on driver detach. This function
652 * calls iio_device_register() internally. Refer to that function for more
653 * information.
654 *
655 * RETURNS:
656 * 0 on success, negative error number on failure.
657 */
/* Wrapper so the registering driver's module is recorded as owner */
#define devm_iio_device_register(dev, indio_dev) \
	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod);
662 int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
663 int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
664 void iio_device_release_direct_mode(struct iio_dev *indio_dev);
665
666 /*
667 * Helper functions that allow claim and release of direct mode
668 * in a fashion that doesn't generate many false positives from sparse.
669 * Note this must remain static inline in the header so that sparse
670 * can see the __acquire() marking. Revisit when sparse supports
671 * __cond_acquires()
672 */
iio_device_claim_direct(struct iio_dev * indio_dev)673 static inline bool iio_device_claim_direct(struct iio_dev *indio_dev)
674 {
675 int ret = iio_device_claim_direct_mode(indio_dev);
676
677 if (ret)
678 return false;
679
680 __acquire(iio_dev);
681
682 return true;
683 }
684
/* Counterpart to iio_device_claim_direct(); balances the __acquire() */
static inline void iio_device_release_direct(struct iio_dev *indio_dev)
{
	iio_device_release_direct_mode(indio_dev);
	__release(indio_dev);
}
690
691 /*
692 * This autocleanup logic is normally used via
693 * iio_device_claim_direct_scoped().
694 */
695 DEFINE_GUARD(iio_claim_direct, struct iio_dev *, iio_device_claim_direct_mode(_T),
696 iio_device_release_direct_mode(_T))
697
698 DEFINE_GUARD_COND(iio_claim_direct, _try, ({
699 struct iio_dev *dev;
700 int d = iio_device_claim_direct_mode(_T);
701
702 if (d < 0)
703 dev = NULL;
704 else
705 dev = _T;
706 dev;
707 }))
708
709 /**
710 * iio_device_claim_direct_scoped() - Scoped call to iio_device_claim_direct.
711 * @fail: What to do on failure to claim device.
712 * @iio_dev: Pointer to the IIO devices structure
713 */
714 #define iio_device_claim_direct_scoped(fail, iio_dev) \
715 scoped_cond_guard(iio_claim_direct_try, fail, iio_dev)
716
717 int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
718 void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
719
720 extern const struct bus_type iio_bus_type;
721
722 /**
723 * iio_device_put() - reference counted deallocation of struct device
724 * @indio_dev: IIO device structure containing the device
725 **/
iio_device_put(struct iio_dev * indio_dev)726 static inline void iio_device_put(struct iio_dev *indio_dev)
727 {
728 if (indio_dev)
729 put_device(&indio_dev->dev);
730 }
731
732 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
733 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
734
735 /**
736 * dev_to_iio_dev() - Get IIO device struct from a device struct
737 * @dev: The device embedded in the IIO device
738 *
739 * Note: The device must be a IIO device, otherwise the result is undefined.
740 */
dev_to_iio_dev(struct device * dev)741 static inline struct iio_dev *dev_to_iio_dev(struct device *dev)
742 {
743 return container_of(dev, struct iio_dev, dev);
744 }
745
746 /**
747 * iio_device_get() - increment reference count for the device
748 * @indio_dev: IIO device structure
749 *
750 * Returns: The passed IIO device
751 **/
iio_device_get(struct iio_dev * indio_dev)752 static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
753 {
754 return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
755 }
756
757 /**
758 * iio_device_set_parent() - assign parent device to the IIO device object
759 * @indio_dev: IIO device structure
760 * @parent: reference to parent device object
761 *
762 * This utility must be called between IIO device allocation
763 * (via devm_iio_device_alloc()) & IIO device registration
764 * (via iio_device_register() and devm_iio_device_register())).
765 * By default, the device allocation will also assign a parent device to
766 * the IIO device object. In cases where devm_iio_device_alloc() is used,
767 * sometimes the parent device must be different than the device used to
768 * manage the allocation.
769 * In that case, this helper should be used to change the parent, hence the
770 * requirement to call this between allocation & registration.
771 **/
iio_device_set_parent(struct iio_dev * indio_dev,struct device * parent)772 static inline void iio_device_set_parent(struct iio_dev *indio_dev,
773 struct device *parent)
774 {
775 indio_dev->dev.parent = parent;
776 }
777
778 /**
779 * iio_device_set_drvdata() - Set device driver data
780 * @indio_dev: IIO device structure
781 * @data: Driver specific data
782 *
783 * Allows to attach an arbitrary pointer to an IIO device, which can later be
784 * retrieved by iio_device_get_drvdata().
785 */
iio_device_set_drvdata(struct iio_dev * indio_dev,void * data)786 static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
787 {
788 dev_set_drvdata(&indio_dev->dev, data);
789 }
790
791 /**
792 * iio_device_get_drvdata() - Get device driver data
793 * @indio_dev: IIO device structure
794 *
795 * Returns the data previously set with iio_device_set_drvdata()
796 */
iio_device_get_drvdata(const struct iio_dev * indio_dev)797 static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
798 {
799 return dev_get_drvdata(&indio_dev->dev);
800 }
801
/*
 * Used to ensure the iio_priv() structure is aligned to allow that structure
 * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
 * must not share cachelines with the rest of the structure, thus making
 * them safe for use with non-coherent DMA.
 */
#define IIO_DMA_MINALIGN ARCH_DMA_MINALIGN

/*
 * Allocate an IIO device plus sizeof_priv bytes of driver-private storage
 * (retrievable via iio_priv()); @parent becomes the default parent device.
 */
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
810
/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
	/* priv is a __private field; ACCESS_PRIVATE() keeps sparse happy. */
	return ACCESS_PRIVATE(indio_dev, priv);
}
816
/* Free an IIO device previously allocated with iio_device_alloc(). */
void iio_device_free(struct iio_dev *indio_dev);
/* Managed (devres) counterpart of iio_device_alloc(). */
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);

/*
 * Wrapper that passes the caller's THIS_MODULE so the trigger keeps a
 * reference on the owning module.
 */
#define devm_iio_trigger_alloc(parent, fmt, ...) \
	__devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
__printf(3, 4)
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...);
/**
 * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
 * @indio_dev: IIO device structure for device
 **/
#if defined(CONFIG_DEBUG_FS)
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
#else
/* Stub when debugfs is compiled out: no dentry ever exists. */
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	return NULL;
}
#endif
838
/**
 * iio_device_suspend_triggering() - suspend trigger attached to an iio_dev
 * @indio_dev: iio_dev associated with the device that will have triggers suspended
 *
 * Return 0 if successful, negative otherwise
 **/
int iio_device_suspend_triggering(struct iio_dev *indio_dev);

/**
 * iio_device_resume_triggering() - resume trigger attached to an iio_dev
 * that was previously suspended with iio_device_suspend_triggering()
 * @indio_dev: iio_dev associated with the device that will have triggers resumed
 *
 * Return 0 if successful, negative otherwise
 **/
int iio_device_resume_triggering(struct iio_dev *indio_dev);
855
#ifdef CONFIG_ACPI
/* Read a mount matrix from the ACPI method @acpi_method; true on success. */
bool iio_read_acpi_mount_matrix(struct device *dev,
				struct iio_mount_matrix *orientation,
				char *acpi_method);
/* Look up the ACPI device name; optionally returns match data via @data. */
const char *iio_get_acpi_device_name_and_data(struct device *dev, const void **data);
#else
/* !CONFIG_ACPI stubs: report no matrix / no name available. */
static inline bool iio_read_acpi_mount_matrix(struct device *dev,
					      struct iio_mount_matrix *orientation,
					      char *acpi_method)
{
	return false;
}
static inline const char *
iio_get_acpi_device_name_and_data(struct device *dev, const void **data)
{
	return NULL;
}
#endif
/* Convenience wrapper when the caller does not need the match data. */
static inline const char *iio_get_acpi_device_name(struct device *dev)
{
	return iio_get_acpi_device_name_and_data(dev, NULL);
}
878
879 /**
880 * iio_get_current_scan_type - Get the current scan type for a channel
881 * @indio_dev: the IIO device to get the scan type for
882 * @chan: the channel to get the scan type for
883 *
884 * Most devices only have one scan type per channel and can just access it
885 * directly without calling this function. Core IIO code and drivers that
886 * implement ext_scan_type in the channel spec should use this function to
887 * get the current scan type for a channel.
888 *
889 * Returns: the current scan type for the channel or error.
890 */
891 static inline const struct iio_scan_type
iio_get_current_scan_type(const struct iio_dev * indio_dev,const struct iio_chan_spec * chan)892 *iio_get_current_scan_type(const struct iio_dev *indio_dev,
893 const struct iio_chan_spec *chan)
894 {
895 int ret;
896
897 if (chan->has_ext_scan_type) {
898 ret = indio_dev->info->get_current_scan_type(indio_dev, chan);
899 if (ret < 0)
900 return ERR_PTR(ret);
901
902 if (ret >= chan->num_ext_scan_type)
903 return ERR_PTR(-EINVAL);
904
905 return &chan->ext_scan_type[ret];
906 }
907
908 return &chan->scan_type;
909 }
910
/**
 * iio_get_masklength - Get length of the channels mask
 * @indio_dev: the IIO device to get the masklength for
 */
static inline unsigned int iio_get_masklength(const struct iio_dev *indio_dev)
{
	/* masklength is a __private field; ACCESS_PRIVATE() keeps sparse happy. */
	return ACCESS_PRIVATE(indio_dev, masklength);
}
919
/* Index of the currently active scan mask within available_scan_masks. */
int iio_active_scan_mask_index(struct iio_dev *indio_dev);

/**
 * iio_for_each_active_channel - Iterated over active channels
 * @indio_dev: the IIO device
 * @chan: Holds the index of the enabled channel
 */
#define iio_for_each_active_channel(indio_dev, chan) \
	for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
			 iio_get_masklength(indio_dev))

/* Format an IIO value (type/size/vals triple) into a sysfs buffer. */
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);

/* Parse a fixed-point decimal string into integer and fractional parts. */
int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
			int *fract);
935
/**
 * IIO_DEGREE_TO_RAD() - Convert degree to rad
 * @deg: A value in degree
 *
 * Returns the given value converted from degree to rad
 *
 * Fixed-point: pi scaled as 314159/100000; adding 9000000 (half the
 * divisor) rounds to nearest.
 */
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)

/**
 * IIO_RAD_TO_DEGREE() - Convert rad to degree
 * @rad: A value in rad
 *
 * Returns the given value converted from rad to degree
 *
 * Adding half of 314159 before dividing rounds to nearest.
 */
#define IIO_RAD_TO_DEGREE(rad) \
	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)

/**
 * IIO_G_TO_M_S_2() - Convert g to meter / second**2
 * @g: A value in g
 *
 * Returns the given value converted from g to meter / second**2
 *
 * Standard gravity scaled as 980665/100000; note this variant truncates.
 */
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)

/**
 * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
 * @ms2: A value in meter / second**2
 *
 * Returns the given value converted from meter / second**2 to g
 *
 * Adding half of 980665 before dividing rounds to nearest.
 */
#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
968
969 #endif /* _INDUSTRIAL_IO_H_ */
970