blob: e57b7bc035fc66a59c8c58ceab2dd6bd6e9e5bab [file] [log] [blame]
Mark Brownb83a3132011-05-11 19:59:58 +02001/*
2 * Register map access API
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
Stephen Warrenf5d6eba2012-03-09 13:17:28 -070013#include <linux/device.h>
Mark Brownb83a3132011-05-11 19:59:58 +020014#include <linux/slab.h>
Paul Gortmaker19694b52012-02-28 19:28:02 -050015#include <linux/export.h>
Mark Brownb83a3132011-05-11 19:59:58 +020016#include <linux/mutex.h>
17#include <linux/err.h>
Krystian Garbaciak6863ca62012-06-15 11:23:56 +010018#include <linux/rbtree.h>
Mark Brownb83a3132011-05-11 19:59:58 +020019
Mark Brownfb2736b2011-07-24 21:30:55 +010020#define CREATE_TRACE_POINTS
21#include <trace/events/regmap.h>
22
Mark Brown93de9122011-07-20 22:35:37 +010023#include "internal.h"
Mark Brownb83a3132011-05-11 19:59:58 +020024
Mark Brown1044c182012-07-06 14:10:23 +010025/*
26 * Sometimes for failures during very early init the trace
27 * infrastructure isn't available early enough to be used. For this
28 * sort of problem defining LOG_DEVICE will add printks for basic
29 * register I/O on a specific device.
30 */
31#undef LOG_DEVICE
32
33static int _regmap_update_bits(struct regmap *map, unsigned int reg,
34 unsigned int mask, unsigned int val,
35 bool *change);
36
Andrey Smirnovad278402013-01-12 12:54:12 -080037static int _regmap_bus_read(void *context, unsigned int reg,
38 unsigned int *val);
Andrey Smirnov07c320d2013-01-12 12:54:13 -080039static int _regmap_bus_formatted_write(void *context, unsigned int reg,
40 unsigned int val);
41static int _regmap_bus_raw_write(void *context, unsigned int reg,
42 unsigned int val);
Andrey Smirnovad278402013-01-12 12:54:12 -080043
Mark Brown0d509f22013-01-27 22:07:38 +080044static void async_cleanup(struct work_struct *work)
45{
46 struct regmap_async *async = container_of(work, struct regmap_async,
47 cleanup);
48
49 kfree(async->work_buf);
50 kfree(async);
51}
52
Davide Ciminaghi76aad392012-11-20 15:20:30 +010053bool regmap_reg_in_ranges(unsigned int reg,
54 const struct regmap_range *ranges,
55 unsigned int nranges)
56{
57 const struct regmap_range *r;
58 int i;
59
60 for (i = 0, r = ranges; i < nranges; i++, r++)
61 if (regmap_reg_in_range(reg, r))
62 return true;
63 return false;
64}
65EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
66
67static bool _regmap_check_range_table(struct regmap *map,
68 unsigned int reg,
69 const struct regmap_access_table *table)
70{
71 /* Check "no ranges" first */
72 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
73 return false;
74
75 /* In case zero "yes ranges" are supplied, any reg is OK */
76 if (!table->n_yes_ranges)
77 return true;
78
79 return regmap_reg_in_ranges(reg, table->yes_ranges,
80 table->n_yes_ranges);
81}
82
Mark Brown8de2f082011-08-10 17:14:41 +090083bool regmap_writeable(struct regmap *map, unsigned int reg)
84{
85 if (map->max_register && reg > map->max_register)
86 return false;
87
88 if (map->writeable_reg)
89 return map->writeable_reg(map->dev, reg);
90
Davide Ciminaghi76aad392012-11-20 15:20:30 +010091 if (map->wr_table)
92 return _regmap_check_range_table(map, reg, map->wr_table);
93
Mark Brown8de2f082011-08-10 17:14:41 +090094 return true;
95}
96
97bool regmap_readable(struct regmap *map, unsigned int reg)
98{
99 if (map->max_register && reg > map->max_register)
100 return false;
101
Wolfram Sang4191f192012-01-30 15:08:16 +0100102 if (map->format.format_write)
103 return false;
104
Mark Brown8de2f082011-08-10 17:14:41 +0900105 if (map->readable_reg)
106 return map->readable_reg(map->dev, reg);
107
Davide Ciminaghi76aad392012-11-20 15:20:30 +0100108 if (map->rd_table)
109 return _regmap_check_range_table(map, reg, map->rd_table);
110
Mark Brown8de2f082011-08-10 17:14:41 +0900111 return true;
112}
113
114bool regmap_volatile(struct regmap *map, unsigned int reg)
115{
Wolfram Sang4191f192012-01-30 15:08:16 +0100116 if (!regmap_readable(map, reg))
Mark Brown8de2f082011-08-10 17:14:41 +0900117 return false;
118
119 if (map->volatile_reg)
120 return map->volatile_reg(map->dev, reg);
121
Davide Ciminaghi76aad392012-11-20 15:20:30 +0100122 if (map->volatile_table)
123 return _regmap_check_range_table(map, reg, map->volatile_table);
124
Mark Brown8de2f082011-08-10 17:14:41 +0900125 return true;
126}
127
128bool regmap_precious(struct regmap *map, unsigned int reg)
129{
Wolfram Sang4191f192012-01-30 15:08:16 +0100130 if (!regmap_readable(map, reg))
Mark Brown8de2f082011-08-10 17:14:41 +0900131 return false;
132
133 if (map->precious_reg)
134 return map->precious_reg(map->dev, reg);
135
Davide Ciminaghi76aad392012-11-20 15:20:30 +0100136 if (map->precious_table)
137 return _regmap_check_range_table(map, reg, map->precious_table);
138
Mark Brown8de2f082011-08-10 17:14:41 +0900139 return false;
140}
141
Lars-Peter Clausen82cd9962011-11-08 18:37:25 +0100142static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
Paul Bollea8f28cf2012-10-08 22:06:30 +0200143 size_t num)
Lars-Peter Clausen82cd9962011-11-08 18:37:25 +0100144{
145 unsigned int i;
146
147 for (i = 0; i < num; i++)
148 if (!regmap_volatile(map, reg + i))
149 return false;
150
151 return true;
152}
153
Wolfram Sang9aa50752012-01-27 16:10:22 +0100154static void regmap_format_2_6_write(struct regmap *map,
155 unsigned int reg, unsigned int val)
156{
157 u8 *out = map->work_buf;
158
159 *out = (reg << 6) | val;
160}
161
Mark Brownb83a3132011-05-11 19:59:58 +0200162static void regmap_format_4_12_write(struct regmap *map,
163 unsigned int reg, unsigned int val)
164{
165 __be16 *out = map->work_buf;
166 *out = cpu_to_be16((reg << 12) | val);
167}
168
169static void regmap_format_7_9_write(struct regmap *map,
170 unsigned int reg, unsigned int val)
171{
172 __be16 *out = map->work_buf;
173 *out = cpu_to_be16((reg << 9) | val);
174}
175
Lars-Peter Clausen7e5ec632011-11-16 16:28:21 +0100176static void regmap_format_10_14_write(struct regmap *map,
177 unsigned int reg, unsigned int val)
178{
179 u8 *out = map->work_buf;
180
181 out[2] = val;
182 out[1] = (val >> 8) | (reg << 6);
183 out[0] = reg >> 2;
184}
185
Marc Reillyd939fb92012-03-16 12:11:43 +1100186static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
Mark Brownb83a3132011-05-11 19:59:58 +0200187{
188 u8 *b = buf;
189
Marc Reillyd939fb92012-03-16 12:11:43 +1100190 b[0] = val << shift;
Mark Brownb83a3132011-05-11 19:59:58 +0200191}
192
Stephen Warren141eba22012-05-24 10:47:26 -0600193static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
Mark Brownb83a3132011-05-11 19:59:58 +0200194{
195 __be16 *b = buf;
196
Marc Reillyd939fb92012-03-16 12:11:43 +1100197 b[0] = cpu_to_be16(val << shift);
Mark Brownb83a3132011-05-11 19:59:58 +0200198}
199
Stephen Warren141eba22012-05-24 10:47:26 -0600200static void regmap_format_16_native(void *buf, unsigned int val,
201 unsigned int shift)
202{
203 *(u16 *)buf = val << shift;
204}
205
Marc Reillyd939fb92012-03-16 12:11:43 +1100206static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
Marc Reillyea279fc2012-03-16 12:11:42 +1100207{
208 u8 *b = buf;
209
Marc Reillyd939fb92012-03-16 12:11:43 +1100210 val <<= shift;
211
Marc Reillyea279fc2012-03-16 12:11:42 +1100212 b[0] = val >> 16;
213 b[1] = val >> 8;
214 b[2] = val;
215}
216
Stephen Warren141eba22012-05-24 10:47:26 -0600217static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
Mark Brown7d5e5252012-02-17 15:58:25 -0800218{
219 __be32 *b = buf;
220
Marc Reillyd939fb92012-03-16 12:11:43 +1100221 b[0] = cpu_to_be32(val << shift);
Mark Brown7d5e5252012-02-17 15:58:25 -0800222}
223
Stephen Warren141eba22012-05-24 10:47:26 -0600224static void regmap_format_32_native(void *buf, unsigned int val,
225 unsigned int shift)
226{
227 *(u32 *)buf = val << shift;
228}
229
Mark Brownb83a3132011-05-11 19:59:58 +0200230static unsigned int regmap_parse_8(void *buf)
231{
232 u8 *b = buf;
233
234 return b[0];
235}
236
Stephen Warren141eba22012-05-24 10:47:26 -0600237static unsigned int regmap_parse_16_be(void *buf)
Mark Brownb83a3132011-05-11 19:59:58 +0200238{
239 __be16 *b = buf;
240
241 b[0] = be16_to_cpu(b[0]);
242
243 return b[0];
244}
245
Stephen Warren141eba22012-05-24 10:47:26 -0600246static unsigned int regmap_parse_16_native(void *buf)
247{
248 return *(u16 *)buf;
249}
250
Marc Reillyea279fc2012-03-16 12:11:42 +1100251static unsigned int regmap_parse_24(void *buf)
252{
253 u8 *b = buf;
254 unsigned int ret = b[2];
255 ret |= ((unsigned int)b[1]) << 8;
256 ret |= ((unsigned int)b[0]) << 16;
257
258 return ret;
259}
260
Stephen Warren141eba22012-05-24 10:47:26 -0600261static unsigned int regmap_parse_32_be(void *buf)
Mark Brown7d5e5252012-02-17 15:58:25 -0800262{
263 __be32 *b = buf;
264
265 b[0] = be32_to_cpu(b[0]);
266
267 return b[0];
268}
269
Stephen Warren141eba22012-05-24 10:47:26 -0600270static unsigned int regmap_parse_32_native(void *buf)
271{
272 return *(u32 *)buf;
273}
274
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200275static void regmap_lock_mutex(void *__map)
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600276{
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200277 struct regmap *map = __map;
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600278 mutex_lock(&map->mutex);
279}
280
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200281static void regmap_unlock_mutex(void *__map)
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600282{
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200283 struct regmap *map = __map;
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600284 mutex_unlock(&map->mutex);
285}
286
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200287static void regmap_lock_spinlock(void *__map)
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600288{
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200289 struct regmap *map = __map;
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600290 spin_lock(&map->spinlock);
291}
292
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200293static void regmap_unlock_spinlock(void *__map)
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600294{
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200295 struct regmap *map = __map;
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600296 spin_unlock(&map->spinlock);
297}
298
Mark Brown72b39f62012-05-08 17:44:40 +0100299static void dev_get_regmap_release(struct device *dev, void *res)
300{
301 /*
302 * We don't actually have anything to do here; the goal here
303 * is not to manage the regmap but to provide a simple way to
304 * get the regmap back given a struct device.
305 */
306}
307
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100308static bool _regmap_range_add(struct regmap *map,
309 struct regmap_range_node *data)
310{
311 struct rb_root *root = &map->range_tree;
312 struct rb_node **new = &(root->rb_node), *parent = NULL;
313
314 while (*new) {
315 struct regmap_range_node *this =
316 container_of(*new, struct regmap_range_node, node);
317
318 parent = *new;
319 if (data->range_max < this->range_min)
320 new = &((*new)->rb_left);
321 else if (data->range_min > this->range_max)
322 new = &((*new)->rb_right);
323 else
324 return false;
325 }
326
327 rb_link_node(&data->node, parent, new);
328 rb_insert_color(&data->node, root);
329
330 return true;
331}
332
333static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
334 unsigned int reg)
335{
336 struct rb_node *node = map->range_tree.rb_node;
337
338 while (node) {
339 struct regmap_range_node *this =
340 container_of(node, struct regmap_range_node, node);
341
342 if (reg < this->range_min)
343 node = node->rb_left;
344 else if (reg > this->range_max)
345 node = node->rb_right;
346 else
347 return this;
348 }
349
350 return NULL;
351}
352
353static void regmap_range_exit(struct regmap *map)
354{
355 struct rb_node *next;
356 struct regmap_range_node *range_node;
357
358 next = rb_first(&map->range_tree);
359 while (next) {
360 range_node = rb_entry(next, struct regmap_range_node, node);
361 next = rb_next(&range_node->node);
362 rb_erase(&range_node->node, &map->range_tree);
363 kfree(range_node);
364 }
365
366 kfree(map->selector_work_buf);
367}
368
Mark Brownb83a3132011-05-11 19:59:58 +0200369/**
370 * regmap_init(): Initialise register map
371 *
372 * @dev: Device that will be interacted with
373 * @bus: Bus-specific callbacks to use with device
Stephen Warren0135bbc2012-04-04 15:48:30 -0600374 * @bus_context: Data passed to bus-specific callbacks
Mark Brownb83a3132011-05-11 19:59:58 +0200375 * @config: Configuration for register map
376 *
377 * The return value will be an ERR_PTR() on error or a valid pointer to
378 * a struct regmap. This function should generally not be called
379 * directly, it should be called by bus-specific init functions.
380 */
381struct regmap *regmap_init(struct device *dev,
382 const struct regmap_bus *bus,
Stephen Warren0135bbc2012-04-04 15:48:30 -0600383 void *bus_context,
Mark Brownb83a3132011-05-11 19:59:58 +0200384 const struct regmap_config *config)
385{
Mark Brown72b39f62012-05-08 17:44:40 +0100386 struct regmap *map, **m;
Mark Brownb83a3132011-05-11 19:59:58 +0200387 int ret = -EINVAL;
Stephen Warren141eba22012-05-24 10:47:26 -0600388 enum regmap_endian reg_endian, val_endian;
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100389 int i, j;
Mark Brownb83a3132011-05-11 19:59:58 +0200390
391 if (!bus || !config)
Lars-Peter Clausenabbb18f2011-11-14 10:40:15 +0100392 goto err;
Mark Brownb83a3132011-05-11 19:59:58 +0200393
394 map = kzalloc(sizeof(*map), GFP_KERNEL);
395 if (map == NULL) {
396 ret = -ENOMEM;
397 goto err;
398 }
399
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200400 if (config->lock && config->unlock) {
401 map->lock = config->lock;
402 map->unlock = config->unlock;
403 map->lock_arg = config->lock_arg;
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600404 } else {
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +0200405 if (bus->fast_io) {
406 spin_lock_init(&map->spinlock);
407 map->lock = regmap_lock_spinlock;
408 map->unlock = regmap_unlock_spinlock;
409 } else {
410 mutex_init(&map->mutex);
411 map->lock = regmap_lock_mutex;
412 map->unlock = regmap_unlock_mutex;
413 }
414 map->lock_arg = map;
Stephen Warrenbacdbe02012-04-04 15:48:28 -0600415 }
Wolfram Sangc212acc2012-01-28 02:16:41 +0100416 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
Mark Brown82159ba2012-01-18 10:52:25 +0000417 map->format.pad_bytes = config->pad_bits / 8;
Wolfram Sangc212acc2012-01-28 02:16:41 +0100418 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
Fabio Estevam5494a982012-05-31 21:10:30 -0300419 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
420 config->val_bits + config->pad_bits, 8);
Marc Reillyd939fb92012-03-16 12:11:43 +1100421 map->reg_shift = config->pad_bits % 8;
Stephen Warrenf01ee602012-04-09 13:40:24 -0600422 if (config->reg_stride)
423 map->reg_stride = config->reg_stride;
424 else
425 map->reg_stride = 1;
Ashish Jangam2e33caf2012-04-30 23:23:40 +0100426 map->use_single_rw = config->use_single_rw;
Mark Brownb83a3132011-05-11 19:59:58 +0200427 map->dev = dev;
428 map->bus = bus;
Stephen Warren0135bbc2012-04-04 15:48:30 -0600429 map->bus_context = bus_context;
Mark Brown2e2ae662011-07-20 22:33:39 +0100430 map->max_register = config->max_register;
Davide Ciminaghi76aad392012-11-20 15:20:30 +0100431 map->wr_table = config->wr_table;
432 map->rd_table = config->rd_table;
433 map->volatile_table = config->volatile_table;
434 map->precious_table = config->precious_table;
Mark Brown2e2ae662011-07-20 22:33:39 +0100435 map->writeable_reg = config->writeable_reg;
436 map->readable_reg = config->readable_reg;
437 map->volatile_reg = config->volatile_reg;
Mark Brown2efe1642011-08-08 15:41:46 +0900438 map->precious_reg = config->precious_reg;
Dimitris Papastamos5d1729e2011-09-19 14:34:05 +0100439 map->cache_type = config->cache_type;
Mark Brown72b39f62012-05-08 17:44:40 +0100440 map->name = config->name;
Mark Brownb83a3132011-05-11 19:59:58 +0200441
Mark Brown0d509f22013-01-27 22:07:38 +0800442 spin_lock_init(&map->async_lock);
443 INIT_LIST_HEAD(&map->async_list);
444 init_waitqueue_head(&map->async_waitq);
445
Lars-Peter Clausen6f306442011-09-05 20:46:32 +0200446 if (config->read_flag_mask || config->write_flag_mask) {
447 map->read_flag_mask = config->read_flag_mask;
448 map->write_flag_mask = config->write_flag_mask;
449 } else {
450 map->read_flag_mask = bus->read_flag_mask;
451 }
452
Andrey Smirnovad278402013-01-12 12:54:12 -0800453 map->reg_read = _regmap_bus_read;
454
Stephen Warren141eba22012-05-24 10:47:26 -0600455 reg_endian = config->reg_format_endian;
456 if (reg_endian == REGMAP_ENDIAN_DEFAULT)
457 reg_endian = bus->reg_format_endian_default;
458 if (reg_endian == REGMAP_ENDIAN_DEFAULT)
459 reg_endian = REGMAP_ENDIAN_BIG;
460
461 val_endian = config->val_format_endian;
462 if (val_endian == REGMAP_ENDIAN_DEFAULT)
463 val_endian = bus->val_format_endian_default;
464 if (val_endian == REGMAP_ENDIAN_DEFAULT)
465 val_endian = REGMAP_ENDIAN_BIG;
466
Marc Reillyd939fb92012-03-16 12:11:43 +1100467 switch (config->reg_bits + map->reg_shift) {
Wolfram Sang9aa50752012-01-27 16:10:22 +0100468 case 2:
469 switch (config->val_bits) {
470 case 6:
471 map->format.format_write = regmap_format_2_6_write;
472 break;
473 default:
474 goto err_map;
475 }
476 break;
477
Mark Brownb83a3132011-05-11 19:59:58 +0200478 case 4:
479 switch (config->val_bits) {
480 case 12:
481 map->format.format_write = regmap_format_4_12_write;
482 break;
483 default:
484 goto err_map;
485 }
486 break;
487
488 case 7:
489 switch (config->val_bits) {
490 case 9:
491 map->format.format_write = regmap_format_7_9_write;
492 break;
493 default:
494 goto err_map;
495 }
496 break;
497
Lars-Peter Clausen7e5ec632011-11-16 16:28:21 +0100498 case 10:
499 switch (config->val_bits) {
500 case 14:
501 map->format.format_write = regmap_format_10_14_write;
502 break;
503 default:
504 goto err_map;
505 }
506 break;
507
Mark Brownb83a3132011-05-11 19:59:58 +0200508 case 8:
509 map->format.format_reg = regmap_format_8;
510 break;
511
512 case 16:
Stephen Warren141eba22012-05-24 10:47:26 -0600513 switch (reg_endian) {
514 case REGMAP_ENDIAN_BIG:
515 map->format.format_reg = regmap_format_16_be;
516 break;
517 case REGMAP_ENDIAN_NATIVE:
518 map->format.format_reg = regmap_format_16_native;
519 break;
520 default:
521 goto err_map;
522 }
Mark Brownb83a3132011-05-11 19:59:58 +0200523 break;
524
Mark Brown7d5e5252012-02-17 15:58:25 -0800525 case 32:
Stephen Warren141eba22012-05-24 10:47:26 -0600526 switch (reg_endian) {
527 case REGMAP_ENDIAN_BIG:
528 map->format.format_reg = regmap_format_32_be;
529 break;
530 case REGMAP_ENDIAN_NATIVE:
531 map->format.format_reg = regmap_format_32_native;
532 break;
533 default:
534 goto err_map;
535 }
Mark Brown7d5e5252012-02-17 15:58:25 -0800536 break;
537
Mark Brownb83a3132011-05-11 19:59:58 +0200538 default:
539 goto err_map;
540 }
541
542 switch (config->val_bits) {
543 case 8:
544 map->format.format_val = regmap_format_8;
545 map->format.parse_val = regmap_parse_8;
546 break;
547 case 16:
Stephen Warren141eba22012-05-24 10:47:26 -0600548 switch (val_endian) {
549 case REGMAP_ENDIAN_BIG:
550 map->format.format_val = regmap_format_16_be;
551 map->format.parse_val = regmap_parse_16_be;
552 break;
553 case REGMAP_ENDIAN_NATIVE:
554 map->format.format_val = regmap_format_16_native;
555 map->format.parse_val = regmap_parse_16_native;
556 break;
557 default:
558 goto err_map;
559 }
Mark Brownb83a3132011-05-11 19:59:58 +0200560 break;
Marc Reillyea279fc2012-03-16 12:11:42 +1100561 case 24:
Stephen Warren141eba22012-05-24 10:47:26 -0600562 if (val_endian != REGMAP_ENDIAN_BIG)
563 goto err_map;
Marc Reillyea279fc2012-03-16 12:11:42 +1100564 map->format.format_val = regmap_format_24;
565 map->format.parse_val = regmap_parse_24;
566 break;
Mark Brown7d5e5252012-02-17 15:58:25 -0800567 case 32:
Stephen Warren141eba22012-05-24 10:47:26 -0600568 switch (val_endian) {
569 case REGMAP_ENDIAN_BIG:
570 map->format.format_val = regmap_format_32_be;
571 map->format.parse_val = regmap_parse_32_be;
572 break;
573 case REGMAP_ENDIAN_NATIVE:
574 map->format.format_val = regmap_format_32_native;
575 map->format.parse_val = regmap_parse_32_native;
576 break;
577 default:
578 goto err_map;
579 }
Mark Brown7d5e5252012-02-17 15:58:25 -0800580 break;
Mark Brownb83a3132011-05-11 19:59:58 +0200581 }
582
Stephen Warren141eba22012-05-24 10:47:26 -0600583 if (map->format.format_write) {
584 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
585 (val_endian != REGMAP_ENDIAN_BIG))
586 goto err_map;
Mark Brown7a647612012-04-30 23:26:32 +0100587 map->use_single_rw = true;
Stephen Warren141eba22012-05-24 10:47:26 -0600588 }
Mark Brown7a647612012-04-30 23:26:32 +0100589
Mark Brownb83a3132011-05-11 19:59:58 +0200590 if (!map->format.format_write &&
591 !(map->format.format_reg && map->format.format_val))
592 goto err_map;
593
Mark Brown82159ba2012-01-18 10:52:25 +0000594 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
Mark Brownb83a3132011-05-11 19:59:58 +0200595 if (map->work_buf == NULL) {
596 ret = -ENOMEM;
Mark Brown5204f5e2011-09-05 08:07:47 -0700597 goto err_map;
Mark Brownb83a3132011-05-11 19:59:58 +0200598 }
599
Andrey Smirnov07c320d2013-01-12 12:54:13 -0800600 if (map->format.format_write)
601 map->reg_write = _regmap_bus_formatted_write;
602 else if (map->format.format_val)
603 map->reg_write = _regmap_bus_raw_write;
604
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100605 map->range_tree = RB_ROOT;
Mark Browne3549cd2012-10-02 20:17:15 +0100606 for (i = 0; i < config->num_ranges; i++) {
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100607 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
608 struct regmap_range_node *new;
609
610 /* Sanity check */
Mark Brown061adc02012-10-03 12:17:51 +0100611 if (range_cfg->range_max < range_cfg->range_min) {
612 dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
613 range_cfg->range_max, range_cfg->range_min);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100614 goto err_range;
Mark Brown061adc02012-10-03 12:17:51 +0100615 }
616
617 if (range_cfg->range_max > map->max_register) {
618 dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
619 range_cfg->range_max, map->max_register);
620 goto err_range;
621 }
622
623 if (range_cfg->selector_reg > map->max_register) {
624 dev_err(map->dev,
625 "Invalid range %d: selector out of map\n", i);
626 goto err_range;
627 }
628
629 if (range_cfg->window_len == 0) {
630 dev_err(map->dev, "Invalid range %d: window_len 0\n",
631 i);
632 goto err_range;
633 }
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100634
635 /* Make sure, that this register range has no selector
636 or data window within its boundary */
Mark Browne3549cd2012-10-02 20:17:15 +0100637 for (j = 0; j < config->num_ranges; j++) {
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100638 unsigned sel_reg = config->ranges[j].selector_reg;
639 unsigned win_min = config->ranges[j].window_start;
640 unsigned win_max = win_min +
641 config->ranges[j].window_len - 1;
642
643 if (range_cfg->range_min <= sel_reg &&
644 sel_reg <= range_cfg->range_max) {
Mark Brown061adc02012-10-03 12:17:51 +0100645 dev_err(map->dev,
646 "Range %d: selector for %d in window\n",
647 i, j);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100648 goto err_range;
649 }
650
651 if (!(win_max < range_cfg->range_min ||
652 win_min > range_cfg->range_max)) {
Mark Brown061adc02012-10-03 12:17:51 +0100653 dev_err(map->dev,
654 "Range %d: window for %d in window\n",
655 i, j);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100656 goto err_range;
657 }
658 }
659
660 new = kzalloc(sizeof(*new), GFP_KERNEL);
661 if (new == NULL) {
662 ret = -ENOMEM;
663 goto err_range;
664 }
665
Mark Brown4b020b32012-10-03 13:13:16 +0100666 new->map = map;
Mark Brownd058bb42012-10-03 12:40:47 +0100667 new->name = range_cfg->name;
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100668 new->range_min = range_cfg->range_min;
669 new->range_max = range_cfg->range_max;
670 new->selector_reg = range_cfg->selector_reg;
671 new->selector_mask = range_cfg->selector_mask;
672 new->selector_shift = range_cfg->selector_shift;
673 new->window_start = range_cfg->window_start;
674 new->window_len = range_cfg->window_len;
675
676 if (_regmap_range_add(map, new) == false) {
Mark Brown061adc02012-10-03 12:17:51 +0100677 dev_err(map->dev, "Failed to add range %d\n", i);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100678 kfree(new);
679 goto err_range;
680 }
681
682 if (map->selector_work_buf == NULL) {
683 map->selector_work_buf =
684 kzalloc(map->format.buf_size, GFP_KERNEL);
685 if (map->selector_work_buf == NULL) {
686 ret = -ENOMEM;
687 goto err_range;
688 }
689 }
690 }
Mark Brown052d2cd2011-11-21 19:05:13 +0000691
Lars-Peter Clausene5e3b8a2011-11-16 16:28:16 +0100692 ret = regcache_init(map, config);
Mark Brown0ff3e622012-10-04 17:39:13 +0100693 if (ret != 0)
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100694 goto err_range;
695
696 regmap_debugfs_init(map, config->name);
Dimitris Papastamos5d1729e2011-09-19 14:34:05 +0100697
Mark Brown72b39f62012-05-08 17:44:40 +0100698 /* Add a devres resource for dev_get_regmap() */
699 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
700 if (!m) {
701 ret = -ENOMEM;
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100702 goto err_debugfs;
Mark Brown72b39f62012-05-08 17:44:40 +0100703 }
704 *m = map;
705 devres_add(dev, m);
706
Mark Brownb83a3132011-05-11 19:59:58 +0200707 return map;
708
Stephen Warrenbfaa25f2012-05-23 16:30:53 -0600709err_debugfs:
710 regmap_debugfs_exit(map);
Mark Brown72b39f62012-05-08 17:44:40 +0100711 regcache_exit(map);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100712err_range:
713 regmap_range_exit(map);
Lars-Peter Clausen58072cb2011-11-10 18:15:15 +0100714 kfree(map->work_buf);
Mark Brownb83a3132011-05-11 19:59:58 +0200715err_map:
716 kfree(map);
717err:
718 return ERR_PTR(ret);
719}
720EXPORT_SYMBOL_GPL(regmap_init);
721
Mark Brownc0eb4672012-01-30 19:56:52 +0000722static void devm_regmap_release(struct device *dev, void *res)
723{
724 regmap_exit(*(struct regmap **)res);
725}
726
727/**
728 * devm_regmap_init(): Initialise managed register map
729 *
730 * @dev: Device that will be interacted with
731 * @bus: Bus-specific callbacks to use with device
Stephen Warren0135bbc2012-04-04 15:48:30 -0600732 * @bus_context: Data passed to bus-specific callbacks
Mark Brownc0eb4672012-01-30 19:56:52 +0000733 * @config: Configuration for register map
734 *
735 * The return value will be an ERR_PTR() on error or a valid pointer
736 * to a struct regmap. This function should generally not be called
737 * directly, it should be called by bus-specific init functions. The
738 * map will be automatically freed by the device management code.
739 */
740struct regmap *devm_regmap_init(struct device *dev,
741 const struct regmap_bus *bus,
Stephen Warren0135bbc2012-04-04 15:48:30 -0600742 void *bus_context,
Mark Brownc0eb4672012-01-30 19:56:52 +0000743 const struct regmap_config *config)
744{
745 struct regmap **ptr, *regmap;
746
747 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
748 if (!ptr)
749 return ERR_PTR(-ENOMEM);
750
Stephen Warren0135bbc2012-04-04 15:48:30 -0600751 regmap = regmap_init(dev, bus, bus_context, config);
Mark Brownc0eb4672012-01-30 19:56:52 +0000752 if (!IS_ERR(regmap)) {
753 *ptr = regmap;
754 devres_add(dev, ptr);
755 } else {
756 devres_free(ptr);
757 }
758
759 return regmap;
760}
761EXPORT_SYMBOL_GPL(devm_regmap_init);
762
Mark Brownb83a3132011-05-11 19:59:58 +0200763/**
Mark Brownbf315172011-12-03 17:06:20 +0000764 * regmap_reinit_cache(): Reinitialise the current register cache
765 *
766 * @map: Register map to operate on.
767 * @config: New configuration. Only the cache data will be used.
768 *
769 * Discard any existing register cache for the map and initialize a
770 * new cache. This can be used to restore the cache to defaults or to
771 * update the cache configuration to reflect runtime discovery of the
772 * hardware.
Dimitris Papastamos4d879512012-07-27 14:54:15 +0100773 *
774 * No explicit locking is done here, the user needs to ensure that
775 * this function will not race with other calls to regmap.
Mark Brownbf315172011-12-03 17:06:20 +0000776 */
777int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
778{
Mark Brownbf315172011-12-03 17:06:20 +0000779 regcache_exit(map);
Mark Browna24f64a2012-01-26 18:30:16 +0000780 regmap_debugfs_exit(map);
Mark Brownbf315172011-12-03 17:06:20 +0000781
782 map->max_register = config->max_register;
783 map->writeable_reg = config->writeable_reg;
784 map->readable_reg = config->readable_reg;
785 map->volatile_reg = config->volatile_reg;
786 map->precious_reg = config->precious_reg;
787 map->cache_type = config->cache_type;
788
Stephen Warrend3c242e2012-04-04 15:48:29 -0600789 regmap_debugfs_init(map, config->name);
Mark Browna24f64a2012-01-26 18:30:16 +0000790
Mark Brown421e8d22012-01-20 13:39:37 +0000791 map->cache_bypass = false;
792 map->cache_only = false;
793
Dimitris Papastamos4d879512012-07-27 14:54:15 +0100794 return regcache_init(map, config);
Mark Brownbf315172011-12-03 17:06:20 +0000795}
Mark Brown752a6a52012-05-14 10:00:12 +0100796EXPORT_SYMBOL_GPL(regmap_reinit_cache);
Mark Brownbf315172011-12-03 17:06:20 +0000797
798/**
Mark Brownb83a3132011-05-11 19:59:58 +0200799 * regmap_exit(): Free a previously allocated register map
800 */
801void regmap_exit(struct regmap *map)
802{
Dimitris Papastamos5d1729e2011-09-19 14:34:05 +0100803 regcache_exit(map);
Mark Brown31244e32011-07-20 22:56:53 +0100804 regmap_debugfs_exit(map);
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100805 regmap_range_exit(map);
Stephen Warren0135bbc2012-04-04 15:48:30 -0600806 if (map->bus->free_context)
807 map->bus->free_context(map->bus_context);
Mark Brownb83a3132011-05-11 19:59:58 +0200808 kfree(map->work_buf);
Mark Brownb83a3132011-05-11 19:59:58 +0200809 kfree(map);
810}
811EXPORT_SYMBOL_GPL(regmap_exit);
812
Mark Brown72b39f62012-05-08 17:44:40 +0100813static int dev_get_regmap_match(struct device *dev, void *res, void *data)
814{
815 struct regmap **r = res;
816 if (!r || !*r) {
817 WARN_ON(!r || !*r);
818 return 0;
819 }
820
821 /* If the user didn't specify a name match any */
822 if (data)
823 return (*r)->name == data;
824 else
825 return 1;
826}
827
828/**
829 * dev_get_regmap(): Obtain the regmap (if any) for a device
830 *
831 * @dev: Device to retrieve the map for
832 * @name: Optional name for the register map, usually NULL.
833 *
834 * Returns the regmap for the device if one is present, or NULL. If
835 * name is specified then it must match the name specified when
836 * registering the device, if it is NULL then the first regmap found
837 * will be used. Devices with multiple register maps are very rare,
838 * generic code should normally not need to specify a name.
839 */
840struct regmap *dev_get_regmap(struct device *dev, const char *name)
841{
842 struct regmap **r = devres_find(dev, dev_get_regmap_release,
843 dev_get_regmap_match, (void *)name);
844
845 if (!r)
846 return NULL;
847 return *r;
848}
849EXPORT_SYMBOL_GPL(dev_get_regmap);
850
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100851static int _regmap_select_page(struct regmap *map, unsigned int *reg,
Mark Brown98bc7df2012-10-04 17:31:11 +0100852 struct regmap_range_node *range,
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100853 unsigned int val_num)
854{
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100855 void *orig_work_buf;
856 unsigned int win_offset;
857 unsigned int win_page;
858 bool page_chg;
859 int ret;
860
Mark Brown98bc7df2012-10-04 17:31:11 +0100861 win_offset = (*reg - range->range_min) % range->window_len;
862 win_page = (*reg - range->range_min) / range->window_len;
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100863
Mark Brown98bc7df2012-10-04 17:31:11 +0100864 if (val_num > 1) {
865 /* Bulk write shouldn't cross range boundary */
866 if (*reg + val_num - 1 > range->range_max)
867 return -EINVAL;
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100868
Mark Brown98bc7df2012-10-04 17:31:11 +0100869 /* ... or single page boundary */
870 if (val_num > range->window_len - win_offset)
871 return -EINVAL;
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100872 }
873
Mark Brown98bc7df2012-10-04 17:31:11 +0100874 /* It is possible to have selector register inside data window.
875 In that case, selector register is located on every page and
876 it needs no page switching, when accessed alone. */
877 if (val_num > 1 ||
878 range->window_start + win_offset != range->selector_reg) {
879 /* Use separate work_buf during page switching */
880 orig_work_buf = map->work_buf;
881 map->work_buf = map->selector_work_buf;
882
883 ret = _regmap_update_bits(map, range->selector_reg,
884 range->selector_mask,
885 win_page << range->selector_shift,
886 &page_chg);
887
888 map->work_buf = orig_work_buf;
889
Mark Brown0ff3e622012-10-04 17:39:13 +0100890 if (ret != 0)
Mark Brown98bc7df2012-10-04 17:31:11 +0100891 return ret;
892 }
893
894 *reg = range->window_start + win_offset;
895
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100896 return 0;
897}
898
Mark Brownb83a3132011-05-11 19:59:58 +0200899static int _regmap_raw_write(struct regmap *map, unsigned int reg,
Mark Brown0d509f22013-01-27 22:07:38 +0800900 const void *val, size_t val_len, bool async)
Mark Brownb83a3132011-05-11 19:59:58 +0200901{
Mark Brown98bc7df2012-10-04 17:31:11 +0100902 struct regmap_range_node *range;
Mark Brown0d509f22013-01-27 22:07:38 +0800903 unsigned long flags;
Lars-Peter Clausen6f306442011-09-05 20:46:32 +0200904 u8 *u8 = map->work_buf;
Mark Brown0d509f22013-01-27 22:07:38 +0800905 void *work_val = map->work_buf + map->format.reg_bytes +
906 map->format.pad_bytes;
Mark Brownb83a3132011-05-11 19:59:58 +0200907 void *buf;
908 int ret = -ENOTSUPP;
909 size_t len;
Mark Brown73304782011-07-24 11:46:20 +0100910 int i;
911
912 /* Check for unwritable registers before we start */
913 if (map->writeable_reg)
914 for (i = 0; i < val_len / map->format.val_bytes; i++)
Stephen Warrenf01ee602012-04-09 13:40:24 -0600915 if (!map->writeable_reg(map->dev,
916 reg + (i * map->reg_stride)))
Mark Brown73304782011-07-24 11:46:20 +0100917 return -EINVAL;
Mark Brownb83a3132011-05-11 19:59:58 +0200918
Laxman Dewanganc9157192012-02-10 21:30:27 +0530919 if (!map->cache_bypass && map->format.parse_val) {
920 unsigned int ival;
921 int val_bytes = map->format.val_bytes;
922 for (i = 0; i < val_len / val_bytes; i++) {
923 memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
924 ival = map->format.parse_val(map->work_buf);
Stephen Warrenf01ee602012-04-09 13:40:24 -0600925 ret = regcache_write(map, reg + (i * map->reg_stride),
926 ival);
Laxman Dewanganc9157192012-02-10 21:30:27 +0530927 if (ret) {
928 dev_err(map->dev,
Mark Brown6d04b8a2012-10-26 19:05:32 +0100929 "Error in caching of register: %x ret: %d\n",
Laxman Dewanganc9157192012-02-10 21:30:27 +0530930 reg + i, ret);
931 return ret;
932 }
933 }
934 if (map->cache_only) {
935 map->cache_dirty = true;
936 return 0;
937 }
938 }
939
Mark Brown98bc7df2012-10-04 17:31:11 +0100940 range = _regmap_range_lookup(map, reg);
941 if (range) {
Mark Brown8a2ceac2012-10-04 18:20:18 +0100942 int val_num = val_len / map->format.val_bytes;
943 int win_offset = (reg - range->range_min) % range->window_len;
944 int win_residue = range->window_len - win_offset;
945
946 /* If the write goes beyond the end of the window split it */
947 while (val_num > win_residue) {
Fabio Estevam1a61cfe2012-10-25 14:07:18 -0200948 dev_dbg(map->dev, "Writing window %d/%zu\n",
Mark Brown8a2ceac2012-10-04 18:20:18 +0100949 win_residue, val_len / map->format.val_bytes);
950 ret = _regmap_raw_write(map, reg, val, win_residue *
Mark Brown0d509f22013-01-27 22:07:38 +0800951 map->format.val_bytes, async);
Mark Brown8a2ceac2012-10-04 18:20:18 +0100952 if (ret != 0)
953 return ret;
954
955 reg += win_residue;
956 val_num -= win_residue;
957 val += win_residue * map->format.val_bytes;
958 val_len -= win_residue * map->format.val_bytes;
959
960 win_offset = (reg - range->range_min) %
961 range->window_len;
962 win_residue = range->window_len - win_offset;
963 }
964
965 ret = _regmap_select_page(map, &reg, range, val_num);
Mark Brown0ff3e622012-10-04 17:39:13 +0100966 if (ret != 0)
Mark Brown98bc7df2012-10-04 17:31:11 +0100967 return ret;
968 }
Krystian Garbaciak6863ca62012-06-15 11:23:56 +0100969
Marc Reillyd939fb92012-03-16 12:11:43 +1100970 map->format.format_reg(map->work_buf, reg, map->reg_shift);
Mark Brownb83a3132011-05-11 19:59:58 +0200971
Lars-Peter Clausen6f306442011-09-05 20:46:32 +0200972 u8[0] |= map->write_flag_mask;
973
Mark Brown0d509f22013-01-27 22:07:38 +0800974 if (async && map->bus->async_write) {
975 struct regmap_async *async = map->bus->async_alloc();
976 if (!async)
977 return -ENOMEM;
978
979 async->work_buf = kzalloc(map->format.buf_size,
980 GFP_KERNEL | GFP_DMA);
981 if (!async->work_buf) {
982 kfree(async);
983 return -ENOMEM;
984 }
985
986 INIT_WORK(&async->cleanup, async_cleanup);
987 async->map = map;
988
989 /* If the caller supplied the value we can use it safely. */
990 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
991 map->format.reg_bytes + map->format.val_bytes);
992 if (val == work_val)
993 val = async->work_buf + map->format.pad_bytes +
994 map->format.reg_bytes;
995
996 spin_lock_irqsave(&map->async_lock, flags);
997 list_add_tail(&async->list, &map->async_list);
998 spin_unlock_irqrestore(&map->async_lock, flags);
999
1000 ret = map->bus->async_write(map->bus_context, async->work_buf,
1001 map->format.reg_bytes +
1002 map->format.pad_bytes,
1003 val, val_len, async);
1004
1005 if (ret != 0) {
1006 dev_err(map->dev, "Failed to schedule write: %d\n",
1007 ret);
1008
1009 spin_lock_irqsave(&map->async_lock, flags);
1010 list_del(&async->list);
1011 spin_unlock_irqrestore(&map->async_lock, flags);
1012
1013 kfree(async->work_buf);
1014 kfree(async);
1015 }
1016 }
1017
Mark Brownfb2736b2011-07-24 21:30:55 +01001018 trace_regmap_hw_write_start(map->dev, reg,
1019 val_len / map->format.val_bytes);
1020
Mark Brown2547e202011-07-20 21:47:22 +01001021 /* If we're doing a single register write we can probably just
1022 * send the work_buf directly, otherwise try to do a gather
1023 * write.
1024 */
Mark Brown0d509f22013-01-27 22:07:38 +08001025 if (val == work_val)
Stephen Warren0135bbc2012-04-04 15:48:30 -06001026 ret = map->bus->write(map->bus_context, map->work_buf,
Mark Brown82159ba2012-01-18 10:52:25 +00001027 map->format.reg_bytes +
1028 map->format.pad_bytes +
1029 val_len);
Mark Brown2547e202011-07-20 21:47:22 +01001030 else if (map->bus->gather_write)
Stephen Warren0135bbc2012-04-04 15:48:30 -06001031 ret = map->bus->gather_write(map->bus_context, map->work_buf,
Mark Brown82159ba2012-01-18 10:52:25 +00001032 map->format.reg_bytes +
1033 map->format.pad_bytes,
Mark Brownb83a3132011-05-11 19:59:58 +02001034 val, val_len);
1035
Mark Brown2547e202011-07-20 21:47:22 +01001036 /* If that didn't work fall back on linearising by hand. */
Mark Brownb83a3132011-05-11 19:59:58 +02001037 if (ret == -ENOTSUPP) {
Mark Brown82159ba2012-01-18 10:52:25 +00001038 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1039 buf = kzalloc(len, GFP_KERNEL);
Mark Brownb83a3132011-05-11 19:59:58 +02001040 if (!buf)
1041 return -ENOMEM;
1042
1043 memcpy(buf, map->work_buf, map->format.reg_bytes);
Mark Brown82159ba2012-01-18 10:52:25 +00001044 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1045 val, val_len);
Stephen Warren0135bbc2012-04-04 15:48:30 -06001046 ret = map->bus->write(map->bus_context, buf, len);
Mark Brownb83a3132011-05-11 19:59:58 +02001047
1048 kfree(buf);
1049 }
1050
Mark Brownfb2736b2011-07-24 21:30:55 +01001051 trace_regmap_hw_write_done(map->dev, reg,
1052 val_len / map->format.val_bytes);
1053
Mark Brownb83a3132011-05-11 19:59:58 +02001054 return ret;
1055}
1056
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001057static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1058 unsigned int val)
1059{
1060 int ret;
1061 struct regmap_range_node *range;
1062 struct regmap *map = context;
1063
1064 BUG_ON(!map->format.format_write);
1065
1066 range = _regmap_range_lookup(map, reg);
1067 if (range) {
1068 ret = _regmap_select_page(map, &reg, range, 1);
1069 if (ret != 0)
1070 return ret;
1071 }
1072
1073 map->format.format_write(map, reg, val);
1074
1075 trace_regmap_hw_write_start(map->dev, reg, 1);
1076
1077 ret = map->bus->write(map->bus_context, map->work_buf,
1078 map->format.buf_size);
1079
1080 trace_regmap_hw_write_done(map->dev, reg, 1);
1081
1082 return ret;
1083}
1084
1085static int _regmap_bus_raw_write(void *context, unsigned int reg,
1086 unsigned int val)
1087{
1088 struct regmap *map = context;
1089
1090 BUG_ON(!map->format.format_val);
1091
1092 map->format.format_val(map->work_buf + map->format.reg_bytes
1093 + map->format.pad_bytes, val, 0);
1094 return _regmap_raw_write(map, reg,
1095 map->work_buf +
1096 map->format.reg_bytes +
1097 map->format.pad_bytes,
Mark Brown0d509f22013-01-27 22:07:38 +08001098 map->format.val_bytes, false);
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001099}
1100
Dimitris Papastamos4d2dc092011-09-29 10:39:07 +01001101int _regmap_write(struct regmap *map, unsigned int reg,
1102 unsigned int val)
Mark Brownb83a3132011-05-11 19:59:58 +02001103{
Mark Brownfb2736b2011-07-24 21:30:55 +01001104 int ret;
Mark Brownb83a3132011-05-11 19:59:58 +02001105
Laxman Dewanganc9157192012-02-10 21:30:27 +05301106 if (!map->cache_bypass && map->format.format_write) {
Dimitris Papastamos5d1729e2011-09-19 14:34:05 +01001107 ret = regcache_write(map, reg, val);
1108 if (ret != 0)
1109 return ret;
Mark Brown8ae0d7e2011-10-26 10:34:22 +02001110 if (map->cache_only) {
1111 map->cache_dirty = true;
Dimitris Papastamos5d1729e2011-09-19 14:34:05 +01001112 return 0;
Mark Brown8ae0d7e2011-10-26 10:34:22 +02001113 }
Dimitris Papastamos5d1729e2011-09-19 14:34:05 +01001114 }
1115
Mark Brown1044c182012-07-06 14:10:23 +01001116#ifdef LOG_DEVICE
1117 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1118 dev_info(map->dev, "%x <= %x\n", reg, val);
1119#endif
1120
Mark Brownfb2736b2011-07-24 21:30:55 +01001121 trace_regmap_reg_write(map->dev, reg, val);
1122
Andrey Smirnov07c320d2013-01-12 12:54:13 -08001123 return map->reg_write(map, reg, val);
Mark Brownb83a3132011-05-11 19:59:58 +02001124}
1125
1126/**
1127 * regmap_write(): Write a value to a single register
1128 *
1129 * @map: Register map to write to
1130 * @reg: Register to write to
1131 * @val: Value to be written
1132 *
1133 * A value of zero will be returned on success, a negative errno will
1134 * be returned in error cases.
1135 */
1136int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1137{
1138 int ret;
1139
Stephen Warrenf01ee602012-04-09 13:40:24 -06001140 if (reg % map->reg_stride)
1141 return -EINVAL;
1142
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001143 map->lock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001144
1145 ret = _regmap_write(map, reg, val);
1146
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001147 map->unlock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001148
1149 return ret;
1150}
1151EXPORT_SYMBOL_GPL(regmap_write);
1152
1153/**
1154 * regmap_raw_write(): Write raw values to one or more registers
1155 *
1156 * @map: Register map to write to
1157 * @reg: Initial register to write to
1158 * @val: Block of data to be written, laid out for direct transmission to the
1159 * device
1160 * @val_len: Length of data pointed to by val.
1161 *
1162 * This function is intended to be used for things like firmware
1163 * download where a large block of data needs to be transferred to the
1164 * device. No formatting will be done on the data provided.
1165 *
1166 * A value of zero will be returned on success, a negative errno will
1167 * be returned in error cases.
1168 */
1169int regmap_raw_write(struct regmap *map, unsigned int reg,
1170 const void *val, size_t val_len)
1171{
1172 int ret;
1173
Stephen Warren851960b2012-04-06 15:16:03 -06001174 if (val_len % map->format.val_bytes)
1175 return -EINVAL;
Stephen Warrenf01ee602012-04-09 13:40:24 -06001176 if (reg % map->reg_stride)
1177 return -EINVAL;
Stephen Warren851960b2012-04-06 15:16:03 -06001178
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001179 map->lock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001180
Mark Brown0d509f22013-01-27 22:07:38 +08001181 ret = _regmap_raw_write(map, reg, val, val_len, false);
Mark Brownb83a3132011-05-11 19:59:58 +02001182
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001183 map->unlock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001184
1185 return ret;
1186}
1187EXPORT_SYMBOL_GPL(regmap_raw_write);
1188
Laxman Dewangan8eaeb212012-02-12 19:49:43 +05301189/*
1190 * regmap_bulk_write(): Write multiple registers to the device
1191 *
1192 * @map: Register map to write to
1193 * @reg: First register to be write from
1194 * @val: Block of data to be written, in native register size for device
1195 * @val_count: Number of registers to write
1196 *
1197 * This function is intended to be used for writing a large block of
1198 * data to be device either in single transfer or multiple transfer.
1199 *
1200 * A value of zero will be returned on success, a negative errno will
1201 * be returned in error cases.
1202 */
1203int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1204 size_t val_count)
1205{
1206 int ret = 0, i;
1207 size_t val_bytes = map->format.val_bytes;
1208 void *wval;
1209
1210 if (!map->format.parse_val)
1211 return -EINVAL;
Stephen Warrenf01ee602012-04-09 13:40:24 -06001212 if (reg % map->reg_stride)
1213 return -EINVAL;
Laxman Dewangan8eaeb212012-02-12 19:49:43 +05301214
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001215 map->lock(map->lock_arg);
Laxman Dewangan8eaeb212012-02-12 19:49:43 +05301216
1217 /* No formatting is require if val_byte is 1 */
1218 if (val_bytes == 1) {
1219 wval = (void *)val;
1220 } else {
1221 wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
1222 if (!wval) {
1223 ret = -ENOMEM;
1224 dev_err(map->dev, "Error in memory allocation\n");
1225 goto out;
1226 }
1227 for (i = 0; i < val_count * val_bytes; i += val_bytes)
1228 map->format.parse_val(wval + i);
1229 }
Ashish Jangam2e33caf2012-04-30 23:23:40 +01001230 /*
1231 * Some devices does not support bulk write, for
1232 * them we have a series of single write operations.
1233 */
1234 if (map->use_single_rw) {
1235 for (i = 0; i < val_count; i++) {
1236 ret = regmap_raw_write(map,
Mark Brown0d509f22013-01-27 22:07:38 +08001237 reg + (i * map->reg_stride),
1238 val + (i * val_bytes),
1239 val_bytes);
Ashish Jangam2e33caf2012-04-30 23:23:40 +01001240 if (ret != 0)
1241 return ret;
1242 }
1243 } else {
Mark Brown0d509f22013-01-27 22:07:38 +08001244 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
1245 false);
Ashish Jangam2e33caf2012-04-30 23:23:40 +01001246 }
Laxman Dewangan8eaeb212012-02-12 19:49:43 +05301247
1248 if (val_bytes != 1)
1249 kfree(wval);
1250
1251out:
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001252 map->unlock(map->lock_arg);
Laxman Dewangan8eaeb212012-02-12 19:49:43 +05301253 return ret;
1254}
1255EXPORT_SYMBOL_GPL(regmap_bulk_write);
1256
Mark Brown0d509f22013-01-27 22:07:38 +08001257/**
1258 * regmap_raw_write_async(): Write raw values to one or more registers
1259 * asynchronously
1260 *
1261 * @map: Register map to write to
1262 * @reg: Initial register to write to
1263 * @val: Block of data to be written, laid out for direct transmission to the
1264 * device. Must be valid until regmap_async_complete() is called.
1265 * @val_len: Length of data pointed to by val.
1266 *
1267 * This function is intended to be used for things like firmware
1268 * download where a large block of data needs to be transferred to the
1269 * device. No formatting will be done on the data provided.
1270 *
1271 * If supported by the underlying bus the write will be scheduled
1272 * asynchronously, helping maximise I/O speed on higher speed buses
1273 * like SPI. regmap_async_complete() can be called to ensure that all
1274 * asynchrnous writes have been completed.
1275 *
1276 * A value of zero will be returned on success, a negative errno will
1277 * be returned in error cases.
1278 */
1279int regmap_raw_write_async(struct regmap *map, unsigned int reg,
1280 const void *val, size_t val_len)
1281{
1282 int ret;
1283
1284 if (val_len % map->format.val_bytes)
1285 return -EINVAL;
1286 if (reg % map->reg_stride)
1287 return -EINVAL;
1288
1289 map->lock(map->lock_arg);
1290
1291 ret = _regmap_raw_write(map, reg, val, val_len, true);
1292
1293 map->unlock(map->lock_arg);
1294
1295 return ret;
1296}
1297EXPORT_SYMBOL_GPL(regmap_raw_write_async);
1298
Mark Brownb83a3132011-05-11 19:59:58 +02001299static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1300 unsigned int val_len)
1301{
Mark Brown98bc7df2012-10-04 17:31:11 +01001302 struct regmap_range_node *range;
Mark Brownb83a3132011-05-11 19:59:58 +02001303 u8 *u8 = map->work_buf;
1304 int ret;
1305
Mark Brown98bc7df2012-10-04 17:31:11 +01001306 range = _regmap_range_lookup(map, reg);
1307 if (range) {
1308 ret = _regmap_select_page(map, &reg, range,
1309 val_len / map->format.val_bytes);
Mark Brown0ff3e622012-10-04 17:39:13 +01001310 if (ret != 0)
Mark Brown98bc7df2012-10-04 17:31:11 +01001311 return ret;
1312 }
Krystian Garbaciak6863ca62012-06-15 11:23:56 +01001313
Marc Reillyd939fb92012-03-16 12:11:43 +11001314 map->format.format_reg(map->work_buf, reg, map->reg_shift);
Mark Brownb83a3132011-05-11 19:59:58 +02001315
1316 /*
Lars-Peter Clausen6f306442011-09-05 20:46:32 +02001317 * Some buses or devices flag reads by setting the high bits in the
Mark Brownb83a3132011-05-11 19:59:58 +02001318 * register addresss; since it's always the high bits for all
1319 * current formats we can do this here rather than in
1320 * formatting. This may break if we get interesting formats.
1321 */
Lars-Peter Clausen6f306442011-09-05 20:46:32 +02001322 u8[0] |= map->read_flag_mask;
Mark Brownb83a3132011-05-11 19:59:58 +02001323
Mark Brownfb2736b2011-07-24 21:30:55 +01001324 trace_regmap_hw_read_start(map->dev, reg,
1325 val_len / map->format.val_bytes);
1326
Stephen Warren0135bbc2012-04-04 15:48:30 -06001327 ret = map->bus->read(map->bus_context, map->work_buf,
Mark Brown82159ba2012-01-18 10:52:25 +00001328 map->format.reg_bytes + map->format.pad_bytes,
Mark Brown40c5cc22011-07-24 22:39:12 +01001329 val, val_len);
Mark Brownb83a3132011-05-11 19:59:58 +02001330
Mark Brownfb2736b2011-07-24 21:30:55 +01001331 trace_regmap_hw_read_done(map->dev, reg,
1332 val_len / map->format.val_bytes);
1333
1334 return ret;
Mark Brownb83a3132011-05-11 19:59:58 +02001335}
1336
Andrey Smirnovad278402013-01-12 12:54:12 -08001337static int _regmap_bus_read(void *context, unsigned int reg,
1338 unsigned int *val)
1339{
1340 int ret;
1341 struct regmap *map = context;
1342
1343 if (!map->format.parse_val)
1344 return -EINVAL;
1345
1346 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
1347 if (ret == 0)
1348 *val = map->format.parse_val(map->work_buf);
1349
1350 return ret;
1351}
1352
Mark Brownb83a3132011-05-11 19:59:58 +02001353static int _regmap_read(struct regmap *map, unsigned int reg,
1354 unsigned int *val)
1355{
1356 int ret;
Andrey Smirnovad278402013-01-12 12:54:12 -08001357 BUG_ON(!map->reg_read);
Mark Brownb83a3132011-05-11 19:59:58 +02001358
Dimitris Papastamos5d1729e2011-09-19 14:34:05 +01001359 if (!map->cache_bypass) {
1360 ret = regcache_read(map, reg, val);
1361 if (ret == 0)
1362 return 0;
1363 }
1364
1365 if (map->cache_only)
1366 return -EBUSY;
1367
Andrey Smirnovad278402013-01-12 12:54:12 -08001368 ret = map->reg_read(map, reg, val);
Mark Brownfb2736b2011-07-24 21:30:55 +01001369 if (ret == 0) {
Mark Brown1044c182012-07-06 14:10:23 +01001370#ifdef LOG_DEVICE
1371 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1372 dev_info(map->dev, "%x => %x\n", reg, *val);
1373#endif
1374
Mark Brownfb2736b2011-07-24 21:30:55 +01001375 trace_regmap_reg_read(map->dev, reg, *val);
Mark Brownb83a3132011-05-11 19:59:58 +02001376
Andrey Smirnovad278402013-01-12 12:54:12 -08001377 if (!map->cache_bypass)
1378 regcache_write(map, reg, *val);
1379 }
Mark Brownf2985362012-04-30 21:25:05 +01001380
Mark Brownb83a3132011-05-11 19:59:58 +02001381 return ret;
1382}
1383
1384/**
1385 * regmap_read(): Read a value from a single register
1386 *
1387 * @map: Register map to write to
1388 * @reg: Register to be read from
1389 * @val: Pointer to store read value
1390 *
1391 * A value of zero will be returned on success, a negative errno will
1392 * be returned in error cases.
1393 */
1394int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
1395{
1396 int ret;
1397
Stephen Warrenf01ee602012-04-09 13:40:24 -06001398 if (reg % map->reg_stride)
1399 return -EINVAL;
1400
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001401 map->lock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001402
1403 ret = _regmap_read(map, reg, val);
1404
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001405 map->unlock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001406
1407 return ret;
1408}
1409EXPORT_SYMBOL_GPL(regmap_read);
1410
1411/**
1412 * regmap_raw_read(): Read raw data from the device
1413 *
1414 * @map: Register map to write to
1415 * @reg: First register to be read from
1416 * @val: Pointer to store read value
1417 * @val_len: Size of data to read
1418 *
1419 * A value of zero will be returned on success, a negative errno will
1420 * be returned in error cases.
1421 */
1422int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1423 size_t val_len)
1424{
Mark Brownb8fb5ab2012-02-21 19:12:47 +00001425 size_t val_bytes = map->format.val_bytes;
1426 size_t val_count = val_len / val_bytes;
1427 unsigned int v;
1428 int ret, i;
Mark Brown04e016a2011-10-09 13:35:43 +01001429
Stephen Warren851960b2012-04-06 15:16:03 -06001430 if (val_len % map->format.val_bytes)
1431 return -EINVAL;
Stephen Warrenf01ee602012-04-09 13:40:24 -06001432 if (reg % map->reg_stride)
1433 return -EINVAL;
Stephen Warren851960b2012-04-06 15:16:03 -06001434
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001435 map->lock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001436
Mark Brownb8fb5ab2012-02-21 19:12:47 +00001437 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
1438 map->cache_type == REGCACHE_NONE) {
1439 /* Physical block read if there's no cache involved */
1440 ret = _regmap_raw_read(map, reg, val, val_len);
Mark Brownb83a3132011-05-11 19:59:58 +02001441
Mark Brownb8fb5ab2012-02-21 19:12:47 +00001442 } else {
1443 /* Otherwise go word by word for the cache; should be low
1444 * cost as we expect to hit the cache.
1445 */
1446 for (i = 0; i < val_count; i++) {
Stephen Warrenf01ee602012-04-09 13:40:24 -06001447 ret = _regmap_read(map, reg + (i * map->reg_stride),
1448 &v);
Mark Brownb8fb5ab2012-02-21 19:12:47 +00001449 if (ret != 0)
1450 goto out;
1451
Marc Reillyd939fb92012-03-16 12:11:43 +11001452 map->format.format_val(val + (i * val_bytes), v, 0);
Mark Brownb8fb5ab2012-02-21 19:12:47 +00001453 }
1454 }
1455
1456 out:
Davide Ciminaghi0d4529c2012-10-16 15:56:59 +02001457 map->unlock(map->lock_arg);
Mark Brownb83a3132011-05-11 19:59:58 +02001458
1459 return ret;
1460}
1461EXPORT_SYMBOL_GPL(regmap_raw_read);
1462
1463/**
1464 * regmap_bulk_read(): Read multiple registers from the device
1465 *
1466 * @map: Register map to write to
1467 * @reg: First register to be read from
1468 * @val: Pointer to store read value, in native register size for device
1469 * @val_count: Number of registers to read
1470 *
1471 * A value of zero will be returned on success, a negative errno will
1472 * be returned in error cases.
1473 */
1474int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
1475 size_t val_count)
1476{
1477 int ret, i;
1478 size_t val_bytes = map->format.val_bytes;
Lars-Peter Clausen82cd9962011-11-08 18:37:25 +01001479 bool vol = regmap_volatile_range(map, reg, val_count);
Dimitris Papastamos5d1729e2011-09-19 14:34:05 +01001480
Mark Brownb83a3132011-05-11 19:59:58 +02001481 if (!map->format.parse_val)
1482 return -EINVAL;
Stephen Warrenf01ee602012-04-09 13:40:24 -06001483 if (reg % map->reg_stride)
1484 return -EINVAL;
Mark Brownb83a3132011-05-11 19:59:58 +02001485
Mark Brownde2d8082011-10-10 13:24:52 +01001486 if (vol || map->cache_type == REGCACHE_NONE) {
		/*
		 * Some devices do not support bulk read; for them we
		 * fall back to a series of single read operations.
		 */
		if (map->use_single_rw) {
			for (i = 0; i < val_count; i++) {
				ret = regmap_raw_read(map,
						reg + (i * map->reg_stride),
						val + (i * val_bytes),
						val_bytes);
				if (ret != 0)
					return ret;
			}
		} else {
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		}

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_val(val + i);
	} else {
		for (i = 0; i < val_count; i++) {
			unsigned int ival;
			ret = regmap_read(map, reg + (i * map->reg_stride),
					  &ival);
			if (ret != 0)
				return ret;
			memcpy(val + (i * val_bytes), &ival, val_bytes);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
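
/*
 * Usage sketch (illustrative only): reading a run of registers into
 * native-endian storage with regmap_bulk_read().  The base register,
 * count, 16-bit register width and dev pointer are assumptions for
 * the example.
 *
 *	u16 vals[4];
 *	int ret;
 *
 *	ret = regmap_bulk_read(map, 0x10, vals, ARRAY_SIZE(vals));
 *	if (ret != 0)
 *		dev_err(dev, "bulk read failed: %d\n", ret);
 *
 * Each element of vals is parsed into CPU byte order, so the caller
 * can use the values directly.
 */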

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change)
{
	int ret;
	unsigned int tmp, orig;

	ret = _regmap_read(map, reg, &orig);
	if (ret != 0)
		return ret;

	tmp = orig & ~mask;
	tmp |= val & mask;

	if (tmp != orig) {
		ret = _regmap_write(map, reg, tmp);
		*change = true;
	} else {
		*change = false;
	}

	return ret;
}

/**
 * regmap_update_bits: Perform a read/modify/write cycle on the register map
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits(struct regmap *map, unsigned int reg,
		       unsigned int mask, unsigned int val)
{
	bool change;
	int ret;

	map->lock(map->lock_arg);
	ret = _regmap_update_bits(map, reg, mask, val, &change);
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits);
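
/*
 * Usage sketch (illustrative only): enabling and later disabling a
 * single control bit with regmap_update_bits().  The register address
 * and bit position are assumptions for the example.
 *
 *	ret = regmap_update_bits(map, 0x04, BIT(3), BIT(3));
 *	if (ret != 0)
 *		return ret;
 *	...
 *	ret = regmap_update_bits(map, 0x04, BIT(3), 0);
 *
 * Only the bits covered by the mask are modified; if the register
 * already holds the requested value no write is issued at all.
 */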

/**
 * regmap_update_bits_check: Perform a read/modify/write cycle on the
 *                           register map and report if updated
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
			     unsigned int mask, unsigned int val,
			     bool *change)
{
	int ret;

	map->lock(map->lock_arg);
	ret = _regmap_update_bits(map, reg, mask, val, change);
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
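
/*
 * Usage sketch (illustrative only): using regmap_update_bits_check()
 * when the caller needs to know whether the hardware state actually
 * changed, for instance to skip a settling delay if the field was
 * already programmed.  The register, mask, value and delay are made
 * up for the example.
 *
 *	bool changed;
 *
 *	ret = regmap_update_bits_check(map, 0x04, 0x0f, 0x03, &changed);
 *	if (ret == 0 && changed)
 *		usleep_range(100, 200);
 */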

void regmap_async_complete_cb(struct regmap_async *async, int ret)
{
	struct regmap *map = async->map;
	bool wake;

	spin_lock(&map->async_lock);

	list_del(&async->list);
	wake = list_empty(&map->async_list);

	if (ret != 0)
		map->async_ret = ret;

	spin_unlock(&map->async_lock);

	schedule_work(&async->cleanup);

	if (wake)
		wake_up(&map->async_waitq);
}

static int regmap_async_is_done(struct regmap *map)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&map->async_lock, flags);
	ret = list_empty(&map->async_list);
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

/**
 * regmap_async_complete: Ensure all asynchronous I/O has completed.
 *
 * @map: Map to operate on.
 *
 * Blocks until any pending asynchronous I/O has completed. Returns
 * an error code for any failed I/O operations.
 */
int regmap_async_complete(struct regmap *map)
{
	unsigned long flags;
	int ret;

	/* Nothing to do with no async support */
	if (!map->bus->async_write)
		return 0;

	wait_event(map->async_waitq, regmap_async_is_done(map));

	spin_lock_irqsave(&map->async_lock, flags);
	ret = map->async_ret;
	map->async_ret = 0;
	spin_unlock_irqrestore(&map->async_lock, flags);

	return ret;
}

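/*
 * Usage sketch (illustrative only): a client driver that has queued a
 * large transfer through the asynchronous write path and needs it to
 * have reached the hardware before continuing, e.g. after a firmware
 * download.  FW_BASE, fw, fw_len and dev are hypothetical names, and
 * the use of regmap_raw_write_async() assumes the underlying bus
 * provides async support.
 *
 *	ret = regmap_raw_write_async(map, FW_BASE, fw, fw_len);
 *	if (ret == 0)
 *		ret = regmap_async_complete(map);
 *	if (ret != 0)
 *		dev_err(dev, "firmware download failed: %d\n", ret);
 */
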
/**
 * regmap_register_patch: Register and apply register updates to be applied
 * on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately.  Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 */
int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
			  int num_regs)
{
	int i, ret;
	bool bypass;

	/* If needed the implementation can be extended to support this */
	if (map->patch)
		return -EBUSY;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;

	/* Write out first; it's useful to apply even if we fail later. */
	for (i = 0; i < num_regs; i++) {
		ret = _regmap_write(map, regs[i].reg, regs[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				regs[i].reg, regs[i].def, ret);
			goto out;
		}
	}

	map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
	if (map->patch != NULL) {
		memcpy(map->patch, regs,
		       num_regs * sizeof(struct reg_default));
		map->patch_regs = num_regs;
	} else {
		ret = -ENOMEM;
	}

out:
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
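
/*
 * Usage sketch (illustrative only): registering a vendor-supplied
 * errata patch at probe time.  The register/value pairs, the array
 * name and the dev pointer are made up; a real driver would take the
 * values from the vendor's documentation.
 *
 *	static const struct reg_default hyp_patch[] = {
 *		{ 0x7a, 0x0001 },
 *		{ 0x7b, 0x8000 },
 *	};
 *
 *	ret = regmap_register_patch(map, hyp_patch, ARRAY_SIZE(hyp_patch));
 *	if (ret != 0)
 *		dev_err(dev, "failed to register patch: %d\n", ret);
 *
 * The writes are applied immediately (bypassing the cache) and are
 * replayed whenever the cache is synchronised back to the device.
 */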

/*
 * regmap_get_val_bytes(): Report the size of a register value
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
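
/*
 * Usage sketch (illustrative only): generic code sizing a raw read
 * buffer without knowing the device's register width up front.  The
 * register count of 16 is an assumption for the example.
 *
 *	int val_bytes = regmap_get_val_bytes(map);
 *	void *buf;
 *
 *	if (val_bytes < 0)
 *		return val_bytes;
 *
 *	buf = kmalloc(val_bytes * 16, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	ret = regmap_raw_read(map, 0, buf, val_bytes * 16);
 */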

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);