| diff --git a/stblinux-2.6.37/arch/mips/include/asm/io.h b/stblinux-2.6.37/arch/mips/include/asm/io.h |
| index e1c2e2e..d789366 100644 |
| --- a/stblinux-2.6.37/arch/mips/include/asm/io.h |
| +++ b/stblinux-2.6.37/arch/mips/include/asm/io.h |
| @@ -260,6 +260,10 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, |
| #define ioremap_nocache(offset, size) \ |
| __ioremap_mode((offset), (size), _CACHE_UNCACHED) |
| |
| +#define ioremap_cached(offset, size) \ |
| + __ioremap_mode((offset), (size), _CACHE_CACHABLE_NONCOHERENT) |
| + |
| + |
| /* |
| * ioremap_cachable - map bus memory into CPU space |
| * @offset: bus address of the memory |
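| |
| For context, the ioremap_cached() macro added above maps a bus range with |
| _CACHE_CACHABLE_NONCOHERENT: CPU accesses go through the cache but are not |
| coherent with DMA, so callers must do explicit cache maintenance before |
| sharing such a buffer. A minimal usage sketch (the physical window below |
| is hypothetical): |
| |
| #include <linux/errno.h> |
| #include <linux/init.h> |
| #include <linux/io.h> |
| |
| static int __init map_example(void) |
| { |
|         void __iomem *va = ioremap_cached(0x10000000, 0x100000); |
| |
|         if (!va) |
|                 return -ENOMEM; |
|         memset_io(va, 0, 0x100000); /* CPU access runs through the cache */ |
|         iounmap(va); |
|         return 0; |
| } |
| |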
| diff --git a/stblinux-2.6.37/drivers/char/mem.c b/stblinux-2.6.37/drivers/char/mem.c |
| index 1256454..f408c21 100644 |
| --- a/stblinux-2.6.37/drivers/char/mem.c |
| +++ b/stblinux-2.6.37/drivers/char/mem.c |
| @@ -731,7 +731,11 @@ static loff_t memory_lseek(struct file *file, loff_t offset, int orig) |
| |
| static int open_port(struct inode * inode, struct file * filp) |
| { |
| +#if 0 |
| return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; |
| +#else |
| + return 0; |
| +#endif |
| } |
| |
| #define zero_lseek null_lseek |
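| |
| With the capability check compiled out, any process that passes the |
| filesystem permissions on /dev/mem or /dev/port can open them, a debugging |
| convenience with obvious security implications. A userspace sketch of what |
| this enables (the mapped physical address is hypothetical): |
| |
| #include <fcntl.h> |
| #include <stdio.h> |
| #include <sys/mman.h> |
| #include <unistd.h> |
| |
| int main(void) |
| { |
|         int fd = open("/dev/mem", O_RDWR | O_SYNC); /* no CAP_SYS_RAWIO */ |
|         volatile unsigned int *reg; |
| |
|         if (fd < 0) |
|                 return 1; |
|         reg = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, |
|                    fd, 0x10000000); /* hypothetical register page */ |
|         if (reg == MAP_FAILED) |
|                 return 1; |
|         printf("reg[0] = 0x%08x\n", reg[0]); |
|         munmap((void *)reg, 4096); |
|         close(fd); |
|         return 0; |
| } |
| |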
| diff --git a/stblinux-2.6.37/drivers/input/Kconfig b/stblinux-2.6.37/drivers/input/Kconfig |
| index 07c2cd4..dd62135 100644 |
| --- a/stblinux-2.6.37/drivers/input/Kconfig |
| +++ b/stblinux-2.6.37/drivers/input/Kconfig |
| @@ -161,6 +161,15 @@ config INPUT_APMPOWER |
| To compile this driver as a module, choose M here: the |
| module will be called apm-power. |
| |
| +config INPUT_KEYRESET |
| + tristate "Reset key" |
| + depends on INPUT |
| + ---help--- |
| + Say Y here if you want to reboot when certain keys are pressed. |
| + |
| + To compile this driver as a module, choose M here: the |
| + module will be called keyreset. |
| + |
| config XEN_KBDDEV_FRONTEND |
| tristate "Xen virtual keyboard and mouse support" |
| depends on XEN_FBDEV_FRONTEND |
| diff --git a/stblinux-2.6.37/drivers/input/Makefile b/stblinux-2.6.37/drivers/input/Makefile |
| index 7ad212d..3c71257 100644 |
| --- a/stblinux-2.6.37/drivers/input/Makefile |
| +++ b/stblinux-2.6.37/drivers/input/Makefile |
| @@ -24,5 +24,6 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/ |
| obj-$(CONFIG_INPUT_MISC) += misc/ |
| |
| obj-$(CONFIG_INPUT_APMPOWER) += apm-power.o |
| +obj-$(CONFIG_INPUT_KEYRESET) += keyreset.o |
| |
| obj-$(CONFIG_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o |
| diff --git a/stblinux-2.6.37/drivers/input/keyreset.c b/stblinux-2.6.37/drivers/input/keyreset.c |
| new file mode 100644 |
| index 0000000..4905692 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/input/keyreset.c |
| @@ -0,0 +1,229 @@ |
| +/* drivers/input/keyreset.c |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/input.h> |
| +#include <linux/keyreset.h> |
| +#include <linux/module.h> |
| +#include <linux/platform_device.h> |
| +#include <linux/reboot.h> |
| +#include <linux/sched.h> |
| +#include <linux/syscalls.h> |
| + |
| + |
| +struct keyreset_state { |
| + struct input_handler input_handler; |
| + unsigned long keybit[BITS_TO_LONGS(KEY_CNT)]; |
| + unsigned long upbit[BITS_TO_LONGS(KEY_CNT)]; |
| + unsigned long key[BITS_TO_LONGS(KEY_CNT)]; |
| + spinlock_t lock; |
| + int key_down_target; |
| + int key_down; |
| + int key_up; |
| + int restart_disabled; |
| +}; |
| + |
| +int restart_requested; |
| +static void deferred_restart(struct work_struct *dummy) |
| +{ |
| + restart_requested = 2; |
| + sys_sync(); |
| + restart_requested = 3; |
| + kernel_restart(NULL); |
| +} |
| +static DECLARE_WORK(restart_work, deferred_restart); |
| + |
| +static void keyreset_event(struct input_handle *handle, unsigned int type, |
| + unsigned int code, int value) |
| +{ |
| + unsigned long flags; |
| + struct keyreset_state *state = handle->private; |
| + |
| + if (type != EV_KEY) |
| + return; |
| + |
| + if (code >= KEY_MAX) |
| + return; |
| + |
| + if (!test_bit(code, state->keybit)) |
| + return; |
| + |
| + spin_lock_irqsave(&state->lock, flags); |
| + if (!test_bit(code, state->key) == !value) |
| + goto done; |
| + __change_bit(code, state->key); |
| + if (test_bit(code, state->upbit)) { |
| + if (value) { |
| + state->restart_disabled = 1; |
| + state->key_up++; |
| + } else |
| + state->key_up--; |
| + } else { |
| + if (value) |
| + state->key_down++; |
| + else |
| + state->key_down--; |
| + } |
| + if (state->key_down == 0 && state->key_up == 0) |
| + state->restart_disabled = 0; |
| + |
| + pr_debug("reset key changed %d %d new state %d-%d-%d\n", code, value, |
| + state->key_down, state->key_up, state->restart_disabled); |
| + |
| + if (value && !state->restart_disabled && |
| + state->key_down == state->key_down_target) { |
| + state->restart_disabled = 1; |
| + if (restart_requested) |
| + panic("keyboard reset failed, %d", restart_requested); |
| + pr_info("keyboard reset\n"); |
| + schedule_work(&restart_work); |
| + restart_requested = 1; |
| + } |
| +done: |
| + spin_unlock_irqrestore(&state->lock, flags); |
| +} |
| + |
| +static int keyreset_connect(struct input_handler *handler, |
| + struct input_dev *dev, |
| + const struct input_device_id *id) |
| +{ |
| + int i; |
| + int ret; |
| + struct input_handle *handle; |
| + struct keyreset_state *state = |
| + container_of(handler, struct keyreset_state, input_handler); |
| + |
| + for (i = 0; i < KEY_MAX; i++) { |
| + if (test_bit(i, state->keybit) && test_bit(i, dev->keybit)) |
| + break; |
| + } |
| + if (i == KEY_MAX) |
| + return -ENODEV; |
| + |
| + handle = kzalloc(sizeof(*handle), GFP_KERNEL); |
| + if (!handle) |
| + return -ENOMEM; |
| + |
| + handle->dev = dev; |
| + handle->handler = handler; |
| + handle->name = "keyreset"; |
| + handle->private = state; |
| + |
| + ret = input_register_handle(handle); |
| + if (ret) |
| + goto err_input_register_handle; |
| + |
| + ret = input_open_device(handle); |
| + if (ret) |
| + goto err_input_open_device; |
| + |
| + pr_info("using input dev %s for key reset\n", dev->name); |
| + |
| + return 0; |
| + |
| +err_input_open_device: |
| + input_unregister_handle(handle); |
| +err_input_register_handle: |
| + kfree(handle); |
| + return ret; |
| +} |
| + |
| +static void keyreset_disconnect(struct input_handle *handle) |
| +{ |
| + input_close_device(handle); |
| + input_unregister_handle(handle); |
| + kfree(handle); |
| +} |
| + |
| +static const struct input_device_id keyreset_ids[] = { |
| + { |
| + .flags = INPUT_DEVICE_ID_MATCH_EVBIT, |
| + .evbit = { BIT_MASK(EV_KEY) }, |
| + }, |
| + { }, |
| +}; |
| +MODULE_DEVICE_TABLE(input, keyreset_ids); |
| + |
| +static int keyreset_probe(struct platform_device *pdev) |
| +{ |
| + int ret; |
| + int key, *keyp; |
| + struct keyreset_state *state; |
| + struct keyreset_platform_data *pdata = pdev->dev.platform_data; |
| + |
| + if (!pdata) |
| + return -EINVAL; |
| + |
| + state = kzalloc(sizeof(*state), GFP_KERNEL); |
| + if (!state) |
| + return -ENOMEM; |
| + |
| + spin_lock_init(&state->lock); |
| + keyp = pdata->keys_down; |
| + while ((key = *keyp++)) { |
| + if (key >= KEY_MAX) |
| + continue; |
| + state->key_down_target++; |
| + __set_bit(key, state->keybit); |
| + } |
| + if (pdata->keys_up) { |
| + keyp = pdata->keys_up; |
| + while ((key = *keyp++)) { |
| + if (key >= KEY_MAX) |
| + continue; |
| + __set_bit(key, state->keybit); |
| + __set_bit(key, state->upbit); |
| + } |
| + } |
| + state->input_handler.event = keyreset_event; |
| + state->input_handler.connect = keyreset_connect; |
| + state->input_handler.disconnect = keyreset_disconnect; |
| + state->input_handler.name = KEYRESET_NAME; |
| + state->input_handler.id_table = keyreset_ids; |
| + ret = input_register_handler(&state->input_handler); |
| + if (ret) { |
| + kfree(state); |
| + return ret; |
| + } |
| + platform_set_drvdata(pdev, state); |
| + return 0; |
| +} |
| + |
| +int keyreset_remove(struct platform_device *pdev) |
| +{ |
| + struct keyreset_state *state = platform_get_drvdata(pdev); |
| + input_unregister_handler(&state->input_handler); |
| + kfree(state); |
| + return 0; |
| +} |
| + |
| + |
| +struct platform_driver keyreset_driver = { |
| + .driver.name = KEYRESET_NAME, |
| + .probe = keyreset_probe, |
| + .remove = keyreset_remove, |
| +}; |
| + |
| +static int __init keyreset_init(void) |
| +{ |
| + return platform_driver_register(&keyreset_driver); |
| +} |
| + |
| +static void __exit keyreset_exit(void) |
| +{ |
| + platform_driver_unregister(&keyreset_driver); |
| +} |
| + |
| +module_init(keyreset_init); |
| +module_exit(keyreset_exit); |
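| |
| A board-file sketch of how this driver is wired up (key codes are |
| hypothetical): reboot when KEY_VOLUMEDOWN and KEY_POWER are held together, |
| unless KEY_VOLUMEUP (listed in keys_up) is also down. This assumes the |
| struct keyreset_platform_data layout from the linux/keyreset.h header that |
| accompanies this driver. |
| |
| static int board_reset_keys_down[] = { |
|         KEY_VOLUMEDOWN, |
|         KEY_POWER, |
|         0, /* zero-terminated, as keyreset_probe expects */ |
| }; |
| |
| static int board_reset_keys_up[] = { |
|         KEY_VOLUMEUP, /* holding this key vetoes the reset */ |
|         0, |
| }; |
| |
| static struct keyreset_platform_data board_keyreset_data = { |
|         .keys_down = board_reset_keys_down, |
|         .keys_up = board_reset_keys_up, |
| }; |
| |
| static struct platform_device board_keyreset_device = { |
|         .name = KEYRESET_NAME, |
|         .dev.platform_data = &board_keyreset_data, |
| }; |
| |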
| diff --git a/stblinux-2.6.37/drivers/input/misc/Kconfig b/stblinux-2.6.37/drivers/input/misc/Kconfig |
| index b99b8cb..9cbdc0d 100644 |
| --- a/stblinux-2.6.37/drivers/input/misc/Kconfig |
| +++ b/stblinux-2.6.37/drivers/input/misc/Kconfig |
| @@ -312,6 +312,11 @@ config INPUT_WINBOND_CIR |
| To compile this driver as a module, choose M here: the module will be |
| called winbond_cir. |
| |
| +config INPUT_GPIO |
| + tristate "GPIO driver support" |
| + help |
| + Say Y here if you want to support GPIO-based keys, wheels, etc. |
| + |
| config HP_SDC_RTC |
| tristate "HP SDC Real Time Clock" |
| depends on (GSC || HP300) && SERIO |
| @@ -348,6 +353,17 @@ config INPUT_PWM_BEEPER |
| To compile this driver as a module, choose M here: the module will be |
| called pwm-beeper. |
| |
| +config INPUT_KEYCHORD |
| + tristate "Key chord input driver support" |
| + help |
| + Say Y here if you want to enable the key chord driver |
| + accessible at /dev/keychord. This driver can be used |
| + for receiving notifications when client specified key |
| + combinations are pressed. |
| + |
| + To compile this driver as a module, choose M here: the |
| + module will be called keychord. |
| + |
| config INPUT_GPIO_ROTARY_ENCODER |
| tristate "Rotary encoders connected to GPIO pins" |
| depends on GPIOLIB && GENERIC_GPIO |
| diff --git a/stblinux-2.6.37/drivers/input/misc/Makefile b/stblinux-2.6.37/drivers/input/misc/Makefile |
| index 1fe1f6c..5a3d1f8 100644 |
| --- a/stblinux-2.6.37/drivers/input/misc/Makefile |
| +++ b/stblinux-2.6.37/drivers/input/misc/Makefile |
| @@ -42,4 +42,6 @@ obj-$(CONFIG_INPUT_WINBOND_CIR) += winbond-cir.o |
| obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o |
| obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o |
| obj-$(CONFIG_INPUT_YEALINK) += yealink.o |
| +obj-$(CONFIG_INPUT_GPIO) += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o |
| +obj-$(CONFIG_INPUT_KEYCHORD) += keychord.o |
| |
| diff --git a/stblinux-2.6.37/drivers/input/misc/gpio_axis.c b/stblinux-2.6.37/drivers/input/misc/gpio_axis.c |
| new file mode 100644 |
| index 0000000..30b9f56 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/input/misc/gpio_axis.c |
| @@ -0,0 +1,191 @@ |
| +/* drivers/input/misc/gpio_axis.c |
| + * |
| + * Copyright (C) 2007 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/kernel.h> |
| +#include <linux/gpio.h> |
| +#include <linux/gpio_event.h> |
| +#include <linux/interrupt.h> |
| + |
| +struct gpio_axis_state { |
| + struct gpio_event_input_devs *input_devs; |
| + struct gpio_event_axis_info *info; |
| + uint32_t pos; |
| +}; |
| + |
| +uint16_t gpio_axis_4bit_gray_map_table[] = { |
| + [0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */ |
| + [0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */ |
| + [0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */ |
| + [0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */ |
| + [0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */ |
| + [0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */ |
| + [0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */ |
| + [0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */ |
| +}; |
| +uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in) |
| +{ |
| + return gpio_axis_4bit_gray_map_table[in]; |
| +} |
| + |
| +uint16_t gpio_axis_5bit_singletrack_map_table[] = { |
| + [0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /* 10000 10100 11100 */ |
| + [0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /* 11110 11010 11000 */ |
| + [0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /* 01000 01010 01110 */ |
| + [0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /* 01111 01101 01100 */ |
| + [0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /* 00100 00101 00111 */ |
| + [0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /* 10111 10110 00110 */ |
| + [0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /* 00010 10010 10011 */ |
| + [0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /* 11011 01011 00011 */ |
| + [0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001 */ |
| + [0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001 */ |
| +}; |
| +uint16_t gpio_axis_5bit_singletrack_map( |
| + struct gpio_event_axis_info *info, uint16_t in) |
| +{ |
| + return gpio_axis_5bit_singletrack_map_table[in]; |
| +} |
| + |
| +static void gpio_event_update_axis(struct gpio_axis_state *as, int report) |
| +{ |
| + struct gpio_event_axis_info *ai = as->info; |
| + int i; |
| + int change; |
| + uint16_t state = 0; |
| + uint16_t pos; |
| + uint16_t old_pos = as->pos; |
| + for (i = ai->count - 1; i >= 0; i--) |
| + state = (state << 1) | gpio_get_value(ai->gpio[i]); |
| + pos = ai->map(ai, state); |
| + if (ai->flags & GPIOEAF_PRINT_RAW) |
| + pr_info("axis %d-%d raw %x, pos %d -> %d\n", |
| + ai->type, ai->code, state, old_pos, pos); |
| + if (report && pos != old_pos) { |
| + if (ai->type == EV_REL) { |
| + change = (ai->decoded_size + pos - old_pos) % |
| + ai->decoded_size; |
| + if (change > ai->decoded_size / 2) |
| + change -= ai->decoded_size; |
| + if (change == ai->decoded_size / 2) { |
| + if (ai->flags & GPIOEAF_PRINT_EVENT) |
| + pr_info("axis %d-%d unknown direction, " |
| + "pos %d -> %d\n", ai->type, |
| + ai->code, old_pos, pos); |
| + change = 0; /* no closest direction */ |
| + } |
| + if (ai->flags & GPIOEAF_PRINT_EVENT) |
| + pr_info("axis %d-%d change %d\n", |
| + ai->type, ai->code, change); |
| + input_report_rel(as->input_devs->dev[ai->dev], |
| + ai->code, change); |
| + } else { |
| + if (ai->flags & GPIOEAF_PRINT_EVENT) |
| + pr_info("axis %d-%d now %d\n", |
| + ai->type, ai->code, pos); |
| + input_event(as->input_devs->dev[ai->dev], |
| + ai->type, ai->code, pos); |
| + } |
| + input_sync(as->input_devs->dev[ai->dev]); |
| + } |
| + as->pos = pos; |
| +} |
| + |
| +static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id) |
| +{ |
| + struct gpio_axis_state *as = dev_id; |
| + gpio_event_update_axis(as, 1); |
| + return IRQ_HANDLED; |
| +} |
| + |
| +int gpio_event_axis_func(struct gpio_event_input_devs *input_devs, |
| + struct gpio_event_info *info, void **data, int func) |
| +{ |
| + int ret; |
| + int i; |
| + int irq; |
| + struct gpio_event_axis_info *ai; |
| + struct gpio_axis_state *as; |
| + |
| + ai = container_of(info, struct gpio_event_axis_info, info); |
| + if (func == GPIO_EVENT_FUNC_SUSPEND) { |
| + for (i = 0; i < ai->count; i++) |
| + disable_irq(gpio_to_irq(ai->gpio[i])); |
| + return 0; |
| + } |
| + if (func == GPIO_EVENT_FUNC_RESUME) { |
| + for (i = 0; i < ai->count; i++) |
| + enable_irq(gpio_to_irq(ai->gpio[i])); |
| + return 0; |
| + } |
| + |
| + if (func == GPIO_EVENT_FUNC_INIT) { |
| + *data = as = kmalloc(sizeof(*as), GFP_KERNEL); |
| + if (as == NULL) { |
| + ret = -ENOMEM; |
| + goto err_alloc_axis_state_failed; |
| + } |
| + as->input_devs = input_devs; |
| + as->info = ai; |
| + if (ai->dev >= input_devs->count) { |
| + pr_err("gpio_event_axis: bad device index %d >= %d " |
| + "for %d:%d\n", ai->dev, input_devs->count, |
| + ai->type, ai->code); |
| + ret = -EINVAL; |
| + goto err_bad_device_index; |
| + } |
| + |
| + input_set_capability(input_devs->dev[ai->dev], |
| + ai->type, ai->code); |
| + if (ai->type == EV_ABS) { |
| + input_set_abs_params(input_devs->dev[ai->dev], ai->code, |
| + 0, ai->decoded_size - 1, 0, 0); |
| + } |
| + for (i = 0; i < ai->count; i++) { |
| + ret = gpio_request(ai->gpio[i], "gpio_event_axis"); |
| + if (ret < 0) |
| + goto err_request_gpio_failed; |
| + ret = gpio_direction_input(ai->gpio[i]); |
| + if (ret < 0) |
| + goto err_gpio_direction_input_failed; |
| + ret = irq = gpio_to_irq(ai->gpio[i]); |
| + if (ret < 0) |
| + goto err_get_irq_num_failed; |
| + ret = request_irq(irq, gpio_axis_irq_handler, |
| + IRQF_TRIGGER_RISING | |
| + IRQF_TRIGGER_FALLING, |
| + "gpio_event_axis", as); |
| + if (ret < 0) |
| + goto err_request_irq_failed; |
| + } |
| + gpio_event_update_axis(as, 0); |
| + return 0; |
| + } |
| + |
| + ret = 0; |
| + as = *data; |
| + for (i = ai->count - 1; i >= 0; i--) { |
| + free_irq(gpio_to_irq(ai->gpio[i]), as); |
| +err_request_irq_failed: |
| +err_get_irq_num_failed: |
| +err_gpio_direction_input_failed: |
| + gpio_free(ai->gpio[i]); |
| +err_request_gpio_failed: |
| + ; |
| + } |
| +err_bad_device_index: |
| + kfree(as); |
| + *data = NULL; |
| +err_alloc_axis_state_failed: |
| + return ret; |
| +} |
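| |
| A configuration sketch for the decoder above (GPIO numbers hypothetical, |
| and the field layout assumed from the linux/gpio_event.h header that |
| accompanies this driver): a rotary wheel sampled from four GPIOs, decoded |
| with the 4-bit gray-code map and reported as a relative axis. |
| |
| static uint32_t board_wheel_gpios[] = { 40, 41, 42, 43 }; |
| |
| static struct gpio_event_axis_info board_wheel_info = { |
|         .info.func = gpio_event_axis_func, |
|         .count = ARRAY_SIZE(board_wheel_gpios), |
|         .type = EV_REL, |
|         .code = REL_WHEEL, |
|         .decoded_size = 16, /* 4 bits -> 16 decoded positions */ |
|         .map = gpio_axis_4bit_gray_map, |
|         .gpio = board_wheel_gpios, |
| }; |
| |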
| diff --git a/stblinux-2.6.37/drivers/leds/ledtrig-sleep.c b/stblinux-2.6.37/drivers/leds/ledtrig-sleep.c |
| new file mode 100644 |
| index 0000000..f164042 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/leds/ledtrig-sleep.c |
| @@ -0,0 +1,80 @@ |
| +/* drivers/leds/ledtrig-sleep.c |
| + * |
| + * Copyright (C) 2007 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/earlysuspend.h> |
| +#include <linux/leds.h> |
| +#include <linux/suspend.h> |
| + |
| +static int ledtrig_sleep_pm_callback(struct notifier_block *nfb, |
| + unsigned long action, |
| + void *ignored); |
| + |
| +DEFINE_LED_TRIGGER(ledtrig_sleep) |
| +static struct notifier_block ledtrig_sleep_pm_notifier = { |
| + .notifier_call = ledtrig_sleep_pm_callback, |
| + .priority = 0, |
| +}; |
| + |
| +static void ledtrig_sleep_early_suspend(struct early_suspend *h) |
| +{ |
| + led_trigger_event(ledtrig_sleep, LED_FULL); |
| +} |
| + |
| +static void ledtrig_sleep_early_resume(struct early_suspend *h) |
| +{ |
| + led_trigger_event(ledtrig_sleep, LED_OFF); |
| +} |
| + |
| +static struct early_suspend ledtrig_sleep_early_suspend_handler = { |
| + .suspend = ledtrig_sleep_early_suspend, |
| + .resume = ledtrig_sleep_early_resume, |
| +}; |
| + |
| +static int ledtrig_sleep_pm_callback(struct notifier_block *nfb, |
| + unsigned long action, |
| + void *ignored) |
| +{ |
| + switch (action) { |
| + case PM_HIBERNATION_PREPARE: |
| + case PM_SUSPEND_PREPARE: |
| + led_trigger_event(ledtrig_sleep, LED_OFF); |
| + return NOTIFY_OK; |
| + case PM_POST_HIBERNATION: |
| + case PM_POST_SUSPEND: |
| + led_trigger_event(ledtrig_sleep, LED_FULL); |
| + return NOTIFY_OK; |
| + } |
| + |
| + return NOTIFY_DONE; |
| +} |
| + |
| +static int __init ledtrig_sleep_init(void) |
| +{ |
| + led_trigger_register_simple("sleep", &ledtrig_sleep); |
| + register_pm_notifier(&ledtrig_sleep_pm_notifier); |
| + register_early_suspend(&ledtrig_sleep_early_suspend_handler); |
| + return 0; |
| +} |
| + |
| +static void __exit ledtrig_sleep_exit(void) |
| +{ |
| + unregister_early_suspend(&ledtrig_sleep_early_suspend_handler); |
| + unregister_pm_notifier(&ledtrig_sleep_pm_notifier); |
| + led_trigger_unregister_simple(ledtrig_sleep); |
| +} |
| + |
| +module_init(ledtrig_sleep_init); |
| +module_exit(ledtrig_sleep_exit); |
| + |
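| |
| The trigger drives an attached LED to full brightness while the system is |
| early-suspended but still running (screen off), and switches it off both |
| during a real suspend and after early resume. A userspace sketch that |
| attaches it (the LED name "amber" is hypothetical): |
| |
| #include <stdio.h> |
| |
| int main(void) |
| { |
|         FILE *f = fopen("/sys/class/leds/amber/trigger", "w"); |
| |
|         if (!f) |
|                 return 1; |
|         fputs("sleep", f); /* select the trigger registered above */ |
|         fclose(f); |
|         return 0; |
| } |
| |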
| diff --git a/stblinux-2.6.37/drivers/media/video/uvc/uvc_driver.c b/stblinux-2.6.37/drivers/media/video/uvc/uvc_driver.c |
| index a1e9dfb..c921d0f 100644 |
| --- a/stblinux-2.6.37/drivers/media/video/uvc/uvc_driver.c |
| +++ b/stblinux-2.6.37/drivers/media/video/uvc/uvc_driver.c |
| @@ -22,7 +22,7 @@ |
| * Huffman tables (DHT marker), you will need to add it back if your JPEG |
| * codec can't handle MJPEG data. |
| */ |
| - |
| +#include <linux/ctype.h> |
| #include <linux/kernel.h> |
| #include <linux/list.h> |
| #include <linux/module.h> |
| @@ -302,6 +302,10 @@ static int uvc_parse_format(struct uvc_device *dev, |
| switch (buffer[2]) { |
| case UVC_VS_FORMAT_UNCOMPRESSED: |
| case UVC_VS_FORMAT_FRAME_BASED: |
| + { |
| + const __u8 *guid = &buffer[5]; |
| + const __u8 uvc_guid_template[16] = UVC_GUID_FORMAT_TEMPLATE; |
| + |
| n = buffer[2] == UVC_VS_FORMAT_UNCOMPRESSED ? 27 : 28; |
| if (buflen < n) { |
| uvc_trace(UVC_TRACE_DESCR, "device %d videostreaming " |
| @@ -311,19 +315,40 @@ static int uvc_parse_format(struct uvc_device *dev, |
| return -EINVAL; |
| } |
| |
| - /* Find the format descriptor from its GUID. */ |
| - fmtdesc = uvc_format_by_guid(&buffer[5]); |
| + /* Find the format descriptor from its GUID first. |
| + * If no known format matches then check if the last 12 bytes |
| + * of the GUID match the following UVC format GUID template: |
| + * {????????-0000-0010-8000-00AA00389B71} |
| + * If they match and the first four bytes are alphanumeric |
| + * then use the first four bytes as a FourCC code and create a |
| + * custom name based on it. |
| + */ |
| + fmtdesc = uvc_format_by_guid(guid); |
| |
| if (fmtdesc != NULL) { |
| strlcpy(format->name, fmtdesc->name, |
| sizeof format->name); |
| format->fcc = fmtdesc->fcc; |
| + } else if (memcmp(&guid[4], &uvc_guid_template[4], 12) == 0) { |
| + uvc_trace(UVC_TRACE_DESCR, "Unknown UVC format found: " |
| + UVC_GUID_FORMAT "\n", |
| + UVC_GUID_ARGS(guid)); |
| + for (i = 0; i < 4 && isalnum(guid[i]); i++) |
| + ; /* empty - just count alphanumeric FourCC bytes */ |
| + if (i == 4) { |
| + format->fcc = *(__u32 *)guid; |
| + snprintf(format->name, sizeof format->name, |
| + "Custom (%c%c%c%c)", |
| + guid[0], guid[1], guid[2], guid[3]); |
| + uvc_trace(UVC_TRACE_DESCR, |
| + "Treating unknown UVC format as: %s\n", |
| + format->name); |
| + } |
| } else { |
| - uvc_printk(KERN_INFO, "Unknown video format %pUl\n", |
| - &buffer[5]); |
| - snprintf(format->name, sizeof(format->name), "%pUl\n", |
| - &buffer[5]); |
| - format->fcc = 0; |
| + uvc_printk(KERN_INFO, "Skipping unknown video format: " |
| + UVC_GUID_FORMAT "\n", |
| + UVC_GUID_ARGS(guid)); |
| + return -EINVAL; |
| } |
| |
| format->bpp = buffer[21]; |
| @@ -335,6 +360,7 @@ static int uvc_parse_format(struct uvc_device *dev, |
| format->flags = UVC_FMT_FLAG_COMPRESSED; |
| } |
| break; |
| + } |
| |
| case UVC_VS_FORMAT_MJPEG: |
| if (buflen < 11) { |
| @@ -2263,6 +2289,39 @@ static struct usb_device_id uvc_ids[] = { |
| .bInterfaceProtocol = 0, |
| .driver_info = UVC_QUIRK_PROBE_MINMAX |
| | UVC_QUIRK_IGNORE_SELECTOR_UNIT }, |
| + /* Logitech TV Camera */ |
| + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
| + | USB_DEVICE_ID_MATCH_INT_INFO, |
| + .idVendor = 0x046d, |
| + .idProduct = 0x080c, |
| + .bInterfaceClass = USB_CLASS_VIDEO, |
| + .bInterfaceSubClass = 1, |
| + .bInterfaceProtocol = 0, |
| + .driver_info = UVC_QUIRK_CUT_EXTRA_EOI }, |
| + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
| + | USB_DEVICE_ID_MATCH_INT_INFO, |
| + .idVendor = 0x046d, |
| + .idProduct = 0x0828, |
| + .bInterfaceClass = USB_CLASS_VIDEO, |
| + .bInterfaceSubClass = 1, |
| + .bInterfaceProtocol = 0, |
| + .driver_info = UVC_QUIRK_CUT_EXTRA_EOI }, |
| + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
| + | USB_DEVICE_ID_MATCH_INT_INFO, |
| + .idVendor = 0x046d, |
| + .idProduct = 0x0831, |
| + .bInterfaceClass = USB_CLASS_VIDEO, |
| + .bInterfaceSubClass = 1, |
| + .bInterfaceProtocol = 0, |
| + .driver_info = UVC_QUIRK_CUT_EXTRA_EOI }, |
| + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
| + | USB_DEVICE_ID_MATCH_INT_INFO, |
| + .idVendor = 0x046d, |
| + .idProduct = 0x0832, |
| + .bInterfaceClass = USB_CLASS_VIDEO, |
| + .bInterfaceSubClass = 1, |
| + .bInterfaceProtocol = 0, |
| + .driver_info = UVC_QUIRK_CUT_EXTRA_EOI }, |
| /* Generic USB Video Class */ |
| { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) }, |
| {} |
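| |
| On the FourCC path added above, the 32-bit fcc is taken from the first |
| four GUID bytes with a direct cast, which on a little-endian CPU is |
| equivalent to composing the bytes explicitly: |
| |
| static __u32 fourcc_of_guid(const __u8 *guid) |
| { |
|         return (__u32)guid[0] | ((__u32)guid[1] << 8) | |
|                ((__u32)guid[2] << 16) | ((__u32)guid[3] << 24); |
| } |
| |
| A GUID beginning 'Y', '8', '0', '0' therefore yields the same value as |
| v4l2_fourcc('Y', '8', '0', '0'); on a big-endian CPU the cast would give |
| the bytes in the opposite order. |
| |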
| diff --git a/stblinux-2.6.37/drivers/media/video/uvc/uvc_v4l2.c b/stblinux-2.6.37/drivers/media/video/uvc/uvc_v4l2.c |
| index 8cf61e8..f5ef56a 100644 |
| --- a/stblinux-2.6.37/drivers/media/video/uvc/uvc_v4l2.c |
| +++ b/stblinux-2.6.37/drivers/media/video/uvc/uvc_v4l2.c |
| @@ -1034,6 +1034,37 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) |
| case UVCIOC_CTRL_SET: |
| return uvc_xu_ctrl_query(chain, arg, 1); |
| |
| + case UVCIOC_CTRL_GET_EX: |
| + case UVCIOC_CTRL_SET_EX: |
| + { |
| + struct uvc_xu_control_ex *ctrlex = arg; |
| + struct uvc_xu_control ctrl = { |
| + .unit = 0, |
| + .selector = ctrlex->selector, |
| + .size = ctrlex->size, |
| + .data = ctrlex->data |
| + }; |
| + struct uvc_entity *entity; |
| + |
| + /* Find the extension unit. */ |
| + list_for_each_entry(entity, &chain->extensions, chain) { |
| + if (memcmp(entity->extension.guidExtensionCode, |
| + ctrlex->entity, 16) == 0) { |
| + ctrl.unit = entity->id; |
| + break; |
| + } |
| + } |
| + if (ctrl.unit == 0) { |
| + uvc_trace(UVC_TRACE_CONTROL, "Extension unit " |
| + UVC_GUID_FORMAT" not found.\n", |
| + UVC_GUID_ARGS(ctrlex->entity)); |
| + return -EINVAL; |
| + } |
| + |
| + return uvc_xu_ctrl_query(chain, &ctrl, |
| + cmd == UVCIOC_CTRL_GET_EX ? 0 : 1); |
| + } |
| + |
| default: |
| if ((ret = v4l_compat_translate_ioctl(file, cmd, arg, |
| uvc_v4l2_do_ioctl)) == -ENOIOCTLCMD) |
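| |
| A userspace sketch of the new GUID-addressed extension-unit query (the |
| GUID and selector are placeholders; struct uvc_xu_control_ex and |
| UVCIOC_CTRL_GET_EX come from the patched uvcvideo.h): |
| |
| #include <string.h> |
| #include <sys/ioctl.h> |
| #include <linux/types.h> |
| /* plus the patched uvcvideo.h for uvc_xu_control_ex / UVCIOC_CTRL_GET_EX */ |
| |
| static int get_xu_control(int fd, const __u8 guid[16], __u8 selector, |
|                           __u8 *buf, __u16 size) |
| { |
|         struct uvc_xu_control_ex ctrl; |
| |
|         memset(&ctrl, 0, sizeof(ctrl)); |
|         memcpy(ctrl.entity, guid, 16); /* select the XU by GUID, not id */ |
|         ctrl.selector = selector; |
|         ctrl.size = size; |
|         ctrl.data = buf; |
|         return ioctl(fd, UVCIOC_CTRL_GET_EX, &ctrl); |
| } |
| |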
| diff --git a/stblinux-2.6.37/drivers/media/video/uvc/uvc_video.c b/stblinux-2.6.37/drivers/media/video/uvc/uvc_video.c |
| index 5673d67..66d5735 100644 |
| --- a/stblinux-2.6.37/drivers/media/video/uvc/uvc_video.c |
| +++ b/stblinux-2.6.37/drivers/media/video/uvc/uvc_video.c |
| @@ -534,6 +534,31 @@ static void uvc_video_decode_end(struct uvc_streaming *stream, |
| if (stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID) |
| stream->last_fid ^= UVC_STREAM_FID; |
| } |
| + if ((stream->dev->quirks & UVC_QUIRK_CUT_EXTRA_EOI) && |
| + stream->cur_format->fcc != V4L2_PIX_FMT_YUYV) { |
| + struct uvc_video_queue *queue = &stream->queue; |
| + __u8 *mem = queue->mem + buf->buf.m.offset; |
| + __u8 *cur = mem + buf->buf.bytesused - 1; |
| + |
| + if (buf->buf.bytesused >= 1 && *cur == 0xFF) { |
| + /* Cut off trailing 0xFF bytes */ |
| + uvc_warn_once(stream->dev, UVC_WARN_CUT_EXTRA_EOI, |
| + "Cutting off extra 0xFF.\n"); |
| + buf->buf.bytesused--; |
| + } |
| + else if (buf->buf.bytesused >= 2) { |
| + /* Skip zero bytes at the end but leave at least two bytes */ |
| + while ((cur > mem + 1) && (*cur == 0)) |
| + cur--; |
| + /* If we encountered a 0xFFD9 marker strip it off (together |
| + * with the trailing zeroes). */ |
| + if (*(cur - 1) == 0xFF && *cur == 0xD9) { |
| + uvc_warn_once(stream->dev, UVC_WARN_CUT_EXTRA_EOI, |
| + "Cutting off extra EOI.\n"); |
| + buf->buf.bytesused = (cur - mem) - 1; |
| + } |
| + } |
| + } |
| } |
| |
| /* Video payload encoding is handled by uvc_video_encode_header() and |
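| |
| A standalone illustration of the trimming rule added above: drop a single |
| stray trailing 0xFF, or strip an EOI marker (0xFF 0xD9) together with any |
| zero padding behind it. |
| |
| static unsigned int trim_eoi(const unsigned char *mem, unsigned int len) |
| { |
|         const unsigned char *cur = mem + len - 1; |
| |
|         if (len >= 1 && *cur == 0xFF) |
|                 return len - 1; /* lone trailing 0xFF */ |
|         if (len >= 2) { |
|                 while (cur > mem + 1 && *cur == 0) |
|                         cur--; /* skip zero padding */ |
|                 if (cur[-1] == 0xFF && cur[0] == 0xD9) |
|                         return (cur - mem) - 1; /* cut marker + padding */ |
|         } |
|         return len; |
| } |
| |
| For example, a 6-byte payload { 0x12, 0x34, 0xFF, 0xD9, 0x00, 0x00 } trims |
| to 2 bytes. |
| |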
| diff --git a/stblinux-2.6.37/drivers/media/video/uvc/uvcvideo.h b/stblinux-2.6.37/drivers/media/video/uvc/uvcvideo.h |
| index 45f01e7..6659096 100644 |
| --- a/stblinux-2.6.37/drivers/media/video/uvc/uvcvideo.h |
| +++ b/stblinux-2.6.37/drivers/media/video/uvc/uvcvideo.h |
| @@ -73,11 +73,20 @@ struct uvc_xu_control { |
| __u8 __user *data; |
| }; |
| |
| +struct uvc_xu_control_ex { |
| + __u8 entity[16]; |
| + __u8 selector; |
| + __u16 size; |
| + __u8 __user *data; |
| +}; |
| + |
| #define UVCIOC_CTRL_ADD _IOW('U', 1, struct uvc_xu_control_info) |
| #define UVCIOC_CTRL_MAP_OLD _IOWR('U', 2, struct uvc_xu_control_mapping_old) |
| #define UVCIOC_CTRL_MAP _IOWR('U', 2, struct uvc_xu_control_mapping) |
| #define UVCIOC_CTRL_GET _IOWR('U', 3, struct uvc_xu_control) |
| #define UVCIOC_CTRL_SET _IOW('U', 4, struct uvc_xu_control) |
| +#define UVCIOC_CTRL_GET_EX _IOWR('U', 5, struct uvc_xu_control_ex) |
| +#define UVCIOC_CTRL_SET_EX _IOW('U', 6, struct uvc_xu_control_ex) |
| |
| #ifdef __KERNEL__ |
| |
| @@ -122,6 +131,23 @@ struct uvc_xu_control { |
| {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ |
| 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02} |
| |
| +#define UVC_GUID_LOGITECH_DEV_INFO \ |
| + {0x82, 0x06, 0x61, 0x63, 0x70, 0x50, 0xab, 0x49, \ |
| + 0xb8, 0xcc, 0xb3, 0x85, 0x5e, 0x8d, 0x22, 0x1e} |
| +#define UVC_GUID_LOGITECH_USER_HW \ |
| + {0x82, 0x06, 0x61, 0x63, 0x70, 0x50, 0xab, 0x49, \ |
| + 0xb8, 0xcc, 0xb3, 0x85, 0x5e, 0x8d, 0x22, 0x1f} |
| +#define UVC_GUID_LOGITECH_VIDEO \ |
| + {0x82, 0x06, 0x61, 0x63, 0x70, 0x50, 0xab, 0x49, \ |
| + 0xb8, 0xcc, 0xb3, 0x85, 0x5e, 0x8d, 0x22, 0x50} |
| +#define UVC_GUID_LOGITECH_MOTOR \ |
| + {0x82, 0x06, 0x61, 0x63, 0x70, 0x50, 0xab, 0x49, \ |
| + 0xb8, 0xcc, 0xb3, 0x85, 0x5e, 0x8d, 0x22, 0x56} |
| + |
| +#define UVC_GUID_FORMAT_TEMPLATE \ |
| + { '?', '?', '?', '?', 0x00, 0x00, 0x10, 0x00, \ |
| + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} |
| + |
| #define UVC_GUID_FORMAT_MJPEG \ |
| { 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \ |
| 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} |
| @@ -185,6 +211,7 @@ struct uvc_xu_control { |
| #define UVC_QUIRK_FIX_BANDWIDTH 0x00000080 |
| #define UVC_QUIRK_PROBE_DEF 0x00000100 |
| #define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200 |
| +#define UVC_QUIRK_CUT_EXTRA_EOI 0x00000400 |
| |
| /* Format flags */ |
| #define UVC_FMT_FLAG_COMPRESSED 0x00000001 |
| @@ -412,6 +439,7 @@ struct uvc_video_chain { |
| struct list_head entities; /* All entities */ |
| struct uvc_entity *processing; /* Processing unit */ |
| struct uvc_entity *selector; /* Selector unit */ |
| + struct list_head extensions; |
| |
| struct mutex ctrl_mutex; /* Protects ctrl.info */ |
| }; |
| @@ -532,6 +560,7 @@ struct uvc_driver { |
| |
| #define UVC_WARN_MINMAX 0 |
| #define UVC_WARN_PROBE_DEF 1 |
| +#define UVC_WARN_CUT_EXTRA_EOI 2 |
| |
| extern unsigned int uvc_clock_param; |
| extern unsigned int uvc_no_drop_param; |
| @@ -553,6 +582,16 @@ extern unsigned int uvc_timeout_param; |
| #define uvc_printk(level, msg...) \ |
| printk(level "uvcvideo: " msg) |
| |
| +#define UVC_GUID_FORMAT "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-" \ |
| + "%02x%02x%02x%02x%02x%02x" |
| +#define UVC_GUID_ARGS(guid) \ |
| + (guid)[3], (guid)[2], (guid)[1], (guid)[0], \ |
| + (guid)[5], (guid)[4], \ |
| + (guid)[7], (guid)[6], \ |
| + (guid)[8], (guid)[9], \ |
| + (guid)[10], (guid)[11], (guid)[12], \ |
| + (guid)[13], (guid)[14], (guid)[15] |
| + |
| /* -------------------------------------------------------------------------- |
| * Internal functions. |
| */ |
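| |
| UVC GUIDs carry their first three fields little-endian on the wire, which |
| is why UVC_GUID_ARGS swaps bytes 0-3, 4-5 and 6-7 for display. A small |
| sketch using the standard YUY2 format GUID: |
| |
| static void print_yuy2_guid(void) |
| { |
|         static const __u8 yuy2[16] = { |
|                 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, |
|                 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71 |
|         }; |
| |
|         /* prints "fmt 32595559-0000-0010-8000-00aa00389b71" */ |
|         printk(KERN_INFO "fmt " UVC_GUID_FORMAT "\n", UVC_GUID_ARGS(yuy2)); |
| } |
| |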
| diff --git a/stblinux-2.6.37/drivers/misc/Kconfig b/stblinux-2.6.37/drivers/misc/Kconfig |
| index 4d073f1..73fc71c 100644 |
| --- a/stblinux-2.6.37/drivers/misc/Kconfig |
| +++ b/stblinux-2.6.37/drivers/misc/Kconfig |
| @@ -53,6 +53,14 @@ config AD525X_DPOT_SPI |
| To compile this driver as a module, choose M here: the |
| module will be called ad525x_dpot-spi. |
| |
| +config ANDROID_PMEM |
| + bool "Android pmem allocator" |
| + default y |
| + |
| +config ANDROID_PMEM_RUA |
| + tristate "Android rua pmem allocator" |
| + default n |
| + |
| config ATMEL_PWM |
| tristate "Atmel AT32/AT91 PWM support" |
| depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91CAP9 |
| @@ -208,6 +216,13 @@ config ENCLOSURE_SERVICES |
| driver (SCSI/ATA) which supports enclosures |
| or a SCSI enclosure device (SES) to use these services. |
| |
| +config KERNEL_DEBUGGER_CORE |
| + bool "Kernel Debugger Core" |
| + default n |
| + ---help--- |
| + Generic kernel debugging command processor used by low level |
| + (interrupt context) platform-specific debuggers. |
| + |
| config SGI_XP |
| tristate "Support communication between SGI SSIs" |
| depends on NET |
| @@ -312,6 +327,17 @@ config ISL29003 |
| |
| This driver can also be built as a module. If so, the module |
| will be called isl29003. |
| +config UID_STAT |
| + bool "UID based statistics tracking exported to /proc/uid_stat" |
| + default n |
| + |
| +config WL127X_RFKILL |
| + tristate "Bluetooth power control driver for TI wl127x" |
| + depends on RFKILL |
| + default n |
| + ---help--- |
| + Creates an rfkill entry in sysfs for power control of Bluetooth |
| + TI wl127x chips. |
| |
| config ISL29020 |
| tristate "Intersil ISL29020 ambient light sensor" |
| diff --git a/stblinux-2.6.37/drivers/misc/Makefile b/stblinux-2.6.37/drivers/misc/Makefile |
| index 98009cc..11d7ff5 100644 |
| --- a/stblinux-2.6.37/drivers/misc/Makefile |
| +++ b/stblinux-2.6.37/drivers/misc/Makefile |
| @@ -15,11 +15,14 @@ obj-$(CONFIG_LKDTM) += lkdtm.o |
| obj-$(CONFIG_TIFM_CORE) += tifm_core.o |
| obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o |
| obj-$(CONFIG_PHANTOM) += phantom.o |
| +obj-$(CONFIG_ANDROID_PMEM) += pmem.o |
| +obj-$(CONFIG_ANDROID_PMEM_RUA) += pmem_rua.o |
| obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o |
| obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o |
| obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o |
| obj-$(CONFIG_SGI_IOC4) += ioc4.o |
| obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o |
| +obj-$(CONFIG_KERNEL_DEBUGGER_CORE) += kernel_debugger.o |
| obj-$(CONFIG_KGDB_TESTS) += kgdbts.o |
| obj-$(CONFIG_SGI_XP) += sgi-xp/ |
| obj-$(CONFIG_SGI_GRU) += sgi-gru/ |
| @@ -28,6 +31,7 @@ obj-$(CONFIG_HP_ILO) += hpilo.o |
| obj-$(CONFIG_APDS9802ALS) += apds9802als.o |
| obj-$(CONFIG_ISL29003) += isl29003.o |
| obj-$(CONFIG_ISL29020) += isl29020.o |
| +obj-$(CONFIG_UID_STAT) += uid_stat.o |
| obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o |
| obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o |
| obj-$(CONFIG_DS1682) += ds1682.o |
| @@ -37,6 +41,7 @@ obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/ |
| obj-$(CONFIG_HMC6352) += hmc6352.o |
| obj-y += eeprom/ |
| obj-y += cb710/ |
| +obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o |
| obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o |
| obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o |
| obj-$(CONFIG_PCH_PHUB) += pch_phub.o |
| diff --git a/stblinux-2.6.37/drivers/misc/kernel_debugger.c b/stblinux-2.6.37/drivers/misc/kernel_debugger.c |
| new file mode 100644 |
| index 0000000..f10eaa4 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/misc/kernel_debugger.c |
| @@ -0,0 +1,79 @@ |
| +/* drivers/misc/kernel_debugger.c |
| + * |
| + * Guts of the kernel debugger. |
| + * Needs something to actually push commands to it. |
| + * |
| + * Copyright (C) 2007-2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/ctype.h> |
| +#include <linux/device.h> |
| +#include <linux/sched.h> |
| +#include <linux/spinlock.h> |
| +#include <linux/sysrq.h> |
| +#include <linux/kernel_debugger.h> |
| + |
| +#define dprintf(fmt...) (ctxt->printf(ctxt->cookie, fmt)) |
| + |
| +static void do_ps(struct kdbg_ctxt *ctxt) |
| +{ |
| + struct task_struct *g, *p; |
| + unsigned state; |
| + static const char stat_nam[] = "RSDTtZX"; |
| + |
| + dprintf("pid ppid prio task pc\n"); |
| + read_lock(&tasklist_lock); |
| + do_each_thread(g, p) { |
| + state = p->state ? __ffs(p->state) + 1 : 0; |
| + dprintf("%5d %5d %4d ", p->pid, p->parent->pid, p->prio); |
| + dprintf("%-13.13s %c", p->comm, |
| + state >= sizeof(stat_nam) ? '?' : stat_nam[state]); |
| + if (state == TASK_RUNNING) |
| + dprintf(" running\n"); |
| + else |
| + dprintf(" %08lx\n", thread_saved_pc(p)); |
| + } while_each_thread(g, p); |
| + read_unlock(&tasklist_lock); |
| +} |
| + |
| +int log_buf_copy(char *dest, int idx, int len); |
| +extern int do_syslog(int type, char __user *buf, int count); |
| +static void do_sysrq(struct kdbg_ctxt *ctxt, char rq) |
| +{ |
| + char buf[128]; |
| + int ret; |
| + int idx = 0; |
| + do_syslog(5 /* clear */, NULL, 0); |
| + __handle_sysrq(rq, NULL, 0); |
| + while (1) { |
| + ret = log_buf_copy(buf, idx, sizeof(buf) - 1); |
| + if (ret <= 0) |
| + break; |
| + buf[ret] = 0; |
| + dprintf("%s", buf); |
| + idx += ret; |
| + } |
| +} |
| + |
| +int kernel_debugger(struct kdbg_ctxt *ctxt, char *cmd) |
| +{ |
| + if (!strcmp(cmd, "ps")) |
| + do_ps(ctxt); |
| + if (!strcmp(cmd, "sysrq")) |
| + do_sysrq(ctxt, 'h'); |
| + if (!strncmp(cmd, "sysrq ", 6)) |
| + do_sysrq(ctxt, cmd[6]); |
| + |
| + return 0; |
| +} |
| + |
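| |
| A sketch of a caller, e.g. a polled-serial debug console: supply a printf |
| hook in a kdbg_ctxt and pass command strings in. The printf/cookie fields |
| match the dprintf macro above; uart_puts() is a hypothetical transport. |
| |
| #include <stdarg.h> |
| #include <linux/kernel.h> |
| #include <linux/kernel_debugger.h> |
| |
| static int dbg_printf(void *cookie, const char *fmt, ...) |
| { |
|         char buf[256]; |
|         va_list ap; |
|         int n; |
| |
|         va_start(ap, fmt); |
|         n = vscnprintf(buf, sizeof(buf), fmt, ap); |
|         va_end(ap); |
|         uart_puts(buf); /* hypothetical polled-UART output */ |
|         return n; |
| } |
| |
| static struct kdbg_ctxt dbg_ctxt = { .printf = dbg_printf }; |
| |
| /* called from interrupt context when a full command line arrives */ |
| static void debug_handle_command(char *cmd) |
| { |
|         kernel_debugger(&dbg_ctxt, cmd); /* e.g. "ps" or "sysrq t" */ |
| } |
| |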
| diff --git a/stblinux-2.6.37/drivers/misc/pmem.c b/stblinux-2.6.37/drivers/misc/pmem.c |
| new file mode 100644 |
| index 0000000..c265788 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/misc/pmem.c |
| @@ -0,0 +1,1346 @@ |
| +/* drivers/misc/pmem.c |
| + * |
| + * Copyright (C) 2007 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/miscdevice.h> |
| +#include <linux/platform_device.h> |
| +#include <linux/fs.h> |
| +#include <linux/file.h> |
| +#include <linux/mm.h> |
| +#include <linux/list.h> |
| +#include <linux/debugfs.h> |
| +#include <linux/android_pmem.h> |
| +#include <linux/mempolicy.h> |
| +#include <linux/sched.h> |
| +#include <linux/semaphore.h> |
| +#include <asm/io.h> |
| +#include <asm/uaccess.h> |
| +#include <asm/cacheflush.h> |
| + |
| +#define PMEM_MAX_DEVICES 10 |
| +#define PMEM_MAX_ORDER 128 |
| +#define PMEM_MIN_ALLOC PAGE_SIZE |
| + |
| +#define PMEM_DEBUG 1 |
| + |
| +/* indicates that a reference to this file has been taken via get_pmem_file; |
| + * the file should not be released until put_pmem_file is called */ |
| +#define PMEM_FLAGS_BUSY 0x1 |
| +/* indicates that this is a suballocation of a larger master range */ |
| +#define PMEM_FLAGS_CONNECTED (0x1 << 1) |
| +/* indicates this is a master and not a sub allocation and that it is mmaped */ |
| +#define PMEM_FLAGS_MASTERMAP (0x1 << 2) |
| +/* submap and unsubmap flags indicate: |
| + * 00: subregion has never been mmaped |
| + * 10: subregion has been mmaped, reference to the mm was taken |
| + * 11: subregion has been released, reference to the mm still held |
| + * 01: subregion has been released, reference to the mm has been released |
| + */ |
| +#define PMEM_FLAGS_SUBMAP (0x1 << 3) |
| +#define PMEM_FLAGS_UNSUBMAP (0x1 << 4) |
| + |
| + |
| +struct pmem_data { |
| + /* in alloc mode: an index into the bitmap |
| + * in no_alloc mode: the size of the allocation */ |
| + int index; |
| + /* see flags above for descriptions */ |
| + unsigned int flags; |
| + /* protects this data field; if the mm_mmap sem will be held at the |
| + * same time as this sem, the mm sem must be taken first (as this is |
| + * the order for vma_open and vma_close ops) */ |
| + struct rw_semaphore sem; |
| + /* info about the mmapping process */ |
| + struct vm_area_struct *vma; |
| + /* task struct of the mapping process */ |
| + struct task_struct *task; |
| + /* process id of the mapping process */ |
| + pid_t pid; |
| + /* file descriptor of the master */ |
| + int master_fd; |
| + /* file struct of the master */ |
| + struct file *master_file; |
| + /* a list of currently available regions if this is a suballocation */ |
| + struct list_head region_list; |
| + /* a linked list of data so we can access them for debugging */ |
| + struct list_head list; |
| +#if PMEM_DEBUG |
| + int ref; |
| +#endif |
| +}; |
| + |
| +struct pmem_bits { |
| + unsigned allocated:1; /* 1 if allocated, 0 if free */ |
| + unsigned order:7; /* size of the region in pmem space */ |
| +}; |
| + |
| +struct pmem_region_node { |
| + struct pmem_region region; |
| + struct list_head list; |
| +}; |
| + |
| +#define PMEM_DEBUG_MSGS 0 |
| +#if PMEM_DEBUG_MSGS |
| +#define DLOG(fmt,args...) \ |
| + do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ |
| + ##args); } \ |
| + while (0) |
| +#else |
| +#define DLOG(x...) do {} while (0) |
| +#endif |
| + |
| +struct pmem_info { |
| + struct miscdevice dev; |
| + /* physical start address of the remapped pmem space */ |
| + unsigned long base; |
| + /* virtual start address of the remapped pmem space */ |
| + unsigned char __iomem *vbase; |
| + /* total size of the pmem space */ |
| + unsigned long size; |
| + /* number of entries in the pmem space */ |
| + unsigned long num_entries; |
| + /* pfn of the garbage page in memory */ |
| + unsigned long garbage_pfn; |
| + /* index of the garbage page in the pmem space */ |
| + int garbage_index; |
| + /* the bitmap for the region indicating which entries are allocated |
| + * and which are free */ |
| + struct pmem_bits *bitmap; |
| + /* indicates the region should not be managed with an allocator */ |
| + unsigned no_allocator; |
| + /* indicates maps of this region should be cached, if a mix of |
| + * cached and uncached is desired, set this and open the device with |
| + * O_SYNC to get an uncached region */ |
| + unsigned cached; |
| + unsigned buffered; |
| + /* in no_allocator mode the first mapper gets the whole space and sets |
| + * this flag */ |
| + unsigned allocated; |
| + /* for debugging, creates a list of pmem file structs, the |
| + * data_list_sem should be taken before pmem_data->sem if both are |
| + * needed */ |
| + struct semaphore data_list_sem; |
| + struct list_head data_list; |
| + /* pmem_sem protects the bitmap array |
| + * a write lock should be held when modifying entries in bitmap |
| + * a read lock should be held when reading data from bits or |
| + * dereferencing a pointer into bitmap |
| + * |
| + * pmem_data->sem protects the pmem data of a particular file |
| + * Many of the functions that require the pmem_data->sem have a non- |
| + * locking version for when the caller is already holding that sem. |
| + * |
| + * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER: |
| + * down(pmem_data->sem) => down(bitmap_sem) |
| + */ |
| + struct rw_semaphore bitmap_sem; |
| + |
| + long (*ioctl)(struct file *, unsigned int, unsigned long); |
| + int (*release)(struct inode *, struct file *); |
| +}; |
| + |
| +static struct pmem_info pmem[PMEM_MAX_DEVICES]; |
| +static int id_count; |
| + |
| +#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated) |
| +#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order |
| +#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index))) |
| +#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index))) |
| +#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC) |
| +#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base) |
| +#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC) |
| +#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \ |
| + PMEM_LEN(id, index)) |
| +#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(index) + pmem[id].vbase) |
| +#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \ |
| + PMEM_LEN(id, index)) |
| +#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED) |
| +#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK))) |
| +#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \ |
| + (!(data->flags & PMEM_FLAGS_UNSUBMAP))) |
| + |
| +static int pmem_release(struct inode *, struct file *); |
| +static int pmem_mmap(struct file *, struct vm_area_struct *); |
| +static int pmem_open(struct inode *, struct file *); |
| +static long pmem_ioctl(struct file *, unsigned int, unsigned long); |
| + |
| +struct file_operations pmem_fops = { |
| + .release = pmem_release, |
| + .mmap = pmem_mmap, |
| + .open = pmem_open, |
| + .unlocked_ioctl = pmem_ioctl, |
| +}; |
| + |
| +static int get_id(struct file *file) |
| +{ |
| + return MINOR(file->f_dentry->d_inode->i_rdev); |
| +} |
| + |
| +static int is_pmem_file(struct file *file) |
| +{ |
| + int id; |
| + |
| + if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode)) |
| + return 0; |
| + id = get_id(file); |
| + if (unlikely(id >= PMEM_MAX_DEVICES)) |
| + return 0; |
| + if (unlikely(file->f_dentry->d_inode->i_rdev != |
| + MKDEV(MISC_MAJOR, pmem[id].dev.minor))) |
| + return 0; |
| + return 1; |
| +} |
| + |
| +static int has_allocation(struct file *file) |
| +{ |
| + struct pmem_data *data; |
| + /* check is_pmem_file first if not accessed via pmem_file_ops */ |
| + |
| + if (unlikely(!file->private_data)) |
| + return 0; |
| + data = (struct pmem_data *)file->private_data; |
| + if (unlikely(data->index < 0)) |
| + return 0; |
| + return 1; |
| +} |
| + |
| +static int is_master_owner(struct file *file) |
| +{ |
| + struct file *master_file; |
| + struct pmem_data *data; |
| + int put_needed, ret = 0; |
| + |
| + if (!is_pmem_file(file) || !has_allocation(file)) |
| + return 0; |
| + data = (struct pmem_data *)file->private_data; |
| + if (PMEM_FLAGS_MASTERMAP & data->flags) |
| + return 1; |
| + master_file = fget_light(data->master_fd, &put_needed); |
| + if (master_file && data->master_file == master_file) |
| + ret = 1; |
| + fput_light(master_file, put_needed); |
| + return ret; |
| +} |
| + |
| +static int pmem_free(int id, int index) |
| +{ |
| + /* caller should hold the write lock on pmem_sem! */ |
| + int buddy, curr = index; |
| + DLOG("index %d\n", index); |
| + |
| + if (pmem[id].no_allocator) { |
| + pmem[id].allocated = 0; |
| + return 0; |
| + } |
| + /* clean up the bitmap, merging any buddies */ |
| + pmem[id].bitmap[curr].allocated = 0; |
| + /* find a slot's buddy: Buddy# = Slot# ^ (1 << order) |
| + * if the buddy is also free merge them |
| + * repeat until the buddy is not free or end of the bitmap is reached |
| + */ |
| + do { |
| + buddy = PMEM_BUDDY_INDEX(id, curr); |
| + if (PMEM_IS_FREE(id, buddy) && |
| + PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) { |
| + PMEM_ORDER(id, buddy)++; |
| + PMEM_ORDER(id, curr)++; |
| + curr = min(buddy, curr); |
| + } else { |
| + break; |
| + } |
| + } while (curr < pmem[id].num_entries); |
| + |
| + return 0; |
| +} |
| + |
| +static void pmem_revoke(struct file *file, struct pmem_data *data); |
| + |
| +static int pmem_release(struct inode *inode, struct file *file) |
| +{ |
| + struct pmem_data *data = (struct pmem_data *)file->private_data; |
| + struct pmem_region_node *region_node; |
| + struct list_head *elt, *elt2; |
| + int id = get_id(file), ret = 0; |
| + |
| + |
| + down(&pmem[id].data_list_sem); |
| + /* if this file is a master, revoke all the memory in the connected |
| + * files */ |
| + if (PMEM_FLAGS_MASTERMAP & data->flags) { |
| + struct pmem_data *sub_data; |
| + list_for_each(elt, &pmem[id].data_list) { |
| + sub_data = list_entry(elt, struct pmem_data, list); |
| + down_read(&sub_data->sem); |
| + if (PMEM_IS_SUBMAP(sub_data) && |
| + file == sub_data->master_file) { |
| + up_read(&sub_data->sem); |
| + pmem_revoke(file, sub_data); |
| + } else |
| + up_read(&sub_data->sem); |
| + } |
| + } |
| + list_del(&data->list); |
| + up(&pmem[id].data_list_sem); |
| + |
| + |
| + down_write(&data->sem); |
| + |
| + /* if it's not a connected file and it has an allocation, free it */ |
| + if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) { |
| + down_write(&pmem[id].bitmap_sem); |
| + ret = pmem_free(id, data->index); |
| + up_write(&pmem[id].bitmap_sem); |
| + } |
| + |
| + /* if this file is a submap (mapped, connected file), downref the |
| + * task struct */ |
| + if (PMEM_FLAGS_SUBMAP & data->flags) |
| + if (data->task) { |
| + put_task_struct(data->task); |
| + data->task = NULL; |
| + } |
| + |
| + file->private_data = NULL; |
| + |
| + list_for_each_safe(elt, elt2, &data->region_list) { |
| + region_node = list_entry(elt, struct pmem_region_node, list); |
| + list_del(elt); |
| + kfree(region_node); |
| + } |
| + BUG_ON(!list_empty(&data->region_list)); |
| + |
| + up_write(&data->sem); |
| + kfree(data); |
| + if (pmem[id].release) |
| + ret = pmem[id].release(inode, file); |
| + |
| + return ret; |
| +} |
| + |
| +static int pmem_open(struct inode *inode, struct file *file) |
| +{ |
| + struct pmem_data *data; |
| + int id = get_id(file); |
| + int ret = 0; |
| + |
| + DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file)); |
| + /* set up file->private_data to indicate it's unmapped */ |
| + /* you can only open a pmem device one time */ |
| + if (file->private_data != NULL) |
| + return -1; |
| + data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL); |
| + if (!data) { |
| + printk("pmem: unable to allocate memory for pmem metadata."); |
| + return -1; |
| + } |
| + data->flags = 0; |
| + data->index = -1; |
| + data->task = NULL; |
| + data->vma = NULL; |
| + data->pid = 0; |
| + data->master_file = NULL; |
| +#if PMEM_DEBUG |
| + data->ref = 0; |
| +#endif |
| + INIT_LIST_HEAD(&data->region_list); |
| + init_rwsem(&data->sem); |
| + |
| + file->private_data = data; |
| + INIT_LIST_HEAD(&data->list); |
| + |
| + down(&pmem[id].data_list_sem); |
| + list_add(&data->list, &pmem[id].data_list); |
| + up(&pmem[id].data_list_sem); |
| + return ret; |
| +} |
| + |
| +static unsigned long pmem_order(unsigned long len) |
| +{ |
| + int i; |
| + |
| + len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC; |
| + len--; |
| + for (i = 0; i < sizeof(len)*8; i++) |
| + if (len >> i == 0) |
| + break; |
| + return i; |
| +} |
| + |
| +static int pmem_allocate(int id, unsigned long len) |
| +{ |
| + /* caller should hold the write lock on pmem_sem! */ |
| + /* return the corresponding pdata[] entry */ |
| + int curr = 0; |
| + int end = pmem[id].num_entries; |
| + int best_fit = -1; |
| + unsigned long order = pmem_order(len); |
| + |
| + if (pmem[id].no_allocator) { |
| + DLOG("no allocator"); |
| + if ((len > pmem[id].size) || pmem[id].allocated) |
| + return -1; |
| + pmem[id].allocated = 1; |
| + return len; |
| + } |
| + |
| + if (order > PMEM_MAX_ORDER) |
| + return -1; |
| + DLOG("order %lx\n", order); |
| + |
| + /* look through the bitmap: |
| + * if you find a free slot of the correct order use it |
| + * otherwise, use the best fit (smallest with size > order) slot |
| + */ |
| + while (curr < end) { |
| + if (PMEM_IS_FREE(id, curr)) { |
| + if (PMEM_ORDER(id, curr) == (unsigned char)order) { |
| + /* set the not free bit and clear others */ |
| + best_fit = curr; |
| + break; |
| + } |
| + if (PMEM_ORDER(id, curr) > (unsigned char)order && |
| + (best_fit < 0 || |
| + PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit))) |
| + best_fit = curr; |
| + } |
| + curr = PMEM_NEXT_INDEX(id, curr); |
| + } |
| + |
| + /* if best_fit < 0, there are no suitable slots, |
| + * return an error |
| + */ |
| + if (best_fit < 0) { |
| + printk("pmem: no space left to allocate!\n"); |
| + return -1; |
| + } |
| + |
| + /* now partition the best fit: |
| + * split the slot into 2 buddies of order - 1 |
| + * repeat until the slot is of the correct order |
| + */ |
| + while (PMEM_ORDER(id, best_fit) > (unsigned char)order) { |
| + int buddy; |
| + PMEM_ORDER(id, best_fit) -= 1; |
| + buddy = PMEM_BUDDY_INDEX(id, best_fit); |
| + PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit); |
| + } |
| + pmem[id].bitmap[best_fit].allocated = 1; |
| + return best_fit; |
| +} |
| + |
| +static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot) |
| +{ |
| + int id = get_id(file); |
| +#ifdef pgprot_noncached |
| + if (pmem[id].cached == 0 || file->f_flags & O_SYNC) |
| + return pgprot_noncached(vma_prot); |
| +#endif |
| +#ifdef pgprot_ext_buffered |
| + else if (pmem[id].buffered) |
| + return pgprot_ext_buffered(vma_prot); |
| +#endif |
| + return vma_prot; |
| +} |
| + |
| +static unsigned long pmem_start_addr(int id, struct pmem_data *data) |
| +{ |
| + if (pmem[id].no_allocator) |
| + return PMEM_START_ADDR(id, 0); |
| + else |
| + return PMEM_START_ADDR(id, data->index); |
| + |
| +} |
| + |
| +static void *pmem_start_vaddr(int id, struct pmem_data *data) |
| +{ |
| + return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase; |
| +} |
| + |
| +static unsigned long pmem_len(int id, struct pmem_data *data) |
| +{ |
| + if (pmem[id].no_allocator) |
| + return data->index; |
| + else |
| + return PMEM_LEN(id, data->index); |
| +} |
| + |
| +static int pmem_map_garbage(int id, struct vm_area_struct *vma, |
| + struct pmem_data *data, unsigned long offset, |
| + unsigned long len) |
| +{ |
| + int i, garbage_pages = len >> PAGE_SHIFT; |
| + |
| + vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE; |
| + for (i = 0; i < garbage_pages; i++) { |
| + if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE), |
| + pmem[id].garbage_pfn)) |
| + return -EAGAIN; |
| + } |
| + return 0; |
| +} |
| + |
| +static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma, |
| + struct pmem_data *data, unsigned long offset, |
| + unsigned long len) |
| +{ |
| + int garbage_pages; |
| + DLOG("unmap offset %lx len %lx\n", offset, len); |
| + |
| + BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); |
| + |
| + garbage_pages = len >> PAGE_SHIFT; |
| + zap_page_range(vma, vma->vm_start + offset, len, NULL); |
| + pmem_map_garbage(id, vma, data, offset, len); |
| + return 0; |
| +} |
| + |
| +static int pmem_map_pfn_range(int id, struct vm_area_struct *vma, |
| + struct pmem_data *data, unsigned long offset, |
| + unsigned long len) |
| +{ |
| + DLOG("map offset %lx len %lx\n", offset, len); |
| + BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start)); |
| + BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end)); |
| + BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); |
| + BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset)); |
| + |
| + if (io_remap_pfn_range(vma, vma->vm_start + offset, |
| + (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT, |
| + len, vma->vm_page_prot)) { |
| + return -EAGAIN; |
| + } |
| + return 0; |
| +} |
| + |
| +static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma, |
| + struct pmem_data *data, unsigned long offset, |
| + unsigned long len) |
| +{ |
| + /* hold the mm sem for the vma you are modifying when you call this */ |
| + BUG_ON(!vma); |
| + zap_page_range(vma, vma->vm_start + offset, len, NULL); |
| + return pmem_map_pfn_range(id, vma, data, offset, len); |
| +} |
| + |
| +static void pmem_vma_open(struct vm_area_struct *vma) |
| +{ |
| + struct file *file = vma->vm_file; |
| + struct pmem_data *data = file->private_data; |
| + int id = get_id(file); |
| + /* this should never be called as we don't support copying pmem |
| + * ranges via fork */ |
| + BUG_ON(!has_allocation(file)); |
| + down_write(&data->sem); |
| + /* remap the garbage pages, forkers don't get access to the data */ |
| + pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start); |
| + up_write(&data->sem); |
| +} |
| + |
| +static void pmem_vma_close(struct vm_area_struct *vma) |
| +{ |
| + struct file *file = vma->vm_file; |
| + struct pmem_data *data = file->private_data; |
| + |
| + DLOG("current %u ppid %u file %p count %d\n", current->pid, |
| + current->parent->pid, file, file_count(file)); |
| + if (unlikely(!is_pmem_file(file) || !has_allocation(file))) { |
| + printk(KERN_WARNING "pmem: something is very wrong, you are " |
| + "closing a vm backing an allocation that doesn't " |
| + "exist!\n"); |
| + return; |
| + } |
| + down_write(&data->sem); |
| + if (data->vma == vma) { |
| + data->vma = NULL; |
| + if ((data->flags & PMEM_FLAGS_CONNECTED) && |
| + (data->flags & PMEM_FLAGS_SUBMAP)) |
| + data->flags |= PMEM_FLAGS_UNSUBMAP; |
| + } |
| + /* the kernel is going to free this vma now anyway */ |
| + up_write(&data->sem); |
| +} |
| + |
| +static struct vm_operations_struct vm_ops = { |
| + .open = pmem_vma_open, |
| + .close = pmem_vma_close, |
| +}; |
| + |
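| +/* mmap entry point: a plain fd becomes the master map of the whole |
| + * allocation, while a connected fd becomes a submap that starts out |
| + * backed by garbage pages and only exposes the regions the master has |
| + * remapped into it. */ |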
| +static int pmem_mmap(struct file *file, struct vm_area_struct *vma) |
| +{ |
| + struct pmem_data *data; |
| + int index; |
| + unsigned long vma_size = vma->vm_end - vma->vm_start; |
| + int ret = 0, id = get_id(file); |
| + |
| + if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) { |
| +#if PMEM_DEBUG |
| +		printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned" |
| +		       " and a multiple of PAGE_SIZE.\n"); |
| +#endif |
| + return -EINVAL; |
| + } |
| + |
| + data = (struct pmem_data *)file->private_data; |
| + down_write(&data->sem); |
| +	/* check that this file isn't already mmapped; for submaps check |
| +	 * that this file has never been mmapped */ |
| + if ((data->flags & PMEM_FLAGS_MASTERMAP) || |
| + (data->flags & PMEM_FLAGS_SUBMAP) || |
| + (data->flags & PMEM_FLAGS_UNSUBMAP)) { |
| +#if PMEM_DEBUG |
| + printk(KERN_ERR "pmem: you can only mmap a pmem file once, " |
| +		       "this file is already mmapped. %x\n", data->flags); |
| +#endif |
| + ret = -EINVAL; |
| + goto error; |
| + } |
| +	/* if this file has no allocation yet, allocate one now */ |
| +	if (data && data->index == -1) { |
| +		down_write(&pmem[id].bitmap_sem); |
| +		index = pmem_allocate(id, vma_size); |
| + up_write(&pmem[id].bitmap_sem); |
| + data->index = index; |
| + } |
| +	/* either no space was available or an error occurred */ |
| + if (!has_allocation(file)) { |
| + ret = -EINVAL; |
| +		printk(KERN_INFO "pmem: could not find allocation for map.\n"); |
| + goto error; |
| + } |
| + |
| + if (pmem_len(id, data) < vma_size) { |
| +#if PMEM_DEBUG |
| +		printk(KERN_WARNING "pmem: mmap size [%lu] does not match " |
| +			"size of backing region [%lu].\n", vma_size, |
| + pmem_len(id, data)); |
| +#endif |
| + ret = -EINVAL; |
| + goto error; |
| + } |
| + |
| + vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT; |
| + vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot); |
| + |
| + if (data->flags & PMEM_FLAGS_CONNECTED) { |
| + struct pmem_region_node *region_node; |
| + struct list_head *elt; |
| + if (pmem_map_garbage(id, vma, data, 0, vma_size)) { |
| +			printk(KERN_ERR "pmem: mmap failed in kernel!\n"); |
| + ret = -EAGAIN; |
| + goto error; |
| + } |
| + list_for_each(elt, &data->region_list) { |
| + region_node = list_entry(elt, struct pmem_region_node, |
| + list); |
| + DLOG("remapping file: %p %lx %lx\n", file, |
| + region_node->region.offset, |
| + region_node->region.len); |
| + if (pmem_remap_pfn_range(id, vma, data, |
| + region_node->region.offset, |
| + region_node->region.len)) { |
| + ret = -EAGAIN; |
| + goto error; |
| + } |
| + } |
| + data->flags |= PMEM_FLAGS_SUBMAP; |
| + get_task_struct(current->group_leader); |
| + data->task = current->group_leader; |
| + data->vma = vma; |
| +#if PMEM_DEBUG |
| + data->pid = current->pid; |
| +#endif |
| +		DLOG("submapped file %p vma %p pid %u\n", file, vma, |
| + current->pid); |
| + } else { |
| + if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) { |
| +			printk(KERN_ERR "pmem: mmap failed in kernel!\n"); |
| + ret = -EAGAIN; |
| + goto error; |
| + } |
| + data->flags |= PMEM_FLAGS_MASTERMAP; |
| + data->pid = current->pid; |
| + } |
| + vma->vm_ops = &vm_ops; |
| +error: |
| + up_write(&data->sem); |
| + return ret; |
| +} |
| + |
| +/* the following are the API for accessing pmem regions by other drivers |
| + * from inside the kernel */ |
| +int get_pmem_user_addr(struct file *file, unsigned long *start, |
| + unsigned long *len) |
| +{ |
| + struct pmem_data *data; |
| + if (!is_pmem_file(file) || !has_allocation(file)) { |
| +#if PMEM_DEBUG |
| +		printk(KERN_INFO "pmem: requested pmem data from invalid " |
| +			"file.\n"); |
| +#endif |
| + return -1; |
| + } |
| + data = (struct pmem_data *)file->private_data; |
| + down_read(&data->sem); |
| + if (data->vma) { |
| + *start = data->vma->vm_start; |
| + *len = data->vma->vm_end - data->vma->vm_start; |
| + } else { |
| + *start = 0; |
| + *len = 0; |
| + } |
| + up_read(&data->sem); |
| + return 0; |
| +} |
| + |
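| +/* Return the physical start, kernel virtual start and length of the |
| + * allocation backing a pmem file. */ |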
| +int get_pmem_addr(struct file *file, unsigned long *start, |
| + unsigned long *vstart, unsigned long *len) |
| +{ |
| + struct pmem_data *data; |
| + int id; |
| + |
| + if (!is_pmem_file(file) || !has_allocation(file)) { |
| + return -1; |
| + } |
| + |
| + data = (struct pmem_data *)file->private_data; |
| +	if (data->index == -1) { |
| +#if PMEM_DEBUG |
| +		printk(KERN_INFO "pmem: requested pmem data from file with no " |
| +			"allocation.\n"); |
| +#endif |
| +		return -1; |
| +	} |
| + id = get_id(file); |
| + |
| + down_read(&data->sem); |
| + *start = pmem_start_addr(id, data); |
| + *len = pmem_len(id, data); |
| + *vstart = (unsigned long)pmem_start_vaddr(id, data); |
| + up_read(&data->sem); |
| +#if PMEM_DEBUG |
| + down_write(&data->sem); |
| + data->ref++; |
| + up_write(&data->sem); |
| +#endif |
| + return 0; |
| +} |
| + |
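| +/* Like get_pmem_addr() but takes a file descriptor; on success a |
| + * reference to the file is held (and returned via *filp if requested) |
| + * until the caller drops it with put_pmem_file(). */ |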
| +int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, |
| + unsigned long *len, struct file **filp) |
| +{ |
| + struct file *file; |
| + |
| + file = fget(fd); |
| + if (unlikely(file == NULL)) { |
| +		printk(KERN_INFO "pmem: requested data from file descriptor " |
| +		       "that doesn't exist.\n"); |
| + return -1; |
| + } |
| + |
| + if (get_pmem_addr(file, start, vstart, len)) |
| + goto end; |
| + |
| + if (filp) |
| + *filp = file; |
| + return 0; |
| +end: |
| + fput(file); |
| + return -1; |
| +} |
| + |
| +EXPORT_SYMBOL(get_pmem_file); |
| + |
| +void put_pmem_file(struct file *file) |
| +{ |
| + struct pmem_data *data; |
| + int id; |
| + |
| + if (!is_pmem_file(file)) |
| + return; |
| + id = get_id(file); |
| + data = (struct pmem_data *)file->private_data; |
| +#if PMEM_DEBUG |
| + down_write(&data->sem); |
| + if (data->ref == 0) { |
| +		printk(KERN_ALERT "pmem: pmem_put > pmem_get %s (pid %d)\n", |
| + pmem[id].dev.name, data->pid); |
| + BUG(); |
| + } |
| + data->ref--; |
| + up_write(&data->sem); |
| +#endif |
| + fput(file); |
| +} |
| + |
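| +/* Write back and invalidate the CPU cache over the allocation (or, for |
| + * connected files, over the remapped region containing [offset, |
| + * offset + len)); a no-op for uncached pmem devices. */ |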
| +void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) |
| +{ |
| + struct pmem_data *data; |
| + int id; |
| + void *vaddr; |
| + struct pmem_region_node *region_node; |
| + struct list_head *elt; |
| + void *flush_start, *flush_end; |
| + |
| + if (!is_pmem_file(file) || !has_allocation(file)) { |
| + return; |
| + } |
| + |
| + id = get_id(file); |
| + data = (struct pmem_data *)file->private_data; |
| + if (!pmem[id].cached) |
| + return; |
| + |
| + down_read(&data->sem); |
| + vaddr = pmem_start_vaddr(id, data); |
| +	/* if this isn't a submapped file, flush the whole thing */ |
| + if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) { |
| +#if 0 |
| + dmac_flush_range(vaddr, vaddr + pmem_len(id, data)); |
| +#else |
| + dma_cache_wback_inv((u32)vaddr, pmem_len(id, data)); |
| +#endif |
| + goto end; |
| + } |
| + /* otherwise, flush the region of the file we are drawing */ |
| + list_for_each(elt, &data->region_list) { |
| + region_node = list_entry(elt, struct pmem_region_node, list); |
| + if ((offset >= region_node->region.offset) && |
| + ((offset + len) <= (region_node->region.offset + |
| + region_node->region.len))) { |
| + flush_start = vaddr + region_node->region.offset; |
| + flush_end = flush_start + region_node->region.len; |
| +#if 0 |
| + dmac_flush_range(flush_start, flush_end); |
| +#else |
| + dma_cache_wback_inv((u32)flush_start, region_node->region.len); |
| +#endif |
| + break; |
| + } |
| + } |
| +end: |
| + up_read(&data->sem); |
| +} |
| + |
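| +/* PMEM_CONNECT helper: point this fd at the allocation of the master fd |
| + * given by 'connect' so later PMEM_MAP/PMEM_UNMAP calls from the master |
| + * can expose sub-regions of that allocation through this fd. */ |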
| +static int pmem_connect(unsigned long connect, struct file *file) |
| +{ |
| + struct pmem_data *data = (struct pmem_data *)file->private_data; |
| + struct pmem_data *src_data; |
| + struct file *src_file; |
| + int ret = 0, put_needed; |
| + |
| + down_write(&data->sem); |
| + /* retrieve the src file and check it is a pmem file with an alloc */ |
| + src_file = fget_light(connect, &put_needed); |
| + DLOG("connect %p to %p\n", file, src_file); |
| + if (!src_file) { |
| +		printk(KERN_INFO "pmem: src file not found!\n"); |
| + ret = -EINVAL; |
| + goto err_no_file; |
| + } |
| + if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) { |
| + printk(KERN_INFO "pmem: src file is not a pmem file or has no " |
| + "alloc!\n"); |
| + ret = -EINVAL; |
| + goto err_bad_file; |
| + } |
| + src_data = (struct pmem_data *)src_file->private_data; |
| + |
| + if (has_allocation(file) && (data->index != src_data->index)) { |
| +		printk(KERN_INFO "pmem: file is already mapped but doesn't match this" |
| + " src_file!\n"); |
| + ret = -EINVAL; |
| + goto err_bad_file; |
| + } |
| + data->index = src_data->index; |
| + data->flags |= PMEM_FLAGS_CONNECTED; |
| + data->master_fd = connect; |
| + data->master_file = src_file; |
| + |
| +err_bad_file: |
| + fput_light(src_file, put_needed); |
| +err_no_file: |
| + up_write(&data->sem); |
| + return ret; |
| +} |
| + |
| +static void pmem_unlock_data_and_mm(struct pmem_data *data, |
| + struct mm_struct *mm) |
| +{ |
| + up_write(&data->sem); |
| + if (mm != NULL) { |
| + up_write(&mm->mmap_sem); |
| + mmput(mm); |
| + } |
| +} |
| + |
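| +/* Lock ordering requires mm->mmap_sem to be taken before data->sem, so |
| + * peek at the submap state under a read lock, take the mm semaphore, |
| + * then retake data->sem for writing and re-validate. */ |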
| +static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data, |
| + struct mm_struct **locked_mm) |
| +{ |
| + int ret = 0; |
| + struct mm_struct *mm = NULL; |
| + *locked_mm = NULL; |
| +lock_mm: |
| + down_read(&data->sem); |
| + if (PMEM_IS_SUBMAP(data)) { |
| + mm = get_task_mm(data->task); |
| + if (!mm) { |
| +#if PMEM_DEBUG |
| +		printk(KERN_INFO "pmem: can't remap, task is gone!\n"); |
| +#endif |
| + up_read(&data->sem); |
| + return -1; |
| + } |
| + } |
| + up_read(&data->sem); |
| + |
| + if (mm) |
| + down_write(&mm->mmap_sem); |
| + |
| + down_write(&data->sem); |
| +	/* check that the file didn't get mmapped before we could take the |
| +	 * data sem; this should be safe because you can only submap each |
| +	 * file once */ |
| +	if (PMEM_IS_SUBMAP(data) && !mm) { |
| +		/* pmem_unlock_data_and_mm() already drops data->sem */ |
| +		pmem_unlock_data_and_mm(data, mm); |
| +		goto lock_mm; |
| +	} |
| + /* now check that vma.mm is still there, it could have been |
| + * deleted by vma_close before we could get the data->sem */ |
| + if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) { |
| + /* might as well release this */ |
| + if (data->flags & PMEM_FLAGS_SUBMAP) { |
| + put_task_struct(data->task); |
| + data->task = NULL; |
| + /* lower the submap flag to show the mm is gone */ |
| + data->flags &= ~(PMEM_FLAGS_SUBMAP); |
| + } |
| + pmem_unlock_data_and_mm(data, mm); |
| + return -1; |
| + } |
| + *locked_mm = mm; |
| + return ret; |
| +} |
| + |
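| +/* Implements PMEM_MAP and PMEM_UNMAP: the master adds a sub-region of |
| + * its allocation to (or removes one from) a connected client's mapping |
| + * and its region list. */ |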
| +int pmem_remap(struct pmem_region *region, struct file *file, |
| + unsigned operation) |
| +{ |
| + int ret; |
| + struct pmem_region_node *region_node; |
| + struct mm_struct *mm = NULL; |
| + struct list_head *elt, *elt2; |
| + int id = get_id(file); |
| + struct pmem_data *data = (struct pmem_data *)file->private_data; |
| + |
| +	/* pmem region must be aligned on a page boundary */ |
| + if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) || |
| + !PMEM_IS_PAGE_ALIGNED(region->len))) { |
| +#if PMEM_DEBUG |
| +		printk(KERN_INFO "pmem: request for unaligned pmem suballocation " |
| + "%lx %lx\n", region->offset, region->len); |
| +#endif |
| + return -EINVAL; |
| + } |
| + |
| + /* if userspace requests a region of len 0, there's nothing to do */ |
| + if (region->len == 0) |
| + return 0; |
| + |
| +	/* lock the mm and data; if this fails, the task or vma that mapped |
| +	 * this fd is already gone and there is nothing left to remap */ |
| +	ret = pmem_lock_data_and_mm(file, data, &mm); |
| +	if (ret) |
| +		return 0; |
| + |
| +	/* only the owner of the master file can remap the client fds |
| +	 * that back it */ |
| + if (!is_master_owner(file)) { |
| +#if PMEM_DEBUG |
| +		printk(KERN_INFO "pmem: remap requested from non-master process\n"); |
| +#endif |
| + ret = -EINVAL; |
| + goto err; |
| + } |
| + |
| + /* check that the requested range is within the src allocation */ |
| + if (unlikely((region->offset > pmem_len(id, data)) || |
| + (region->len > pmem_len(id, data)) || |
| + (region->offset + region->len > pmem_len(id, data)))) { |
| +#if PMEM_DEBUG |
| + printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n"); |
| +#endif |
| + ret = -EINVAL; |
| + goto err; |
| + } |
| + |
| + if (operation == PMEM_MAP) { |
| + region_node = kmalloc(sizeof(struct pmem_region_node), |
| + GFP_KERNEL); |
| + if (!region_node) { |
| + ret = -ENOMEM; |
| +#if PMEM_DEBUG |
| +			printk(KERN_INFO "pmem: no space to allocate metadata!\n"); |
| +#endif |
| + goto err; |
| + } |
| + region_node->region = *region; |
| + list_add(®ion_node->list, &data->region_list); |
| + } else if (operation == PMEM_UNMAP) { |
| + int found = 0; |
| + list_for_each_safe(elt, elt2, &data->region_list) { |
| + region_node = list_entry(elt, struct pmem_region_node, |
| + list); |
| + if (region->len == 0 || |
| + (region_node->region.offset == region->offset && |
| + region_node->region.len == region->len)) { |
| + list_del(elt); |
| + kfree(region_node); |
| + found = 1; |
| + } |
| + } |
| + if (!found) { |
| +#if PMEM_DEBUG |
| +			printk(KERN_INFO "pmem: unmap region does not map any " |
| +				"mapped region!\n"); |
| +#endif |
| + ret = -EINVAL; |
| + goto err; |
| + } |
| + } |
| + |
| + if (data->vma && PMEM_IS_SUBMAP(data)) { |
| + if (operation == PMEM_MAP) |
| + ret = pmem_remap_pfn_range(id, data->vma, data, |
| + region->offset, region->len); |
| + else if (operation == PMEM_UNMAP) |
| + ret = pmem_unmap_pfn_range(id, data->vma, data, |
| + region->offset, region->len); |
| + } |
| + |
| +err: |
| + pmem_unlock_data_and_mm(data, mm); |
| + return ret; |
| +} |
| + |
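| +/* Revoke a client's access: clear its master reference and unmap every |
| + * remapped region so the client can no longer touch the buffer. */ |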
| +static void pmem_revoke(struct file *file, struct pmem_data *data) |
| +{ |
| + struct pmem_region_node *region_node; |
| + struct list_head *elt, *elt2; |
| + struct mm_struct *mm = NULL; |
| + int id = get_id(file); |
| + int ret = 0; |
| + |
| + data->master_file = NULL; |
| + ret = pmem_lock_data_and_mm(file, data, &mm); |
| +	/* if lock_data_and_mm fails, either the task that mapped the fd or |
| +	 * the vma that mapped it has already gone away; nothing more |
| +	 * needs to be done */ |
| + if (ret) |
| + return; |
| +	/* unmap everything and delete the regions and region list; |
| +	 * nothing is mapped any more */ |
| + if (data->vma) |
| + list_for_each_safe(elt, elt2, &data->region_list) { |
| + region_node = list_entry(elt, struct pmem_region_node, |
| + list); |
| + pmem_unmap_pfn_range(id, data->vma, data, |
| + region_node->region.offset, |
| + region_node->region.len); |
| + list_del(elt); |
| + kfree(region_node); |
| + } |
| +	/* release the data and mm locks */ |
| + pmem_unlock_data_and_mm(data, mm); |
| +} |
| + |
| +static void pmem_get_size(struct pmem_region *region, struct file *file) |
| +{ |
| + struct pmem_data *data = (struct pmem_data *)file->private_data; |
| + int id = get_id(file); |
| + |
| + if (!has_allocation(file)) { |
| + region->offset = 0; |
| + region->len = 0; |
| + return; |
| + } else { |
| + region->offset = pmem_start_addr(id, data); |
| + region->len = pmem_len(id, data); |
| + } |
| + DLOG("offset %lx len %lx\n", region->offset, region->len); |
| +} |
| + |
| + |
| +static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| +{ |
| + struct pmem_data *data; |
| + int id = get_id(file); |
| + |
| + switch (cmd) { |
| + case PMEM_GET_PHYS: |
| + { |
| + struct pmem_region region; |
| + DLOG("get_phys\n"); |
| + if (!has_allocation(file)) { |
| + region.offset = 0; |
| + region.len = 0; |
| + } else { |
| + data = (struct pmem_data *)file->private_data; |
| + region.offset = pmem_start_addr(id, data); |
| + region.len = pmem_len(id, data); |
| + } |
| + /* printk(KERN_INFO "pmem: request for physical address of pmem region " |
| + "from process %d.\n", current->pid); */ |
| + if (copy_to_user((void __user *)arg, ®ion, |
| + sizeof(struct pmem_region))) |
| + return -EFAULT; |
| + break; |
| + } |
| + case PMEM_MAP: |
| + { |
| + struct pmem_region region; |
| + if (copy_from_user(®ion, (void __user *)arg, |
| + sizeof(struct pmem_region))) |
| + return -EFAULT; |
| + data = (struct pmem_data *)file->private_data; |
| + return pmem_remap(®ion, file, PMEM_MAP); |
| + } |
| + case PMEM_UNMAP: |
| + { |
| + struct pmem_region region; |
| + if (copy_from_user(®ion, (void __user *)arg, |
| + sizeof(struct pmem_region))) |
| + return -EFAULT; |
| + data = (struct pmem_data *)file->private_data; |
| + return pmem_remap(®ion, file, PMEM_UNMAP); |
| + } |
| + case PMEM_GET_SIZE: |
| + { |
| + struct pmem_region region; |
| + DLOG("get_size\n"); |
| + pmem_get_size(®ion, file); |
| + if (copy_to_user((void __user *)arg, ®ion, |
| + sizeof(struct pmem_region))) |
| + return -EFAULT; |
| + break; |
| + } |
| + case PMEM_GET_TOTAL_SIZE: |
| + { |
| + struct pmem_region region; |
| + DLOG("get total size\n"); |
| + region.offset = 0; |
| + region.len = pmem[id].size; |
| + if (copy_to_user((void __user *)arg, ®ion, |
| + sizeof(struct pmem_region))) |
| + return -EFAULT; |
| + break; |
| + } |
| + case PMEM_ALLOCATE: |
| + { |
| + if (has_allocation(file)) |
| + return -EINVAL; |
| + data = (struct pmem_data *)file->private_data; |
| +		down_write(&pmem[id].bitmap_sem); |
| +		data->index = pmem_allocate(id, arg); |
| +		up_write(&pmem[id].bitmap_sem); |
| + break; |
| + } |
| + case PMEM_CONNECT: |
| + DLOG("connect\n"); |
| + return pmem_connect(arg, file); |
| + default: |
| + if (pmem[id].ioctl) |
| + return pmem[id].ioctl(file, cmd, arg); |
| + return -EINVAL; |
| + } |
| + return 0; |
| +} |
| + |
| +#if PMEM_DEBUG |
| +static int debug_open(struct inode *inode, struct file *file) |
| +{ |
| + file->private_data = inode->i_private; |
| + return 0; |
| +} |
| + |
| +static ssize_t debug_read(struct file *file, char __user *buf, size_t count, |
| + loff_t *ppos) |
| +{ |
| + struct list_head *elt, *elt2; |
| + struct pmem_data *data; |
| + struct pmem_region_node *region_node; |
| + int id = (int)file->private_data; |
| + const int debug_bufmax = 4096; |
| + static char buffer[4096]; |
| + int n = 0; |
| + |
| +	DLOG("debug read\n"); |
| + n = scnprintf(buffer, debug_bufmax, |
| + "pid #: mapped regions (offset, len) (offset,len)...\n"); |
| + |
| + down(&pmem[id].data_list_sem); |
| + list_for_each(elt, &pmem[id].data_list) { |
| + data = list_entry(elt, struct pmem_data, list); |
| + down_read(&data->sem); |
| + n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:", |
| + data->pid); |
| + list_for_each(elt2, &data->region_list) { |
| + region_node = list_entry(elt2, struct pmem_region_node, |
| + list); |
| + n += scnprintf(buffer + n, debug_bufmax - n, |
| + "(%lx,%lx) ", |
| + region_node->region.offset, |
| + region_node->region.len); |
| + } |
| + n += scnprintf(buffer + n, debug_bufmax - n, "\n"); |
| + up_read(&data->sem); |
| + } |
| + up(&pmem[id].data_list_sem); |
| + |
| +	buffer[n] = 0; |
| + return simple_read_from_buffer(buf, count, ppos, buffer, n); |
| +} |
| + |
| +static struct file_operations debug_fops = { |
| + .read = debug_read, |
| + .open = debug_open, |
| +}; |
| +#endif |
| + |
| +#if 0 |
| +static struct miscdevice pmem_dev = { |
| + .name = "pmem", |
| + .fops = &pmem_fops, |
| +}; |
| +#endif |
| + |
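| +/* Register one pmem device: set up the misc device, the allocator |
| + * bitmap (one entry per PMEM_MIN_ALLOC chunk) and the kernel mapping |
| + * of the physical region. */ |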
| +int pmem_setup(struct android_pmem_platform_data *pdata, |
| + long (*ioctl)(struct file *, unsigned int, unsigned long), |
| + int (*release)(struct inode *, struct file *)) |
| +{ |
| + int err = 0; |
| + int i, index = 0; |
| + int id = id_count; |
| + id_count++; |
| + |
| + pmem[id].no_allocator = pdata->no_allocator; |
| + pmem[id].cached = pdata->cached; |
| + pmem[id].buffered = pdata->buffered; |
| + pmem[id].base = pdata->start; |
| + pmem[id].size = pdata->size; |
| + pmem[id].ioctl = ioctl; |
| + pmem[id].release = release; |
| + init_rwsem(&pmem[id].bitmap_sem); |
| +	sema_init(&pmem[id].data_list_sem, 1); |
| + INIT_LIST_HEAD(&pmem[id].data_list); |
| + pmem[id].dev.name = pdata->name; |
| + pmem[id].dev.minor = id; |
| + pmem[id].dev.fops = &pmem_fops; |
| +	printk(KERN_INFO "%s: init (cached=%d)\n", pdata->name, pdata->cached); |
| + |
| + err = misc_register(&pmem[id].dev); |
| + if (err) { |
| + printk(KERN_ALERT "Unable to register pmem driver!\n"); |
| + goto err_cant_register_device; |
| + } |
| + pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC; |
| + |
| +	pmem[id].bitmap = kcalloc(pmem[id].num_entries, |
| +				  sizeof(struct pmem_bits), GFP_KERNEL); |
| +	if (!pmem[id].bitmap) |
| +		goto err_no_mem_for_metadata; |
| + |
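| +	/* seed the allocator: split the region into power-of-two runs, |
| +	 * one for each set bit of num_entries, largest order first */ |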
| + for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) { |
| + if ((pmem[id].num_entries) & 1<<i) { |
| + PMEM_ORDER(id, index) = i; |
| + index = PMEM_NEXT_INDEX(id, index); |
| + } |
| + } |
| + |
| + if (pmem[id].cached) |
| + pmem[id].vbase = ioremap_cached(pmem[id].base, |
| + pmem[id].size); |
| +#ifdef ioremap_ext_buffered |
| + else if (pmem[id].buffered) |
| + pmem[id].vbase = ioremap_ext_buffered(pmem[id].base, |
| + pmem[id].size); |
| +#endif |
| + else |
| + pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); |
| + |
| +	if (!pmem[id].vbase) |
| +		goto err_cant_remap; |
| + |
| + pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL)); |
| + if (pmem[id].no_allocator) |
| + pmem[id].allocated = 0; |
| + |
| +#if PMEM_DEBUG |
| + debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id, |
| + &debug_fops); |
| +#endif |
| + return 0; |
| +err_cant_remap: |
| + kfree(pmem[id].bitmap); |
| +err_no_mem_for_metadata: |
| + misc_deregister(&pmem[id].dev); |
| +err_cant_register_device: |
| + return -1; |
| +} |
| + |
| +static int pmem_probe(struct platform_device *pdev) |
| +{ |
| + struct android_pmem_platform_data *pdata; |
| + |
| + if (!pdev || !pdev->dev.platform_data) { |
| + printk(KERN_ALERT "Unable to probe pmem!\n"); |
| + return -1; |
| + } |
| + pdata = pdev->dev.platform_data; |
| + return pmem_setup(pdata, NULL, NULL); |
| +} |
| + |
| + |
| +static int pmem_remove(struct platform_device *pdev) |
| +{ |
| + int id = pdev->id; |
| + __free_page(pfn_to_page(pmem[id].garbage_pfn)); |
| + misc_deregister(&pmem[id].dev); |
| + return 0; |
| +} |
| + |
| +static struct platform_driver pmem_driver = { |
| + .probe = pmem_probe, |
| + .remove = pmem_remove, |
| + .driver = { .name = "android_pmem" } |
| +}; |
| + |
| + |
| +static int __init pmem_init(void) |
| +{ |
| + return platform_driver_register(&pmem_driver); |
| +} |
| + |
| +static void __exit pmem_exit(void) |
| +{ |
| + platform_driver_unregister(&pmem_driver); |
| +} |
| + |
| +module_init(pmem_init); |
| +module_exit(pmem_exit); |
| + |
| diff --git a/stblinux-2.6.37/drivers/misc/pmem_rua.c b/stblinux-2.6.37/drivers/misc/pmem_rua.c |
| new file mode 100644 |
| index 0000000..4078910 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/misc/pmem_rua.c |
| @@ -0,0 +1,85 @@ |
| +/* drivers/android/pmem.c |
| + * |
| + * Copyright (C) 2007 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/miscdevice.h> |
| +#include <linux/platform_device.h> |
| +#include <linux/fs.h> |
| +#include <linux/file.h> |
| +#include <linux/mm.h> |
| +#include <linux/list.h> |
| +#include <linux/debugfs.h> |
| +#include <linux/android_pmem.h> |
| +#include <linux/mempolicy.h> |
| +#include <linux/sched.h> |
| +#include <asm/io.h> |
| +#include <asm/uaccess.h> |
| +#include <asm/cacheflush.h> |
| + |
| +MODULE_DESCRIPTION("SMP86xx RUA PMEM Device"); |
| +MODULE_AUTHOR("Benoit Goby <benoit_goby@sdesigns.com>"); |
| +MODULE_LICENSE("GPL"); |
| + |
| +static unsigned int address; |
| +module_param(address, uint, 0); |
| +MODULE_PARM_DESC(address, "Buffer Address"); |
| + |
| +static unsigned int size; |
| +module_param(size, uint, 0); |
| +MODULE_PARM_DESC(size, "Buffer Size"); |
| + |
| +static struct android_pmem_platform_data android_pmem_pdata = { |
| + .name = "pmem", |
| + .start = 0, |
| + .size = 0, |
| + .no_allocator = 1, |
| + .cached = 0, |
| +}; |
| + |
| +static struct platform_device android_pmem_device = { |
| + .name = "android_pmem", |
| + .id = 0, |
| + .dev = { .platform_data = &android_pmem_pdata }, |
| +}; |
| + |
| +static struct platform_device *devices[] __initdata = { |
| + &android_pmem_device, |
| +}; |
| + |
| +static int __init pmem_rua_init(void) |
| +{ |
| +	if (!size) { |
| +		printk(KERN_ERR "pmem_rua: size can't be zero\n"); |
| +		return -EINVAL; |
| +	} |
| + |
| +	if (!address) { |
| +		printk(KERN_ERR "pmem_rua: address can't be zero\n"); |
| +		return -EINVAL; |
| +	} |
| + |
| +	android_pmem_pdata.start = address & ~(PAGE_SIZE - 1); |
| +	android_pmem_pdata.size = size - PAGE_SIZE; |
| + |
| + platform_add_devices(devices, ARRAY_SIZE(devices)); |
| +	printk(KERN_INFO "pmem_rua_init start %08lx size %08lx\n", |
| +	       android_pmem_pdata.start, android_pmem_pdata.size); |
| + return 0; |
| +} |
| + |
| +static void __exit pmem_rua_exit(void) |
| +{ |
| + printk(KERN_INFO "pmem_rua_exit\n"); |
| +} |
| + |
| +module_init(pmem_rua_init); |
| +module_exit(pmem_rua_exit); |
| + |
| diff --git a/stblinux-2.6.37/drivers/misc/uid_stat.c b/stblinux-2.6.37/drivers/misc/uid_stat.c |
| new file mode 100644 |
| index 0000000..43a548b |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/misc/uid_stat.c |
| @@ -0,0 +1,153 @@ |
| +/* drivers/misc/uid_stat.c |
| + * |
| + * Copyright (C) 2008 - 2009 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <asm/atomic.h> |
| + |
| +#include <linux/err.h> |
| +#include <linux/init.h> |
| +#include <linux/kernel.h> |
| +#include <linux/list.h> |
| +#include <linux/proc_fs.h> |
| +#include <linux/slab.h> |
| +#include <linux/spinlock.h> |
| +#include <linux/stat.h> |
| +#include <linux/uid_stat.h> |
| + |
| +static DEFINE_SPINLOCK(uid_lock); |
| +static LIST_HEAD(uid_list); |
| +static struct proc_dir_entry *parent; |
| + |
| +struct uid_stat { |
| + struct list_head link; |
| + uid_t uid; |
| + atomic_t tcp_rcv; |
| + atomic_t tcp_snd; |
| +}; |
| + |
| +static struct uid_stat *find_uid_stat(uid_t uid) { |
| + unsigned long flags; |
| + struct uid_stat *entry; |
| + |
| + spin_lock_irqsave(&uid_lock, flags); |
| + list_for_each_entry(entry, &uid_list, link) { |
| + if (entry->uid == uid) { |
| + spin_unlock_irqrestore(&uid_lock, flags); |
| + return entry; |
| + } |
| + } |
| + spin_unlock_irqrestore(&uid_lock, flags); |
| + return NULL; |
| +} |
| + |
| +static int tcp_snd_read_proc(char *page, char **start, off_t off, |
| + int count, int *eof, void *data) |
| +{ |
| + int len; |
| + unsigned int bytes; |
| + char *p = page; |
| + struct uid_stat *uid_entry = (struct uid_stat *) data; |
| + if (!data) |
| + return 0; |
| + |
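| +	/* the counter was initialised to INT_MIN; adding INT_MIN again |
| +	 * recovers the unsigned byte count (modulo 2^32) */ |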
| + bytes = (unsigned int) (atomic_read(&uid_entry->tcp_snd) + INT_MIN); |
| + p += sprintf(p, "%u\n", bytes); |
| + len = (p - page) - off; |
| + *eof = (len <= count) ? 1 : 0; |
| + *start = page + off; |
| + return len; |
| +} |
| + |
| +static int tcp_rcv_read_proc(char *page, char **start, off_t off, |
| + int count, int *eof, void *data) |
| +{ |
| + int len; |
| + unsigned int bytes; |
| + char *p = page; |
| + struct uid_stat *uid_entry = (struct uid_stat *) data; |
| + if (!data) |
| + return 0; |
| + |
| + bytes = (unsigned int) (atomic_read(&uid_entry->tcp_rcv) + INT_MIN); |
| + p += sprintf(p, "%u\n", bytes); |
| + len = (p - page) - off; |
| + *eof = (len <= count) ? 1 : 0; |
| + *start = page + off; |
| + return len; |
| +} |
| + |
| +/* Create a new entry for tracking the specified uid. */ |
| +static struct uid_stat *create_stat(uid_t uid) { |
| + unsigned long flags; |
| + char uid_s[32]; |
| + struct uid_stat *new_uid; |
| + struct proc_dir_entry *entry; |
| + |
| + /* Create the uid stat struct and append it to the list. */ |
| + if ((new_uid = kmalloc(sizeof(struct uid_stat), GFP_KERNEL)) == NULL) |
| + return NULL; |
| + |
| + new_uid->uid = uid; |
| + /* Counters start at INT_MIN, so we can track 4GB of network traffic. */ |
| + atomic_set(&new_uid->tcp_rcv, INT_MIN); |
| + atomic_set(&new_uid->tcp_snd, INT_MIN); |
| + |
| + spin_lock_irqsave(&uid_lock, flags); |
| + list_add_tail(&new_uid->link, &uid_list); |
| + spin_unlock_irqrestore(&uid_lock, flags); |
| + |
| +	snprintf(uid_s, sizeof(uid_s), "%u", uid); |
| + entry = proc_mkdir(uid_s, parent); |
| + |
| + /* Keep reference to uid_stat so we know what uid to read stats from. */ |
| +	create_proc_read_entry("tcp_snd", S_IRUGO, entry, tcp_snd_read_proc, |
| + (void *) new_uid); |
| + |
| + create_proc_read_entry("tcp_rcv", S_IRUGO, entry, tcp_rcv_read_proc, |
| + (void *) new_uid); |
| + |
| + return new_uid; |
| +} |
| + |
| +int update_tcp_snd(uid_t uid, int size) { |
| + struct uid_stat *entry; |
| + if ((entry = find_uid_stat(uid)) == NULL && |
| + ((entry = create_stat(uid)) == NULL)) { |
| + return -1; |
| + } |
| + atomic_add(size, &entry->tcp_snd); |
| + return 0; |
| +} |
| + |
| +int update_tcp_rcv(uid_t uid, int size) { |
| + struct uid_stat *entry; |
| + if ((entry = find_uid_stat(uid)) == NULL && |
| + ((entry = create_stat(uid)) == NULL)) { |
| + return -1; |
| + } |
| + atomic_add(size, &entry->tcp_rcv); |
| + return 0; |
| +} |
| + |
| +static int __init uid_stat_init(void) |
| +{ |
| + parent = proc_mkdir("uid_stat", NULL); |
| + if (!parent) { |
| + pr_err("uid_stat: failed to create proc entry\n"); |
| + return -1; |
| + } |
| + return 0; |
| +} |
| + |
| +__initcall(uid_stat_init); |
| diff --git a/stblinux-2.6.37/drivers/misc/wl127x-rfkill.c b/stblinux-2.6.37/drivers/misc/wl127x-rfkill.c |
| new file mode 100644 |
| index 0000000..f5b9515 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/misc/wl127x-rfkill.c |
| @@ -0,0 +1,121 @@ |
| +/* |
| + * Bluetooth TI wl127x rfkill power control via GPIO |
| + * |
| + * Copyright (C) 2009 Motorola, Inc. |
| + * Copyright (C) 2008 Texas Instruments |
| + * Initial code: Pavan Savoy <pavan.savoy@gmail.com> (wl127x_power.c) |
| + * |
| + * This program is free software; you can redistribute it and/or modify |
| + * it under the terms of the GNU General Public License as published by |
| + * the Free Software Foundation; either version 2 of the License, or |
| + * (at your option) any later version. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + * You should have received a copy of the GNU General Public License |
| + * along with this program; if not, write to the Free Software |
| + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
| + * |
| + */ |
| + |
| +#include <linux/module.h> |
| +#include <linux/init.h> |
| +#include <linux/gpio.h> |
| +#include <linux/rfkill.h> |
| +#include <linux/platform_device.h> |
| +#include <linux/wl127x-rfkill.h> |
| + |
| +static int wl127x_rfkill_set_power(void *data, enum rfkill_state state) |
| +{ |
| + int nshutdown_gpio = (int) data; |
| + |
| + switch (state) { |
| + case RFKILL_STATE_UNBLOCKED: |
| + gpio_set_value(nshutdown_gpio, 1); |
| + break; |
| + case RFKILL_STATE_SOFT_BLOCKED: |
| + gpio_set_value(nshutdown_gpio, 0); |
| + break; |
| + default: |
| + printk(KERN_ERR "invalid bluetooth rfkill state %d\n", state); |
| + } |
| + return 0; |
| +} |
| + |
| +static int wl127x_rfkill_probe(struct platform_device *pdev) |
| +{ |
| + int rc = 0; |
| + struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data; |
| + enum rfkill_state default_state = RFKILL_STATE_SOFT_BLOCKED; /* off */ |
| + |
| + rc = gpio_request(pdata->nshutdown_gpio, "wl127x_nshutdown_gpio"); |
| + if (unlikely(rc)) |
| + return rc; |
| + |
| +	rc = gpio_direction_output(pdata->nshutdown_gpio, 0); |
| +	if (unlikely(rc)) { |
| +		gpio_free(pdata->nshutdown_gpio); |
| +		return rc; |
| +	} |
| + |
| + rfkill_set_default(RFKILL_TYPE_BLUETOOTH, default_state); |
| + wl127x_rfkill_set_power(NULL, default_state); |
| + |
| +	pdata->rfkill = rfkill_allocate(&pdev->dev, RFKILL_TYPE_BLUETOOTH); |
| +	if (unlikely(!pdata->rfkill)) { |
| +		gpio_free(pdata->nshutdown_gpio); |
| +		return -ENOMEM; |
| +	} |
| + |
| + pdata->rfkill->name = "wl127x"; |
| + pdata->rfkill->state = default_state; |
| + /* userspace cannot take exclusive control */ |
| + pdata->rfkill->user_claim_unsupported = 1; |
| + pdata->rfkill->user_claim = 0; |
| + pdata->rfkill->data = (void *) pdata->nshutdown_gpio; |
| + pdata->rfkill->toggle_radio = wl127x_rfkill_set_power; |
| + |
| +	rc = rfkill_register(pdata->rfkill); |
| +	if (unlikely(rc)) { |
| +		rfkill_free(pdata->rfkill); |
| +		gpio_free(pdata->nshutdown_gpio); |
| +	} |
| + |
| +	return rc; |
| +} |
| + |
| +static int wl127x_rfkill_remove(struct platform_device *pdev) |
| +{ |
| + struct wl127x_rfkill_platform_data *pdata = pdev->dev.platform_data; |
| + |
| + rfkill_unregister(pdata->rfkill); |
| + rfkill_free(pdata->rfkill); |
| + gpio_free(pdata->nshutdown_gpio); |
| + |
| + return 0; |
| +} |
| + |
| +static struct platform_driver wl127x_rfkill_platform_driver = { |
| + .probe = wl127x_rfkill_probe, |
| + .remove = wl127x_rfkill_remove, |
| + .driver = { |
| + .name = "wl127x-rfkill", |
| + .owner = THIS_MODULE, |
| + }, |
| +}; |
| + |
| +static int __init wl127x_rfkill_init(void) |
| +{ |
| + return platform_driver_register(&wl127x_rfkill_platform_driver); |
| +} |
| + |
| +static void __exit wl127x_rfkill_exit(void) |
| +{ |
| + platform_driver_unregister(&wl127x_rfkill_platform_driver); |
| +} |
| + |
| +module_init(wl127x_rfkill_init); |
| +module_exit(wl127x_rfkill_exit); |
| + |
| +MODULE_ALIAS("platform:wl127x"); |
| +MODULE_DESCRIPTION("wl127x-rfkill"); |
| +MODULE_AUTHOR("Motorola"); |
| +MODULE_LICENSE("GPL"); |
| diff --git a/stblinux-2.6.37/drivers/rtc/alarm.c b/stblinux-2.6.37/drivers/rtc/alarm.c |
| new file mode 100644 |
| index 0000000..9f6134b |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/rtc/alarm.c |
| @@ -0,0 +1,567 @@ |
| +/* drivers/rtc/alarm.c |
| + * |
| + * Copyright (C) 2007 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <asm/mach/time.h> |
| +#include <linux/android_alarm.h> |
| +#include <linux/device.h> |
| +#include <linux/miscdevice.h> |
| +#include <linux/platform_device.h> |
| +#include <linux/rtc.h> |
| +#include <linux/spinlock.h> |
| +#include <linux/sysdev.h> |
| +#include <linux/wakelock.h> |
| + |
| +#define ANDROID_ALARM_PRINT_ERRORS (1U << 0) |
| +#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1) |
| +#define ANDROID_ALARM_PRINT_INFO (1U << 2) |
| +#define ANDROID_ALARM_PRINT_IO (1U << 3) |
| +#define ANDROID_ALARM_PRINT_INT (1U << 4) |
| +#define ANDROID_ALARM_PRINT_FLOW (1U << 5) |
| + |
| +#if 0 |
| +#define ANDROID_ALARM_DPRINTF_MASK (~0) |
| +#define ANDROID_ALARM_DPRINTF(debug_level_mask, args...) \ |
| + do { \ |
| + if (ANDROID_ALARM_DPRINTF_MASK & debug_level_mask) { \ |
| + printk(args); \ |
| + } \ |
| + } while (0) |
| +#else |
| +#define ANDROID_ALARM_DPRINTF(args...) do { } while (0) |
| +#endif |
| + |
| +#define ANDROID_ALARM_WAKEUP_MASK ( \ |
| + ANDROID_ALARM_RTC_WAKEUP_MASK | \ |
| + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK) |
| + |
| +/* support old userspace code */ |
| +#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */ |
| +#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t) |
| + |
| +static struct rtc_device *alarm_rtc_dev; |
| +static int alarm_opened; |
| +static DEFINE_SPINLOCK(alarm_slock); |
| +static DEFINE_MUTEX(alarm_setrtc_mutex); |
| +static struct wake_lock alarm_wake_lock; |
| +static struct wake_lock alarm_rtc_wake_lock; |
| +static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue); |
| +static uint32_t alarm_pending; |
| +static uint32_t alarm_enabled; |
| +static uint32_t wait_pending; |
| +static struct platform_device *alarm_platform_dev; |
| +static struct hrtimer alarm_timer[ANDROID_ALARM_TYPE_COUNT]; |
| +static struct timespec alarm_time[ANDROID_ALARM_TYPE_COUNT]; |
| +static struct timespec elapsed_rtc_delta; |
| + |
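| +/* Arm the hrtimer for one alarm type; elapsed-realtime alarms are |
| + * converted to wall-clock time by adding the rtc/system-time delta |
| + * sampled at boot. */ |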
| +static void alarm_start_hrtimer(enum android_alarm_type alarm_type) |
| +{ |
| + struct timespec hr_alarm_time; |
| + if (!(alarm_enabled & (1U << alarm_type))) |
| + return; |
| + hr_alarm_time = alarm_time[alarm_type]; |
| + if (alarm_type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP || |
| + alarm_type == ANDROID_ALARM_ELAPSED_REALTIME) |
| + set_normalized_timespec(&hr_alarm_time, |
| + hr_alarm_time.tv_sec + elapsed_rtc_delta.tv_sec, |
| + hr_alarm_time.tv_nsec + elapsed_rtc_delta.tv_nsec); |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_FLOW, |
| + "alarm start hrtimer %d at %ld.%09ld\n", |
| + alarm_type, hr_alarm_time.tv_sec, hr_alarm_time.tv_nsec); |
| + hrtimer_start(&alarm_timer[alarm_type], |
| + timespec_to_ktime(hr_alarm_time), HRTIMER_MODE_ABS); |
| +} |
| + |
| +static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| +{ |
| + int rv = 0; |
| + unsigned long flags; |
| + int i; |
| + struct timespec new_alarm_time; |
| + struct timespec new_rtc_time; |
| + struct timespec tmp_time; |
| + struct rtc_time rtc_new_rtc_time; |
| + enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd); |
| + uint32_t alarm_type_mask = 1U << alarm_type; |
| + |
| + if (alarm_type >= ANDROID_ALARM_TYPE_COUNT) |
| + return -EINVAL; |
| + |
| + if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) { |
| + if ((file->f_flags & O_ACCMODE) == O_RDONLY) |
| + return -EPERM; |
| + if (file->private_data == NULL && |
| + cmd != ANDROID_ALARM_SET_RTC) { |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + if (alarm_opened) { |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + return -EBUSY; |
| + } |
| + alarm_opened = 1; |
| + file->private_data = (void *)1; |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + } |
| + } |
| + |
| + switch (ANDROID_ALARM_BASE_CMD(cmd)) { |
| + case ANDROID_ALARM_CLEAR(0): |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_IO, |
| + "alarm %d clear\n", alarm_type); |
| + hrtimer_try_to_cancel(&alarm_timer[alarm_type]); |
| + if (alarm_pending) { |
| + alarm_pending &= ~alarm_type_mask; |
| + if (!alarm_pending && !wait_pending) |
| + wake_unlock(&alarm_wake_lock); |
| + } |
| + alarm_enabled &= ~alarm_type_mask; |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + break; |
| + |
| + case ANDROID_ALARM_SET_OLD: |
| + case ANDROID_ALARM_SET_AND_WAIT_OLD: |
| + if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) { |
| + rv = -EFAULT; |
| + goto err1; |
| + } |
| + new_alarm_time.tv_nsec = 0; |
| + goto from_old_alarm_set; |
| + |
| + case ANDROID_ALARM_SET_AND_WAIT(0): |
| + case ANDROID_ALARM_SET(0): |
| + if (copy_from_user(&new_alarm_time, (void __user *)arg, |
| + sizeof(new_alarm_time))) { |
| + rv = -EFAULT; |
| + goto err1; |
| + } |
| +from_old_alarm_set: |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_IO, |
| + "alarm %d set %ld.%09ld\n", alarm_type, |
| + new_alarm_time.tv_sec, new_alarm_time.tv_nsec); |
| + alarm_time[alarm_type] = new_alarm_time; |
| + alarm_enabled |= alarm_type_mask; |
| + alarm_start_hrtimer(alarm_type); |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0) |
| + && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD) |
| + break; |
| +		/* fall through */ |
| + case ANDROID_ALARM_WAIT: |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_IO, "alarm wait\n"); |
| + if (!alarm_pending && wait_pending) { |
| + wake_unlock(&alarm_wake_lock); |
| + wait_pending = 0; |
| + } |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + rv = wait_event_interruptible(alarm_wait_queue, alarm_pending); |
| + if (rv) |
| + goto err1; |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + rv = alarm_pending; |
| + wait_pending = 1; |
| + alarm_pending = 0; |
| + if (rv & ANDROID_ALARM_WAKEUP_MASK) |
| + wake_unlock(&alarm_rtc_wake_lock); |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + break; |
| + case ANDROID_ALARM_SET_RTC: |
| + if (copy_from_user(&new_rtc_time, (void __user *)arg, |
| + sizeof(new_rtc_time))) { |
| + rv = -EFAULT; |
| + goto err1; |
| + } |
| + rtc_time_to_tm(new_rtc_time.tv_sec, &rtc_new_rtc_time); |
| + |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_IO, |
| + "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n", |
| + new_rtc_time.tv_sec, new_rtc_time.tv_nsec, |
| + rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min, |
| + rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1, |
| + rtc_new_rtc_time.tm_mday, |
| + rtc_new_rtc_time.tm_year + 1900); |
| + |
| + mutex_lock(&alarm_setrtc_mutex); |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) |
| + hrtimer_try_to_cancel(&alarm_timer[i]); |
| + getnstimeofday(&tmp_time); |
| + elapsed_rtc_delta = timespec_sub(elapsed_rtc_delta, |
| + timespec_sub(tmp_time, new_rtc_time)); |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + rv = do_settimeofday(&new_rtc_time); |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) |
| + alarm_start_hrtimer(i); |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + if (rv < 0) { |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_ERRORS, |
| + "Failed to set time\n"); |
| + mutex_unlock(&alarm_setrtc_mutex); |
| + goto err1; |
| + } |
| + rv = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time); |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK; |
| + wake_up(&alarm_wait_queue); |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + mutex_unlock(&alarm_setrtc_mutex); |
| + if (rv < 0) { |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_ERRORS, |
| + "Failed to set RTC, time will be lost on reboot\n"); |
| + goto err1; |
| + } |
| + break; |
| + case ANDROID_ALARM_GET_TIME(0): |
| + mutex_lock(&alarm_setrtc_mutex); |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + if (alarm_type != ANDROID_ALARM_SYSTEMTIME) { |
| + getnstimeofday(&tmp_time); |
| + if (alarm_type >= ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP) |
| + tmp_time = timespec_sub(tmp_time, |
| + elapsed_rtc_delta); |
| + } else |
| + ktime_get_ts(&tmp_time); |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + mutex_unlock(&alarm_setrtc_mutex); |
| + if (copy_to_user((void __user *)arg, &tmp_time, |
| + sizeof(tmp_time))) { |
| + rv = -EFAULT; |
| + goto err1; |
| + } |
| + break; |
| + |
| + default: |
| + rv = -EINVAL; |
| + goto err1; |
| + } |
| +err1: |
| + return rv; |
| +} |
| + |
| +static int alarm_open(struct inode *inode, struct file *file) |
| +{ |
| + file->private_data = NULL; |
| + return 0; |
| +} |
| + |
| +static int alarm_release(struct inode *inode, struct file *file) |
| +{ |
| + int i; |
| + unsigned long flags; |
| + |
| + spin_lock_irqsave(&alarm_slock, flags); |
| +	if (file->private_data != NULL) { |
| + for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) { |
| + uint32_t alarm_type_mask = 1U << i; |
| + if (alarm_enabled & alarm_type_mask) { |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, |
| + "alarm_release: clear alarm, " |
| + "pending %d\n", |
| + !!(alarm_pending & alarm_type_mask)); |
| + alarm_enabled &= ~alarm_type_mask; |
| + } |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + hrtimer_cancel(&alarm_timer[i]); |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + } |
| + if (alarm_pending | wait_pending) { |
| + if (alarm_pending) |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, |
| + "alarm_release: clear pending alarms " |
| + "%x\n", alarm_pending); |
| + wake_unlock(&alarm_wake_lock); |
| + wait_pending = 0; |
| + alarm_pending = 0; |
| + } |
| + alarm_opened = 0; |
| + } |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + return 0; |
| +} |
| + |
| +static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer) |
| +{ |
| + unsigned long flags; |
| + enum android_alarm_type alarm_type = (timer - alarm_timer); |
| + uint32_t alarm_type_mask = 1U << alarm_type; |
| + |
| + |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INT, |
| + "alarm_timer_triggered type %d\n", alarm_type); |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + if (alarm_enabled & alarm_type_mask) { |
| + wake_lock_timeout(&alarm_wake_lock, 5 * HZ); |
| + alarm_enabled &= ~alarm_type_mask; |
| + alarm_pending |= alarm_type_mask; |
| + wake_up(&alarm_wait_queue); |
| + } |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + return HRTIMER_NORESTART; |
| +} |
| + |
| +static void alarm_triggered_func(void *p) |
| +{ |
| + struct rtc_device *rtc = alarm_rtc_dev; |
| + if (!(rtc->irq_data & RTC_AF)) |
| + return; |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INT, "rtc alarm triggered\n"); |
| + wake_lock_timeout(&alarm_rtc_wake_lock, 1 * HZ); |
| +} |
| + |
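| +/* On suspend, hand pending wakeup alarms to the RTC: cancel the |
| + * hrtimers, program the earlier of the RTC and elapsed-realtime wakeup |
| + * alarms into the RTC, and abort the suspend with -EBUSY if the alarm |
| + * is about to fire. */ |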
| +int alarm_suspend(struct platform_device *pdev, pm_message_t state) |
| +{ |
| + int err = 0; |
| + unsigned long flags; |
| + struct rtc_wkalrm rtc_alarm; |
| + struct rtc_time rtc_current_rtc_time; |
| + unsigned long rtc_current_time; |
| + unsigned long rtc_alarm_time; |
| + struct timespec rtc_current_timespec; |
| + struct timespec rtc_delta; |
| + struct timespec elapsed_realtime_alarm_time; |
| + |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_FLOW, |
| + "alarm_suspend(%p, %d)\n", pdev, state.event); |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + if (alarm_pending && !wake_lock_active(&alarm_wake_lock)) { |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, |
| + "alarm pending\n"); |
| + err = -EBUSY; |
| + goto err1; |
| + } |
| + if (alarm_enabled & ANDROID_ALARM_WAKEUP_MASK) { |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + if (alarm_enabled & ANDROID_ALARM_RTC_WAKEUP_MASK) |
| + hrtimer_cancel(&alarm_timer[ANDROID_ALARM_RTC_WAKEUP]); |
| + if (alarm_enabled & ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK) |
| + hrtimer_cancel(&alarm_timer[ |
| + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP]); |
| + |
| + rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time); |
| + rtc_current_timespec.tv_nsec = 0; |
| + rtc_tm_to_time(&rtc_current_rtc_time, |
| + &rtc_current_timespec.tv_sec); |
| + save_time_delta(&rtc_delta, &rtc_current_timespec); |
| + set_normalized_timespec(&elapsed_realtime_alarm_time, |
| + alarm_time[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP] |
| + .tv_sec + elapsed_rtc_delta.tv_sec, |
| + alarm_time[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP] |
| + .tv_nsec + elapsed_rtc_delta.tv_nsec); |
| + if ((alarm_enabled & ANDROID_ALARM_RTC_WAKEUP_MASK) && |
| + (!(alarm_enabled & |
| + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK) || |
| + timespec_compare(&alarm_time[ANDROID_ALARM_RTC_WAKEUP], |
| + &elapsed_realtime_alarm_time) < 0)) |
| + rtc_alarm_time = timespec_sub( |
| + alarm_time[ANDROID_ALARM_RTC_WAKEUP], |
| + rtc_delta).tv_sec; |
| + else |
| + rtc_alarm_time = timespec_sub( |
| + elapsed_realtime_alarm_time, rtc_delta).tv_sec; |
| + rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time); |
| + rtc_alarm.enabled = 1; |
| + rtc_set_alarm(alarm_rtc_dev, &rtc_alarm); |
| + rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time); |
| + rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time); |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, |
| + "rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n", |
| + rtc_alarm_time, rtc_current_time, |
| + rtc_delta.tv_sec, rtc_delta.tv_nsec); |
| + if (rtc_current_time + 1 >= rtc_alarm_time) { |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, |
| + "alarm about to go off\n"); |
| + memset(&rtc_alarm, 0, sizeof(rtc_alarm)); |
| + rtc_alarm.enabled = 0; |
| + rtc_set_alarm(alarm_rtc_dev, &rtc_alarm); |
| + |
| + spin_lock_irqsave(&alarm_slock, flags); |
| + wake_lock_timeout(&alarm_rtc_wake_lock, 2 * HZ); |
| + alarm_start_hrtimer(ANDROID_ALARM_RTC_WAKEUP); |
| + alarm_start_hrtimer( |
| + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP); |
| + err = -EBUSY; |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + } |
| + } else { |
| +err1: |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + } |
| + return err; |
| +} |
| + |
| +int alarm_resume(struct platform_device *pdev) |
| +{ |
| + struct rtc_wkalrm alarm; |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_FLOW, |
| + "alarm_resume(%p)\n", pdev); |
| + if (alarm_enabled & ANDROID_ALARM_WAKEUP_MASK) { |
| + memset(&alarm, 0, sizeof(alarm)); |
| + alarm.enabled = 0; |
| + rtc_set_alarm(alarm_rtc_dev, &alarm); |
| + alarm_start_hrtimer(ANDROID_ALARM_RTC_WAKEUP); |
| + alarm_start_hrtimer(ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP); |
| + } |
| + return 0; |
| +} |
| + |
| +static struct rtc_task alarm_rtc_task = { |
| + .func = alarm_triggered_func |
| +}; |
| + |
| +static struct file_operations alarm_fops = { |
| + .owner = THIS_MODULE, |
| + .unlocked_ioctl = alarm_ioctl, |
| + .open = alarm_open, |
| + .release = alarm_release, |
| +}; |
| + |
| +static struct miscdevice alarm_device = { |
| + .minor = MISC_DYNAMIC_MINOR, |
| + .name = "alarm", |
| + .fops = &alarm_fops, |
| +}; |
| + |
| +static int rtc_alarm_add_device(struct device *dev, |
| + struct class_interface *class_intf) |
| +{ |
| + int err; |
| + struct rtc_device *rtc = to_rtc_device(dev); |
| + |
| + mutex_lock(&alarm_setrtc_mutex); |
| + |
| + if (alarm_rtc_dev) { |
| + err = -EBUSY; |
| + goto err1; |
| + } |
| + |
| + err = misc_register(&alarm_device); |
| + if (err) |
| + goto err1; |
| + alarm_platform_dev = |
| + platform_device_register_simple("alarm", -1, NULL, 0); |
| + if (IS_ERR(alarm_platform_dev)) { |
| + err = PTR_ERR(alarm_platform_dev); |
| + goto err2; |
| + } |
| + err = rtc_irq_register(rtc, &alarm_rtc_task); |
| + if (err) |
| + goto err3; |
| + alarm_rtc_dev = rtc; |
| + mutex_unlock(&alarm_setrtc_mutex); |
| + |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, "alarm: parent %p\n", |
| + alarm_platform_dev->dev.power.pm_parent); |
| + return 0; |
| + |
| +err3: |
| + platform_device_unregister(alarm_platform_dev); |
| +err2: |
| + misc_deregister(&alarm_device); |
| +err1: |
| + mutex_unlock(&alarm_setrtc_mutex); |
| + return err; |
| +} |
| + |
| +static void rtc_alarm_remove_device(struct device *dev, |
| + struct class_interface *class_intf) |
| +{ |
| + if (dev == &alarm_rtc_dev->dev) { |
| + rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task); |
| + platform_device_unregister(alarm_platform_dev); |
| + misc_deregister(&alarm_device); |
| + alarm_rtc_dev = NULL; |
| + } |
| +} |
| + |
| +static struct class_interface rtc_alarm_interface = { |
| + .add_dev = &rtc_alarm_add_device, |
| + .remove_dev = &rtc_alarm_remove_device, |
| +}; |
| + |
| +static struct platform_driver alarm_driver = { |
| + .suspend = alarm_suspend, |
| + .resume = alarm_resume, |
| + .driver = { |
| + .name = "alarm" |
| + } |
| +}; |
| + |
| +static int __init alarm_late_init(void) |
| +{ |
| + unsigned long flags; |
| + struct timespec system_time; |
| + |
| + /* this needs to run after the rtc is read at boot */ |
| + spin_lock_irqsave(&alarm_slock, flags); |
| +	/* We read the current rtc and system time so we can later calculate |
| +	 * elapsed realtime to be (boot_systemtime + rtc - boot_rtc) == |
| + * (rtc - (boot_rtc - boot_systemtime)) |
| + */ |
| + getnstimeofday(&elapsed_rtc_delta); |
| + ktime_get_ts(&system_time); |
| + elapsed_rtc_delta = timespec_sub(elapsed_rtc_delta, system_time); |
| + spin_unlock_irqrestore(&alarm_slock, flags); |
| + |
| + ANDROID_ALARM_DPRINTF(ANDROID_ALARM_PRINT_INFO, |
| + "alarm_late_init: rtc to elapsed realtime delta %ld.%09ld\n", |
| + elapsed_rtc_delta.tv_sec, elapsed_rtc_delta.tv_nsec); |
| + return 0; |
| +} |
| + |
| +static int __init alarm_init(void) |
| +{ |
| + int err; |
| + int i; |
| + |
| + for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) { |
| + hrtimer_init(&alarm_timer[i], CLOCK_REALTIME, HRTIMER_MODE_ABS); |
| + alarm_timer[i].function = alarm_timer_triggered; |
| + } |
| + hrtimer_init(&alarm_timer[ANDROID_ALARM_SYSTEMTIME], |
| + CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
| + alarm_timer[ANDROID_ALARM_SYSTEMTIME].function = alarm_timer_triggered; |
| + err = platform_driver_register(&alarm_driver); |
| + if (err < 0) |
| + goto err1; |
| + wake_lock_init(&alarm_wake_lock, WAKE_LOCK_SUSPEND, "alarm"); |
| + wake_lock_init(&alarm_rtc_wake_lock, WAKE_LOCK_SUSPEND, "alarm_rtc"); |
| + rtc_alarm_interface.class = rtc_class; |
| + err = class_interface_register(&rtc_alarm_interface); |
| + if (err < 0) |
| + goto err2; |
| + |
| + return 0; |
| + |
| +err2: |
| + wake_lock_destroy(&alarm_rtc_wake_lock); |
| + wake_lock_destroy(&alarm_wake_lock); |
| + platform_driver_unregister(&alarm_driver); |
| +err1: |
| + return err; |
| +} |
| + |
| +static void __exit alarm_exit(void) |
| +{ |
| + class_interface_unregister(&rtc_alarm_interface); |
| + wake_lock_destroy(&alarm_rtc_wake_lock); |
| + wake_lock_destroy(&alarm_wake_lock); |
| + platform_driver_unregister(&alarm_driver); |
| +} |
| + |
| +late_initcall(alarm_late_init); |
| +module_init(alarm_init); |
| +module_exit(alarm_exit); |
| + |
| diff --git a/stblinux-2.6.37/drivers/staging/Kconfig b/stblinux-2.6.37/drivers/staging/Kconfig |
| index 5eafdf4..310c88a 100644 |
| --- a/stblinux-2.6.37/drivers/staging/Kconfig |
| +++ b/stblinux-2.6.37/drivers/staging/Kconfig |
| @@ -1,6 +1,6 @@ |
| menuconfig STAGING |
| bool "Staging drivers" |
| - default n |
| + default y |
| ---help--- |
| This option allows you to select a number of drivers that are |
| not of the "normal" Linux kernel quality level. These drivers |
| @@ -22,11 +22,10 @@ menuconfig STAGING |
| If in doubt, say N here. |
| |
| |
| -if STAGING |
| |
| config STAGING_EXCLUDE_BUILD |
| bool "Exclude Staging drivers from being built" if STAGING |
| - default y |
| + default n |
| ---help--- |
| Are you sure you really want to build the staging drivers? |
| They taint your kernel, don't live up to the normal Linux |
| @@ -39,7 +38,6 @@ config STAGING_EXCLUDE_BUILD |
| when selecting 'make allyesconfg' and 'make allmodconfig' so |
| don't be all that put off, your dog will be just fine. |
| |
| -if !STAGING_EXCLUDE_BUILD |
| |
| source "drivers/staging/et131x/Kconfig" |
| |
| @@ -87,6 +85,8 @@ source "drivers/staging/rtl8712/Kconfig" |
| |
| source "drivers/staging/frontier/Kconfig" |
| |
| +source "drivers/staging/android/Kconfig" |
| + |
| source "drivers/staging/pohmelfs/Kconfig" |
| |
| source "drivers/staging/autofs/Kconfig" |
| @@ -175,5 +175,3 @@ source "drivers/staging/intel_sst/Kconfig" |
| |
| source "drivers/staging/speakup/Kconfig" |
| |
| -endif # !STAGING_EXCLUDE_BUILD |
| -endif # STAGING |
| diff --git a/stblinux-2.6.37/drivers/staging/Makefile b/stblinux-2.6.37/drivers/staging/Makefile |
| index a97a955..3181b14 100644 |
| --- a/stblinux-2.6.37/drivers/staging/Makefile |
| +++ b/stblinux-2.6.37/drivers/staging/Makefile |
| @@ -28,6 +28,7 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/ |
| obj-$(CONFIG_R8712U) += rtl8712/ |
| obj-$(CONFIG_SPECTRA) += spectra/ |
| obj-$(CONFIG_TRANZPORT) += frontier/ |
| +obj-$(CONFIG_ANDROID) += android/ |
| obj-$(CONFIG_POHMELFS) += pohmelfs/ |
| obj-$(CONFIG_AUTOFS_FS) += autofs/ |
| obj-$(CONFIG_IDE_PHISON) += phison/ |
| diff --git a/stblinux-2.6.37/drivers/staging/android/Kconfig b/stblinux-2.6.37/drivers/staging/android/Kconfig |
| new file mode 100644 |
| index 0000000..0d87eb0 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/Kconfig |
| @@ -0,0 +1,91 @@ |
| +menu "Android" |
| + |
| +config ANDROID |
| + bool "Android Drivers" |
| +	default y |
| + ---help--- |
| + Enable support for various drivers needed on the Android platform |
| + |
| +config ANDROID_BINDER_IPC |
| + bool "Android Binder IPC Driver" |
| + default y |
| + |
| +config ANDROID_LOGGER |
| + tristate "Android log driver" |
| + default y |
| + |
| +config ANDROID_RAM_CONSOLE |
| + bool "Android RAM buffer console" |
| + default y |
| + |
| +config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE |
| + bool "Enable verbose console messages on Android RAM console" |
| + default y |
| + depends on ANDROID_RAM_CONSOLE |
| + |
| +menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| + bool "Android RAM Console Enable error correction" |
| + default n |
| + depends on ANDROID_RAM_CONSOLE |
| + depends on !ANDROID_RAM_CONSOLE_EARLY_INIT |
| + select REED_SOLOMON |
| + select REED_SOLOMON_ENC8 |
| + select REED_SOLOMON_DEC8 |
| + |
| +if ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| + |
| +config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE |
| + int "Android RAM Console Data data size" |
| + default 128 |
| + help |
| + Must be a power of 2. |
| + |
| +config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE |
| + int "Android RAM Console ECC size" |
| + default 16 |
| + |
| +config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE |
| + int "Android RAM Console Symbol size" |
| + default 8 |
| + |
| +config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL |
| + hex "Android RAM Console Polynomial" |
| + default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4) |
| + default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5) |
| + default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6) |
| + default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7) |
| + default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8) |
| + |
| +endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| + |
| +config ANDROID_RAM_CONSOLE_EARLY_INIT |
| + bool "Start Android RAM console early" |
| + default n |
| + depends on ANDROID_RAM_CONSOLE |
| + |
| +config ANDROID_RAM_CONSOLE_EARLY_ADDR |
| + hex "Android RAM console virtual address" |
| + default 0 |
| + depends on ANDROID_RAM_CONSOLE_EARLY_INIT |
| + |
| +config ANDROID_RAM_CONSOLE_EARLY_SIZE |
| + hex "Android RAM console buffer size" |
| + default 0 |
| + depends on ANDROID_RAM_CONSOLE_EARLY_INIT |
| + |
| +config ANDROID_TIMED_OUTPUT |
| + bool "Timed output class driver" |
| + default y |
| + |
| +config ANDROID_TIMED_GPIO |
| + tristate "Android timed gpio driver" |
| + depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT |
| + default n |
| + |
| +config ANDROID_LOW_MEMORY_KILLER |
| + bool "Android Low Memory Killer" |
| + default n |
| + ---help--- |
| + Register processes to be killed when memory is low |
| + |
| +endmenu |
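| |
| The error-correction options above map onto the kernel's lib/reed_solomon |
| API, which the select REED_SOLOMON* lines pull in. A minimal sketch of |
| how such code might wire the four values up (illustrative only, not part |
| of the patch; ram_console.c is the real consumer, and the sketch assumes |
| the 8-bit-symbol default so the encode_rs8() variant applies): |
| |
| #include <linux/errno.h> |
| #include <linux/init.h> |
| #include <linux/rslib.h> |
| #include <linux/string.h> |
| #include <linux/types.h> |
| |
| #define ECC_DATA CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE |
| #define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE |
| #define ECC_SYM  CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE |
| #define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL |
| |
| static struct rs_control *ecc_rs;	/* Reed-Solomon codec state */ |
| |
| static int __init ecc_init(void) |
| { |
| 	/* fcr = 0, primitive element 1, ECC_SIZE parity symbols */ |
| 	ecc_rs = init_rs(ECC_SYM, ECC_POLY, 0, 1, ECC_SIZE); |
| 	return ecc_rs ? 0 : -ENOMEM; |
| } |
| |
| static void ecc_protect_block(uint8_t *data, uint16_t *par) |
| { |
| 	/* compute ECC_SIZE parity symbols over one ECC_DATA-byte block */ |
| 	memset(par, 0, ECC_SIZE * sizeof(*par)); |
| 	encode_rs8(ecc_rs, data, ECC_DATA, par, 0); |
| } |
| |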
| diff --git a/stblinux-2.6.37/drivers/staging/android/Makefile b/stblinux-2.6.37/drivers/staging/android/Makefile |
| new file mode 100644 |
| index 0000000..8e057e6 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/Makefile |
| @@ -0,0 +1,6 @@ |
| +obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o |
| +obj-$(CONFIG_ANDROID_LOGGER) += logger.o |
| +obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o |
| +obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o |
| +obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o |
| +obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o |
| diff --git a/stblinux-2.6.37/drivers/staging/android/TODO b/stblinux-2.6.37/drivers/staging/android/TODO |
| new file mode 100644 |
| index 0000000..e59c5be |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/TODO |
| @@ -0,0 +1,10 @@ |
| +TODO: |
| + - checkpatch.pl cleanups |
| + - sparse fixes |
| + - rename files to be not so "generic" |
| + - make sure things build as modules properly |
| + - add proper arch dependencies as needed |
| + - audit userspace interfaces to make sure they are sane |
| + |
| +Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: |
| +Brian Swetland <swetland@google.com> |
| diff --git a/stblinux-2.6.37/drivers/staging/android/binder.c b/stblinux-2.6.37/drivers/staging/android/binder.c |
| new file mode 100644 |
| index 0000000..17d89a8 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/binder.c |
| @@ -0,0 +1,3730 @@ |
| +/* binder.c |
| + * |
| + * Android IPC Subsystem |
| + * |
| + * Copyright (C) 2007-2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <asm/cacheflush.h> |
| +#include <linux/fdtable.h> |
| +#include <linux/file.h> |
| +#include <linux/fs.h> |
| +#include <linux/list.h> |
| +#include <linux/miscdevice.h> |
| +#include <linux/mm.h> |
| +#include <linux/module.h> |
| +#include <linux/mutex.h> |
| +#include <linux/nsproxy.h> |
| +#include <linux/poll.h> |
| +#include <linux/proc_fs.h> |
| +#include <linux/rbtree.h> |
| +#include <linux/sched.h> |
| +#include <linux/uaccess.h> |
| +#include <linux/vmalloc.h> |
| +#include "binder.h" |
| + |
| +static DEFINE_MUTEX(binder_lock); |
| +static HLIST_HEAD(binder_procs); |
| +static struct binder_node *binder_context_mgr_node; |
| +static uid_t binder_context_mgr_uid = -1; |
| +static int binder_last_id; |
| +static struct proc_dir_entry *binder_proc_dir_entry_root; |
| +static struct proc_dir_entry *binder_proc_dir_entry_proc; |
| +static struct hlist_head binder_dead_nodes; |
| +static HLIST_HEAD(binder_deferred_list); |
| +static DEFINE_MUTEX(binder_deferred_lock); |
| + |
| +static int binder_read_proc_proc(char *page, char **start, off_t off, |
| + int count, int *eof, void *data); |
| + |
| +/* This is only defined in include/asm-arm/sizes.h */ |
| +#ifndef SZ_1K |
| +#define SZ_1K 0x400 |
| +#endif |
| + |
| +#ifndef SZ_4M |
| +#define SZ_4M 0x400000 |
| +#endif |
| + |
| +#define FORBIDDEN_MMAP_FLAGS (VM_WRITE) |
| + |
| +#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) |
| + |
| +enum { |
| + BINDER_DEBUG_USER_ERROR = 1U << 0, |
| + BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, |
| + BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, |
| + BINDER_DEBUG_OPEN_CLOSE = 1U << 3, |
| + BINDER_DEBUG_DEAD_BINDER = 1U << 4, |
| + BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, |
| + BINDER_DEBUG_READ_WRITE = 1U << 6, |
| + BINDER_DEBUG_USER_REFS = 1U << 7, |
| + BINDER_DEBUG_THREADS = 1U << 8, |
| + BINDER_DEBUG_TRANSACTION = 1U << 9, |
| + BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, |
| + BINDER_DEBUG_FREE_BUFFER = 1U << 11, |
| + BINDER_DEBUG_INTERNAL_REFS = 1U << 12, |
| + BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, |
| + BINDER_DEBUG_PRIORITY_CAP = 1U << 14, |
| + BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, |
| +}; |
| +static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | |
| + BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; |
| +module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); |
| + |
| +static int binder_debug_no_lock; |
| +module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); |
| + |
| +static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); |
| +static int binder_stop_on_user_error; |
| + |
| +static int binder_set_stop_on_user_error(const char *val, |
| + struct kernel_param *kp) |
| +{ |
| + int ret; |
| + ret = param_set_int(val, kp); |
| + if (binder_stop_on_user_error < 2) |
| + wake_up(&binder_user_error_wait); |
| + return ret; |
| +} |
| +module_param_call(stop_on_user_error, binder_set_stop_on_user_error, |
| + param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); |
| + |
| +#define binder_user_error(x...) \ |
| + do { \ |
| + if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ |
| + printk(KERN_INFO x); \ |
| + if (binder_stop_on_user_error) \ |
| + binder_stop_on_user_error = 2; \ |
| + } while (0) |
| + |
| +enum { |
| + BINDER_STAT_PROC, |
| + BINDER_STAT_THREAD, |
| + BINDER_STAT_NODE, |
| + BINDER_STAT_REF, |
| + BINDER_STAT_DEATH, |
| + BINDER_STAT_TRANSACTION, |
| + BINDER_STAT_TRANSACTION_COMPLETE, |
| + BINDER_STAT_COUNT |
| +}; |
| + |
| +struct binder_stats { |
| + int br[_IOC_NR(BR_FAILED_REPLY) + 1]; |
| + int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; |
| + int obj_created[BINDER_STAT_COUNT]; |
| + int obj_deleted[BINDER_STAT_COUNT]; |
| +}; |
| + |
| +static struct binder_stats binder_stats; |
| + |
| +struct binder_transaction_log_entry { |
| + int debug_id; |
| + int call_type; |
| + int from_proc; |
| + int from_thread; |
| + int target_handle; |
| + int to_proc; |
| + int to_thread; |
| + int to_node; |
| + int data_size; |
| + int offsets_size; |
| +}; |
| +struct binder_transaction_log { |
| + int next; |
| + int full; |
| + struct binder_transaction_log_entry entry[32]; |
| +}; |
| +struct binder_transaction_log binder_transaction_log; |
| +struct binder_transaction_log binder_transaction_log_failed; |
| + |
| +static struct binder_transaction_log_entry *binder_transaction_log_add( |
| + struct binder_transaction_log *log) |
| +{ |
| + struct binder_transaction_log_entry *e; |
| + e = &log->entry[log->next]; |
| + memset(e, 0, sizeof(*e)); |
| + log->next++; |
| + if (log->next == ARRAY_SIZE(log->entry)) { |
| + log->next = 0; |
| + log->full = 1; |
| + } |
| + return e; |
| +} |
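| |
| The log above is a fixed 32-entry ring: next always names the slot to |
| overwrite, and full flags that the buffer has wrapped. A minimal reader |
| sketch (illustrative, not part of the patch; assumes binder.c's existing |
| includes) that visits entries oldest first: |
| |
| static void binder_log_for_each(struct binder_transaction_log *log) |
| { |
| 	int i; |
| 	int start = log->full ? log->next : 0; |
| 	int count = log->full ? ARRAY_SIZE(log->entry) : log->next; |
| |
| 	for (i = 0; i < count; i++) { |
| 		struct binder_transaction_log_entry *e = |
| 			&log->entry[(start + i) % ARRAY_SIZE(log->entry)]; |
| 		(void)e;	/* placeholder: print or copy the entry */ |
| 	} |
| } |
| |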
| + |
| +struct binder_work { |
| + struct list_head entry; |
| + enum { |
| + BINDER_WORK_TRANSACTION = 1, |
| + BINDER_WORK_TRANSACTION_COMPLETE, |
| + BINDER_WORK_NODE, |
| + BINDER_WORK_DEAD_BINDER, |
| + BINDER_WORK_DEAD_BINDER_AND_CLEAR, |
| + BINDER_WORK_CLEAR_DEATH_NOTIFICATION, |
| + } type; |
| +}; |
| + |
| +struct binder_node { |
| + int debug_id; |
| + struct binder_work work; |
| + union { |
| + struct rb_node rb_node; |
| + struct hlist_node dead_node; |
| + }; |
| + struct binder_proc *proc; |
| + struct hlist_head refs; |
| + int internal_strong_refs; |
| + int local_weak_refs; |
| + int local_strong_refs; |
| + void __user *ptr; |
| + void __user *cookie; |
| + unsigned has_strong_ref:1; |
| + unsigned pending_strong_ref:1; |
| + unsigned has_weak_ref:1; |
| + unsigned pending_weak_ref:1; |
| + unsigned has_async_transaction:1; |
| + unsigned accept_fds:1; |
| + unsigned min_priority:8; |
| + struct list_head async_todo; |
| +}; |
| + |
| +struct binder_ref_death { |
| + struct binder_work work; |
| + void __user *cookie; |
| +}; |
| + |
| +struct binder_ref { |
| + /* Lookups needed: */ |
| + /* node + proc => ref (transaction) */ |
| + /* desc + proc => ref (transaction, inc/dec ref) */ |
| + /* node => refs + procs (proc exit) */ |
| + int debug_id; |
| + struct rb_node rb_node_desc; |
| + struct rb_node rb_node_node; |
| + struct hlist_node node_entry; |
| + struct binder_proc *proc; |
| + struct binder_node *node; |
| + uint32_t desc; |
| + int strong; |
| + int weak; |
| + struct binder_ref_death *death; |
| +}; |
| + |
| +struct binder_buffer { |
| + struct list_head entry; /* free and allocated entries by address */ |
| + struct rb_node rb_node; /* free entry by size or allocated entry */ |
| + /* by address */ |
| + unsigned free:1; |
| + unsigned allow_user_free:1; |
| + unsigned async_transaction:1; |
| + unsigned debug_id:29; |
| + |
| + struct binder_transaction *transaction; |
| + |
| + struct binder_node *target_node; |
| + size_t data_size; |
| + size_t offsets_size; |
| + uint8_t data[0]; |
| +}; |
| + |
| +enum { |
| + BINDER_DEFERRED_PUT_FILES = 0x01, |
| + BINDER_DEFERRED_FLUSH = 0x02, |
| + BINDER_DEFERRED_RELEASE = 0x04, |
| +}; |
| + |
| +struct binder_proc { |
| + struct hlist_node proc_node; |
| + struct rb_root threads; |
| + struct rb_root nodes; |
| + struct rb_root refs_by_desc; |
| + struct rb_root refs_by_node; |
| + int pid; |
| + struct vm_area_struct *vma; |
| + struct task_struct *tsk; |
| + struct files_struct *files; |
| + struct hlist_node deferred_work_node; |
| + int deferred_work; |
| + void *buffer; |
| + ptrdiff_t user_buffer_offset; |
| + |
| + struct list_head buffers; |
| + struct rb_root free_buffers; |
| + struct rb_root allocated_buffers; |
| + size_t free_async_space; |
| + |
| + struct page **pages; |
| + size_t buffer_size; |
| + uint32_t buffer_free; |
| + struct list_head todo; |
| + wait_queue_head_t wait; |
| + struct binder_stats stats; |
| + struct list_head delivered_death; |
| + int max_threads; |
| + int requested_threads; |
| + int requested_threads_started; |
| + int ready_threads; |
| + long default_priority; |
| +}; |
| + |
| +enum { |
| + BINDER_LOOPER_STATE_REGISTERED = 0x01, |
| + BINDER_LOOPER_STATE_ENTERED = 0x02, |
| + BINDER_LOOPER_STATE_EXITED = 0x04, |
| + BINDER_LOOPER_STATE_INVALID = 0x08, |
| + BINDER_LOOPER_STATE_WAITING = 0x10, |
| + BINDER_LOOPER_STATE_NEED_RETURN = 0x20 |
| +}; |
| + |
| +struct binder_thread { |
| + struct binder_proc *proc; |
| + struct rb_node rb_node; |
| + int pid; |
| + int looper; |
| + struct binder_transaction *transaction_stack; |
| + struct list_head todo; |
| + uint32_t return_error; /* Write failed, return error code in read buf */ |
| + uint32_t return_error2; /* Write failed, return error code in read */ |
| + /* buffer. Used when sending a reply to a dead process that */ |
| + /* we are also waiting on */ |
| + wait_queue_head_t wait; |
| + struct binder_stats stats; |
| +}; |
| + |
| +struct binder_transaction { |
| + int debug_id; |
| + struct binder_work work; |
| + struct binder_thread *from; |
| + struct binder_transaction *from_parent; |
| + struct binder_proc *to_proc; |
| + struct binder_thread *to_thread; |
| + struct binder_transaction *to_parent; |
| + unsigned need_reply:1; |
| + /* unsigned is_dead:1; */ /* not used at the moment */ |
| + |
| + struct binder_buffer *buffer; |
| + unsigned int code; |
| + unsigned int flags; |
| + long priority; |
| + long saved_priority; |
| + uid_t sender_euid; |
| +}; |
| + |
| +static void binder_defer_work(struct binder_proc *proc, int defer); |
| + |
| +/* |
| + * copied from get_unused_fd_flags |
| + */ |
| +int task_get_unused_fd_flags(struct binder_proc *proc, int flags) |
| +{ |
| + struct files_struct *files = proc->files; |
| + int fd, error; |
| + struct fdtable *fdt; |
| + unsigned long rlim_cur; |
| + unsigned long irqs; |
| + |
| + if (files == NULL) |
| + return -ESRCH; |
| + |
| + error = -EMFILE; |
| + spin_lock(&files->file_lock); |
| + |
| +repeat: |
| + fdt = files_fdtable(files); |
| + fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds, |
| + files->next_fd); |
| + |
| + /* |
| + * N.B. For clone tasks sharing a files structure, this test |
| + * will limit the total number of files that can be opened. |
| + */ |
| + rlim_cur = 0; |
| + if (lock_task_sighand(proc->tsk, &irqs)) { |
| + rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; |
| + unlock_task_sighand(proc->tsk, &irqs); |
| + } |
| + if (fd >= rlim_cur) |
| + goto out; |
| + |
| + /* Do we need to expand the fd array or fd set? */ |
| + error = expand_files(files, fd); |
| + if (error < 0) |
| + goto out; |
| + |
| + if (error) { |
| + /* |
| + * If we needed to expand the fd array we |
| + * might have blocked - try again. |
| + */ |
| + error = -EMFILE; |
| + goto repeat; |
| + } |
| + |
| + FD_SET(fd, fdt->open_fds); |
| + if (flags & O_CLOEXEC) |
| + FD_SET(fd, fdt->close_on_exec); |
| + else |
| + FD_CLR(fd, fdt->close_on_exec); |
| + files->next_fd = fd + 1; |
| +#if 1 |
| + /* Sanity check */ |
| + if (fdt->fd[fd] != NULL) { |
| + printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd); |
| + fdt->fd[fd] = NULL; |
| + } |
| +#endif |
| + error = fd; |
| + |
| +out: |
| + spin_unlock(&files->file_lock); |
| + return error; |
| +} |
| + |
| +/* |
| + * copied from fd_install |
| + */ |
| +static void task_fd_install( |
| + struct binder_proc *proc, unsigned int fd, struct file *file) |
| +{ |
| + struct files_struct *files = proc->files; |
| + struct fdtable *fdt; |
| + |
| + if (files == NULL) |
| + return; |
| + |
| + spin_lock(&files->file_lock); |
| + fdt = files_fdtable(files); |
| + BUG_ON(fdt->fd[fd] != NULL); |
| + rcu_assign_pointer(fdt->fd[fd], file); |
| + spin_unlock(&files->file_lock); |
| +} |
| + |
| +/* |
| + * copied from __put_unused_fd in open.c |
| + */ |
| +static void __put_unused_fd(struct files_struct *files, unsigned int fd) |
| +{ |
| + struct fdtable *fdt = files_fdtable(files); |
| + __FD_CLR(fd, fdt->open_fds); |
| + if (fd < files->next_fd) |
| + files->next_fd = fd; |
| +} |
| + |
| +/* |
| + * copied from sys_close |
| + */ |
| +static long task_close_fd(struct binder_proc *proc, unsigned int fd) |
| +{ |
| + struct file *filp; |
| + struct files_struct *files = proc->files; |
| + struct fdtable *fdt; |
| + int retval; |
| + |
| + if (files == NULL) |
| + return -ESRCH; |
| + |
| + spin_lock(&files->file_lock); |
| + fdt = files_fdtable(files); |
| + if (fd >= fdt->max_fds) |
| + goto out_unlock; |
| + filp = fdt->fd[fd]; |
| + if (!filp) |
| + goto out_unlock; |
| + rcu_assign_pointer(fdt->fd[fd], NULL); |
| + FD_CLR(fd, fdt->close_on_exec); |
| + __put_unused_fd(files, fd); |
| + spin_unlock(&files->file_lock); |
| + retval = filp_close(filp, files); |
| + |
| + /* can't restart close syscall because file table entry was cleared */ |
| + if (unlikely(retval == -ERESTARTSYS || |
| + retval == -ERESTARTNOINTR || |
| + retval == -ERESTARTNOHAND || |
| + retval == -ERESTART_RESTARTBLOCK)) |
| + retval = -EINTR; |
| + |
| + return retval; |
| + |
| +out_unlock: |
| + spin_unlock(&files->file_lock); |
| + return -EBADF; |
| +} |
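| |
| The three helpers above mirror get_unused_fd_flags(), fd_install() and |
| sys_close(), but operate on the files_struct cached in binder_proc rather |
| than on current->files; that is what lets a transaction install a passed |
| file descriptor directly into the receiving process. A minimal sketch of |
| the pairing (illustrative; the real call site is the BINDER_TYPE_FD case |
| in binder_transaction() below): |
| |
| /* Illustrative only: move file into target_proc's fd table. */ |
| static int install_into_target(struct binder_proc *target_proc, |
| 			       struct file *file) |
| { |
| 	int fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); |
| 	if (fd < 0) |
| 		return fd;	/* no free slot, or target already exited */ |
| 	task_fd_install(target_proc, fd, file); |
| 	return fd;	/* task_close_fd(target_proc, fd) would undo this */ |
| } |
| |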
| + |
| +static void binder_set_nice(long nice) |
| +{ |
| + long min_nice; |
| + if (can_nice(current, nice)) { |
| + set_user_nice(current, nice); |
| + return; |
| + } |
| + min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; |
| + if (binder_debug_mask & BINDER_DEBUG_PRIORITY_CAP) |
| + printk(KERN_INFO "binder: %d: nice value %ld not allowed use " |
| + "%ld instead\n", current->pid, nice, min_nice); |
| + set_user_nice(current, min_nice); |
| + if (min_nice < 20) |
| + return; |
| + binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid); |
| +} |
| + |
| +static size_t binder_buffer_size(struct binder_proc *proc, |
| + struct binder_buffer *buffer) |
| +{ |
| + if (list_is_last(&buffer->entry, &proc->buffers)) |
| + return proc->buffer + proc->buffer_size - (void *)buffer->data; |
| + else |
| + return (size_t)list_entry(buffer->entry.next, |
| + struct binder_buffer, entry) - (size_t)buffer->data; |
| +} |
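| |
| binder_buffer_size() works because buffers are carved contiguously out of |
| one mapping: a buffer's usable size is the gap from its data[] up to the |
| next buffer header, or up to the end of the mapping for the last buffer. |
| A sketch of the assumed layout (addresses grow to the right): |
| |
|   proc->buffer                              proc->buffer + buffer_size |
|   [hdr A][A->data .........][hdr B][B->data ........][hdr C]  ... |
|            size(A) = (char *)B - (char *)A->data |
| |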
| + |
| +static void binder_insert_free_buffer(struct binder_proc *proc, |
| + struct binder_buffer *new_buffer) |
| +{ |
| + struct rb_node **p = &proc->free_buffers.rb_node; |
| + struct rb_node *parent = NULL; |
| + struct binder_buffer *buffer; |
| + size_t buffer_size; |
| + size_t new_buffer_size; |
| + |
| + BUG_ON(!new_buffer->free); |
| + |
| + new_buffer_size = binder_buffer_size(proc, new_buffer); |
| + |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) |
| + printk(KERN_INFO "binder: %d: add free buffer, size %zd, " |
| + "at %p\n", proc->pid, new_buffer_size, new_buffer); |
| + |
| + while (*p) { |
| + parent = *p; |
| + buffer = rb_entry(parent, struct binder_buffer, rb_node); |
| + BUG_ON(!buffer->free); |
| + |
| + buffer_size = binder_buffer_size(proc, buffer); |
| + |
| + if (new_buffer_size < buffer_size) |
| + p = &parent->rb_left; |
| + else |
| + p = &parent->rb_right; |
| + } |
| + rb_link_node(&new_buffer->rb_node, parent, p); |
| + rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); |
| +} |
| + |
| +static void binder_insert_allocated_buffer(struct binder_proc *proc, |
| + struct binder_buffer *new_buffer) |
| +{ |
| + struct rb_node **p = &proc->allocated_buffers.rb_node; |
| + struct rb_node *parent = NULL; |
| + struct binder_buffer *buffer; |
| + |
| + BUG_ON(new_buffer->free); |
| + |
| + while (*p) { |
| + parent = *p; |
| + buffer = rb_entry(parent, struct binder_buffer, rb_node); |
| + BUG_ON(buffer->free); |
| + |
| + if (new_buffer < buffer) |
| + p = &parent->rb_left; |
| + else if (new_buffer > buffer) |
| + p = &parent->rb_right; |
| + else |
| + BUG(); |
| + } |
| + rb_link_node(&new_buffer->rb_node, parent, p); |
| + rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); |
| +} |
| + |
| +static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, |
| + void __user *user_ptr) |
| +{ |
| + struct rb_node *n = proc->allocated_buffers.rb_node; |
| + struct binder_buffer *buffer; |
| + struct binder_buffer *kern_ptr; |
| + |
| + kern_ptr = user_ptr - proc->user_buffer_offset |
| + - offsetof(struct binder_buffer, data); |
| + |
| + while (n) { |
| + buffer = rb_entry(n, struct binder_buffer, rb_node); |
| + BUG_ON(buffer->free); |
| + |
| + if (kern_ptr < buffer) |
| + n = n->rb_left; |
| + else if (kern_ptr > buffer) |
| + n = n->rb_right; |
| + else |
| + return buffer; |
| + } |
| + return NULL; |
| +} |
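| |
| binder_buffer_lookup() relies on the fixed distance, recorded at mmap |
| time in user_buffer_offset, between the kernel and userspace mappings of |
| the same pages. A sketch of both directions of that translation (helper |
| names are illustrative, not from the patch): |
| |
| /* kernel buffer -> the pointer userspace sees for its data */ |
| static void __user *binder_user_ptr(struct binder_proc *proc, |
| 				    struct binder_buffer *buffer) |
| { |
| 	return (void __user *)((uintptr_t)buffer->data + |
| 			       proc->user_buffer_offset); |
| } |
| |
| /* user pointer -> kernel buffer header, as the lookup above does */ |
| static struct binder_buffer *binder_kern_buf(struct binder_proc *proc, |
| 					      void __user *user_ptr) |
| { |
| 	return (struct binder_buffer *)((uintptr_t)user_ptr - |
| 		proc->user_buffer_offset - |
| 		offsetof(struct binder_buffer, data)); |
| } |
| |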
| + |
| +static int binder_update_page_range(struct binder_proc *proc, int allocate, |
| + void *start, void *end, |
| + struct vm_area_struct *vma) |
| +{ |
| + void *page_addr; |
| + unsigned long user_page_addr; |
| + struct vm_struct tmp_area; |
| + struct page **page; |
| + struct mm_struct *mm; |
| + |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) |
| + printk(KERN_INFO "binder: %d: %s pages %p-%p\n", |
| + proc->pid, allocate ? "allocate" : "free", start, end); |
| + |
| + if (end <= start) |
| + return 0; |
| + |
| + if (vma) |
| + mm = NULL; |
| + else |
| + mm = get_task_mm(proc->tsk); |
| + |
| + if (mm) { |
| + down_write(&mm->mmap_sem); |
| + vma = proc->vma; |
| + } |
| + |
| + if (allocate == 0) |
| + goto free_range; |
| + |
| + if (vma == NULL) { |
| + printk(KERN_ERR "binder: %d: binder_alloc_buf failed to " |
| + "map pages in userspace, no vma\n", proc->pid); |
| + goto err_no_vma; |
| + } |
| + |
| + for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { |
| + int ret; |
| + struct page **page_array_ptr; |
| + page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; |
| + |
| + BUG_ON(*page); |
| + *page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
| + if (*page == NULL) { |
| + printk(KERN_ERR "binder: %d: binder_alloc_buf failed " |
| + "for page at %p\n", proc->pid, page_addr); |
| + goto err_alloc_page_failed; |
| + } |
| + tmp_area.addr = page_addr; |
| + tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */; |
| + page_array_ptr = page; |
| + ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); |
| + if (ret) { |
| + printk(KERN_ERR "binder: %d: binder_alloc_buf failed " |
| + "to map page at %p in kernel\n", |
| + proc->pid, page_addr); |
| + goto err_map_kernel_failed; |
| + } |
| + user_page_addr = |
| + (uintptr_t)page_addr + proc->user_buffer_offset; |
| + ret = vm_insert_page(vma, user_page_addr, page[0]); |
| + if (ret) { |
| + printk(KERN_ERR "binder: %d: binder_alloc_buf failed " |
| + "to map page at %lx in userspace\n", |
| + proc->pid, user_page_addr); |
| + goto err_vm_insert_page_failed; |
| + } |
| + /* vm_insert_page does not seem to increment the refcount */ |
| + } |
| + if (mm) { |
| + up_write(&mm->mmap_sem); |
| + mmput(mm); |
| + } |
| + return 0; |
| + |
| +free_range: |
| + for (page_addr = end - PAGE_SIZE; page_addr >= start; |
| + page_addr -= PAGE_SIZE) { |
| + page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; |
| + if (vma) |
| + zap_page_range(vma, (uintptr_t)page_addr + |
| + proc->user_buffer_offset, PAGE_SIZE, NULL); |
| +err_vm_insert_page_failed: |
| + unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); |
| +err_map_kernel_failed: |
| + __free_page(*page); |
| + *page = NULL; |
| +err_alloc_page_failed: |
| + ; |
| + } |
| +err_no_vma: |
| + if (mm) { |
| + up_write(&mm->mmap_sem); |
| + mmput(mm); |
| + } |
| + return -ENOMEM; |
| +} |
| + |
| +static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, |
| + size_t data_size, |
| + size_t offsets_size, int is_async) |
| +{ |
| + struct rb_node *n = proc->free_buffers.rb_node; |
| + struct binder_buffer *buffer; |
| + size_t buffer_size; |
| + struct rb_node *best_fit = NULL; |
| + void *has_page_addr; |
| + void *end_page_addr; |
| + size_t size; |
| + |
| + if (proc->vma == NULL) { |
| + printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n", |
| + proc->pid); |
| + return NULL; |
| + } |
| + |
| + size = ALIGN(data_size, sizeof(void *)) + |
| + ALIGN(offsets_size, sizeof(void *)); |
| + |
| + if (size < data_size || size < offsets_size) { |
| + binder_user_error("binder: %d: got transaction with invalid " |
| + "size %zd-%zd\n", proc->pid, data_size, offsets_size); |
| + return NULL; |
| + } |
| + |
| + if (is_async && |
| + proc->free_async_space < size + sizeof(struct binder_buffer)) { |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) |
| + printk(KERN_ERR |
| + "binder: %d: binder_alloc_buf size %zd failed, " |
| + "no async space left\n", proc->pid, size); |
| + return NULL; |
| + } |
| + |
| + while (n) { |
| + buffer = rb_entry(n, struct binder_buffer, rb_node); |
| + BUG_ON(!buffer->free); |
| + buffer_size = binder_buffer_size(proc, buffer); |
| + |
| + if (size < buffer_size) { |
| + best_fit = n; |
| + n = n->rb_left; |
| + } else if (size > buffer_size) |
| + n = n->rb_right; |
| + else { |
| + best_fit = n; |
| + break; |
| + } |
| + } |
| + if (best_fit == NULL) { |
| + printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, " |
| + "no address space\n", proc->pid, size); |
| + return NULL; |
| + } |
| + if (n == NULL) { |
| + buffer = rb_entry(best_fit, struct binder_buffer, rb_node); |
| + buffer_size = binder_buffer_size(proc, buffer); |
| + } |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) |
| + printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd got buff" |
| + "er %p size %zd\n", proc->pid, size, buffer, buffer_size); |
| + |
| + has_page_addr = |
| + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); |
| + if (n == NULL) { |
| + if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) |
| + buffer_size = size; /* no room for other buffers */ |
| + else |
| + buffer_size = size + sizeof(struct binder_buffer); |
| + } |
| + end_page_addr = |
| + (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); |
| + if (end_page_addr > has_page_addr) |
| + end_page_addr = has_page_addr; |
| + if (binder_update_page_range(proc, 1, |
| + (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) |
| + return NULL; |
| + |
| + rb_erase(best_fit, &proc->free_buffers); |
| + buffer->free = 0; |
| + binder_insert_allocated_buffer(proc, buffer); |
| + if (buffer_size != size) { |
| + struct binder_buffer *new_buffer = (void *)buffer->data + size; |
| + list_add(&new_buffer->entry, &buffer->entry); |
| + new_buffer->free = 1; |
| + binder_insert_free_buffer(proc, new_buffer); |
| + } |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) |
| + printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd got " |
| + "%p\n", proc->pid, size, buffer); |
| + buffer->data_size = data_size; |
| + buffer->offsets_size = offsets_size; |
| + buffer->async_transaction = is_async; |
| + if (is_async) { |
| + proc->free_async_space -= size + sizeof(struct binder_buffer); |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC) |
| + printk(KERN_INFO "binder: %d: binder_alloc_buf size %zd " |
| + "async free %zd\n", proc->pid, size, |
| + proc->free_async_space); |
| + } |
| + |
| + return buffer; |
| +} |
| + |
| +static void *buffer_start_page(struct binder_buffer *buffer) |
| +{ |
| + return (void *)((uintptr_t)buffer & PAGE_MASK); |
| +} |
| + |
| +static void *buffer_end_page(struct binder_buffer *buffer) |
| +{ |
| + return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); |
| +} |
| + |
| +static void binder_delete_free_buffer(struct binder_proc *proc, |
| + struct binder_buffer *buffer) |
| +{ |
| + struct binder_buffer *prev, *next = NULL; |
| + int free_page_end = 1; |
| + int free_page_start = 1; |
| + |
| + BUG_ON(proc->buffers.next == &buffer->entry); |
| + prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); |
| + BUG_ON(!prev->free); |
| + if (buffer_end_page(prev) == buffer_start_page(buffer)) { |
| + free_page_start = 0; |
| + if (buffer_end_page(prev) == buffer_end_page(buffer)) |
| + free_page_end = 0; |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) |
| + printk(KERN_INFO "binder: %d: merge free, buffer %p " |
| + "share page with %p\n", proc->pid, buffer, prev); |
| + } |
| + |
| + if (!list_is_last(&buffer->entry, &proc->buffers)) { |
| + next = list_entry(buffer->entry.next, |
| + struct binder_buffer, entry); |
| + if (buffer_start_page(next) == buffer_end_page(buffer)) { |
| + free_page_end = 0; |
| + if (buffer_start_page(next) == |
| + buffer_start_page(buffer)) |
| + free_page_start = 0; |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) |
| + printk(KERN_INFO "binder: %d: merge free, " |
| + "buffer %p share page with %p\n", |
| + proc->pid, buffer, next); |
| + } |
| + } |
| + list_del(&buffer->entry); |
| + if (free_page_start || free_page_end) { |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) |
| + printk(KERN_INFO "binder: %d: merge free, buffer %p do " |
| + "not share page%s%s with with %p or %p\n", |
| + proc->pid, buffer, free_page_start ? "" : " end", |
| + free_page_end ? "" : " start", prev, next); |
| + binder_update_page_range(proc, 0, free_page_start ? |
| + buffer_start_page(buffer) : buffer_end_page(buffer), |
| + (free_page_end ? buffer_end_page(buffer) : |
| + buffer_start_page(buffer)) + PAGE_SIZE, NULL); |
| + } |
| +} |
| + |
| +static void binder_free_buf(struct binder_proc *proc, |
| + struct binder_buffer *buffer) |
| +{ |
| + size_t size, buffer_size; |
| + |
| + buffer_size = binder_buffer_size(proc, buffer); |
| + |
| + size = ALIGN(buffer->data_size, sizeof(void *)) + |
| + ALIGN(buffer->offsets_size, sizeof(void *)); |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC) |
| + printk(KERN_INFO "binder: %d: binder_free_buf %p size %zd buffer" |
| + "_size %zd\n", proc->pid, buffer, size, buffer_size); |
| + |
| + BUG_ON(buffer->free); |
| + BUG_ON(size > buffer_size); |
| + BUG_ON(buffer->transaction != NULL); |
| + BUG_ON((void *)buffer < proc->buffer); |
| + BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); |
| + |
| + if (buffer->async_transaction) { |
| + proc->free_async_space += size + sizeof(struct binder_buffer); |
| + if (binder_debug_mask & BINDER_DEBUG_BUFFER_ALLOC_ASYNC) |
| + printk(KERN_INFO "binder: %d: binder_free_buf size %zd " |
| + "async free %zd\n", proc->pid, size, |
| + proc->free_async_space); |
| + } |
| + |
| + binder_update_page_range(proc, 0, |
| + (void *)PAGE_ALIGN((uintptr_t)buffer->data), |
| + (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), |
| + NULL); |
| + rb_erase(&buffer->rb_node, &proc->allocated_buffers); |
| + buffer->free = 1; |
| + if (!list_is_last(&buffer->entry, &proc->buffers)) { |
| + struct binder_buffer *next = list_entry(buffer->entry.next, |
| + struct binder_buffer, entry); |
| + if (next->free) { |
| + rb_erase(&next->rb_node, &proc->free_buffers); |
| + binder_delete_free_buffer(proc, next); |
| + } |
| + } |
| + if (proc->buffers.next != &buffer->entry) { |
| + struct binder_buffer *prev = list_entry(buffer->entry.prev, |
| + struct binder_buffer, entry); |
| + if (prev->free) { |
| + binder_delete_free_buffer(proc, buffer); |
| + rb_erase(&prev->rb_node, &proc->free_buffers); |
| + buffer = prev; |
| + } |
| + } |
| + binder_insert_free_buffer(proc, buffer); |
| +} |
| + |
| +static struct binder_node *binder_get_node(struct binder_proc *proc, |
| + void __user *ptr) |
| +{ |
| + struct rb_node *n = proc->nodes.rb_node; |
| + struct binder_node *node; |
| + |
| + while (n) { |
| + node = rb_entry(n, struct binder_node, rb_node); |
| + |
| + if (ptr < node->ptr) |
| + n = n->rb_left; |
| + else if (ptr > node->ptr) |
| + n = n->rb_right; |
| + else |
| + return node; |
| + } |
| + return NULL; |
| +} |
| + |
| +static struct binder_node *binder_new_node(struct binder_proc *proc, |
| + void __user *ptr, |
| + void __user *cookie) |
| +{ |
| + struct rb_node **p = &proc->nodes.rb_node; |
| + struct rb_node *parent = NULL; |
| + struct binder_node *node; |
| + |
| + while (*p) { |
| + parent = *p; |
| + node = rb_entry(parent, struct binder_node, rb_node); |
| + |
| + if (ptr < node->ptr) |
| + p = &(*p)->rb_left; |
| + else if (ptr > node->ptr) |
| + p = &(*p)->rb_right; |
| + else |
| + return NULL; |
| + } |
| + |
| + node = kzalloc(sizeof(*node), GFP_KERNEL); |
| + if (node == NULL) |
| + return NULL; |
| + binder_stats.obj_created[BINDER_STAT_NODE]++; |
| + rb_link_node(&node->rb_node, parent, p); |
| + rb_insert_color(&node->rb_node, &proc->nodes); |
| + node->debug_id = ++binder_last_id; |
| + node->proc = proc; |
| + node->ptr = ptr; |
| + node->cookie = cookie; |
| + node->work.type = BINDER_WORK_NODE; |
| + INIT_LIST_HEAD(&node->work.entry); |
| + INIT_LIST_HEAD(&node->async_todo); |
| + if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS) |
| + printk(KERN_INFO "binder: %d:%d node %d u%p c%p created\n", |
| + proc->pid, current->pid, node->debug_id, |
| + node->ptr, node->cookie); |
| + return node; |
| +} |
| + |
| +static int binder_inc_node(struct binder_node *node, int strong, int internal, |
| + struct list_head *target_list) |
| +{ |
| + if (strong) { |
| + if (internal) { |
| + if (target_list == NULL && |
| + node->internal_strong_refs == 0 && |
| + !(node == binder_context_mgr_node && |
| + node->has_strong_ref)) { |
| + printk(KERN_ERR "binder: invalid inc strong " |
| + "node for %d\n", node->debug_id); |
| + return -EINVAL; |
| + } |
| + node->internal_strong_refs++; |
| + } else |
| + node->local_strong_refs++; |
| + if (!node->has_strong_ref && target_list) { |
| + list_del_init(&node->work.entry); |
| + list_add_tail(&node->work.entry, target_list); |
| + } |
| + } else { |
| + if (!internal) |
| + node->local_weak_refs++; |
| + if (!node->has_weak_ref && list_empty(&node->work.entry)) { |
| + if (target_list == NULL) { |
| + printk(KERN_ERR "binder: invalid inc weak node " |
| + "for %d\n", node->debug_id); |
| + return -EINVAL; |
| + } |
| + list_add_tail(&node->work.entry, target_list); |
| + } |
| + } |
| + return 0; |
| +} |
| + |
| +static int binder_dec_node(struct binder_node *node, int strong, int internal) |
| +{ |
| + if (strong) { |
| + if (internal) |
| + node->internal_strong_refs--; |
| + else |
| + node->local_strong_refs--; |
| + if (node->local_strong_refs || node->internal_strong_refs) |
| + return 0; |
| + } else { |
| + if (!internal) |
| + node->local_weak_refs--; |
| + if (node->local_weak_refs || !hlist_empty(&node->refs)) |
| + return 0; |
| + } |
| + if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { |
| + if (list_empty(&node->work.entry)) { |
| + list_add_tail(&node->work.entry, &node->proc->todo); |
| + wake_up_interruptible(&node->proc->wait); |
| + } |
| + } else { |
| + if (hlist_empty(&node->refs) && !node->local_strong_refs && |
| + !node->local_weak_refs) { |
| + list_del_init(&node->work.entry); |
| + if (node->proc) { |
| + rb_erase(&node->rb_node, &node->proc->nodes); |
| + if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS) |
| + printk(KERN_INFO "binder: refless node %d deleted\n", node->debug_id); |
| + } else { |
| + hlist_del(&node->dead_node); |
| + if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS) |
| + printk(KERN_INFO "binder: dead node %d deleted\n", node->debug_id); |
| + } |
| + kfree(node); |
| + binder_stats.obj_deleted[BINDER_STAT_NODE]++; |
| + } |
| + } |
| + |
| + return 0; |
| +} |
| + |
| + |
| +static struct binder_ref *binder_get_ref(struct binder_proc *proc, |
| + uint32_t desc) |
| +{ |
| + struct rb_node *n = proc->refs_by_desc.rb_node; |
| + struct binder_ref *ref; |
| + |
| + while (n) { |
| + ref = rb_entry(n, struct binder_ref, rb_node_desc); |
| + |
| + if (desc < ref->desc) |
| + n = n->rb_left; |
| + else if (desc > ref->desc) |
| + n = n->rb_right; |
| + else |
| + return ref; |
| + } |
| + return NULL; |
| +} |
| + |
| +static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, |
| + struct binder_node *node) |
| +{ |
| + struct rb_node *n; |
| + struct rb_node **p = &proc->refs_by_node.rb_node; |
| + struct rb_node *parent = NULL; |
| + struct binder_ref *ref, *new_ref; |
| + |
| + while (*p) { |
| + parent = *p; |
| + ref = rb_entry(parent, struct binder_ref, rb_node_node); |
| + |
| + if (node < ref->node) |
| + p = &(*p)->rb_left; |
| + else if (node > ref->node) |
| + p = &(*p)->rb_right; |
| + else |
| + return ref; |
| + } |
| + new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); |
| + if (new_ref == NULL) |
| + return NULL; |
| + binder_stats.obj_created[BINDER_STAT_REF]++; |
| + new_ref->debug_id = ++binder_last_id; |
| + new_ref->proc = proc; |
| + new_ref->node = node; |
| + rb_link_node(&new_ref->rb_node_node, parent, p); |
| + rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); |
| + |
| + new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1; |
| + for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { |
| + ref = rb_entry(n, struct binder_ref, rb_node_desc); |
| + if (ref->desc > new_ref->desc) |
| + break; |
| + new_ref->desc = ref->desc + 1; |
| + } |
| + |
| + p = &proc->refs_by_desc.rb_node; |
| + while (*p) { |
| + parent = *p; |
| + ref = rb_entry(parent, struct binder_ref, rb_node_desc); |
| + |
| + if (new_ref->desc < ref->desc) |
| + p = &(*p)->rb_left; |
| + else if (new_ref->desc > ref->desc) |
| + p = &(*p)->rb_right; |
| + else |
| + BUG(); |
| + } |
| + rb_link_node(&new_ref->rb_node_desc, parent, p); |
| + rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); |
| + if (node) { |
| + hlist_add_head(&new_ref->node_entry, &node->refs); |
| + if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS) |
| + printk(KERN_INFO "binder: %d new ref %d desc %d for " |
| + "node %d\n", proc->pid, new_ref->debug_id, |
| + new_ref->desc, node->debug_id); |
| + } else { |
| + if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS) |
| + printk(KERN_INFO "binder: %d new ref %d desc %d for " |
| + "dead node\n", proc->pid, new_ref->debug_id, |
| + new_ref->desc); |
| + } |
| + return new_ref; |
| +} |
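| |
| Descriptor assignment above walks refs_by_desc in ascending order and |
| stops at the first gap, so handles stay dense and are reused; desc 0 is |
| reserved for references to the context manager node. For example, if a |
| process already holds descriptors {1, 2, 4}, a new ref is given 3. |
| |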
| + |
| +static void binder_delete_ref(struct binder_ref *ref) |
| +{ |
| + if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS) |
| + printk(KERN_INFO "binder: %d delete ref %d desc %d for " |
| + "node %d\n", ref->proc->pid, ref->debug_id, |
| + ref->desc, ref->node->debug_id); |
| + rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); |
| + rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); |
| + if (ref->strong) |
| + binder_dec_node(ref->node, 1, 1); |
| + hlist_del(&ref->node_entry); |
| + binder_dec_node(ref->node, 0, 1); |
| + if (ref->death) { |
| + if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) |
| + printk(KERN_INFO "binder: %d delete ref %d desc %d " |
| + "has death notification\n", ref->proc->pid, |
| + ref->debug_id, ref->desc); |
| + list_del(&ref->death->work.entry); |
| + kfree(ref->death); |
| + binder_stats.obj_deleted[BINDER_STAT_DEATH]++; |
| + } |
| + kfree(ref); |
| + binder_stats.obj_deleted[BINDER_STAT_REF]++; |
| +} |
| + |
| +static int binder_inc_ref(struct binder_ref *ref, int strong, |
| + struct list_head *target_list) |
| +{ |
| + int ret; |
| + if (strong) { |
| + if (ref->strong == 0) { |
| + ret = binder_inc_node(ref->node, 1, 1, target_list); |
| + if (ret) |
| + return ret; |
| + } |
| + ref->strong++; |
| + } else { |
| + if (ref->weak == 0) { |
| + ret = binder_inc_node(ref->node, 0, 1, target_list); |
| + if (ret) |
| + return ret; |
| + } |
| + ref->weak++; |
| + } |
| + return 0; |
| +} |
| + |
| + |
| +static int binder_dec_ref(struct binder_ref *ref, int strong) |
| +{ |
| + if (strong) { |
| + if (ref->strong == 0) { |
| + binder_user_error("binder: %d invalid dec strong, " |
| + "ref %d desc %d s %d w %d\n", |
| + ref->proc->pid, ref->debug_id, |
| + ref->desc, ref->strong, ref->weak); |
| + return -EINVAL; |
| + } |
| + ref->strong--; |
| + if (ref->strong == 0) { |
| + int ret; |
| + ret = binder_dec_node(ref->node, strong, 1); |
| + if (ret) |
| + return ret; |
| + } |
| + } else { |
| + if (ref->weak == 0) { |
| + binder_user_error("binder: %d invalid dec weak, " |
| + "ref %d desc %d s %d w %d\n", |
| + ref->proc->pid, ref->debug_id, |
| + ref->desc, ref->strong, ref->weak); |
| + return -EINVAL; |
| + } |
| + ref->weak--; |
| + } |
| + if (ref->strong == 0 && ref->weak == 0) |
| + binder_delete_ref(ref); |
| + return 0; |
| +} |
| + |
| +static void binder_pop_transaction(struct binder_thread *target_thread, |
| + struct binder_transaction *t) |
| +{ |
| + if (target_thread) { |
| + BUG_ON(target_thread->transaction_stack != t); |
| + BUG_ON(target_thread->transaction_stack->from != target_thread); |
| + target_thread->transaction_stack = |
| + target_thread->transaction_stack->from_parent; |
| + t->from = NULL; |
| + } |
| + t->need_reply = 0; |
| + if (t->buffer) |
| + t->buffer->transaction = NULL; |
| + kfree(t); |
| + binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++; |
| +} |
| + |
| +static void binder_send_failed_reply(struct binder_transaction *t, |
| + uint32_t error_code) |
| +{ |
| + struct binder_thread *target_thread; |
| + BUG_ON(t->flags & TF_ONE_WAY); |
| + while (1) { |
| + target_thread = t->from; |
| + if (target_thread) { |
| + if (target_thread->return_error != BR_OK && |
| + target_thread->return_error2 == BR_OK) { |
| + target_thread->return_error2 = |
| + target_thread->return_error; |
| + target_thread->return_error = BR_OK; |
| + } |
| + if (target_thread->return_error == BR_OK) { |
| + if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION) |
| + printk(KERN_INFO "binder: send failed reply for transaction %d to %d:%d\n", |
| + t->debug_id, target_thread->proc->pid, target_thread->pid); |
| + |
| + binder_pop_transaction(target_thread, t); |
| + target_thread->return_error = error_code; |
| + wake_up_interruptible(&target_thread->wait); |
| + } else { |
| + printk(KERN_ERR "binder: reply failed, target " |
| + "thread, %d:%d, has error code %d " |
| + "already\n", target_thread->proc->pid, |
| + target_thread->pid, |
| + target_thread->return_error); |
| + } |
| + return; |
| + } else { |
| + struct binder_transaction *next = t->from_parent; |
| + |
| + if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION) |
| + printk(KERN_INFO "binder: send failed reply " |
| + "for transaction %d, target dead\n", |
| + t->debug_id); |
| + |
| + binder_pop_transaction(target_thread, t); |
| + if (next == NULL) { |
| + if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) |
| + printk(KERN_INFO "binder: reply failed," |
| + " no target thread at root\n"); |
| + return; |
| + } |
| + t = next; |
| + if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) |
| + printk(KERN_INFO "binder: reply failed, no targ" |
| + "et thread -- retry %d\n", t->debug_id); |
| + } |
| + } |
| +} |
| + |
| +static void binder_transaction_buffer_release(struct binder_proc *proc, |
| + struct binder_buffer *buffer, |
| + size_t *failed_at); |
| + |
| +static void binder_transaction(struct binder_proc *proc, |
| + struct binder_thread *thread, |
| + struct binder_transaction_data *tr, int reply) |
| +{ |
| + struct binder_transaction *t; |
| + struct binder_work *tcomplete; |
| + size_t *offp, *off_end; |
| + struct binder_proc *target_proc; |
| + struct binder_thread *target_thread = NULL; |
| + struct binder_node *target_node = NULL; |
| + struct list_head *target_list; |
| + wait_queue_head_t *target_wait; |
| + struct binder_transaction *in_reply_to = NULL; |
| + struct binder_transaction_log_entry *e; |
| + uint32_t return_error; |
| + |
| + e = binder_transaction_log_add(&binder_transaction_log); |
| + e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); |
| + e->from_proc = proc->pid; |
| + e->from_thread = thread->pid; |
| + e->target_handle = tr->target.handle; |
| + e->data_size = tr->data_size; |
| + e->offsets_size = tr->offsets_size; |
| + |
| + if (reply) { |
| + in_reply_to = thread->transaction_stack; |
| + if (in_reply_to == NULL) { |
| + binder_user_error("binder: %d:%d got reply transaction " |
| + "with no transaction stack\n", |
| + proc->pid, thread->pid); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_empty_call_stack; |
| + } |
| + binder_set_nice(in_reply_to->saved_priority); |
| + if (in_reply_to->to_thread != thread) { |
| + binder_user_error("binder: %d:%d got reply transaction " |
| + "with bad transaction stack," |
| + " transaction %d has target %d:%d\n", |
| + proc->pid, thread->pid, in_reply_to->debug_id, |
| + in_reply_to->to_proc ? |
| + in_reply_to->to_proc->pid : 0, |
| + in_reply_to->to_thread ? |
| + in_reply_to->to_thread->pid : 0); |
| + return_error = BR_FAILED_REPLY; |
| + in_reply_to = NULL; |
| + goto err_bad_call_stack; |
| + } |
| + thread->transaction_stack = in_reply_to->to_parent; |
| + target_thread = in_reply_to->from; |
| + if (target_thread == NULL) { |
| + return_error = BR_DEAD_REPLY; |
| + goto err_dead_binder; |
| + } |
| + if (target_thread->transaction_stack != in_reply_to) { |
| + binder_user_error("binder: %d:%d got reply transaction " |
| + "with bad target transaction stack %d, " |
| + "expected %d\n", |
| + proc->pid, thread->pid, |
| + target_thread->transaction_stack ? |
| + target_thread->transaction_stack->debug_id : 0, |
| + in_reply_to->debug_id); |
| + return_error = BR_FAILED_REPLY; |
| + in_reply_to = NULL; |
| + target_thread = NULL; |
| + goto err_dead_binder; |
| + } |
| + target_proc = target_thread->proc; |
| + } else { |
| + if (tr->target.handle) { |
| + struct binder_ref *ref; |
| + ref = binder_get_ref(proc, tr->target.handle); |
| + if (ref == NULL) { |
| + binder_user_error("binder: %d:%d got " |
| + "transaction to invalid handle\n", |
| + proc->pid, thread->pid); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_invalid_target_handle; |
| + } |
| + target_node = ref->node; |
| + } else { |
| + target_node = binder_context_mgr_node; |
| + if (target_node == NULL) { |
| + return_error = BR_DEAD_REPLY; |
| + goto err_no_context_mgr_node; |
| + } |
| + } |
| + e->to_node = target_node->debug_id; |
| + target_proc = target_node->proc; |
| + if (target_proc == NULL) { |
| + return_error = BR_DEAD_REPLY; |
| + goto err_dead_binder; |
| + } |
| + if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { |
| + struct binder_transaction *tmp; |
| + tmp = thread->transaction_stack; |
| + if (tmp->to_thread != thread) { |
| + binder_user_error("binder: %d:%d got new " |
| + "transaction with bad transaction stack" |
| + ", transaction %d has target %d:%d\n", |
| + proc->pid, thread->pid, tmp->debug_id, |
| + tmp->to_proc ? tmp->to_proc->pid : 0, |
| + tmp->to_thread ? |
| + tmp->to_thread->pid : 0); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_bad_call_stack; |
| + } |
| + while (tmp) { |
| + if (tmp->from && tmp->from->proc == target_proc) |
| + target_thread = tmp->from; |
| + tmp = tmp->from_parent; |
| + } |
| + } |
| + } |
| + if (target_thread) { |
| + e->to_thread = target_thread->pid; |
| + target_list = &target_thread->todo; |
| + target_wait = &target_thread->wait; |
| + } else { |
| + target_list = &target_proc->todo; |
| + target_wait = &target_proc->wait; |
| + } |
| + e->to_proc = target_proc->pid; |
| + |
| + /* TODO: reuse incoming transaction for reply */ |
| + t = kzalloc(sizeof(*t), GFP_KERNEL); |
| + if (t == NULL) { |
| + return_error = BR_FAILED_REPLY; |
| + goto err_alloc_t_failed; |
| + } |
| + binder_stats.obj_created[BINDER_STAT_TRANSACTION]++; |
| + |
| + tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); |
| + if (tcomplete == NULL) { |
| + return_error = BR_FAILED_REPLY; |
| + goto err_alloc_tcomplete_failed; |
| + } |
| + binder_stats.obj_created[BINDER_STAT_TRANSACTION_COMPLETE]++; |
| + |
| + t->debug_id = ++binder_last_id; |
| + e->debug_id = t->debug_id; |
| + |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) { |
| + if (reply) |
| + printk(KERN_INFO "binder: %d:%d BC_REPLY %d -> %d:%d, " |
| + "data %p-%p size %zd-%zd\n", |
| + proc->pid, thread->pid, t->debug_id, |
| + target_proc->pid, target_thread->pid, |
| + tr->data.ptr.buffer, tr->data.ptr.offsets, |
| + tr->data_size, tr->offsets_size); |
| + else |
| + printk(KERN_INFO "binder: %d:%d BC_TRANSACTION %d -> " |
| + "%d - node %d, data %p-%p size %zd-%zd\n", |
| + proc->pid, thread->pid, t->debug_id, |
| + target_proc->pid, target_node->debug_id, |
| + tr->data.ptr.buffer, tr->data.ptr.offsets, |
| + tr->data_size, tr->offsets_size); |
| + } |
| + |
| + if (!reply && !(tr->flags & TF_ONE_WAY)) |
| + t->from = thread; |
| + else |
| + t->from = NULL; |
| + t->sender_euid = proc->tsk->cred->euid; |
| + t->to_proc = target_proc; |
| + t->to_thread = target_thread; |
| + t->code = tr->code; |
| + t->flags = tr->flags; |
| + t->priority = task_nice(current); |
| + t->buffer = binder_alloc_buf(target_proc, tr->data_size, |
| + tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); |
| + if (t->buffer == NULL) { |
| + return_error = BR_FAILED_REPLY; |
| + goto err_binder_alloc_buf_failed; |
| + } |
| + t->buffer->allow_user_free = 0; |
| + t->buffer->debug_id = t->debug_id; |
| + t->buffer->transaction = t; |
| + t->buffer->target_node = target_node; |
| + if (target_node) |
| + binder_inc_node(target_node, 1, 0, NULL); |
| + |
| + offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); |
| + |
| + if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { |
| + binder_user_error("binder: %d:%d got transaction with invalid " |
| + "data ptr\n", proc->pid, thread->pid); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_copy_data_failed; |
| + } |
| + if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { |
| + binder_user_error("binder: %d:%d got transaction with invalid " |
| + "offsets ptr\n", proc->pid, thread->pid); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_copy_data_failed; |
| + } |
| + if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { |
| + binder_user_error("binder: %d:%d got transaction with " |
| + "invalid offsets size, %zd\n", |
| + proc->pid, thread->pid, tr->offsets_size); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_bad_offset; |
| + } |
| + off_end = (void *)offp + tr->offsets_size; |
| + for (; offp < off_end; offp++) { |
| + struct flat_binder_object *fp; |
| + if (*offp > t->buffer->data_size - sizeof(*fp) || |
| + t->buffer->data_size < sizeof(*fp) || |
| + !IS_ALIGNED(*offp, sizeof(void *))) { |
| + binder_user_error("binder: %d:%d got transaction with " |
| + "invalid offset, %zd\n", |
| + proc->pid, thread->pid, *offp); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_bad_offset; |
| + } |
| + fp = (struct flat_binder_object *)(t->buffer->data + *offp); |
| + switch (fp->type) { |
| + case BINDER_TYPE_BINDER: |
| + case BINDER_TYPE_WEAK_BINDER: { |
| + struct binder_ref *ref; |
| + struct binder_node *node = binder_get_node(proc, fp->binder); |
| + if (node == NULL) { |
| + node = binder_new_node(proc, fp->binder, fp->cookie); |
| + if (node == NULL) { |
| + return_error = BR_FAILED_REPLY; |
| + goto err_binder_new_node_failed; |
| + } |
| + node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; |
| + node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); |
| + } |
| + if (fp->cookie != node->cookie) { |
| + binder_user_error("binder: %d:%d sending u%p " |
| + "node %d, cookie mismatch %p != %p\n", |
| + proc->pid, thread->pid, |
| + fp->binder, node->debug_id, |
| + fp->cookie, node->cookie); |
| + goto err_binder_get_ref_for_node_failed; |
| + } |
| + ref = binder_get_ref_for_node(target_proc, node); |
| + if (ref == NULL) { |
| + return_error = BR_FAILED_REPLY; |
| + goto err_binder_get_ref_for_node_failed; |
| + } |
| + if (fp->type == BINDER_TYPE_BINDER) |
| + fp->type = BINDER_TYPE_HANDLE; |
| + else |
| + fp->type = BINDER_TYPE_WEAK_HANDLE; |
| + fp->handle = ref->desc; |
| + binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, &thread->todo); |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
| + printk(KERN_INFO " node %d u%p -> ref %d desc %d\n", |
| + node->debug_id, node->ptr, ref->debug_id, ref->desc); |
| + } break; |
| + case BINDER_TYPE_HANDLE: |
| + case BINDER_TYPE_WEAK_HANDLE: { |
| + struct binder_ref *ref = binder_get_ref(proc, fp->handle); |
| + if (ref == NULL) { |
| + binder_user_error("binder: %d:%d got " |
| + "transaction with invalid " |
| + "handle, %ld\n", proc->pid, |
| + thread->pid, fp->handle); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_binder_get_ref_failed; |
| + } |
| + if (ref->node->proc == target_proc) { |
| + if (fp->type == BINDER_TYPE_HANDLE) |
| + fp->type = BINDER_TYPE_BINDER; |
| + else |
| + fp->type = BINDER_TYPE_WEAK_BINDER; |
| + fp->binder = ref->node->ptr; |
| + fp->cookie = ref->node->cookie; |
| + binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
| + printk(KERN_INFO " ref %d desc %d -> node %d u%p\n", |
| + ref->debug_id, ref->desc, ref->node->debug_id, ref->node->ptr); |
| + } else { |
| + struct binder_ref *new_ref; |
| + new_ref = binder_get_ref_for_node(target_proc, ref->node); |
| + if (new_ref == NULL) { |
| + return_error = BR_FAILED_REPLY; |
| + goto err_binder_get_ref_for_node_failed; |
| + } |
| + fp->handle = new_ref->desc; |
| + binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
| + printk(KERN_INFO " ref %d desc %d -> ref %d desc %d (node %d)\n", |
| + ref->debug_id, ref->desc, new_ref->debug_id, new_ref->desc, ref->node->debug_id); |
| + } |
| + } break; |
| + |
| + case BINDER_TYPE_FD: { |
| + int target_fd; |
| + struct file *file; |
| + |
| + if (reply) { |
| + if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { |
| + binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n", |
| + proc->pid, thread->pid, fp->handle); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_fd_not_allowed; |
| + } |
| + } else if (!target_node->accept_fds) { |
| + binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n", |
| + proc->pid, thread->pid, fp->handle); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_fd_not_allowed; |
| + } |
| + |
| + file = fget(fp->handle); |
| + if (file == NULL) { |
| + binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n", |
| + proc->pid, thread->pid, fp->handle); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_fget_failed; |
| + } |
| + target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); |
| + if (target_fd < 0) { |
| + fput(file); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_get_unused_fd_failed; |
| + } |
| + task_fd_install(target_proc, target_fd, file); |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
| + printk(KERN_INFO " fd %ld -> %d\n", fp->handle, target_fd); |
| + /* TODO: fput? */ |
| + fp->handle = target_fd; |
| + } break; |
| + |
| + default: |
| + binder_user_error("binder: %d:%d got transactio" |
| + "n with invalid object type, %lx\n", |
| + proc->pid, thread->pid, fp->type); |
| + return_error = BR_FAILED_REPLY; |
| + goto err_bad_object_type; |
| + } |
| + } |
| + if (reply) { |
| + BUG_ON(t->buffer->async_transaction != 0); |
| + binder_pop_transaction(target_thread, in_reply_to); |
| + } else if (!(t->flags & TF_ONE_WAY)) { |
| + BUG_ON(t->buffer->async_transaction != 0); |
| + t->need_reply = 1; |
| + t->from_parent = thread->transaction_stack; |
| + thread->transaction_stack = t; |
| + } else { |
| + BUG_ON(target_node == NULL); |
| + BUG_ON(t->buffer->async_transaction != 1); |
| + if (target_node->has_async_transaction) { |
| + target_list = &target_node->async_todo; |
| + target_wait = NULL; |
| + } else |
| + target_node->has_async_transaction = 1; |
| + } |
| + t->work.type = BINDER_WORK_TRANSACTION; |
| + list_add_tail(&t->work.entry, target_list); |
| + tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; |
| + list_add_tail(&tcomplete->entry, &thread->todo); |
| + if (target_wait) |
| + wake_up_interruptible(target_wait); |
| + return; |
| + |
| +err_get_unused_fd_failed: |
| +err_fget_failed: |
| +err_fd_not_allowed: |
| +err_binder_get_ref_for_node_failed: |
| +err_binder_get_ref_failed: |
| +err_binder_new_node_failed: |
| +err_bad_object_type: |
| +err_bad_offset: |
| +err_copy_data_failed: |
| + binder_transaction_buffer_release(target_proc, t->buffer, offp); |
| + t->buffer->transaction = NULL; |
| + binder_free_buf(target_proc, t->buffer); |
| +err_binder_alloc_buf_failed: |
| + kfree(tcomplete); |
| + binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++; |
| +err_alloc_tcomplete_failed: |
| + kfree(t); |
| + binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++; |
| +err_alloc_t_failed: |
| +err_bad_call_stack: |
| +err_empty_call_stack: |
| +err_dead_binder: |
| +err_invalid_target_handle: |
| +err_no_context_mgr_node: |
| + if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION) |
| +		printk(KERN_INFO "binder: %d:%d transaction failed %d, size %zd-%zd\n", |
| + proc->pid, thread->pid, return_error, |
| + tr->data_size, tr->offsets_size); |
| + |
| + { |
| + struct binder_transaction_log_entry *fe; |
| + fe = binder_transaction_log_add(&binder_transaction_log_failed); |
| + *fe = *e; |
| + } |
| + |
| + BUG_ON(thread->return_error != BR_OK); |
| + if (in_reply_to) { |
| + thread->return_error = BR_TRANSACTION_COMPLETE; |
| + binder_send_failed_reply(in_reply_to, return_error); |
| + } else |
| + thread->return_error = return_error; |
| +} |
| + |
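| +/* |
| + * Undo the reference counting performed when a transaction buffer was |
| + * filled in: walk the offsets array and drop one node or ref count per |
| + * flattened object.  If failed_at is set, only the objects before that |
| + * point are released; file descriptors are closed only on this failure |
| + * path, since on successful delivery they belong to the target process. |
| + */ |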
| +static void binder_transaction_buffer_release(struct binder_proc *proc, |
| + struct binder_buffer *buffer, |
| + size_t *failed_at) |
| +{ |
| + size_t *offp, *off_end; |
| + int debug_id = buffer->debug_id; |
| + |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
| + printk(KERN_INFO "binder: %d buffer release %d, size %zd-%zd, failed at %p\n", |
| + proc->pid, buffer->debug_id, |
| + buffer->data_size, buffer->offsets_size, failed_at); |
| + |
| + if (buffer->target_node) |
| + binder_dec_node(buffer->target_node, 1, 0); |
| + |
| + offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); |
| + if (failed_at) |
| + off_end = failed_at; |
| + else |
| + off_end = (void *)offp + buffer->offsets_size; |
| + for (; offp < off_end; offp++) { |
| + struct flat_binder_object *fp; |
| + if (*offp > buffer->data_size - sizeof(*fp) || |
| + buffer->data_size < sizeof(*fp) || |
| + !IS_ALIGNED(*offp, sizeof(void *))) { |
| +			printk(KERN_ERR "binder: transaction release %d bad offset %zd, size %zd\n", |
| +			       debug_id, *offp, buffer->data_size); |
| + continue; |
| + } |
| + fp = (struct flat_binder_object *)(buffer->data + *offp); |
| + switch (fp->type) { |
| + case BINDER_TYPE_BINDER: |
| + case BINDER_TYPE_WEAK_BINDER: { |
| + struct binder_node *node = binder_get_node(proc, fp->binder); |
| + if (node == NULL) { |
| + printk(KERN_ERR "binder: transaction release %d bad node %p\n", debug_id, fp->binder); |
| + break; |
| + } |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
| + printk(KERN_INFO " node %d u%p\n", |
| + node->debug_id, node->ptr); |
| + binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); |
| + } break; |
| + case BINDER_TYPE_HANDLE: |
| + case BINDER_TYPE_WEAK_HANDLE: { |
| + struct binder_ref *ref = binder_get_ref(proc, fp->handle); |
| + if (ref == NULL) { |
| + printk(KERN_ERR "binder: transaction release %d bad handle %ld\n", debug_id, fp->handle); |
| + break; |
| + } |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
| + printk(KERN_INFO " ref %d desc %d (node %d)\n", |
| + ref->debug_id, ref->desc, ref->node->debug_id); |
| + binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); |
| + } break; |
| + |
| + case BINDER_TYPE_FD: |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
| + printk(KERN_INFO " fd %ld\n", fp->handle); |
| + if (failed_at) |
| + task_close_fd(proc, fp->handle); |
| + break; |
| + |
| + default: |
| + printk(KERN_ERR "binder: transaction release %d bad object type %lx\n", debug_id, fp->type); |
| + break; |
| + } |
| + } |
| +} |
| + |
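| +/* |
| + * Process the BC_* commands in the userspace write buffer.  Each |
| + * command is a 32-bit code followed by command-specific arguments; |
| + * *consumed is advanced past every fully handled command so an |
| + * interrupted write can be resumed.  Parsing stops early once a |
| + * command leaves a pending return_error on the thread. |
| + */ |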
| +int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, |
| + void __user *buffer, int size, signed long *consumed) |
| +{ |
| + uint32_t cmd; |
| + void __user *ptr = buffer + *consumed; |
| + void __user *end = buffer + size; |
| + |
| + while (ptr < end && thread->return_error == BR_OK) { |
| + if (get_user(cmd, (uint32_t __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(uint32_t); |
| + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { |
| + binder_stats.bc[_IOC_NR(cmd)]++; |
| + proc->stats.bc[_IOC_NR(cmd)]++; |
| + thread->stats.bc[_IOC_NR(cmd)]++; |
| + } |
| + switch (cmd) { |
| + case BC_INCREFS: |
| + case BC_ACQUIRE: |
| + case BC_RELEASE: |
| + case BC_DECREFS: { |
| + uint32_t target; |
| + struct binder_ref *ref; |
| + const char *debug_string; |
| + |
| + if (get_user(target, (uint32_t __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(uint32_t); |
| + if (target == 0 && binder_context_mgr_node && |
| + (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { |
| + ref = binder_get_ref_for_node(proc, |
| + binder_context_mgr_node); |
| +				if (ref && ref->desc != target) { |
| + binder_user_error("binder: %d:" |
| + "%d tried to acquire " |
| + "reference to desc 0, " |
| + "got %d instead\n", |
| + proc->pid, thread->pid, |
| + ref->desc); |
| + } |
| + } else |
| + ref = binder_get_ref(proc, target); |
| + if (ref == NULL) { |
| +				binder_user_error("binder: %d:%d refcount change on invalid ref %d\n", |
| + proc->pid, thread->pid, target); |
| + break; |
| + } |
| + switch (cmd) { |
| + case BC_INCREFS: |
| + debug_string = "IncRefs"; |
| + binder_inc_ref(ref, 0, NULL); |
| + break; |
| + case BC_ACQUIRE: |
| + debug_string = "Acquire"; |
| + binder_inc_ref(ref, 1, NULL); |
| + break; |
| + case BC_RELEASE: |
| + debug_string = "Release"; |
| + binder_dec_ref(ref, 1); |
| + break; |
| + case BC_DECREFS: |
| + default: |
| + debug_string = "DecRefs"; |
| + binder_dec_ref(ref, 0); |
| + break; |
| + } |
| + if (binder_debug_mask & BINDER_DEBUG_USER_REFS) |
| + printk(KERN_INFO "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n", |
| + proc->pid, thread->pid, debug_string, ref->debug_id, ref->desc, ref->strong, ref->weak, ref->node->debug_id); |
| + break; |
| + } |
| + case BC_INCREFS_DONE: |
| + case BC_ACQUIRE_DONE: { |
| + void __user *node_ptr; |
| + void *cookie; |
| + struct binder_node *node; |
| + |
| + if (get_user(node_ptr, (void * __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(void *); |
| + if (get_user(cookie, (void * __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(void *); |
| + node = binder_get_node(proc, node_ptr); |
| + if (node == NULL) { |
| + binder_user_error("binder: %d:%d " |
| + "%s u%p no match\n", |
| + proc->pid, thread->pid, |
| + cmd == BC_INCREFS_DONE ? |
| + "BC_INCREFS_DONE" : |
| + "BC_ACQUIRE_DONE", |
| + node_ptr); |
| + break; |
| + } |
| + if (cookie != node->cookie) { |
| + binder_user_error("binder: %d:%d %s u%p node %d" |
| + " cookie mismatch %p != %p\n", |
| + proc->pid, thread->pid, |
| + cmd == BC_INCREFS_DONE ? |
| + "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", |
| + node_ptr, node->debug_id, |
| + cookie, node->cookie); |
| + break; |
| + } |
| + if (cmd == BC_ACQUIRE_DONE) { |
| + if (node->pending_strong_ref == 0) { |
| + binder_user_error("binder: %d:%d " |
| + "BC_ACQUIRE_DONE node %d has " |
| + "no pending acquire request\n", |
| + proc->pid, thread->pid, |
| + node->debug_id); |
| + break; |
| + } |
| + node->pending_strong_ref = 0; |
| + } else { |
| + if (node->pending_weak_ref == 0) { |
| + binder_user_error("binder: %d:%d " |
| + "BC_INCREFS_DONE node %d has " |
| + "no pending increfs request\n", |
| + proc->pid, thread->pid, |
| + node->debug_id); |
| + break; |
| + } |
| + node->pending_weak_ref = 0; |
| + } |
| + binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); |
| + if (binder_debug_mask & BINDER_DEBUG_USER_REFS) |
| + printk(KERN_INFO "binder: %d:%d %s node %d ls %d lw %d\n", |
| + proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", node->debug_id, node->local_strong_refs, node->local_weak_refs); |
| + break; |
| + } |
| + case BC_ATTEMPT_ACQUIRE: |
| + printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n"); |
| + return -EINVAL; |
| + case BC_ACQUIRE_RESULT: |
| + printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n"); |
| + return -EINVAL; |
| + |
| + case BC_FREE_BUFFER: { |
| + void __user *data_ptr; |
| + struct binder_buffer *buffer; |
| + |
| + if (get_user(data_ptr, (void * __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(void *); |
| + |
| + buffer = binder_buffer_lookup(proc, data_ptr); |
| + if (buffer == NULL) { |
| + binder_user_error("binder: %d:%d " |
| + "BC_FREE_BUFFER u%p no match\n", |
| + proc->pid, thread->pid, data_ptr); |
| + break; |
| + } |
| + if (!buffer->allow_user_free) { |
| + binder_user_error("binder: %d:%d " |
| + "BC_FREE_BUFFER u%p matched " |
| + "unreturned buffer\n", |
| + proc->pid, thread->pid, data_ptr); |
| + break; |
| + } |
| + if (binder_debug_mask & BINDER_DEBUG_FREE_BUFFER) |
| + printk(KERN_INFO "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", |
| + proc->pid, thread->pid, data_ptr, buffer->debug_id, |
| + buffer->transaction ? "active" : "finished"); |
| + |
| + if (buffer->transaction) { |
| + buffer->transaction->buffer = NULL; |
| + buffer->transaction = NULL; |
| + } |
| + if (buffer->async_transaction && buffer->target_node) { |
| + BUG_ON(!buffer->target_node->has_async_transaction); |
| + if (list_empty(&buffer->target_node->async_todo)) |
| + buffer->target_node->has_async_transaction = 0; |
| + else |
| + list_move_tail(buffer->target_node->async_todo.next, &thread->todo); |
| + } |
| + binder_transaction_buffer_release(proc, buffer, NULL); |
| + binder_free_buf(proc, buffer); |
| + break; |
| + } |
| + |
| + case BC_TRANSACTION: |
| + case BC_REPLY: { |
| + struct binder_transaction_data tr; |
| + |
| + if (copy_from_user(&tr, ptr, sizeof(tr))) |
| + return -EFAULT; |
| + ptr += sizeof(tr); |
| + binder_transaction(proc, thread, &tr, cmd == BC_REPLY); |
| + break; |
| + } |
| + |
| + case BC_REGISTER_LOOPER: |
| + if (binder_debug_mask & BINDER_DEBUG_THREADS) |
| + printk(KERN_INFO "binder: %d:%d BC_REGISTER_LOOPER\n", |
| + proc->pid, thread->pid); |
| + if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { |
| + thread->looper |= BINDER_LOOPER_STATE_INVALID; |
| + binder_user_error("binder: %d:%d ERROR:" |
| + " BC_REGISTER_LOOPER called " |
| + "after BC_ENTER_LOOPER\n", |
| + proc->pid, thread->pid); |
| + } else if (proc->requested_threads == 0) { |
| + thread->looper |= BINDER_LOOPER_STATE_INVALID; |
| + binder_user_error("binder: %d:%d ERROR:" |
| + " BC_REGISTER_LOOPER called " |
| + "without request\n", |
| + proc->pid, thread->pid); |
| + } else { |
| + proc->requested_threads--; |
| + proc->requested_threads_started++; |
| + } |
| + thread->looper |= BINDER_LOOPER_STATE_REGISTERED; |
| + break; |
| + case BC_ENTER_LOOPER: |
| + if (binder_debug_mask & BINDER_DEBUG_THREADS) |
| + printk(KERN_INFO "binder: %d:%d BC_ENTER_LOOPER\n", |
| + proc->pid, thread->pid); |
| + if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { |
| + thread->looper |= BINDER_LOOPER_STATE_INVALID; |
| + binder_user_error("binder: %d:%d ERROR:" |
| + " BC_ENTER_LOOPER called after " |
| + "BC_REGISTER_LOOPER\n", |
| + proc->pid, thread->pid); |
| + } |
| + thread->looper |= BINDER_LOOPER_STATE_ENTERED; |
| + break; |
| + case BC_EXIT_LOOPER: |
| + if (binder_debug_mask & BINDER_DEBUG_THREADS) |
| + printk(KERN_INFO "binder: %d:%d BC_EXIT_LOOPER\n", |
| + proc->pid, thread->pid); |
| + thread->looper |= BINDER_LOOPER_STATE_EXITED; |
| + break; |
| + |
| + case BC_REQUEST_DEATH_NOTIFICATION: |
| + case BC_CLEAR_DEATH_NOTIFICATION: { |
| + uint32_t target; |
| + void __user *cookie; |
| + struct binder_ref *ref; |
| + struct binder_ref_death *death; |
| + |
| + if (get_user(target, (uint32_t __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(uint32_t); |
| + if (get_user(cookie, (void __user * __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(void *); |
| + ref = binder_get_ref(proc, target); |
| + if (ref == NULL) { |
| + binder_user_error("binder: %d:%d %s " |
| + "invalid ref %d\n", |
| + proc->pid, thread->pid, |
| + cmd == BC_REQUEST_DEATH_NOTIFICATION ? |
| + "BC_REQUEST_DEATH_NOTIFICATION" : |
| + "BC_CLEAR_DEATH_NOTIFICATION", |
| + target); |
| + break; |
| + } |
| + |
| + if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION) |
| + printk(KERN_INFO "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n", |
| + proc->pid, thread->pid, |
| + cmd == BC_REQUEST_DEATH_NOTIFICATION ? |
| + "BC_REQUEST_DEATH_NOTIFICATION" : |
| + "BC_CLEAR_DEATH_NOTIFICATION", |
| + cookie, ref->debug_id, ref->desc, |
| + ref->strong, ref->weak, ref->node->debug_id); |
| + |
| + if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { |
| + if (ref->death) { |
| +					binder_user_error("binder: %d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", |
| + proc->pid, thread->pid); |
| + break; |
| + } |
| + death = kzalloc(sizeof(*death), GFP_KERNEL); |
| + if (death == NULL) { |
| + thread->return_error = BR_ERROR; |
| + if (binder_debug_mask & BINDER_DEBUG_FAILED_TRANSACTION) |
| + printk(KERN_INFO "binder: %d:%d " |
| + "BC_REQUEST_DEATH_NOTIFICATION failed\n", |
| + proc->pid, thread->pid); |
| + break; |
| + } |
| + binder_stats.obj_created[BINDER_STAT_DEATH]++; |
| + INIT_LIST_HEAD(&death->work.entry); |
| + death->cookie = cookie; |
| + ref->death = death; |
| + if (ref->node->proc == NULL) { |
| + ref->death->work.type = BINDER_WORK_DEAD_BINDER; |
| + if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { |
| + list_add_tail(&ref->death->work.entry, &thread->todo); |
| + } else { |
| + list_add_tail(&ref->death->work.entry, &proc->todo); |
| + wake_up_interruptible(&proc->wait); |
| + } |
| + } |
| + } else { |
| + if (ref->death == NULL) { |
| +					binder_user_error("binder: %d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", |
| + proc->pid, thread->pid); |
| + break; |
| + } |
| + death = ref->death; |
| + if (death->cookie != cookie) { |
| +					binder_user_error("binder: %d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %p != %p\n", |
| + proc->pid, thread->pid, |
| + death->cookie, cookie); |
| + break; |
| + } |
| + ref->death = NULL; |
| + if (list_empty(&death->work.entry)) { |
| + death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; |
| + if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { |
| + list_add_tail(&death->work.entry, &thread->todo); |
| + } else { |
| + list_add_tail(&death->work.entry, &proc->todo); |
| + wake_up_interruptible(&proc->wait); |
| + } |
| + } else { |
| + BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); |
| + death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; |
| + } |
| + } |
| + } break; |
| + case BC_DEAD_BINDER_DONE: { |
| + struct binder_work *w; |
| + void __user *cookie; |
| + struct binder_ref_death *death = NULL; |
| + if (get_user(cookie, (void __user * __user *)ptr)) |
| + return -EFAULT; |
| + |
| + ptr += sizeof(void *); |
| + list_for_each_entry(w, &proc->delivered_death, entry) { |
| + struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); |
| + if (tmp_death->cookie == cookie) { |
| + death = tmp_death; |
| + break; |
| + } |
| + } |
| + if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) |
| + printk(KERN_INFO "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n", |
| + proc->pid, thread->pid, cookie, death); |
| + if (death == NULL) { |
| +				binder_user_error("binder: %d:%d BC_DEAD_BINDER_DONE %p not found\n", |
| + proc->pid, thread->pid, cookie); |
| + break; |
| + } |
| + |
| + list_del_init(&death->work.entry); |
| + if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { |
| + death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; |
| + if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { |
| + list_add_tail(&death->work.entry, &thread->todo); |
| + } else { |
| + list_add_tail(&death->work.entry, &proc->todo); |
| + wake_up_interruptible(&proc->wait); |
| + } |
| + } |
| + } break; |
| + |
| + default: |
| + printk(KERN_ERR "binder: %d:%d unknown command %d\n", |
| + proc->pid, thread->pid, cmd); |
| + return -EINVAL; |
| + } |
| + *consumed = ptr - buffer; |
| + } |
| + return 0; |
| +} |
| + |
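| +/* Record a BR_* return code in the global, per-proc and per-thread stats. */ |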
| +void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, |
| + uint32_t cmd) |
| +{ |
| + if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { |
| + binder_stats.br[_IOC_NR(cmd)]++; |
| + proc->stats.br[_IOC_NR(cmd)]++; |
| + thread->stats.br[_IOC_NR(cmd)]++; |
| + } |
| +} |
| + |
| +static int binder_has_proc_work(struct binder_proc *proc, |
| + struct binder_thread *thread) |
| +{ |
| + return !list_empty(&proc->todo) || |
| + (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); |
| +} |
| + |
| +static int binder_has_thread_work(struct binder_thread *thread) |
| +{ |
| + return !list_empty(&thread->todo) || thread->return_error != BR_OK || |
| + (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); |
| +} |
| + |
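| +/* |
| + * Fill the userspace read buffer with BR_* work items.  A BR_NOOP is |
| + * written first so the buffer never comes back empty; pending return |
| + * errors are delivered before any queued work.  A thread with no |
| + * transaction stack and no private todo work waits (exclusively) on |
| + * the process wait queue, otherwise on its own.  When the thread pool |
| + * needs to grow, the leading word of the buffer is overwritten with |
| + * BR_SPAWN_LOOPER. |
| + */ |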
| +static int binder_thread_read(struct binder_proc *proc, |
| + struct binder_thread *thread, |
| + void __user *buffer, int size, |
| + signed long *consumed, int non_block) |
| +{ |
| + void __user *ptr = buffer + *consumed; |
| + void __user *end = buffer + size; |
| + |
| + int ret = 0; |
| + int wait_for_proc_work; |
| + |
| + if (*consumed == 0) { |
| + if (put_user(BR_NOOP, (uint32_t __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(uint32_t); |
| + } |
| + |
| +retry: |
| + wait_for_proc_work = thread->transaction_stack == NULL && |
| + list_empty(&thread->todo); |
| + |
| + if (thread->return_error != BR_OK && ptr < end) { |
| + if (thread->return_error2 != BR_OK) { |
| + if (put_user(thread->return_error2, (uint32_t __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(uint32_t); |
| + if (ptr == end) |
| + goto done; |
| + thread->return_error2 = BR_OK; |
| + } |
| + if (put_user(thread->return_error, (uint32_t __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(uint32_t); |
| + thread->return_error = BR_OK; |
| + goto done; |
| + } |
| + |
| + |
| + thread->looper |= BINDER_LOOPER_STATE_WAITING; |
| + if (wait_for_proc_work) |
| + proc->ready_threads++; |
| + mutex_unlock(&binder_lock); |
| + if (wait_for_proc_work) { |
| + if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | |
| + BINDER_LOOPER_STATE_ENTERED))) { |
| +			binder_user_error("binder: %d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", |
| + proc->pid, thread->pid, thread->looper); |
| + wait_event_interruptible(binder_user_error_wait, |
| + binder_stop_on_user_error < 2); |
| + } |
| + binder_set_nice(proc->default_priority); |
| + if (non_block) { |
| + if (!binder_has_proc_work(proc, thread)) |
| + ret = -EAGAIN; |
| + } else |
| + ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread)); |
| + } else { |
| + if (non_block) { |
| + if (!binder_has_thread_work(thread)) |
| + ret = -EAGAIN; |
| + } else |
| + ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread)); |
| + } |
| + mutex_lock(&binder_lock); |
| + if (wait_for_proc_work) |
| + proc->ready_threads--; |
| + thread->looper &= ~BINDER_LOOPER_STATE_WAITING; |
| + |
| + if (ret) |
| + return ret; |
| + |
| + while (1) { |
| + uint32_t cmd; |
| + struct binder_transaction_data tr; |
| + struct binder_work *w; |
| + struct binder_transaction *t = NULL; |
| + |
| + if (!list_empty(&thread->todo)) |
| + w = list_first_entry(&thread->todo, struct binder_work, entry); |
| + else if (!list_empty(&proc->todo) && wait_for_proc_work) |
| + w = list_first_entry(&proc->todo, struct binder_work, entry); |
| + else { |
| + if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */ |
| + goto retry; |
| + break; |
| + } |
| + |
| + if (end - ptr < sizeof(tr) + 4) |
| + break; |
| + |
| + switch (w->type) { |
| + case BINDER_WORK_TRANSACTION: { |
| + t = container_of(w, struct binder_transaction, work); |
| + } break; |
| + case BINDER_WORK_TRANSACTION_COMPLETE: { |
| + cmd = BR_TRANSACTION_COMPLETE; |
| + if (put_user(cmd, (uint32_t __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(uint32_t); |
| + |
| + binder_stat_br(proc, thread, cmd); |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION_COMPLETE) |
| + printk(KERN_INFO "binder: %d:%d BR_TRANSACTION_COMPLETE\n", |
| + proc->pid, thread->pid); |
| + |
| + list_del(&w->entry); |
| + kfree(w); |
| + binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++; |
| + } break; |
| + case BINDER_WORK_NODE: { |
| + struct binder_node *node = container_of(w, struct binder_node, work); |
| + uint32_t cmd = BR_NOOP; |
| + const char *cmd_name; |
| + int strong = node->internal_strong_refs || node->local_strong_refs; |
| + int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; |
| + if (weak && !node->has_weak_ref) { |
| + cmd = BR_INCREFS; |
| + cmd_name = "BR_INCREFS"; |
| + node->has_weak_ref = 1; |
| + node->pending_weak_ref = 1; |
| + node->local_weak_refs++; |
| + } else if (strong && !node->has_strong_ref) { |
| + cmd = BR_ACQUIRE; |
| + cmd_name = "BR_ACQUIRE"; |
| + node->has_strong_ref = 1; |
| + node->pending_strong_ref = 1; |
| + node->local_strong_refs++; |
| + } else if (!strong && node->has_strong_ref) { |
| + cmd = BR_RELEASE; |
| + cmd_name = "BR_RELEASE"; |
| + node->has_strong_ref = 0; |
| + } else if (!weak && node->has_weak_ref) { |
| + cmd = BR_DECREFS; |
| + cmd_name = "BR_DECREFS"; |
| + node->has_weak_ref = 0; |
| + } |
| + if (cmd != BR_NOOP) { |
| + if (put_user(cmd, (uint32_t __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(uint32_t); |
| + if (put_user(node->ptr, (void * __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(void *); |
| + if (put_user(node->cookie, (void * __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(void *); |
| + |
| + binder_stat_br(proc, thread, cmd); |
| + if (binder_debug_mask & BINDER_DEBUG_USER_REFS) |
| + printk(KERN_INFO "binder: %d:%d %s %d u%p c%p\n", |
| + proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie); |
| + } else { |
| + list_del_init(&w->entry); |
| + if (!weak && !strong) { |
| + if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS) |
| + printk(KERN_INFO "binder: %d:%d node %d u%p c%p deleted\n", |
| + proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie); |
| + rb_erase(&node->rb_node, &proc->nodes); |
| + kfree(node); |
| + binder_stats.obj_deleted[BINDER_STAT_NODE]++; |
| + } else { |
| + if (binder_debug_mask & BINDER_DEBUG_INTERNAL_REFS) |
| + printk(KERN_INFO "binder: %d:%d node %d u%p c%p state unchanged\n", |
| + proc->pid, thread->pid, node->debug_id, node->ptr, node->cookie); |
| + } |
| + } |
| + } break; |
| + case BINDER_WORK_DEAD_BINDER: |
| + case BINDER_WORK_DEAD_BINDER_AND_CLEAR: |
| + case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { |
| + struct binder_ref_death *death; |
| + uint32_t cmd; |
| + |
| + death = container_of(w, struct binder_ref_death, work); |
| + if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) |
| + cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; |
| + else |
| + cmd = BR_DEAD_BINDER; |
| + if (put_user(cmd, (uint32_t __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(uint32_t); |
| + if (put_user(death->cookie, (void * __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(void *); |
| + if (binder_debug_mask & BINDER_DEBUG_DEATH_NOTIFICATION) |
| + printk(KERN_INFO "binder: %d:%d %s %p\n", |
| + proc->pid, thread->pid, |
| + cmd == BR_DEAD_BINDER ? |
| + "BR_DEAD_BINDER" : |
| + "BR_CLEAR_DEATH_NOTIFICATION_DONE", |
| + death->cookie); |
| + |
| + if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { |
| + list_del(&w->entry); |
| + kfree(death); |
| + binder_stats.obj_deleted[BINDER_STAT_DEATH]++; |
| + } else |
| + list_move(&w->entry, &proc->delivered_death); |
| + if (cmd == BR_DEAD_BINDER) |
| + goto done; /* DEAD_BINDER notifications can cause transactions */ |
| + } break; |
| + } |
| + |
| + if (!t) |
| + continue; |
| + |
| + BUG_ON(t->buffer == NULL); |
| + if (t->buffer->target_node) { |
| + struct binder_node *target_node = t->buffer->target_node; |
| + tr.target.ptr = target_node->ptr; |
| + tr.cookie = target_node->cookie; |
| + t->saved_priority = task_nice(current); |
| + if (t->priority < target_node->min_priority && |
| + !(t->flags & TF_ONE_WAY)) |
| + binder_set_nice(t->priority); |
| + else if (!(t->flags & TF_ONE_WAY) || |
| + t->saved_priority > target_node->min_priority) |
| + binder_set_nice(target_node->min_priority); |
| + cmd = BR_TRANSACTION; |
| + } else { |
| + tr.target.ptr = NULL; |
| + tr.cookie = NULL; |
| + cmd = BR_REPLY; |
| + } |
| + tr.code = t->code; |
| + tr.flags = t->flags; |
| + tr.sender_euid = t->sender_euid; |
| + |
| + if (t->from) { |
| + struct task_struct *sender = t->from->proc->tsk; |
| + tr.sender_pid = task_tgid_nr_ns(sender, |
| + current->nsproxy->pid_ns); |
| + } else { |
| + tr.sender_pid = 0; |
| + } |
| + |
| + tr.data_size = t->buffer->data_size; |
| + tr.offsets_size = t->buffer->offsets_size; |
| + tr.data.ptr.buffer = (void *)t->buffer->data + |
| + proc->user_buffer_offset; |
| + tr.data.ptr.offsets = tr.data.ptr.buffer + |
| + ALIGN(t->buffer->data_size, |
| + sizeof(void *)); |
| + |
| + if (put_user(cmd, (uint32_t __user *)ptr)) |
| + return -EFAULT; |
| + ptr += sizeof(uint32_t); |
| + if (copy_to_user(ptr, &tr, sizeof(tr))) |
| + return -EFAULT; |
| + ptr += sizeof(tr); |
| + |
| + binder_stat_br(proc, thread, cmd); |
| + if (binder_debug_mask & BINDER_DEBUG_TRANSACTION) |
| +			printk(KERN_INFO "binder: %d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %p-%p\n", |
| + proc->pid, thread->pid, |
| + (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : |
| + "BR_REPLY", |
| + t->debug_id, t->from ? t->from->proc->pid : 0, |
| + t->from ? t->from->pid : 0, cmd, |
| + t->buffer->data_size, t->buffer->offsets_size, |
| + tr.data.ptr.buffer, tr.data.ptr.offsets); |
| + |
| + list_del(&t->work.entry); |
| + t->buffer->allow_user_free = 1; |
| + if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { |
| + t->to_parent = thread->transaction_stack; |
| + t->to_thread = thread; |
| + thread->transaction_stack = t; |
| + } else { |
| + t->buffer->transaction = NULL; |
| + kfree(t); |
| + binder_stats.obj_deleted[BINDER_STAT_TRANSACTION]++; |
| + } |
| + break; |
| + } |
| + |
| +done: |
| + |
| + *consumed = ptr - buffer; |
| + if (proc->requested_threads + proc->ready_threads == 0 && |
| + proc->requested_threads_started < proc->max_threads && |
| + (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | |
| +	     BINDER_LOOPER_STATE_ENTERED))) { |
| +		/* the user-space code fails to spawn a new thread |
| +		 * if we leave this out */ |
| + proc->requested_threads++; |
| + if (binder_debug_mask & BINDER_DEBUG_THREADS) |
| + printk(KERN_INFO "binder: %d:%d BR_SPAWN_LOOPER\n", |
| + proc->pid, thread->pid); |
| + if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) |
| + return -EFAULT; |
| + } |
| + return 0; |
| +} |
| + |
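| +/* |
| + * Drain a work list that will never be read again: pending two-way |
| + * transactions are answered with BR_DEAD_REPLY, and |
| + * transaction-complete entries are simply freed. |
| + */ |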
| +static void binder_release_work(struct list_head *list) |
| +{ |
| + struct binder_work *w; |
| + while (!list_empty(list)) { |
| + w = list_first_entry(list, struct binder_work, entry); |
| + list_del_init(&w->entry); |
| + switch (w->type) { |
| + case BINDER_WORK_TRANSACTION: { |
| + struct binder_transaction *t; |
| + |
| + t = container_of(w, struct binder_transaction, work); |
| + if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) |
| + binder_send_failed_reply(t, BR_DEAD_REPLY); |
| + } break; |
| + case BINDER_WORK_TRANSACTION_COMPLETE: { |
| + kfree(w); |
| + binder_stats.obj_deleted[BINDER_STAT_TRANSACTION_COMPLETE]++; |
| + } break; |
| + default: |
| + break; |
| + } |
| + } |
| + |
| +} |
| + |
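| +/* |
| + * Find the binder_thread for current in the proc's thread rb-tree, |
| + * creating and inserting it on first use.  Returns NULL only when the |
| + * allocation fails. |
| + */ |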
| +static struct binder_thread *binder_get_thread(struct binder_proc *proc) |
| +{ |
| + struct binder_thread *thread = NULL; |
| + struct rb_node *parent = NULL; |
| + struct rb_node **p = &proc->threads.rb_node; |
| + |
| + while (*p) { |
| + parent = *p; |
| + thread = rb_entry(parent, struct binder_thread, rb_node); |
| + |
| + if (current->pid < thread->pid) |
| + p = &(*p)->rb_left; |
| + else if (current->pid > thread->pid) |
| + p = &(*p)->rb_right; |
| + else |
| + break; |
| + } |
| + if (*p == NULL) { |
| + thread = kzalloc(sizeof(*thread), GFP_KERNEL); |
| + if (thread == NULL) |
| + return NULL; |
| + binder_stats.obj_created[BINDER_STAT_THREAD]++; |
| + thread->proc = proc; |
| + thread->pid = current->pid; |
| + init_waitqueue_head(&thread->wait); |
| + INIT_LIST_HEAD(&thread->todo); |
| + rb_link_node(&thread->rb_node, parent, p); |
| + rb_insert_color(&thread->rb_node, &proc->threads); |
| + thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; |
| + thread->return_error = BR_OK; |
| + thread->return_error2 = BR_OK; |
| + } |
| + return thread; |
| +} |
| + |
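| +/* |
| + * Remove a thread from its proc and unhook it from every transaction |
| + * still on its stack; an incoming transaction that expected a reply is |
| + * answered with BR_DEAD_REPLY.  Returns the number of transactions |
| + * that were still active. |
| + */ |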
| +static int binder_free_thread(struct binder_proc *proc, |
| + struct binder_thread *thread) |
| +{ |
| + struct binder_transaction *t; |
| + struct binder_transaction *send_reply = NULL; |
| + int active_transactions = 0; |
| + |
| + rb_erase(&thread->rb_node, &proc->threads); |
| + t = thread->transaction_stack; |
| + if (t && t->to_thread == thread) |
| + send_reply = t; |
| + while (t) { |
| + active_transactions++; |
| + if (binder_debug_mask & BINDER_DEBUG_DEAD_TRANSACTION) |
| + printk(KERN_INFO "binder: release %d:%d transaction %d " |
| + "%s, still active\n", proc->pid, thread->pid, |
| + t->debug_id, |
| + (t->to_thread == thread) ? "in" : "out"); |
| + if (t->to_thread == thread) { |
| + t->to_proc = NULL; |
| + t->to_thread = NULL; |
| + if (t->buffer) { |
| + t->buffer->transaction = NULL; |
| + t->buffer = NULL; |
| + } |
| + t = t->to_parent; |
| + } else if (t->from == thread) { |
| + t->from = NULL; |
| + t = t->from_parent; |
| + } else |
| + BUG(); |
| + } |
| + if (send_reply) |
| + binder_send_failed_reply(send_reply, BR_DEAD_REPLY); |
| + binder_release_work(&thread->todo); |
| + kfree(thread); |
| + binder_stats.obj_deleted[BINDER_STAT_THREAD]++; |
| + return active_transactions; |
| +} |
| + |
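| +/* |
| + * poll() support: report POLLIN when the relevant work list is |
| + * non-empty, the process list if this thread is idle, otherwise the |
| + * thread's own list. |
| + */ |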
| +static unsigned int binder_poll(struct file *filp, |
| + struct poll_table_struct *wait) |
| +{ |
| + struct binder_proc *proc = filp->private_data; |
| + struct binder_thread *thread = NULL; |
| + int wait_for_proc_work; |
| + |
| + mutex_lock(&binder_lock); |
| +	thread = binder_get_thread(proc); |
| +	if (thread == NULL) { |
| +		/* thread allocation failed; nothing to wait on */ |
| +		mutex_unlock(&binder_lock); |
| +		return POLLERR; |
| +	} |
| + |
| + wait_for_proc_work = thread->transaction_stack == NULL && |
| + list_empty(&thread->todo) && thread->return_error == BR_OK; |
| + mutex_unlock(&binder_lock); |
| + |
| + if (wait_for_proc_work) { |
| + if (binder_has_proc_work(proc, thread)) |
| + return POLLIN; |
| + poll_wait(filp, &proc->wait, wait); |
| + if (binder_has_proc_work(proc, thread)) |
| + return POLLIN; |
| + } else { |
| + if (binder_has_thread_work(thread)) |
| + return POLLIN; |
| + poll_wait(filp, &thread->wait, wait); |
| + if (binder_has_thread_work(thread)) |
| + return POLLIN; |
| + } |
| + return 0; |
| +} |
| + |
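| +/* |
| + * Main entry point.  BINDER_WRITE_READ runs the write half first and |
| + * then services the read half (which may block); the other commands |
| + * size the thread pool, claim the context manager node, retire the |
| + * calling thread, or report the protocol version. |
| + */ |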
| +static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) |
| +{ |
| + int ret; |
| + struct binder_proc *proc = filp->private_data; |
| + struct binder_thread *thread; |
| + unsigned int size = _IOC_SIZE(cmd); |
| + void __user *ubuf = (void __user *)arg; |
| + |
| + /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ |
| + |
| + ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); |
| + if (ret) |
| + return ret; |
| + |
| + mutex_lock(&binder_lock); |
| + thread = binder_get_thread(proc); |
| + if (thread == NULL) { |
| + ret = -ENOMEM; |
| + goto err; |
| + } |
| + |
| + switch (cmd) { |
| + case BINDER_WRITE_READ: { |
| + struct binder_write_read bwr; |
| + if (size != sizeof(struct binder_write_read)) { |
| + ret = -EINVAL; |
| + goto err; |
| + } |
| + if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { |
| + ret = -EFAULT; |
| + goto err; |
| + } |
| + if (binder_debug_mask & BINDER_DEBUG_READ_WRITE) |
| + printk(KERN_INFO "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n", |
| + proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, bwr.read_size, bwr.read_buffer); |
| + if (bwr.write_size > 0) { |
| + ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); |
| + if (ret < 0) { |
| + bwr.read_consumed = 0; |
| + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) |
| + ret = -EFAULT; |
| + goto err; |
| + } |
| + } |
| + if (bwr.read_size > 0) { |
| + ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); |
| + if (!list_empty(&proc->todo)) |
| + wake_up_interruptible(&proc->wait); |
| + if (ret < 0) { |
| + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) |
| + ret = -EFAULT; |
| + goto err; |
| + } |
| + } |
| + if (binder_debug_mask & BINDER_DEBUG_READ_WRITE) |
| + printk(KERN_INFO "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n", |
| + proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, bwr.read_consumed, bwr.read_size); |
| + if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { |
| + ret = -EFAULT; |
| + goto err; |
| + } |
| + break; |
| + } |
| + case BINDER_SET_MAX_THREADS: |
| + if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { |
| +			ret = -EFAULT; |
| + goto err; |
| + } |
| + break; |
| + case BINDER_SET_CONTEXT_MGR: |
| + if (binder_context_mgr_node != NULL) { |
| + printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n"); |
| + ret = -EBUSY; |
| + goto err; |
| + } |
| + if (binder_context_mgr_uid != -1) { |
| + if (binder_context_mgr_uid != current->cred->euid) { |
| +				printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", |
| + current->cred->euid, |
| + binder_context_mgr_uid); |
| + ret = -EPERM; |
| + goto err; |
| + } |
| + } else |
| + binder_context_mgr_uid = current->cred->euid; |
| + binder_context_mgr_node = binder_new_node(proc, NULL, NULL); |
| + if (binder_context_mgr_node == NULL) { |
| + ret = -ENOMEM; |
| + goto err; |
| + } |
| + binder_context_mgr_node->local_weak_refs++; |
| + binder_context_mgr_node->local_strong_refs++; |
| + binder_context_mgr_node->has_strong_ref = 1; |
| + binder_context_mgr_node->has_weak_ref = 1; |
| + break; |
| + case BINDER_THREAD_EXIT: |
| + if (binder_debug_mask & BINDER_DEBUG_THREADS) |
| + printk(KERN_INFO "binder: %d:%d exit\n", |
| + proc->pid, thread->pid); |
| + binder_free_thread(proc, thread); |
| + thread = NULL; |
| + break; |
| + case BINDER_VERSION: |
| + if (size != sizeof(struct binder_version)) { |
| + ret = -EINVAL; |
| + goto err; |
| + } |
| + if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) { |
| +			ret = -EFAULT; |
| + goto err; |
| + } |
| + break; |
| + default: |
| + ret = -EINVAL; |
| + goto err; |
| + } |
| + ret = 0; |
| +err: |
| + if (thread) |
| + thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; |
| + mutex_unlock(&binder_lock); |
| + wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); |
| + if (ret && ret != -ERESTARTSYS) |
| + printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); |
| + return ret; |
| +} |
| + |
| +static void binder_vma_open(struct vm_area_struct *vma) |
| +{ |
| + struct binder_proc *proc = vma->vm_private_data; |
| + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) |
| + printk(KERN_INFO |
| + "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", |
| + proc->pid, vma->vm_start, vma->vm_end, |
| + (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, |
| + (unsigned long)pgprot_val(vma->vm_page_prot)); |
| + dump_stack(); |
| +} |
| + |
| +static void binder_vma_close(struct vm_area_struct *vma) |
| +{ |
| + struct binder_proc *proc = vma->vm_private_data; |
| + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) |
| + printk(KERN_INFO |
| + "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", |
| + proc->pid, vma->vm_start, vma->vm_end, |
| + (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, |
| + (unsigned long)pgprot_val(vma->vm_page_prot)); |
| + proc->vma = NULL; |
| + binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); |
| +} |
| + |
| +static struct vm_operations_struct binder_vm_ops = { |
| + .open = binder_vma_open, |
| + .close = binder_vma_close, |
| +}; |
| + |
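| +/* |
| + * Map the transaction buffer (capped at 4MB).  The same physical pages |
| + * back both the userspace vma and a kernel vm area; user_buffer_offset |
| + * records the constant delta between the two mappings so buffer |
| + * addresses can be translated in either direction. |
| + */ |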
| +static int binder_mmap(struct file *filp, struct vm_area_struct *vma) |
| +{ |
| + int ret; |
| + struct vm_struct *area; |
| + struct binder_proc *proc = filp->private_data; |
| + const char *failure_string; |
| + struct binder_buffer *buffer; |
| + |
| + if ((vma->vm_end - vma->vm_start) > SZ_4M) |
| + vma->vm_end = vma->vm_start + SZ_4M; |
| + |
| + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) |
| + printk(KERN_INFO |
| + "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", |
| + proc->pid, vma->vm_start, vma->vm_end, |
| + (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, |
| + (unsigned long)pgprot_val(vma->vm_page_prot)); |
| + |
| + if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { |
| + ret = -EPERM; |
| + failure_string = "bad vm_flags"; |
| + goto err_bad_arg; |
| + } |
| + vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; |
| + |
| + if (proc->buffer) { |
| + ret = -EBUSY; |
| + failure_string = "already mapped"; |
| + goto err_already_mapped; |
| + } |
| + |
| + area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); |
| + if (area == NULL) { |
| + ret = -ENOMEM; |
| + failure_string = "get_vm_area"; |
| + goto err_get_vm_area_failed; |
| + } |
| + proc->buffer = area->addr; |
| + proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; |
| + |
| +#ifdef CONFIG_CPU_CACHE_VIPT |
| + if (cache_is_vipt_aliasing()) { |
| + while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { |
| + printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); |
| + vma->vm_start += PAGE_SIZE; |
| + } |
| + } |
| +#endif |
| + proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); |
| + if (proc->pages == NULL) { |
| + ret = -ENOMEM; |
| + failure_string = "alloc page array"; |
| + goto err_alloc_pages_failed; |
| + } |
| + proc->buffer_size = vma->vm_end - vma->vm_start; |
| + |
| + vma->vm_ops = &binder_vm_ops; |
| + vma->vm_private_data = proc; |
| + |
| + if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { |
| + ret = -ENOMEM; |
| + failure_string = "alloc small buf"; |
| + goto err_alloc_small_buf_failed; |
| + } |
| + buffer = proc->buffer; |
| + INIT_LIST_HEAD(&proc->buffers); |
| + list_add(&buffer->entry, &proc->buffers); |
| + buffer->free = 1; |
| + binder_insert_free_buffer(proc, buffer); |
| + proc->free_async_space = proc->buffer_size / 2; |
| + barrier(); |
| + proc->files = get_files_struct(current); |
| + proc->vma = vma; |
| + |
| + /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", |
| + proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ |
| + return 0; |
| + |
| +err_alloc_small_buf_failed: |
| + kfree(proc->pages); |
| + proc->pages = NULL; |
| +err_alloc_pages_failed: |
| + vfree(proc->buffer); |
| + proc->buffer = NULL; |
| +err_get_vm_area_failed: |
| +err_already_mapped: |
| +err_bad_arg: |
| + printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", |
| + proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); |
| + return ret; |
| +} |
| + |
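| +/* |
| + * Set up the per-process binder_proc state on open and publish a |
| + * /proc debug read entry named after the pid. |
| + */ |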
| +static int binder_open(struct inode *nodp, struct file *filp) |
| +{ |
| + struct binder_proc *proc; |
| + |
| + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) |
| + printk(KERN_INFO "binder_open: %d:%d\n", |
| + current->group_leader->pid, current->pid); |
| + |
| + proc = kzalloc(sizeof(*proc), GFP_KERNEL); |
| + if (proc == NULL) |
| + return -ENOMEM; |
| + get_task_struct(current); |
| + proc->tsk = current; |
| + INIT_LIST_HEAD(&proc->todo); |
| + init_waitqueue_head(&proc->wait); |
| + proc->default_priority = task_nice(current); |
| + mutex_lock(&binder_lock); |
| + binder_stats.obj_created[BINDER_STAT_PROC]++; |
| + hlist_add_head(&proc->proc_node, &binder_procs); |
| + proc->pid = current->group_leader->pid; |
| + INIT_LIST_HEAD(&proc->delivered_death); |
| + filp->private_data = proc; |
| + mutex_unlock(&binder_lock); |
| + |
| + if (binder_proc_dir_entry_proc) { |
| + char strbuf[11]; |
| + snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); |
| + remove_proc_entry(strbuf, binder_proc_dir_entry_proc); |
| + create_proc_read_entry(strbuf, S_IRUGO, |
| + binder_proc_dir_entry_proc, |
| + binder_read_proc_proc, proc); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +static int binder_flush(struct file *filp, fl_owner_t id) |
| +{ |
| + struct binder_proc *proc = filp->private_data; |
| + |
| + binder_defer_work(proc, BINDER_DEFERRED_FLUSH); |
| + |
| + return 0; |
| +} |
| + |
| +static void binder_deferred_flush(struct binder_proc *proc) |
| +{ |
| + struct rb_node *n; |
| + int wake_count = 0; |
| + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { |
| + struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); |
| + thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; |
| + if (thread->looper & BINDER_LOOPER_STATE_WAITING) { |
| + wake_up_interruptible(&thread->wait); |
| + wake_count++; |
| + } |
| + } |
| + wake_up_interruptible_all(&proc->wait); |
| + |
| + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) |
| + printk(KERN_INFO "binder_flush: %d woke %d threads\n", proc->pid, wake_count); |
| +} |
| + |
| +static int binder_release(struct inode *nodp, struct file *filp) |
| +{ |
| + struct binder_proc *proc = filp->private_data; |
| + if (binder_proc_dir_entry_proc) { |
| + char strbuf[11]; |
| + snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); |
| + remove_proc_entry(strbuf, binder_proc_dir_entry_proc); |
| + } |
| + |
| + binder_defer_work(proc, BINDER_DEFERRED_RELEASE); |
| + |
| + return 0; |
| +} |
| + |
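| +/* |
| + * Final teardown, run from the deferred workqueue after the fd and |
| + * mapping are gone: free all threads, move still-referenced nodes to |
| + * the dead-nodes list (queueing death notifications for their refs), |
| + * then release refs, pending work, buffers and pages. |
| + */ |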
| +static void binder_deferred_release(struct binder_proc *proc) |
| +{ |
| + struct hlist_node *pos; |
| + struct binder_transaction *t; |
| + struct rb_node *n; |
| + int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; |
| + |
| + BUG_ON(proc->vma); |
| + BUG_ON(proc->files); |
| + |
| + hlist_del(&proc->proc_node); |
| + if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { |
| + if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) |
| + printk(KERN_INFO "binder_release: %d context_mgr_node gone\n", proc->pid); |
| + binder_context_mgr_node = NULL; |
| + } |
| + |
| + threads = 0; |
| + active_transactions = 0; |
| + while ((n = rb_first(&proc->threads))) { |
| + struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); |
| + threads++; |
| + active_transactions += binder_free_thread(proc, thread); |
| + } |
| + nodes = 0; |
| + incoming_refs = 0; |
| + while ((n = rb_first(&proc->nodes))) { |
| + struct binder_node *node = rb_entry(n, struct binder_node, rb_node); |
| + |
| + nodes++; |
| + rb_erase(&node->rb_node, &proc->nodes); |
| + list_del_init(&node->work.entry); |
| + if (hlist_empty(&node->refs)) { |
| + kfree(node); |
| + binder_stats.obj_deleted[BINDER_STAT_NODE]++; |
| + } else { |
| + struct binder_ref *ref; |
| + int death = 0; |
| + |
| + node->proc = NULL; |
| + node->local_strong_refs = 0; |
| + node->local_weak_refs = 0; |
| + hlist_add_head(&node->dead_node, &binder_dead_nodes); |
| + |
| + hlist_for_each_entry(ref, pos, &node->refs, node_entry) { |
| + incoming_refs++; |
| + if (ref->death) { |
| + death++; |
| + if (list_empty(&ref->death->work.entry)) { |
| + ref->death->work.type = BINDER_WORK_DEAD_BINDER; |
| + list_add_tail(&ref->death->work.entry, &ref->proc->todo); |
| + wake_up_interruptible(&ref->proc->wait); |
| + } else |
| + BUG(); |
| + } |
| + } |
| + if (binder_debug_mask & BINDER_DEBUG_DEAD_BINDER) |
| + printk(KERN_INFO "binder: node %d now dead, " |
| + "refs %d, death %d\n", node->debug_id, |
| + incoming_refs, death); |
| + } |
| + } |
| + outgoing_refs = 0; |
| + while ((n = rb_first(&proc->refs_by_desc))) { |
| + struct binder_ref *ref = rb_entry(n, struct binder_ref, |
| + rb_node_desc); |
| + outgoing_refs++; |
| + binder_delete_ref(ref); |
| + } |
| + binder_release_work(&proc->todo); |
| + buffers = 0; |
| + |
| + while ((n = rb_first(&proc->allocated_buffers))) { |
| + struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, |
| + rb_node); |
| + t = buffer->transaction; |
| + if (t) { |
| + t->buffer = NULL; |
| + buffer->transaction = NULL; |
| + printk(KERN_ERR "binder: release proc %d, " |
| + "transaction %d, not freed\n", |
| + proc->pid, t->debug_id); |
| + /*BUG();*/ |
| + } |
| + binder_free_buf(proc, buffer); |
| + buffers++; |
| + } |
| + |
| + binder_stats.obj_deleted[BINDER_STAT_PROC]++; |
| + |
| + page_count = 0; |
| + if (proc->pages) { |
| + int i; |
| + for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { |
| + if (proc->pages[i]) { |
| + if (binder_debug_mask & |
| + BINDER_DEBUG_BUFFER_ALLOC) |
| + printk(KERN_INFO |
| + "binder_release: %d: " |
| + "page %d at %p not freed\n", |
| + proc->pid, i, |
| + proc->buffer + i * PAGE_SIZE); |
| + __free_page(proc->pages[i]); |
| + page_count++; |
| + } |
| + } |
| + kfree(proc->pages); |
| + vfree(proc->buffer); |
| + } |
| + |
| + put_task_struct(proc->tsk); |
| + |
| + if (binder_debug_mask & BINDER_DEBUG_OPEN_CLOSE) |
| + printk(KERN_INFO |
| + "binder_release: %d threads %d, nodes %d (ref %d), " |
| + "refs %d, active transactions %d, buffers %d, " |
| + "pages %d\n", |
| + proc->pid, threads, nodes, incoming_refs, outgoing_refs, |
| + active_transactions, buffers, page_count); |
| + |
| + kfree(proc); |
| +} |
| + |
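| +/* |
| + * Workqueue handler behind binder_defer_work(): drains |
| + * binder_deferred_list one proc at a time under binder_lock.  The |
| + * files_struct, if any, is dropped only after the lock is released. |
| + */ |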
| +static void binder_deferred_func(struct work_struct *work) |
| +{ |
| + struct binder_proc *proc; |
| + struct files_struct *files; |
| + |
| + int defer; |
| + do { |
| + mutex_lock(&binder_lock); |
| + mutex_lock(&binder_deferred_lock); |
| + if (!hlist_empty(&binder_deferred_list)) { |
| + proc = hlist_entry(binder_deferred_list.first, |
| + struct binder_proc, deferred_work_node); |
| + hlist_del_init(&proc->deferred_work_node); |
| + defer = proc->deferred_work; |
| + proc->deferred_work = 0; |
| + } else { |
| + proc = NULL; |
| + defer = 0; |
| + } |
| + mutex_unlock(&binder_deferred_lock); |
| + |
| + files = NULL; |
| + if (defer & BINDER_DEFERRED_PUT_FILES) { |
| + files = proc->files; |
| + if (files) |
| + proc->files = NULL; |
| + } |
| + |
| + if (defer & BINDER_DEFERRED_FLUSH) |
| + binder_deferred_flush(proc); |
| + |
| + if (defer & BINDER_DEFERRED_RELEASE) |
| + binder_deferred_release(proc); /* frees proc */ |
| + |
| + mutex_unlock(&binder_lock); |
| + if (files) |
| + put_files_struct(files); |
| + } while (proc); |
| +} |
| + |
| +static DECLARE_WORK(binder_deferred_work, binder_deferred_func); |
| + |
| +static void binder_defer_work(struct binder_proc *proc, int defer) |
| +{ |
| + mutex_lock(&binder_deferred_lock); |
| + proc->deferred_work |= defer; |
| + if (hlist_unhashed(&proc->deferred_work_node)) { |
| + hlist_add_head(&proc->deferred_work_node, |
| + &binder_deferred_list); |
| + schedule_work(&binder_deferred_work); |
| + } |
| + mutex_unlock(&binder_deferred_lock); |
| +} |
| + |
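| +/* |
| + * The print_binder_* helpers below share one convention: they append |
| + * to buf and return the new position, which may point past end (the |
| + * snprintf result is added unclamped), so callers re-check buf against |
| + * end before writing more. |
| + */ |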
| +static char *print_binder_transaction(char *buf, char *end, const char *prefix, |
| + struct binder_transaction *t) |
| +{ |
| + buf += snprintf(buf, end - buf, |
| + "%s %d: %p from %d:%d to %d:%d code %x " |
| + "flags %x pri %ld r%d", |
| + prefix, t->debug_id, t, |
| + t->from ? t->from->proc->pid : 0, |
| + t->from ? t->from->pid : 0, |
| + t->to_proc ? t->to_proc->pid : 0, |
| + t->to_thread ? t->to_thread->pid : 0, |
| + t->code, t->flags, t->priority, t->need_reply); |
| + if (buf >= end) |
| + return buf; |
| + if (t->buffer == NULL) { |
| + buf += snprintf(buf, end - buf, " buffer free\n"); |
| + return buf; |
| + } |
| + if (t->buffer->target_node) { |
| + buf += snprintf(buf, end - buf, " node %d", |
| + t->buffer->target_node->debug_id); |
| + if (buf >= end) |
| + return buf; |
| + } |
| + buf += snprintf(buf, end - buf, " size %zd:%zd data %p\n", |
| + t->buffer->data_size, t->buffer->offsets_size, |
| + t->buffer->data); |
| + return buf; |
| +} |
| + |
| +static char *print_binder_buffer(char *buf, char *end, const char *prefix, |
| + struct binder_buffer *buffer) |
| +{ |
| + buf += snprintf(buf, end - buf, "%s %d: %p size %zd:%zd %s\n", |
| + prefix, buffer->debug_id, buffer->data, |
| + buffer->data_size, buffer->offsets_size, |
| + buffer->transaction ? "active" : "delivered"); |
| + return buf; |
| +} |
| + |
| +static char *print_binder_work(char *buf, char *end, const char *prefix, |
| + const char *transaction_prefix, |
| + struct binder_work *w) |
| +{ |
| + struct binder_node *node; |
| + struct binder_transaction *t; |
| + |
| + switch (w->type) { |
| + case BINDER_WORK_TRANSACTION: |
| + t = container_of(w, struct binder_transaction, work); |
| + buf = print_binder_transaction(buf, end, transaction_prefix, t); |
| + break; |
| + case BINDER_WORK_TRANSACTION_COMPLETE: |
| + buf += snprintf(buf, end - buf, |
| + "%stransaction complete\n", prefix); |
| + break; |
| + case BINDER_WORK_NODE: |
| + node = container_of(w, struct binder_node, work); |
| + buf += snprintf(buf, end - buf, "%snode work %d: u%p c%p\n", |
| + prefix, node->debug_id, node->ptr, |
| + node->cookie); |
| + break; |
| + case BINDER_WORK_DEAD_BINDER: |
| + buf += snprintf(buf, end - buf, "%shas dead binder\n", prefix); |
| + break; |
| + case BINDER_WORK_DEAD_BINDER_AND_CLEAR: |
| + buf += snprintf(buf, end - buf, |
| + "%shas cleared dead binder\n", prefix); |
| + break; |
| + case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: |
| + buf += snprintf(buf, end - buf, |
| + "%shas cleared death notification\n", prefix); |
| + break; |
| + default: |
| + buf += snprintf(buf, end - buf, "%sunknown work: type %d\n", |
| + prefix, w->type); |
| + break; |
| + } |
| + return buf; |
| +} |
| + |
| +static char *print_binder_thread(char *buf, char *end, |
| + struct binder_thread *thread, |
| + int print_always) |
| +{ |
| + struct binder_transaction *t; |
| + struct binder_work *w; |
| + char *start_buf = buf; |
| + char *header_buf; |
| + |
| + buf += snprintf(buf, end - buf, " thread %d: l %02x\n", |
| + thread->pid, thread->looper); |
| + header_buf = buf; |
| + t = thread->transaction_stack; |
| + while (t) { |
| + if (buf >= end) |
| + break; |
| + if (t->from == thread) { |
| + buf = print_binder_transaction(buf, end, |
| + " outgoing transaction", t); |
| + t = t->from_parent; |
| + } else if (t->to_thread == thread) { |
| + buf = print_binder_transaction(buf, end, |
| + " incoming transaction", t); |
| + t = t->to_parent; |
| + } else { |
| + buf = print_binder_transaction(buf, end, |
| + " bad transaction", t); |
| + t = NULL; |
| + } |
| + } |
| + list_for_each_entry(w, &thread->todo, entry) { |
| + if (buf >= end) |
| + break; |
| + buf = print_binder_work(buf, end, " ", |
| + " pending transaction", w); |
| + } |
| + if (!print_always && buf == header_buf) |
| + buf = start_buf; |
| + return buf; |
| +} |
| + |
| +static char *print_binder_node(char *buf, char *end, struct binder_node *node) |
| +{ |
| + struct binder_ref *ref; |
| + struct hlist_node *pos; |
| + struct binder_work *w; |
| + int count; |
| + |
| + count = 0; |
| + hlist_for_each_entry(ref, pos, &node->refs, node_entry) |
| + count++; |
| + |
| + buf += snprintf(buf, end - buf, |
| + " node %d: u%p c%p hs %d hw %d ls %d lw %d " |
| + "is %d iw %d", |
| + node->debug_id, node->ptr, node->cookie, |
| + node->has_strong_ref, node->has_weak_ref, |
| + node->local_strong_refs, node->local_weak_refs, |
| + node->internal_strong_refs, count); |
| + if (buf >= end) |
| + return buf; |
| + if (count) { |
| + buf += snprintf(buf, end - buf, " proc"); |
| + if (buf >= end) |
| + return buf; |
| + hlist_for_each_entry(ref, pos, &node->refs, node_entry) { |
| + buf += snprintf(buf, end - buf, " %d", ref->proc->pid); |
| + if (buf >= end) |
| + return buf; |
| + } |
| + } |
| + buf += snprintf(buf, end - buf, "\n"); |
| + list_for_each_entry(w, &node->async_todo, entry) { |
| + if (buf >= end) |
| + break; |
| + buf = print_binder_work(buf, end, " ", |
| + " pending async transaction", w); |
| + } |
| + return buf; |
| +} |
| + |
| +static char *print_binder_ref(char *buf, char *end, struct binder_ref *ref) |
| +{ |
| + buf += snprintf(buf, end - buf, |
| + " ref %d: desc %d %snode %d s %d w %d d %p\n", |
| + ref->debug_id, ref->desc, |
| + ref->node->proc ? "" : "dead ", ref->node->debug_id, |
| + ref->strong, ref->weak, ref->death); |
| + return buf; |
| +} |
| + |
| +static char *print_binder_proc(char *buf, char *end, |
| + struct binder_proc *proc, int print_all) |
| +{ |
| + struct binder_work *w; |
| + struct rb_node *n; |
| + char *start_buf = buf; |
| + char *header_buf; |
| + |
| + buf += snprintf(buf, end - buf, "proc %d\n", proc->pid); |
| + header_buf = buf; |
| + |
| + for (n = rb_first(&proc->threads); |
| + n != NULL && buf < end; |
| + n = rb_next(n)) |
| + buf = print_binder_thread(buf, end, |
| + rb_entry(n, struct binder_thread, |
| + rb_node), print_all); |
| + for (n = rb_first(&proc->nodes); |
| + n != NULL && buf < end; |
| + n = rb_next(n)) { |
| + struct binder_node *node = rb_entry(n, struct binder_node, |
| + rb_node); |
| + if (print_all || node->has_async_transaction) |
| + buf = print_binder_node(buf, end, node); |
| + } |
| + if (print_all) { |
| + for (n = rb_first(&proc->refs_by_desc); |
| + n != NULL && buf < end; |
| + n = rb_next(n)) |
| + buf = print_binder_ref(buf, end, |
| + rb_entry(n, struct binder_ref, |
| + rb_node_desc)); |
| + } |
| + for (n = rb_first(&proc->allocated_buffers); |
| + n != NULL && buf < end; |
| + n = rb_next(n)) |
| + buf = print_binder_buffer(buf, end, " buffer", |
| + rb_entry(n, struct binder_buffer, |
| + rb_node)); |
| + list_for_each_entry(w, &proc->todo, entry) { |
| + if (buf >= end) |
| + break; |
| + buf = print_binder_work(buf, end, " ", |
| + " pending transaction", w); |
| + } |
| + list_for_each_entry(w, &proc->delivered_death, entry) { |
| + if (buf >= end) |
| + break; |
| + buf += snprintf(buf, end - buf, |
| + " has delivered dead binder\n"); |
| + break; |
| + } |
| + if (!print_all && buf == header_buf) |
| + buf = start_buf; |
| + return buf; |
| +} |
| + |
| +static const char *binder_return_strings[] = { |
| + "BR_ERROR", |
| + "BR_OK", |
| + "BR_TRANSACTION", |
| + "BR_REPLY", |
| + "BR_ACQUIRE_RESULT", |
| + "BR_DEAD_REPLY", |
| + "BR_TRANSACTION_COMPLETE", |
| + "BR_INCREFS", |
| + "BR_ACQUIRE", |
| + "BR_RELEASE", |
| + "BR_DECREFS", |
| + "BR_ATTEMPT_ACQUIRE", |
| + "BR_NOOP", |
| + "BR_SPAWN_LOOPER", |
| + "BR_FINISHED", |
| + "BR_DEAD_BINDER", |
| + "BR_CLEAR_DEATH_NOTIFICATION_DONE", |
| + "BR_FAILED_REPLY" |
| +}; |
| + |
| +static const char *binder_command_strings[] = { |
| + "BC_TRANSACTION", |
| + "BC_REPLY", |
| + "BC_ACQUIRE_RESULT", |
| + "BC_FREE_BUFFER", |
| + "BC_INCREFS", |
| + "BC_ACQUIRE", |
| + "BC_RELEASE", |
| + "BC_DECREFS", |
| + "BC_INCREFS_DONE", |
| + "BC_ACQUIRE_DONE", |
| + "BC_ATTEMPT_ACQUIRE", |
| + "BC_REGISTER_LOOPER", |
| + "BC_ENTER_LOOPER", |
| + "BC_EXIT_LOOPER", |
| + "BC_REQUEST_DEATH_NOTIFICATION", |
| + "BC_CLEAR_DEATH_NOTIFICATION", |
| + "BC_DEAD_BINDER_DONE" |
| +}; |
| + |
| +static const char *binder_objstat_strings[] = { |
| + "proc", |
| + "thread", |
| + "node", |
| + "ref", |
| + "death", |
| + "transaction", |
| + "transaction_complete" |
| +}; |
| + |
| +static char *print_binder_stats(char *buf, char *end, const char *prefix, |
| + struct binder_stats *stats) |
| +{ |
| + int i; |
| + |
| + BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != |
| + ARRAY_SIZE(binder_command_strings)); |
| + for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { |
| + if (stats->bc[i]) |
| + buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix, |
| + binder_command_strings[i], |
| + stats->bc[i]); |
| + if (buf >= end) |
| + return buf; |
| + } |
| + |
| + BUILD_BUG_ON(ARRAY_SIZE(stats->br) != |
| + ARRAY_SIZE(binder_return_strings)); |
| + for (i = 0; i < ARRAY_SIZE(stats->br); i++) { |
| + if (stats->br[i]) |
| + buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix, |
| + binder_return_strings[i], stats->br[i]); |
| + if (buf >= end) |
| + return buf; |
| + } |
| + |
| + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != |
| + ARRAY_SIZE(binder_objstat_strings)); |
| + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != |
| + ARRAY_SIZE(stats->obj_deleted)); |
| + for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { |
| + if (stats->obj_created[i] || stats->obj_deleted[i]) |
| + buf += snprintf(buf, end - buf, |
| + "%s%s: active %d total %d\n", prefix, |
| + binder_objstat_strings[i], |
| + stats->obj_created[i] - |
| + stats->obj_deleted[i], |
| + stats->obj_created[i]); |
| + if (buf >= end) |
| + return buf; |
| + } |
| + return buf; |
| +} |
| + |
| +static char *print_binder_proc_stats(char *buf, char *end, |
| + struct binder_proc *proc) |
| +{ |
| + struct binder_work *w; |
| + struct rb_node *n; |
| + int count, strong, weak; |
| + |
| + buf += snprintf(buf, end - buf, "proc %d\n", proc->pid); |
| + if (buf >= end) |
| + return buf; |
| + count = 0; |
| + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) |
| + count++; |
| + buf += snprintf(buf, end - buf, " threads: %d\n", count); |
| + if (buf >= end) |
| + return buf; |
| + buf += snprintf(buf, end - buf, " requested threads: %d+%d/%d\n" |
| + " ready threads %d\n" |
| + " free async space %zd\n", proc->requested_threads, |
| + proc->requested_threads_started, proc->max_threads, |
| + proc->ready_threads, proc->free_async_space); |
| + if (buf >= end) |
| + return buf; |
| + count = 0; |
| + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) |
| + count++; |
| + buf += snprintf(buf, end - buf, " nodes: %d\n", count); |
| + if (buf >= end) |
| + return buf; |
| + count = 0; |
| + strong = 0; |
| + weak = 0; |
| + for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { |
| + struct binder_ref *ref = rb_entry(n, struct binder_ref, |
| + rb_node_desc); |
| + count++; |
| + strong += ref->strong; |
| + weak += ref->weak; |
| + } |
| + buf += snprintf(buf, end - buf, " refs: %d s %d w %d\n", |
| + count, strong, weak); |
| + if (buf >= end) |
| + return buf; |
| + |
| + count = 0; |
| + for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) |
| + count++; |
| + buf += snprintf(buf, end - buf, " buffers: %d\n", count); |
| + if (buf >= end) |
| + return buf; |
| + |
| + count = 0; |
| + list_for_each_entry(w, &proc->todo, entry) { |
| + switch (w->type) { |
| + case BINDER_WORK_TRANSACTION: |
| + count++; |
| + break; |
| + default: |
| + break; |
| + } |
| + } |
| + buf += snprintf(buf, end - buf, " pending transactions: %d\n", count); |
| + if (buf >= end) |
| + return buf; |
| + |
| + buf = print_binder_stats(buf, end, " ", &proc->stats); |
| + |
| + return buf; |
| +} |
| + |
| + |
| +static int binder_read_proc_state(char *page, char **start, off_t off, |
| + int count, int *eof, void *data) |
| +{ |
| + struct binder_proc *proc; |
| + struct hlist_node *pos; |
| + struct binder_node *node; |
| + int len = 0; |
| + char *buf = page; |
| + char *end = page + PAGE_SIZE; |
| + int do_lock = !binder_debug_no_lock; |
| + |
| + if (off) |
| + return 0; |
| + |
| + if (do_lock) |
| + mutex_lock(&binder_lock); |
| + |
| + buf += snprintf(buf, end - buf, "binder state:\n"); |
| + |
| + if (!hlist_empty(&binder_dead_nodes)) |
| + buf += snprintf(buf, end - buf, "dead nodes:\n"); |
| + hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) { |
| + if (buf >= end) |
| + break; |
| + buf = print_binder_node(buf, end, node); |
| + } |
| + |
| + hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { |
| + if (buf >= end) |
| + break; |
| + buf = print_binder_proc(buf, end, proc, 1); |
| + } |
| + if (do_lock) |
| + mutex_unlock(&binder_lock); |
| + if (buf > page + PAGE_SIZE) |
| + buf = page + PAGE_SIZE; |
| + |
| + *start = page + off; |
| + |
| + len = buf - page; |
| + if (len > off) |
| + len -= off; |
| + else |
| + len = 0; |
| + |
| + return len < count ? len : count; |
| +} |
| + |
| +static int binder_read_proc_stats(char *page, char **start, off_t off, |
| + int count, int *eof, void *data) |
| +{ |
| + struct binder_proc *proc; |
| + struct hlist_node *pos; |
| + int len = 0; |
| + char *p = page; |
| + int do_lock = !binder_debug_no_lock; |
| + |
| + if (off) |
| + return 0; |
| + |
| + if (do_lock) |
| + mutex_lock(&binder_lock); |
| + |
| + p += snprintf(p, PAGE_SIZE, "binder stats:\n"); |
| + |
| + p = print_binder_stats(p, page + PAGE_SIZE, "", &binder_stats); |
| + |
| + hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { |
| + if (p >= page + PAGE_SIZE) |
| + break; |
| + p = print_binder_proc_stats(p, page + PAGE_SIZE, proc); |
| + } |
| + if (do_lock) |
| + mutex_unlock(&binder_lock); |
| + if (p > page + PAGE_SIZE) |
| + p = page + PAGE_SIZE; |
| + |
| + *start = page + off; |
| + |
| + len = p - page; |
| + if (len > off) |
| + len -= off; |
| + else |
| + len = 0; |
| + |
| + return len < count ? len : count; |
| +} |
| + |
| +static int binder_read_proc_transactions(char *page, char **start, off_t off, |
| + int count, int *eof, void *data) |
| +{ |
| + struct binder_proc *proc; |
| + struct hlist_node *pos; |
| + int len = 0; |
| + char *buf = page; |
| + char *end = page + PAGE_SIZE; |
| + int do_lock = !binder_debug_no_lock; |
| + |
| + if (off) |
| + return 0; |
| + |
| + if (do_lock) |
| + mutex_lock(&binder_lock); |
| + |
| + buf += snprintf(buf, end - buf, "binder transactions:\n"); |
| + hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { |
| + if (buf >= end) |
| + break; |
| + buf = print_binder_proc(buf, end, proc, 0); |
| + } |
| + if (do_lock) |
| + mutex_unlock(&binder_lock); |
| + if (buf > page + PAGE_SIZE) |
| + buf = page + PAGE_SIZE; |
| + |
| + *start = page + off; |
| + |
| + len = buf - page; |
| + if (len > off) |
| + len -= off; |
| + else |
| + len = 0; |
| + |
| + return len < count ? len : count; |
| +} |
| + |
| +static int binder_read_proc_proc(char *page, char **start, off_t off, |
| + int count, int *eof, void *data) |
| +{ |
| + struct binder_proc *proc = data; |
| + int len = 0; |
| + char *p = page; |
| + int do_lock = !binder_debug_no_lock; |
| + |
| + if (off) |
| + return 0; |
| + |
| + if (do_lock) |
| + mutex_lock(&binder_lock); |
| + p += snprintf(p, PAGE_SIZE, "binder proc state:\n"); |
| + p = print_binder_proc(p, page + PAGE_SIZE, proc, 1); |
| + if (do_lock) |
| + mutex_unlock(&binder_lock); |
| + |
| + if (p > page + PAGE_SIZE) |
| + p = page + PAGE_SIZE; |
| + *start = page + off; |
| + |
| + len = p - page; |
| + if (len > off) |
| + len -= off; |
| + else |
| + len = 0; |
| + |
| + return len < count ? len : count; |
| +} |
| + |
| +static char *print_binder_transaction_log_entry(char *buf, char *end, |
| + struct binder_transaction_log_entry *e) |
| +{ |
| + buf += snprintf(buf, end - buf, |
| + "%d: %s from %d:%d to %d:%d node %d handle %d " |
| + "size %d:%d\n", |
| + e->debug_id, (e->call_type == 2) ? "reply" : |
| + ((e->call_type == 1) ? "async" : "call "), e->from_proc, |
| + e->from_thread, e->to_proc, e->to_thread, e->to_node, |
| + e->target_handle, e->data_size, e->offsets_size); |
| + return buf; |
| +} |
| + |
| +static int binder_read_proc_transaction_log( |
| + char *page, char **start, off_t off, int count, int *eof, void *data) |
| +{ |
| + struct binder_transaction_log *log = data; |
| + int len = 0; |
| + int i; |
| + char *buf = page; |
| + char *end = page + PAGE_SIZE; |
| + |
| + if (off) |
| + return 0; |
| + |
| + if (log->full) { |
| + for (i = log->next; i < ARRAY_SIZE(log->entry); i++) { |
| + if (buf >= end) |
| + break; |
| + buf = print_binder_transaction_log_entry(buf, end, |
| + &log->entry[i]); |
| + } |
| + } |
| + for (i = 0; i < log->next; i++) { |
| + if (buf >= end) |
| + break; |
| + buf = print_binder_transaction_log_entry(buf, end, |
| + &log->entry[i]); |
| + } |
| + |
| + *start = page + off; |
| + |
| + len = buf - page; |
| + if (len > off) |
| + len -= off; |
| + else |
| + len = 0; |
| + |
| + return len < count ? len : count; |
| +} |
| + |
| +static const struct file_operations binder_fops = { |
| + .owner = THIS_MODULE, |
| + .poll = binder_poll, |
| + .unlocked_ioctl = binder_ioctl, |
| + .mmap = binder_mmap, |
| + .open = binder_open, |
| + .flush = binder_flush, |
| + .release = binder_release, |
| +}; |
| + |
| +static struct miscdevice binder_miscdev = { |
| + .minor = MISC_DYNAMIC_MINOR, |
| + .name = "binder", |
| + .fops = &binder_fops |
| +}; |
| + |
| +static int __init binder_init(void) |
| +{ |
| + int ret; |
| + |
| + binder_proc_dir_entry_root = proc_mkdir("binder", NULL); |
| + if (binder_proc_dir_entry_root) |
| + binder_proc_dir_entry_proc = proc_mkdir("proc", |
| + binder_proc_dir_entry_root); |
| + ret = misc_register(&binder_miscdev); |
| + if (binder_proc_dir_entry_root) { |
| + create_proc_read_entry("state", |
| + S_IRUGO, |
| + binder_proc_dir_entry_root, |
| + binder_read_proc_state, |
| + NULL); |
| + create_proc_read_entry("stats", |
| + S_IRUGO, |
| + binder_proc_dir_entry_root, |
| + binder_read_proc_stats, |
| + NULL); |
| + create_proc_read_entry("transactions", |
| + S_IRUGO, |
| + binder_proc_dir_entry_root, |
| + binder_read_proc_transactions, |
| + NULL); |
| + create_proc_read_entry("transaction_log", |
| + S_IRUGO, |
| + binder_proc_dir_entry_root, |
| + binder_read_proc_transaction_log, |
| + &binder_transaction_log); |
| + create_proc_read_entry("failed_transaction_log", |
| + S_IRUGO, |
| + binder_proc_dir_entry_root, |
| + binder_read_proc_transaction_log, |
| + &binder_transaction_log_failed); |
| + } |
| + return ret; |
| +} |
| + |
| +device_initcall(binder_init); |
| + |
| +MODULE_LICENSE("GPL v2"); |
| diff --git a/stblinux-2.6.37/drivers/staging/android/binder.h b/stblinux-2.6.37/drivers/staging/android/binder.h |
| new file mode 100644 |
| index 0000000..863ae1a |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/binder.h |
| @@ -0,0 +1,330 @@ |
| +/* |
| + * Copyright (C) 2008 Google, Inc. |
| + * |
| + * Based on, but no longer compatible with, the original |
| + * OpenBinder.org binder driver interface, which is: |
| + * |
| + * Copyright (c) 2005 Palmsource, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#ifndef _LINUX_BINDER_H |
| +#define _LINUX_BINDER_H |
| + |
| +#include <linux/ioctl.h> |
| + |
| +#define B_PACK_CHARS(c1, c2, c3, c4) \ |
| + ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) |
| +#define B_TYPE_LARGE 0x85 |
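| + |
| +/* |
| + * For example, BINDER_TYPE_BINDER below packs to |
| + * ('s'<<24)|('b'<<16)|('*'<<8)|0x85 == 0x73622a85. |
| + */ |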
| + |
| +enum { |
| + BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), |
| + BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), |
| + BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), |
| + BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), |
| + BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), |
| +}; |
| + |
| +enum { |
| + FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, |
| + FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, |
| +}; |
| + |
| +/* |
| + * This is the flattened representation of a Binder object for transfer |
| + * between processes. The 'offsets' supplied as part of a binder transaction |
| + * contains offsets into the data where these structures occur. The Binder |
| + * driver takes care of re-writing the structure type and data as it moves |
| + * between processes. |
| + */ |
| +struct flat_binder_object { |
| + /* 8 bytes for large_flat_header. */ |
| + unsigned long type; |
| + unsigned long flags; |
| + |
| + /* 8 bytes of data. */ |
| + union { |
| + void *binder; /* local object */ |
| + signed long handle; /* remote object */ |
| + }; |
| + |
| + /* extra data associated with local object */ |
| + void *cookie; |
| +}; |
| + |
| +/* |
| + * On 64-bit platforms where user code may run in 32-bit mode, the driver |
| + * must translate the buffer (and local binder) addresses appropriately. |
| + */ |
| + |
| +struct binder_write_read { |
| + signed long write_size; /* bytes to write */ |
| + signed long write_consumed; /* bytes consumed by driver */ |
| + unsigned long write_buffer; |
| + signed long read_size; /* bytes to read */ |
| + signed long read_consumed; /* bytes consumed by driver */ |
| + unsigned long read_buffer; |
| +}; |
| + |
| +/* Use with BINDER_VERSION, driver fills in fields. */ |
| +struct binder_version { |
| + /* driver protocol version -- increment with incompatible change */ |
| + signed long protocol_version; |
| +}; |
| + |
| +/* This is the current protocol version. */ |
| +#define BINDER_CURRENT_PROTOCOL_VERSION 7 |
| + |
| +#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) |
| +#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t) |
| +#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t) |
| +#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int) |
| +#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int) |
| +#define BINDER_THREAD_EXIT _IOW('b', 8, int) |
| +#define BINDER_VERSION _IOWR('b', 9, struct binder_version) |
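| + |
| +/* |
| + * A minimal sketch (assumed user-space code, not part of this interface) |
| + * of driving the protocol: a thread enters the looper by writing |
| + * BC_ENTER_LOOPER, defined below, through BINDER_WRITE_READ, where |
| + * binder_fd is an assumed descriptor open on the binder device: |
| + * |
| + *	uint32_t cmd = BC_ENTER_LOOPER; |
| + *	struct binder_write_read bwr = { 0 }; |
| + * |
| + *	bwr.write_buffer = (unsigned long) &cmd; |
| + *	bwr.write_size = sizeof(cmd); |
| + *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr); |
| + */ |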
| + |
| +/* |
| + * NOTE: Two special error codes you should check for when calling |
| + * into the driver are: |
| + * |
| + * EINTR -- The operation has been interrupted. This should be |
| + * handled by retrying the ioctl() until a different error code |
| + * is returned. |
| + * |
| + * ECONNREFUSED -- The driver is no longer accepting operations |
| + * from your process. That is, the process is being destroyed. |
| + * You should handle this by exiting from your process. Note |
| + * that once this error code is returned, all further calls to |
| + * the driver from any thread will return this same code. |
| + */ |
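| + |
| +/* |
| + * An illustrative sketch (assumed user-space code) of the retry |
| + * convention above, where binder_fd and bwr are assumed to be set up |
| + * as for a normal BINDER_WRITE_READ call: |
| + * |
| + *	int res; |
| + *	do { |
| + *		res = ioctl(binder_fd, BINDER_WRITE_READ, &bwr); |
| + *	} while (res < 0 && errno == EINTR); |
| + *	if (res < 0 && errno == ECONNREFUSED) |
| + *		exit(0); |
| + */ |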
| + |
| +enum transaction_flags { |
| + TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */ |
| + TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ |
| + TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ |
| + TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ |
| +}; |
| + |
| +struct binder_transaction_data { |
| + /* The first two are only used for bcTRANSACTION and brTRANSACTION, |
| + * identifying the target and contents of the transaction. |
| + */ |
| + union { |
| + size_t handle; /* target descriptor of command transaction */ |
| + void *ptr; /* target descriptor of return transaction */ |
| + } target; |
| + void *cookie; /* target object cookie */ |
| + unsigned int code; /* transaction command */ |
| + |
| + /* General information about the transaction. */ |
| + unsigned int flags; |
| + pid_t sender_pid; |
| + uid_t sender_euid; |
| + size_t data_size; /* number of bytes of data */ |
| + size_t offsets_size; /* number of bytes of offsets */ |
| + |
| + /* If this transaction is inline, the data immediately |
| + * follows here; otherwise, it ends with a pointer to |
| + * the data buffer. |
| + */ |
| + union { |
| + struct { |
| + /* transaction data */ |
| + const void *buffer; |
| + /* offsets from buffer to flat_binder_object structs */ |
| + const void *offsets; |
| + } ptr; |
| + uint8_t buf[8]; |
| + } data; |
| +}; |
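| + |
| +/* |
| + * An illustrative sketch (assumed user-space code; handle, code, data, |
| + * offs and the two lengths are placeholders) of filling this structure |
| + * in for an outgoing call: |
| + * |
| + *	struct binder_transaction_data txn = { 0 }; |
| + * |
| + *	txn.target.handle = handle; |
| + *	txn.code = code; |
| + *	txn.flags = TF_ACCEPT_FDS; |
| + *	txn.data_size = data_len; |
| + *	txn.offsets_size = offs_len; |
| + *	txn.data.ptr.buffer = data; |
| + *	txn.data.ptr.offsets = offs; |
| + * |
| + * The filled structure is then sent with a BC_TRANSACTION command |
| + * through BINDER_WRITE_READ. |
| + */ |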
| + |
| +struct binder_ptr_cookie { |
| + void *ptr; |
| + void *cookie; |
| +}; |
| + |
| +struct binder_pri_desc { |
| + int priority; |
| + int desc; |
| +}; |
| + |
| +struct binder_pri_ptr_cookie { |
| + int priority; |
| + void *ptr; |
| + void *cookie; |
| +}; |
| + |
| +enum BinderDriverReturnProtocol { |
| + BR_ERROR = _IOR('r', 0, int), |
| + /* |
| + * int: error code |
| + */ |
| + |
| + BR_OK = _IO('r', 1), |
| + /* No parameters! */ |
| + |
| + BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), |
| + BR_REPLY = _IOR('r', 3, struct binder_transaction_data), |
| + /* |
| + * binder_transaction_data: the received command. |
| + */ |
| + |
| + BR_ACQUIRE_RESULT = _IOR('r', 4, int), |
| + /* |
| + * not currently supported |
| + * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. |
| + * Else the remote object has acquired a primary reference. |
| + */ |
| + |
| + BR_DEAD_REPLY = _IO('r', 5), |
| + /* |
| + * The target of the last transaction (either a bcTRANSACTION or |
| + * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. |
| + */ |
| + |
| + BR_TRANSACTION_COMPLETE = _IO('r', 6), |
| + /* |
| + * No parameters... always refers to the last transaction requested |
| + * (including replies). Note that this will be sent even for |
| + * asynchronous transactions. |
| + */ |
| + |
| + BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), |
| + BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), |
| + BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), |
| + BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), |
| + /* |
| + * void *: ptr to binder |
| + * void *: cookie for binder |
| + */ |
| + |
| + BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), |
| + /* |
| + * not currently supported |
| + * int: priority |
| + * void *: ptr to binder |
| + * void *: cookie for binder |
| + */ |
| + |
| + BR_NOOP = _IO('r', 12), |
| + /* |
| + * No parameters. Do nothing and examine the next command. It exists |
| + * primarily so that we can replace it with a BR_SPAWN_LOOPER command. |
| + */ |
| + |
| + BR_SPAWN_LOOPER = _IO('r', 13), |
| + /* |
| + * No parameters. The driver has determined that a process has no |
| + * threads waiting to service incoming transactions. When a process |
| + * receives this command, it must spawn a new service thread and |
| + * register it via bcENTER_LOOPER. |
| + */ |
| + |
| + BR_FINISHED = _IO('r', 14), |
| + /* |
| + * not currently supported |
| + * stop threadpool thread |
| + */ |
| + |
| + BR_DEAD_BINDER = _IOR('r', 15, void *), |
| + /* |
| + * void *: cookie |
| + */ |
| + BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *), |
| + /* |
| + * void *: cookie |
| + */ |
| + |
| + BR_FAILED_REPLY = _IO('r', 17), |
| + /* |
| + * The last transaction (either a bcTRANSACTION or |
| + * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. |
| + */ |
| +}; |
| + |
| +enum BinderDriverCommandProtocol { |
| + BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), |
| + BC_REPLY = _IOW('c', 1, struct binder_transaction_data), |
| + /* |
| + * binder_transaction_data: the sent command. |
| + */ |
| + |
| + BC_ACQUIRE_RESULT = _IOW('c', 2, int), |
| + /* |
| + * not currently supported |
| + * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. |
| + * Else you have acquired a primary reference on the object. |
| + */ |
| + |
| + BC_FREE_BUFFER = _IOW('c', 3, int), |
| + /* |
| + * void *: ptr to transaction data received on a read |
| + */ |
| + |
| + BC_INCREFS = _IOW('c', 4, int), |
| + BC_ACQUIRE = _IOW('c', 5, int), |
| + BC_RELEASE = _IOW('c', 6, int), |
| + BC_DECREFS = _IOW('c', 7, int), |
| + /* |
| + * int: descriptor |
| + */ |
| + |
| + BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), |
| + BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), |
| + /* |
| + * void *: ptr to binder |
| + * void *: cookie for binder |
| + */ |
| + |
| + BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), |
| + /* |
| + * not currently supported |
| + * int: priority |
| + * int: descriptor |
| + */ |
| + |
| + BC_REGISTER_LOOPER = _IO('c', 11), |
| + /* |
| + * No parameters. |
| + * Register a spawned looper thread with the device. |
| + */ |
| + |
| + BC_ENTER_LOOPER = _IO('c', 12), |
| + BC_EXIT_LOOPER = _IO('c', 13), |
| + /* |
| + * No parameters. |
| + * These two commands are sent as an application-level thread |
| + * enters and exits the binder loop, respectively. They are |
| + * used so the binder can have an accurate count of the number |
| + * of looping threads it has available. |
| + */ |
| + |
| + BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie), |
| + /* |
| + * void *: ptr to binder |
| + * void *: cookie |
| + */ |
| + |
| + BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie), |
| + /* |
| + * void *: ptr to binder |
| + * void *: cookie |
| + */ |
| + |
| + BC_DEAD_BINDER_DONE = _IOW('c', 16, void *), |
| + /* |
| + * void *: cookie |
| + */ |
| +}; |
| + |
| +#endif /* _LINUX_BINDER_H */ |
| + |
| diff --git a/stblinux-2.6.37/drivers/staging/android/logger.c b/stblinux-2.6.37/drivers/staging/android/logger.c |
| new file mode 100644 |
| index 0000000..7d8e7d8 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/logger.c |
| @@ -0,0 +1,609 @@ |
| +/* |
| + * drivers/misc/logger.c |
| + * |
| + * A Logging Subsystem |
| + * |
| + * Copyright (C) 2007-2008 Google, Inc. |
| + * |
| + * Robert Love <rlove@google.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + */ |
| + |
| +#include <linux/module.h> |
| +#include <linux/fs.h> |
| +#include <linux/miscdevice.h> |
| +#include <linux/uaccess.h> |
| +#include <linux/poll.h> |
| +#include <linux/time.h> |
| +#include <linux/sched.h> |
| +#include <linux/slab.h> |
| +#include "logger.h" |
| + |
| +#include <asm/ioctls.h> |
| + |
| +/* |
| + * struct logger_log - represents a specific log, such as 'main' or 'radio' |
| + * |
| + * This structure lives from module insertion until module removal, so it does |
| + * not need additional reference counting. The structure is protected by the |
| + * mutex 'mutex'. |
| + */ |
| +struct logger_log { |
| + unsigned char *buffer;/* the ring buffer itself */ |
| + struct miscdevice misc; /* misc device representing the log */ |
| + wait_queue_head_t wq; /* wait queue for readers */ |
| + struct list_head readers; /* this log's readers */ |
| + struct mutex mutex; /* mutex protecting buffer */ |
| + size_t w_off; /* current write head offset */ |
| + size_t head; /* new readers start here */ |
| + size_t size; /* size of the log */ |
| +}; |
| + |
| +/* |
| + * struct logger_reader - a logging device open for reading |
| + * |
| + * This object lives from open to release, so we don't need additional |
| + * reference counting. The structure is protected by log->mutex. |
| + */ |
| +struct logger_reader { |
| + struct logger_log *log; /* associated log */ |
| + struct list_head list; /* entry in logger_log's list */ |
| + size_t r_off; /* current read head offset */ |
| +}; |
| + |
| +/* logger_offset - returns index 'n' into the log via (optimized) modulus */ |
| +#define logger_offset(n) ((n) & (log->size - 1)) |
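| + |
| +/* |
| + * For example, with a 64KB log (size == 0x10000), logger_offset(0x10004) |
| + * masks down to 0x4; this only works because 'size' is a power of two. |
| + */ |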
| + |
| +/* |
| + * file_get_log - Given a file structure, return the associated log |
| + * |
| + * This isn't aesthetic. We have several goals: |
| + * |
| + * 1) Need to quickly obtain the associated log during an I/O operation |
| + * 2) Readers need to maintain state (logger_reader) |
| + * 3) Writers need to be very fast (open() should be a near no-op) |
| + * |
| + * In the reader case, we can trivially go file->logger_reader->logger_log. |
| + * For a writer, we don't want to maintain a logger_reader, so we just go |
| + * file->logger_log. Thus what file->private_data points at depends on whether |
| + * or not the file was opened for reading. This function hides that dirtiness. |
| + */ |
| +static inline struct logger_log *file_get_log(struct file *file) |
| +{ |
| + if (file->f_mode & FMODE_READ) { |
| + struct logger_reader *reader = file->private_data; |
| + return reader->log; |
| + } else |
| + return file->private_data; |
| +} |
| + |
| +/* |
| + * get_entry_len - Grabs the length of the payload of the next entry starting |
| + * from 'off'. |
| + * |
| + * Caller needs to hold log->mutex. |
| + */ |
| +static __u32 get_entry_len(struct logger_log *log, size_t off) |
| +{ |
| + __u16 val; |
| + |
| + switch (log->size - off) { |
| + case 1: |
| + memcpy(&val, log->buffer + off, 1); |
| + memcpy(((char *) &val) + 1, log->buffer, 1); |
| + break; |
| + default: |
| + memcpy(&val, log->buffer + off, 2); |
| + } |
| + |
| + return sizeof(struct logger_entry) + val; |
| +} |
| + |
| +/* |
| + * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the |
| + * user-space buffer 'buf'. Returns 'count' on success. |
| + * |
| + * Caller must hold log->mutex. |
| + */ |
| +static ssize_t do_read_log_to_user(struct logger_log *log, |
| + struct logger_reader *reader, |
| + char __user *buf, |
| + size_t count) |
| +{ |
| + size_t len; |
| + |
| + /* |
| + * We read from the log in two disjoint operations. First, we read from |
| + * the current read head offset up to 'count' bytes or to the end of |
| + * the log, whichever comes first. |
| + */ |
| + len = min(count, log->size - reader->r_off); |
| + if (copy_to_user(buf, log->buffer + reader->r_off, len)) |
| + return -EFAULT; |
| + |
| + /* |
| + * Second, we read any remaining bytes, starting back at the head of |
| + * the log. |
| + */ |
| + if (count != len) |
| + if (copy_to_user(buf + len, log->buffer, count - len)) |
| + return -EFAULT; |
| + |
| + reader->r_off = logger_offset(reader->r_off + count); |
| + |
| + return count; |
| +} |
| + |
| +/* |
| + * logger_read - our log's read() method |
| + * |
| + * Behavior: |
| + * |
| + * - O_NONBLOCK works |
| + * - If there are no log entries to read, blocks until log is written to |
| + * - Atomically reads exactly one log entry |
| + * |
| + * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read |
| + * buffer is insufficient to hold next entry. |
| + */ |
| +static ssize_t logger_read(struct file *file, char __user *buf, |
| + size_t count, loff_t *pos) |
| +{ |
| + struct logger_reader *reader = file->private_data; |
| + struct logger_log *log = reader->log; |
| + ssize_t ret; |
| + DEFINE_WAIT(wait); |
| + |
| +start: |
| + while (1) { |
| + prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE); |
| + |
| + mutex_lock(&log->mutex); |
| + ret = (log->w_off == reader->r_off); |
| + mutex_unlock(&log->mutex); |
| + if (!ret) |
| + break; |
| + |
| + if (file->f_flags & O_NONBLOCK) { |
| + ret = -EAGAIN; |
| + break; |
| + } |
| + |
| + if (signal_pending(current)) { |
| + ret = -EINTR; |
| + break; |
| + } |
| + |
| + schedule(); |
| + } |
| + |
| + finish_wait(&log->wq, &wait); |
| + if (ret) |
| + return ret; |
| + |
| + mutex_lock(&log->mutex); |
| + |
| + /* is there still something to read or did we race? */ |
| + if (unlikely(log->w_off == reader->r_off)) { |
| + mutex_unlock(&log->mutex); |
| + goto start; |
| + } |
| + |
| + /* get the size of the next entry */ |
| + ret = get_entry_len(log, reader->r_off); |
| + if (count < ret) { |
| + ret = -EINVAL; |
| + goto out; |
| + } |
| + |
| + /* get exactly one entry from the log */ |
| + ret = do_read_log_to_user(log, reader, buf, ret); |
| + |
| +out: |
| + mutex_unlock(&log->mutex); |
| + |
| + return ret; |
| +} |
| + |
| +/* |
| + * get_next_entry - return the offset of the first valid entry at least 'len' |
| + * bytes after 'off'. |
| + * |
| + * Caller must hold log->mutex. |
| + */ |
| +static size_t get_next_entry(struct logger_log *log, size_t off, size_t len) |
| +{ |
| + size_t count = 0; |
| + |
| + do { |
| + size_t nr = get_entry_len(log, off); |
| + off = logger_offset(off + nr); |
| + count += nr; |
| + } while (count < len); |
| + |
| + return off; |
| +} |
| + |
| +/* |
| + * clock_interval - is a < c < b in mod-space? Put another way, does the line |
| + * from a to b cross c? |
| + */ |
| +static inline int clock_interval(size_t a, size_t b, size_t c) |
| +{ |
| + if (b < a) { |
| + if (a < c || b >= c) |
| + return 1; |
| + } else { |
| + if (a < c && b >= c) |
| + return 1; |
| + } |
| + |
| + return 0; |
| +} |
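| + |
| +/* |
| + * For example, clock_interval(10, 2, 11) returns 1: the write head moved |
| + * from 10 and wrapped around to 2, so it crossed offset 11 on the way. |
| + */ |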
| + |
| +/* |
| + * fix_up_readers - walk the list of all readers and "fix up" any who were |
| + * lapped by the writer; also do the same for the default "start head". |
| + * We do this by "pulling forward" the readers and start head to the first |
| + * entry after the new write head. |
| + * |
| + * The caller needs to hold log->mutex. |
| + */ |
| +static void fix_up_readers(struct logger_log *log, size_t len) |
| +{ |
| + size_t old = log->w_off; |
| + size_t new = logger_offset(old + len); |
| + struct logger_reader *reader; |
| + |
| + if (clock_interval(old, new, log->head)) |
| + log->head = get_next_entry(log, log->head, len); |
| + |
| + list_for_each_entry(reader, &log->readers, list) |
| + if (clock_interval(old, new, reader->r_off)) |
| + reader->r_off = get_next_entry(log, reader->r_off, len); |
| +} |
| + |
| +/* |
| + * do_write_log - writes 'count' bytes from 'buf' to 'log' |
| + * |
| + * The caller needs to hold log->mutex. |
| + */ |
| +static void do_write_log(struct logger_log *log, const void *buf, size_t count) |
| +{ |
| + size_t len; |
| + |
| + len = min(count, log->size - log->w_off); |
| + memcpy(log->buffer + log->w_off, buf, len); |
| + |
| + if (count != len) |
| + memcpy(log->buffer, buf + len, count - len); |
| + |
| + log->w_off = logger_offset(log->w_off + count); |
| + |
| +} |
| + |
| +/* |
| + * do_write_log_from_user - writes 'count' bytes from the user-space |
| + * buffer 'buf' to the log 'log' |
| + * |
| + * The caller needs to hold log->mutex. |
| + * |
| + * Returns 'count' on success, negative error code on failure. |
| + */ |
| +static ssize_t do_write_log_from_user(struct logger_log *log, |
| + const void __user *buf, size_t count) |
| +{ |
| + size_t len; |
| + |
| + len = min(count, log->size - log->w_off); |
| + if (len && copy_from_user(log->buffer + log->w_off, buf, len)) |
| + return -EFAULT; |
| + |
| + if (count != len) |
| + if (copy_from_user(log->buffer, buf + len, count - len)) |
| + return -EFAULT; |
| + |
| + log->w_off = logger_offset(log->w_off + count); |
| + |
| + return count; |
| +} |
| + |
| +/* |
| + * logger_aio_write - our write method, implementing support for write(), |
| + * writev(), and aio_write(). Writes are our fast path, and we try to optimize |
| + * them above all else. |
| + */ |
| +ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov, |
| + unsigned long nr_segs, loff_t ppos) |
| +{ |
| + struct logger_log *log = file_get_log(iocb->ki_filp); |
| + size_t orig = log->w_off; |
| + struct logger_entry header; |
| + struct timespec now; |
| + ssize_t ret = 0; |
| + |
| + now = current_kernel_time(); |
| + |
| + header.pid = current->tgid; |
| + header.tid = current->pid; |
| + header.sec = now.tv_sec; |
| + header.nsec = now.tv_nsec; |
| + header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD); |
| + |
| + /* null writes succeed, return zero */ |
| + if (unlikely(!header.len)) |
| + return 0; |
| + |
| + mutex_lock(&log->mutex); |
| + |
| + /* |
| + * Fix up any readers, pulling them forward to the first readable |
| + * entry after (what will be) the new write offset. We do this now |
| + * because if we partially fail, we can end up with clobbered log |
| + * entries that encroach on readable buffer. |
| + */ |
| + fix_up_readers(log, sizeof(struct logger_entry) + header.len); |
| + |
| + do_write_log(log, &header, sizeof(struct logger_entry)); |
| + |
| + while (nr_segs-- > 0) { |
| + size_t len; |
| + ssize_t nr; |
| + |
| + /* figure out how much of this vector we can keep */ |
| + len = min_t(size_t, iov->iov_len, header.len - ret); |
| + |
| + /* write out this segment's payload */ |
| + nr = do_write_log_from_user(log, iov->iov_base, len); |
| + if (unlikely(nr < 0)) { |
| + log->w_off = orig; |
| + mutex_unlock(&log->mutex); |
| + return nr; |
| + } |
| + |
| + iov++; |
| + ret += nr; |
| + } |
| + |
| + mutex_unlock(&log->mutex); |
| + |
| + /* wake up any blocked readers */ |
| + wake_up_interruptible(&log->wq); |
| + |
| + return ret; |
| +} |
| + |
| +static struct logger_log *get_log_from_minor(int); |
| + |
| +/* |
| + * logger_open - the log's open() file operation |
| + * |
| + * Note how near a no-op this is in the write-only case. Keep it that way! |
| + */ |
| +static int logger_open(struct inode *inode, struct file *file) |
| +{ |
| + struct logger_log *log; |
| + int ret; |
| + |
| + ret = nonseekable_open(inode, file); |
| + if (ret) |
| + return ret; |
| + |
| + log = get_log_from_minor(MINOR(inode->i_rdev)); |
| + if (!log) |
| + return -ENODEV; |
| + |
| + if (file->f_mode & FMODE_READ) { |
| + struct logger_reader *reader; |
| + |
| + reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL); |
| + if (!reader) |
| + return -ENOMEM; |
| + |
| + reader->log = log; |
| + INIT_LIST_HEAD(&reader->list); |
| + |
| + mutex_lock(&log->mutex); |
| + reader->r_off = log->head; |
| + list_add_tail(&reader->list, &log->readers); |
| + mutex_unlock(&log->mutex); |
| + |
| + file->private_data = reader; |
| + } else |
| + file->private_data = log; |
| + |
| + return 0; |
| +} |
| + |
| +/* |
| + * logger_release - the log's release file operation |
| + * |
| + * Note this is a total no-op in the write-only case. Keep it that way! |
| + */ |
| +static int logger_release(struct inode *ignored, struct file *file) |
| +{ |
| + if (file->f_mode & FMODE_READ) { |
| + struct logger_reader *reader = file->private_data; |
| + list_del(&reader->list); |
| + kfree(reader); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +/* |
| + * logger_poll - the log's poll file operation, for poll/select/epoll |
| + * |
| + * Note we always return POLLOUT, because you can always write() to the log. |
| + * Note also that, strictly speaking, a return value of POLLIN does not |
| + * guarantee that the log is readable without blocking, as there is a small |
| + * chance that the writer can lap the reader in the interim between poll() |
| + * returning and the read() request. |
| + */ |
| +static unsigned int logger_poll(struct file *file, poll_table *wait) |
| +{ |
| + struct logger_reader *reader; |
| + struct logger_log *log; |
| + unsigned int ret = POLLOUT | POLLWRNORM; |
| + |
| + if (!(file->f_mode & FMODE_READ)) |
| + return ret; |
| + |
| + reader = file->private_data; |
| + log = reader->log; |
| + |
| + poll_wait(file, &log->wq, wait); |
| + |
| + mutex_lock(&log->mutex); |
| + if (log->w_off != reader->r_off) |
| + ret |= POLLIN | POLLRDNORM; |
| + mutex_unlock(&log->mutex); |
| + |
| + return ret; |
| +} |
| + |
| +static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| +{ |
| + struct logger_log *log = file_get_log(file); |
| + struct logger_reader *reader; |
| + long ret = -ENOTTY; |
| + |
| + mutex_lock(&log->mutex); |
| + |
| + switch (cmd) { |
| + case LOGGER_GET_LOG_BUF_SIZE: |
| + ret = log->size; |
| + break; |
| + case LOGGER_GET_LOG_LEN: |
| + if (!(file->f_mode & FMODE_READ)) { |
| + ret = -EBADF; |
| + break; |
| + } |
| + reader = file->private_data; |
| + if (log->w_off >= reader->r_off) |
| + ret = log->w_off - reader->r_off; |
| + else |
| + ret = (log->size - reader->r_off) + log->w_off; |
| + break; |
| + case LOGGER_GET_NEXT_ENTRY_LEN: |
| + if (!(file->f_mode & FMODE_READ)) { |
| + ret = -EBADF; |
| + break; |
| + } |
| + reader = file->private_data; |
| + if (log->w_off != reader->r_off) |
| + ret = get_entry_len(log, reader->r_off); |
| + else |
| + ret = 0; |
| + break; |
| + case LOGGER_FLUSH_LOG: |
| + if (!(file->f_mode & FMODE_WRITE)) { |
| + ret = -EBADF; |
| + break; |
| + } |
| + list_for_each_entry(reader, &log->readers, list) |
| + reader->r_off = log->w_off; |
| + log->head = log->w_off; |
| + ret = 0; |
| + break; |
| + } |
| + |
| + mutex_unlock(&log->mutex); |
| + |
| + return ret; |
| +} |
| + |
| +static const struct file_operations logger_fops = { |
| + .owner = THIS_MODULE, |
| + .read = logger_read, |
| + .aio_write = logger_aio_write, |
| + .poll = logger_poll, |
| + .unlocked_ioctl = logger_ioctl, |
| + .compat_ioctl = logger_ioctl, |
| + .open = logger_open, |
| + .release = logger_release, |
| +}; |
| + |
| +/* |
| + * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which |
| + * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than |
| + * LONG_MAX minus LOGGER_ENTRY_MAX_LEN. |
| + */ |
| +#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \ |
| +static unsigned char _buf_ ## VAR[SIZE]; \ |
| +static struct logger_log VAR = { \ |
| + .buffer = _buf_ ## VAR, \ |
| + .misc = { \ |
| + .minor = MISC_DYNAMIC_MINOR, \ |
| + .name = NAME, \ |
| + .fops = &logger_fops, \ |
| + .parent = NULL, \ |
| + }, \ |
| + .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \ |
| + .readers = LIST_HEAD_INIT(VAR .readers), \ |
| + .mutex = __MUTEX_INITIALIZER(VAR .mutex), \ |
| + .w_off = 0, \ |
| + .head = 0, \ |
| + .size = SIZE, \ |
| +}; |
| + |
| +DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024) |
| +DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024) |
| +DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024) |
| + |
| +static struct logger_log *get_log_from_minor(int minor) |
| +{ |
| + if (log_main.misc.minor == minor) |
| + return &log_main; |
| + if (log_events.misc.minor == minor) |
| + return &log_events; |
| + if (log_radio.misc.minor == minor) |
| + return &log_radio; |
| + return NULL; |
| +} |
| + |
| +static int __init init_log(struct logger_log *log) |
| +{ |
| + int ret; |
| + |
| + ret = misc_register(&log->misc); |
| + if (unlikely(ret)) { |
| + printk(KERN_ERR "logger: failed to register misc " |
| + "device for log '%s'!\n", log->misc.name); |
| + return ret; |
| + } |
| + |
| + printk(KERN_INFO "logger: created %luK log '%s'\n", |
| + (unsigned long) log->size >> 10, log->misc.name); |
| + |
| + return 0; |
| +} |
| + |
| +static int __init logger_init(void) |
| +{ |
| + int ret; |
| + |
| + ret = init_log(&log_main); |
| + if (unlikely(ret)) |
| + goto out; |
| + |
| + ret = init_log(&log_events); |
| + if (unlikely(ret)) |
| + goto out; |
| + |
| + ret = init_log(&log_radio); |
| + if (unlikely(ret)) |
| + goto out; |
| + |
| +out: |
| + return ret; |
| +} |
| +device_initcall(logger_init); |
| diff --git a/stblinux-2.6.37/drivers/staging/android/logger.h b/stblinux-2.6.37/drivers/staging/android/logger.h |
| new file mode 100644 |
| index 0000000..a562434 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/logger.h |
| @@ -0,0 +1,48 @@ |
| +/* include/linux/logger.h |
| + * |
| + * Copyright (C) 2007-2008 Google, Inc. |
| + * Author: Robert Love <rlove@android.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#ifndef _LINUX_LOGGER_H |
| +#define _LINUX_LOGGER_H |
| + |
| +#include <linux/types.h> |
| +#include <linux/ioctl.h> |
| + |
| +struct logger_entry { |
| + __u16 len; /* length of the payload */ |
| + __u16 __pad; /* no matter what, we get 2 bytes of padding */ |
| + __s32 pid; /* generating process's pid */ |
| + __s32 tid; /* generating process's tid */ |
| + __s32 sec; /* seconds since Epoch */ |
| + __s32 nsec; /* nanoseconds */ |
| + char msg[0]; /* the entry's payload */ |
| +}; |
| + |
| +#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */ |
| +#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */ |
| +#define LOGGER_LOG_MAIN "log_main" /* everything else */ |
| + |
| +#define LOGGER_ENTRY_MAX_LEN (4*1024) |
| +#define LOGGER_ENTRY_MAX_PAYLOAD \ |
| + (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry)) |
| + |
| +#define __LOGGERIO 0xAE |
| + |
| +#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */ |
| +#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */ |
| +#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */ |
| +#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */ |
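| + |
| +/* |
| + * An illustrative sketch (assumed user-space code; the /dev path is an |
| + * assumption derived from the misc device name) of reading one entry: |
| + * |
| + *	unsigned char buf[LOGGER_ENTRY_MAX_LEN]; |
| + *	int fd = open("/dev/log_main", O_RDONLY); |
| + *	ssize_t n = read(fd, buf, sizeof(buf)); |
| + *	struct logger_entry *e = (struct logger_entry *) buf; |
| + * |
| + * On success each read() returns exactly one entry: the header followed |
| + * by e->len payload bytes in e->msg. |
| + */ |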
| + |
| +#endif /* _LINUX_LOGGER_H */ |
| diff --git a/stblinux-2.6.37/drivers/staging/android/lowmemorykiller.c b/stblinux-2.6.37/drivers/staging/android/lowmemorykiller.c |
| new file mode 100644 |
| index 0000000..f934393 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/lowmemorykiller.c |
| @@ -0,0 +1,158 @@ |
| +/* drivers/misc/lowmemorykiller.c |
| + * |
| + * Copyright (C) 2007-2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/module.h> |
| +#include <linux/kernel.h> |
| +#include <linux/mm.h> |
| +#include <linux/oom.h> |
| +#include <linux/sched.h> |
| + |
| +static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask); |
| + |
| +static struct shrinker lowmem_shrinker = { |
| + .shrink = lowmem_shrink, |
| + .seeks = DEFAULT_SEEKS * 16 |
| +}; |
| +static uint32_t lowmem_debug_level = 2; |
| +static int lowmem_adj[6] = { |
| + 0, |
| + 1, |
| + 6, |
| + 12, |
| +}; |
| +static int lowmem_adj_size = 4; |
| +static size_t lowmem_minfree[6] = { |
| + 3 * 512, /* 6MB */ |
| + 2 * 1024, /* 8MB */ |
| + 4 * 1024, /* 16MB */ |
| + 16 * 1024, /* 64MB */ |
| +}; |
| +static int lowmem_minfree_size = 4; |
| + |
| +#define lowmem_print(level, x...) \ |
| + do { \ |
| + if (lowmem_debug_level >= (level)) \ |
| + printk(x); \ |
| + } while (0) |
| + |
| +module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); |
| +module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size, |
| + S_IRUGO | S_IWUSR); |
| +module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, |
| + S_IRUGO | S_IWUSR); |
| +module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); |
| + |
| +static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) |
| +{ |
| + struct task_struct *p; |
| + struct task_struct *selected = NULL; |
| + int rem = 0; |
| + int tasksize; |
| + int i; |
| + int min_adj = OOM_ADJUST_MAX + 1; |
| + int selected_tasksize = 0; |
| + int selected_oom_adj; |
| + int array_size = ARRAY_SIZE(lowmem_adj); |
| + int other_free = global_page_state(NR_FREE_PAGES); |
| + int other_file = global_page_state(NR_FILE_PAGES); |
| + |
| + if (lowmem_adj_size < array_size) |
| + array_size = lowmem_adj_size; |
| + if (lowmem_minfree_size < array_size) |
| + array_size = lowmem_minfree_size; |
| + for (i = 0; i < array_size; i++) { |
| + if (other_free < lowmem_minfree[i] && |
| + other_file < lowmem_minfree[i]) { |
| + min_adj = lowmem_adj[i]; |
| + break; |
| + } |
| + } |
| + if (nr_to_scan > 0) |
| + lowmem_print(3, "lowmem_shrink %d, %x, ofree %d %d, ma %d\n", |
| + nr_to_scan, gfp_mask, other_free, other_file, |
| + min_adj); |
| + rem = global_page_state(NR_ACTIVE_ANON) + |
| + global_page_state(NR_ACTIVE_FILE) + |
| + global_page_state(NR_INACTIVE_ANON) + |
| + global_page_state(NR_INACTIVE_FILE); |
| + if (nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) { |
| + lowmem_print(5, "lowmem_shrink %d, %x, return %d\n", |
| + nr_to_scan, gfp_mask, rem); |
| + return rem; |
| + } |
| + selected_oom_adj = min_adj; |
| + |
| + read_lock(&tasklist_lock); |
| + for_each_process(p) { |
| + struct mm_struct *mm; |
| + int oom_adj; |
| + |
| + task_lock(p); |
| + mm = p->mm; |
| + if (!mm) { |
| + task_unlock(p); |
| + continue; |
| + } |
| + oom_adj = mm->oom_adj; |
| + if (oom_adj < min_adj) { |
| + task_unlock(p); |
| + continue; |
| + } |
| + tasksize = get_mm_rss(mm); |
| + task_unlock(p); |
| + if (tasksize <= 0) |
| + continue; |
| + if (selected) { |
| + if (oom_adj < selected_oom_adj) |
| + continue; |
| + if (oom_adj == selected_oom_adj && |
| + tasksize <= selected_tasksize) |
| + continue; |
| + } |
| + selected = p; |
| + selected_tasksize = tasksize; |
| + selected_oom_adj = oom_adj; |
| + lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n", |
| + p->pid, p->comm, oom_adj, tasksize); |
| + } |
| + if (selected) { |
| + lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", |
| + selected->pid, selected->comm, |
| + selected_oom_adj, selected_tasksize); |
| + force_sig(SIGKILL, selected); |
| + rem -= selected_tasksize; |
| + } |
| + lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", |
| + nr_to_scan, gfp_mask, rem); |
| + read_unlock(&tasklist_lock); |
| + return rem; |
| +} |
| + |
| +static int __init lowmem_init(void) |
| +{ |
| + register_shrinker(&lowmem_shrinker); |
| + return 0; |
| +} |
| + |
| +static void __exit lowmem_exit(void) |
| +{ |
| + unregister_shrinker(&lowmem_shrinker); |
| +} |
| + |
| +module_init(lowmem_init); |
| +module_exit(lowmem_exit); |
| + |
| +MODULE_LICENSE("GPL"); |
| + |
| diff --git a/stblinux-2.6.37/drivers/staging/android/lowmemorykiller.txt b/stblinux-2.6.37/drivers/staging/android/lowmemorykiller.txt |
| new file mode 100644 |
| index 0000000..bd5c0c0 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/lowmemorykiller.txt |
| @@ -0,0 +1,16 @@ |
| +The lowmemorykiller driver lets user-space specify a set of memory thresholds |
| +where processes with a range of oom_adj values will get killed. Specify the |
| +minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the |
| +number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both |
| +files take a comma-separated list of numbers in ascending order. |
| + |
| +For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and |
| +"1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes |
| +with an oom_adj value of 8 or higher when the free memory drops below 4096 pages |
| +and kill processes with an oom_adj value of 0 or higher when the free memory |
| +drops below 1024 pages. |
| + |
| +The driver considers memory used for caches to be free, but if a large |
| +percentage of the cached memory is locked this can be very inaccurate |
| +and processes may not get killed until the normal oom killer is triggered. |
| + |
| diff --git a/stblinux-2.6.37/drivers/staging/android/ram_console.c b/stblinux-2.6.37/drivers/staging/android/ram_console.c |
| new file mode 100644 |
| index 0000000..8f18a59 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/ram_console.c |
| @@ -0,0 +1,410 @@ |
| +/* drivers/android/ram_console.c |
| + * |
| + * Copyright (C) 2007-2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/console.h> |
| +#include <linux/init.h> |
| +#include <linux/module.h> |
| +#include <linux/platform_device.h> |
| +#include <linux/proc_fs.h> |
| +#include <linux/string.h> |
| +#include <linux/uaccess.h> |
| +#include <linux/io.h> |
| + |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| +#include <linux/rslib.h> |
| +#endif |
| + |
| +struct ram_console_buffer { |
| + uint32_t sig; |
| + uint32_t start; |
| + uint32_t size; |
| + uint8_t data[0]; |
| +}; |
| + |
| +#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */ |
| + |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT |
| +static char __initdata |
| + ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE]; |
| +#endif |
| +static char *ram_console_old_log; |
| +static size_t ram_console_old_log_size; |
| + |
| +static struct ram_console_buffer *ram_console_buffer; |
| +static size_t ram_console_buffer_size; |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| +static char *ram_console_par_buffer; |
| +static struct rs_control *ram_console_rs_decoder; |
| +static int ram_console_corrected_bytes; |
| +static int ram_console_bad_blocks; |
| +#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE |
| +#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE |
| +#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE |
| +#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL |
| +#endif |
| + |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| +static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc) |
| +{ |
| + int i; |
| + uint16_t par[ECC_SIZE]; |
| + /* Initialize the parity buffer */ |
| + memset(par, 0, sizeof(par)); |
| + encode_rs8(ram_console_rs_decoder, data, len, par, 0); |
| + for (i = 0; i < ECC_SIZE; i++) |
| + ecc[i] = par[i]; |
| +} |
| + |
| +static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc) |
| +{ |
| + int i; |
| + uint16_t par[ECC_SIZE]; |
| + for (i = 0; i < ECC_SIZE; i++) |
| + par[i] = ecc[i]; |
| + return decode_rs8(ram_console_rs_decoder, data, par, len, |
| + NULL, 0, NULL, 0, NULL); |
| +} |
| +#endif |
| + |
| +static void ram_console_update(const char *s, unsigned int count) |
| +{ |
| + struct ram_console_buffer *buffer = ram_console_buffer; |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| + uint8_t *buffer_end = buffer->data + ram_console_buffer_size; |
| + uint8_t *block; |
| + uint8_t *par; |
| + int size = ECC_BLOCK_SIZE; |
| +#endif |
| + memcpy(buffer->data + buffer->start, s, count); |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| + block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1)); |
| + par = ram_console_par_buffer + |
| + (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE; |
| + do { |
| + if (block + ECC_BLOCK_SIZE > buffer_end) |
| + size = buffer_end - block; |
| + ram_console_encode_rs8(block, size, par); |
| + block += ECC_BLOCK_SIZE; |
| + par += ECC_SIZE; |
| + } while (block < buffer->data + buffer->start + count); |
| +#endif |
| +} |
| + |
| +static void ram_console_update_header(void) |
| +{ |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| + struct ram_console_buffer *buffer = ram_console_buffer; |
| + uint8_t *par; |
| + par = ram_console_par_buffer + |
| + DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE; |
| + ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par); |
| +#endif |
| +} |
| + |
| +static void |
| +ram_console_write(struct console *console, const char *s, unsigned int count) |
| +{ |
| + int rem; |
| + struct ram_console_buffer *buffer = ram_console_buffer; |
| + |
| + if (count > ram_console_buffer_size) { |
| + s += count - ram_console_buffer_size; |
| + count = ram_console_buffer_size; |
| + } |
| + rem = ram_console_buffer_size - buffer->start; |
| + if (rem < count) { |
| + ram_console_update(s, rem); |
| + s += rem; |
| + count -= rem; |
| + buffer->start = 0; |
| + buffer->size = ram_console_buffer_size; |
| + } |
| + ram_console_update(s, count); |
| + |
| + buffer->start += count; |
| + if (buffer->size < ram_console_buffer_size) |
| + buffer->size += count; |
| + ram_console_update_header(); |
| +} |
| + |
| +static struct console ram_console = { |
| + .name = "ram", |
| + .write = ram_console_write, |
| + .flags = CON_PRINTBUFFER | CON_ENABLED, |
| + .index = -1, |
| +}; |
| + |
| +static void __init |
| +ram_console_save_old(struct ram_console_buffer *buffer, char *dest) |
| +{ |
| + size_t old_log_size = buffer->size; |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| + uint8_t *block; |
| + uint8_t *par; |
| + char strbuf[80]; |
| + int strbuf_len; |
| + |
| + block = buffer->data; |
| + par = ram_console_par_buffer; |
| + while (block < buffer->data + buffer->size) { |
| + int numerr; |
| + int size = ECC_BLOCK_SIZE; |
| + if (block + size > buffer->data + ram_console_buffer_size) |
| + size = buffer->data + ram_console_buffer_size - block; |
| + numerr = ram_console_decode_rs8(block, size, par); |
| + if (numerr > 0) { |
| +#if 0 |
| + printk(KERN_INFO "ram_console: error in block %p, %d\n", |
| + block, numerr); |
| +#endif |
| + ram_console_corrected_bytes += numerr; |
| + } else if (numerr < 0) { |
| +#if 0 |
| + printk(KERN_INFO "ram_console: uncorrectable error in " |
| + "block %p\n", block); |
| +#endif |
| + ram_console_bad_blocks++; |
| + } |
| + block += ECC_BLOCK_SIZE; |
| + par += ECC_SIZE; |
| + } |
| + if (ram_console_corrected_bytes || ram_console_bad_blocks) |
| + strbuf_len = snprintf(strbuf, sizeof(strbuf), |
| + "\n%d Corrected bytes, %d unrecoverable blocks\n", |
| + ram_console_corrected_bytes, ram_console_bad_blocks); |
| + else |
| + strbuf_len = snprintf(strbuf, sizeof(strbuf), |
| + "\nNo errors detected\n"); |
| + if (strbuf_len >= sizeof(strbuf)) |
| + strbuf_len = sizeof(strbuf) - 1; |
| + old_log_size += strbuf_len; |
| +#endif |
| + |
| + if (dest == NULL) { |
| + dest = kmalloc(old_log_size, GFP_KERNEL); |
| + if (dest == NULL) { |
| + printk(KERN_ERR |
| + "ram_console: failed to allocate buffer\n"); |
| + return; |
| + } |
| + } |
| + |
| + ram_console_old_log = dest; |
| + ram_console_old_log_size = old_log_size; |
| + memcpy(ram_console_old_log, |
| + &buffer->data[buffer->start], buffer->size - buffer->start); |
| + memcpy(ram_console_old_log + buffer->size - buffer->start, |
| + &buffer->data[0], buffer->start); |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| + memcpy(ram_console_old_log + old_log_size - strbuf_len, |
| + strbuf, strbuf_len); |
| +#endif |
| +} |
| + |
| +static int __init ram_console_init(struct ram_console_buffer *buffer, |
| + size_t buffer_size, char *old_buf) |
| +{ |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| + int numerr; |
| + uint8_t *par; |
| +#endif |
| + ram_console_buffer = buffer; |
| + ram_console_buffer_size = |
| + buffer_size - sizeof(struct ram_console_buffer); |
| + |
| + if (ram_console_buffer_size > buffer_size) { |
| + pr_err("ram_console: buffer %p, invalid size %zu, " |
| + "datasize %zu\n", buffer, buffer_size, |
| + ram_console_buffer_size); |
| + return 0; |
| + } |
| + |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION |
| + ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size, |
| + ECC_BLOCK_SIZE) + 1) * ECC_SIZE; |
| + |
| + if (ram_console_buffer_size > buffer_size) { |
| + pr_err("ram_console: buffer %p, invalid size %zu, " |
| + "non-ecc datasize %zu\n", |
| + buffer, buffer_size, ram_console_buffer_size); |
| + return 0; |
| + } |
| + |
| + ram_console_par_buffer = buffer->data + ram_console_buffer_size; |
| + |
| + |
| + /* first consecutive root is 0 |
| + * primitive element to generate roots = 1 |
| + */ |
| + ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE); |
| + if (ram_console_rs_decoder == NULL) { |
| + printk(KERN_INFO "ram_console: init_rs failed\n"); |
| + return 0; |
| + } |
| + |
| + ram_console_corrected_bytes = 0; |
| + ram_console_bad_blocks = 0; |
| + |
| + par = ram_console_par_buffer + |
| + DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE; |
| + |
| + numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par); |
| + if (numerr > 0) { |
| + printk(KERN_INFO "ram_console: error in header, %d\n", numerr); |
| + ram_console_corrected_bytes += numerr; |
| + } else if (numerr < 0) { |
| + printk(KERN_INFO |
| + "ram_console: uncorrectable error in header\n"); |
| + ram_console_bad_blocks++; |
| + } |
| +#endif |
| + |
| + if (buffer->sig == RAM_CONSOLE_SIG) { |
| + if (buffer->size > ram_console_buffer_size |
| + || buffer->start > buffer->size) |
| + printk(KERN_INFO "ram_console: found existing invalid " |
| + "buffer, size %d, start %d\n", |
| + buffer->size, buffer->start); |
| + else { |
| + printk(KERN_INFO "ram_console: found existing buffer, " |
| + "size %d, start %d\n", |
| + buffer->size, buffer->start); |
| + ram_console_save_old(buffer, old_buf); |
| + } |
| + } else { |
| + printk(KERN_INFO "ram_console: no valid data in buffer " |
| + "(sig = 0x%08x)\n", buffer->sig); |
| + } |
| + |
| + buffer->sig = RAM_CONSOLE_SIG; |
| + buffer->start = 0; |
| + buffer->size = 0; |
| + |
| + register_console(&ram_console); |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE |
| + console_verbose(); |
| +#endif |
| + return 0; |
| +} |
| + |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT |
| +static int __init ram_console_early_init(void) |
| +{ |
| + return ram_console_init((struct ram_console_buffer *) |
| + CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR, |
| + CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE, |
| + ram_console_old_log_init_buffer); |
| +} |
| +#else |
| +static int ram_console_driver_probe(struct platform_device *pdev) |
| +{ |
| + struct resource *res = pdev->resource; |
| + size_t start; |
| + size_t buffer_size; |
| + void *buffer; |
| + |
| + if (res == NULL || pdev->num_resources != 1 || |
| + !(res->flags & IORESOURCE_MEM)) { |
| + printk(KERN_ERR "ram_console: invalid resource, %p %d flags " |
| + "%lx\n", res, pdev->num_resources, res ? res->flags : 0); |
| + return -ENXIO; |
| + } |
| + buffer_size = resource_size(res); |
| + start = res->start; |
| + printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n", |
| + start, buffer_size); |
| + buffer = ioremap(res->start, buffer_size); |
| + if (buffer == NULL) { |
| + printk(KERN_ERR "ram_console: failed to map memory\n"); |
| + return -ENOMEM; |
| + } |
| + |
| + return ram_console_init(buffer, buffer_size, NULL/* allocate */); |
| +} |
| + |
| +static struct platform_driver ram_console_driver = { |
| + .probe = ram_console_driver_probe, |
| + .driver = { |
| + .name = "ram_console", |
| + .owner = THIS_MODULE, |
| + }, |
| +}; |
| + |
| +static int __init ram_console_module_init(void) |
| +{ |
| + int err; |
| + err = platform_driver_register(&ram_console_driver); |
| + return err; |
| +} |
| +#endif |
| + |
| +static ssize_t ram_console_read_old(struct file *file, char __user *buf, |
| + size_t len, loff_t *offset) |
| +{ |
| + loff_t pos = *offset; |
| + ssize_t count; |
| + |
| + if (pos >= ram_console_old_log_size) |
| + return 0; |
| + |
| + count = min(len, (size_t)(ram_console_old_log_size - pos)); |
| + if (copy_to_user(buf, ram_console_old_log + pos, count)) |
| + return -EFAULT; |
| + |
| + *offset += count; |
| + return count; |
| +} |
| + |
| +static const struct file_operations ram_console_file_ops = { |
| + .owner = THIS_MODULE, |
| + .read = ram_console_read_old, |
| +}; |
| + |
| +static int __init ram_console_late_init(void) |
| +{ |
| + struct proc_dir_entry *entry; |
| + |
| + if (ram_console_old_log == NULL) |
| + return 0; |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT |
| + ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL); |
| + if (ram_console_old_log == NULL) { |
| + printk(KERN_ERR |
| + "ram_console: failed to allocate buffer for old log\n"); |
| + ram_console_old_log_size = 0; |
| + return 0; |
| + } |
| + memcpy(ram_console_old_log, |
| + ram_console_old_log_init_buffer, ram_console_old_log_size); |
| +#endif |
| + entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL); |
| + if (!entry) { |
| + printk(KERN_ERR "ram_console: failed to create proc entry\n"); |
| + kfree(ram_console_old_log); |
| + ram_console_old_log = NULL; |
| + return 0; |
| + } |
| + |
| + entry->proc_fops = &ram_console_file_ops; |
| + entry->size = ram_console_old_log_size; |
| + return 0; |
| +} |
| + |
| +#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT |
| +console_initcall(ram_console_early_init); |
| +#else |
| +module_init(ram_console_module_init); |
| +#endif |
| +late_initcall(ram_console_late_init); |
| + |
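| |
| Example board support (an illustrative sketch, not part of this patch): |
| ram_console_driver_probe() above expects exactly one IORESOURCE_MEM resource |
| describing a RAM region that survives reboot. The base address and size |
| below are placeholders for whatever region a given board reserves. |
| |
| #include <linux/ioport.h> |
| #include <linux/platform_device.h> |
| |
| static struct resource ram_console_resource = { |
| 	.start	= 0x0fe00000,			/* placeholder physical base */ |
| 	.end	= 0x0fe00000 + 0x20000 - 1,	/* placeholder: 128 KiB */ |
| 	.flags	= IORESOURCE_MEM, |
| }; |
| |
| static struct platform_device ram_console_device = { |
| 	.name		= "ram_console", |
| 	.id		= -1, |
| 	.num_resources	= 1, |
| 	.resource	= &ram_console_resource, |
| }; |
| |
| Board init code would call platform_device_register(&ram_console_device); |
| after the next boot, the previous kernel log is then readable from |
| /proc/last_kmsg via ram_console_late_init() above. |
| |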
| diff --git a/stblinux-2.6.37/drivers/staging/android/timed_gpio.c b/stblinux-2.6.37/drivers/staging/android/timed_gpio.c |
| new file mode 100644 |
| index 0000000..be7cdaa |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/timed_gpio.c |
| @@ -0,0 +1,166 @@ |
| +/* drivers/misc/timed_gpio.c |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * Author: Mike Lockwood <lockwood@android.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/module.h> |
| +#include <linux/platform_device.h> |
| +#include <linux/hrtimer.h> |
| +#include <linux/err.h> |
| +#include <linux/gpio.h> |
| + |
| +#include "timed_output.h" |
| +#include "timed_gpio.h" |
| + |
| + |
| +struct timed_gpio_data { |
| + struct timed_output_dev dev; |
| + struct hrtimer timer; |
| + spinlock_t lock; |
| + unsigned gpio; |
| + int max_timeout; |
| + u8 active_low; |
| +}; |
| + |
| +static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer) |
| +{ |
| + struct timed_gpio_data *data = |
| + container_of(timer, struct timed_gpio_data, timer); |
| + |
| + gpio_direction_output(data->gpio, data->active_low ? 1 : 0); |
| + return HRTIMER_NORESTART; |
| +} |
| + |
| +static int gpio_get_time(struct timed_output_dev *dev) |
| +{ |
| + struct timed_gpio_data *data = |
| + container_of(dev, struct timed_gpio_data, dev); |
| + |
| + if (hrtimer_active(&data->timer)) { |
| + ktime_t r = hrtimer_get_remaining(&data->timer); |
| + struct timeval t = ktime_to_timeval(r); |
| + return t.tv_sec * 1000 + t.tv_usec / 1000; |
| + } else |
| + return 0; |
| +} |
| + |
| +static void gpio_enable(struct timed_output_dev *dev, int value) |
| +{ |
| + struct timed_gpio_data *data = |
| + container_of(dev, struct timed_gpio_data, dev); |
| + unsigned long flags; |
| + |
| + spin_lock_irqsave(&data->lock, flags); |
| + |
| + /* cancel previous timer and set GPIO according to value */ |
| + hrtimer_cancel(&data->timer); |
| + gpio_direction_output(data->gpio, data->active_low ? !value : !!value); |
| + |
| + if (value > 0) { |
| + if (value > data->max_timeout) |
| + value = data->max_timeout; |
| + |
| + hrtimer_start(&data->timer, |
| + ktime_set(value / 1000, (value % 1000) * 1000000), |
| + HRTIMER_MODE_REL); |
| + } |
| + |
| + spin_unlock_irqrestore(&data->lock, flags); |
| +} |
| + |
| +static int timed_gpio_probe(struct platform_device *pdev) |
| +{ |
| + struct timed_gpio_platform_data *pdata = pdev->dev.platform_data; |
| + struct timed_gpio *cur_gpio; |
| + struct timed_gpio_data *gpio_data, *gpio_dat; |
| + int i, j, ret = 0; |
| + |
| + if (!pdata) |
| + return -EBUSY; |
| + |
| + gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios, |
| + GFP_KERNEL); |
| + if (!gpio_data) |
| + return -ENOMEM; |
| + |
| + for (i = 0; i < pdata->num_gpios; i++) { |
| + cur_gpio = &pdata->gpios[i]; |
| + gpio_dat = &gpio_data[i]; |
| + |
| + hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC, |
| + HRTIMER_MODE_REL); |
| + gpio_dat->timer.function = gpio_timer_func; |
| + spin_lock_init(&gpio_dat->lock); |
| + |
| + gpio_dat->dev.name = cur_gpio->name; |
| + gpio_dat->dev.get_time = gpio_get_time; |
| + gpio_dat->dev.enable = gpio_enable; |
| + ret = timed_output_dev_register(&gpio_dat->dev); |
| + if (ret < 0) { |
| + for (j = 0; j < i; j++) |
| + timed_output_dev_unregister(&gpio_data[j].dev); |
| + kfree(gpio_data); |
| + return ret; |
| + } |
| + |
| + gpio_dat->gpio = cur_gpio->gpio; |
| + gpio_dat->max_timeout = cur_gpio->max_timeout; |
| + gpio_dat->active_low = cur_gpio->active_low; |
| + gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low); |
| + } |
| + |
| + platform_set_drvdata(pdev, gpio_data); |
| + |
| + return 0; |
| +} |
| + |
| +static int timed_gpio_remove(struct platform_device *pdev) |
| +{ |
| + struct timed_gpio_platform_data *pdata = pdev->dev.platform_data; |
| + struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev); |
| + int i; |
| + |
| + for (i = 0; i < pdata->num_gpios; i++) |
| + timed_output_dev_unregister(&gpio_data[i].dev); |
| + |
| + kfree(gpio_data); |
| + |
| + return 0; |
| +} |
| + |
| +static struct platform_driver timed_gpio_driver = { |
| + .probe = timed_gpio_probe, |
| + .remove = timed_gpio_remove, |
| + .driver = { |
| + .name = TIMED_GPIO_NAME, |
| + .owner = THIS_MODULE, |
| + }, |
| +}; |
| + |
| +static int __init timed_gpio_init(void) |
| +{ |
| + return platform_driver_register(&timed_gpio_driver); |
| +} |
| + |
| +static void __exit timed_gpio_exit(void) |
| +{ |
| + platform_driver_unregister(&timed_gpio_driver); |
| +} |
| + |
| +module_init(timed_gpio_init); |
| +module_exit(timed_gpio_exit); |
| + |
| +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); |
| +MODULE_DESCRIPTION("timed gpio driver"); |
| +MODULE_LICENSE("GPL"); |
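| |
| Example board wiring (a sketch; the GPIO number is assumed and the include |
| path is illustrative): the driver is fed an array of struct timed_gpio via |
| platform data, here a single "vibrator" output capped at 15 seconds. |
| |
| #include <linux/kernel.h> |
| #include <linux/platform_device.h> |
| #include "timed_gpio.h" |
| |
| static struct timed_gpio board_timed_gpios[] = { |
| 	{ |
| 		.name		= "vibrator", |
| 		.gpio		= 27,		/* placeholder GPIO number */ |
| 		.max_timeout	= 15000,	/* longest allowed run, in ms */ |
| 		.active_low	= 0, |
| 	}, |
| }; |
| |
| static struct timed_gpio_platform_data board_timed_gpio_data = { |
| 	.num_gpios	= ARRAY_SIZE(board_timed_gpios), |
| 	.gpios		= board_timed_gpios, |
| }; |
| |
| static struct platform_device board_timed_gpio_device = { |
| 	.name	= TIMED_GPIO_NAME, |
| 	.id	= -1, |
| 	.dev	= { .platform_data = &board_timed_gpio_data }, |
| }; |
| |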
| diff --git a/stblinux-2.6.37/drivers/staging/android/timed_gpio.h b/stblinux-2.6.37/drivers/staging/android/timed_gpio.h |
| new file mode 100644 |
| index 0000000..a0e15f8 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/timed_gpio.h |
| @@ -0,0 +1,33 @@ |
| +/* include/linux/timed_gpio.h |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#ifndef _LINUX_TIMED_GPIO_H |
| +#define _LINUX_TIMED_GPIO_H |
| + |
| +#define TIMED_GPIO_NAME "timed-gpio" |
| + |
| +struct timed_gpio { |
| + const char *name; |
| + unsigned gpio; |
| + int max_timeout; |
| + u8 active_low; |
| +}; |
| + |
| +struct timed_gpio_platform_data { |
| + int num_gpios; |
| + struct timed_gpio *gpios; |
| +}; |
| + |
| +#endif |
| diff --git a/stblinux-2.6.37/drivers/staging/android/timed_output.c b/stblinux-2.6.37/drivers/staging/android/timed_output.c |
| new file mode 100644 |
| index 0000000..62e7918 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/timed_output.c |
| @@ -0,0 +1,121 @@ |
| +/* drivers/misc/timed_output.c |
| + * |
| + * Copyright (C) 2009 Google, Inc. |
| + * Author: Mike Lockwood <lockwood@android.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/module.h> |
| +#include <linux/types.h> |
| +#include <linux/device.h> |
| +#include <linux/fs.h> |
| +#include <linux/err.h> |
| + |
| +#include "timed_output.h" |
| + |
| +static struct class *timed_output_class; |
| +static atomic_t device_count; |
| + |
| +static ssize_t enable_show(struct device *dev, struct device_attribute *attr, |
| + char *buf) |
| +{ |
| + struct timed_output_dev *tdev = dev_get_drvdata(dev); |
| + int remaining = tdev->get_time(tdev); |
| + |
| + return sprintf(buf, "%d\n", remaining); |
| +} |
| + |
| +static ssize_t enable_store( |
| + struct device *dev, struct device_attribute *attr, |
| + const char *buf, size_t size) |
| +{ |
| + struct timed_output_dev *tdev = dev_get_drvdata(dev); |
| + int value; |
| + |
| + if (sscanf(buf, "%d", &value) != 1) |
| + return -EINVAL; |
| + tdev->enable(tdev, value); |
| + |
| + return size; |
| +} |
| + |
| +static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store); |
| + |
| +static int create_timed_output_class(void) |
| +{ |
| + if (!timed_output_class) { |
| + timed_output_class = class_create(THIS_MODULE, "timed_output"); |
| + if (IS_ERR(timed_output_class)) |
| + return PTR_ERR(timed_output_class); |
| + atomic_set(&device_count, 0); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +int timed_output_dev_register(struct timed_output_dev *tdev) |
| +{ |
| + int ret; |
| + |
| + if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time) |
| + return -EINVAL; |
| + |
| + ret = create_timed_output_class(); |
| + if (ret < 0) |
| + return ret; |
| + |
| + tdev->index = atomic_inc_return(&device_count); |
| + tdev->dev = device_create(timed_output_class, NULL, |
| + MKDEV(0, tdev->index), NULL, tdev->name); |
| + if (IS_ERR(tdev->dev)) |
| + return PTR_ERR(tdev->dev); |
| + |
| + ret = device_create_file(tdev->dev, &dev_attr_enable); |
| + if (ret < 0) |
| + goto err_create_file; |
| + |
| + dev_set_drvdata(tdev->dev, tdev); |
| + tdev->state = 0; |
| + return 0; |
| + |
| +err_create_file: |
| + device_destroy(timed_output_class, MKDEV(0, tdev->index)); |
| + printk(KERN_ERR "timed_output: Failed to register driver %s\n", |
| + tdev->name); |
| + |
| + return ret; |
| +} |
| +EXPORT_SYMBOL_GPL(timed_output_dev_register); |
| + |
| +void timed_output_dev_unregister(struct timed_output_dev *tdev) |
| +{ |
| + device_remove_file(tdev->dev, &dev_attr_enable); |
| + device_destroy(timed_output_class, MKDEV(0, tdev->index)); |
| + dev_set_drvdata(tdev->dev, NULL); |
| +} |
| +EXPORT_SYMBOL_GPL(timed_output_dev_unregister); |
| + |
| +static int __init timed_output_init(void) |
| +{ |
| + return create_timed_output_class(); |
| +} |
| + |
| +static void __exit timed_output_exit(void) |
| +{ |
| + class_destroy(timed_output_class); |
| +} |
| + |
| +module_init(timed_output_init); |
| +module_exit(timed_output_exit); |
| + |
| +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); |
| +MODULE_DESCRIPTION("timed output class driver"); |
| +MODULE_LICENSE("GPL"); |
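| |
| Minimal consumer sketch (hypothetical names; the backend behind the two |
| callbacks is assumed to exist): any driver can expose a one-shot output |
| through this class by filling in name, enable and get_time. |
| |
| #include "timed_output.h" |
| |
| static void my_out_enable(struct timed_output_dev *dev, int timeout_ms) |
| { |
| 	/* start the output; a timeout of 0 means stop immediately */ |
| } |
| |
| static int my_out_get_time(struct timed_output_dev *dev) |
| { |
| 	return 0;	/* milliseconds left on the current run */ |
| } |
| |
| static struct timed_output_dev my_out_dev = { |
| 	.name		= "vibrator", |
| 	.enable		= my_out_enable, |
| 	.get_time	= my_out_get_time, |
| }; |
| |
| After timed_output_dev_register(&my_out_dev), writing "1000" to |
| /sys/class/timed_output/vibrator/enable runs the output for one second, and |
| reading the same file returns the time remaining. |
| |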
| diff --git a/stblinux-2.6.37/drivers/staging/android/timed_output.h b/stblinux-2.6.37/drivers/staging/android/timed_output.h |
| new file mode 100644 |
| index 0000000..ec907ab |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/staging/android/timed_output.h |
| @@ -0,0 +1,37 @@ |
| +/* include/linux/timed_output.h |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#ifndef _LINUX_TIMED_OUTPUT_H |
| +#define _LINUX_TIMED_OUTPUT_H |
| + |
| +struct timed_output_dev { |
| + const char *name; |
| + |
| + /* enable the output and set the timer */ |
| + void (*enable)(struct timed_output_dev *sdev, int timeout); |
| + |
| + /* returns the current number of milliseconds remaining on the timer */ |
| + int (*get_time)(struct timed_output_dev *sdev); |
| + |
| + /* private data */ |
| + struct device *dev; |
| + int index; |
| + int state; |
| +}; |
| + |
| +extern int timed_output_dev_register(struct timed_output_dev *dev); |
| +extern void timed_output_dev_unregister(struct timed_output_dev *dev); |
| + |
| +#endif |
| diff --git a/stblinux-2.6.37/drivers/switch/Kconfig b/stblinux-2.6.37/drivers/switch/Kconfig |
| new file mode 100644 |
| index 0000000..5238591 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/switch/Kconfig |
| @@ -0,0 +1,15 @@ |
| +menuconfig SWITCH |
| + tristate "Switch class support" |
| + help |
| + Say Y here to enable switch class support. This allows |
| + switches to be monitored from userspace via sysfs and uevent. |
| + |
| +if SWITCH |
| + |
| +config SWITCH_GPIO |
| + tristate "GPIO Swith support" |
| + depends on GENERIC_GPIO |
| + help |
| + Say Y here to enable GPIO based switch support. |
| + |
| +endif # SWITCH |
| diff --git a/stblinux-2.6.37/drivers/switch/Makefile b/stblinux-2.6.37/drivers/switch/Makefile |
| new file mode 100644 |
| index 0000000..f7606ed |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/switch/Makefile |
| @@ -0,0 +1,4 @@ |
| +# Switch Class Driver |
| +obj-$(CONFIG_SWITCH) += switch_class.o |
| +obj-$(CONFIG_SWITCH_GPIO) += switch_gpio.o |
| + |
| diff --git a/stblinux-2.6.37/drivers/switch/switch_class.c b/stblinux-2.6.37/drivers/switch/switch_class.c |
| new file mode 100644 |
| index 0000000..e05fc25 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/switch/switch_class.c |
| @@ -0,0 +1,174 @@ |
| +/* |
| + * drivers/switch/switch_class.c |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * Author: Mike Lockwood <lockwood@android.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/module.h> |
| +#include <linux/types.h> |
| +#include <linux/init.h> |
| +#include <linux/device.h> |
| +#include <linux/fs.h> |
| +#include <linux/err.h> |
| +#include <linux/switch.h> |
| + |
| +struct class *switch_class; |
| +static atomic_t device_count; |
| + |
| +static ssize_t state_show(struct device *dev, struct device_attribute *attr, |
| + char *buf) |
| +{ |
| + struct switch_dev *sdev = (struct switch_dev *) |
| + dev_get_drvdata(dev); |
| + |
| + if (sdev->print_state) { |
| + int ret = sdev->print_state(sdev, buf); |
| + if (ret >= 0) |
| + return ret; |
| + } |
| + return sprintf(buf, "%d\n", sdev->state); |
| +} |
| + |
| +static ssize_t name_show(struct device *dev, struct device_attribute *attr, |
| + char *buf) |
| +{ |
| + struct switch_dev *sdev = (struct switch_dev *) |
| + dev_get_drvdata(dev); |
| + |
| + if (sdev->print_name) { |
| + int ret = sdev->print_name(sdev, buf); |
| + if (ret >= 0) |
| + return ret; |
| + } |
| + return sprintf(buf, "%s\n", sdev->name); |
| +} |
| + |
| +static DEVICE_ATTR(state, S_IRUGO, state_show, NULL); |
| +static DEVICE_ATTR(name, S_IRUGO, name_show, NULL); |
| + |
| +void switch_set_state(struct switch_dev *sdev, int state) |
| +{ |
| + char name_buf[120]; |
| + char state_buf[120]; |
| + char *prop_buf; |
| + char *envp[3]; |
| + int env_offset = 0; |
| + int length; |
| + |
| + if (sdev->state != state) { |
| + sdev->state = state; |
| + |
| + prop_buf = (char *)get_zeroed_page(GFP_KERNEL); |
| + if (prop_buf) { |
| + length = name_show(sdev->dev, NULL, prop_buf); |
| + if (length > 0) { |
| + if (prop_buf[length - 1] == '\n') |
| + prop_buf[length - 1] = 0; |
| + snprintf(name_buf, sizeof(name_buf), |
| + "SWITCH_NAME=%s", prop_buf); |
| + envp[env_offset++] = name_buf; |
| + } |
| + length = state_show(sdev->dev, NULL, prop_buf); |
| + if (length > 0) { |
| + if (prop_buf[length - 1] == '\n') |
| + prop_buf[length - 1] = 0; |
| + snprintf(state_buf, sizeof(state_buf), |
| + "SWITCH_STATE=%s", prop_buf); |
| + envp[env_offset++] = state_buf; |
| + } |
| + envp[env_offset] = NULL; |
| + kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp); |
| + free_page((unsigned long)prop_buf); |
| + } else { |
| + printk(KERN_ERR "out of memory in switch_set_state\n"); |
| + kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE); |
| + } |
| + } |
| +} |
| +EXPORT_SYMBOL_GPL(switch_set_state); |
| + |
| +static int create_switch_class(void) |
| +{ |
| + if (!switch_class) { |
| + switch_class = class_create(THIS_MODULE, "switch"); |
| + if (IS_ERR(switch_class)) |
| + return PTR_ERR(switch_class); |
| + atomic_set(&device_count, 0); |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +int switch_dev_register(struct switch_dev *sdev) |
| +{ |
| + int ret; |
| + |
| + if (!switch_class) { |
| + ret = create_switch_class(); |
| + if (ret < 0) |
| + return ret; |
| + } |
| + |
| + sdev->index = atomic_inc_return(&device_count); |
| + sdev->dev = device_create(switch_class, NULL, |
| + MKDEV(0, sdev->index), NULL, sdev->name); |
| + if (IS_ERR(sdev->dev)) |
| + return PTR_ERR(sdev->dev); |
| + |
| + ret = device_create_file(sdev->dev, &dev_attr_state); |
| + if (ret < 0) |
| + goto err_create_file_1; |
| + ret = device_create_file(sdev->dev, &dev_attr_name); |
| + if (ret < 0) |
| + goto err_create_file_2; |
| + |
| + dev_set_drvdata(sdev->dev, sdev); |
| + sdev->state = 0; |
| + return 0; |
| + |
| +err_create_file_2: |
| + device_remove_file(sdev->dev, &dev_attr_state); |
| +err_create_file_1: |
| + device_destroy(switch_class, MKDEV(0, sdev->index)); |
| + printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name); |
| + |
| + return ret; |
| +} |
| +EXPORT_SYMBOL_GPL(switch_dev_register); |
| + |
| +void switch_dev_unregister(struct switch_dev *sdev) |
| +{ |
| + device_remove_file(sdev->dev, &dev_attr_name); |
| + device_remove_file(sdev->dev, &dev_attr_state); |
| + device_destroy(switch_class, MKDEV(0, sdev->index)); |
| + dev_set_drvdata(sdev->dev, NULL); |
| +} |
| +EXPORT_SYMBOL_GPL(switch_dev_unregister); |
| + |
| +static int __init switch_class_init(void) |
| +{ |
| + return create_switch_class(); |
| +} |
| + |
| +static void __exit switch_class_exit(void) |
| +{ |
| + class_destroy(switch_class); |
| +} |
| + |
| +module_init(switch_class_init); |
| +module_exit(switch_class_exit); |
| + |
| +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); |
| +MODULE_DESCRIPTION("Switch class driver"); |
| +MODULE_LICENSE("GPL"); |
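| |
| Usage sketch (hypothetical names): a driver registers a switch_dev and then |
| reports state changes, which the class turns into sysfs updates and |
| SWITCH_NAME/SWITCH_STATE uevents as shown in switch_set_state() above. |
| |
| #include <linux/switch.h> |
| |
| static struct switch_dev headset_switch = { |
| 	.name = "h2w",	/* appears as /sys/class/switch/h2w */ |
| }; |
| |
| /* in probe: */ |
| /*	err = switch_dev_register(&headset_switch); */ |
| /* on plug/unplug events: */ |
| /*	switch_set_state(&headset_switch, inserted ? 1 : 0); */ |
| |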
| diff --git a/stblinux-2.6.37/drivers/switch/switch_gpio.c b/stblinux-2.6.37/drivers/switch/switch_gpio.c |
| new file mode 100644 |
| index 0000000..b5f98ca |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/switch/switch_gpio.c |
| @@ -0,0 +1,171 @@ |
| +/* |
| + * drivers/switch/switch_gpio.c |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * Author: Mike Lockwood <lockwood@android.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/module.h> |
| +#include <linux/kernel.h> |
| +#include <linux/init.h> |
| +#include <linux/interrupt.h> |
| +#include <linux/platform_device.h> |
| +#include <linux/switch.h> |
| +#include <linux/workqueue.h> |
| +#include <linux/gpio.h> |
| + |
| +struct gpio_switch_data { |
| + struct switch_dev sdev; |
| + unsigned gpio; |
| + const char *name_on; |
| + const char *name_off; |
| + const char *state_on; |
| + const char *state_off; |
| + int irq; |
| + struct work_struct work; |
| +}; |
| + |
| +static void gpio_switch_work(struct work_struct *work) |
| +{ |
| + int state; |
| + struct gpio_switch_data *data = |
| + container_of(work, struct gpio_switch_data, work); |
| + |
| + state = gpio_get_value(data->gpio); |
| + switch_set_state(&data->sdev, state); |
| +} |
| + |
| +static irqreturn_t gpio_irq_handler(int irq, void *dev_id) |
| +{ |
| + struct gpio_switch_data *switch_data = |
| + (struct gpio_switch_data *)dev_id; |
| + |
| + schedule_work(&switch_data->work); |
| + return IRQ_HANDLED; |
| +} |
| + |
| +static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf) |
| +{ |
| + struct gpio_switch_data *switch_data = |
| + container_of(sdev, struct gpio_switch_data, sdev); |
| + const char *state; |
| + if (switch_get_state(sdev)) |
| + state = switch_data->state_on; |
| + else |
| + state = switch_data->state_off; |
| + |
| + if (state) |
| + return sprintf(buf, "%s\n", state); |
| + return -1; |
| +} |
| + |
| +static int gpio_switch_probe(struct platform_device *pdev) |
| +{ |
| + struct gpio_switch_platform_data *pdata = pdev->dev.platform_data; |
| + struct gpio_switch_data *switch_data; |
| + int ret = 0; |
| + |
| + if (!pdata) |
| + return -EBUSY; |
| + |
| + switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL); |
| + if (!switch_data) |
| + return -ENOMEM; |
| + |
| + switch_data->sdev.name = pdata->name; |
| + switch_data->gpio = pdata->gpio; |
| + switch_data->name_on = pdata->name_on; |
| + switch_data->name_off = pdata->name_off; |
| + switch_data->state_on = pdata->state_on; |
| + switch_data->state_off = pdata->state_off; |
| + switch_data->sdev.print_state = switch_gpio_print_state; |
| + |
| + ret = switch_dev_register(&switch_data->sdev); |
| + if (ret < 0) |
| + goto err_switch_dev_register; |
| + |
| + ret = gpio_request(switch_data->gpio, pdev->name); |
| + if (ret < 0) |
| + goto err_request_gpio; |
| + |
| + ret = gpio_direction_input(switch_data->gpio); |
| + if (ret < 0) |
| + goto err_set_gpio_input; |
| + |
| + INIT_WORK(&switch_data->work, gpio_switch_work); |
| + |
| + switch_data->irq = gpio_to_irq(switch_data->gpio); |
| + if (switch_data->irq < 0) { |
| + ret = switch_data->irq; |
| + goto err_detect_irq_num_failed; |
| + } |
| + |
| + ret = request_irq(switch_data->irq, gpio_irq_handler, |
| + IRQF_TRIGGER_LOW, pdev->name, switch_data); |
| + if (ret < 0) |
| + goto err_request_irq; |
| + |
| + platform_set_drvdata(pdev, switch_data); |
| + |
| + /* Perform initial detection */ |
| + gpio_switch_work(&switch_data->work); |
| + |
| + return 0; |
| + |
| +err_request_irq: |
| +err_detect_irq_num_failed: |
| +err_set_gpio_input: |
| + gpio_free(switch_data->gpio); |
| +err_request_gpio: |
| + switch_dev_unregister(&switch_data->sdev); |
| +err_switch_dev_register: |
| + kfree(switch_data); |
| + |
| + return ret; |
| +} |
| + |
| +static int __devexit gpio_switch_remove(struct platform_device *pdev) |
| +{ |
| + struct gpio_switch_data *switch_data = platform_get_drvdata(pdev); |
| + |
| + cancel_work_sync(&switch_data->work); |
| + free_irq(switch_data->irq, switch_data); |
| + gpio_free(switch_data->gpio); |
| + switch_dev_unregister(&switch_data->sdev); |
| + kfree(switch_data); |
| + |
| + return 0; |
| +} |
| + |
| +static struct platform_driver gpio_switch_driver = { |
| + .probe = gpio_switch_probe, |
| + .remove = __devexit_p(gpio_switch_remove), |
| + .driver = { |
| + .name = "switch-gpio", |
| + .owner = THIS_MODULE, |
| + }, |
| +}; |
| + |
| +static int __init gpio_switch_init(void) |
| +{ |
| + return platform_driver_register(&gpio_switch_driver); |
| +} |
| + |
| +static void __exit gpio_switch_exit(void) |
| +{ |
| + platform_driver_unregister(&gpio_switch_driver); |
| +} |
| + |
| +module_init(gpio_switch_init); |
| +module_exit(gpio_switch_exit); |
| + |
| +MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); |
| +MODULE_DESCRIPTION("GPIO Switch driver"); |
| +MODULE_LICENSE("GPL"); |
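| |
| Example board data (a sketch; the GPIO number is a placeholder and struct |
| gpio_switch_platform_data is assumed to come from <linux/switch.h>): |
| |
| #include <linux/platform_device.h> |
| #include <linux/switch.h> |
| |
| static struct gpio_switch_platform_data headset_switch_data = { |
| 	.name		= "h2w", |
| 	.gpio		= 28,	/* placeholder detect line */ |
| 	.state_on	= "1", |
| 	.state_off	= "0", |
| }; |
| |
| static struct platform_device headset_switch_device = { |
| 	.name	= "switch-gpio", |
| 	.id	= -1, |
| 	.dev	= { .platform_data = &headset_switch_data }, |
| }; |
| |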
| diff --git a/stblinux-2.6.37/drivers/usb/gadget/Kconfig b/stblinux-2.6.37/drivers/usb/gadget/Kconfig |
| index 607d0db..4409f5a 100644 |
| --- a/stblinux-2.6.37/drivers/usb/gadget/Kconfig |
| +++ b/stblinux-2.6.37/drivers/usb/gadget/Kconfig |
| @@ -827,6 +827,14 @@ config USB_G_PRINTER |
| For more information, see Documentation/usb/gadget_printer.txt |
| which includes sample code for accessing the device file. |
| |
| +config USB_ANDROID |
| + tristate "Android Gadget" |
| + help |
| + The Android gadget provides mass storage and adb transport. |
| + |
| + Say "y" to link the driver statically, or "m" to build a |
| + dynamically linked module called "g_android". |
| + |
| config USB_CDC_COMPOSITE |
| tristate "CDC Composite Device (Ethernet and ACM)" |
| depends on NET |
| diff --git a/stblinux-2.6.37/drivers/usb/gadget/Makefile b/stblinux-2.6.37/drivers/usb/gadget/Makefile |
| index 5780db4..789cd11 100644 |
| --- a/stblinux-2.6.37/drivers/usb/gadget/Makefile |
| +++ b/stblinux-2.6.37/drivers/usb/gadget/Makefile |
| @@ -43,6 +43,7 @@ g_hid-y := hid.o |
| g_dbgp-y := dbgp.o |
| g_nokia-y := nokia.o |
| g_webcam-y := webcam.o |
| +g_android-y := android.o f_adb.o f_mass_storage.o |
| |
| obj-$(CONFIG_USB_ZERO) += g_zero.o |
| obj-$(CONFIG_USB_AUDIO) += g_audio.o |
| @@ -60,3 +61,5 @@ obj-$(CONFIG_USB_G_DBGP) += g_dbgp.o |
| obj-$(CONFIG_USB_G_MULTI) += g_multi.o |
| obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o |
| obj-$(CONFIG_USB_G_WEBCAM) += g_webcam.o |
| +obj-$(CONFIG_USB_ANDROID) += g_android.o |
| + |
| diff --git a/stblinux-2.6.37/drivers/usb/gadget/android.c b/stblinux-2.6.37/drivers/usb/gadget/android.c |
| new file mode 100644 |
| index 0000000..2c175b0 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/usb/gadget/android.c |
| @@ -0,0 +1,354 @@ |
| +/* |
| + * Gadget Driver for Android |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * Author: Mike Lockwood <lockwood@android.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +/* #define DEBUG */ |
| +/* #define VERBOSE_DEBUG */ |
| + |
| +#include <linux/init.h> |
| +#include <linux/module.h> |
| +#include <linux/fs.h> |
| + |
| +#include <linux/delay.h> |
| +#include <linux/kernel.h> |
| +#include <linux/utsname.h> |
| +#include <linux/miscdevice.h> |
| +#include <linux/platform_device.h> |
| + |
| +#include <linux/usb/android.h> |
| +#include <linux/usb/ch9.h> |
| +#include <linux/usb/composite.h> |
| +#include <linux/usb/gadget.h> |
| + |
| +#include "f_mass_storage.h" |
| +#include "f_adb.h" |
| + |
| +#include "gadget_chips.h" |
| + |
| +/* |
| + * Kbuild is not very cooperative with respect to linking separately |
| + * compiled library objects into one module. So for now we won't use |
| + * separate compilation ... ensuring init/exit sections work to shrink |
| + * the runtime footprint, and giving us at least some parts of what |
| + * a "gcc --combine ... part1.c part2.c part3.c ... " build would. |
| + */ |
| +#include "usbstring.c" |
| +#include "config.c" |
| +#include "epautoconf.c" |
| +#include "composite.c" |
| + |
| +MODULE_AUTHOR("Mike Lockwood"); |
| +MODULE_DESCRIPTION("Android Composite USB Driver"); |
| +MODULE_LICENSE("GPL"); |
| +MODULE_VERSION("1.0"); |
| + |
| +static const char longname[] = "Gadget Android"; |
| + |
| +/* Default vendor and product IDs, overridden by platform data */ |
| +#define VENDOR_ID 0x18D1 |
| +#define PRODUCT_ID 0x0001 |
| +#define ADB_PRODUCT_ID 0x0002 |
| + |
| +struct android_dev { |
| + struct usb_composite_dev *cdev; |
| + |
| + int product_id; |
| + int adb_product_id; |
| + int version; |
| + |
| + int adb_enabled; |
| + int nluns; |
| +}; |
| + |
| +static atomic_t adb_enable_excl; |
| +static struct android_dev *_android_dev; |
| + |
| +/* string IDs are assigned dynamically */ |
| + |
| +#define STRING_MANUFACTURER_IDX 0 |
| +#define STRING_PRODUCT_IDX 1 |
| +#define STRING_SERIAL_IDX 2 |
| + |
| +/* String Table */ |
| +static struct usb_string strings_dev[] = { |
| + /* These dummy values should be overridden by platform data */ |
| + [STRING_MANUFACTURER_IDX].s = "Android", |
| + [STRING_PRODUCT_IDX].s = "Android", |
| + [STRING_SERIAL_IDX].s = "0123456789ABCDEF", |
| + { } /* end of list */ |
| +}; |
| + |
| +static struct usb_gadget_strings stringtab_dev = { |
| + .language = 0x0409, /* en-us */ |
| + .strings = strings_dev, |
| +}; |
| + |
| +static struct usb_gadget_strings *dev_strings[] = { |
| + &stringtab_dev, |
| + NULL, |
| +}; |
| + |
| +static struct usb_device_descriptor device_desc = { |
| + .bLength = sizeof(device_desc), |
| + .bDescriptorType = USB_DT_DEVICE, |
| + .bcdUSB = __constant_cpu_to_le16(0x0200), |
| + .bDeviceClass = USB_CLASS_PER_INTERFACE, |
| + .idVendor = __constant_cpu_to_le16(VENDOR_ID), |
| + .idProduct = __constant_cpu_to_le16(PRODUCT_ID), |
| + .bcdDevice = __constant_cpu_to_le16(0xffff), |
| + .bNumConfigurations = 1, |
| +}; |
| + |
| +void android_usb_set_connected(int connected) |
| +{ |
| + if (_android_dev && _android_dev->cdev && _android_dev->cdev->gadget) { |
| + if (connected) |
| + usb_gadget_connect(_android_dev->cdev->gadget); |
| + else |
| + usb_gadget_disconnect(_android_dev->cdev->gadget); |
| + } |
| +} |
| + |
| +static int __init android_bind_config(struct usb_configuration *c) |
| +{ |
| + struct android_dev *dev = _android_dev; |
| + int ret; |
| + printk(KERN_DEBUG "android_bind_config\n"); |
| + |
| + ret = mass_storage_function_add(dev->cdev, c, dev->nluns); |
| + if (ret) |
| + return ret; |
| + return adb_function_add(dev->cdev, c); |
| +} |
| + |
| +static struct usb_configuration android_config_driver = { |
| + .label = "android", |
| + .bind = android_bind_config, |
| + .bConfigurationValue = 1, |
| + .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, |
| + .bMaxPower = 0xFA, /* 500 mA (2 mA units) */ |
| +}; |
| + |
| +static int __init android_bind(struct usb_composite_dev *cdev) |
| +{ |
| + struct android_dev *dev = _android_dev; |
| + struct usb_gadget *gadget = cdev->gadget; |
| + int gcnum; |
| + int id; |
| + int ret; |
| + |
| + printk(KERN_INFO "android_bind\n"); |
| + |
| + /* Allocate string descriptor numbers ... note that string |
| + * contents can be overridden by the composite_dev glue. |
| + */ |
| + id = usb_string_id(cdev); |
| + if (id < 0) |
| + return id; |
| + strings_dev[STRING_MANUFACTURER_IDX].id = id; |
| + device_desc.iManufacturer = id; |
| + |
| + id = usb_string_id(cdev); |
| + if (id < 0) |
| + return id; |
| + strings_dev[STRING_PRODUCT_IDX].id = id; |
| + device_desc.iProduct = id; |
| + |
| + id = usb_string_id(cdev); |
| + if (id < 0) |
| + return id; |
| + strings_dev[STRING_SERIAL_IDX].id = id; |
| + device_desc.iSerialNumber = id; |
| + |
| + if (gadget->ops->wakeup) |
| + android_config_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP; |
| + |
| + /* register our configuration */ |
| + ret = usb_add_config(cdev, &android_config_driver); |
| + if (ret) { |
| + printk(KERN_ERR "usb_add_config failed\n"); |
| + return ret; |
| + } |
| + |
| + gcnum = usb_gadget_controller_number(gadget); |
| + if (gcnum >= 0) |
| + device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum); |
| + else { |
| + /* the android gadget is so simple (for now, no altsettings) that |
| + * it SHOULD NOT have problems with bulk-capable hardware. |
| + * so just warn about unrecognized controllers -- don't panic. |
| + * |
| + * things like configuration and altsetting numbering |
| + * can need hardware-specific attention though. |
| + */ |
| + pr_warning("%s: controller '%s' not recognized\n", |
| + longname, gadget->name); |
| + device_desc.bcdDevice = __constant_cpu_to_le16(0x9999); |
| + } |
| + |
| + usb_gadget_set_selfpowered(gadget); |
| + dev->cdev = cdev; |
| + |
| + return 0; |
| +} |
| + |
| +static struct usb_composite_driver android_usb_driver = { |
| + .name = "android_usb", |
| + .dev = &device_desc, |
| + .strings = dev_strings, |
| + .bind = android_bind, |
| +}; |
| + |
| +static void enable_adb(struct android_dev *dev, int enable) |
| +{ |
| + if (enable != dev->adb_enabled) { |
| + dev->adb_enabled = enable; |
| + adb_function_enable(enable); |
| + |
| + /* set product ID to the appropriate value */ |
| + if (enable) |
| + device_desc.idProduct = |
| + __constant_cpu_to_le16(dev->adb_product_id); |
| + else |
| + device_desc.idProduct = |
| + __constant_cpu_to_le16(dev->product_id); |
| + if (dev->cdev) |
| + dev->cdev->desc.idProduct = device_desc.idProduct; |
| + |
| + /* force reenumeration */ |
| + if (dev->cdev && dev->cdev->gadget && |
| + dev->cdev->gadget->speed != USB_SPEED_UNKNOWN) { |
| + usb_gadget_disconnect(dev->cdev->gadget); |
| + msleep(10); |
| + usb_gadget_connect(dev->cdev->gadget); |
| + } |
| + } |
| +} |
| + |
| +static int adb_enable_open(struct inode *ip, struct file *fp) |
| +{ |
| + if (atomic_inc_return(&adb_enable_excl) != 1) { |
| + atomic_dec(&adb_enable_excl); |
| + return -EBUSY; |
| + } |
| + |
| + printk(KERN_INFO "enabling adb\n"); |
| + enable_adb(_android_dev, 1); |
| + |
| + return 0; |
| +} |
| + |
| +static int adb_enable_release(struct inode *ip, struct file *fp) |
| +{ |
| + printk(KERN_INFO "disabling adb\n"); |
| + enable_adb(_android_dev, 0); |
| + atomic_dec(&adb_enable_excl); |
| + return 0; |
| +} |
| + |
| +static const struct file_operations adb_enable_fops = { |
| + .owner = THIS_MODULE, |
| + .open = adb_enable_open, |
| + .release = adb_enable_release, |
| +}; |
| + |
| +static struct miscdevice adb_enable_device = { |
| + .minor = MISC_DYNAMIC_MINOR, |
| + .name = "android_adb_enable", |
| + .fops = &adb_enable_fops, |
| +}; |
| + |
| +static int __init android_probe(struct platform_device *pdev) |
| +{ |
| + struct android_usb_platform_data *pdata = pdev->dev.platform_data; |
| + struct android_dev *dev = _android_dev; |
| + |
| + printk(KERN_INFO "android_probe pdata: %p\n", pdata); |
| + |
| + if (pdata) { |
| + if (pdata->vendor_id) |
| + device_desc.idVendor = |
| + __constant_cpu_to_le16(pdata->vendor_id); |
| + if (pdata->product_id) { |
| + dev->product_id = pdata->product_id; |
| + device_desc.idProduct = |
| + __constant_cpu_to_le16(pdata->product_id); |
| + } |
| + if (pdata->adb_product_id) |
| + dev->adb_product_id = pdata->adb_product_id; |
| + if (pdata->version) |
| + dev->version = pdata->version; |
| + |
| + if (pdata->product_name) |
| + strings_dev[STRING_PRODUCT_IDX].s = pdata->product_name; |
| + if (pdata->manufacturer_name) |
| + strings_dev[STRING_MANUFACTURER_IDX].s = |
| + pdata->manufacturer_name; |
| + if (pdata->serial_number) |
| + strings_dev[STRING_SERIAL_IDX].s = pdata->serial_number; |
| + dev->nluns = pdata->nluns; |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +static struct platform_driver android_platform_driver = { |
| + .driver = { .name = "android_usb", }, |
| + .probe = android_probe, |
| +}; |
| + |
| +static int __init init(void) |
| +{ |
| + struct android_dev *dev; |
| + int ret; |
| + |
| + printk(KERN_INFO "android init\n"); |
| + |
| + dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
| + if (!dev) |
| + return -ENOMEM; |
| + |
| + /* set default values, which should be overridden by platform data */ |
| + dev->product_id = PRODUCT_ID; |
| + dev->adb_product_id = ADB_PRODUCT_ID; |
| + _android_dev = dev; |
| + |
| + ret = platform_driver_register(&android_platform_driver); |
| + if (ret) |
| + return ret; |
| + ret = misc_register(&adb_enable_device); |
| + if (ret) { |
| + platform_driver_unregister(&android_platform_driver); |
| + return ret; |
| + } |
| + ret = usb_composite_register(&android_usb_driver); |
| + if (ret) { |
| + misc_deregister(&adb_enable_device); |
| + platform_driver_unregister(&android_platform_driver); |
| + } |
| + return ret; |
| +} |
| +module_init(init); |
| + |
| +static void __exit cleanup(void) |
| +{ |
| + usb_composite_unregister(&android_usb_driver); |
| + misc_deregister(&adb_enable_device); |
| + platform_driver_unregister(&android_platform_driver); |
| + kfree(_android_dev); |
| + _android_dev = NULL; |
| +} |
| +module_exit(cleanup); |
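| |
| Example board data (a sketch with placeholder IDs and strings; every field |
| below is one that android_probe() above reads from platform data): |
| |
| #include <linux/platform_device.h> |
| #include <linux/usb/android.h> |
| |
| static struct android_usb_platform_data board_usb_data = { |
| 	.vendor_id		= 0x18d1, |
| 	.product_id		= 0x0001, |
| 	.adb_product_id		= 0x0002, |
| 	.version		= 0x0100, |
| 	.product_name		= "Example Product", |
| 	.manufacturer_name	= "Example Vendor", |
| 	.serial_number		= "0123456789ABCDEF", |
| 	.nluns			= 1, |
| }; |
| |
| static struct platform_device board_usb_device = { |
| 	.name	= "android_usb", |
| 	.id	= -1, |
| 	.dev	= { .platform_data = &board_usb_data }, |
| }; |
| |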
| diff --git a/stblinux-2.6.37/drivers/usb/gadget/f_adb.c b/stblinux-2.6.37/drivers/usb/gadget/f_adb.c |
| new file mode 100644 |
| index 0000000..6ca5327 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/usb/gadget/f_adb.c |
| @@ -0,0 +1,668 @@ |
| +/* |
| + * Gadget Driver for Android ADB |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * Author: Mike Lockwood <lockwood@android.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +/* #define DEBUG */ |
| +/* #define VERBOSE_DEBUG */ |
| + |
| +#include <linux/module.h> |
| +#include <linux/init.h> |
| +#include <linux/poll.h> |
| +#include <linux/delay.h> |
| +#include <linux/wait.h> |
| +#include <linux/err.h> |
| +#include <linux/interrupt.h> |
| + |
| +#include <linux/types.h> |
| +#include <linux/device.h> |
| +#include <linux/miscdevice.h> |
| + |
| +#include <linux/usb/ch9.h> |
| +#include <linux/usb/composite.h> |
| +#include <linux/usb/gadget.h> |
| + |
| +#include "f_adb.h" |
| + |
| +#define BULK_BUFFER_SIZE 4096 |
| + |
| +/* number of rx and tx requests to allocate */ |
| +#define RX_REQ_MAX 4 |
| +#define TX_REQ_MAX 4 |
| + |
| +static const char shortname[] = "android_adb"; |
| + |
| +struct adb_dev { |
| + struct usb_function function; |
| + struct usb_composite_dev *cdev; |
| + spinlock_t lock; |
| + |
| + struct usb_ep *ep_in; |
| + struct usb_ep *ep_out; |
| + |
| + int online; |
| + int error; |
| + |
| + atomic_t read_excl; |
| + atomic_t write_excl; |
| + atomic_t open_excl; |
| + |
| + struct list_head tx_idle; |
| + struct list_head rx_idle; |
| + struct list_head rx_done; |
| + |
| + wait_queue_head_t read_wq; |
| + wait_queue_head_t write_wq; |
| + |
| + /* the request we're currently reading from */ |
| + struct usb_request *read_req; |
| + unsigned char *read_buf; |
| + unsigned read_count; |
| +}; |
| + |
| +static struct usb_interface_descriptor adb_interface_desc = { |
| + .bLength = USB_DT_INTERFACE_SIZE, |
| + .bDescriptorType = USB_DT_INTERFACE, |
| + .bInterfaceNumber = 0, |
| + .bNumEndpoints = 2, |
| + .bInterfaceClass = 0xFF, |
| + .bInterfaceSubClass = 0x42, |
| + .bInterfaceProtocol = 1, |
| +}; |
| + |
| +static struct usb_endpoint_descriptor adb_highspeed_in_desc = { |
| + .bLength = USB_DT_ENDPOINT_SIZE, |
| + .bDescriptorType = USB_DT_ENDPOINT, |
| + .bEndpointAddress = USB_DIR_IN, |
| + .bmAttributes = USB_ENDPOINT_XFER_BULK, |
| + .wMaxPacketSize = __constant_cpu_to_le16(512), |
| +}; |
| + |
| +static struct usb_endpoint_descriptor adb_highspeed_out_desc = { |
| + .bLength = USB_DT_ENDPOINT_SIZE, |
| + .bDescriptorType = USB_DT_ENDPOINT, |
| + .bEndpointAddress = USB_DIR_OUT, |
| + .bmAttributes = USB_ENDPOINT_XFER_BULK, |
| + .wMaxPacketSize = __constant_cpu_to_le16(512), |
| +}; |
| + |
| +static struct usb_endpoint_descriptor adb_fullspeed_in_desc = { |
| + .bLength = USB_DT_ENDPOINT_SIZE, |
| + .bDescriptorType = USB_DT_ENDPOINT, |
| + .bEndpointAddress = USB_DIR_IN, |
| + .bmAttributes = USB_ENDPOINT_XFER_BULK, |
| +}; |
| + |
| +static struct usb_endpoint_descriptor adb_fullspeed_out_desc = { |
| + .bLength = USB_DT_ENDPOINT_SIZE, |
| + .bDescriptorType = USB_DT_ENDPOINT, |
| + .bEndpointAddress = USB_DIR_OUT, |
| + .bmAttributes = USB_ENDPOINT_XFER_BULK, |
| +}; |
| + |
| +static struct usb_descriptor_header *fs_adb_descs[] = { |
| + (struct usb_descriptor_header *) &adb_interface_desc, |
| + (struct usb_descriptor_header *) &adb_fullspeed_in_desc, |
| + (struct usb_descriptor_header *) &adb_fullspeed_out_desc, |
| + NULL, |
| +}; |
| + |
| +static struct usb_descriptor_header *hs_adb_descs[] = { |
| + (struct usb_descriptor_header *) &adb_interface_desc, |
| + (struct usb_descriptor_header *) &adb_highspeed_in_desc, |
| + (struct usb_descriptor_header *) &adb_highspeed_out_desc, |
| + NULL, |
| +}; |
| + |
| +/* used when adb function is disabled */ |
| +static struct usb_descriptor_header *null_adb_descs[] = { |
| + NULL, |
| +}; |
| + |
| + |
| +/* temporary variable used between adb_open() and adb_gadget_bind() */ |
| +static struct adb_dev *_adb_dev; |
| + |
| +static inline struct adb_dev *func_to_dev(struct usb_function *f) |
| +{ |
| + return container_of(f, struct adb_dev, function); |
| +} |
| + |
| + |
| +static struct usb_request *adb_request_new(struct usb_ep *ep, int buffer_size) |
| +{ |
| + struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL); |
| + if (!req) |
| + return NULL; |
| + |
| + /* now allocate buffers for the requests */ |
| + req->buf = kmalloc(buffer_size, GFP_KERNEL); |
| + if (!req->buf) { |
| + usb_ep_free_request(ep, req); |
| + return NULL; |
| + } |
| + |
| + return req; |
| +} |
| + |
| +static void adb_request_free(struct usb_request *req, struct usb_ep *ep) |
| +{ |
| + if (req) { |
| + kfree(req->buf); |
| + usb_ep_free_request(ep, req); |
| + } |
| +} |
| + |
| +static inline int _lock(atomic_t *excl) |
| +{ |
| + if (atomic_inc_return(excl) == 1) { |
| + return 0; |
| + } else { |
| + atomic_dec(excl); |
| + return -1; |
| + } |
| +} |
| + |
| +static inline void _unlock(atomic_t *excl) |
| +{ |
| + atomic_dec(excl); |
| +} |
| + |
| +/* add a request to the tail of a list */ |
| +void req_put(struct adb_dev *dev, struct list_head *head, |
| + struct usb_request *req) |
| +{ |
| + unsigned long flags; |
| + |
| + spin_lock_irqsave(&dev->lock, flags); |
| + list_add_tail(&req->list, head); |
| + spin_unlock_irqrestore(&dev->lock, flags); |
| +} |
| + |
| +/* remove a request from the head of a list */ |
| +struct usb_request *req_get(struct adb_dev *dev, struct list_head *head) |
| +{ |
| + unsigned long flags; |
| + struct usb_request *req; |
| + |
| + spin_lock_irqsave(&dev->lock, flags); |
| + if (list_empty(head)) { |
| + req = 0; |
| + } else { |
| + req = list_first_entry(head, struct usb_request, list); |
| + list_del(&req->list); |
| + } |
| + spin_unlock_irqrestore(&dev->lock, flags); |
| + return req; |
| +} |
| + |
| +static void adb_complete_in(struct usb_ep *ep, struct usb_request *req) |
| +{ |
| + struct adb_dev *dev = _adb_dev; |
| + |
| + if (req->status != 0) |
| + dev->error = 1; |
| + |
| + req_put(dev, &dev->tx_idle, req); |
| + |
| + wake_up(&dev->write_wq); |
| +} |
| + |
| +static void adb_complete_out(struct usb_ep *ep, struct usb_request *req) |
| +{ |
| + struct adb_dev *dev = _adb_dev; |
| + |
| + if (req->status != 0) { |
| + dev->error = 1; |
| + req_put(dev, &dev->rx_idle, req); |
| + } else { |
| + req_put(dev, &dev->rx_done, req); |
| + } |
| + |
| + wake_up(&dev->read_wq); |
| +} |
| + |
| +static int __init create_bulk_endpoints(struct adb_dev *dev, |
| + struct usb_endpoint_descriptor *in_desc, |
| + struct usb_endpoint_descriptor *out_desc) |
| +{ |
| + struct usb_composite_dev *cdev = dev->cdev; |
| + struct usb_request *req; |
| + struct usb_ep *ep; |
| + int i; |
| + |
| + DBG(cdev, "create_bulk_endpoints dev: %p\n", dev); |
| + |
| + ep = usb_ep_autoconfig(cdev->gadget, in_desc); |
| + if (!ep) { |
| + DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); |
| + return -ENODEV; |
| + } |
| + DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name); |
| + dev->ep_in = ep; |
| + |
| + ep = usb_ep_autoconfig(cdev->gadget, out_desc); |
| + if (!ep) { |
| + DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); |
| + return -ENODEV; |
| + } |
| + DBG(cdev, "usb_ep_autoconfig for adb ep_out got %s\n", ep->name); |
| + dev->ep_out = ep; |
| + |
| + /* now allocate requests for our endpoints */ |
| + for (i = 0; i < RX_REQ_MAX; i++) { |
| + req = adb_request_new(dev->ep_out, BULK_BUFFER_SIZE); |
| + if (!req) |
| + goto fail; |
| + req->complete = adb_complete_out; |
| + req_put(dev, &dev->rx_idle, req); |
| + } |
| + |
| + for (i = 0; i < TX_REQ_MAX; i++) { |
| + req = adb_request_new(dev->ep_in, BULK_BUFFER_SIZE); |
| + if (!req) |
| + goto fail; |
| + req->complete = adb_complete_in; |
| + req_put(dev, &dev->tx_idle, req); |
| + } |
| + |
| + return 0; |
| + |
| +fail: |
| + printk(KERN_ERR "adb_bind() could not allocate requests\n"); |
| + return -1; |
| +} |
| + |
| +static ssize_t adb_read(struct file *fp, char __user *buf, |
| + size_t count, loff_t *pos) |
| +{ |
| + struct adb_dev *dev = fp->private_data; |
| + struct usb_composite_dev *cdev = dev->cdev; |
| + struct usb_request *req; |
| + int r = count, xfer; |
| + int ret; |
| + |
| + DBG(cdev, "adb_read(%d)\n", count); |
| + |
| + if (_lock(&dev->read_excl)) |
| + return -EBUSY; |
| + |
| + /* we will block until we're online */ |
| + while (!(dev->online || dev->error)) { |
| + DBG(cdev, "adb_read: waiting for online state\n"); |
| + ret = wait_event_interruptible(dev->read_wq, |
| + (dev->online || dev->error)); |
| + if (ret < 0) { |
| + _unlock(&dev->read_excl); |
| + return ret; |
| + } |
| + } |
| + |
| + while (count > 0) { |
| + if (dev->error) { |
| + DBG(cdev, "adb_read dev->error\n"); |
| + r = -EIO; |
| + break; |
| + } |
| + |
| + /* if we have idle read requests, get them queued */ |
| + while ((req = req_get(dev, &dev->rx_idle))) { |
| +requeue_req: |
| + req->length = BULK_BUFFER_SIZE; |
| + ret = usb_ep_queue(dev->ep_out, req, GFP_ATOMIC); |
| + |
| + if (ret < 0) { |
| + r = -EIO; |
| + dev->error = 1; |
| + req_put(dev, &dev->rx_idle, req); |
| + goto fail; |
| + } else { |
| + DBG(cdev, "rx %p queue\n", req); |
| + } |
| + } |
| + |
| + /* if we have data pending, give it to userspace */ |
| + if (dev->read_count > 0) { |
| + if (dev->read_count < count) |
| + xfer = dev->read_count; |
| + else |
| + xfer = count; |
| + |
| + if (copy_to_user(buf, dev->read_buf, xfer)) { |
| + r = -EFAULT; |
| + break; |
| + } |
| + dev->read_buf += xfer; |
| + dev->read_count -= xfer; |
| + buf += xfer; |
| + count -= xfer; |
| + |
| + /* if we've emptied the buffer, release the request */ |
| + if (dev->read_count == 0) { |
| + req_put(dev, &dev->rx_idle, dev->read_req); |
| + dev->read_req = 0; |
| + } |
| + continue; |
| + } |
| + |
| + /* wait for a request to complete */ |
| + req = 0; |
| + ret = wait_event_interruptible(dev->read_wq, |
| + ((req = req_get(dev, &dev->rx_done)) || dev->error)); |
| + if (req != 0) { |
| + /* if we got a 0-len one we need to put it back into |
| + ** service. if we made it the current read req we'd |
| + ** be stuck forever |
| + */ |
| + if (req->actual == 0) |
| + goto requeue_req; |
| + |
| + dev->read_req = req; |
| + dev->read_count = req->actual; |
| + dev->read_buf = req->buf; |
| + DBG(cdev, "rx %p %d\n", req, req->actual); |
| + } |
| + |
| + if (ret < 0) { |
| + r = ret; |
| + break; |
| + } |
| + } |
| + |
| +fail: |
| + _unlock(&dev->read_excl); |
| + DBG(cdev, "adb_read returning %d\n", r); |
| + return r; |
| +} |
| + |
| +static ssize_t adb_write(struct file *fp, const char __user *buf, |
| + size_t count, loff_t *pos) |
| +{ |
| + struct adb_dev *dev = fp->private_data; |
| + struct usb_composite_dev *cdev = dev->cdev; |
| + struct usb_request *req = 0; |
| + int r = count, xfer; |
| + int ret; |
| + |
| + DBG(cdev, "adb_write(%d)\n", count); |
| + |
| + if (_lock(&dev->write_excl)) |
| + return -EBUSY; |
| + |
| + while (count > 0) { |
| + if (dev->error) { |
| + DBG(cdev, "adb_write dev->error\n"); |
| + r = -EIO; |
| + break; |
| + } |
| + |
| + /* get an idle tx request to use */ |
| + req = 0; |
| + ret = wait_event_interruptible(dev->write_wq, |
| + ((req = req_get(dev, &dev->tx_idle)) || dev->error)); |
| + |
| + if (ret < 0) { |
| + r = ret; |
| + break; |
| + } |
| + |
| + if (req != 0) { |
| + if (count > BULK_BUFFER_SIZE) |
| + xfer = BULK_BUFFER_SIZE; |
| + else |
| + xfer = count; |
| + if (copy_from_user(req->buf, buf, xfer)) { |
| + r = -EFAULT; |
| + break; |
| + } |
| + |
| + req->length = xfer; |
| + ret = usb_ep_queue(dev->ep_in, req, GFP_ATOMIC); |
| + if (ret < 0) { |
| + DBG(cdev, "adb_write: xfer error %d\n", ret); |
| + dev->error = 1; |
| + r = -EIO; |
| + break; |
| + } |
| + |
| + buf += xfer; |
| + count -= xfer; |
| + |
| + /* zero this so we don't try to free it on error exit */ |
| + req = 0; |
| + } |
| + } |
| + |
| + if (req) |
| + req_put(dev, &dev->tx_idle, req); |
| + |
| + _unlock(&dev->write_excl); |
| + DBG(cdev, "adb_write returning %d\n", r); |
| + return r; |
| +} |
| + |
| +static int adb_open(struct inode *ip, struct file *fp) |
| +{ |
| + printk(KERN_INFO "adb_open\n"); |
| + if (_lock(&_adb_dev->open_excl)) |
| + return -EBUSY; |
| + |
| + fp->private_data = _adb_dev; |
| + |
| + /* clear the error latch */ |
| + _adb_dev->error = 0; |
| + |
| + return 0; |
| +} |
| + |
| +static int adb_release(struct inode *ip, struct file *fp) |
| +{ |
| + printk(KERN_INFO "adb_release\n"); |
| + _unlock(&_adb_dev->open_excl); |
| + return 0; |
| +} |
| + |
| +/* file operations for ADB device /dev/android_adb */ |
| +static const struct file_operations adb_fops = { |
| + .owner = THIS_MODULE, |
| + .read = adb_read, |
| + .write = adb_write, |
| + .open = adb_open, |
| + .release = adb_release, |
| +}; |
| + |
| +static struct miscdevice adb_device = { |
| + .minor = MISC_DYNAMIC_MINOR, |
| + .name = shortname, |
| + .fops = &adb_fops, |
| +}; |
| + |
| +static int __init |
| +adb_function_bind(struct usb_configuration *c, struct usb_function *f) |
| +{ |
| + struct usb_composite_dev *cdev = c->cdev; |
| + struct adb_dev *dev = func_to_dev(f); |
| + int id; |
| + int ret; |
| + |
| + dev->cdev = cdev; |
| + DBG(cdev, "adb_function_bind dev: %p\n", dev); |
| + |
| + /* allocate interface ID(s) */ |
| + id = usb_interface_id(c, f); |
| + if (id < 0) |
| + return id; |
| + adb_interface_desc.bInterfaceNumber = id; |
| + |
| + /* allocate endpoints */ |
| + ret = create_bulk_endpoints(dev, &adb_fullspeed_in_desc, |
| + &adb_fullspeed_out_desc); |
| + if (ret) |
| + return ret; |
| + |
| + /* support high speed hardware */ |
| + if (gadget_is_dualspeed(c->cdev->gadget)) { |
| + adb_highspeed_in_desc.bEndpointAddress = |
| + adb_fullspeed_in_desc.bEndpointAddress; |
| + adb_highspeed_out_desc.bEndpointAddress = |
| + adb_fullspeed_out_desc.bEndpointAddress; |
| + } |
| + |
| + DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", |
| + gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full", |
| + f->name, dev->ep_in->name, dev->ep_out->name); |
| + return 0; |
| +} |
| + |
| +static void |
| +adb_function_unbind(struct usb_configuration *c, struct usb_function *f) |
| +{ |
| + struct adb_dev *dev = func_to_dev(f); |
| + struct usb_request *req; |
| + |
| + spin_lock_irq(&dev->lock); |
| + |
| + while ((req = req_get(dev, &dev->rx_idle))) |
| + adb_request_free(req, dev->ep_out); |
| + while ((req = req_get(dev, &dev->tx_idle))) |
| + adb_request_free(req, dev->ep_in); |
| + |
| + dev->online = 0; |
| + dev->error = 1; |
| + spin_unlock_irq(&dev->lock); |
| + |
| + misc_deregister(&adb_device); |
| + kfree(_adb_dev); |
| + _adb_dev = NULL; |
| +} |
| + |
| +static int adb_function_set_alt(struct usb_function *f, |
| + unsigned intf, unsigned alt) |
| +{ |
| + struct adb_dev *dev = func_to_dev(f); |
| + struct usb_composite_dev *cdev = f->config->cdev; |
| + int ret; |
| + |
| + DBG(cdev, "adb_function_set_alt intf: %d alt: %d\n", intf, alt); |
| + ret = usb_ep_enable(dev->ep_in, |
| + ep_choose(cdev->gadget, |
| + &adb_highspeed_in_desc, |
| + &adb_fullspeed_in_desc)); |
| + if (ret) |
| + return ret; |
| + ret = usb_ep_enable(dev->ep_out, |
| + ep_choose(cdev->gadget, |
| + &adb_highspeed_out_desc, |
| + &adb_fullspeed_out_desc)); |
| + if (ret) { |
| + usb_ep_disable(dev->ep_in); |
| + return ret; |
| + } |
| + dev->online = 1; |
| + |
| + /* readers may be blocked waiting for us to go online */ |
| + wake_up(&dev->read_wq); |
| + return 0; |
| +} |
| + |
| +static void adb_function_disable(struct usb_function *f) |
| +{ |
| + struct adb_dev *dev = func_to_dev(f); |
| + struct usb_composite_dev *cdev = dev->cdev; |
| + |
| + DBG(cdev, "adb_function_disable\n"); |
| + dev->online = 0; |
| + dev->error = 1; |
| + usb_ep_disable(dev->ep_in); |
| + usb_ep_disable(dev->ep_out); |
| + |
| + /* readers may be blocked waiting for us to go online */ |
| + wake_up(&dev->read_wq); |
| + |
| + VDBG(cdev, "%s disabled\n", dev->function.name); |
| +} |
| + |
| +int __init adb_function_add(struct usb_composite_dev *cdev, |
| + struct usb_configuration *c) |
| +{ |
| + struct adb_dev *dev; |
| + int ret; |
| + |
| + printk(KERN_INFO "adb_function_add\n"); |
| + |
| + dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
| + if (!dev) |
| + return -ENOMEM; |
| + |
| + spin_lock_init(&dev->lock); |
| + |
| + init_waitqueue_head(&dev->read_wq); |
| + init_waitqueue_head(&dev->write_wq); |
| + |
| + atomic_set(&dev->open_excl, 0); |
| + atomic_set(&dev->read_excl, 0); |
| + atomic_set(&dev->write_excl, 0); |
| + |
| + INIT_LIST_HEAD(&dev->rx_idle); |
| + INIT_LIST_HEAD(&dev->rx_done); |
| + INIT_LIST_HEAD(&dev->tx_idle); |
| + |
| + dev->cdev = cdev; |
| + dev->function.name = "adb"; |
| + dev->function.descriptors = null_adb_descs; |
| + dev->function.hs_descriptors = null_adb_descs; |
| + dev->function.bind = adb_function_bind; |
| + dev->function.unbind = adb_function_unbind; |
| + dev->function.set_alt = adb_function_set_alt; |
| + dev->function.disable = adb_function_disable; |
| + |
| + /* _adb_dev must be set before calling usb_gadget_register_driver */ |
| + _adb_dev = dev; |
| + |
| + ret = misc_register(&adb_device); |
| + if (ret) |
| + goto err1; |
| + ret = usb_add_function(c, &dev->function); |
| + if (ret) |
| + goto err2; |
| + |
| + return 0; |
| + |
| +err2: |
| + misc_deregister(&adb_device); |
| +err1: |
| + kfree(dev); |
| + printk(KERN_ERR "adb gadget driver failed to initialize\n"); |
| + return ret; |
| +} |
| + |
| +void adb_function_enable(int enable) |
| +{ |
| + struct adb_dev *dev = _adb_dev; |
| + |
| + if (dev) { |
| + DBG(dev->cdev, "adb_function_enable(%s)\n", |
| + enable ? "true" : "false"); |
| + |
| + if (enable) { |
| + dev->function.descriptors = fs_adb_descs; |
| + dev->function.hs_descriptors = hs_adb_descs; |
| + } else { |
| + dev->function.descriptors = null_adb_descs; |
| + dev->function.hs_descriptors = null_adb_descs; |
| + } |
| + } |
| +} |
| + |
| diff --git a/stblinux-2.6.37/drivers/usb/gadget/f_adb.h b/stblinux-2.6.37/drivers/usb/gadget/f_adb.h |
| new file mode 100644 |
| index 0000000..4854ff6 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/usb/gadget/f_adb.h |
| @@ -0,0 +1,25 @@ |
| +/* |
| + * Gadget Driver for Android ADB |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * Author: Mike Lockwood <lockwood@android.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#ifndef __F_ADB_H |
| +#define __F_ADB_H |
| + |
| +int adb_function_add(struct usb_composite_dev *cdev, |
| + struct usb_configuration *c); |
| +void adb_function_enable(int enable); |
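| + |
| +/* |
| + * Usage sketch (illustrative; "config" and the call site are |
| + * assumptions, not defined by this header): a composite gadget |
| + * driver's bind callback might do |
| + * |
| + *	ret = adb_function_add(cdev, config); |
| + *	if (ret) |
| + *		return ret; |
| + *	adb_function_enable(1);	(1 = expose the real adb descriptors) |
| + */ |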
| + |
| +#endif /* __F_ADB_H */ |
| diff --git a/stblinux-2.6.37/drivers/usb/gadget/f_mass_storage.h b/stblinux-2.6.37/drivers/usb/gadget/f_mass_storage.h |
| new file mode 100644 |
| index 0000000..8e63ac0 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/drivers/usb/gadget/f_mass_storage.h |
| @@ -0,0 +1,52 @@ |
| +/* |
| + * drivers/usb/gadget/f_mass_storage.h |
| + * |
| + * Function Driver for USB Mass Storage |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * Author: Mike Lockwood <lockwood@android.com> |
| + * |
| + * Based heavily on the file_storage gadget driver in |
| + * drivers/usb/gadget/file_storage.c and licensed under the same terms: |
| + * |
| + * Copyright (C) 2003-2007 Alan Stern |
| + * All rights reserved. |
| + * |
| + * Redistribution and use in source and binary forms, with or without |
| + * modification, are permitted provided that the following conditions |
| + * are met: |
| + * 1. Redistributions of source code must retain the above copyright |
| + * notice, this list of conditions, and the following disclaimer, |
| + * without modification. |
| + * 2. Redistributions in binary form must reproduce the above copyright |
| + * notice, this list of conditions and the following disclaimer in the |
| + * documentation and/or other materials provided with the distribution. |
| + * 3. The names of the above-listed copyright holders may not be used |
| + * to endorse or promote products derived from this software without |
| + * specific prior written permission. |
| + * |
| + * ALTERNATIVELY, this software may be distributed under the terms of the |
| + * GNU General Public License ("GPL") as published by the Free Software |
| + * Foundation, either version 2 of that License or (at your option) any |
| + * later version. |
| + * |
| + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS |
| + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, |
| + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
| + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR |
| + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
| + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
| + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
| + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
| + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
| + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| + */ |
| + |
| +#ifndef __F_MASS_STORAGE_H |
| +#define __F_MASS_STORAGE_H |
| + |
| +int mass_storage_function_add(struct usb_composite_dev *cdev, |
| + struct usb_configuration *c, int nluns); |
| + |
| +#endif /* __F_MASS_STORAGE_H */ |
| diff --git a/stblinux-2.6.37/include/linux/android_aid.h b/stblinux-2.6.37/include/linux/android_aid.h |
| new file mode 100644 |
| index 0000000..7f16a14 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/include/linux/android_aid.h |
| @@ -0,0 +1,26 @@ |
| +/* include/linux/android_aid.h |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#ifndef _LINUX_ANDROID_AID_H |
| +#define _LINUX_ANDROID_AID_H |
| + |
| +/* AIDs that the kernel treats differently */ |
| +#define AID_NET_BT_ADMIN 3001 |
| +#define AID_NET_BT 3002 |
| +#define AID_INET 3003 |
| +#define AID_NET_RAW 3004 |
| +#define AID_NET_ADMIN 3005 |
| + |
| +#endif |
| diff --git a/stblinux-2.6.37/include/linux/android_alarm.h b/stblinux-2.6.37/include/linux/android_alarm.h |
| new file mode 100644 |
| index 0000000..d0cafd6 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/include/linux/android_alarm.h |
| @@ -0,0 +1,62 @@ |
| +/* include/linux/android_alarm.h |
| + * |
| + * Copyright (C) 2006-2007 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#ifndef _LINUX_ANDROID_ALARM_H |
| +#define _LINUX_ANDROID_ALARM_H |
| + |
| +#include <linux/ioctl.h> |
| +#include <linux/time.h> |
| + |
| +enum android_alarm_type { |
| + /* return code bit numbers or set alarm arg */ |
| + ANDROID_ALARM_RTC_WAKEUP, |
| + ANDROID_ALARM_RTC, |
| + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, |
| + ANDROID_ALARM_ELAPSED_REALTIME, |
| + ANDROID_ALARM_SYSTEMTIME, |
| + |
| + ANDROID_ALARM_TYPE_COUNT, |
| + |
| + /* return code bit numbers */ |
| + /* ANDROID_ALARM_TIME_CHANGE = 16 */ |
| +}; |
| + |
| +enum android_alarm_return_flags { |
| + ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP, |
| + ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC, |
| + ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK = |
| + 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP, |
| + ANDROID_ALARM_ELAPSED_REALTIME_MASK = |
| + 1U << ANDROID_ALARM_ELAPSED_REALTIME, |
| + ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME, |
| + ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16 |
| +}; |
| + |
| +/* Disable alarm */ |
| +#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4)) |
| + |
| +/* Ack last alarm and wait for next */ |
| +#define ANDROID_ALARM_WAIT _IO('a', 1) |
| + |
| +#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size) |
| +/* Set alarm */ |
| +#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec) |
| +#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec) |
| +#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec) |
| +#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec) |
| +#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0))) |
| +#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4) |
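| + |
| +/* |
| + * User-space sketch (illustrative; the "/dev/alarm" node name, the |
| + * wakeup_secs value and the missing error handling are assumptions): |
| + * |
| + *	int fd = open("/dev/alarm", O_RDWR); |
| + *	struct timespec ts = { .tv_sec = wakeup_secs, .tv_nsec = 0 }; |
| + * |
| + *	ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP), &ts); |
| + *	ioctl(fd, ANDROID_ALARM_WAIT);	(blocks; returns fired-alarm mask) |
| + */ |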
| + |
| +#endif |
| diff --git a/stblinux-2.6.37/include/linux/android_pmem.h b/stblinux-2.6.37/include/linux/android_pmem.h |
| new file mode 100644 |
| index 0000000..d845483 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/include/linux/android_pmem.h |
| @@ -0,0 +1,80 @@ |
| +/* include/linux/android_pmem.h |
| + * |
| + * Copyright (C) 2007 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#ifndef _ANDROID_PMEM_H_ |
| +#define _ANDROID_PMEM_H_ |
| + |
| +#define PMEM_IOCTL_MAGIC 'p' |
| +#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int) |
| +#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int) |
| +#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int) |
| +#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int) |
| +/* This ioctl allocates pmem space backing the file. It fails if the file |
| + * already has an allocation. Pass the allocation length as the argument |
| + * to the ioctl. */ |
| +#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int) |
| +/* This connects one pmem file to another. Pass the file that is already |
| + * backed in memory as the argument to the ioctl. |
| + */ |
| +#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int) |
| +/* Returns the total size of the pmem region it is sent to as a pmem_region |
| + * struct (with offset set to 0). |
| + */ |
| +#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int) |
| +/* Revokes gpu registers and resets the gpu. Pass a pointer to the |
| + * start of the mapped gpu regs (the vaddr returned by mmap) as the argument. |
| + */ |
| +#define HW3D_REVOKE_GPU _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int) |
| +#define HW3D_GRANT_GPU _IOW(PMEM_IOCTL_MAGIC, 9, unsigned int) |
| +#define HW3D_WAIT_FOR_INTERRUPT _IOW(PMEM_IOCTL_MAGIC, 10, unsigned int) |
| + |
| +int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, |
| + unsigned long *end, struct file **filp); |
| +int get_pmem_user_addr(struct file *file, unsigned long *start, |
| + unsigned long *end); |
| +void put_pmem_file(struct file *file); |
| +void flush_pmem_file(struct file *file, unsigned long start, unsigned long len); |
| + |
| +struct android_pmem_platform_data { |
| +	const char *name; |
| + /* starting physical address of memory region */ |
| + unsigned long start; |
| + /* size of memory region */ |
| + unsigned long size; |
| + /* set to indicate the region should not be managed with an allocator */ |
| + unsigned no_allocator; |
| +	/* set to indicate maps of this region should be cached; if a mix of |
| +	 * cached and uncached is desired, set this and open the device with |
| +	 * O_SYNC to get an uncached mapping */ |
| + unsigned cached; |
| + /* The MSM7k has bits to enable a write buffer in the bus controller*/ |
| + unsigned buffered; |
| +}; |
| + |
| +struct pmem_region { |
| + unsigned long offset; |
| + unsigned long len; |
| +}; |
| + |
| +int pmem_setup(struct android_pmem_platform_data *pdata, |
| + long (*ioctl)(struct file *, unsigned int, unsigned long), |
| + int (*release)(struct inode *, struct file *)); |
| + |
| +int pmem_remap(struct pmem_region *region, struct file *file, |
| + unsigned operation); |
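| + |
| +/* |
| + * User-space sketch (illustrative; the "/dev/pmem" node name, "len" |
| + * and the missing error handling are assumptions): |
| + * |
| + *	int fd = open("/dev/pmem", O_RDWR); |
| + *	ioctl(fd, PMEM_ALLOCATE, len); |
| + *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, |
| + *		       MAP_SHARED, fd, 0); |
| + */ |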
| + |
| +#endif /* _ANDROID_PMEM_H_ */ |
| + |
| diff --git a/stblinux-2.6.37/include/linux/ashmem.h b/stblinux-2.6.37/include/linux/ashmem.h |
| new file mode 100644 |
| index 0000000..1976b10 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/include/linux/ashmem.h |
| @@ -0,0 +1,48 @@ |
| +/* |
| + * include/linux/ashmem.h |
| + * |
| + * Copyright 2008 Google Inc. |
| + * Author: Robert Love |
| + * |
| + * This file is dual licensed. It may be redistributed and/or modified |
| + * under the terms of the Apache 2.0 License OR version 2 of the GNU |
| + * General Public License. |
| + */ |
| + |
| +#ifndef _LINUX_ASHMEM_H |
| +#define _LINUX_ASHMEM_H |
| + |
| +#include <linux/limits.h> |
| +#include <linux/ioctl.h> |
| + |
| +#define ASHMEM_NAME_LEN 256 |
| + |
| +#define ASHMEM_NAME_DEF "dev/ashmem" |
| + |
| +/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ |
| +#define ASHMEM_NOT_PURGED 0 |
| +#define ASHMEM_WAS_PURGED 1 |
| + |
| +/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */ |
| +#define ASHMEM_IS_UNPINNED 0 |
| +#define ASHMEM_IS_PINNED 1 |
| + |
| +struct ashmem_pin { |
| + __u32 offset; /* offset into region, in bytes, page-aligned */ |
| + __u32 len; /* length forward from offset, in bytes, page-aligned */ |
| +}; |
| + |
| +#define __ASHMEMIOC 0x77 |
| + |
| +#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) |
| +#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) |
| +#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t) |
| +#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4) |
| +#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long) |
| +#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6) |
| +#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin) |
| +#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin) |
| +#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9) |
| +#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10) |
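| + |
| +/* |
| + * Typical user-space flow (sketch; the "/dev/ashmem" node name, "len" |
| + * and the omitted error handling are assumptions): |
| + * |
| + *	int fd = open("/dev/ashmem", O_RDWR); |
| + *	ioctl(fd, ASHMEM_SET_NAME, "my-region"); |
| + *	ioctl(fd, ASHMEM_SET_SIZE, len); |
| + *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); |
| + * |
| + *	struct ashmem_pin pin = { .offset = 0, .len = len }; |
| + *	ioctl(fd, ASHMEM_UNPIN, &pin);	(kernel may now reclaim the pages) |
| + *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED) |
| + *		;	(contents were discarded while unpinned) |
| + */ |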
| + |
| +#endif /* _LINUX_ASHMEM_H */ |
| diff --git a/stblinux-2.6.37/include/linux/mm.h b/stblinux-2.6.37/include/linux/mm.h |
| index 721f451..b90daa9 100644 |
| --- a/stblinux-2.6.37/include/linux/mm.h |
| +++ b/stblinux-2.6.37/include/linux/mm.h |
| @@ -746,6 +746,7 @@ extern void show_free_areas(void); |
| |
| int shmem_lock(struct file *file, int lock, struct user_struct *user); |
| struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); |
| +void shmem_set_file(struct vm_area_struct *vma, struct file *file); |
| int shmem_zero_setup(struct vm_area_struct *); |
| |
| #ifndef CONFIG_MMU |
| diff --git a/stblinux-2.6.37/include/linux/switch.h b/stblinux-2.6.37/include/linux/switch.h |
| new file mode 100644 |
| index 0000000..3e4c748 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/include/linux/switch.h |
| @@ -0,0 +1,53 @@ |
| +/* |
| + * Switch class driver |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * Author: Mike Lockwood <lockwood@android.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| +*/ |
| + |
| +#ifndef __LINUX_SWITCH_H__ |
| +#define __LINUX_SWITCH_H__ |
| + |
| +struct switch_dev { |
| + const char *name; |
| + struct device *dev; |
| + int index; |
| + int state; |
| + |
| + ssize_t (*print_name)(struct switch_dev *sdev, char *buf); |
| + ssize_t (*print_state)(struct switch_dev *sdev, char *buf); |
| +}; |
| + |
| +struct gpio_switch_platform_data { |
| + const char *name; |
| + unsigned gpio; |
| + |
| + /* if NULL, switch_dev.name will be printed */ |
| + const char *name_on; |
| + const char *name_off; |
| + /* if NULL, "0" or "1" will be printed */ |
| + const char *state_on; |
| + const char *state_off; |
| +}; |
| + |
| +extern int switch_dev_register(struct switch_dev *sdev); |
| +extern void switch_dev_unregister(struct switch_dev *sdev); |
| + |
| +static inline int switch_get_state(struct switch_dev *sdev) |
| +{ |
| + return sdev->state; |
| +} |
| + |
| +extern void switch_set_state(struct switch_dev *sdev, int state); |
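| + |
| +/* |
| + * Driver-side sketch (the "h2w" name and the state value are |
| + * illustrative assumptions): |
| + * |
| + *	static struct switch_dev hs_switch = { .name = "h2w" }; |
| + * |
| + *	switch_dev_register(&hs_switch); |
| + *	... |
| + *	switch_set_state(&hs_switch, 1);	(e.g. headset plugged in) |
| + */ |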
| + |
| +#endif /* __LINUX_SWITCH_H__ */ |
| diff --git a/stblinux-2.6.37/include/linux/usb/android.h b/stblinux-2.6.37/include/linux/usb/android.h |
| new file mode 100644 |
| index 0000000..bf9dfe7 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/include/linux/usb/android.h |
| @@ -0,0 +1,42 @@ |
| +/* |
| + * Platform data for Android USB |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * Author: Mike Lockwood <lockwood@android.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| +#ifndef __LINUX_USB_ANDROID_H |
| +#define __LINUX_USB_ANDROID_H |
| + |
| +struct android_usb_platform_data { |
| + /* USB device descriptor fields */ |
| + __u16 vendor_id; |
| + |
| + /* Default product ID. */ |
| + __u16 product_id; |
| + |
| + /* Product ID when adb is enabled. */ |
| + __u16 adb_product_id; |
| + |
| + __u16 version; |
| + |
| + char *product_name; |
| + char *manufacturer_name; |
| + char *serial_number; |
| + |
| + /* number of LUNS for mass storage function */ |
| + int nluns; |
| +}; |
| + |
| +extern void android_usb_set_connected(int on); |
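| + |
| +/* |
| + * Board-file sketch (every ID and string below is a made-up example, |
| + * not a value defined by this header): |
| + * |
| + *	static struct android_usb_platform_data android_usb_pdata = { |
| + *		.vendor_id         = 0x18d1, |
| + *		.product_id        = 0x0001, |
| + *		.adb_product_id    = 0x0002, |
| + *		.version           = 0x0100, |
| + *		.product_name      = "Example Device", |
| + *		.manufacturer_name = "Example Corp", |
| + *		.serial_number     = "0123456789", |
| + *		.nluns             = 1, |
| + *	}; |
| + */ |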
| + |
| +#endif /* __LINUX_USB_ANDROID_H */ |
| diff --git a/stblinux-2.6.37/init/Kconfig b/stblinux-2.6.37/init/Kconfig |
| index c972899..5569886 100644 |
| --- a/stblinux-2.6.37/init/Kconfig |
| +++ b/stblinux-2.6.37/init/Kconfig |
| @@ -1009,6 +1009,15 @@ config AIO |
| This option enables POSIX asynchronous I/O which may by used |
| by some high performance threaded applications. Disabling |
| this option saves about 7k. |
| + |
| +config ASHMEM |
| + bool "Enable the Anonymous Shared Memory Subsystem" |
| + default n |
| + depends on SHMEM || TINY_SHMEM |
| + help |
| + The ashmem subsystem is a new shared memory allocator, similar to |
| + POSIX SHM but with different behavior and sporting a simpler |
| + file-based API. |
| |
| config HAVE_PERF_EVENTS |
| bool |
| diff --git a/stblinux-2.6.37/kernel/power/Kconfig b/stblinux-2.6.37/kernel/power/Kconfig |
| index a5aff3e..36bca6f 100644 |
| --- a/stblinux-2.6.37/kernel/power/Kconfig |
| +++ b/stblinux-2.6.37/kernel/power/Kconfig |
| @@ -248,6 +248,73 @@ config PM_OPS |
| |
| config ARCH_HAS_OPP |
| bool |
| + |
| +config HAS_WAKELOCK |
| + bool |
| + |
| +config HAS_EARLYSUSPEND |
| + bool |
| + |
| +config WAKELOCK |
| + bool "Wake lock" |
| + depends on PM && RTC_CLASS |
| + default n |
| + select HAS_WAKELOCK |
| + ---help--- |
| +	  Enable wake locks. When user space requests a sleep state, the |
| +	  sleep request will be delayed until no wake locks are held. |
| + |
| +config WAKELOCK_STAT |
| + bool "Wake lock stats" |
| + depends on WAKELOCK |
| + default y |
| + ---help--- |
| + Report wake lock stats in /proc/wakelocks |
| + |
| +config USER_WAKELOCK |
| + bool "Userspace wake locks" |
| + depends on WAKELOCK |
| + default y |
| + ---help--- |
| +	  User-space wake lock API. Write "lockname" or "lockname timeout" |
| +	  to /sys/power/wake_lock to take a wake lock, creating it first if |
| +	  needed. Write "lockname" to /sys/power/wake_unlock to release a |
| +	  user wake lock. |
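| + |
| +	  For example (illustrative), "echo mylock > /sys/power/wake_lock" |
| +	  takes a user wake lock named "mylock", and "echo mylock > |
| +	  /sys/power/wake_unlock" releases it again. |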
| + |
| +config EARLYSUSPEND |
| + bool "Early suspend" |
| + depends on WAKELOCK |
| + default y |
| + select HAS_EARLYSUSPEND |
| + ---help--- |
| + Call early suspend handlers when the user requested sleep state |
| + changes. |
| + |
| +choice |
| + prompt "User-space screen access" |
| + default FB_EARLYSUSPEND if !FRAMEBUFFER_CONSOLE |
| + default CONSOLE_EARLYSUSPEND |
| + depends on HAS_EARLYSUSPEND |
| + |
| + config NO_USER_SPACE_SCREEN_ACCESS_CONTROL |
| + bool "None" |
| + |
| + config CONSOLE_EARLYSUSPEND |
| + bool "Console switch on early-suspend" |
| + depends on HAS_EARLYSUSPEND && VT |
| + ---help--- |
| +	  Register an early suspend handler that performs a console switch |
| +	  when user-space should stop drawing to the screen, and switches |
| +	  back when it should resume. |
| + |
| + config FB_EARLYSUSPEND |
| + bool "Sysfs interface" |
| + depends on HAS_EARLYSUSPEND |
| + ---help--- |
| +	  Register an early suspend handler that notifies user-space |
| +	  through sysfs when it should stop drawing to the screen, waits |
| +	  for it to stop, and notifies it again when it should resume. |
| +endchoice |
| |
| config PM_OPP |
| bool "Operating Performance Point (OPP) Layer library" |
| diff --git a/stblinux-2.6.37/kernel/power/Makefile b/stblinux-2.6.37/kernel/power/Makefile |
| index f9063c6..c292957 100644 |
| --- a/stblinux-2.6.37/kernel/power/Makefile |
| +++ b/stblinux-2.6.37/kernel/power/Makefile |
| @@ -6,6 +6,12 @@ endif |
| obj-$(CONFIG_PM) += main.o |
| obj-$(CONFIG_PM_SLEEP) += console.o |
| obj-$(CONFIG_FREEZER) += process.o |
| +obj-$(CONFIG_WAKELOCK) += wakelock.o |
| +obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o |
| +obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o |
| +obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o |
| +obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o |
| +obj-$(CONFIG_HIBERNATION) += swsusp.o disk.o snapshot.o swap.o user.o |
| obj-$(CONFIG_SUSPEND) += suspend.o |
| obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o |
| obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ |
| diff --git a/stblinux-2.6.37/kernel/power/consoleearlysuspend.c b/stblinux-2.6.37/kernel/power/consoleearlysuspend.c |
| new file mode 100644 |
| index 0000000..a8befb4 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/kernel/power/consoleearlysuspend.c |
| @@ -0,0 +1,78 @@ |
| +/* kernel/power/consoleearlysuspend.c |
| + * |
| + * Copyright (C) 2005-2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/console.h> |
| +#include <linux/earlysuspend.h> |
| +#include <linux/kbd_kern.h> |
| +#include <linux/module.h> |
| +#include <linux/vt_kern.h> |
| +#include <linux/wait.h> |
| + |
| +#define EARLY_SUSPEND_CONSOLE (MAX_NR_CONSOLES-1) |
| + |
| +static int orig_fgconsole; |
| +static void console_early_suspend(struct early_suspend *h) |
| +{ |
| + acquire_console_sem(); |
| + orig_fgconsole = fg_console; |
| + if (vc_allocate(EARLY_SUSPEND_CONSOLE)) |
| + goto err; |
| + if (set_console(EARLY_SUSPEND_CONSOLE)) |
| + goto err; |
| + release_console_sem(); |
| + |
| + if (vt_waitactive(EARLY_SUSPEND_CONSOLE)) |
| + pr_warning("console_early_suspend: Can't switch VCs.\n"); |
| + return; |
| +err: |
| + pr_warning("console_early_suspend: Can't set console\n"); |
| + release_console_sem(); |
| +} |
| + |
| +static void console_late_resume(struct early_suspend *h) |
| +{ |
| + int ret; |
| + acquire_console_sem(); |
| + ret = set_console(orig_fgconsole); |
| + release_console_sem(); |
| + if (ret) { |
| + pr_warning("console_late_resume: Can't set console.\n"); |
| + return; |
| + } |
| + |
| + if (vt_waitactive(orig_fgconsole)) |
| + pr_warning("console_late_resume: Can't switch VCs.\n"); |
| +} |
| + |
| +static struct early_suspend console_early_suspend_desc = { |
| + .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING, |
| + .suspend = console_early_suspend, |
| + .resume = console_late_resume, |
| +}; |
| + |
| +static int __init console_early_suspend_init(void) |
| +{ |
| + register_early_suspend(&console_early_suspend_desc); |
| + return 0; |
| +} |
| + |
| +static void __exit console_early_suspend_exit(void) |
| +{ |
| + unregister_early_suspend(&console_early_suspend_desc); |
| +} |
| + |
| +module_init(console_early_suspend_init); |
| +module_exit(console_early_suspend_exit); |
| + |
| diff --git a/stblinux-2.6.37/kernel/power/disk.c b/stblinux-2.6.37/kernel/power/disk.c |
| new file mode 100644 |
| index 0000000..4a4a206 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/kernel/power/disk.c |
| @@ -0,0 +1,926 @@ |
| +/* |
| + * kernel/power/disk.c - Suspend-to-disk support. |
| + * |
| + * Copyright (c) 2003 Patrick Mochel |
| + * Copyright (c) 2003 Open Source Development Lab |
| + * Copyright (c) 2004 Pavel Machek <pavel@suse.cz> |
| + * |
| + * This file is released under the GPLv2. |
| + * |
| + */ |
| + |
| +#include <linux/suspend.h> |
| +#include <linux/syscalls.h> |
| +#include <linux/reboot.h> |
| +#include <linux/string.h> |
| +#include <linux/device.h> |
| +#include <linux/kmod.h> |
| +#include <linux/delay.h> |
| +#include <linux/fs.h> |
| +#include <linux/mount.h> |
| +#include <linux/pm.h> |
| +#include <linux/console.h> |
| +#include <linux/cpu.h> |
| +#include <linux/freezer.h> |
| + |
| +#include "power.h" |
| + |
| + |
| +static int noresume = 0; |
| +static char resume_file[256] = CONFIG_PM_STD_PARTITION; |
| +dev_t swsusp_resume_device; |
| +sector_t swsusp_resume_block; |
| + |
| +enum { |
| + HIBERNATION_INVALID, |
| + HIBERNATION_PLATFORM, |
| + HIBERNATION_TEST, |
| + HIBERNATION_TESTPROC, |
| + HIBERNATION_SHUTDOWN, |
| + HIBERNATION_REBOOT, |
| + /* keep last */ |
| + __HIBERNATION_AFTER_LAST |
| +}; |
| +#define HIBERNATION_MAX (__HIBERNATION_AFTER_LAST-1) |
| +#define HIBERNATION_FIRST (HIBERNATION_INVALID + 1) |
| + |
| +static int hibernation_mode = HIBERNATION_SHUTDOWN; |
| + |
| +static struct platform_hibernation_ops *hibernation_ops; |
| + |
| +/** |
| + * hibernation_set_ops - set the global hibernate operations |
| + * @ops: the hibernation operations to use in subsequent hibernation transitions |
| + */ |
| + |
| +void hibernation_set_ops(struct platform_hibernation_ops *ops) |
| +{ |
| + if (ops && !(ops->begin && ops->end && ops->pre_snapshot |
| + && ops->prepare && ops->finish && ops->enter && ops->pre_restore |
| + && ops->restore_cleanup)) { |
| + WARN_ON(1); |
| + return; |
| + } |
| + mutex_lock(&pm_mutex); |
| + hibernation_ops = ops; |
| + if (ops) |
| + hibernation_mode = HIBERNATION_PLATFORM; |
| + else if (hibernation_mode == HIBERNATION_PLATFORM) |
| + hibernation_mode = HIBERNATION_SHUTDOWN; |
| + |
| + mutex_unlock(&pm_mutex); |
| +} |
| + |
| +static bool entering_platform_hibernation; |
| + |
| +bool system_entering_hibernation(void) |
| +{ |
| + return entering_platform_hibernation; |
| +} |
| +EXPORT_SYMBOL(system_entering_hibernation); |
| + |
| +#ifdef CONFIG_PM_DEBUG |
| +static void hibernation_debug_sleep(void) |
| +{ |
| + printk(KERN_INFO "hibernation debug: Waiting for 5 seconds.\n"); |
| + mdelay(5000); |
| +} |
| + |
| +static int hibernation_testmode(int mode) |
| +{ |
| + if (hibernation_mode == mode) { |
| + hibernation_debug_sleep(); |
| + return 1; |
| + } |
| + return 0; |
| +} |
| + |
| +static int hibernation_test(int level) |
| +{ |
| + if (pm_test_level == level) { |
| + hibernation_debug_sleep(); |
| + return 1; |
| + } |
| + return 0; |
| +} |
| +#else /* !CONFIG_PM_DEBUG */ |
| +static int hibernation_testmode(int mode) { return 0; } |
| +static int hibernation_test(int level) { return 0; } |
| +#endif /* !CONFIG_PM_DEBUG */ |
| + |
| +/** |
| + * platform_begin - tell the platform driver that we're starting |
| + * hibernation |
| + */ |
| + |
| +static int platform_begin(int platform_mode) |
| +{ |
| + return (platform_mode && hibernation_ops) ? |
| + hibernation_ops->begin() : 0; |
| +} |
| + |
| +/** |
| + * platform_end - tell the platform driver that we've entered the |
| + * working state |
| + */ |
| + |
| +static void platform_end(int platform_mode) |
| +{ |
| + if (platform_mode && hibernation_ops) |
| + hibernation_ops->end(); |
| +} |
| + |
| +/** |
| + * platform_pre_snapshot - prepare the machine for hibernation using the |
| + * platform driver if so configured and return an error code if it fails |
| + */ |
| + |
| +static int platform_pre_snapshot(int platform_mode) |
| +{ |
| + return (platform_mode && hibernation_ops) ? |
| + hibernation_ops->pre_snapshot() : 0; |
| +} |
| + |
| +/** |
| + * platform_leave - prepare the machine for switching to the normal mode |
| + * of operation using the platform driver (called with interrupts disabled) |
| + */ |
| + |
| +static void platform_leave(int platform_mode) |
| +{ |
| + if (platform_mode && hibernation_ops) |
| + hibernation_ops->leave(); |
| +} |
| + |
| +/** |
| + * platform_finish - switch the machine to the normal mode of operation |
| + * using the platform driver (must be called after platform_prepare()) |
| + */ |
| + |
| +static void platform_finish(int platform_mode) |
| +{ |
| + if (platform_mode && hibernation_ops) |
| + hibernation_ops->finish(); |
| +} |
| + |
| +/** |
| + * platform_pre_restore - prepare the platform for the restoration from a |
| + * hibernation image. If the restore fails after this function has been |
| + * called, platform_restore_cleanup() must be called. |
| + */ |
| + |
| +static int platform_pre_restore(int platform_mode) |
| +{ |
| + return (platform_mode && hibernation_ops) ? |
| + hibernation_ops->pre_restore() : 0; |
| +} |
| + |
| +/** |
| + * platform_restore_cleanup - switch the platform to the normal mode of |
| + * operation after a failing restore. If platform_pre_restore() has been |
| + * called before the failing restore, this function must be called too, |
| + * regardless of the result of platform_pre_restore(). |
| + */ |
| + |
| +static void platform_restore_cleanup(int platform_mode) |
| +{ |
| + if (platform_mode && hibernation_ops) |
| + hibernation_ops->restore_cleanup(); |
| +} |
| + |
| +/** |
| + * platform_recover - recover the platform from a failure to suspend |
| + * devices. |
| + */ |
| + |
| +static void platform_recover(int platform_mode) |
| +{ |
| + if (platform_mode && hibernation_ops && hibernation_ops->recover) |
| + hibernation_ops->recover(); |
| +} |
| + |
| +/** |
| + * create_image - freeze devices that need to be frozen with interrupts |
| + * off, create the hibernation image and thaw those devices. Control |
| + * reappears in this routine after a restore. |
| + */ |
| + |
| +static int create_image(int platform_mode) |
| +{ |
| + int error; |
| + |
| + error = arch_prepare_suspend(); |
| + if (error) |
| + return error; |
| + |
| + device_pm_lock(); |
| + local_irq_disable(); |
| + /* At this point, device_suspend() has been called, but *not* |
| + * device_power_down(). We *must* call device_power_down() now. |
| + * Otherwise, drivers for some devices (e.g. interrupt controllers) |
| + * become desynchronized with the actual state of the hardware |
| + * at resume time, and evil weirdness ensues. |
| + */ |
| + error = device_power_down(PMSG_FREEZE); |
| + if (error) { |
| + printk(KERN_ERR "PM: Some devices failed to power down, " |
| + "aborting hibernation\n"); |
| + goto Enable_irqs; |
| + } |
| +	error = sysdev_suspend(PMSG_FREEZE); |
| +	if (error) { |
| +		printk(KERN_ERR "PM: Some system devices failed to suspend, " |
| +			"aborting hibernation\n"); |
| + goto Power_up_devices; |
| + } |
| + |
| + if (hibernation_test(TEST_CORE)) |
| + goto Power_up; |
| + |
| + in_suspend = 1; |
| + save_processor_state(); |
| + error = swsusp_arch_suspend(); |
| + if (error) |
| + printk(KERN_ERR "PM: Error %d creating hibernation image\n", |
| + error); |
| + /* Restore control flow magically appears here */ |
| + restore_processor_state(); |
| + if (!in_suspend) |
| + platform_leave(platform_mode); |
| + Power_up: |
| + sysdev_resume(); |
| + /* NOTE: device_power_up() is just a resume() for devices |
| + * that suspended with irqs off ... no overall powerup. |
| + */ |
| + Power_up_devices: |
| + device_power_up(in_suspend ? |
| + (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
| + Enable_irqs: |
| + local_irq_enable(); |
| + device_pm_unlock(); |
| + return error; |
| +} |
| + |
| +/** |
| + * hibernation_snapshot - quiesce devices and create the hibernation |
| + * snapshot image. |
| + * @platform_mode - if set, use the platform driver, if available, to |
| + * prepare the platform firmware for the power transition. |
| + * |
| + * Must be called with pm_mutex held |
| + */ |
| + |
| +int hibernation_snapshot(int platform_mode) |
| +{ |
| + int error; |
| + |
| + error = platform_begin(platform_mode); |
| + if (error) |
| + return error; |
| + |
| + /* Free memory before shutting down devices. */ |
| + error = swsusp_shrink_memory(); |
| + if (error) |
| + goto Close; |
| + |
| + suspend_console(); |
| + error = device_suspend(PMSG_FREEZE); |
| + if (error) |
| + goto Recover_platform; |
| + |
| + if (hibernation_test(TEST_DEVICES)) |
| + goto Recover_platform; |
| + |
| + error = platform_pre_snapshot(platform_mode); |
| + if (error || hibernation_test(TEST_PLATFORM)) |
| + goto Finish; |
| + |
| + error = disable_nonboot_cpus(); |
| + if (!error) { |
| + if (hibernation_test(TEST_CPUS)) |
| + goto Enable_cpus; |
| + |
| + if (hibernation_testmode(HIBERNATION_TEST)) |
| + goto Enable_cpus; |
| + |
| + error = create_image(platform_mode); |
| + /* Control returns here after successful restore */ |
| + } |
| + Enable_cpus: |
| + enable_nonboot_cpus(); |
| + Finish: |
| + platform_finish(platform_mode); |
| + Resume_devices: |
| + device_resume(in_suspend ? |
| + (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
| + resume_console(); |
| + Close: |
| + platform_end(platform_mode); |
| + return error; |
| + |
| + Recover_platform: |
| + platform_recover(platform_mode); |
| + goto Resume_devices; |
| +} |
| + |
| +/** |
| + * resume_target_kernel - prepare devices that need to be suspended with |
| + * interrupts off, restore the contents of highmem that have not been |
| + * restored yet from the image and run the low level code that will restore |
| + * the remaining contents of memory and switch to the just restored target |
| + * kernel. |
| + */ |
| + |
| +static int resume_target_kernel(void) |
| +{ |
| + int error; |
| + |
| + device_pm_lock(); |
| + local_irq_disable(); |
| + error = device_power_down(PMSG_QUIESCE); |
| + if (error) { |
| + printk(KERN_ERR "PM: Some devices failed to power down, " |
| + "aborting resume\n"); |
| + goto Enable_irqs; |
| + } |
| + sysdev_suspend(PMSG_QUIESCE); |
| + /* We'll ignore saved state, but this gets preempt count (etc) right */ |
| + save_processor_state(); |
| + error = restore_highmem(); |
| + if (!error) { |
| + error = swsusp_arch_resume(); |
| + /* |
| + * The code below is only ever reached in case of a failure. |
| + * Otherwise execution continues at place where |
| + * swsusp_arch_suspend() was called |
| + */ |
| + BUG_ON(!error); |
| +		/* This call to restore_highmem() undoes the previous one */ |
| + restore_highmem(); |
| + } |
| + /* |
| + * The only reason why swsusp_arch_resume() can fail is memory being |
| + * very tight, so we have to free it as soon as we can to avoid |
| + * subsequent failures |
| + */ |
| + swsusp_free(); |
| + restore_processor_state(); |
| + touch_softlockup_watchdog(); |
| + sysdev_resume(); |
| + device_power_up(PMSG_RECOVER); |
| + Enable_irqs: |
| + local_irq_enable(); |
| + device_pm_unlock(); |
| + return error; |
| +} |
| + |
| +/** |
| + * hibernation_restore - quiesce devices and restore the hibernation |
| + * snapshot image. If successful, control returns in hibernation_snapshot() |
| + * @platform_mode - if set, use the platform driver, if available, to |
| + * prepare the platform firmware for the transition. |
| + * |
| + * Must be called with pm_mutex held |
| + */ |
| + |
| +int hibernation_restore(int platform_mode) |
| +{ |
| + int error; |
| + |
| + pm_prepare_console(); |
| + suspend_console(); |
| + error = device_suspend(PMSG_QUIESCE); |
| + if (error) |
| + goto Finish; |
| + |
| + error = platform_pre_restore(platform_mode); |
| + if (!error) { |
| + error = disable_nonboot_cpus(); |
| + if (!error) |
| + error = resume_target_kernel(); |
| + enable_nonboot_cpus(); |
| + } |
| + platform_restore_cleanup(platform_mode); |
| + device_resume(PMSG_RECOVER); |
| + Finish: |
| + resume_console(); |
| + pm_restore_console(); |
| + return error; |
| +} |
| + |
| +/** |
| + * hibernation_platform_enter - enter the hibernation state using the |
| + * platform driver (if available) |
| + */ |
| + |
| +int hibernation_platform_enter(void) |
| +{ |
| + int error; |
| + |
| + if (!hibernation_ops) |
| + return -ENOSYS; |
| + |
| + /* |
| + * We have cancelled the power transition by running |
| + * hibernation_ops->finish() before saving the image, so we should let |
| + * the firmware know that we're going to enter the sleep state after all |
| + */ |
| + error = hibernation_ops->begin(); |
| + if (error) |
| + goto Close; |
| + |
| + entering_platform_hibernation = true; |
| + suspend_console(); |
| + error = device_suspend(PMSG_HIBERNATE); |
| + if (error) { |
| + if (hibernation_ops->recover) |
| + hibernation_ops->recover(); |
| + goto Resume_devices; |
| + } |
| + |
| + error = hibernation_ops->prepare(); |
| + if (error) |
| + goto Resume_devices; |
| + |
| + error = disable_nonboot_cpus(); |
| + if (error) |
| + goto Finish; |
| + |
| + device_pm_lock(); |
| + local_irq_disable(); |
| + error = device_power_down(PMSG_HIBERNATE); |
| + if (!error) { |
| + sysdev_suspend(PMSG_HIBERNATE); |
| + hibernation_ops->enter(); |
| + /* We should never get here */ |
| + while (1); |
| + } |
| + local_irq_enable(); |
| + device_pm_unlock(); |
| + |
| + /* |
| + * We don't need to reenable the nonboot CPUs or resume consoles, since |
| + * the system is going to be halted anyway. |
| + */ |
| + Finish: |
| + hibernation_ops->finish(); |
| + Resume_devices: |
| + entering_platform_hibernation = false; |
| + device_resume(PMSG_RESTORE); |
| + resume_console(); |
| + Close: |
| + hibernation_ops->end(); |
| + return error; |
| +} |
| + |
| +/** |
| + * power_down - Shut the machine down for hibernation. |
| + * |
| + * Use the platform driver, if configured so; otherwise try |
| + * to power off or reboot. |
| + */ |
| + |
| +static void power_down(void) |
| +{ |
| + switch (hibernation_mode) { |
| + case HIBERNATION_TEST: |
| + case HIBERNATION_TESTPROC: |
| + break; |
| + case HIBERNATION_REBOOT: |
| + kernel_restart(NULL); |
| + break; |
| + case HIBERNATION_PLATFORM: |
| + hibernation_platform_enter(); |
| + case HIBERNATION_SHUTDOWN: |
| + kernel_power_off(); |
| + break; |
| + } |
| + kernel_halt(); |
| + /* |
| + * Valid image is on the disk, if we continue we risk serious data |
| + * corruption after resume. |
| + */ |
| + printk(KERN_CRIT "PM: Please power down manually\n"); |
| +	while (1); |
| +} |
| + |
| +static int prepare_processes(void) |
| +{ |
| + int error = 0; |
| + |
| + if (freeze_processes()) { |
| + error = -EBUSY; |
| + thaw_processes(); |
| + } |
| + return error; |
| +} |
| + |
| +/** |
| + * hibernate - The granpappy of the built-in hibernation management |
| + */ |
| + |
| +int hibernate(void) |
| +{ |
| + int error; |
| + |
| + mutex_lock(&pm_mutex); |
| + /* The snapshot device should not be opened while we're running */ |
| + if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { |
| + error = -EBUSY; |
| + goto Unlock; |
| + } |
| + |
| + pm_prepare_console(); |
| + error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE); |
| + if (error) |
| + goto Exit; |
| + |
| + error = usermodehelper_disable(); |
| + if (error) |
| + goto Exit; |
| + |
| + /* Allocate memory management structures */ |
| + error = create_basic_memory_bitmaps(); |
| + if (error) |
| + goto Exit; |
| + |
| + printk(KERN_INFO "PM: Syncing filesystems ... "); |
| + sys_sync(); |
| + printk("done.\n"); |
| + |
| + error = prepare_processes(); |
| + if (error) |
| + goto Finish; |
| + |
| + if (hibernation_test(TEST_FREEZER)) |
| + goto Thaw; |
| + |
| + if (hibernation_testmode(HIBERNATION_TESTPROC)) |
| + goto Thaw; |
| + |
| + error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM); |
| + if (in_suspend && !error) { |
| + unsigned int flags = 0; |
| + |
| + if (hibernation_mode == HIBERNATION_PLATFORM) |
| + flags |= SF_PLATFORM_MODE; |
| + pr_debug("PM: writing image.\n"); |
| + error = swsusp_write(flags); |
| + swsusp_free(); |
| + if (!error) |
| + power_down(); |
| + } else { |
| + pr_debug("PM: Image restored successfully.\n"); |
| + swsusp_free(); |
| + } |
| + Thaw: |
| + thaw_processes(); |
| + Finish: |
| + free_basic_memory_bitmaps(); |
| + usermodehelper_enable(); |
| + Exit: |
| + pm_notifier_call_chain(PM_POST_HIBERNATION); |
| + pm_restore_console(); |
| + atomic_inc(&snapshot_device_available); |
| + Unlock: |
| + mutex_unlock(&pm_mutex); |
| + return error; |
| +} |
| + |
| + |
| +/** |
| + * software_resume - Resume from a saved image. |
| + * |
| + * Called as a late_initcall (so all devices are discovered and |
| + * initialized), we call swsusp to see if we have a saved image or not. |
| + * If so, we quiesce devices, then restore the saved image. We will |
| + * return above (in hibernate()) if everything goes well. |
| + * Otherwise, we fail gracefully and return to the normally |
| + * scheduled program. |
| + * |
| + */ |
| + |
| +static int software_resume(void) |
| +{ |
| + int error; |
| + unsigned int flags; |
| + |
| + /* |
| + * If the user said "noresume".. bail out early. |
| + */ |
| + if (noresume) |
| + return 0; |
| + |
| + /* |
| + * name_to_dev_t() below takes a sysfs buffer mutex when sysfs |
| + * is configured into the kernel. Since the regular hibernate |
| + * trigger path is via sysfs which takes a buffer mutex before |
| + * calling hibernate functions (which take pm_mutex) this can |
| + * cause lockdep to complain about a possible ABBA deadlock |
| + * which cannot happen since we're in the boot code here and |
| + * sysfs can't be invoked yet. Therefore, we use a subclass |
| + * here to avoid lockdep complaining. |
| + */ |
| + mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING); |
| + if (!swsusp_resume_device) { |
| + if (!strlen(resume_file)) { |
| + mutex_unlock(&pm_mutex); |
| + return -ENOENT; |
| + } |
| + /* |
| + * Some device discovery might still be in progress; we need |
| + * to wait for this to finish. |
| + */ |
| + wait_for_device_probe(); |
| + swsusp_resume_device = name_to_dev_t(resume_file); |
| + pr_debug("PM: Resume from partition %s\n", resume_file); |
| + } else { |
| + pr_debug("PM: Resume from partition %d:%d\n", |
| + MAJOR(swsusp_resume_device), |
| + MINOR(swsusp_resume_device)); |
| + } |
| + |
| + if (noresume) { |
| + /** |
| + * FIXME: If noresume is specified, we need to find the |
| + * partition and reset it back to normal swap space. |
| + */ |
| + mutex_unlock(&pm_mutex); |
| + return 0; |
| + } |
| + |
| + pr_debug("PM: Checking hibernation image.\n"); |
| + error = swsusp_check(); |
| + if (error) |
| + goto Unlock; |
| + |
| + /* The snapshot device should not be opened while we're running */ |
| + if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { |
| + error = -EBUSY; |
| + goto Unlock; |
| + } |
| + |
| + pm_prepare_console(); |
| + error = pm_notifier_call_chain(PM_RESTORE_PREPARE); |
| + if (error) |
| + goto Finish; |
| + |
| + error = usermodehelper_disable(); |
| + if (error) |
| + goto Finish; |
| + |
| + error = create_basic_memory_bitmaps(); |
| + if (error) |
| + goto Finish; |
| + |
| + pr_debug("PM: Preparing processes for restore.\n"); |
| + error = prepare_processes(); |
| + if (error) { |
| + swsusp_close(FMODE_READ); |
| + goto Done; |
| + } |
| + |
| + pr_debug("PM: Reading hibernation image.\n"); |
| + |
| + error = swsusp_read(&flags); |
| + if (!error) |
| + hibernation_restore(flags & SF_PLATFORM_MODE); |
| + |
| + printk(KERN_ERR "PM: Restore failed, recovering.\n"); |
| + swsusp_free(); |
| + thaw_processes(); |
| + Done: |
| + free_basic_memory_bitmaps(); |
| + usermodehelper_enable(); |
| + Finish: |
| + pm_notifier_call_chain(PM_POST_RESTORE); |
| + pm_restore_console(); |
| + atomic_inc(&snapshot_device_available); |
| + /* For success case, the suspend path will release the lock */ |
| + Unlock: |
| + mutex_unlock(&pm_mutex); |
| + pr_debug("PM: Resume from disk failed.\n"); |
| + return error; |
| +} |
| + |
| +late_initcall(software_resume); |
| + |
| + |
| +static const char * const hibernation_modes[] = { |
| + [HIBERNATION_PLATFORM] = "platform", |
| + [HIBERNATION_SHUTDOWN] = "shutdown", |
| + [HIBERNATION_REBOOT] = "reboot", |
| + [HIBERNATION_TEST] = "test", |
| + [HIBERNATION_TESTPROC] = "testproc", |
| +}; |
| + |
| +/** |
| + * disk - Control hibernation mode |
| + * |
| + * Suspend-to-disk can be handled in several ways. We have a few options |
| + * for putting the system to sleep - using the platform driver (e.g. ACPI |
| + * or other hibernation_ops), powering off the system or rebooting the |
| + * system (for testing) as well as the two test modes. |
| + * |
| + * The system can support 'platform', and that is known a priori (and |
| + * encoded by the presence of hibernation_ops). However, the user may |
| + * choose 'shutdown' or 'reboot' as alternatives, as well as one of the |
| + * test modes, 'test' or 'testproc'. |
| + * |
| + * show() will display what the mode is currently set to. |
| + * store() will accept one of |
| + * |
| + * 'platform' |
| + * 'shutdown' |
| + * 'reboot' |
| + * 'test' |
| + * 'testproc' |
| + * |
| + * It will only change to 'platform' if the system |
| + * supports it (as determined by having hibernation_ops). |
| + */ |
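| + |
| +/* |
| + * For example (illustrative): "echo reboot > /sys/power/disk" selects |
| + * the reboot mode, and reading the file back lists the valid modes |
| + * with the current one bracketed, e.g. "[shutdown] reboot". |
| + */ |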
| + |
| +static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr, |
| + char *buf) |
| +{ |
| + int i; |
| + char *start = buf; |
| + |
| + for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { |
| + if (!hibernation_modes[i]) |
| + continue; |
| + switch (i) { |
| + case HIBERNATION_SHUTDOWN: |
| + case HIBERNATION_REBOOT: |
| + case HIBERNATION_TEST: |
| + case HIBERNATION_TESTPROC: |
| + break; |
| + case HIBERNATION_PLATFORM: |
| + if (hibernation_ops) |
| + break; |
| + /* not a valid mode, continue with loop */ |
| + continue; |
| + } |
| + if (i == hibernation_mode) |
| + buf += sprintf(buf, "[%s] ", hibernation_modes[i]); |
| + else |
| + buf += sprintf(buf, "%s ", hibernation_modes[i]); |
| + } |
| + buf += sprintf(buf, "\n"); |
| + return buf-start; |
| +} |
| + |
| + |
| +static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr, |
| + const char *buf, size_t n) |
| +{ |
| + int error = 0; |
| + int i; |
| + int len; |
| + char *p; |
| + int mode = HIBERNATION_INVALID; |
| + |
| + p = memchr(buf, '\n', n); |
| + len = p ? p - buf : n; |
| + |
| + mutex_lock(&pm_mutex); |
| + for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) { |
| + if (len == strlen(hibernation_modes[i]) |
| + && !strncmp(buf, hibernation_modes[i], len)) { |
| + mode = i; |
| + break; |
| + } |
| + } |
| + if (mode != HIBERNATION_INVALID) { |
| + switch (mode) { |
| + case HIBERNATION_SHUTDOWN: |
| + case HIBERNATION_REBOOT: |
| + case HIBERNATION_TEST: |
| + case HIBERNATION_TESTPROC: |
| + hibernation_mode = mode; |
| + break; |
| + case HIBERNATION_PLATFORM: |
| + if (hibernation_ops) |
| + hibernation_mode = mode; |
| + else |
| + error = -EINVAL; |
| + } |
| + } else |
| + error = -EINVAL; |
| + |
| + if (!error) |
| + pr_debug("PM: Hibernation mode set to '%s'\n", |
| + hibernation_modes[mode]); |
| + mutex_unlock(&pm_mutex); |
| + return error ? error : n; |
| +} |
| + |
| +power_attr(disk); |
| + |
| +static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr, |
| + char *buf) |
| +{ |
| + return sprintf(buf,"%d:%d\n", MAJOR(swsusp_resume_device), |
| + MINOR(swsusp_resume_device)); |
| +} |
| + |
| +static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr, |
| + const char *buf, size_t n) |
| +{ |
| + unsigned int maj, min; |
| + dev_t res; |
| + int ret = -EINVAL; |
| + |
| + if (sscanf(buf, "%u:%u", &maj, &min) != 2) |
| + goto out; |
| + |
| +	res = MKDEV(maj, min); |
| + if (maj != MAJOR(res) || min != MINOR(res)) |
| + goto out; |
| + |
| + mutex_lock(&pm_mutex); |
| + swsusp_resume_device = res; |
| + mutex_unlock(&pm_mutex); |
| + printk(KERN_INFO "PM: Starting manual resume from disk\n"); |
| + noresume = 0; |
| + software_resume(); |
| + ret = n; |
| + out: |
| + return ret; |
| +} |
| + |
| +power_attr(resume); |
| + |
| +static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr, |
| + char *buf) |
| +{ |
| + return sprintf(buf, "%lu\n", image_size); |
| +} |
| + |
| +static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr, |
| + const char *buf, size_t n) |
| +{ |
| + unsigned long size; |
| + |
| + if (sscanf(buf, "%lu", &size) == 1) { |
| + image_size = size; |
| + return n; |
| + } |
| + |
| + return -EINVAL; |
| +} |
| + |
| +power_attr(image_size); |
| + |
| +static struct attribute * g[] = { |
| + &disk_attr.attr, |
| + &resume_attr.attr, |
| + &image_size_attr.attr, |
| + NULL, |
| +}; |
| + |
| + |
| +static struct attribute_group attr_group = { |
| + .attrs = g, |
| +}; |
| + |
| + |
| +static int __init pm_disk_init(void) |
| +{ |
| + return sysfs_create_group(power_kobj, &attr_group); |
| +} |
| + |
| +core_initcall(pm_disk_init); |
| + |
| + |
| +static int __init resume_setup(char *str) |
| +{ |
| + if (noresume) |
| + return 1; |
| + |
| +	strncpy(resume_file, str, 255); |
| + return 1; |
| +} |
| + |
| +static int __init resume_offset_setup(char *str) |
| +{ |
| + unsigned long long offset; |
| + |
| + if (noresume) |
| + return 1; |
| + |
| + if (sscanf(str, "%llu", &offset) == 1) |
| + swsusp_resume_block = offset; |
| + |
| + return 1; |
| +} |
| + |
| +static int __init noresume_setup(char *str) |
| +{ |
| + noresume = 1; |
| + return 1; |
| +} |
| + |
| +__setup("noresume", noresume_setup); |
| +__setup("resume_offset=", resume_offset_setup); |
| +__setup("resume=", resume_setup); |
| diff --git a/stblinux-2.6.37/kernel/power/earlysuspend.c b/stblinux-2.6.37/kernel/power/earlysuspend.c |
| new file mode 100644 |
| index 0000000..84bed51 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/kernel/power/earlysuspend.c |
| @@ -0,0 +1,178 @@ |
| +/* kernel/power/earlysuspend.c |
| + * |
| + * Copyright (C) 2005-2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/earlysuspend.h> |
| +#include <linux/module.h> |
| +#include <linux/mutex.h> |
| +#include <linux/rtc.h> |
| +#include <linux/syscalls.h> /* sys_sync */ |
| +#include <linux/wakelock.h> |
| +#include <linux/workqueue.h> |
| + |
| +#include "power.h" |
| + |
| +enum { |
| + DEBUG_USER_STATE = 1U << 0, |
| + DEBUG_SUSPEND = 1U << 2, |
| +}; |
| +static int debug_mask = DEBUG_USER_STATE; |
| +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); |
| + |
| +static DEFINE_MUTEX(early_suspend_lock); |
| +static LIST_HEAD(early_suspend_handlers); |
| +static void early_suspend(struct work_struct *work); |
| +static void late_resume(struct work_struct *work); |
| +static DECLARE_WORK(early_suspend_work, early_suspend); |
| +static DECLARE_WORK(late_resume_work, late_resume); |
| +static DEFINE_SPINLOCK(state_lock); |
| +enum { |
| + SUSPEND_REQUESTED = 0x1, |
| + SUSPENDED = 0x2, |
| + SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED, |
| +}; |
| +static int state; |
| + |
| +void register_early_suspend(struct early_suspend *handler) |
| +{ |
| + struct list_head *pos; |
| + |
| + mutex_lock(&early_suspend_lock); |
| + list_for_each(pos, &early_suspend_handlers) { |
| + struct early_suspend *e; |
| + e = list_entry(pos, struct early_suspend, link); |
| + if (e->level > handler->level) |
| + break; |
| + } |
| + list_add_tail(&handler->link, pos); |
| + if ((state & SUSPENDED) && handler->suspend) |
| + handler->suspend(handler); |
| + mutex_unlock(&early_suspend_lock); |
| +} |
| +EXPORT_SYMBOL(register_early_suspend); |
| + |
| +void unregister_early_suspend(struct early_suspend *handler) |
| +{ |
| + mutex_lock(&early_suspend_lock); |
| + list_del(&handler->link); |
| + mutex_unlock(&early_suspend_lock); |
| +} |
| +EXPORT_SYMBOL(unregister_early_suspend); |
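| + |
| +/* |
| + * Driver-side sketch (the handler and variable names are illustrative |
| + * assumptions): |
| + * |
| + *	static struct early_suspend mydrv_es = { |
| + *		.level   = EARLY_SUSPEND_LEVEL_STOP_DRAWING, |
| + *		.suspend = mydrv_early_suspend, |
| + *		.resume  = mydrv_late_resume, |
| + *	}; |
| + * |
| + *	register_early_suspend(&mydrv_es); |
| + */ |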
| + |
| +static void early_suspend(struct work_struct *work) |
| +{ |
| + struct early_suspend *pos; |
| + unsigned long irqflags; |
| + int abort = 0; |
| + |
| + mutex_lock(&early_suspend_lock); |
| + spin_lock_irqsave(&state_lock, irqflags); |
| + if (state == SUSPEND_REQUESTED) |
| + state |= SUSPENDED; |
| + else |
| + abort = 1; |
| + spin_unlock_irqrestore(&state_lock, irqflags); |
| + |
| + if (abort) { |
| + if (debug_mask & DEBUG_SUSPEND) |
| + pr_info("early_suspend: abort, state %d\n", state); |
| + mutex_unlock(&early_suspend_lock); |
| + goto abort; |
| + } |
| + |
| + if (debug_mask & DEBUG_SUSPEND) |
| + pr_info("early_suspend: call handlers\n"); |
| + list_for_each_entry(pos, &early_suspend_handlers, link) { |
| + if (pos->suspend != NULL) |
| + pos->suspend(pos); |
| + } |
| + mutex_unlock(&early_suspend_lock); |
| + |
| + if (debug_mask & DEBUG_SUSPEND) |
| + pr_info("early_suspend: sync\n"); |
| + |
| + sys_sync(); |
| +abort: |
| + spin_lock_irqsave(&state_lock, irqflags); |
| + if (state == SUSPEND_REQUESTED_AND_SUSPENDED) |
| + wake_unlock(&main_wake_lock); |
| + spin_unlock_irqrestore(&state_lock, irqflags); |
| +} |
| + |
| +static void late_resume(struct work_struct *work) |
| +{ |
| + struct early_suspend *pos; |
| + unsigned long irqflags; |
| + int abort = 0; |
| + |
| + mutex_lock(&early_suspend_lock); |
| + spin_lock_irqsave(&state_lock, irqflags); |
| + if (state == SUSPENDED) |
| + state &= ~SUSPENDED; |
| + else |
| + abort = 1; |
| + spin_unlock_irqrestore(&state_lock, irqflags); |
| + |
| + if (abort) { |
| + if (debug_mask & DEBUG_SUSPEND) |
| + pr_info("late_resume: abort, state %d\n", state); |
| + goto abort; |
| + } |
| + if (debug_mask & DEBUG_SUSPEND) |
| + pr_info("late_resume: call handlers\n"); |
| + list_for_each_entry_reverse(pos, &early_suspend_handlers, link) |
| + if (pos->resume != NULL) |
| + pos->resume(pos); |
| + if (debug_mask & DEBUG_SUSPEND) |
| + pr_info("late_resume: done\n"); |
| +abort: |
| + mutex_unlock(&early_suspend_lock); |
| +} |
| + |
| +void request_suspend_state(suspend_state_t new_state) |
| +{ |
| + unsigned long irqflags; |
| + int old_sleep; |
| + |
| + spin_lock_irqsave(&state_lock, irqflags); |
| + old_sleep = state & SUSPEND_REQUESTED; |
| + if (debug_mask & DEBUG_USER_STATE) { |
| + struct timespec ts; |
| + struct rtc_time tm; |
| + getnstimeofday(&ts); |
| + rtc_time_to_tm(ts.tv_sec, &tm); |
| + pr_info("request_suspend_state: %s (%d->%d) at %lld " |
| + "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", |
| + new_state != PM_SUSPEND_ON ? "sleep" : "wakeup", |
| + requested_suspend_state, new_state, |
| + ktime_to_ns(ktime_get()), |
| + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, |
| + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); |
| + } |
| + if (!old_sleep && new_state != PM_SUSPEND_ON) { |
| + state |= SUSPEND_REQUESTED; |
| + queue_work(suspend_work_queue, &early_suspend_work); |
| + } else if (old_sleep && new_state == PM_SUSPEND_ON) { |
| + state &= ~SUSPEND_REQUESTED; |
| + wake_lock(&main_wake_lock); |
| + queue_work(suspend_work_queue, &late_resume_work); |
| + } |
| + requested_suspend_state = new_state; |
| + spin_unlock_irqrestore(&state_lock, irqflags); |
| +} |
| + |
| +suspend_state_t get_suspend_state(void) |
| +{ |
| + return requested_suspend_state; |
| +} |
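| |
| Below is a minimal sketch (annotation, not part of the patch) of how a |
| driver would consume the early suspend API above. Handlers are kept sorted |
| by level, so suspend callbacks run in ascending level order on screen-off |
| and resume callbacks in descending order on screen-on. The mydrv_* names |
| are hypothetical; the level constant is the one fbearlysuspend uses below. |
| |
| #include <linux/earlysuspend.h> |
| |
| static void mydrv_early_suspend(struct early_suspend *h) |
| { |
|         /* screen is going dark: stop rendering, gate clocks, etc. */ |
| } |
| |
| static void mydrv_late_resume(struct early_suspend *h) |
| { |
|         /* screen is back: restore hardware state */ |
| } |
| |
| static struct early_suspend mydrv_early_suspend_desc = { |
|         .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING, |
|         .suspend = mydrv_early_suspend, |
|         .resume = mydrv_late_resume, |
| }; |
| |
| /* call register_early_suspend(&mydrv_early_suspend_desc) in probe() and |
|    unregister_early_suspend(&mydrv_early_suspend_desc) in remove() */ |
| |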
| diff --git a/stblinux-2.6.37/kernel/power/fbearlysuspend.c b/stblinux-2.6.37/kernel/power/fbearlysuspend.c |
| new file mode 100644 |
| index 0000000..1513765 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/kernel/power/fbearlysuspend.c |
| @@ -0,0 +1,153 @@ |
| +/* kernel/power/fbearlysuspend.c |
| + * |
| + * Copyright (C) 2005-2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/earlysuspend.h> |
| +#include <linux/module.h> |
| +#include <linux/wait.h> |
| + |
| +#include "power.h" |
| + |
| +static wait_queue_head_t fb_state_wq; |
| +static DEFINE_SPINLOCK(fb_state_lock); |
| +static enum { |
| + FB_STATE_STOPPED_DRAWING, |
| + FB_STATE_REQUEST_STOP_DRAWING, |
| + FB_STATE_DRAWING_OK, |
| +} fb_state; |
| + |
| +/* tell userspace to stop drawing, wait for it to stop */ |
| +static void stop_drawing_early_suspend(struct early_suspend *h) |
| +{ |
| + int ret; |
| + unsigned long irq_flags; |
| + |
| + spin_lock_irqsave(&fb_state_lock, irq_flags); |
| + fb_state = FB_STATE_REQUEST_STOP_DRAWING; |
| + spin_unlock_irqrestore(&fb_state_lock, irq_flags); |
| + |
| + wake_up_all(&fb_state_wq); |
| + ret = wait_event_timeout(fb_state_wq, |
| + fb_state == FB_STATE_STOPPED_DRAWING, |
| + HZ); |
| + if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING)) |
| + pr_warning("stop_drawing_early_suspend: timeout waiting for " |
| + "userspace to stop drawing\n"); |
| +} |
| + |
| +/* tell userspace to start drawing */ |
| +static void start_drawing_late_resume(struct early_suspend *h) |
| +{ |
| + unsigned long irq_flags; |
| + |
| + spin_lock_irqsave(&fb_state_lock, irq_flags); |
| + fb_state = FB_STATE_DRAWING_OK; |
| + spin_unlock_irqrestore(&fb_state_lock, irq_flags); |
| + wake_up(&fb_state_wq); |
| +} |
| + |
| +static struct early_suspend stop_drawing_early_suspend_desc = { |
| + .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING, |
| + .suspend = stop_drawing_early_suspend, |
| + .resume = start_drawing_late_resume, |
| +}; |
| + |
| +static ssize_t wait_for_fb_sleep_show(struct kobject *kobj, |
| + struct kobj_attribute *attr, char *buf) |
| +{ |
| + char *s = buf; |
| + int ret; |
| + |
| + ret = wait_event_interruptible(fb_state_wq, |
| + fb_state != FB_STATE_DRAWING_OK); |
| + if (ret && fb_state == FB_STATE_DRAWING_OK) |
| + return ret; |
| + else |
| + s += sprintf(buf, "sleeping"); |
| + return s - buf; |
| +} |
| + |
| +static ssize_t wait_for_fb_wake_show(struct kobject *kobj, |
| + struct kobj_attribute *attr, char *buf) |
| +{ |
| + char *s = buf; |
| + int ret; |
| + unsigned long irq_flags; |
| + |
| + spin_lock_irqsave(&fb_state_lock, irq_flags); |
| + if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) { |
| + fb_state = FB_STATE_STOPPED_DRAWING; |
| + wake_up(&fb_state_wq); |
| + } |
| + spin_unlock_irqrestore(&fb_state_lock, irq_flags); |
| + |
| + ret = wait_event_interruptible(fb_state_wq, |
| + fb_state == FB_STATE_DRAWING_OK); |
| + if (ret && fb_state != FB_STATE_DRAWING_OK) |
| + return ret; |
| + else |
| + s += sprintf(buf, "awake"); |
| + |
| + return s - buf; |
| +} |
| + |
| +#define power_ro_attr(_name) \ |
| +static struct kobj_attribute _name##_attr = { \ |
| + .attr = { \ |
| + .name = __stringify(_name), \ |
| + .mode = 0444, \ |
| + }, \ |
| + .show = _name##_show, \ |
| + .store = NULL, \ |
| +} |
| + |
| +power_ro_attr(wait_for_fb_sleep); |
| +power_ro_attr(wait_for_fb_wake); |
| + |
| +static struct attribute *g[] = { |
| + &wait_for_fb_sleep_attr.attr, |
| + &wait_for_fb_wake_attr.attr, |
| + NULL, |
| +}; |
| + |
| +static struct attribute_group attr_group = { |
| + .attrs = g, |
| +}; |
| + |
| +static int __init android_power_init(void) |
| +{ |
| + int ret; |
| + |
| + init_waitqueue_head(&fb_state_wq); |
| + fb_state = FB_STATE_DRAWING_OK; |
| + |
| + ret = sysfs_create_group(power_kobj, &attr_group); |
| + if (ret) { |
| + pr_err("android_power_init: sysfs_create_group failed\n"); |
| + return ret; |
| + } |
| + |
| + register_early_suspend(&stop_drawing_early_suspend_desc); |
| + return 0; |
| +} |
| + |
| +static void __exit android_power_exit(void) |
| +{ |
| + unregister_early_suspend(&stop_drawing_early_suspend_desc); |
| + sysfs_remove_group(power_kobj, &attr_group); |
| +} |
| + |
| +module_init(android_power_init); |
| +module_exit(android_power_exit); |
| + |
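| |
| The userspace side of this handshake is a blocking-read loop; a sketch |
| follows (not part of the patch; paths assume power_kobj is /sys/power). |
| Reading wait_for_fb_sleep blocks until a stop-drawing request is pending, |
| and the next read of wait_for_fb_wake acknowledges the stop (its show |
| handler flips the state to FB_STATE_STOPPED_DRAWING) and then blocks until |
| drawing may resume: |
| |
| #include <fcntl.h> |
| #include <unistd.h> |
| |
| static void wait_on(const char *path) |
| { |
|         char buf[16]; |
|         int fd = open(path, O_RDONLY); |
| |
|         if (fd >= 0) { |
|                 read(fd, buf, sizeof(buf)); /* blocks until the state flips */ |
|                 close(fd); |
|         } |
| } |
| |
| int main(void) |
| { |
|         for (;;) { |
|                 wait_on("/sys/power/wait_for_fb_sleep"); |
|                 /* stop issuing frames here */ |
|                 wait_on("/sys/power/wait_for_fb_wake"); |
|                 /* resume drawing here */ |
|         } |
| } |
| |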
| diff --git a/stblinux-2.6.37/kernel/power/userwakelock.c b/stblinux-2.6.37/kernel/power/userwakelock.c |
| new file mode 100644 |
| index 0000000..d7242d9 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/kernel/power/userwakelock.c |
| @@ -0,0 +1,218 @@ |
| +/* kernel/power/userwakelock.c |
| + * |
| + * Copyright (C) 2005-2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/ctype.h> |
| +#include <linux/module.h> |
| +#include <linux/wakelock.h> |
| + |
| +#include "power.h" |
| + |
| +enum { |
| + DEBUG_FAILURE = BIT(0), |
| + DEBUG_ERROR = BIT(1), |
| + DEBUG_NEW = BIT(2), |
| + DEBUG_ACCESS = BIT(3), |
| + DEBUG_LOOKUP = BIT(4), |
| +}; |
| +static int debug_mask = DEBUG_FAILURE; |
| +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); |
| + |
| +static DEFINE_MUTEX(tree_lock); |
| + |
| +struct user_wake_lock { |
| + struct rb_node node; |
| + struct wake_lock wake_lock; |
| + char name[0]; |
| +}; |
| +static struct rb_root user_wake_locks; |
| + |
| +static struct user_wake_lock *lookup_wake_lock_name( |
| + const char *buf, int allocate, long *timeoutptr) |
| +{ |
| + struct rb_node **p = &user_wake_locks.rb_node; |
| + struct rb_node *parent = NULL; |
| + struct user_wake_lock *l; |
| + int diff; |
| + u64 timeout; |
| + int name_len; |
| + const char *arg; |
| + |
| + /* Find length of lock name and start of optional timeout string */ |
| + arg = buf; |
| + while (*arg && !isspace(*arg)) |
| + arg++; |
| + name_len = arg - buf; |
| + if (!name_len) |
| + goto bad_arg; |
| + while (isspace(*arg)) |
| + arg++; |
| + |
| + /* Process timeout string */ |
| + if (timeoutptr && *arg) { |
| + timeout = simple_strtoull(arg, (char **)&arg, 0); |
| + while (isspace(*arg)) |
| + arg++; |
| + if (*arg) |
| + goto bad_arg; |
| + /* convert timeout from nanoseconds to jiffies, rounding up to >= 1 */ |
| + timeout += (NSEC_PER_SEC / HZ) - 1; |
| + do_div(timeout, (NSEC_PER_SEC / HZ)); |
| + if (timeout == 0) |
| + timeout = 1; |
| + *timeoutptr = timeout; |
| + } else if (*arg) |
| + goto bad_arg; |
| + else if (timeoutptr) |
| + *timeoutptr = 0; |
| + |
| + /* Lookup wake lock in rbtree */ |
| + while (*p) { |
| + parent = *p; |
| + l = rb_entry(parent, struct user_wake_lock, node); |
| + diff = strncmp(buf, l->name, name_len); |
| + if (!diff && l->name[name_len]) |
| + diff = -1; |
| + if (debug_mask & DEBUG_LOOKUP) |
| + pr_info("lookup_wake_lock_name: compare %.*s %s %d\n", |
| + name_len, buf, l->name, diff); |
| + |
| + if (diff < 0) |
| + p = &(*p)->rb_left; |
| + else if (diff > 0) |
| + p = &(*p)->rb_right; |
| + else |
| + return l; |
| + } |
| + |
| + /* Allocate and add new wakelock to rbtree */ |
| + if (!allocate) { |
| + if (debug_mask & DEBUG_ERROR) |
| + pr_info("lookup_wake_lock_name: %.*s not found\n", |
| + name_len, buf); |
| + return ERR_PTR(-EINVAL); |
| + } |
| + l = kzalloc(sizeof(*l) + name_len + 1, GFP_KERNEL); |
| + if (l == NULL) { |
| + if (debug_mask & DEBUG_FAILURE) |
| + pr_err("lookup_wake_lock_name: failed to allocate " |
| + "memory for %.*s\n", name_len, buf); |
| + return ERR_PTR(-ENOMEM); |
| + } |
| + memcpy(l->name, buf, name_len); |
| + if (debug_mask & DEBUG_NEW) |
| + pr_info("lookup_wake_lock_name: new wake lock %s\n", l->name); |
| + wake_lock_init(&l->wake_lock, WAKE_LOCK_SUSPEND, l->name); |
| + rb_link_node(&l->node, parent, p); |
| + rb_insert_color(&l->node, &user_wake_locks); |
| + return l; |
| + |
| +bad_arg: |
| + if (debug_mask & DEBUG_ERROR) |
| + pr_info("lookup_wake_lock_name: wake lock, %.*s, bad arg, %s\n", |
| + name_len, buf, arg); |
| + return ERR_PTR(-EINVAL); |
| +} |
| + |
| +ssize_t wake_lock_show( |
| + struct kobject *kobj, struct kobj_attribute *attr, char *buf) |
| +{ |
| + char *s = buf; |
| + char *end = buf + PAGE_SIZE; |
| + struct rb_node *n; |
| + struct user_wake_lock *l; |
| + |
| + mutex_lock(&tree_lock); |
| + |
| + for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) { |
| + l = rb_entry(n, struct user_wake_lock, node); |
| + if (wake_lock_active(&l->wake_lock)) |
| + s += scnprintf(s, end - s, "%s ", l->name); |
| + } |
| + s += scnprintf(s, end - s, "\n"); |
| + |
| + mutex_unlock(&tree_lock); |
| + return (s - buf); |
| +} |
| + |
| +ssize_t wake_lock_store( |
| + struct kobject *kobj, struct kobj_attribute *attr, |
| + const char *buf, size_t n) |
| +{ |
| + long timeout; |
| + struct user_wake_lock *l; |
| + |
| + mutex_lock(&tree_lock); |
| + l = lookup_wake_lock_name(buf, 1, &timeout); |
| + if (IS_ERR(l)) { |
| + n = PTR_ERR(l); |
| + goto bad_name; |
| + } |
| + |
| + if (debug_mask & DEBUG_ACCESS) |
| + pr_info("wake_lock_store: %s, timeout %ld\n", l->name, timeout); |
| + |
| + if (timeout) |
| + wake_lock_timeout(&l->wake_lock, timeout); |
| + else |
| + wake_lock(&l->wake_lock); |
| +bad_name: |
| + mutex_unlock(&tree_lock); |
| + return n; |
| +} |
| + |
| + |
| +ssize_t wake_unlock_show( |
| + struct kobject *kobj, struct kobj_attribute *attr, char *buf) |
| +{ |
| + char *s = buf; |
| + char *end = buf + PAGE_SIZE; |
| + struct rb_node *n; |
| + struct user_wake_lock *l; |
| + |
| + mutex_lock(&tree_lock); |
| + |
| + for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) { |
| + l = rb_entry(n, struct user_wake_lock, node); |
| + if (!wake_lock_active(&l->wake_lock)) |
| + s += scnprintf(s, end - s, "%s ", l->name); |
| + } |
| + s += scnprintf(s, end - s, "\n"); |
| + |
| + mutex_unlock(&tree_lock); |
| + return (s - buf); |
| +} |
| + |
| +ssize_t wake_unlock_store( |
| + struct kobject *kobj, struct kobj_attribute *attr, |
| + const char *buf, size_t n) |
| +{ |
| + struct user_wake_lock *l; |
| + |
| + mutex_lock(&tree_lock); |
| + l = lookup_wake_lock_name(buf, 0, NULL); |
| + if (IS_ERR(l)) { |
| + n = PTR_ERR(l); |
| + goto not_found; |
| + } |
| + |
| + if (debug_mask & DEBUG_ACCESS) |
| + pr_info("wake_unlock_store: %s\n", l->name); |
| + |
| + wake_unlock(&l->wake_lock); |
| +not_found: |
| + mutex_unlock(&tree_lock); |
| + return n; |
| +} |
| + |
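| |
| These handlers surface as /sys/power/wake_lock and /sys/power/wake_unlock |
| (the sysfs hookup happens elsewhere in this patch). The protocol, sketched |
| below (not part of the patch), is: write "<name> [timeout-in-ns]" to take |
| a lock, write the bare name to release it. |
| |
| #include <fcntl.h> |
| #include <string.h> |
| #include <unistd.h> |
| |
| static void sysfs_write(const char *path, const char *s) |
| { |
|         int fd = open(path, O_WRONLY); |
| |
|         if (fd >= 0) { |
|                 write(fd, s, strlen(s)); |
|                 close(fd); |
|         } |
| } |
| |
| int main(void) |
| { |
|         sysfs_write("/sys/power/wake_lock", "mylock 500000000"); /* 500 ms */ |
|         /* ... work that must finish before the system may suspend ... */ |
|         sysfs_write("/sys/power/wake_unlock", "mylock"); |
|         return 0; |
| } |
| |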
| diff --git a/stblinux-2.6.37/kernel/power/wakelock.c b/stblinux-2.6.37/kernel/power/wakelock.c |
| new file mode 100644 |
| index 0000000..7833d28 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/kernel/power/wakelock.c |
| @@ -0,0 +1,598 @@ |
| +/* kernel/power/wakelock.c |
| + * |
| + * Copyright (C) 2005-2008 Google, Inc. |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + * |
| + */ |
| + |
| +#include <linux/module.h> |
| +#include <linux/platform_device.h> |
| +#include <linux/rtc.h> |
| +#include <linux/suspend.h> |
| +#include <linux/syscalls.h> /* sys_sync */ |
| +#include <linux/wakelock.h> |
| +#ifdef CONFIG_WAKELOCK_STAT |
| +#include <linux/proc_fs.h> |
| +#endif |
| +#include "power.h" |
| + |
| +enum { |
| + DEBUG_EXIT_SUSPEND = 1U << 0, |
| + DEBUG_WAKEUP = 1U << 1, |
| + DEBUG_SUSPEND = 1U << 2, |
| + DEBUG_EXPIRE = 1U << 3, |
| + DEBUG_WAKE_LOCK = 1U << 4, |
| +}; |
| +static int debug_mask = DEBUG_EXIT_SUSPEND | DEBUG_WAKEUP; |
| +module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); |
| + |
| +#define WAKE_LOCK_TYPE_MASK (0x0f) |
| +#define WAKE_LOCK_INITIALIZED (1U << 8) |
| +#define WAKE_LOCK_ACTIVE (1U << 9) |
| +#define WAKE_LOCK_AUTO_EXPIRE (1U << 10) |
| +#define WAKE_LOCK_PREVENTING_SUSPEND (1U << 11) |
| + |
| +static DEFINE_SPINLOCK(list_lock); |
| +static LIST_HEAD(inactive_locks); |
| +static struct list_head active_wake_locks[WAKE_LOCK_TYPE_COUNT]; |
| +static int current_event_num; |
| +struct workqueue_struct *suspend_work_queue; |
| +struct wake_lock main_wake_lock; |
| +suspend_state_t requested_suspend_state = PM_SUSPEND_MEM; |
| +static struct wake_lock unknown_wakeup; |
| + |
| +#ifdef CONFIG_WAKELOCK_STAT |
| +static struct wake_lock deleted_wake_locks; |
| +static ktime_t last_sleep_time_update; |
| +static int wait_for_wakeup; |
| + |
| +int get_expired_time(struct wake_lock *lock, ktime_t *expire_time) |
| +{ |
| + struct timespec ts; |
| + struct timespec kt; |
| + struct timespec tomono; |
| + struct timespec delta; |
| + unsigned long seq; |
| + long timeout; |
| + |
| + if (!(lock->flags & WAKE_LOCK_AUTO_EXPIRE)) |
| + return 0; |
| + do { |
| + seq = read_seqbegin(&xtime_lock); |
| + timeout = lock->expires - jiffies; |
| + if (timeout > 0) |
| + return 0; |
| + kt = current_kernel_time(); |
| + tomono = wall_to_monotonic; |
| + } while (read_seqretry(&xtime_lock, seq)); |
| + jiffies_to_timespec(-timeout, &delta); |
| + set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec, |
| + kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec); |
| + *expire_time = timespec_to_ktime(ts); |
| + return 1; |
| +} |
| + |
| + |
| +static int print_lock_stat(char *buf, struct wake_lock *lock) |
| +{ |
| + int lock_count = lock->stat.count; |
| + int expire_count = lock->stat.expire_count; |
| + ktime_t active_time = ktime_set(0, 0); |
| + ktime_t total_time = lock->stat.total_time; |
| + ktime_t max_time = lock->stat.max_time; |
| + ktime_t prevent_suspend_time = lock->stat.prevent_suspend_time; |
| + if (lock->flags & WAKE_LOCK_ACTIVE) { |
| + ktime_t now, add_time; |
| + int expired = get_expired_time(lock, &now); |
| + if (!expired) |
| + now = ktime_get(); |
| + add_time = ktime_sub(now, lock->stat.last_time); |
| + lock_count++; |
| + if (!expired) |
| + active_time = add_time; |
| + else |
| + expire_count++; |
| + total_time = ktime_add(total_time, add_time); |
| + if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) |
| + prevent_suspend_time = ktime_add(prevent_suspend_time, |
| + ktime_sub(now, last_sleep_time_update)); |
| + if (add_time.tv64 > max_time.tv64) |
| + max_time = add_time; |
| + } |
| + |
| + return sprintf(buf, "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t" |
| + "%lld\n", lock->name, lock_count, expire_count, |
| + lock->stat.wakeup_count, ktime_to_ns(active_time), |
| + ktime_to_ns(total_time), |
| + ktime_to_ns(prevent_suspend_time), ktime_to_ns(max_time), |
| + ktime_to_ns(lock->stat.last_time)); |
| +} |
| + |
| + |
| +static int wakelocks_read_proc(char *page, char **start, off_t off, |
| + int count, int *eof, void *data) |
| +{ |
| + unsigned long irqflags; |
| + struct wake_lock *lock; |
| + int len = 0; |
| + char *p = page; |
| + int type; |
| + |
| + spin_lock_irqsave(&list_lock, irqflags); |
| + |
| + p += sprintf(p, "name\tcount\texpire_count\twake_count\tactive_since" |
| + "\ttotal_time\tsleep_time\tmax_time\tlast_change\n"); |
| + list_for_each_entry(lock, &inactive_locks, link) { |
| + p += print_lock_stat(p, lock); |
| + } |
| + for (type = 0; type < WAKE_LOCK_TYPE_COUNT; type++) { |
| + list_for_each_entry(lock, &active_wake_locks[type], link) |
| + p += print_lock_stat(p, lock); |
| + } |
| + spin_unlock_irqrestore(&list_lock, irqflags); |
| + |
| + *start = page + off; |
| + |
| + len = p - page; |
| + if (len > off) |
| + len -= off; |
| + else |
| + len = 0; |
| + |
| + return len < count ? len : count; |
| +} |
| + |
| +static void wake_unlock_stat_locked(struct wake_lock *lock, int expired) |
| +{ |
| + ktime_t duration; |
| + ktime_t now; |
| + if (!(lock->flags & WAKE_LOCK_ACTIVE)) |
| + return; |
| + if (get_expired_time(lock, &now)) |
| + expired = 1; |
| + else |
| + now = ktime_get(); |
| + lock->stat.count++; |
| + if (expired) |
| + lock->stat.expire_count++; |
| + duration = ktime_sub(now, lock->stat.last_time); |
| + lock->stat.total_time = ktime_add(lock->stat.total_time, duration); |
| + if (ktime_to_ns(duration) > ktime_to_ns(lock->stat.max_time)) |
| + lock->stat.max_time = duration; |
| + lock->stat.last_time = ktime_get(); |
| + if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) { |
| + duration = ktime_sub(now, last_sleep_time_update); |
| + lock->stat.prevent_suspend_time = ktime_add( |
| + lock->stat.prevent_suspend_time, duration); |
| + lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND; |
| + } |
| +} |
| + |
| +static void update_sleep_wait_stats_locked(int done) |
| +{ |
| + struct wake_lock *lock; |
| + ktime_t now, etime, elapsed, add; |
| + int expired; |
| + |
| + now = ktime_get(); |
| + elapsed = ktime_sub(now, last_sleep_time_update); |
| + list_for_each_entry(lock, &active_wake_locks[WAKE_LOCK_SUSPEND], link) { |
| + expired = get_expired_time(lock, &etime); |
| + if (lock->flags & WAKE_LOCK_PREVENTING_SUSPEND) { |
| + if (expired) |
| + add = ktime_sub(etime, last_sleep_time_update); |
| + else |
| + add = elapsed; |
| + lock->stat.prevent_suspend_time = ktime_add( |
| + lock->stat.prevent_suspend_time, add); |
| + } |
| + if (done || expired) |
| + lock->flags &= ~WAKE_LOCK_PREVENTING_SUSPEND; |
| + else |
| + lock->flags |= WAKE_LOCK_PREVENTING_SUSPEND; |
| + } |
| + last_sleep_time_update = now; |
| +} |
| +#endif |
| + |
| + |
| +static void expire_wake_lock(struct wake_lock *lock) |
| +{ |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + wake_unlock_stat_locked(lock, 1); |
| +#endif |
| + lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE); |
| + list_del(&lock->link); |
| + list_add(&lock->link, &inactive_locks); |
| + if (debug_mask & (DEBUG_WAKE_LOCK | DEBUG_EXPIRE)) |
| + pr_info("expired wake lock %s\n", lock->name); |
| +} |
| + |
| +static void print_active_locks(int type) |
| +{ |
| + unsigned long irqflags; |
| + struct wake_lock *lock; |
| + |
| + BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); |
| + spin_lock_irqsave(&list_lock, irqflags); |
| + list_for_each_entry(lock, &active_wake_locks[type], link) { |
| + if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) { |
| + long timeout = lock->expires - jiffies; |
| + if (timeout <= 0) |
| + pr_info("wake lock %s, expired\n", lock->name); |
| + else |
| + pr_info("active wake lock %s, time left %ld\n", |
| + lock->name, timeout); |
| + } else |
| + pr_info("active wake lock %s\n", lock->name); |
| + } |
| + spin_unlock_irqrestore(&list_lock, irqflags); |
| +} |
| + |
| +static long has_wake_lock_locked(int type) |
| +{ |
| + struct wake_lock *lock, *n; |
| + long max_timeout = 0; |
| + |
| + BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); |
| + list_for_each_entry_safe(lock, n, &active_wake_locks[type], link) { |
| + if (lock->flags & WAKE_LOCK_AUTO_EXPIRE) { |
| + long timeout = lock->expires - jiffies; |
| + if (timeout <= 0) |
| + expire_wake_lock(lock); |
| + else if (timeout > max_timeout) |
| + max_timeout = timeout; |
| + } else |
| + return -1; |
| + } |
| + return max_timeout; |
| +} |
| + |
| +long has_wake_lock(int type) |
| +{ |
| + long ret; |
| + unsigned long irqflags; |
| + spin_lock_irqsave(&list_lock, irqflags); |
| + ret = has_wake_lock_locked(type); |
| + spin_unlock_irqrestore(&list_lock, irqflags); |
| + return ret; |
| +} |
| + |
| +static void suspend(struct work_struct *work) |
| +{ |
| + int ret; |
| + int entry_event_num; |
| + |
| + if (has_wake_lock(WAKE_LOCK_SUSPEND)) { |
| + if (debug_mask & DEBUG_SUSPEND) |
| + pr_info("suspend: abort suspend\n"); |
| + return; |
| + } |
| + |
| + entry_event_num = current_event_num; |
| + sys_sync(); |
| + if (debug_mask & DEBUG_SUSPEND) |
| + pr_info("suspend: enter suspend\n"); |
| + ret = pm_suspend(requested_suspend_state); |
| + if (debug_mask & DEBUG_EXIT_SUSPEND) { |
| + struct timespec ts; |
| + struct rtc_time tm; |
| + getnstimeofday(&ts); |
| + rtc_time_to_tm(ts.tv_sec, &tm); |
| + pr_info("suspend: exit suspend, ret = %d " |
| + "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ret, |
| + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, |
| + tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); |
| + } |
| + if (current_event_num == entry_event_num) { |
| + if (debug_mask & DEBUG_SUSPEND) |
| + pr_info("suspend: pm_suspend returned with no event\n"); |
| + wake_lock_timeout(&unknown_wakeup, HZ / 2); |
| + } |
| +} |
| +static DECLARE_WORK(suspend_work, suspend); |
| + |
| +static void expire_wake_locks(unsigned long data) |
| +{ |
| + long has_lock; |
| + unsigned long irqflags; |
| + if (debug_mask & DEBUG_EXPIRE) |
| + pr_info("expire_wake_locks: start\n"); |
| + if (debug_mask & DEBUG_SUSPEND) |
| + print_active_locks(WAKE_LOCK_SUSPEND); |
| + spin_lock_irqsave(&list_lock, irqflags); |
| + has_lock = has_wake_lock_locked(WAKE_LOCK_SUSPEND); |
| + if (debug_mask & DEBUG_EXPIRE) |
| + pr_info("expire_wake_locks: done, has_lock %ld\n", has_lock); |
| + if (has_lock == 0) |
| + queue_work(suspend_work_queue, &suspend_work); |
| + spin_unlock_irqrestore(&list_lock, irqflags); |
| +} |
| +static DEFINE_TIMER(expire_timer, expire_wake_locks, 0, 0); |
| + |
| +static int power_suspend_late(struct platform_device *pdev, pm_message_t state) |
| +{ |
| + int ret = has_wake_lock(WAKE_LOCK_SUSPEND) ? -EAGAIN : 0; |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + wait_for_wakeup = 1; |
| +#endif |
| + if (debug_mask & DEBUG_SUSPEND) |
| + pr_info("power_suspend_late return %d\n", ret); |
| + return ret; |
| +} |
| + |
| +static struct platform_driver power_driver = { |
| + .driver.name = "power", |
| + .suspend_late = power_suspend_late, |
| +}; |
| +static struct platform_device power_device = { |
| + .name = "power", |
| +}; |
| + |
| +void wake_lock_init(struct wake_lock *lock, int type, const char *name) |
| +{ |
| + unsigned long irqflags = 0; |
| + |
| + if (name) |
| + lock->name = name; |
| + BUG_ON(!lock->name); |
| + |
| + if (debug_mask & DEBUG_WAKE_LOCK) |
| + pr_info("wake_lock_init name=%s\n", lock->name); |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + lock->stat.count = 0; |
| + lock->stat.expire_count = 0; |
| + lock->stat.wakeup_count = 0; |
| + lock->stat.total_time = ktime_set(0, 0); |
| + lock->stat.prevent_suspend_time = ktime_set(0, 0); |
| + lock->stat.max_time = ktime_set(0, 0); |
| + lock->stat.last_time = ktime_set(0, 0); |
| +#endif |
| + lock->flags = (type & WAKE_LOCK_TYPE_MASK) | WAKE_LOCK_INITIALIZED; |
| + |
| + INIT_LIST_HEAD(&lock->link); |
| + spin_lock_irqsave(&list_lock, irqflags); |
| + list_add(&lock->link, &inactive_locks); |
| + spin_unlock_irqrestore(&list_lock, irqflags); |
| +} |
| +EXPORT_SYMBOL(wake_lock_init); |
| + |
| +void wake_lock_destroy(struct wake_lock *lock) |
| +{ |
| + unsigned long irqflags; |
| + if (debug_mask & DEBUG_WAKE_LOCK) |
| + pr_info("wake_lock_destroy name=%s\n", lock->name); |
| + spin_lock_irqsave(&list_lock, irqflags); |
| + lock->flags &= ~WAKE_LOCK_INITIALIZED; |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + if (lock->stat.count) { |
| + deleted_wake_locks.stat.count += lock->stat.count; |
| + deleted_wake_locks.stat.expire_count += lock->stat.expire_count; |
| + deleted_wake_locks.stat.total_time = |
| + ktime_add(deleted_wake_locks.stat.total_time, |
| + lock->stat.total_time); |
| + deleted_wake_locks.stat.prevent_suspend_time = |
| + ktime_add(deleted_wake_locks.stat.prevent_suspend_time, |
| + lock->stat.prevent_suspend_time); |
| + deleted_wake_locks.stat.max_time = |
| + ktime_add(deleted_wake_locks.stat.max_time, |
| + lock->stat.max_time); |
| + } |
| +#endif |
| + list_del(&lock->link); |
| + spin_unlock_irqrestore(&list_lock, irqflags); |
| +} |
| +EXPORT_SYMBOL(wake_lock_destroy); |
| + |
| +static void wake_lock_internal( |
| + struct wake_lock *lock, long timeout, int has_timeout) |
| +{ |
| + int type; |
| + unsigned long irqflags; |
| + long expire_in; |
| + |
| + spin_lock_irqsave(&list_lock, irqflags); |
| + type = lock->flags & WAKE_LOCK_TYPE_MASK; |
| + BUG_ON(type >= WAKE_LOCK_TYPE_COUNT); |
| + BUG_ON(!(lock->flags & WAKE_LOCK_INITIALIZED)); |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + if (type == WAKE_LOCK_SUSPEND && wait_for_wakeup) { |
| + if (debug_mask & DEBUG_WAKEUP) |
| + pr_info("wakeup wake lock: %s\n", lock->name); |
| + wait_for_wakeup = 0; |
| + lock->stat.wakeup_count++; |
| + } |
| + if ((lock->flags & WAKE_LOCK_AUTO_EXPIRE) && |
| + (long)(lock->expires - jiffies) <= 0) { |
| + wake_unlock_stat_locked(lock, 0); |
| + lock->stat.last_time = ktime_get(); |
| + } |
| +#endif |
| + if (!(lock->flags & WAKE_LOCK_ACTIVE)) { |
| + lock->flags |= WAKE_LOCK_ACTIVE; |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + lock->stat.last_time = ktime_get(); |
| +#endif |
| + } |
| + list_del(&lock->link); |
| + if (has_timeout) { |
| + if (debug_mask & DEBUG_WAKE_LOCK) |
| + pr_info("wake_lock: %s, type %d, timeout %ld.%03lu\n", |
| + lock->name, type, timeout / HZ, |
| + (timeout % HZ) * MSEC_PER_SEC / HZ); |
| + lock->expires = jiffies + timeout; |
| + lock->flags |= WAKE_LOCK_AUTO_EXPIRE; |
| + list_add_tail(&lock->link, &active_wake_locks[type]); |
| + } else { |
| + if (debug_mask & DEBUG_WAKE_LOCK) |
| + pr_info("wake_lock: %s, type %d\n", lock->name, type); |
| + lock->expires = LONG_MAX; |
| + lock->flags &= ~WAKE_LOCK_AUTO_EXPIRE; |
| + list_add(&lock->link, &active_wake_locks[type]); |
| + } |
| + if (type == WAKE_LOCK_SUSPEND) { |
| + current_event_num++; |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + if (lock == &main_wake_lock) |
| + update_sleep_wait_stats_locked(1); |
| + else if (!wake_lock_active(&main_wake_lock)) |
| + update_sleep_wait_stats_locked(0); |
| +#endif |
| + if (has_timeout) |
| + expire_in = has_wake_lock_locked(type); |
| + else |
| + expire_in = -1; |
| + if (expire_in > 0) { |
| + if (debug_mask & DEBUG_EXPIRE) |
| + pr_info("wake_lock: %s, start expire timer, " |
| + "%ld\n", lock->name, expire_in); |
| + mod_timer(&expire_timer, jiffies + expire_in); |
| + } else { |
| + if (del_timer(&expire_timer)) |
| + if (debug_mask & DEBUG_EXPIRE) |
| + pr_info("wake_lock: %s, stop expire timer\n", |
| + lock->name); |
| + if (expire_in == 0) |
| + queue_work(suspend_work_queue, &suspend_work); |
| + } |
| + } |
| + spin_unlock_irqrestore(&list_lock, irqflags); |
| +} |
| + |
| +void wake_lock(struct wake_lock *lock) |
| +{ |
| + wake_lock_internal(lock, 0, 0); |
| +} |
| +EXPORT_SYMBOL(wake_lock); |
| + |
| +void wake_lock_timeout(struct wake_lock *lock, long timeout) |
| +{ |
| + wake_lock_internal(lock, timeout, 1); |
| +} |
| +EXPORT_SYMBOL(wake_lock_timeout); |
| + |
| +void wake_unlock(struct wake_lock *lock) |
| +{ |
| + int type; |
| + unsigned long irqflags; |
| + spin_lock_irqsave(&list_lock, irqflags); |
| + type = lock->flags & WAKE_LOCK_TYPE_MASK; |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + wake_unlock_stat_locked(lock, 0); |
| +#endif |
| + if (debug_mask & DEBUG_WAKE_LOCK) |
| + pr_info("wake_unlock: %s\n", lock->name); |
| + lock->flags &= ~(WAKE_LOCK_ACTIVE | WAKE_LOCK_AUTO_EXPIRE); |
| + list_del(&lock->link); |
| + list_add(&lock->link, &inactive_locks); |
| + if (type == WAKE_LOCK_SUSPEND) { |
| + long has_lock = has_wake_lock_locked(type); |
| + if (has_lock > 0) { |
| + if (debug_mask & DEBUG_EXPIRE) |
| + pr_info("wake_unlock: %s, start expire timer, " |
| + "%ld\n", lock->name, has_lock); |
| + mod_timer(&expire_timer, jiffies + has_lock); |
| + } else { |
| + if (del_timer(&expire_timer)) |
| + if (debug_mask & DEBUG_EXPIRE) |
| + pr_info("wake_unlock: %s, stop expire " |
| + "timer\n", lock->name); |
| + if (has_lock == 0) |
| + queue_work(suspend_work_queue, &suspend_work); |
| + } |
| + if (lock == &main_wake_lock) { |
| + if (debug_mask & DEBUG_SUSPEND) |
| + print_active_locks(WAKE_LOCK_SUSPEND); |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + update_sleep_wait_stats_locked(0); |
| +#endif |
| + } |
| + } |
| + spin_unlock_irqrestore(&list_lock, irqflags); |
| +} |
| +EXPORT_SYMBOL(wake_unlock); |
| + |
| +int wake_lock_active(struct wake_lock *lock) |
| +{ |
| + return !!(lock->flags & WAKE_LOCK_ACTIVE); |
| +} |
| +EXPORT_SYMBOL(wake_lock_active); |
| + |
| +static int __init wakelocks_init(void) |
| +{ |
| + int ret; |
| + int i; |
| + |
| + for (i = 0; i < ARRAY_SIZE(active_wake_locks); i++) |
| + INIT_LIST_HEAD(&active_wake_locks[i]); |
| + |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + wake_lock_init(&deleted_wake_locks, WAKE_LOCK_SUSPEND, |
| + "deleted_wake_locks"); |
| +#endif |
| + wake_lock_init(&main_wake_lock, WAKE_LOCK_SUSPEND, "main"); |
| + wake_lock(&main_wake_lock); |
| + wake_lock_init(&unknown_wakeup, WAKE_LOCK_SUSPEND, "unknown_wakeups"); |
| + |
| + ret = platform_device_register(&power_device); |
| + if (ret) { |
| + pr_err("wakelocks_init: platform_device_register failed\n"); |
| + goto err_platform_device_register; |
| + } |
| + ret = platform_driver_register(&power_driver); |
| + if (ret) { |
| + pr_err("wakelocks_init: platform_driver_register failed\n"); |
| + goto err_platform_driver_register; |
| + } |
| + |
| + suspend_work_queue = create_singlethread_workqueue("suspend"); |
| + if (suspend_work_queue == NULL) { |
| + ret = -ENOMEM; |
| + goto err_suspend_work_queue; |
| + } |
| + |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + create_proc_read_entry("wakelocks", S_IRUGO, NULL, |
| + wakelocks_read_proc, NULL); |
| +#endif |
| + |
| + return 0; |
| + |
| +err_suspend_work_queue: |
| + platform_driver_unregister(&power_driver); |
| +err_platform_driver_register: |
| + platform_device_unregister(&power_device); |
| +err_platform_device_register: |
| + wake_lock_destroy(&unknown_wakeup); |
| + wake_lock_destroy(&main_wake_lock); |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + wake_lock_destroy(&deleted_wake_locks); |
| +#endif |
| + return ret; |
| +} |
| + |
| +static void __exit wakelocks_exit(void) |
| +{ |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + remove_proc_entry("wakelocks", NULL); |
| +#endif |
| + destroy_workqueue(suspend_work_queue); |
| + platform_driver_unregister(&power_driver); |
| + platform_device_unregister(&power_device); |
| + wake_lock_destroy(&unknown_wakeup); |
| + wake_lock_destroy(&main_wake_lock); |
| +#ifdef CONFIG_WAKELOCK_STAT |
| + wake_lock_destroy(&deleted_wake_locks); |
| +#endif |
| +} |
| + |
| +core_initcall(wakelocks_init); |
| +module_exit(wakelocks_exit); |
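| |
| In-kernel users go through the API exported above; a sketch with |
| hypothetical mydrv_* names (not part of the patch): initialize the lock |
| once, take a timed lock in the wakeup path so the event can drain before |
| the next suspend attempt, and destroy the lock on teardown. |
| |
| #include <linux/interrupt.h> |
| #include <linux/wakelock.h> |
| |
| static struct wake_lock mydrv_wake_lock; |
| |
| static void mydrv_setup(void) |
| { |
|         wake_lock_init(&mydrv_wake_lock, WAKE_LOCK_SUSPEND, "mydrv"); |
| } |
| |
| static irqreturn_t mydrv_irq(int irq, void *dev_id) |
| { |
|         /* hold off suspend for up to 2 s while userspace consumes the event */ |
|         wake_lock_timeout(&mydrv_wake_lock, 2 * HZ); |
|         return IRQ_HANDLED; |
| } |
| |
| static void mydrv_teardown(void) |
| { |
|         wake_unlock(&mydrv_wake_lock); |
|         wake_lock_destroy(&mydrv_wake_lock); |
| } |
| |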
| diff --git a/stblinux-2.6.37/mm/Makefile b/stblinux-2.6.37/mm/Makefile |
| index f73f75a..cfa5e9a 100644 |
| --- a/stblinux-2.6.37/mm/Makefile |
| +++ b/stblinux-2.6.37/mm/Makefile |
| @@ -24,6 +24,7 @@ obj-$(CONFIG_HUGETLBFS) += hugetlb.o |
| obj-$(CONFIG_NUMA) += mempolicy.o |
| obj-$(CONFIG_SPARSEMEM) += sparse.o |
| obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o |
| +obj-$(CONFIG_ASHMEM) += ashmem.o |
| obj-$(CONFIG_SLOB) += slob.o |
| obj-$(CONFIG_COMPACTION) += compaction.o |
| obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o |
| diff --git a/stblinux-2.6.37/mm/ashmem.c b/stblinux-2.6.37/mm/ashmem.c |
| new file mode 100644 |
| index 0000000..493c7ca |
| --- /dev/null |
| +++ b/stblinux-2.6.37/mm/ashmem.c |
| @@ -0,0 +1,669 @@ |
| +/* mm/ashmem.c |
| +** |
| +** Anonymous Shared Memory Subsystem, ashmem |
| +** |
| +** Copyright (C) 2008 Google, Inc. |
| +** |
| +** Robert Love <rlove@google.com> |
| +** |
| +** This software is licensed under the terms of the GNU General Public |
| +** License version 2, as published by the Free Software Foundation, and |
| +** may be copied, distributed, and modified under those terms. |
| +** |
| +** This program is distributed in the hope that it will be useful, |
| +** but WITHOUT ANY WARRANTY; without even the implied warranty of |
| +** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| +** GNU General Public License for more details. |
| +*/ |
| + |
| +#include <linux/module.h> |
| +#include <linux/file.h> |
| +#include <linux/fs.h> |
| +#include <linux/miscdevice.h> |
| +#include <linux/security.h> |
| +#include <linux/mm.h> |
| +#include <linux/mman.h> |
| +#include <linux/uaccess.h> |
| +#include <linux/personality.h> |
| +#include <linux/bitops.h> |
| +#include <linux/mutex.h> |
| +#include <linux/shmem_fs.h> |
| +#include <linux/ashmem.h> |
| + |
| +#define ASHMEM_NAME_PREFIX "dev/ashmem/" |
| +#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) |
| +#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) |
| + |
| +/* |
| + * ashmem_area - anonymous shared memory area |
| + * Lifecycle: From our parent file's open() until its release() |
| + * Locking: Protected by `ashmem_mutex' |
| + * Big Note: Mappings do NOT pin this structure; it dies on close() |
| + */ |
| +struct ashmem_area { |
| + char name[ASHMEM_FULL_NAME_LEN];/* optional name for /proc/pid/maps */ |
| + struct list_head unpinned_list; /* list of this area's unpinned ranges */ |
| + struct file *file; /* the shmem-based backing file */ |
| + size_t size; /* size of the mapping, in bytes */ |
| + unsigned long prot_mask; /* allowed prot bits, as vm_flags */ |
| +}; |
| + |
| +/* |
| + * ashmem_range - represents an interval of unpinned (evictable) pages |
| + * Lifecycle: From unpin to pin |
| + * Locking: Protected by `ashmem_mutex' |
| + */ |
| +struct ashmem_range { |
| + struct list_head lru; /* entry in LRU list */ |
| + struct list_head unpinned; /* entry in its area's unpinned list */ |
| + struct ashmem_area *asma; /* associated area */ |
| + size_t pgstart; /* starting page, inclusive */ |
| + size_t pgend; /* ending page, inclusive */ |
| + unsigned int purged; /* ASHMEM_NOT or ASHMEM_WAS_PURGED */ |
| +}; |
| + |
| +/* LRU list of unpinned pages, protected by ashmem_mutex */ |
| +static LIST_HEAD(ashmem_lru_list); |
| + |
| +/* Count of pages on our LRU list, protected by ashmem_mutex */ |
| +static unsigned long lru_count; |
| + |
| +/* |
| + * ashmem_mutex - protects the list of and each individual ashmem_area |
| + * |
| + * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem |
| + */ |
| +static DEFINE_MUTEX(ashmem_mutex); |
| + |
| +static struct kmem_cache *ashmem_area_cachep __read_mostly; |
| +static struct kmem_cache *ashmem_range_cachep __read_mostly; |
| + |
| +#define range_size(range) \ |
| + ((range)->pgend - (range)->pgstart + 1) |
| + |
| +#define range_on_lru(range) \ |
| + ((range)->purged == ASHMEM_NOT_PURGED) |
| + |
| +#define page_range_subsumes_range(range, start, end) \ |
| + (((range)->pgstart >= (start)) && ((range)->pgend <= (end))) |
| + |
| +#define page_range_subsumed_by_range(range, start, end) \ |
| + (((range)->pgstart <= (start)) && ((range)->pgend >= (end))) |
| + |
| +#define page_in_range(range, page) \ |
| + (((range)->pgstart <= (page)) && ((range)->pgend >= (page))) |
| + |
| +#define page_range_in_range(range, start, end) \ |
| + (page_in_range(range, start) || page_in_range(range, end) || \ |
| + page_range_subsumes_range(range, start, end)) |
| + |
| +#define range_before_page(range, page) \ |
| + ((range)->pgend < (page)) |
| + |
| +#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) |
| + |
| +static inline void lru_add(struct ashmem_range *range) |
| +{ |
| + list_add_tail(&range->lru, &ashmem_lru_list); |
| + lru_count += range_size(range); |
| +} |
| + |
| +static inline void lru_del(struct ashmem_range *range) |
| +{ |
| + list_del(&range->lru); |
| + lru_count -= range_size(range); |
| +} |
| + |
| +/* |
| + * range_alloc - allocate and initialize a new ashmem_range structure |
| + * |
| + * 'asma' - associated ashmem_area |
| + * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list |
| + * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED) |
| + * 'start' - starting page, inclusive |
| + * 'end' - ending page, inclusive |
| + * |
| + * Caller must hold ashmem_mutex. |
| + */ |
| +static int range_alloc(struct ashmem_area *asma, |
| + struct ashmem_range *prev_range, unsigned int purged, |
| + size_t start, size_t end) |
| +{ |
| + struct ashmem_range *range; |
| + range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); |
| + if (unlikely(!range)) |
| + return -ENOMEM; |
| + |
| + range->asma = asma; |
| + range->pgstart = start; |
| + range->pgend = end; |
| + range->purged = purged; |
| + |
| + list_add_tail(&range->unpinned, &prev_range->unpinned); |
| + |
| + if (range_on_lru(range)) |
| + lru_add(range); |
| + |
| + return 0; |
| +} |
| + |
| +static void range_del(struct ashmem_range *range) |
| +{ |
| + list_del(&range->unpinned); |
| + if (range_on_lru(range)) |
| + lru_del(range); |
| + kmem_cache_free(ashmem_range_cachep, range); |
| +} |
| + |
| +/* |
| + * range_shrink - shrinks a range |
| + * |
| + * Caller must hold ashmem_mutex. |
| + */ |
| +static inline void range_shrink(struct ashmem_range *range, |
| + size_t start, size_t end) |
| +{ |
| + size_t pre = range_size(range); |
| + |
| + range->pgstart = start; |
| + range->pgend = end; |
| + |
| + if (range_on_lru(range)) |
| + lru_count -= pre - range_size(range); |
| +} |
| + |
| +static int ashmem_open(struct inode *inode, struct file *file) |
| +{ |
| + struct ashmem_area *asma; |
| + int ret; |
| + |
| + ret = nonseekable_open(inode, file); |
| + if (unlikely(ret)) |
| + return ret; |
| + |
| + asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL); |
| + if (unlikely(!asma)) |
| + return -ENOMEM; |
| + |
| + INIT_LIST_HEAD(&asma->unpinned_list); |
| + memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN); |
| + asma->prot_mask = PROT_MASK; |
| + file->private_data = asma; |
| + |
| + return 0; |
| +} |
| + |
| +static int ashmem_release(struct inode *ignored, struct file *file) |
| +{ |
| + struct ashmem_area *asma = file->private_data; |
| + struct ashmem_range *range, *next; |
| + |
| + mutex_lock(&ashmem_mutex); |
| + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) |
| + range_del(range); |
| + mutex_unlock(&ashmem_mutex); |
| + |
| + if (asma->file) |
| + fput(asma->file); |
| + kmem_cache_free(ashmem_area_cachep, asma); |
| + |
| + return 0; |
| +} |
| + |
| +static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) |
| +{ |
| + struct ashmem_area *asma = file->private_data; |
| + int ret = 0; |
| + |
| + mutex_lock(&ashmem_mutex); |
| + |
| + /* user needs to SET_SIZE before mapping */ |
| + if (unlikely(!asma->size)) { |
| + ret = -EINVAL; |
| + goto out; |
| + } |
| + |
| + /* requested protection bits must match our allowed protection mask */ |
| + if (unlikely((vma->vm_flags & ~asma->prot_mask) & PROT_MASK)) { |
| + ret = -EPERM; |
| + goto out; |
| + } |
| + |
| + if (!asma->file) { |
| + char *name = ASHMEM_NAME_DEF; |
| + struct file *vmfile; |
| + |
| + if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') |
| + name = asma->name; |
| + |
| + /* ... and allocate the backing shmem file */ |
| + vmfile = shmem_file_setup(name, asma->size, vma->vm_flags); |
| + if (unlikely(IS_ERR(vmfile))) { |
| + ret = PTR_ERR(vmfile); |
| + goto out; |
| + } |
| + asma->file = vmfile; |
| + } |
| + get_file(asma->file); |
| + |
| + if (vma->vm_flags & VM_SHARED) |
| + shmem_set_file(vma, asma->file); |
| + else { |
| + if (vma->vm_file) |
| + fput(vma->vm_file); |
| + vma->vm_file = asma->file; |
| + } |
| + vma->vm_flags |= VM_CAN_NONLINEAR; |
| + |
| +out: |
| + mutex_unlock(&ashmem_mutex); |
| + return ret; |
| +} |
| + |
| +/* |
| + * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab |
| + * |
| + * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how |
| + * many objects (pages) we have in total. |
| + * |
| + * 'gfp_mask' is the mask of the allocation that got us into this mess. |
| + * |
| + * Return value is the number of objects (pages) remaining, or -1 if we cannot |
| + * proceed without risk of deadlock (due to gfp_mask). |
| + * |
| + * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial |
| + * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' |
| + * pages freed. |
| + */ |
| +static int ashmem_shrink(struct shrinker *pshrinker, int nr_to_scan, gfp_t gfp_mask) |
| +{ |
| + struct ashmem_range *range, *next; |
| + |
| + /* We might recurse into filesystem code, so bail out if necessary */ |
| + if (nr_to_scan && !(gfp_mask & __GFP_FS)) |
| + return -1; |
| + if (!nr_to_scan) |
| + return lru_count; |
| + |
| + mutex_lock(&ashmem_mutex); |
| + list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { |
| + struct inode *inode = range->asma->file->f_dentry->d_inode; |
| + loff_t start = range->pgstart * PAGE_SIZE; |
| + loff_t end = (range->pgend + 1) * PAGE_SIZE - 1; |
| + |
| + vmtruncate_range(inode, start, end); |
| + range->purged = ASHMEM_WAS_PURGED; |
| + lru_del(range); |
| + |
| + nr_to_scan -= range_size(range); |
| + if (nr_to_scan <= 0) |
| + break; |
| + } |
| + mutex_unlock(&ashmem_mutex); |
| + |
| + return lru_count; |
| +} |
| + |
| +static struct shrinker ashmem_shrinker = { |
| + .shrink = ashmem_shrink, |
| + .seeks = DEFAULT_SEEKS * 4, |
| +}; |
| + |
| +static int set_prot_mask(struct ashmem_area *asma, unsigned long prot) |
| +{ |
| + int ret = 0; |
| + |
| + mutex_lock(&ashmem_mutex); |
| + |
| + /* the user can only remove, not add, protection bits */ |
| + if (unlikely((asma->prot_mask & prot) != prot)) { |
| + ret = -EINVAL; |
| + goto out; |
| + } |
| + |
| + /* does the application expect PROT_READ to imply PROT_EXEC? */ |
| + if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) |
| + prot |= PROT_EXEC; |
| + |
| + asma->prot_mask = prot; |
| + |
| +out: |
| + mutex_unlock(&ashmem_mutex); |
| + return ret; |
| +} |
| + |
| +static int set_name(struct ashmem_area *asma, void __user *name) |
| +{ |
| + int ret = 0; |
| + |
| + mutex_lock(&ashmem_mutex); |
| + |
| + /* cannot change an existing mapping's name */ |
| + if (unlikely(asma->file)) { |
| + ret = -EINVAL; |
| + goto out; |
| + } |
| + |
| + if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN, |
| + name, ASHMEM_NAME_LEN))) |
| + ret = -EFAULT; |
| + asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0'; |
| + |
| +out: |
| + mutex_unlock(&ashmem_mutex); |
| + |
| + return ret; |
| +} |
| + |
| +static int get_name(struct ashmem_area *asma, void __user *name) |
| +{ |
| + int ret = 0; |
| + |
| + mutex_lock(&ashmem_mutex); |
| + if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') { |
| + size_t len; |
| + |
| + /* |
| + * Copying only `len', instead of ASHMEM_NAME_LEN, bytes |
| + * prevents us from revealing one user's stack to another. |
| + */ |
| + len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; |
| + if (unlikely(copy_to_user(name, |
| + asma->name + ASHMEM_NAME_PREFIX_LEN, len))) |
| + ret = -EFAULT; |
| + } else { |
| + if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF, |
| + sizeof(ASHMEM_NAME_DEF)))) |
| + ret = -EFAULT; |
| + } |
| + mutex_unlock(&ashmem_mutex); |
| + |
| + return ret; |
| +} |
| + |
| +/* |
| + * ashmem_pin - pin the given ashmem region, returning whether it was |
| + * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED). |
| + * |
| + * Caller must hold ashmem_mutex. |
| + */ |
| +static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) |
| +{ |
| + struct ashmem_range *range, *next; |
| + int ret = ASHMEM_NOT_PURGED; |
| + |
| + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { |
| + /* moved past last applicable page; we can short circuit */ |
| + if (range_before_page(range, pgstart)) |
| + break; |
| + |
| + /* |
| + * The user can ask us to pin pages that span multiple ranges, |
| + * or to pin pages that aren't even unpinned, so this is messy. |
| + * |
| + * Four cases: |
| + * 1. The requested range subsumes an existing range, so we |
| + * just remove the entire matching range. |
| + * 2. The requested range overlaps the start of an existing |
| + * range, so we just update that range. |
| + * 3. The requested range overlaps the end of an existing |
| + * range, so we just update that range. |
| + * 4. The requested range punches a hole in an existing range, |
| + * so we have to update one side of the range and then |
| + * create a new range for the other side. |
| + */ |
| + if (page_range_in_range(range, pgstart, pgend)) { |
| + ret |= range->purged; |
| + |
| + /* Case #1: Easy. Just nuke the whole thing. */ |
| + if (page_range_subsumes_range(range, pgstart, pgend)) { |
| + range_del(range); |
| + continue; |
| + } |
| + |
| + /* Case #2: We overlap from the start, so adjust it */ |
| + if (range->pgstart >= pgstart) { |
| + range_shrink(range, pgend + 1, range->pgend); |
| + continue; |
| + } |
| + |
| + /* Case #3: We overlap from the rear, so adjust it */ |
| + if (range->pgend <= pgend) { |
| + range_shrink(range, range->pgstart, pgstart-1); |
| + continue; |
| + } |
| + |
| + /* |
| + * Case #4: We eat a chunk out of the middle. A bit |
| + * more complicated, we allocate a new range for the |
| + * second half and adjust the first chunk's endpoint. |
| + */ |
| + range_alloc(asma, range, range->purged, |
| + pgend + 1, range->pgend); |
| + range_shrink(range, range->pgstart, pgstart - 1); |
| + break; |
| + } |
| + } |
| + |
| + return ret; |
| +} |
| + |
| +/* |
| + * ashmem_unpin - unpin the given range of pages. Returns zero on success. |
| + * |
| + * Caller must hold ashmem_mutex. |
| + */ |
| +static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) |
| +{ |
| + struct ashmem_range *range, *next; |
| + unsigned int purged = ASHMEM_NOT_PURGED; |
| + |
| +restart: |
| + list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { |
| + /* short circuit: this is our insertion point */ |
| + if (range_before_page(range, pgstart)) |
| + break; |
| + |
| + /* |
| + * The user can ask us to unpin pages that are already entirely |
| + * or partially pinned. We handle those two cases here. |
| + */ |
| + if (page_range_subsumed_by_range(range, pgstart, pgend)) |
| + return 0; |
| + if (page_range_in_range(range, pgstart, pgend)) { |
| + pgstart = min_t(size_t, range->pgstart, pgstart); |
| + pgend = max_t(size_t, range->pgend, pgend); |
| + purged |= range->purged; |
| + range_del(range); |
| + goto restart; |
| + } |
| + } |
| + |
| + return range_alloc(asma, range, purged, pgstart, pgend); |
| +} |
| + |
| +/* |
| + * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the |
| + * given interval are unpinned and ASHMEM_IS_PINNED otherwise. |
| + * |
| + * Caller must hold ashmem_mutex. |
| + */ |
| +static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart, |
| + size_t pgend) |
| +{ |
| + struct ashmem_range *range; |
| + int ret = ASHMEM_IS_PINNED; |
| + |
| + list_for_each_entry(range, &asma->unpinned_list, unpinned) { |
| + if (range_before_page(range, pgstart)) |
| + break; |
| + if (page_range_in_range(range, pgstart, pgend)) { |
| + ret = ASHMEM_IS_UNPINNED; |
| + break; |
| + } |
| + } |
| + |
| + return ret; |
| +} |
| + |
| +static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, |
| + void __user *p) |
| +{ |
| + struct ashmem_pin pin; |
| + size_t pgstart, pgend; |
| + int ret = -EINVAL; |
| + |
| + if (unlikely(!asma->file)) |
| + return -EINVAL; |
| + |
| + if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) |
| + return -EFAULT; |
| + |
| + /* per custom, you can pass zero for len to mean "everything onward" */ |
| + if (!pin.len) |
| + pin.len = PAGE_ALIGN(asma->size) - pin.offset; |
| + |
| + if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) |
| + return -EINVAL; |
| + |
| + if (unlikely(((__u32) -1) - pin.offset < pin.len)) |
| + return -EINVAL; |
| + |
| + if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) |
| + return -EINVAL; |
| + |
| + pgstart = pin.offset / PAGE_SIZE; |
| + pgend = pgstart + (pin.len / PAGE_SIZE) - 1; |
| + |
| + mutex_lock(&ashmem_mutex); |
| + |
| + switch (cmd) { |
| + case ASHMEM_PIN: |
| + ret = ashmem_pin(asma, pgstart, pgend); |
| + break; |
| + case ASHMEM_UNPIN: |
| + ret = ashmem_unpin(asma, pgstart, pgend); |
| + break; |
| + case ASHMEM_GET_PIN_STATUS: |
| + ret = ashmem_get_pin_status(asma, pgstart, pgend); |
| + break; |
| + } |
| + |
| + mutex_unlock(&ashmem_mutex); |
| + |
| + return ret; |
| +} |
| + |
| +static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| +{ |
| + struct ashmem_area *asma = file->private_data; |
| + long ret = -ENOTTY; |
| + |
| + switch (cmd) { |
| + case ASHMEM_SET_NAME: |
| + ret = set_name(asma, (void __user *) arg); |
| + break; |
| + case ASHMEM_GET_NAME: |
| + ret = get_name(asma, (void __user *) arg); |
| + break; |
| + case ASHMEM_SET_SIZE: |
| + ret = -EINVAL; |
| + if (!asma->file) { |
| + ret = 0; |
| + asma->size = (size_t) arg; |
| + } |
| + break; |
| + case ASHMEM_GET_SIZE: |
| + ret = asma->size; |
| + break; |
| + case ASHMEM_SET_PROT_MASK: |
| + ret = set_prot_mask(asma, arg); |
| + break; |
| + case ASHMEM_GET_PROT_MASK: |
| + ret = asma->prot_mask; |
| + break; |
| + case ASHMEM_PIN: |
| + case ASHMEM_UNPIN: |
| + case ASHMEM_GET_PIN_STATUS: |
| + ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg); |
| + break; |
| + case ASHMEM_PURGE_ALL_CACHES: |
| + ret = -EPERM; |
| + if (capable(CAP_SYS_ADMIN)) { |
| + ret = ashmem_shrink(&ashmem_shrinker, 0, GFP_KERNEL); |
| + ashmem_shrink(&ashmem_shrinker, ret, GFP_KERNEL); |
| + } |
| + break; |
| + } |
| + |
| + return ret; |
| +} |
| + |
| +static const struct file_operations ashmem_fops = { |
| + .owner = THIS_MODULE, |
| + .open = ashmem_open, |
| + .release = ashmem_release, |
| + .mmap = ashmem_mmap, |
| + .unlocked_ioctl = ashmem_ioctl, |
| + .compat_ioctl = ashmem_ioctl, |
| +}; |
| + |
| +static struct miscdevice ashmem_misc = { |
| + .minor = MISC_DYNAMIC_MINOR, |
| + .name = "ashmem", |
| + .fops = &ashmem_fops, |
| +}; |
| + |
| +static int __init ashmem_init(void) |
| +{ |
| + int ret; |
| + |
| + ashmem_area_cachep = kmem_cache_create("ashmem_area_cache", |
| + sizeof(struct ashmem_area), |
| + 0, 0, NULL); |
| + if (unlikely(!ashmem_area_cachep)) { |
| + printk(KERN_ERR "ashmem: failed to create slab cache\n"); |
| + return -ENOMEM; |
| + } |
| + |
| + ashmem_range_cachep = kmem_cache_create("ashmem_range_cache", |
| + sizeof(struct ashmem_range), |
| + 0, 0, NULL); |
| + if (unlikely(!ashmem_range_cachep)) { |
| + printk(KERN_ERR "ashmem: failed to create slab cache\n"); |
| + return -ENOMEM; |
| + } |
| + |
| + ret = misc_register(&ashmem_misc); |
| + if (unlikely(ret)) { |
| + printk(KERN_ERR "ashmem: failed to register misc device!\n"); |
| + return ret; |
| + } |
| + |
| + register_shrinker(&ashmem_shrinker); |
| + |
| + printk(KERN_INFO "ashmem: initialized\n"); |
| + |
| + return 0; |
| +} |
| + |
| +static void __exit ashmem_exit(void) |
| +{ |
| + int ret; |
| + |
| + unregister_shrinker(&ashmem_shrinker); |
| + |
| + ret = misc_deregister(&ashmem_misc); |
| + if (unlikely(ret)) |
| + printk(KERN_ERR "ashmem: failed to unregister misc device!\n"); |
| + |
| + kmem_cache_destroy(ashmem_range_cachep); |
| + kmem_cache_destroy(ashmem_area_cachep); |
| + |
| + printk(KERN_INFO "ashmem: unloaded\n"); |
| +} |
| + |
| +module_init(ashmem_init); |
| +module_exit(ashmem_exit); |
| + |
| +MODULE_LICENSE("GPL"); |
| diff --git a/stblinux-2.6.37/mm/shmem.c b/stblinux-2.6.37/mm/shmem.c |
| index 47fdeeb..76ebb45 100644 |
| --- a/stblinux-2.6.37/mm/shmem.c |
| +++ b/stblinux-2.6.37/mm/shmem.c |
| @@ -2767,6 +2767,14 @@ put_memory: |
| } |
| EXPORT_SYMBOL_GPL(shmem_file_setup); |
| |
| +void shmem_set_file(struct vm_area_struct *vma, struct file *file) |
| +{ |
| + if (vma->vm_file) |
| + fput(vma->vm_file); |
| + vma->vm_file = file; |
| + vma->vm_ops = &shmem_vm_ops; |
| +} |
| + |
| /** |
| * shmem_zero_setup - setup a shared anonymous mapping |
| * @vma: the vma to be mmapped is prepared by do_mmap_pgoff |
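| |
| A sketch of the intended caller (hypothetical, not part of the patch): a |
| driver's mmap() method backs the VMA with its own shmem file, exactly as |
| ashmem_mmap() above does for shared mappings. |
| |
| static int mydrv_mmap(struct file *file, struct vm_area_struct *vma) |
| { |
|         size_t size = vma->vm_end - vma->vm_start; |
|         struct file *backing = shmem_file_setup("mydrv", size, vma->vm_flags); |
| |
|         if (IS_ERR(backing)) |
|                 return PTR_ERR(backing); |
|         shmem_set_file(vma, backing); /* drops the old vm_file, installs ours */ |
|         return 0; |
| } |
| |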
| diff --git a/stblinux-2.6.37/net/ipv4/sysfs_net_ipv4.c b/stblinux-2.6.37/net/ipv4/sysfs_net_ipv4.c |
| new file mode 100644 |
| index 0000000..0cbbf10 |
| --- /dev/null |
| +++ b/stblinux-2.6.37/net/ipv4/sysfs_net_ipv4.c |
| @@ -0,0 +1,88 @@ |
| +/* |
| + * net/ipv4/sysfs_net_ipv4.c |
| + * |
| + * sysfs-based networking knobs (so we can, unlike with sysctl, control perms) |
| + * |
| + * Copyright (C) 2008 Google, Inc. |
| + * |
| + * Robert Love <rlove@google.com> |
| + * |
| + * This software is licensed under the terms of the GNU General Public |
| + * License version 2, as published by the Free Software Foundation, and |
| + * may be copied, distributed, and modified under those terms. |
| + * |
| + * This program is distributed in the hope that it will be useful, |
| + * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| + * GNU General Public License for more details. |
| + */ |
| + |
| +#include <linux/kobject.h> |
| +#include <linux/string.h> |
| +#include <linux/sysfs.h> |
| +#include <linux/init.h> |
| +#include <net/tcp.h> |
| + |
| +#define CREATE_IPV4_FILE(_name, _var) \ |
| +static ssize_t _name##_show(struct kobject *kobj, \ |
| + struct kobj_attribute *attr, char *buf) \ |
| +{ \ |
| + return sprintf(buf, "%d\n", _var); \ |
| +} \ |
| +static ssize_t _name##_store(struct kobject *kobj, \ |
| + struct kobj_attribute *attr, \ |
| + const char *buf, size_t count) \ |
| +{ \ |
| + int val, ret; \ |
| + ret = sscanf(buf, "%d", &val); \ |
| + if (ret != 1) \ |
| + return -EINVAL; \ |
| + if (val < 0) \ |
| + return -EINVAL; \ |
| + _var = val; \ |
| + return count; \ |
| +} \ |
| +static struct kobj_attribute _name##_attr = \ |
| + __ATTR(_name, 0644, _name##_show, _name##_store) |
| + |
| +CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]); |
| +CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]); |
| +CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]); |
| + |
| +CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]); |
| +CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]); |
| +CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]); |
| + |
| +static struct attribute *ipv4_attrs[] = { |
| + &tcp_wmem_min_attr.attr, |
| + &tcp_wmem_def_attr.attr, |
| + &tcp_wmem_max_attr.attr, |
| + &tcp_rmem_min_attr.attr, |
| + &tcp_rmem_def_attr.attr, |
| + &tcp_rmem_max_attr.attr, |
| + NULL |
| +}; |
| + |
| +static struct attribute_group ipv4_attr_group = { |
| + .attrs = ipv4_attrs, |
| +}; |
| + |
| +static __init int sysfs_ipv4_init(void) |
| +{ |
| + struct kobject *ipv4_kobject; |
| + int ret; |
| + |
| + ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj); |
| + if (!ipv4_kobject) |
| + return -ENOMEM; |
| + |
| + ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group); |
| + if (ret) { |
| + kobject_put(ipv4_kobject); |
| + return ret; |
| + } |
| + |
| + return 0; |
| +} |
| + |
| +subsys_initcall(sysfs_ipv4_init); |
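| |
| The knobs land under /sys/kernel/ipv4/ (kernel_kobj) as plain decimal byte |
| counts, with a file mode (0644 here) that can be adjusted, which is the |
| point of using sysfs over sysctl per the header comment. A sketch of a |
| runtime tweak (not part of the patch): |
| |
| #include <fcntl.h> |
| #include <string.h> |
| #include <unistd.h> |
| |
| int main(void) |
| { |
|         const char *val = "262144"; /* cap send buffers at 256 KiB */ |
|         int fd = open("/sys/kernel/ipv4/tcp_wmem_max", O_WRONLY); |
| |
|         if (fd >= 0) { |
|                 write(fd, val, strlen(val)); |
|                 close(fd); |
|         } |
|         return 0; |
| } |
| |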