#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif


#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
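
/*
 * With all of CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32 and CONFIG_HIGHMEM enabled,
 * FOR_ALL_ZONES(PGALLOC) expands to the list PGALLOC_DMA, PGALLOC_DMA32,
 * PGALLOC_NORMAL, PGALLOC_HIGH, PGALLOC_MOVABLE, i.e. one event item per
 * configured zone type.
 */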

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
		NR_VM_EVENT_ITEMS
};
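
/*
 * With CONFIG_VM_EVENT_COUNTERS enabled these counters are exported through
 * /proc/vmstat (see the vmstat_text[] table in mm/vmstat.c).
 */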

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-cpu counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__get_cpu_var(vm_event_states).event[item]++;
}

static inline void count_vm_event(enum vm_event_item item)
{
	get_cpu_var(vm_event_states).event[item]++;
	put_cpu();
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__get_cpu_var(vm_event_states).event[item] += delta;
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	get_cpu_var(vm_event_states).event[item] += delta;
	put_cpu();
}
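
/*
 * Illustrative usage (the real callers live in mm/ and elsewhere); e.g. the
 * fault path and the page allocator do roughly:
 *
 *	count_vm_event(PGMAJFAULT);
 *	__count_vm_events(PGFREE, 1 << order);
 *
 * The __count_vm_event()/__count_vm_events() variants assume the caller
 * already has preemption disabled, since __get_cpu_var() does not disable
 * it itself.
 */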

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
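
/*
 * For example, __count_zone_vm_events(PGALLOC, zone, 1 << order) increments
 * PGALLOC_DMA, PGALLOC_NORMAL, ... according to zone_idx(zone). This relies
 * on the per-zone event items being declared in the same order as
 * enum zone_type, so that item##_NORMAL - ZONE_NORMAL is the start of the
 * group.
 */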

/*
 * Zone-based page accounting with per-cpu differentials.
 */
extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_stat[item]);
}

static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
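
/*
 * On SMP the zone and global counters lag the per-cpu differentials, so a
 * reader may transiently see a small negative value; it is clamped to 0
 * above instead of being returned. An illustrative read:
 *
 *	unsigned long free = global_page_state(NR_FREE_PAGES);
 */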

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function
 * is called frequently on NUMA machines, so try to be as
 * frugal as possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
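
/*
 * Illustrative use, summing a counter over all zones of the local node:
 *
 *	unsigned long file = node_page_state(numa_node_id(), NR_FILE_PAGES);
 */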

extern void zone_statistics(struct zone *, struct zone *);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl,_z) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);
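
/*
 * On SMP the __mod/__inc/__dec variants above update the per-cpu
 * differential (folding into the zone and global counters when a threshold
 * is crossed) and expect the caller to have interrupts disabled; the plain
 * variants disable interrupts around the update themselves (see
 * mm/vmstat.c). A minimal sketch, assuming the caller holds zone->lock with
 * IRQs off:
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 */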
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no need
 * to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */