typedef struct pglist_data {
    struct zone node_zones[MAX_NR_ZONES];
    struct zonelist node_zonelists[MAX_ZONELISTS];
    int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
    struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
    struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
    struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
    spinlock_t node_size_lock;
#endif
    unsigned long node_start_pfn;
    unsigned long node_present_pages; /* total number of physical pages */
    unsigned long node_spanned_pages; /* total size of physical page
                                         range, including holes */
    int node_id;
    wait_queue_head_t kswapd_wait;
    wait_queue_head_t pfmemalloc_wait;
    struct task_struct *kswapd;     /* Protected by mem_hotplug_begin/end() */
    int kswapd_order;
    enum zone_type kswapd_classzone_idx;

    int kswapd_failures;            /* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
    int kcompactd_max_order;
    enum zone_type kcompactd_classzone_idx;
    wait_queue_head_t kcompactd_wait;
    struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
    /* Lock serializing the migrate rate limiting window */
    spinlock_t numabalancing_migrate_lock;
    /* Rate limiting time interval */
    unsigned long numabalancing_migrate_next_window;
    /* Number of pages migrated during the rate limiting time interval */
    unsigned long numabalancing_migrate_nr_pages;
#endif
    unsigned long totalreserve_pages;
#ifdef CONFIG_NUMA
    /*
     * zone reclaim becomes active if more unmapped pages exist.
     */
    unsigned long min_unmapped_pages;
    unsigned long min_slab_pages;
#endif /* CONFIG_NUMA */

    /* Write-intensive fields used by page reclaim */
    ZONE_PADDING(_pad1_)
    spinlock_t lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
    /*
     * If memory initialisation on large machines is deferred then this
     * is the first PFN that needs to be initialised.
     */
    unsigned long first_deferred_pfn;
    /* Number of non-deferred pages */
    unsigned long static_init_pgcnt;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
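To make these per-node fields concrete, the sketch below walks every online node with for_each_online_node() and NODE_DATA() and prints the ranges the fields describe. It is a minimal illustration, not code from the kernel tree: the helper name dump_node_ranges() is hypothetical, and it assumes kernel headers of roughly this vintage (~v4.15).

#include <linux/mmzone.h>       /* pg_data_t, NODE_DATA() */
#include <linux/nodemask.h>     /* for_each_online_node() */
#include <linux/printk.h>

/* Hypothetical helper: report the PFN range backing each online node. */
static void dump_node_ranges(void)
{
    int nid;

    for_each_online_node(nid) {
        pg_data_t *pgdat = NODE_DATA(nid);

        /*
         * node_spanned_pages covers the whole PFN range, holes
         * included; node_present_pages counts only real pages.
         */
        pr_info("node %d: pfn %lu-%lu, %lu present of %lu spanned\n",
                pgdat->node_id,
                pgdat->node_start_pfn,
                pgdat->node_start_pfn + pgdat->node_spanned_pages,
                pgdat->node_present_pages,
                pgdat->node_spanned_pages);
    }
}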
struct page {
    /* First double word block */
    unsigned long flags;
    union {
        struct address_space *mapping;
        void *s_mem;                    /* slab first object */
        atomic_t compound_mapcount;     /* first tail page */
    };

    /* Second double word */
    union {
        pgoff_t index;          /* Our offset within mapping. */
        void *freelist;         /* slub first free object */
    };

    union {
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
        /* Used for cmpxchg_double in slub */
        unsigned long counters;
#else
        unsigned counters;
#endif
        struct {
            union {
                atomic_t _mapcount;

                unsigned int active;    /* SLAB */
                struct {                /* SLUB */
                    unsigned inuse:16;
                    unsigned objects:15;
                    unsigned frozen:1;
                };
                int units;              /* SLOB */
            };
            atomic_t _refcount;
        };
    };

    /* Third double word block */
    union {
        struct list_head lru;
        struct dev_pagemap *pgmap;
        struct {                        /* slub per cpu partial pages */
            struct page *next;          /* Next partial slab */
#ifdef CONFIG_64BIT
            int pages;                  /* Nr of partial slabs left */
            int pobjects;               /* Approximate # of objects */
#else
            short int pages;
            short int pobjects;
#endif
        };

        struct rcu_head rcu_head;
        struct {
            unsigned long compound_head;
#ifdef CONFIG_64BIT
            unsigned int compound_dtor;
            unsigned int compound_order;
#else
            unsigned short int compound_dtor;
            unsigned short int compound_order;
#endif
        };
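The compound_head field deserves a note: for the tail pages of a compound page the kernel stores the address of the head page with bit 0 set, so a single word serves as both a tail-page marker and a back-pointer. The helpers below are a simplified rendering of PageTail() and compound_head() from include/linux/page-flags.h of this era; consult the actual header for the exact definitions.

static inline int PageTail(struct page *page)
{
    /* Bit 0 of compound_head tags this as a tail page. */
    return READ_ONCE(page->compound_head) & 1;
}

static inline struct page *compound_head(struct page *page)
{
    unsigned long head = READ_ONCE(page->compound_head);

    /* Tail page: the remaining bits hold the head page's address. */
    if (head & 1)
        return (struct page *)(head - 1);
    /* Head page (or not compound at all): the page is its own head. */
    return page;
}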