From 2a47bd2e62bde35ea007cea45433e8c3e8170147 Mon Sep 17 00:00:00 2001
From: Fabian Keil
Date: Sun, 20 Sep 2015 19:53:09 +0200
Subject: [PATCH 1/5] top: Show ZFS ARC target size

---
 usr.bin/top/machine.c   |  6 ++++--
 usr.bin/top/top.local.1 | 12 +++++++++---
 2 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/usr.bin/top/machine.c b/usr.bin/top/machine.c
index 6c4cee7..0f5367f 100644
--- a/usr.bin/top/machine.c
+++ b/usr.bin/top/machine.c
@@ -180,9 +180,9 @@ char *memorynames[] = {
     "K Free", NULL
 };
 
-int arc_stats[7];
+int arc_stats[8];
 char *arcnames[] = {
-    "K Total, ", "K MFU, ", "K MRU, ", "K Anon, ", "K Header, ", "K Other",
+    "K Total, ", "K MFU, ", "K MRU, ", "K Anon, ", "K Header, ", "K Other, ", "K Target",
     NULL
 };
@@ -544,6 +544,8 @@ get_system_info(struct system_info *si)
     arc_stats[4] = arc_stat + arc_stat2 >> 10;
     GETSYSCTL("kstat.zfs.misc.arcstats.other_size", arc_stat);
     arc_stats[5] = arc_stat >> 10;
+    GETSYSCTL("kstat.zfs.misc.arcstats.c", arc_stat);
+    arc_stats[6] = arc_stat >> 10;
     si->arc = arc_stats;
 }
 
diff --git a/usr.bin/top/top.local.1 b/usr.bin/top/top.local.1
index 864ab8d..951754c 100644
--- a/usr.bin/top/top.local.1
+++ b/usr.bin/top/top.local.1
@@ -2,9 +2,9 @@
 .SH "FreeBSD NOTES"
 .SH DESCRIPTION OF MEMORY
-Mem: 9220K Active, 1M Inact, 3284K Wired, 1M Cache, 2M Buf, 1320K Free
-ARC: 2048K Total, 342K MRU, 760K MFU, 272K Anon, 232K Header, 442K Other
-Swap: 91M Total, 79M Free, 13% Inuse, 80K In, 104K Out
+Mem: 387M Active, 499M Inact, 945M Wired, 8K Cache, 5528K Buf, 83M Free
+ARC: 349M Total, 174M MFU, 94M MRU, 2192K Anon, 6488K Header, 73M Other, 350M Target
+Swap: 2048M Total, 124M Used, 1924M Free, 6% Inuse
 .TP
 .B K:
 Kilobyte
@@ -57,6 +57,12 @@ number of ARC bytes holding headers
 .TP
 .B Other
 miscellaneous ARC bytes
+.TP
+.B Target
+ARC target size, that is, the total amount of memory
+the ARC considers usable for itself. If the target is
+not equal to the total size, the ARC will shrink or
+grow to reach the target.
 .SS Swap Stats
 .TP
 .B Total:
-- 
2.7.0

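
For reference, the Total and Target values that the patched top(1) prints can
also be read from userland with sysctlbyname(3). A minimal sketch, not part of
the patch series, assuming the usual kstat.zfs.misc.arcstats.size node for the
total and the kstat.zfs.misc.arcstats.c node that the patch itself queries:

/*
 * Print ARC Total vs. Target the way the patched top(1) does:
 * read the arcstats sysctls and shift the byte counts to kilobytes.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
read_arcstat(const char *name)
{
    uint64_t value;
    size_t len = sizeof(value);

    if (sysctlbyname(name, &value, &len, NULL, 0) != 0)
        err(1, "sysctlbyname(%s)", name);
    return (value);
}

int
main(void)
{
    uint64_t total = read_arcstat("kstat.zfs.misc.arcstats.size");
    uint64_t target = read_arcstat("kstat.zfs.misc.arcstats.c");

    printf("ARC: %juK Total, %juK Target\n",
        (uintmax_t)(total >> 10), (uintmax_t)(target >> 10));
    return (0);
}
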
From 2e58932de3cb62f082c2236a4c637b4f206c4625 Mon Sep 17 00:00:00 2001
From: Fabian Keil
Date: Tue, 22 Sep 2015 12:34:53 +0200
Subject: [PATCH 2/5] sys/vm: Stop increasing domain->vmd_pass after reaching
 the "maximum" value

... explicitly understood by vm_pageout_scan().

Should prevent (purely cosmetic) issues like:

fk@r500 ~ $sudo /usr/src/share/dtrace/monitor-page-scanner
[...]
2015 Sep 22 12:15:54: Scan goal 59: Invalid
2015 Sep 22 12:15:54: Scan goal 60: Invalid
2015 Sep 22 12:15:55: Scan goal 61: Invalid
2015 Sep 22 12:15:55: Scan goal 62: Invalid
2015 Sep 22 12:15:55: Scan goals in the previous minute:
  Launder dirty pages                                                1
  Pageout dirty pages                                                1
  Move inactive to cache or free                                     2
  Invalid                                                            5
  Update active LRU/deactivate pages                                28
2015 Sep 22 12:15:55: Seconds since last 'Move inactive to cache or free' pass: 30
2015 Sep 22 12:15:55: Seconds since last 'Launder dirty pages' pass: 30
2015 Sep 22 12:15:55: Seconds since last 'Pageout dirty pages' pass: 30
---
 sys/vm/vm_pageout.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index d38e985..5d6d512 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -867,6 +867,7 @@ unlock_mp:
     return (error);
 }
 
+#define VMD_PASS_MAX 3
 /*
  * vm_pageout_scan does the dirty work for the pageout daemon.
  *
@@ -886,6 +887,9 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
     int vnodes_skipped;
     boolean_t pageout_ok, queues_locked;
 
+    KASSERT(pass <= VMD_PASS_MAX,
+        ("vm_pageout_scan: Invalid pass code %d", pass));
+
     /*
      * If we need to reclaim memory ask kernel caches to return
      * some. We rate limit to avoid thrashing.
@@ -1588,7 +1592,8 @@ vm_pageout_worker(void *arg)
         }
         if (vm_pages_needed) {
             vm_cnt.v_pdwakeups++;
-            domain->vmd_pass++;
+            if (domain->vmd_pass < VMD_PASS_MAX)
+                domain->vmd_pass++;
         } else
             domain->vmd_pass = 0;
         mtx_unlock(&vm_page_queue_free_mtx);
-- 
2.7.0

From 8df63dfa23443104e8607e6ab553f7b0a40be076 Mon Sep 17 00:00:00 2001
From: Fabian Keil
Date: Sun, 20 Sep 2015 16:36:46 +0200
Subject: [PATCH 3/5] vm_pageout_scan(): Add SDT probes to make a couple of
 internal variables visible

---
 sys/vm/vm_pageout.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 5d6d512..afb5557 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -141,6 +141,15 @@ SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
 SDT_PROVIDER_DEFINE(vm);
 SDT_PROBE_DEFINE(vm, , , vm__lowmem_cache);
 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
+SDT_PROBE_DEFINE4(vm, , , before__inactive__scan, "struct vm_domain *vmd",
+    "int pass", "int page_shortage", "int deficit");
+SDT_PROBE_DEFINE5(vm, , , after__inactive__scan, "struct vm_domain *vmd",
+    "int pass", "int page_shortage", "int addl_page_shortage",
+    "int vnodes_skipped");
+SDT_PROBE_DEFINE3(vm, , , before__active__scan, "struct vm_domain *vmd",
+    "int pass", "int page_shortage");
+SDT_PROBE_DEFINE3(vm, , , after__active__scan, "struct vm_domain *vmd",
+    "int pass", "int page_shortage");
 
 #if !defined(NO_SWAPPING)
 /* the kernel process "vm_daemon"*/
@@ -945,6 +954,9 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
 
     vnodes_skipped = 0;
 
+    SDT_PROBE4(vm, , , before__inactive__scan, vmd, pass, page_shortage,
+        deficit);
+
     /*
      * Start scanning the inactive queue for pages we can move to the
      * cache or free. The scan will stop when the target is reached or
@@ -1172,6 +1184,9 @@ relock_queues:
     }
     vm_pagequeue_unlock(pq);
 
+    SDT_PROBE5(vm, , , after__inactive__scan, vmd, pass, page_shortage,
+        addl_page_shortage, vnodes_skipped);
+
 #if !defined(NO_SWAPPING)
     /*
      * Wakeup the swapout daemon if we didn't cache or free the targeted
@@ -1220,6 +1235,8 @@ relock_queues:
     if (min_scan > 0 || (page_shortage > 0 && maxscan > 0))
         vmd->vmd_last_active_scan = scan_tick;
 
+    SDT_PROBE3(vm, , , before__active__scan, vmd, pass, page_shortage);
+
     /*
      * Scan the active queue for pages that can be deactivated. Update
      * the per-page activity counter and use it to identify deactivation
@@ -1294,6 +1311,9 @@ relock_queues:
         vm_page_unlock(m);
     }
     vm_pagequeue_unlock(pq);
+
+    SDT_PROBE3(vm, , , after__active__scan, vmd, pass, page_shortage);
+
 #if !defined(NO_SWAPPING)
     /*
      * Idle process swapout -- run once per second.
-- 
2.7.0

From 824ffd3bfe301873883df60afa9804404522a184 Mon Sep 17 00:00:00 2001
From: Fabian Keil
Date: Tue, 22 Sep 2015 16:05:49 +0200
Subject: [PATCH 4/5] sys/vm: Limit the inactive memory more aggressively

Currently the ZFS ARC does not take the inactive memory into account
when calculating its target size.
If there's no limit for the inactive pages, the ARC may shrink to its
own limit while the number of inactive pages continues to grow:

last pid: 28429;  load averages:  0.48,  0.46,  0.41   up 0+03:39:07  17:24:59
91 processes:  2 running, 88 sleeping, 1 waiting
CPU:  1.4% user,  0.0% nice, 12.7% system,  0.2% interrupt, 85.7% idle
Mem: 396M Active, 489M Inact, 986M Wired, 292K Cache, 5202K Buf, 43M Free
ARC: 351M Total, 90M MFU, 44M MRU, 6839K Anon, 7810K Header, 203M Other, 350M Target
Swap: 2048M Total, 99M Used, 1949M Free, 4% Inuse

  PID USERNAME    THR PRI NICE   SIZE    RES STATE   C   TIME    WCPU COMMAND
   11 root          2 155 ki31     0K    32K RUN     0 377:37 170.34% idle
26625 fk           17  36    0   175M 24504K uwait   1   0:09   8.40% git
    0 root        468 -16    0     0K  7488K swapin  1   3:29   6.26% kernel
   22 root          1  20    -     0K    16K geli:w  1   4:16   5.06% g_eli[1] ada0s1d
[...]

2015 Sep 21 17:24:58: Scan goals in the previous minute:
  Update active LRU/deactivate pages                               60
2015 Sep 21 17:24:58: Seconds since last 'Move inactive to cache or free' pass: 1477
2015 Sep 21 17:24:58: Seconds since last 'Launder dirty pages' pass: 9273

With this commit, the system can be configured to let the ARC indirectly
put pressure on the inactive memory until a given target is reached.

A couple of sysctls can and should be used to tune the limits, as the
defaults currently aren't auto-tuned. Note that suboptimal tuning can
result in excessive paging.

Example /etc/sysctl.conf excerpt that appears to work reasonably well
for an ElectroBSD development system with 2 GB of RAM:

# Set the free page target for the ZFS ARC slightly below
# the autotuned vm.v_free_target (10479) so the ARC shrinks
# before the system starts paging.
vfs.zfs.arc_free_target=10000
# If we come too close to the vm.v_free_target (offset_f), start
# freeing inactive pages above the ceiling of the inactive page
# target plus the offset_i.
vm.inactive_page_limit_offset_i=40000
vm.inactive_page_limit_offset_f=10000
# Free up to 10000 inactive pages in a row before checking
# if we should continue to do so.
vm.inactive_pages_to_free_max=10000

Screenshot:
https://www.fabiankeil.de/bilder/electrobsd/kernel-compilation-with-inactive-page-limit-enabled.png
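
For a rough idea of whether the chosen offsets would trigger at all on a given
system, the relevant counters can be inspected from userland. The following is
an illustrative sketch only, not part of the patch: it approximates the
soft-mode check in vm_pageout_get_inactive_page_surplus(), simplifies the
paging-target term to v_free_target - v_free_count (the kernel also considers
the cache queue), and assumes the vm.inactive_page_limit_* sysctls added by
this patch are available.

/*
 * Approximate the soft-mode surplus check with the counters exported
 * via sysctl.  The paging target is simplified here; see
 * vm_pageout_get_inactive_page_surplus() for the real condition.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

static int
read_int(const char *name)
{
    int value;
    size_t len = sizeof(value);

    if (sysctlbyname(name, &value, &len, NULL, 0) != 0)
        err(1, "sysctlbyname(%s)", name);
    return (value);
}

int
main(void)
{
    int inactive = read_int("vm.stats.vm.v_inactive_count");
    int inactive_target = read_int("vm.stats.vm.v_inactive_target");
    int free_count = read_int("vm.stats.vm.v_free_count");
    int free_target = read_int("vm.stats.vm.v_free_target");
    /* These two sysctls only exist with this patch applied. */
    int offset_i = read_int("vm.inactive_page_limit_offset_i");
    int offset_f = read_int("vm.inactive_page_limit_offset_f");
    int above_limit = inactive - (inactive_target + offset_i);
    int free_pressure = (free_target - free_count) + offset_f;

    printf("Inactive pages above the enforced limit: %d\n", above_limit);
    printf("Free-page pressure (including offset_f): %d\n", free_pressure);
    printf("Surplus freeing would%s be triggered in soft mode.\n",
        (above_limit > 0 && free_pressure > 0) ? "" : " not");
    return (0);
}
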
---
 sys/vm/vm_pageout.c | 125 +++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 109 insertions(+), 16 deletions(-)

diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index afb5557..019e1f0 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -141,15 +141,17 @@ SYSINIT(pagedaemon, SI_SUB_KTHREAD_PAGE, SI_ORDER_SECOND, kproc_start,
 SDT_PROVIDER_DEFINE(vm);
 SDT_PROBE_DEFINE(vm, , , vm__lowmem_cache);
 SDT_PROBE_DEFINE(vm, , , vm__lowmem_scan);
-SDT_PROBE_DEFINE4(vm, , , before__inactive__scan, "struct vm_domain *vmd",
-    "int pass", "int page_shortage", "int deficit");
-SDT_PROBE_DEFINE5(vm, , , after__inactive__scan, "struct vm_domain *vmd",
+SDT_PROBE_DEFINE5(vm, , , before__inactive__scan, "struct vm_domain *vmd",
+    "int pass", "int page_shortage", "int deficit", "int inactive_page_surplus");
+SDT_PROBE_DEFINE6(vm, , , after__inactive__scan, "struct vm_domain *vmd",
     "int pass", "int page_shortage", "int addl_page_shortage",
-    "int vnodes_skipped");
+    "int vnodes_skipped", "int inactive_page_surplus");
 SDT_PROBE_DEFINE3(vm, , , before__active__scan, "struct vm_domain *vmd",
    "int pass", "int page_shortage");
 SDT_PROBE_DEFINE3(vm, , , after__active__scan, "struct vm_domain *vmd",
    "int pass", "int page_shortage");
+SDT_PROBE_DEFINE3(vm, , , checked__inactive__pages, "int pages_to_free",
+    "int pages_above_limit", "int enforced_limit");
 
 #if !defined(NO_SWAPPING)
 /* the kernel process "vm_daemon"*/
@@ -230,6 +232,30 @@ SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
     CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
 
+static int inactive_page_limit_enabled = 1;
+SYSCTL_INT(_vm, OID_AUTO, inactive_page_limit_enabled, CTLFLAG_RW,
+    &inactive_page_limit_enabled, 0,
+    "Free inactive pages above the target more aggressively. "
+    "Values: 0 (disabled), 1 (soft mode, only apply limit if free "
+    "page count is low), 2 (ignore free count)");
+
+static int inactive_page_limit_offset_i = 0;
+SYSCTL_INT(_vm, OID_AUTO, inactive_page_limit_offset_i, CTLFLAG_RW,
+    &inactive_page_limit_offset_i, 0,
+    "Number of inactive pages relative to the inactive target "
+    "required for inactive pages to be freed.");
+
+static int inactive_page_limit_offset_f = 0;
+SYSCTL_INT(_vm, OID_AUTO, inactive_page_limit_offset_f, CTLFLAG_RW,
+    &inactive_page_limit_offset_f, 0,
+    "Number of free pages relative to the free target required for "
+    "the inactive memory limit to be applied.");
+
+static int inactive_pages_to_free_max = 1000;
+SYSCTL_INT(_vm, OID_AUTO, inactive_pages_to_free_max, CTLFLAG_RW,
+    &inactive_pages_to_free_max, 0,
+    "Maximum number of inactive pages above the target to free at once.");
+
 static int pageout_lock_miss;
 SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
     CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
@@ -876,6 +902,41 @@ unlock_mp:
     return (error);
 }
 
+static int
+vm_pageout_get_inactive_page_surplus(void)
+{
+    int pages_to_free;
+    int pages_above_limit;
+    int enforced_limit;
+
+    /* Return early so the DTrace probe does not fire. */
+    if (!inactive_page_limit_enabled)
+        return (0);
+
+    enforced_limit = vm_cnt.v_inactive_target + inactive_page_limit_offset_i;
+    pages_above_limit = vm_cnt.v_inactive_count - enforced_limit;
+
+    /*
+     * We want to free inactive pages if there actually are
+     * inactive pages above the limit and we are either using
+     * a hard limit, or the number of free pages is below the
+     * free page limit.
+     */
+    if ((pages_above_limit > 0) &&
+        ((inactive_page_limit_enabled == 2) ||
+        (vm_paging_target() + inactive_page_limit_offset_f > 0))) {
+        pages_to_free = imin(inactive_pages_to_free_max,
+            pages_above_limit);
+    } else {
+        pages_to_free = 0;
+    }
+
+    SDT_PROBE3(vm, , , checked__inactive__pages, pages_to_free,
+        pages_above_limit, enforced_limit);
+
+    return (pages_to_free);
+}
+
 #define VMD_PASS_MAX 3
 /*
  * vm_pageout_scan does the dirty work for the pageout daemon.
@@ -894,6 +955,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
     int act_delta, addl_page_shortage, deficit, error, maxlaunder, maxscan;
     int page_shortage, scan_tick, scanned, starting_page_shortage;
     int vnodes_skipped;
+    int inactive_page_surplus;
     boolean_t pageout_ok, queues_locked;
 
     KASSERT(pass <= VMD_PASS_MAX,
@@ -933,8 +995,9 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
     if (pass > 0) {
         deficit = atomic_readandclear_int(&vm_pageout_deficit);
         page_shortage = vm_paging_target() + deficit;
+        inactive_page_surplus = vm_pageout_get_inactive_page_surplus();
     } else
-        page_shortage = deficit = 0;
+        page_shortage = deficit = inactive_page_surplus = 0;
     starting_page_shortage = page_shortage;
 
     /*
@@ -952,10 +1015,18 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
     if (pass > 1)
         maxlaunder = 10000;
 
+    /*
+     * Prevent laundering if there's no page shortage and we are
+     * merely trying to free inactive pages. Otherwise we may end
+     * up swapping before it's really necessary.
+     */
+    if (page_shortage <= 0)
+        maxlaunder = 0;
+
     vnodes_skipped = 0;
 
-    SDT_PROBE4(vm, , , before__inactive__scan, vmd, pass, page_shortage,
-        deficit);
+    SDT_PROBE5(vm, , , before__inactive__scan, vmd, pass, page_shortage,
+        deficit, inactive_page_surplus);
 
     /*
      * Start scanning the inactive queue for pages we can move to the
@@ -969,7 +1040,8 @@ vm_pageout_scan(struct vm_domain *vmd, int pass)
     vm_pagequeue_lock(pq);
     queues_locked = TRUE;
     for (m = TAILQ_FIRST(&pq->pq_pl);
-         m != NULL && maxscan-- > 0 && page_shortage > 0;
+         m != NULL && maxscan-- > 0 &&
+         (page_shortage > 0 || inactive_page_surplus > 0);
          m = next) {
         vm_pagequeue_assert_locked(pq);
         KASSERT(queues_locked, ("unlocked queues"));
@@ -1106,6 +1178,7 @@ free_page:
             vm_page_free(m);
             PCPU_INC(cnt.v_dfree);
             --page_shortage;
+            --inactive_page_surplus;
         } else if ((object->flags & OBJ_DEAD) != 0) {
             /*
              * Leave dirty pages from dead objects at the front of
@@ -1133,13 +1206,19 @@ requeue_page:
                 vm_pagequeue_lock(pq);
                 queues_locked = TRUE;
                 vm_page_requeue_locked(m);
-            } else if (maxlaunder > 0) {
+            } else if (maxlaunder > 0 && page_shortage > 0) {
                 /*
-                 * We always want to try to flush some dirty pages if
-                 * we encounter them, to keep the system stable.
+                 * As long as there is a page shortage, we try to
+                 * flush some dirty pages if we encounter them, to
+                 * keep the system stable.
                  * Normally this number is small, but under extreme
                  * pressure where there are insufficient clean pages
                  * on the inactive queue, we may have to go all out.
+                 *
+                 * XXX: We probably should not bother laundering
+                 *      until we know that there might be a chance
+                 *      that we will not be able to free the required
+                 *      amount of pages to take care of the page shortage.
                  */
 
                 if (object->type != OBJT_SWAP &&
@@ -1184,15 +1263,27 @@ relock_queues:
     }
     vm_pagequeue_unlock(pq);
 
-    SDT_PROBE5(vm, , , after__inactive__scan, vmd, pass, page_shortage,
-        addl_page_shortage, vnodes_skipped);
+    /*
+     * If the page shortage has been taken care of, or if we were
+     * just trying to free surplus inactive pages, the locked pages
+     * are more or less meaningless. Reset the counter to prevent
+     * pointless swapping.
+     */
+    if (page_shortage <= 0)
+        addl_page_shortage = 0;
+
+    SDT_PROBE6(vm, , , after__inactive__scan, vmd, pass, page_shortage,
+        addl_page_shortage, vnodes_skipped, inactive_page_surplus);
 
 #if !defined(NO_SWAPPING)
     /*
-     * Wakeup the swapout daemon if we didn't cache or free the targeted
-     * number of pages.
+     * Wakeup the swapout daemon if we didn't cache or free the
+     * targeted number of pages and we are either desperate or there
+     * are no inactive pages to free left (in which case we will be
+     * desperate soon enough).
      */
-    if (vm_swap_enabled && page_shortage > 0)
+    if (vm_swap_enabled && page_shortage > 0 &&
+        (pass > 1 || !vm_pageout_get_inactive_page_surplus()))
         vm_req_vmdaemon(VM_SWAP_NORMAL);
 #endif
 
@@ -1586,6 +1677,8 @@ vm_pageout_worker(void *arg)
             vm_pages_needed = 0;
             wakeup(&vm_cnt.v_free_count);
         }
+        if (vm_pageout_get_inactive_page_surplus() > 0)
+            vm_pages_needed = 1;
         if (vm_pages_needed) {
             /*
              * We're still not done. Either vm_pages_needed was
-- 
2.7.0

From 8d4fca8afa23cc6db99ec3bbcaa768ea4073046b Mon Sep 17 00:00:00 2001
From: Fabian Keil
Date: Sun, 20 Sep 2015 19:10:51 +0200
Subject: [PATCH 5/5] share/dtrace: Add monitor-page-scanner

... which, who would have guessed it, monitors the vm page scanner.

It's useful for tuning the sysctls for the inactive page limit.
---
 share/dtrace/monitor-page-scanner | 168 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 168 insertions(+)
 create mode 100755 share/dtrace/monitor-page-scanner

diff --git a/share/dtrace/monitor-page-scanner b/share/dtrace/monitor-page-scanner
new file mode 100755
index 0000000..85537ca
--- /dev/null
+++ b/share/dtrace/monitor-page-scanner
@@ -0,0 +1,168 @@
+#!/usr/sbin/dtrace -s
+
+/***************************************************************************
+ * monitor-page-scanner
+ *
+ * Traces the vm page scanner.
+ *
+ * Relies on SDT probes that currently are not part of vanilla FreeBSD.
+ *
+ * Copyright (c) 2015 Fabian Keil
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ ***************************************************************************/ + +#pragma D option quiet +#pragma D option dynvarsize=10m + +dtrace:::BEGIN +{ + goal[0] = "Update active LRU/deactivate pages"; + goal[1] = "Move inactive to cache or free"; + goal[2] = "Launder dirty pages"; + goal[3] = "Pageout dirty pages"; + start_time = walltimestamp; + + min_pass = 2; + + printf("%Y: Monitoring the page scanner. Minimum pass value to show 'boring' scans ", walltimestamp); + printf("without memory pressure or inactive page surplus: %d (%s). Press CTRL-C to abort.\n", + min_pass, goal[min_pass]); +} + +vm:kernel::checked-inactive-pages +{ + this->to_free = (int)arg0; +} + +vm:kernel::checked-inactive-pages +/this->to_free/ +{ + this->above_limit = (int)arg1; + + printf("%Y: %s: Inactive page surplus: %d, above limit: %d.\n", + walltimestamp, probename, this->to_free, this->above_limit); +} + +vm:kernel::before-inactive-scan +{ + this->pass = (int)arg1; + this->page_shortage = (int)arg2; + this->inactive_page_surplus = (int)arg4; + + /* + * Every pass code above 3 is treated like 3, + * adjust index accordingly. + */ + this->goal_index = (this->pass <= 3) ? this->pass : 3; + this->goal = goal[this->goal_index]; + @goals[this->goal] = count(); + @goals_total[this->goal] = count(); + last_pass[this->pass] = timestamp; +} + +vm:kernel::before-inactive-scan +/(this->pass >= min_pass) || (this->page_shortage > 0) +|| (this->inactive_page_surplus > 0)/ +{ + /* this->vmd = (struct vm_domain *)arg0; */ + this->deficit = (int)arg3; + + printf("%Y: %s: Scan goal %d: %s.\n", + walltimestamp, probename, this->pass, this->goal); + printf("%Y: %s: Page shortage: %d, inactive page surplus: %d\n", + walltimestamp, probename, this->page_shortage, this->inactive_page_surplus); + printf("%Y: v_free_target: %d, v_free_count: %d, v_cache_count: %d.\n", + walltimestamp, `vm_cnt.v_free_target, `vm_cnt.v_free_count, `vm_cnt.v_cache_count); + printf("%Y: v_inactive_target: %d. v_inactive_count: %d. 
+        walltimestamp, `vm_cnt.v_inactive_target, `vm_cnt.v_inactive_count, this->deficit);
+}
+
+vm:kernel::after-inactive-scan
+{
+    this->pass = (int)arg1;
+    this->page_shortage = (int)arg2;
+    this->addl_page_shortage = (int)arg3;
+    this->vnodes_skipped = (int)arg4;
+    this->inactive_page_surplus = (int)arg5;
+}
+
+vm:kernel::after-inactive-scan
+/(this->pass >= min_pass) || (this->page_shortage > 0) ||
+(this->addl_page_shortage > 0) || (this->vnodes_skipped > 0) ||
+(this->inactive_page_surplus > 0)/
+{
+    printf("%Y: %s pass %d: page shortage: %d, inactive page surplus: %d, addl shortage: %d, vnodes skipped: %d.\n",
+        walltimestamp, probename, this->pass, this->page_shortage,
+        this->inactive_page_surplus, this->addl_page_shortage, this->vnodes_skipped);
+}
+
+vm:kernel::before-active-scan,
+vm:kernel::after-active-scan
+{
+    this->pass = (int)arg1;
+    this->page_shortage = (int)arg2;
+}
+
+vm:kernel::before-active-scan,
+vm:kernel::after-active-scan
+/(this->pass >= min_pass) || (this->page_shortage > 0)/
+{
+    printf("%Y: %s pass %d: page_shortage: %d.\n",
+        walltimestamp, probename, this->pass, this->page_shortage);
+}
+
+tick-60s
+{
+    printf("%Y: Scan goals in the previous minute:", walltimestamp);
+    printa(@goals);
+    trunc(@goals);
+}
+
+tick-60s
+/last_pass[1]/
+{
+    this->pass = 1;
+    this->elapsed = (timestamp - last_pass[this->pass]) / 1000000000;
+    printf("%Y: Seconds since last '%s' pass: %d.\n",
+        walltimestamp, goal[this->pass], this->elapsed);
+}
+
+tick-60s
+/last_pass[2]/
+{
+    this->pass = 2;
+    this->elapsed = (timestamp - last_pass[this->pass]) / 1000000000;
+    printf("%Y: Seconds since last '%s' pass: %d.\n",
+        walltimestamp, goal[this->pass], this->elapsed);
+}
+
+tick-60s
+/last_pass[3]/
+{
+    this->pass = 3;
+    this->elapsed = (timestamp - last_pass[this->pass]) / 1000000000;
+    printf("%Y: Seconds since last '%s' pass: %d.\n",
+        walltimestamp, goal[this->pass], this->elapsed);
+}
+
+END
+{
+    printf("%Y: Scan goals since start of script at %Y:",
+        walltimestamp, start_time);
+    printa(@goals_total);
+
+    /* Clear aggregates so DTrace does not show them again. */
+    trunc(@goals_total);
+    trunc(@goals);
+}
-- 
2.7.0