author    | Eelco Dolstra <eelco.dolstra@logicblox.com> | 2013-08-07 11:51:55 +0000
committer | Eelco Dolstra <eelco.dolstra@logicblox.com> | 2013-08-07 14:02:04 +0200
commit    | a583a2bc59a4ee2b067e5520f6c5bc0c61852c32 (patch)
tree      | 137be66f58b664dbf0aa21138000d711d5e00162 /src/libutil
parent    | 263d6682224f516aed74286453c5e2e097a38aa6 (diff)
download  | guix-a583a2bc59a4ee2b067e5520f6c5bc0c61852c32.tar.gz
Run the daemon worker on the same CPU as the client
On a system with multiple CPUs, running Nix operations through the
daemon is significantly slower than "direct" mode:

$ NIX_REMOTE= nix-instantiate '<nixos>' -A system
real    0m0.974s
user    0m0.875s
sys     0m0.088s

$ NIX_REMOTE=daemon nix-instantiate '<nixos>' -A system
real    0m2.118s
user    0m1.463s
sys     0m0.218s

The main reason seems to be that the client and the worker get moved
to a different CPU after every call to the worker. This patch adds a
hack to lock them to the same CPU. With this, the overhead of going
through the daemon is very small:

$ NIX_REMOTE=daemon nix-instantiate '<nixos>' -A system
real    0m1.074s
user    0m0.809s
sys     0m0.098s
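The pinning step the patch relies on reduces to two Linux scheduler calls, sched_getcpu(2) and sched_setaffinity(2): ask which CPU the calling thread is on, build a CPU set containing only that CPU, and install it as the thread's affinity mask. Below is a minimal, self-contained sketch of that step (pinToCurrentCpu and the main driver are illustrative names, not code from the patch; assumes Linux/glibc):

// Minimal sketch of CPU pinning via the Linux scheduler API.
#ifndef _GNU_SOURCE
#define _GNU_SOURCE          // for sched_getcpu() and the CPU_* macros
#endif
#include <sched.h>
#include <cstdio>

// Pin the calling thread to the CPU it is currently running on.
// Returns that CPU's index, or -1 if the CPU could not be determined
// or the affinity mask could not be installed.
static int pinToCurrentCpu()
{
    int cpu = sched_getcpu();            // which CPU are we on right now?
    if (cpu == -1) return -1;

    cpu_set_t set;
    CPU_ZERO(&set);                      // start from an empty CPU set
    CPU_SET(cpu, &set);                  // allow only the current CPU
    if (sched_setaffinity(0, sizeof(set), &set) == -1)  // pid 0 = this thread
        return -1;
    return cpu;
}

int main()
{
    std::printf("pinned to CPU %d\n", pinToCurrentCpu());
    return 0;
}

Because a fork()ed child inherits its parent's affinity mask, pinning a process this way also confines any children it spawns to one core; that is presumably why the patch below also provides a restoreAffinity() to undo the mask.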
Diffstat (limited to 'src/libutil')
-rw-r--r-- | src/libutil/Makefile.am |  4
-rw-r--r-- | src/libutil/affinity.cc | 54
-rw-r--r-- | src/libutil/affinity.hh |  9
3 files changed, 65 insertions, 2 deletions
diff --git a/src/libutil/Makefile.am b/src/libutil/Makefile.am
index fe896eec50..0c4073e66e 100644
--- a/src/libutil/Makefile.am
+++ b/src/libutil/Makefile.am
@@ -1,12 +1,12 @@
 pkglib_LTLIBRARIES = libutil.la
 
 libutil_la_SOURCES = util.cc hash.cc serialise.cc \
-  archive.cc xml-writer.cc
+  archive.cc xml-writer.cc affinity.cc
 
 libutil_la_LIBADD = ../boost/format/libformat.la
 
 pkginclude_HEADERS = util.hh hash.hh serialise.hh \
-  archive.hh xml-writer.hh types.hh
+  archive.hh xml-writer.hh types.hh affinity.hh
 
 if !HAVE_OPENSSL
 libutil_la_SOURCES += \
diff --git a/src/libutil/affinity.cc b/src/libutil/affinity.cc
new file mode 100644
index 0000000000..3a20fd2774
--- /dev/null
+++ b/src/libutil/affinity.cc
@@ -0,0 +1,54 @@
+#include "types.hh"
+#include "util.hh"
+#include "affinity.hh"
+
+#if HAVE_SCHED_H
+#include <sched.h>
+#endif
+
+namespace nix {
+
+
+static bool didSaveAffinity = false;
+static cpu_set_t savedAffinity;
+
+
+void setAffinityTo(int cpu)
+{
+#if HAVE_SCHED_SETAFFINITY
+    if (sched_getaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1) return;
+    didSaveAffinity = true;
+    printMsg(lvlDebug, format("locking this thread to CPU %1%") % cpu);
+    cpu_set_t newAffinity;
+    CPU_ZERO(&newAffinity);
+    CPU_SET(cpu, &newAffinity);
+    if (sched_setaffinity(0, sizeof(cpu_set_t), &newAffinity) == -1)
+        printMsg(lvlError, format("failed to lock thread to CPU %1%") % cpu);
+#endif
+}
+
+
+int lockToCurrentCPU()
+{
+#if HAVE_SCHED_SETAFFINITY
+    if (getEnv("NIX_AFFINITY_HACK", "1") == "1") {
+        int cpu = sched_getcpu();
+        if (cpu != -1) setAffinityTo(cpu);
+        return cpu;
+    }
+#endif
+    return -1;
+}
+
+
+void restoreAffinity()
+{
+#if HAVE_SCHED_SETAFFINITY
+    if (!didSaveAffinity) return;
+    if (sched_setaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1)
+        printMsg(lvlError, "failed to restore affinity %1%");
+#endif
+}
+
+
+}
diff --git a/src/libutil/affinity.hh b/src/libutil/affinity.hh
new file mode 100644
index 0000000000..c1bd28e136
--- /dev/null
+++ b/src/libutil/affinity.hh
@@ -0,0 +1,9 @@
+#pragma once
+
+namespace nix {
+
+void setAffinityTo(int cpu);
+int lockToCurrentCPU();
+void restoreAffinity();
+
+}
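The diff is limited to src/libutil, so the code that actually calls this new API (in the client and daemon) is not shown. Based on the commit message, the intended call pattern is roughly the following sketch; clientHandshake, workerHandshake, and beforeExecChild are hypothetical names, and the wire transfer of the CPU number is elided:

// Sketch of how the two sides of the daemon connection might use the
// new libutil API; the real calling code is outside this diff.
#include "affinity.hh"   // the header added by the patch above

// Hypothetical client side: pin this process to its current CPU.
// lockToCurrentCPU() returns -1 when pinning is unsupported or when
// the user opted out via NIX_AFFINITY_HACK=0.
int clientHandshake(/* connection to the daemon */)
{
    int cpu = nix::lockToCurrentCPU();
    // ... the client would then transmit `cpu` to the daemon ...
    return cpu;
}

// Hypothetical worker side: pin to the CPU the client reported, so
// both processes keep running on the same core.
void workerHandshake(int cpuFromClient)
{
    if (cpuFromClient != -1)
        nix::setAffinityTo(cpuFromClient);
}

// Before handing off to a long-running child, undo the pinning so the
// child gets the original affinity mask back.
void beforeExecChild()
{
    nix::restoreAffinity();
}

Note the escape hatch in lockToCurrentCPU(): it checks NIX_AFFINITY_HACK (defaulting to "1"), so setting NIX_AFFINITY_HACK=0 disables the pinning entirely if it interacts badly with a particular workload or scheduler.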