#include <type_traits>
#include <utility>
// functions, functors, lambdas, etc.
// Generic callable dispatch. The two enable_if guards exclude pointers to
// member functions / member objects, which are handled by the dedicated
// eval overloads below.
template<
class F, class... Args,
class = typename std::enable_if<!std::is_member_function_pointer<F>::value>::type,
class = typename std::enable_if<!std::is_member_object_pointer<F>::value>::type
>
auto eval(F&& f, Args&&... args)
    -> decltype(std::forward<F>(f)(std::forward<Args>(args)...))
{
    // BUG FIX: forward the callable itself. The original invoked `f` as an
    // lvalue, which mis-selects (or rejects) ref-qualified operator()
    // overloads and makes the declared return type disagree with the call
    // actually performed when `f` was passed as an rvalue.
    return std::forward<F>(f)(std::forward<Args>(args)...);
}
// const member function, invoked through a pointer or pointer-like object `p`.
// BUG FIX: the original declared the member-function pointer as
// `R(C::*)() const` (zero parameters) yet forwarded `args...` into the call,
// which cannot compile for any non-empty argument list. A separate pack
// `FnArgs` deduces the member function's own parameter list independently of
// the forwarded arguments, so member functions of any arity work.
template<class R, class C, class P, class... FnArgs, class... Args>
auto eval(R(C::*f)(FnArgs...) const, P&& p, Args&&... args) -> R
{
    // `*p.*f` parses as `(*p).*f`: dereference the handle, then bind the
    // member function pointer and call it.
    return (*p.*f)(std::forward<Args>(args)...);
}
// const member function, invoked directly on an object reference.
// BUG FIX: as with the pointer overload, the original accepted only
// zero-parameter member functions (`R(C::*)() const`) while forwarding
// `args...`; the independent `FnArgs` pack lets member functions of any
// arity be called.
template<class R, class C, class... FnArgs, class... Args>
auto eval(R(C::*f)(FnArgs...) const, C& c, Args&&... args) -> R
{
    return (c.*f)(std::forward<Args>(args)...);
}
// non-const member function, invoked through a pointer or pointer-like
// object `p` (the handle must support unary `*`).
// BUG FIX: the original accepted only zero-parameter member functions
// (`R(C::*)()`) while forwarding `args...`; the independent `FnArgs` pack
// lets member functions of any arity be called.
template<class R, class C, class P, class... FnArgs, class... Args>
auto eval(R(C::*f)(FnArgs...), P&& p, Args&&... args) -> R
{
    // `*p.*f` parses as `(*p).*f`.
    return (*p.*f)(std::forward<Args>(args)...);
}
// member object: read-only access through a const instance.
// Returns a const reference to the data member, so no copy is made.
template<class MemberT, class ClassT>
auto eval(MemberT(ClassT::*member), const ClassT& instance) -> const MemberT&
{
    return instance.*member;
}
// member object: mutable access through a non-const instance.
// Returns a non-const reference, so the member can be assigned through the
// result of eval().
template<class MemberT, class ClassT>
auto eval(MemberT(ClassT::*member), ClassT& instance) -> MemberT&
{
    return instance.*member;
}
Taken from here: http://functionalcpp.wordpress.com/2013/08/03/generalized-function-evaluation/
This blog serves as a dumping ground for my own interests. On it you will find anything which I want to keep track of; links, articles, tips and tricks. Mostly it focuses on C++, Javascript and HTML, linux and performance.
Sunday, 18 August 2013
Sunday, 4 August 2013
Quickly stabilize NTP
By design NTP converges on the correct time slowly so as to prevent jumps in the clock time. It achieves this by varying the frequency of the clock rather than stepping the time.
When the clock is far out (say, after a reboot when the hardware clock isn't sync'd with the system clock), overshoot is caused because NTP applies a huge frequency correction to accelerate the convergence between the kernel clock time and the real time.
Overshoot causes the clock to oscillate, eventually settling down to the correct time and stabilizing.
If we want to quickly stabilize NTP we can apply the following process:
1. Stop NTP - /etc/init.d/ntp stop
2. Reset kernel bias - /usr/sbin/ntptime -f 0
3. Run ntpdate to sync the time - ntpdate -p8 <server>
4. Run it several times, this will make more measurements and have the kernel get a more accurate idea of the time
5. Start NTP - /etc/init.d/ntp start
When the clock is far out (say, after a reboot when the hardware clock isn't sync'd with the system clock), overshoot is caused because NTP applies a huge frequency correction to accelerate the convergence between the kernel clock time and the real time.
Overshoot causes the clock to oscillate, eventually settling down to the correct time and stabilizing.
If we want to quickly stabilize NTP we can apply the following process:
1. Stop NTP - /etc/init.d/ntp stop
2. Reset kernel bias - /usr/sbin/ntptime -f 0
3. Run ntpdate to sync the time - ntpdate -p8 <server>
4. Run it several times, this will make more measurements and have the kernel get a more accurate idea of the time
5. Start NTP - /etc/init.d/ntp start
Wednesday, 24 July 2013
Parsing socket / cpu / hyperthreading information from /proc/cpuinfo
#!/bin/bash
# Parse socket / core / hyperthreading topology from /proc/cpuinfo.

# total number of sockets (count of distinct "physical id" lines)
NUM_SOCKETS=$(grep "physical id" /proc/cpuinfo | sort -u | wc -l)
# total number of cores per socket
NUM_CORES=$(grep "cpu cores" /proc/cpuinfo | sort -u | awk '{print $4}')
# total number of physical cpus (cores per socket * number of sockets)
# FIX: $[ ... ] is an obsolete, undocumented arithmetic form; use $(( ... )).
NUM_PHYSICAL_CPUS=$((NUM_SOCKETS * NUM_CORES))
echo "${NUM_SOCKETS} sockets"
echo "${NUM_CORES} cores per socket"
echo "${NUM_PHYSICAL_CPUS} physical processors"
# Work out if hyperthreading is enabled.
# This is done by working out how many siblings each core has
# If it's not the same as the number of cores per socket then
# hyperthreading must be on
NUM_SIBLINGS=$(grep siblings /proc/cpuinfo | sort -u | awk '{print $3}')
echo "$NUM_SIBLINGS siblings"
# FIX: quote the operands so [ ] does not break on empty/multi-word values.
if [ "${NUM_SIBLINGS}" -ne "${NUM_CORES}" ]
then
    # total number of local cpus (ie: physical cpus + hyperthreading cpus)
    NUM_LOGICAL_CPUS=$(grep processor /proc/cpuinfo | sort -u | wc -l)
    echo "hyperthreading is enabled - ${NUM_LOGICAL_CPUS} logical processors"
fi
# display which socket each core is on
# FIX: drop the useless cat and the deprecated egrep alias (grep -E).
echo "Sockets: Cores"
grep -E "physical id|processor" /proc/cpuinfo | tr \\n ' ' | sed 's/processor/\nprocessor/g' | grep -v ^$ | awk '{printf "%d: %02d\n",$7,$3}' | sort
# NOTE(review): everything below is a verbatim duplicate of the script above
# (it looks like a blog-scraping / copy-paste artifact) — confirm against the
# original post and consider removing the duplicate.
# total number of sockets
NUM_SOCKETS=`grep physical\ id /proc/cpuinfo | sort -u | wc -l`
# total number of cores per socket
NUM_CORES=`grep cpu\ cores /proc/cpuinfo | sort -u | awk '{print $4}'`
# total number of physical cpus (cores per socket * number of sockets)
NUM_PHYSICAL_CPUS=$[NUM_SOCKETS * ${NUM_CORES}]
echo "${NUM_SOCKETS} sockets"
echo "${NUM_CORES} cores per socket"
echo "${NUM_PHYSICAL_CPUS} physical processors"
# Work out if hyperthreading is enabled.
# This is done by working out how many siblings each core has
# If it's not the same as the number of cores per socket then
# hyperthreading must be on
NUM_SIBLINGS=`grep siblings /proc/cpuinfo | sort -u | awk '{print $3}'`
echo "$NUM_SIBLINGS siblings"
if [ ${NUM_SIBLINGS} -ne ${NUM_CORES} ]
then
# total number of local cpus (ie: physical cpus + hyperthreading cpus)
NUM_LOGICAL_CPUS=`grep processor /proc/cpuinfo | sort -u | wc -l`
echo "hyperthreading is enabled - ${NUM_LOGICAL_CPUS} logical processors"
fi
# display which socket each core is on
echo "Sockets: Cores"
cat /proc/cpuinfo | egrep "physical id|processor" | tr \\n ' ' | sed 's/processor/\nprocessor/g' | grep -v ^$ | awk '{printf "%d: %02d\n",$7,$3}' | sort
Notes on performance counters and profiling with PAPI
Performance counters
2 main types of profiling applications with performance counters: aggregate (direct) and statistical (indirect).
Taken from a Dr Dobbs article
2 main types of profiling applications with performance counters: aggregate (direct) and statistical (indirect).
- Aggregate: Involves reading the counters before and after the execution of a region of code and recording the difference. This usage model permits explicit, highly accurate, fine-grained measurements. There are two sub-cases of aggregate counter usage: Summation of the data from multiple executions of an instrumented location, and trace generation, where the counter values are recorded for every execution of the instrumentation.
- Statistical: The PM hardware is set to generate an interrupt when a performance counter reaches a preset value. This interrupt carries with it important contextual information about the state of the processor at the time of the event. Specifically, it includes the program counter (PC), the text address at which the interrupt occurred. By populating a histogram with this data, users obtain a probabilistic distribution of PM interrupt events across the address space of the application. This kind of profiling facilitates a good high-level understanding of where and why the bottlenecks are occurring. For instance, the questions, "What code is responsible for most of the cache misses?" and "Where is the branch prediction hardware performing poorly?" can quickly be answered by generating a statistical profile.
PAPI supports two types of events, preset and native.
- Preset events have a symbolic name associated with them that is the same for every processor supported by PAPI.
- Native events, on the other hand, provide a means to access every possible event on a particular platform, regardless of there being a predefined PAPI event name for it
PAPI supports measurements per-thread; that is, each measurement only contains counts generated by the thread performing the PAPI calls
// PAPI aggregate-counting example: bracket a region of code with
// start/read calls to measure two hardware events on the calling thread.
int events[2] = { PAPI_L1_DCM, PAPI_FP_OPS }; // L1 data cache misses; hardware flops
long_long values[2];
PAPI_start_counters(events, 2); // begin counting the two events
// do work
PAPI_read_counters(values, 2); // read the accumulated counts into values[]
Taken from a Dr Dobbs article
Monday, 22 July 2013
Voluntary/involuntary context switches
$ cat /proc/$PID/status
Voluntary context switches occur when your application blocks in a system call and the kernel decides to give its time slice to another process.
Involuntary context switches occur when your application has used up the entire timeslice the scheduler allotted to it.
Voluntary context switches occur when your application blocks in a system call and the kernel decides to give its time slice to another process.
Involuntary context switches occur when your application has used up the entire timeslice the scheduler allotted to it.
Monday, 17 June 2013
intercepting libc functions with LD_PRELOAD
What follows is an example of how to intercept uname
// pseudo-handle RTLD_NEXT: find the next occurrence of a function in the search order after the current library. This allows one to provide a wrapper around a function in another shared library.
// NOTE: RTLD_NEXT is normally supplied by <dlfcn.h> when _GNU_SOURCE is
// defined; the fallback value below matches glibc's definition.
#ifndef RTLD_NEXT
# define RTLD_NEXT ((void *) -1L)
#endif
#define REAL_LIBC RTLD_NEXT
// function pointer which will store the location of libc's uname (ie: the 'real' uname function)
int (*real_uname)(struct utsname *buf) = 0;
// Resolve libc's real uname via dlsym(RTLD_NEXT), caching it in real_uname.
// The constructor attribute asks the loader to run init() at load time.
// NOTE(review): this first copy of init() is missing its closing brace and is
// immediately followed by a complete duplicate below — it looks like a
// truncated re-paste from the blog scrape; confirm against the original post.
static void init (void) __attribute__ ((constructor));
static void init (void)
{
if(!real_uname)
{
real_uname = dlsym(REAL_LIBC, "uname");
if(!real_uname)
{
// Abort: without the real symbol the interposer cannot function.
fprintf(stderr, "missing symbol: uname");
exit(1);
}
}
// NOTE(review): the lines below duplicate the RTLD_NEXT setup and
// real_uname declaration that appear earlier (apparently a scraping /
// copy-paste artifact) — confirm and deduplicate.
// pseudo-handle RTLD_NEXT: find the next occurrence of a function in the search order after the current library. This allows one to provide a wrapper around a function in another shared library.
#ifndef RTLD_NEXT
# define RTLD_NEXT ((void *) -1L)
#endif
#define REAL_LIBC RTLD_NEXT
// function pointer which will store the location of libc's uname (ie: the 'real' uname function)
int (*real_uname)(struct utsname *buf) = 0;
/* Resolve libc's real uname once via dlsym(RTLD_NEXT) and cache it in
 * real_uname. Declared with the constructor attribute so the loader runs
 * it when the library is mapped in. */
static void init (void) __attribute__ ((constructor));
static void init (void)
{
    /* Already resolved — nothing to do. */
    if (real_uname)
        return;
    real_uname = dlsym(REAL_LIBC, "uname");
    if (!real_uname)
    {
        /* Without the real symbol the interposer cannot work at all. */
        fprintf(stderr, "missing symbol: uname");
        exit(1);
    }
}
static int do_uname(struct utsname *buf, int (*uname_proc)(struct utsname *buf))
{
return uname_proc(buf);
}
/* Exported interception point for uname(). Forwards to libc's real
 * implementation and leaves a hook for post-processing the result. */
__attribute__ ((visibility("default"))) int uname(struct utsname *buf)
{
    /* Call init() defensively: the constructor attribute is not guaranteed
     * to have fired in every loading scenario (such as loading the 32-bit
     * pthread library). */
    init();
    int rc = do_uname(buf, real_uname);
    if (rc == 0)
    {
        // do special processing
    }
    return rc;
}
Compile this into a shared library, and intercept libc's uname by using LD_PRELOAD=libname.so
Thursday, 13 June 2013
Display a process's threads and the cpu core each thread is running on
ps -ALopid,lwp,psr,cpuid,comm | grep $APP_NAME
Subscribe to:
Posts (Atom)