You can use this function I wrote. You call GetTimeMs64(), and it returns the number of milliseconds elapsed since the UNIX epoch using the system clock, just like time(NULL), except in milliseconds.
It works on both Windows and Linux, and it is thread safe.
Note that the granularity is 15 ms on Windows; on Linux it is implementation dependent, but it is usually 15 ms as well.
#ifdef _WIN32
#include <Windows.h>
#else
#include <sys/time.h>
#include <ctime>
#endif

/* Remove these if they are already defined */
typedef long long int64;
typedef unsigned long long uint64;

/* Returns the number of milliseconds elapsed since the UNIX epoch. Works on both
 * Windows and Linux. */
uint64 GetTimeMs64()
{
#ifdef _WIN32
    /* Windows */
    FILETIME ft;
    LARGE_INTEGER li;

    /* Get the number of 100-nanosecond intervals elapsed since January 1, 1601 (UTC)
     * and copy it to a LARGE_INTEGER structure. */
    GetSystemTimeAsFileTime(&ft);
    li.LowPart = ft.dwLowDateTime;
    li.HighPart = ft.dwHighDateTime;

    uint64 ret = li.QuadPart;
    ret -= 116444736000000000LL; /* Convert from file time to UNIX epoch time. */
    ret /= 10000;                /* From 100-nanosecond (10^-7) to 1-millisecond (10^-3) intervals */

    return ret;
#else
    /* Linux */
    struct timeval tv;
    gettimeofday(&tv, NULL);

    uint64 ret = tv.tv_usec;
    /* Convert from microseconds (10^-6) to milliseconds (10^-3) */
    ret /= 1000;

    /* Add the seconds (10^0) after converting them to milliseconds (10^-3).
     * Cast first so the multiplication cannot overflow a 32-bit time_t. */
    ret += (uint64)tv.tv_sec * 1000;

    return ret;
#endif
}
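For example, a minimal usage sketch (the middle line is just a placeholder for the code you want to measure):

uint64 start = GetTimeMs64();
/* ... the code you want to measure ... */
uint64 elapsed = GetTimeMs64() - start;
printf("took %llu ms\n", (unsigned long long)elapsed); /* needs <stdio.h> */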
I have another working example that uses microseconds (UNIX, POSIX, etc.).
#include <sys/time.h>

typedef unsigned long long timestamp_t;

static timestamp_t
get_timestamp ()
{
    struct timeval now;
    gettimeofday (&now, NULL);
    return now.tv_usec + (timestamp_t)now.tv_sec * 1000000;
}
...
timestamp_t t0 = get_timestamp();
// Process
timestamp_t t1 = get_timestamp();
double secs = (t1 - t0) / 1000000.0;
Here's the file where we coded this:
https://github.com/arhuaco/junkcode/blob/master/emqbit-bench/bench.c
Here is a simple solution in C++11 that gives you satisfactory resolution.
#include <iostream>
#include <chrono>

class Timer
{
public:
    Timer() : beg_(clock_::now()) {}
    void reset() { beg_ = clock_::now(); }
    double elapsed() const {
        return std::chrono::duration_cast<second_>
            (clock_::now() - beg_).count();
    }

private:
    typedef std::chrono::high_resolution_clock clock_;
    typedef std::chrono::duration<double, std::ratio<1> > second_;
    std::chrono::time_point<clock_> beg_;
};
Or on *nix, for C++03:
#include <iostream>
#include <ctime>

class Timer
{
public:
    Timer() { clock_gettime(CLOCK_REALTIME, &beg_); }

    double elapsed() {
        clock_gettime(CLOCK_REALTIME, &end_);
        return end_.tv_sec - beg_.tv_sec +
            (end_.tv_nsec - beg_.tv_nsec) / 1000000000.;
    }

    void reset() { clock_gettime(CLOCK_REALTIME, &beg_); }

private:
    timespec beg_, end_;
};
Here is an example of usage:

int main()
{
    Timer tmr;
    double t = tmr.elapsed();
    std::cout << t << std::endl;

    tmr.reset();
    t = tmr.elapsed();
    std::cout << t << std::endl;

    return 0;
}
From https://gist.github.com/gongzhitaao/7062087
#include <boost/progress.hpp>

using namespace boost;

int main(int argc, const char * argv[])
{
    progress_timer timer;
    // do stuff, preferably in a 100x loop to make it take longer.
    return 0;
}
When progress_timer goes out of scope, it will print out the time elapsed since its creation.
UPDATE: Here's a version that works without Boost (tested on macOS/iOS):
#include <chrono>
#include <string>
#include <iostream>
#include <math.h>
#include <unistd.h>

class NLTimerScoped {
private:
    const std::chrono::steady_clock::time_point start;
    const std::string name;

public:
    /* Initializer list matches the member declaration order (start, then name),
     * so the compiler does not warn about reordered initialization. */
    NLTimerScoped( const std::string & name ) : start( std::chrono::steady_clock::now() ), name( name ) {
    }

    ~NLTimerScoped() {
        const auto end( std::chrono::steady_clock::now() );
        const auto duration_ms = std::chrono::duration_cast<std::chrono::milliseconds>( end - start ).count();
        std::cout << name << " duration: " << duration_ms << "ms" << std::endl;
    }
};

int main(int argc, const char * argv[]) {
    {
        NLTimerScoped timer( "sin sum" );

        float a = 0.0f;
        for ( int i = 0; i < 1000000; i++ ) {
            a += sin( (float) i / 100 );
        }

        std::cout << "sin sum = " << a << std::endl;
    }

    {
        NLTimerScoped timer( "sleep( 4 )" );
        sleep( 4 );
    }

    return 0;
}
Windows provides the QueryPerformanceCounter() function, and Unix has gettimeofday(). Both functions can measure with at least 1-microsecond resolution.
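For reference, here is a minimal QueryPerformanceCounter sketch for Windows (the middle line is a placeholder for the code being measured):

#include <Windows.h>
#include <stdio.h>

int main()
{
    LARGE_INTEGER freq, t0, t1;
    QueryPerformanceFrequency(&freq); /* ticks per second, fixed at boot */
    QueryPerformanceCounter(&t0);
    /* ... the code you want to measure ... */
    QueryPerformanceCounter(&t1);
    double secs = (double)(t1.QuadPart - t0.QuadPart) / (double)freq.QuadPart;
    printf("elapsed: %f s\n", secs);
    return 0;
}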
In some programs I wrote, I used RDTSC for this purpose. RDTSC is not about time but about the number of cycles since processor start. You have to calibrate it on your system to get a result in seconds, but it is really handy when you want to evaluate performance; it is even better to use the number of cycles directly, without trying to convert them back to seconds.
(The link above is to a French Wikipedia page, but it has C++ code samples; the English version is here.)
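For illustration, a minimal sketch reading the counter via the __rdtsc() compiler intrinsic (provided by <x86intrin.h> on GCC/Clang and <intrin.h> on MSVC); as noted above, the result is in cycles, not seconds:

#include <x86intrin.h> /* __rdtsc(); use <intrin.h> on MSVC */
#include <cstdio>

int main()
{
    unsigned long long c0 = __rdtsc();
    /* ... the code you want to measure ... */
    unsigned long long c1 = __rdtsc();
    std::printf("elapsed cycles: %llu\n", c1 - c0);
    return 0;
}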