I have a constant stream of numbers that I am generating that I will later need to iterate over.
Is it more efficient to add them to an array then use std::sort()
or to add them to a heap
(priority queue) and then later pop them off?
Currently, I just have a vector
that I am pushing back. Would another data structure be better suited for sorting as I go along? So the question is: would inserting into a heap n times at log(n)
per insert (nlogn
) be faster than just sorting after the fact (also nlogn
)?
Running the following program (with gcc 8.3 on GNU/Linux) gives these results:
100 elements, 2171202 iterations --> v: 1.7s pq: 2.89572s (x 1.70337)
1000 elements, 144400 iterations --> v: 3.08776s pq: 6.75459s (x 2.18754)
10000 elements, 10816 iterations --> v: 5.24278s pq: 8.79159s (x 1.6769)
100000 elements, 841 iterations --> v: 5.06147s pq: 8.62931s (x 1.7049)
1000000 elements, 72 iterations --> v: 4.64172s pq: 9.16332s (x 1.97412)
Invoking sort()
once on a vector
seems to be better than using a priority_queue
in any case.
/*
g++ -o prog prog.cpp -O3 -march=native \
-std=c++17 -pedantic -Wall -Wextra -Wconversion
*/
#include <iostream>
#include <stdexcept>
#include <cmath>
#include <vector>
#include <queue>
#include <algorithm>
#include <chrono>
#include <random>
int
get_elem_count(int argc,
               char **argv)
{
  // Parse the first command-line argument as an element count.
  // Returns -1 when the argument is missing or is not a valid integer.
  if(argc<=1)
  {
    return -1;
  }
  try
  {
    return std::stoi(argv[1]);
  }
  catch(...) // std::invalid_argument / std::out_of_range from stoi
  {
    return -1;
  }
}
std::vector<double>
generate_sequence(int elem_count)
{
  // Produce elem_count pseudo-random doubles uniformly drawn from [0, 1).
  // A fresh engine is seeded from std::random_device on every call, so
  // successive calls yield independent sequences.
  auto gen=std::default_random_engine{std::random_device{}()};
  auto dist=std::uniform_real_distribution<double>{0.0, 1.0};
  auto sequence=std::vector<double>{};
  // explicit cast: the implicit int -> size_type conversion warns under
  // the -Wconversion flag this file is advertised to compile clean with
  sequence.reserve(static_cast<std::size_t>(elem_count));
  for(auto i=0; i<elem_count; ++i)
  {
    sequence.emplace_back(dist(gen));
  }
  return sequence;
}
double
current_time()
{
  // Seconds (microsecond resolution) since the clock's epoch, intended
  // only for computing durations by subtraction.
  // steady_clock is monotonic: unlike system_clock it can never jump
  // backwards (NTP corrections, manual clock changes), which would
  // silently corrupt the measured benchmark durations.
  const auto now{std::chrono::steady_clock::now().time_since_epoch()};
  return 1e-6*double(std::chrono::duration_cast
                     <std::chrono::microseconds>(now).count());
}
void
vector_test(const std::vector<double> &sequence,
            std::vector<double> &tested)
{
  //~~~~ consume the sequence and store values ~~~~
  // assign() clears then copies, reusing any capacity from a previous run
  tested.assign(begin(sequence), end(sequence));
  // sort in descending order (mirrors priority_queue_test's pop order)
  std::sort(begin(tested), end(tested),
            [](double lhs, double rhs)
            {
              return rhs<lhs;
            });
  //~~~~ make use of the sorted values ~~~~
  auto last=1.0;
  for(const auto &value: tested)
  {
    if(value>last)
    {
      throw std::runtime_error{"this is just a dummy test (never true) "
                               "in order to prevent the optimizer from "
                               "discarding all the code..."};
    }
    last=value;
  }
}
void
priority_queue_test(const std::vector<double> &sequence,
                    std::priority_queue<double> &tested)
{
  //~~~~ consume the sequence and store values ~~~~
  for(const auto &value: sequence)
  {
    tested.push(value);
  }
  //~~~~ make use of the sorted values ~~~~
  // a max-heap pops its largest element first, so the stream of values
  // below is non-increasing and starts at or below 1.0
  auto last=1.0;
  while(!tested.empty())
  {
    const auto value=tested.top();
    tested.pop();
    if(value>last)
    {
      throw std::runtime_error{"this is just a dummy test (never true) "
                               "in order to prevent the optimizer from "
                               "discarding all the code..."};
    }
    last=value;
  }
}
int
main(int argc,
     char **argv)
{
  // Benchmark driver: compares sorting a vector against draining a
  // priority_queue over many repetitions of randomly generated input.
  const auto elem_count=get_elem_count(argc, argv);
  if(elem_count<=0)
  {
    // the argument is the number of elements per sequence
    std::cerr << "usage: " << argv[0] << " element_count\n";
    return 1;
  }
  // Aim for a roughly constant total work budget (~1e9 elementary
  // operations): each iteration costs about elem_count*log(elem_count).
  // Guard the degenerate cases: elem_count==1 gives log()==0 (the raw
  // division would yield inf, and converting inf to int is undefined
  // behaviour), and a huge elem_count could round the quotient down to
  // 0, which would later divide by zero.
  const auto cost=elem_count*std::log(elem_count);
  const auto iteration_count=
    cost>0.0 ? std::max(1, int(1'000'000'000.0/cost))
             : 1'000'000'000;
  // split the iterations over several generations so that more than one
  // random sequence is exercised
  const auto generation_count=std::max(1, int(std::sqrt(iteration_count)));
  const auto repeat_count=std::max(1, iteration_count/generation_count);
  auto vector_duration=0.0;
  auto priority_queue_duration=0.0;
  for(auto generation=0; generation<generation_count; ++generation)
  {
    const auto sequence=generate_sequence(elem_count);
    auto tested_vector=std::vector<double>{};
    auto tested_priority_queue=std::priority_queue<double>{};
    // t0 is re-taken after the first repetition (a dry run that warms
    // caches and allocations); initialising it here instead of to 0.0
    // keeps the timing meaningful even when repeat_count==1
    auto t0=current_time();
    for(auto repeat=0; repeat<repeat_count; ++repeat)
    {
      if(repeat==1) // 0 is a dry run
      {
        t0=current_time();
      }
      vector_test(sequence, tested_vector);
    }
    vector_duration+=current_time()-t0;
    t0=current_time();
    for(auto repeat=0; repeat<repeat_count; ++repeat)
    {
      if(repeat==1) // 0 is a dry run
      {
        t0=current_time();
      }
      priority_queue_test(sequence, tested_priority_queue);
    }
    priority_queue_duration+=current_time()-t0;
  }
  std::cout << elem_count << " elements, "
            << generation_count*repeat_count << " iterations --> "
            << "v: "<< vector_duration << "s "
            << "pq: "<< priority_queue_duration << "s "
            << "(x " << priority_queue_duration/vector_duration << ")\n";
  return 0;
}
The number of operations in case of the vector approach is:
n*O(1)
= O(n)
O(n*logn)
O(n)
Total = O(n*logn) + 2*O(n)
[Ignoring for a second that 2*O(n)
~ O(n)
].
The number of operations in case of the heap approach is:
n*O(logn)
= O(n*logn)
n*O(logn)
= O(n*logn)
Total = 2*O(n*logn)
.
Even though the number of operations is O(n*logn)
in both cases, by the exact mathematical formula the difference between the heap and vector case is:
O(Heap) - O(Vector) = 2*O(n*logn) - O(n*logn) - 2*O(n)
= O(n*logn) - 2*O(n)
which would be positive for large values of n
:
https://www.desmos.com/calculator/uw4i9oiy19
<iframe src="https://www.desmos.com/calculator/uw4i9oiy19?embed" width="1000px" height="1000px" style="border: 1px solid #ccc" frameborder=0></iframe>
In the above graph, blue is y = x*logx
and red is y = 2*x
.
So by this analysis, you should go for the vector approach.
If you love us? You can donate to us via Paypal or buy me a coffee so we can maintain and grow! Thank you!
Donate Us With