ma740988
Consider:
#include <vector>
#include <iostream>
#include <cstdlib>
#include <cmath>     // std::log, std::fmod
#include <ctime>

// Returns true when n is (to within a small tolerance) an integral power of two.
bool
ispow2i ( double n )
{
    double r = ( n <= 0 ) ? 0.5 : std::log ( n ) / std::log ( 2. ) ;
    if ( std::fmod ( r, 1 ) > 0.5 )
        return ( -r + static_cast<long>( r + 1 ) ) < 0.00000000001;
    else
        return ( r - static_cast<long>( r ) ) < 0.00000000001;
}
typedef std::vector<int> INT_VEC;

int main()
{
    // Collect every power of two in [1024, 2^20].
    INT_VEC iv;
    for ( size_t idx ( 1024 ); idx < 0x100000 + 1; ++idx )
    {
        if ( ispow2i ( idx ) )
            iv.push_back ( static_cast<int>( idx ) ) ;
    }

    int const num_iterations ( 50 );
    for ( INT_VEC::size_type jdx ( 0 ); jdx < iv.size(); ++jdx )
    {
        int const NumSamples = iv [ jdx ];
        INT_VEC iv2 ( NumSamples, static_cast<int>( jdx ) );
        INT_VEC iv3;
        for ( int idx ( 0 ); idx < num_iterations; ++idx )
        {
            // start time
            iv3.swap ( iv2 ) ;
            iv2.swap ( iv3 ) ;
            // elapsed time
        }
    }
    return EXIT_SUCCESS;
}
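Aside: the floating-point log test in ispow2i could be replaced with an exact
integer check. A minimal sketch, using the standard "single bit set" trick
(the name ispow2i_exact is mine, not from the original code):

// A power of two has exactly one bit set, so n & (n - 1) clears it to zero.
bool ispow2i_exact ( unsigned long n )
{
    return n != 0 && ( n & ( n - 1 ) ) == 0;
}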
The code was actually executed on a PowerPC, so the timer class is
specific to that platform. That aside, the profiling calls surrounded
the lines marked "start time" and "elapsed time". In essence, I stored
the times in a vector, then computed the elapsed time for each
iteration. Details aside, if memory serves, std::swap executes in
linear time. What's troubling is that the elapsed time averaged
0.55 microseconds +/- 0.02 for every value of NumSamples. I would
expect linear behavior, i.e., a gradual increase as NumSamples
increases. Is there something amiss in my logic?
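For reference, here is a minimal, portable sketch of the timing harness, with
std::clock standing in for the PowerPC-specific timer class. std::clock is
too coarse to time a single swap pair, so this version times the whole inner
loop and divides; the actual code timed each iteration with the platform
timer.

#include <vector>
#include <cstdio>
#include <ctime>

int main()
{
    typedef std::vector<int> INT_VEC;
    int const num_iterations = 50;

    // Walk the same power-of-two sizes as the original: 1024 .. 2^20.
    for ( int NumSamples = 1024; NumSamples <= 0x100000; NumSamples *= 2 )
    {
        INT_VEC iv2 ( NumSamples, 0 );
        INT_VEC iv3;

        std::clock_t const start = std::clock();   // start time
        for ( int idx = 0; idx < num_iterations; ++idx )
        {
            iv3.swap ( iv2 );
            iv2.swap ( iv3 );
        }
        std::clock_t const stop = std::clock();    // elapsed time

        double const us_per_iteration =
            1.0e6 * ( stop - start ) / CLOCKS_PER_SEC / num_iterations;
        std::printf ( "NumSamples = %7d : %.3f us per iteration\n",
                      NumSamples, us_per_iteration );
    }
    return 0;
}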
Thanks in advance.