STL-based LRU cache, any suggestions for improvements?

pmouse

Hi guys, I've written a templated LRU cache based on the SGI version of the STL. To use the class, simply write:
lru_cache<key_type, data_type, cache_length, custom_container> cache;
cache.insert( key, value );
cache[key2] = value2;

data_type* pvalue3 = cache.get(key3);
if ( pvalue3 == NULL )
{} // cache miss
else
{} // cache hit

data_type must be a class that supports a dispose() function. The cache calls dispose() on the least recently used element when the cache is full, so that smart-pointer-like types can release their resources. For example:

struct myDataType
{
int value;
myDataType( int v = 0 ) : value(v) {}
void dispose() {} // empty functions will be ignored by most compilers
};
struct mySmartPointerType
{
int* parray;
mySmartPointerType( int* a = 0 ) : parray(a) {}
void dispose() { delete[] parray; }
};

It is important that delete[] parray does not happen in the destructor; otherwise this type cannot be copied safely, because every copy would try to delete the same array.
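
For example, if the array were freed in the destructor instead, every copy the cache makes internally would share the same pointer and the array would end up deleted more than once. A minimal sketch (hypothetical type name):

struct badSmartPointerType
{
    int* parray;
    badSmartPointerType( int* a = 0 ) : parray(a) {}
    ~badSmartPointerType() { delete[] parray; } // runs for every copy
};

void copy_problem()
{
    badSmartPointerType a( new int[16] );
    {
        badSmartPointerType b = a; // shallow copy: b.parray == a.parray
    } // b goes out of scope and deletes the array
} // a goes out of scope and deletes the same array again -> double delete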

My problem: I feel this cache is rather slow. Does anyone have suggestions to improve its performance?
I've written a test program:

struct point2d
{
short x;
short y;
point2d( short _x, short _y ) : x(_x), y(_y){}
bool operator ==( const point2d & rhs ) const
{
return (x == rhs.x) && (y == rhs.y);
}
bool operator != ( const point2d& rhs ) const
{
return ( x!=rhs.x ) || ( y!=rhs.y );
}
bool operator < (const point2d & rhs ) const
{
return ( x < rhs.x ) || ( x==rhs.x && y < rhs.y ) ;
}
};

struct basetile
{
short id;
char z;
basetile( int _id = 0, int _z = 0 ) : id(_id), z(_z) {}

bool operator == ( const basetile& rhs ) const
{
return id == rhs.id && z == rhs.z;
}
bool operator != ( const basetile& rhs ) const
{
return id != rhs.id || z != rhs.z;
}
void dispose() {}
} __attribute__((packed)) ;

void cacheTest_perf()
{
lru_cache<point2d, basetile> cache;
int count=0;

srand( 3456789 );
for ( int i = 0; i < 100000; ++i )
{
point2d p(rand()%200, rand()%200);
basetile& t = cache[p];

if ( t.id != 0 )
{
count++;
}
else
{
t.id = i % 1024 + 1;
t.z = 10;
}
}

cout << "total hit " << count << " out of 100000 requests" <<
endl;
}

The test program uses a point2d structure as the index and a 3-byte packed structure as the data type. The result is:

$ time ./testapp
total hit 23574 out of 100000 requests

real 0m0.066s
user 0m0.055s
sys 0m0.002s

I'm pretty sure that's bad performance for a cache: 0.066 s for 100,000 requests works out to roughly 660 ns per lookup. Any suggestions to speed things up are welcome!
The source code is included below:

#ifndef LRUCACHE_H_INCLUDED
#define LRUCACHE_H_INCLUDED

#include <iostream>
#include <list>
// #include <map>

#ifdef __GNUC__
#include <ext/hash_map>
using namespace __gnu_cxx;
#else
#include <hash_map>
#endif
using namespace std;

template<class T>
struct GenericHashAdapter
{
size_t operator()(const T& x) const
{
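// hash the raw bytes of the key, one byte at a time (one-at-a-time style mixing)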
unsigned char* p = (unsigned char*)(&x);
unsigned int h = 0;
for ( unsigned int i = 0; i < sizeof(T); ++i )
{
h += p[i];
h += h << 10;
h ^= h >> 6;
}
h += ( h << 3 );
h ^= ( h >> 11 );
h += ( h << 15 );
return(h);
}
};

template<class T>
struct GenericEqualAdapter
{
bool operator() (const T& a, const T& b) const
{
return a == b;
}
};

/*
* lru cache
* TElm must support the "dispose()" call
* the cache will call dispose() on the least recently used element
* when the cache is full
*/
template <class TKey,
class TElm,
int Capacity = 10000,
// - to use an RB tree instead of a hash table: uncomment the next line
//   and comment out the hash_map line below.
// class Container = map<TKey, pair<TElm, typename list<TKey>::iterator> >
// - hash table container:
class Container = hash_map<TKey, pair<TElm, typename list<TKey>::iterator>,
GenericHashAdapter<TKey>, GenericEqualAdapter<TKey> > >
class lru_cache
{
typedef TKey key_type;
typedef TElm data_type;
typedef TElm& reference;
typedef TElm* pointer;

public:
typedef list<TKey> key_list;
typedef typename key_list::iterator key_iterator;
typedef typename Container::iterator item_iterator;
typedef pair<TElm, key_iterator> mapped_type;
typedef pair<TKey, mapped_type> value_type;

lru_cache() : _list(), _map()
{
}
virtual ~lru_cache()
{
}

item_iterator find( const key_type& key )
{
return _map.find(key);
}

reference insert( const key_type& key, const data_type& data )
{
// 1. push key to list
_list.push_front(key);
// 2. insert element to map
pair<item_iterator, bool> ret = _map.insert(
value_type(key, mapped_type(data, _list.begin()))
);
if ( !ret.second )
{
// key already exist
// remove the old key from the list
// old key is at ret.first->second.second
_list.erase( ret.first->second.second );
// also need to update the key iterator stored in the node
ret.first->second.second = _list.begin();
}
else
{
// insert successful
// is the cache over capacity now?
if ( _map.size() > (size_t)Capacity )
{
// get the least recently used key
item_iterator itr = _map.find(_list.back());
// dispose data
itr->second.first.dispose();
// erase from table
_map.erase( itr );
// remove last key from list
_list.pop_back();
}
}
return ret.first->second.first;
}
void update_key( item_iterator& itr )
{
_list.erase( itr->second.second );
_list.push_front( itr->first );
itr->second.second = _list.begin();
}

pointer get( const key_type& key )
{
item_iterator itr = _map.find(key);
if ( itr != _map.end() )
{
update_key( itr );
return &(itr->second.first);
}
else
return NULL;
}
pointer look( const key_type& key )
{
item_iterator itr = _map.find(key);
if ( itr != _map.end() )
{
return &(itr->second.first);
}
else
return NULL;
}

reference operator[](const key_type& key )
{
// algorithm:
// try insert the key
// if it already exist, then the correct element will be returned
// otherwise, a new node with a default-constructed element will be created and returned
return insert( key, data_type() );
}

pointer operator()(const key_type&key)
{
return get(key);
}

key_iterator beginkey()
{
return _list.begin();
}
key_iterator endkey()
{
return _list.end();
}

private:
key_list _list;
Container _map;
};

#endif
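
In case anyone wants to compare the RB-tree variant against the hash table, the cache can also be instantiated with the std::map alternative mentioned in the comment on the Container template parameter, for example (the capacity value is arbitrary):

lru_cache<
    point2d,
    basetile,
    5000,
    std::map<point2d, std::pair<basetile, std::list<point2d>::iterator> >
> tree_cache;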


Regards,

PQ
 
