pagelocker.h 6.09 KB
Newer Older
1
// Copyright (c) 2009-2010 Satoshi Nakamoto
2
// Copyright (c) 2009-2013 The Bitcoin Core developers
3
// Distributed under the MIT software license, see the accompanying
Fordy's avatar
Fordy committed
4
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
5

6
7
#ifndef BITCOIN_ALLOCATORS_PAGELOCKER_H
#define BITCOIN_ALLOCATORS_PAGELOCKER_H
8

9
10
#include "support/cleanse.h"

11
12
#include <map>

13
#include <boost/thread/mutex.hpp>
14
#include <boost/thread/once.hpp>
Philip Kaufmann's avatar
Philip Kaufmann committed
15

16
17
18
19
20
21
22
23
24
25
26
/**
 * Thread-safe class to keep track of locked (ie, non-swappable) memory pages.
 *
 * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
 * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
 * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
 *
 * @note By using a map from each page base address to lock count, this class is optimized for
 * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
 * something like an interval tree would be the preferred data structure.
 */
27
28
template <class Locker>
class LockedPageManagerBase
29
30
{
public:
31
    LockedPageManagerBase(size_t page_size) : page_size(page_size)
32
33
    {
        // Determine bitmask for extracting page from address
34
        assert(!(page_size & (page_size - 1))); // size must be power of two
35
36
37
        page_mask = ~(page_size - 1);
    }

38
39
40
41
42
43
    ~LockedPageManagerBase()
    {
        assert(this->GetLockedPageCount() == 0);
    }


44
    // For all pages in affected range, increase lock count
45
    void LockRange(void* p, size_t size)
46
47
    {
        boost::mutex::scoped_lock lock(mutex);
48
49
        if (!size)
            return;
50
51
52
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
53
        for (size_t page = start_page; page <= end_page; page += page_size) {
54
            Histogram::iterator it = histogram.find(page);
55
            if (it == histogram.end()) // Newly locked page
56
57
58
            {
                locker.Lock(reinterpret_cast<void*>(page), page_size);
                histogram.insert(std::make_pair(page, 1));
59
            } else // Page was already locked; increase counter
60
61
62
63
64
65
66
            {
                it->second += 1;
            }
        }
    }

    // For all pages in affected range, decrease lock count
67
    void UnlockRange(void* p, size_t size)
68
69
    {
        boost::mutex::scoped_lock lock(mutex);
70
71
        if (!size)
            return;
72
73
74
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
75
        for (size_t page = start_page; page <= end_page; page += page_size) {
76
77
78
79
            Histogram::iterator it = histogram.find(page);
            assert(it != histogram.end()); // Cannot unlock an area that was not locked
            // Decrease counter for page, when it is zero, the page will be unlocked
            it->second -= 1;
80
            if (it->second == 0) // Nothing on the page anymore that keeps it locked
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
            {
                // Unlock page and remove the count from histogram
                locker.Unlock(reinterpret_cast<void*>(page), page_size);
                histogram.erase(it);
            }
        }
    }

    // Get number of locked pages for diagnostics
    int GetLockedPageCount()
    {
        boost::mutex::scoped_lock lock(mutex);
        return histogram.size();
    }

private:
    Locker locker;
    boost::mutex mutex;
    size_t page_size, page_mask;
    // map of page base address to lock count
101
    typedef std::map<size_t, int> Histogram;
102
103
104
105
106
107
108
109
110
111
112
113
114
115
    Histogram histogram;
};


/**
 * OS-dependent memory page locking/unlocking.
 * Defined as policy class to make stubbing for test possible.
 *
 * Implementations of Lock/Unlock live in the corresponding .cpp file.
 */
class MemoryPageLocker
{
public:
    /** Lock memory pages so the OS will not swap them out.
     * addr and len must be a multiple of the system page size.
     * Returns true on success.
     */
    bool Lock(const void* addr, size_t len);
    /** Unlock previously locked memory pages.
     * addr and len must be a multiple of the system page size.
     * Returns true on success.
     */
    bool Unlock(const void* addr, size_t len);
};

/**
 * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in
 * std::allocator templates.
126
127
128
129
130
131
 *
 * Some implementations of the STL allocate memory in some constructors (i.e., see
 * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.)
 * Due to the unpredictable order of static initializers, we have to make sure the
 * LockedPageManager instance exists before any other STL-based objects that use
 * secure_allocator are created. So instead of having LockedPageManager also be
132
 * static-initialized, it is created on demand.
133
 */
134
class LockedPageManager : public LockedPageManagerBase<MemoryPageLocker>
135
136
{
public:
137
    static LockedPageManager& Instance()
138
139
140
141
142
    {
        boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag);
        return *LockedPageManager::_instance;
    }

143
private:
144
    LockedPageManager();
145
146
147
148
149
150
151
152
153
154
155
156
157
158

    static void CreateInstance()
    {
        // Using a local static instance guarantees that the object is initialized
        // when it's first needed and also deinitialized after all objects that use
        // it are done with it.  I can think of one unlikely scenario where we may
        // have a static deinitialization order/problem, but the check in
        // LockedPageManagerBase's destructor helps us detect if that ever happens.
        static LockedPageManager instance;
        LockedPageManager::_instance = &instance;
    }

    static LockedPageManager* _instance;
    static boost::once_flag init_flag;
159
};
160

161
162
163
164
//
// Functions for directly locking/unlocking memory objects.
// Intended for non-dynamically allocated structures.
//
165
166
167
template <typename T>
void LockObject(const T& t)
{
168
    LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T));
169
170
}

171
172
173
template <typename T>
void UnlockObject(const T& t)
{
174
    memory_cleanse((void*)(&t), sizeof(T));
175
    LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T));
176
177
}

178
#endif // BITCOIN_ALLOCATORS_PAGELOCKER_H