内存池

头文件

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
#ifndef __MEMPOOL_H__  // NOTE(review): double-underscore identifiers are reserved for the implementation; prefer MEMPOOL_H_
#define __MEMPOOL_H__
#include <stdio.h>  // for size_t

// Singleton memory-pool facade (pimpl idiom). The concrete allocation
// strategy lives in MemPool::Impl, defined in the accompanying .cpp file.
class MemPool
{
class Impl;
Impl *m_impl;  // owned; allocated in the ctor, released in the dtor
MemPool();     // private: access only through instance()
~MemPool();

public:
// Process-wide accessor (Meyers singleton).
static MemPool &instance();

// Hand the pool a buffer to manage; pointers from earlier buffers become invalid.
void reset_buff(void *ptr, size_t size);

// Allocate `size` bytes from the pool; returns nullptr on exhaustion or size == 0.
void *alloc(size_t size);

// Return a pointer previously obtained from alloc() to the pool.
void dealloc(void *ptr);
};

// Route every malloc/free in translation units that include this header
// through the pool. NOTE(review): function-like macros named malloc/free also
// rewrite any system header included afterwards — include this header last.
#define malloc(a) MemPool::instance().alloc(a)
#define free(a) MemPool::instance().dealloc(a)

#endif

基于单向链表的内存池

优点是不需要额外分配用于管理信息的内存,性能高,节省内存;缺点是如果业务代码发生内存越界,可能会破坏内存池内嵌的链表结构,影响内存池的运行;此外,如果业务释放的地址是申请后经过对齐调整的地址,则无法正常释放

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
#include <assert.h>
#include <stdint.h>
#include <mutex>
#include <new>
#include "mempool.h"

#define MAGIC_NUMBER (0xafafafafu)

// Intrusive block header: lives directly in front of the user data inside the
// managed buffer, so the pool needs no separately allocated bookkeeping memory.
struct Block
{
    Block* m_next;                    // next free block while on the free list
    uint32_t m_size;                  // payload size in bytes (header excluded)
    uint32_t m_magic = MAGIC_NUMBER;  // canary verified on dealloc to catch corruption

    // FIX: the original single-argument constructor left m_next uninitialized.
    // Both initializer lists now match declaration order (avoids -Wreorder).
    Block(uint32_t size): m_next(nullptr), m_size(size) {}
    Block(uint32_t size, Block* next): m_next(next), m_size(size) {}

    // Carve `size` bytes off the front of this block; the remainder becomes a
    // new Block whose free-list successor is `next`. Returns nullptr when the
    // remainder cannot even hold a header — the block is then handed out whole.
    inline Block* splitBlock(uint32_t size, Block* next)
    {
        if (m_size <= size + sizeof(Block)) return nullptr;
        uint8_t* rest = static_cast<uint8_t*>(getDataStartPtr()) + size;
        Block* newBlock = new (rest) Block(static_cast<uint32_t>(m_size - size - sizeof(Block)), next);
        m_size = size;
        return newBlock;
    }

    // True while the header canary is intact.
    inline bool verify()
    {
        return m_magic == MAGIC_NUMBER;
    }

    // First byte of user data (immediately after this header).
    inline void* getDataStartPtr()
    {
        return this + 1;
    }

    // One past the last byte of user data.
    inline void* getDataEndPtr()
    {
        return static_cast<uint8_t*>(getDataStartPtr()) + m_size;
    }

    // Recover the header from a pointer previously returned by alloc().
    static inline Block* ptr2Block(void* ptr)
    {
        return static_cast<Block*>(ptr) - 1;
    }
};

// Free-list based pool implementation. All bookkeeping (Block headers) lives
// inside the managed buffer itself; the free list is kept sorted by address so
// that adjacent free blocks can be coalesced on dealloc.
class MemPool::Impl
{
    Block* m_freeList;  // address-ordered singly linked list of free blocks
    size_t m_size;      // total size of the managed buffer
    void* m_poolData;   // start of the managed buffer
    std::mutex m_mtx;   // guards all pool state

    // Coalesce `current` with its address-adjacent neighbours on the free
    // list; `previous`/`next` are its list neighbours (either may be null).
    // Caller must hold m_mtx.
    void mergeBlock(Block* current, Block* previous, Block* next)
    {
        // Merge with the block physically preceding us.
        if (previous && (size_t)previous->getDataEndPtr() == (size_t)current)
        {
            previous->m_size += current->m_size + sizeof(Block);
            previous->m_next = next;
            current = previous;
        }

        // Merge with the block physically following us.
        if (next && (size_t)current->getDataEndPtr() == (size_t)next)
        {
            current->m_size += next->m_size + sizeof(Block);
            current->m_next = next->m_next;
        }
    }

public:
    Impl()
    {
        m_freeList = nullptr;
        m_poolData = nullptr;
        m_size = 0;
    }

    // Hand the pool a fresh buffer to manage. Any pointer obtained from a
    // previous buffer becomes invalid. `size` must exceed sizeof(Block).
    void reset_buff(void *ptr, size_t size)
    {
        assert(ptr != nullptr && size > sizeof(Block));
        std::lock_guard<std::mutex> lock(m_mtx);
        m_poolData = ptr;
        m_size = size;
        m_freeList = new(m_poolData) Block((uint32_t)(size - sizeof(Block)), nullptr);
    }

    // First-fit allocation; returns nullptr when no free block is big enough.
    void *alloc(size_t size)
    {
        if (size == 0) return nullptr;

        std::lock_guard<std::mutex> lock(m_mtx);

        Block* current = m_freeList;
        Block* previous = nullptr;

        while (current)
        {
            if (current->m_size >= size)
            {
                // Try to split off the tail; when the remainder is too small
                // the whole block is handed out and simply unlinked.
                Block* replacement = current->splitBlock((uint32_t)size, current->m_next);
                if (replacement == nullptr)  // FIX: nullptr instead of NULL
                {
                    replacement = current->m_next;
                }
                previous ? previous->m_next = replacement : m_freeList = replacement;
                return current->getDataStartPtr();
            }

            previous = current;
            current = current->m_next;
        }

        return nullptr;
    }

    // Return `ptr` (previously obtained from alloc) to the pool, keeping the
    // free list address-ordered and merging adjacent free blocks.
    void dealloc(void *ptr)
    {
        // FIX: take the lock BEFORE reading m_poolData/m_size (was a data race
        // against reset_buff).
        std::lock_guard<std::mutex> lock(m_mtx);

        if ((size_t)ptr < (size_t)m_poolData || (size_t)ptr >= (size_t)m_poolData + m_size)
        {
            assert(0);
            return;  // FIX: the original fell through with a bad pointer under NDEBUG
        }

        Block* block = Block::ptr2Block(ptr);
        if (!block->verify())
        {
            assert(block->verify());
            return;  // header canary destroyed — refuse to touch the list
        }

        // Locate the insertion point that keeps the list sorted by address.
        Block* current = m_freeList;
        Block* previous = nullptr;
        while (current && (size_t)current < (size_t)block)
        {
            previous = current;
            current = current->m_next;
        }

        block->m_next = current;
        previous ? previous->m_next = block : m_freeList = block;
        mergeBlock(block, previous, current);
    }
};


// Construct the singleton's implementation object; the pool is unusable until
// reset_buff() supplies a buffer to manage.
MemPool::MemPool() : m_impl(new Impl()) {}

// Release the implementation object (runs at program exit, when the Meyers
// singleton in instance() is destroyed).
MemPool::~MemPool() { delete m_impl; }

// Process-wide singleton accessor (Meyers singleton; initialization is
// thread-safe since C++11).
MemPool &MemPool::instance()
{
static MemPool obj;
return obj;
}

// Forward to the implementation: hand the pool a buffer to manage.
void MemPool::reset_buff(void *ptr, size_t size)
{
m_impl->reset_buff(ptr, size);
}

// Forward to the implementation: allocate `size` bytes from the pool.
void *MemPool::alloc(size_t size)
{
return m_impl->alloc(size);
}

// Forward to the implementation: return `ptr` to the pool.
void MemPool::dealloc(void *ptr)
{
m_impl->dealloc(ptr);
}

基于std::list的内存池

优点是管理信息的内存(由std::list分配)和分配出去的内存(m_poolData)是分离的,即使业务代码发生内存越界,大概率也不会影响内存池的运行,并且可以正常释放经过地址对齐调整后的内存;缺点是std::list需要消耗额外的内存来维护内存块信息的链表,性能上比单向链表实现略低一些

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
#include <assert.h>
#include <stdint.h>
#include <list>
#include <mutex>
#include <algorithm>
#include "mempool.h"

// Bookkeeping record for one region of the managed buffer. Nodes are stored
// in a std::list on the heap, separate from the pooled memory itself.
struct Block
{
    void* m_data;   // start of the region inside the pool buffer
    size_t m_size;  // region length in bytes
    bool m_isFree;  // true while the region is unallocated

    // New nodes always describe a free region. FIX: mem-initializer order now
    // matches declaration order (original triggered -Wreorder), and m_isFree
    // is set in the initializer list instead of the constructor body.
    Block(void *data, size_t size) : m_data(data), m_size(size), m_isFree(true) {}
};

// std::list based pool implementation. Bookkeeping nodes are allocated by
// std::list on the heap, separate from the managed buffer, so a buffer
// overrun in user data cannot corrupt the pool's metadata. The list is kept
// in address order; free neighbours are merged on dealloc.
class MemPool::Impl
{
    std::list<Block> m_memList;  // address-ordered regions (both free and in use)
    size_t m_size;               // total size of the managed buffer
    void* m_poolData;            // start of the managed buffer
    std::mutex m_mtx;            // guards all pool state

    // Merge the freed node `it` with free neighbours; `it` may be erased and
    // is updated accordingly. Caller must hold m_mtx.
    void mergeBlock(std::list<Block>::iterator& it)
    {
        auto nextIt = std::next(it);
        if (nextIt != m_memList.end() && nextIt->m_isFree)
        {
            it->m_size += nextIt->m_size;
            m_memList.erase(nextIt);
        }

        if (it != m_memList.begin())
        {
            auto prevIt = std::prev(it);
            if (prevIt->m_isFree)
            {
                prevIt->m_size += it->m_size;
                it = m_memList.erase(it);
            }
        }
    }

    // Mark `it` as used; when it is larger than `size`, split the surplus
    // into a new free node placed right after it. Caller must hold m_mtx.
    inline void splitBlock(std::list<Block>::iterator& it, size_t size)
    {
        it->m_isFree = false;
        if (it->m_size > size)
        {
            size_t freeSize = it->m_size - size;
            it->m_size = size;
            m_memList.emplace(std::next(it), (uint8_t*)it->m_data + size, freeSize);
        }
    }

public:
    Impl()
    {
        m_poolData = nullptr;
        m_size = 0;
    }

    // Hand the pool a fresh buffer to manage; any pointer obtained from a
    // previous buffer becomes invalid.
    void reset_buff(void *ptr, size_t size)
    {
        assert(ptr != nullptr && size > 0);
        std::lock_guard<std::mutex> lock(m_mtx);
        m_poolData = ptr;
        m_size = size;
        m_memList.clear();
        m_memList.emplace_back(ptr, size);
    }

    // First-fit allocation; returns nullptr when nothing large enough is free.
    void *alloc(size_t size)
    {
        if (size == 0) return nullptr;

        std::lock_guard<std::mutex> lock(m_mtx);

        for (auto it = m_memList.begin(); it != m_memList.end(); ++it)  // FIX: pre-increment
        {
            if (it->m_isFree && it->m_size >= size)
            {
                splitBlock(it, size);
                return it->m_data;
            }
        }

        return nullptr;
    }

    // Free `ptr`. Any address inside an allocated region releases that whole
    // region, which is what allows alignment-adjusted pointers to be freed.
    void dealloc(void *ptr)
    {
        // FIX: take the lock BEFORE reading m_poolData/m_size (was a data race
        // against reset_buff).
        std::lock_guard<std::mutex> lock(m_mtx);

        if ((size_t)ptr < (size_t)m_poolData || (size_t)ptr >= (size_t)m_poolData + m_size)
        {
            assert(0);
            return;  // FIX: the original fell through with a bad pointer under NDEBUG
        }

        auto it = std::find_if(m_memList.begin(), m_memList.end(), [ptr](const Block& ele){ return !ele.m_isFree && (size_t)ptr >= (size_t)ele.m_data && (size_t)ptr < (size_t)ele.m_data + ele.m_size; });
        if (it == m_memList.end())
        {
            assert(0);
            return;  // FIX: the original dereferenced end() here under NDEBUG
        }

        it->m_isFree = true;
        mergeBlock(it);
    }
};


// Construct the singleton's implementation object; the pool is unusable until
// reset_buff() supplies a buffer to manage.
MemPool::MemPool() : m_impl(new Impl()) {}

// Release the implementation object (runs at program exit, when the Meyers
// singleton in instance() is destroyed).
MemPool::~MemPool() { delete m_impl; }

// Process-wide singleton accessor (Meyers singleton; initialization is
// thread-safe since C++11).
MemPool &MemPool::instance()
{
static MemPool obj;
return obj;
}

// Forward to the implementation: hand the pool a buffer to manage.
void MemPool::reset_buff(void *ptr, size_t size)
{
m_impl->reset_buff(ptr, size);
}

// Forward to the implementation: allocate `size` bytes from the pool.
void *MemPool::alloc(size_t size)
{
return m_impl->alloc(size);
}

// Forward to the implementation: return `ptr` to the pool.
void MemPool::dealloc(void *ptr)
{
m_impl->dealloc(ptr);
}