
/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#define LOG_TAG "MemoryDealer"

#include <binder/MemoryDealer.h>
#include <binder/IPCThreadState.h>
#include <binder/MemoryBase.h>

#include <utils/Log.h>
#include <utils/SortedVector.h>
#include <utils/String8.h>
#include <utils/threads.h>

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>

#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/file.h>
40
41namespace android {
42// ----------------------------------------------------------------------------
43
/*
 * A simple templatized doubly linked-list implementation.
 *
 * NODE must expose public `prev` and `next` pointer members. The list links
 * nodes in place; it never allocates, owns, or frees them — callers manage
 * node lifetime (see remove(), which only unlinks).
 */

template <typename NODE>
class LinkedList
{
    NODE*  mFirst;
    NODE*  mLast;

public:
                LinkedList() : mFirst(nullptr), mLast(nullptr) { }
    bool        isEmpty() const { return mFirst == nullptr; }
    NODE const* head() const { return mFirst; }
    NODE*       head() { return mFirst; }
    NODE const* tail() const { return mLast; }
    NODE*       tail() { return mLast; }

    // Link newNode immediately after node. node must already be in the list.
    void insertAfter(NODE* node, NODE* newNode) {
        newNode->prev = node;
        newNode->next = node->next;
        if (node->next == nullptr) mLast = newNode;
        else                       node->next->prev = newNode;
        node->next = newNode;
    }

    // Link newNode immediately before node. node must already be in the list.
    void insertBefore(NODE* node, NODE* newNode) {
         newNode->prev = node->prev;
         newNode->next = node;
         if (node->prev == nullptr) mFirst = newNode;
         else                       node->prev->next = newNode;
         node->prev = newNode;
    }

    // Make newNode the new head of the list.
    void insertHead(NODE* newNode) {
        if (mFirst == nullptr) {
            mFirst = mLast = newNode;
            newNode->prev = newNode->next = nullptr;
        } else {
            newNode->prev = nullptr;
            newNode->next = mFirst;
            mFirst->prev = newNode;
            mFirst = newNode;
        }
    }

    // Make newNode the new tail of the list.
    void insertTail(NODE* newNode) {
        if (mLast == nullptr) {
            insertHead(newNode);
        } else {
            newNode->prev = mLast;
            newNode->next = nullptr;
            mLast->next = newNode;
            mLast = newNode;
        }
    }

    // Unlink node from the list and return it. The node's own prev/next
    // pointers are deliberately left untouched; the caller owns the node.
    NODE* remove(NODE* node) {
        if (node->prev == nullptr)  mFirst = node->next;
        else                        node->prev->next = node->next;
        if (node->next == nullptr)  mLast = node->prev;
        else                        node->next->prev = node->prev;
        return node;
    }
};
109
110// ----------------------------------------------------------------------------
111
/*
 * An Allocation is a MemoryBase covering a range of the dealer's heap; its
 * destructor hands the range back to the MemoryDealer.
 */
class Allocation : public MemoryBase {
public:
    Allocation(const sp<MemoryDealer>& dealer,
            const sp<IMemoryHeap>& heap, ssize_t offset, size_t size);
    virtual ~Allocation();
private:
    // Keeps the dealer alive while this allocation exists; used in the
    // destructor to deallocate() the range.
    sp<MemoryDealer> mDealer;
};
120
121// ----------------------------------------------------------------------------
122
/*
 * Best-fit allocator managing a fixed-size heap as an ordered linked list of
 * chunk_t. All chunk bookkeeping (start, size) is in units of kMemoryAlign
 * bytes; the public API speaks in byte offsets.
 */
class SimpleBestFitAllocator
{
    enum {
        // allocate() flag: the returned offset must fall on a page boundary.
        PAGE_ALIGNED = 0x00000001
    };
public:
    SimpleBestFitAllocator(size_t size);
    ~SimpleBestFitAllocator();

    // Returns the byte offset of the new block; on failure the (negative)
    // NO_MEMORY error is returned — callers inspect it as ssize_t.
    size_t      allocate(size_t size, uint32_t flags = 0);
    // Frees the block starting at `offset`; NAME_NOT_FOUND if no allocated
    // block starts there.
    status_t    deallocate(size_t offset);
    size_t      size() const;
    void        dump(const char* what) const;
    void        dump(String8& res, const char* what) const;

    static size_t getAllocationAlignment() { return kMemoryAlign; }

private:

    struct chunk_t {
        chunk_t(size_t start, size_t size)
        : start(start), size(size), free(1), prev(0), next(0) {
        }
        size_t              start;      // offset, in kMemoryAlign units
        size_t              size : 28;  // length, in kMemoryAlign units
        int                 free : 4;   // 1 while on the free list
        mutable chunk_t*    prev;
        mutable chunk_t*    next;
    };

    // Lock-free-of-mLock internals; callers must hold mLock.
    ssize_t  alloc(size_t size, uint32_t flags);
    chunk_t* dealloc(size_t start);
    void     dump_l(const char* what) const;
    void     dump_l(String8& res, const char* what) const;

    static const int    kMemoryAlign;
    mutable Mutex       mLock;      // guards mList and the chunks it links
    LinkedList<chunk_t> mList;      // chunks ordered by start offset
    size_t              mHeapSize;  // managed size, rounded up to a page
};
163
164// ----------------------------------------------------------------------------
165
// Wraps [offset, offset+size) of `heap`. In debug builds the range is filled
// with the 0xda marker so reads of never-written memory are recognizable.
Allocation::Allocation(
        const sp<MemoryDealer>& dealer,
        const sp<IMemoryHeap>& heap, ssize_t offset, size_t size)
    : MemoryBase(heap, offset, size), mDealer(dealer)
{
#ifndef NDEBUG
    void* const start_ptr = (void*)(intptr_t(heap->base()) + offset);
    memset(start_ptr, 0xda, size);
#endif
}
176
// Returns the range to the dealer. Before doing so, any whole pages fully
// contained in the freed range are handed back to the kernel with
// madvise(MADV_REMOVE) so the physical memory is actually released.
Allocation::~Allocation()
{
    size_t freedOffset = getOffset();
    size_t freedSize   = getSize();
    if (freedSize) {
        /* NOTE: it's VERY important to not free allocations of size 0 because
         * they're special as they don't have any record in the allocator
         * and could alias some real allocation (their offset is zero). */

        // keep the size to unmap in excess
        size_t pagesize = getpagesize();
        size_t start = freedOffset;
        size_t end = start + freedSize;
        // round outward to page boundaries...
        start &= ~(pagesize-1);
        end = (end + pagesize-1) & ~(pagesize-1);

        // give back to the kernel the pages we don't need
        size_t free_start = freedOffset;
        size_t free_end = free_start + freedSize;
        // ...then clamp back to the freed range and round inward, so
        // [start, end) only covers pages wholly inside the allocation.
        if (start < free_start)
            start = free_start;
        if (end > free_end)
            end = free_end;
        start = (start + pagesize-1) & ~(pagesize-1);
        end &= ~(pagesize-1);

        if (start < end) {
            void* const start_ptr = (void*)(intptr_t(getHeap()->base()) + start);
            size_t size = end-start;

#ifndef NDEBUG
            // debug builds: poison freed memory with the 0xdf marker
            memset(start_ptr, 0xdf, size);
#endif

            // MADV_REMOVE is not defined on Dapper based Goobuntu
#ifdef MADV_REMOVE
            if (size) {
                int err = madvise(start_ptr, size, MADV_REMOVE);
                ALOGW_IF(err, "madvise(%p, %zu, MADV_REMOVE) returned %s",
                        start_ptr, size, err<0 ? strerror(errno) : "Ok");
            }
#endif
        }

        // This should be done after madvise(MADV_REMOVE), otherwise madvise()
        // might kick out the memory region that's allocated and/or written
        // right after the deallocation.
        mDealer->deallocate(freedOffset);
    }
}
227
228// ----------------------------------------------------------------------------
229
// Creates a dealer backed by a new MemoryHeapBase of `size` bytes, carved
// up on demand by a SimpleBestFitAllocator of the same size.
MemoryDealer::MemoryDealer(size_t size, const char* name, uint32_t flags)
    : mHeap(new MemoryHeapBase(size, flags, name)),
    mAllocator(new SimpleBestFitAllocator(size))
{
}
235
MemoryDealer::~MemoryDealer()
{
    // mAllocator is a raw owning pointer (see allocator()); release it here.
    delete mAllocator;
}
240
241sp<IMemory> MemoryDealer::allocate(size_t size)
242{
243    sp<IMemory> memory;
244    const ssize_t offset = allocator()->allocate(size);
245    if (offset >= 0) {
246        memory = new Allocation(this, heap(), offset, size);
247    }
248    return memory;
249}
250
// Returns the block starting at `offset` to the allocator. Invoked by
// ~Allocation() when the last reference to an allocation goes away.
void MemoryDealer::deallocate(size_t offset)
{
    allocator()->deallocate(offset);
}
255
// Logs the allocator's chunk list, tagged with `what`.
void MemoryDealer::dump(const char* what) const
{
    allocator()->dump(what);
}
260
// The shared heap all allocations are carved from.
const sp<IMemoryHeap>& MemoryDealer::heap() const {
    return mHeap;
}
264
// Raw (non-owning for callers) accessor; the MemoryDealer retains ownership.
SimpleBestFitAllocator* MemoryDealer::allocator() const {
    return mAllocator;
}
268
// static
size_t MemoryDealer::getAllocationAlignment()
{
    // Every offset handed out is a multiple of this value (kMemoryAlign).
    return SimpleBestFitAllocator::getAllocationAlignment();
}
274
275// ----------------------------------------------------------------------------
276
// align all the memory blocks on a cache-line boundary
// (also the unit in which chunk_t::start and chunk_t::size are expressed)
const int SimpleBestFitAllocator::kMemoryAlign = 32;
279
280SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
281{
282    size_t pagesize = getpagesize();
283    mHeapSize = ((size + pagesize-1) & ~(pagesize-1));
284
285    chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
286    mList.insertHead(node);
287}
288
289SimpleBestFitAllocator::~SimpleBestFitAllocator()
290{
291    while(!mList.isEmpty()) {
292        delete mList.remove(mList.head());
293    }
294}
295
// Total managed heap size in bytes (rounded up to a page multiple).
size_t SimpleBestFitAllocator::size() const
{
    return mHeapSize;
}
300
301size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
302{
303    Mutex::Autolock _l(mLock);
304    ssize_t offset = alloc(size, flags);
305    return offset;
306}
307
308status_t SimpleBestFitAllocator::deallocate(size_t offset)
309{
310    Mutex::Autolock _l(mLock);
311    chunk_t const * const freed = dealloc(offset);
312    if (freed) {
313        return NO_ERROR;
314    }
315    return NAME_NOT_FOUND;
316}
317
// Core best-fit search; callers hold mLock. Works internally in kMemoryAlign
// units and returns the chosen byte offset, or NO_MEMORY on failure.
ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags)
{
    if (size == 0) {
        // Size-0 allocations report offset 0 and leave no chunk record
        // (see the matching note in ~Allocation()).
        return 0;
    }
    // convert to allocator units, rounding up
    size = (size + kMemoryAlign-1) / kMemoryAlign;
    chunk_t* free_chunk = 0;
    chunk_t* cur = mList.head();

    size_t pagesize = getpagesize();
    while (cur) {
        int extra = 0;
        if (flags & PAGE_ALIGNED)
            // leading padding (in units) needed to bring this chunk's start
            // up to a page boundary
            extra = ( -cur->start & ((pagesize/kMemoryAlign)-1) ) ;

        // best fit
        if (cur->free && (cur->size >= (size+extra))) {
            if ((!free_chunk) || (cur->size < free_chunk->size)) {
                free_chunk = cur;
            }
            // exact fit: stop searching early
            if (cur->size == size) {
                break;
            }
        }
        cur = cur->next;
    }

    if (free_chunk) {
        const size_t free_size = free_chunk->size;
        free_chunk->free = 0;
        free_chunk->size = size;
        if (free_size > size) {
            int extra = 0;
            if (flags & PAGE_ALIGNED)
                extra = ( -free_chunk->start & ((pagesize/kMemoryAlign)-1) ) ;
            if (extra) {
                // split off the leading padding as its own free chunk so the
                // allocation itself starts on a page boundary
                chunk_t* split = new chunk_t(free_chunk->start, extra);
                free_chunk->start += extra;
                mList.insertBefore(free_chunk, split);
            }

            ALOGE_IF((flags&PAGE_ALIGNED) &&
                    ((free_chunk->start*kMemoryAlign)&(pagesize-1)),
                    "PAGE_ALIGNED requested, but page is not aligned!!!");

            // return the unused tail of the chunk to the free list
            const ssize_t tail_free = free_size - (size+extra);
            if (tail_free > 0) {
                chunk_t* split = new chunk_t(
                        free_chunk->start + free_chunk->size, tail_free);
                mList.insertAfter(free_chunk, split);
            }
        }
        // convert back to a byte offset
        return (free_chunk->start)*kMemoryAlign;
    }
    return NO_MEMORY;
}
374
// Frees the chunk whose byte offset is `start` and coalesces it with any
// adjacent free chunks; callers hold mLock. Returns the resulting free
// chunk, or NULL if no chunk starts at that offset.
// NOTE(review): the 0x%08lX specifiers assume 32-bit size_t values — confirm
// these LOG_FATAL_IF messages on LP64 builds.
SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start)
{
    start = start / kMemoryAlign;   // convert to allocator units
    chunk_t* cur = mList.head();
    while (cur) {
        if (cur->start == start) {
            LOG_FATAL_IF(cur->free,
                "block at offset 0x%08lX of size 0x%08lX already freed",
                cur->start*kMemoryAlign, cur->size*kMemoryAlign);

            // merge freed blocks together
            chunk_t* freed = cur;
            cur->free = 1;
            do {
                chunk_t* const p = cur->prev;
                chunk_t* const n = cur->next;
                if (p && (p->free || !cur->size)) {
                    // fold cur into its (free) predecessor
                    freed = p;
                    p->size += cur->size;
                    mList.remove(cur);
                    delete cur;
                }
                cur = n;
            } while (cur && cur->free);

            #ifndef NDEBUG
                if (!freed->free) {
                    dump_l("dealloc (!freed->free)");
                }
            #endif
            LOG_FATAL_IF(!freed->free,
                "freed block at offset 0x%08lX of size 0x%08lX is not free!",
                freed->start * kMemoryAlign, freed->size * kMemoryAlign);

            return freed;
        }
        cur = cur->next;
    }
    // no allocated block starts at `start`
    return 0;
}
415
// Thread-safe dump of the chunk list to the log.
void SimpleBestFitAllocator::dump(const char* what) const
{
    Mutex::Autolock _l(mLock);
    dump_l(what);
}
421
// Lock-held variant: formats the chunk list and emits it via ALOGD.
void SimpleBestFitAllocator::dump_l(const char* what) const
{
    String8 result;
    dump_l(result, what);
    ALOGD("%s", result.string());
}
428
// Thread-safe dump appended to `result` instead of the log.
void SimpleBestFitAllocator::dump(String8& result,
        const char* what) const
{
    Mutex::Autolock _l(mLock);
    dump_l(result, what);
}
435
// Lock-held variant: appends one line per chunk to `result` — index, node
// address, byte offset, byte size, F(ree)/A(llocated) — plus a sanity flag
// for broken prev/next links, and a final allocated-bytes total.
void SimpleBestFitAllocator::dump_l(String8& result,
        const char* what) const
{
    size_t size = 0;    // running total of allocated bytes
    int32_t i = 0;
    chunk_t const* cur = mList.head();

    const size_t SIZE = 256;
    char buffer[SIZE];
    snprintf(buffer, SIZE, "  %s (%p, size=%u)\n",
            what, this, (unsigned int)mHeapSize);

    result.append(buffer);

    while (cur) {
        // detect inconsistent links in the doubly linked list
        const char* errs[] = {"", "| link bogus NP",
                            "| link bogus PN", "| link bogus NP+PN" };
        int np = ((cur->next) && cur->next->prev != cur) ? 1 : 0;
        int pn = ((cur->prev) && cur->prev->next != cur) ? 2 : 0;

        snprintf(buffer, SIZE, "  %3u: %p | 0x%08X | 0x%08X | %s %s\n",
            i, cur, int(cur->start*kMemoryAlign),
            int(cur->size*kMemoryAlign),
                    int(cur->free) ? "F" : "A",
                    errs[np|pn]);

        result.append(buffer);

        if (!cur->free)
            size += cur->size*kMemoryAlign;

        i++;
        cur = cur->next;
    }
    snprintf(buffer, SIZE,
            "  size allocated: %u (%u KB)\n", int(size), int(size/1024));
    result.append(buffer);
}
474
475
476}; // namespace android
477