// -*- coding: utf-8 -*-
// Copyright (C) 2011, 2015-2018 Laboratoire de Recherche et
// Développement de l'Epita (LRDE)
//
// This file is part of Spot, a model checking library.
//
// Spot is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 3 of the License, or
// (at your option) any later version.
//
// Spot is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
// or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
// License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

#pragma once

#include <spot/misc/common.hh>
#include <climits>
#include <cstddef>

#if SPOT_DEBUG && defined(HAVE_VALGRIND_MEMCHECK_H)
#include <valgrind/memcheck.h>
#endif

namespace spot
{
  namespace
  {
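    // clz(n) computes the number of leading zero bits in n; e.g. with
    // a 64-bit size_t, clz(1) == 63 and clz(255) == 56.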
// use gcc and clang built-in functions
// both compilers use the same function names, and define __GNUC__
#if __GNUC__
    template<class T>
    struct _clz;

    template<>
    struct _clz<unsigned>
    {
      unsigned
      operator()(unsigned n) const noexcept
      {
        return __builtin_clz(n);
      }
    };

    template<>
    struct _clz<unsigned long>
    {
      unsigned long
      operator()(unsigned long n) const noexcept
      {
        return __builtin_clzl(n);
      }
    };

    template<>
    struct _clz<unsigned long long>
    {
      unsigned long long
      operator()(unsigned long long n) const noexcept
      {
        return __builtin_clzll(n);
      }
    };

    static
    size_t
    clz(size_t n)
    {
      return _clz<size_t>()(n);
    }

#else
    size_t
    clz(size_t n)
    {
      size_t res = CHAR_BIT*sizeof(size_t);
      while (n)
        {
          --res;
          n >>= 1;
        }
      return res;
    }
#endif
  }

  /// An enum class to define the policy of the fixed_size_pool.
  /// Two policies are proposed for the pool:
  ///   - Safe: ensure (when used with memcheck) that each allocation
  ///     is matched by exactly one deallocation
  ///   - Unsafe: rely on the fact that destroying the pool also
  ///     releases all the elements it contains.  This is useful in a
  ///     multithreaded environment with multiple fixed_size_pool
  ///     instances allocating the same resource: in that case it is
  ///     hard to detect which pool has allocated a given resource.
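  ///
  /// For instance, a pool whose blocks are always returned to the
  /// pool that allocated them can use pool_type::Safe to benefit from
  /// memcheck's tracking, while pools that exchange recycled blocks
  /// across threads should use pool_type::Unsafe.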
  enum class pool_type { Safe, Unsafe };

  /// A fixed-size memory pool implementation.
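  ///
  /// Typical use, as a sketch (`my_obj` stands in for any type whose
  /// size fits the pool's block size):
  /// \code
  ///   spot::fixed_size_pool<spot::pool_type::Safe> pool(sizeof(my_obj));
  ///   void* mem = pool.allocate();
  ///   my_obj* obj = new(mem) my_obj;  // placement-construct in the block
  ///   obj->~my_obj();                 // destroy before recycling
  ///   pool.deallocate(obj);           // return the block to the freelist
  /// \endcode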
  template<pool_type Kind>
  class SPOT_API fixed_size_pool
  {
  public:
    /// Create a pool allocating objects of \a size bytes.
    fixed_size_pool(size_t size)
    : size_(
          [](size_t size)
          {
            // to properly store chunks and freelist, we need size to be at
            // least the size of a block_
            if (size < sizeof(block_))
              size = sizeof(block_);
            // powers of 2 are a good alignment
            if (!(size & (size-1)))
              return size;
            // small numbers are best aligned to the next power of 2
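            // e.g. with a 64-bit size_t, size == 5 has clz(5) == 61,
            // so this yields size_t{1} << 3 == 8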
            else if (size < alignof(std::max_align_t))
              return size_t{1} << (CHAR_BIT*sizeof(size_t) - clz(size));
            else
              {
                size_t mask = alignof(std::max_align_t)-1;
                return (size + mask) & ~mask;
              }
          }(size)),
      freelist_(nullptr), free_start_(nullptr),
      free_end_(nullptr), chunklist_(nullptr)
  {}

    /// Free any memory allocated by this pool.
    ~fixed_size_pool()
    {
      while (chunklist_)
        {
          chunk_* prev = chunklist_->prev;
          ::operator delete(chunklist_);
          chunklist_ = prev;
        }
    }

    /// Allocate one block of the size fixed at construction.
    void*
    allocate()
    {
      block_* f = freelist_;
      // If we have free blocks available, return the first one.
      if (f)
        {
#if SPOT_DEBUG && defined(HAVE_VALGRIND_MEMCHECK_H)
          if (Kind == pool_type::Safe)
            {
              VALGRIND_MALLOCLIKE_BLOCK(f, size_, 0, false);
              // field f->next is initialized: prevents valgrind from
              // complaining about jumps depending on uninitialized memory
              VALGRIND_MAKE_MEM_DEFINED(f, sizeof(block_*));
            }
#endif
          freelist_ = f->next;
          return f;
        }

      // Else, create a block out of the last chunk of allocated
      // memory.

      // If the last chunk has been fully used, allocate a new one.
      if (free_start_ + size_ > free_end_)
        {
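          // Request room for roughly 8192 blocks at once (at least
          // 1MB).  The 64 bytes subtracted presumably leave space for
          // the allocator's own bookkeeping, keeping the underlying
          // allocation just under a round size.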
          const size_t requested = (size_ > 128 ? size_ : 128) * 8192 - 64;
          chunk_* c = reinterpret_cast<chunk_*>(::operator new(requested));
          c->prev = chunklist_;
          chunklist_ = c;

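          // data_ and prev share the start of the chunk, and size_ is
          // at least sizeof(block_*), so skip the first size_ bytes to
          // keep the prev pointer intact.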
          free_start_ = c->data_ + size_;
          free_end_ = c->data_ + requested;
        }

      void* res = free_start_;
      free_start_ += size_;
#if SPOT_DEBUG && defined(HAVE_VALGRIND_MEMCHECK_H)
      if (Kind == pool_type::Safe)
        {
          VALGRIND_MALLOCLIKE_BLOCK(res, size_, 0, false);
        }
#endif
      return res;
    }

    /// \brief Recycle the block of memory pointed to by \a ptr.
    ///
    /// Despite the name, the memory is not really deallocated in the
    /// "delete" sense: it is still owned by the pool and will be
    /// reused by allocate as soon as possible.  The memory is only
    /// freed when the pool is destroyed.
    void
    deallocate(void* ptr)
    {
      SPOT_ASSERT(ptr);
      block_* b = reinterpret_cast<block_*>(ptr);
      b->next = freelist_;
      freelist_ = b;
#if SPOT_DEBUG && defined(HAVE_VALGRIND_MEMCHECK_H)
      if (Kind == pool_type::Safe)
        {
          VALGRIND_FREELIKE_BLOCK(ptr, 0);
        }
#endif
    }

  private:
    const size_t size_;
    struct block_ { block_* next; }* freelist_;
    char* free_start_;
    char* free_end_;
    // chunk = several agglomerated blocks
    union chunk_ { chunk_* prev; char data_[1]; }* chunklist_;
  };
}