Mercurial > hg > octave-lyh
changeset 13981:5f8bc2f145f5
clean up octave_chunk_buffer storage before exit
* oct-locbuf.h, oct-locbuf.cc (octave_chunk_buffer::active):
New member variable.
(octave_chunk_buffer::clear): New static function.
(octave_chunk_buffer::octave_chunk_buffer): Update active here.
(octave_chunk_buffer::~octave_chunk_buffer): Likewise.
* toplev.cc (clean_up_and_exit): Call octave_chunk_buffer::clear.
author | John W. Eaton <jwe@octave.org> |
---|---|
date | Sat, 03 Dec 2011 03:30:27 -0500 |
parents | bd2be36fd949 |
children | 6cdfbe90e2ab |
files | liboctave/oct-locbuf.cc liboctave/oct-locbuf.h src/toplev.cc |
diffstat | 3 files changed, 127 insertions(+), 53 deletions(-) [+] |
line wrap: on
line diff
--- a/liboctave/oct-locbuf.cc +++ b/liboctave/oct-locbuf.cc @@ -25,44 +25,55 @@ #endif #include <iostream> + +#include "lo-error.h" #include "oct-locbuf.h" -// Query for configured chunk size, and if not defined, set it to 32 MB. -// FIXME: 32MB is hard-coded. Maybe we could use something better, like -// querying for available physical memory. +// FIXME -- Maybe we should query for available physical memory? + #ifndef OCTAVE_LOCBUF_CHUNKSIZE_MB #define OCTAVE_LOCBUF_CHUNKSIZE_MB 32 #endif // Each chunk will be at least this big. + const size_t octave_chunk_buffer::chunk_size = static_cast<size_t> (OCTAVE_LOCBUF_CHUNKSIZE_MB) << 20; -char *octave_chunk_buffer::top = 0, *octave_chunk_buffer::chunk = 0; +char *octave_chunk_buffer::top = 0; +char *octave_chunk_buffer::chunk = 0; size_t octave_chunk_buffer::left = 0; +size_t octave_chunk_buffer::active = 0; octave_chunk_buffer::octave_chunk_buffer (size_t size) : cnk (0), dat (0) { - // Alignment mask. The size of double or long int, whichever is greater. - // All data will be aligned to this size. If it's not enough for a type, - // that type should not be declared as POD. + // Alignment mask. The size of double or long int, whichever is + // greater. All data will be aligned to this size. If it's not + // enough for a type, that type should not be declared as POD. + static const size_t align_mask = (sizeof (long) < sizeof (double) ? sizeof (double) : sizeof (long)) - 1; - if (! size) return; + active++; + + if (! size) + return; + // Align size. Note that size_t is unsigned, so size-1 must correctly // wrap around. + size = ((size - 1) | align_mask) + 1; if (size > left) { // Big buffers (> 1/8 chunk) will be allocated as stand-alone and // won't disrupt the chain. + if (size > chunk_size >> 3) { - // Use new [] to get std::bad_alloc if out of memory. Could as - // well be std::malloc and handle that ourselves. + // Use new [] to get std::bad_alloc if out of memory. 
+ dat = new char [size]; return; } @@ -73,6 +84,7 @@ } // Now allocate memory from the chunk and update state. + cnk = chunk; dat = top; left -= size; @@ -81,24 +93,55 @@ octave_chunk_buffer::~octave_chunk_buffer (void) { + active--; + if (cnk == chunk) { - // Our chunk is still the active one. Just restore the state. + // Our chunk is still the active one. Just restore the state. + left += top - dat; top = dat; } - else if (! cnk) + else { - // We were a stand-alone buffer. - delete [] dat; + if (cnk) + { + // Responsible for deletion. + + delete [] chunk; + chunk = cnk; + top = dat; + + // FIXME -- the following calculation of remaining data will + // only work if each chunk has the same chunk_size. + + left = chunk_size - (dat - cnk); + } + else + { + // We were a stand-alone buffer. + + delete [] dat; + } + } +} + +// Clear the final chunk of allocated memory. + +void +octave_chunk_buffer::clear (void) +{ + if (active == 0) + { + delete [] chunk; + chunk = 0; + top = 0; + left = 0; } else { - // Responsible for deletion. - delete [] chunk; - chunk = cnk; - top = dat; - // FIXME: This will only work if chunk_size is constant. - left = chunk_size - (dat - cnk); + (*current_liboctave_warning_handler) + ("octave_chunk_buffer::clear: %d active allocations remain!", + active); } }
--- a/liboctave/oct-locbuf.h +++ b/liboctave/oct-locbuf.h @@ -26,8 +26,9 @@ #include <cstddef> #include "oct-cmplx.h" -// The default local buffer simply encapsulates an *array* pointer that gets -// delete[]d automatically. For common POD types, we provide specializations. +// The default local buffer simply encapsulates an *array* pointer +// that gets deleted automatically. For common POD types, we provide +// specializations. template <class T> class octave_local_buffer @@ -50,16 +51,17 @@ octave_local_buffer& operator = (const octave_local_buffer&); }; -// For buffers of POD types, we'll be more smart. There is one thing that -// differentiates a local buffer from a dynamic array - the local buffers, if -// not manipulated improperly, have a FIFO semantics, meaning that if buffer B -// is allocated after buffer A, B *must* be deallocated before A. This is -// *guaranteed* if you use local buffer exclusively through the -// OCTAVE_LOCAL_BUFFER macro, because the C++ standard *mandates* explicit -// local objects be destroyed in reverse order of declaration. -// Therefore, we can avoid memory fragmentation by allocating fairly large -// chunks of memory and serving local buffers from them in a stack-like manner. -// The first returning buffer in previous chunk will be responsible for +// For buffers of POD types, we'll be smarter. There is one thing +// that differentiates a local buffer from a dynamic array - the local +// buffers, if not manipulated improperly, have a FIFO semantics, +// meaning that if buffer B is allocated after buffer A, B *must* be +// deallocated before A. This is *guaranteed* if you use local buffer +// exclusively through the OCTAVE_LOCAL_BUFFER macro, because the C++ +// standard requires that explicit local objects be destroyed in +// reverse order of declaration. Therefore, we can avoid memory +// fragmentation by allocating fairly large chunks of memory and +// serving local buffers from them in a stack-like manner. 
The first +// returning buffer in previous chunk will be responsible for // deallocating the chunk. class octave_chunk_buffer @@ -72,13 +74,30 @@ char *data (void) const { return dat; } + static void clear (void); + private: + + // The number of bytes we allocate for each large chunk of memory we + // manage. static const size_t chunk_size; - static char *top, *chunk; + // Pointer to the end of the last allocation. + static char *top; + + // Pointer to the current active chunk. + static char *chunk; + + // The number of bytes remaining in the active chunk. static size_t left; + // The number of active allocations. + static size_t active; + + // Pointer to the current chunk. char *cnk; + + // Pointer to the beginning of the most recent allocation. char *dat; // No copying! @@ -86,8 +105,8 @@ octave_chunk_buffer& operator = (const octave_chunk_buffer&); }; -// This specializes octave_local_buffer to use the chunked buffer mechanism -// for POD types. +// This specializes octave_local_buffer to use the chunked buffer +// mechanism for POD types. #define SPECIALIZE_POD_BUFFER(TYPE) \ template <> \ class octave_local_buffer<TYPE> : private octave_chunk_buffer \ @@ -143,32 +162,37 @@ } }; -// If the compiler supports dynamic stack arrays, we can use the attached hack -// to place small buffer arrays on the stack. It may be even faster than our -// obstack-like optimization, but is dangerous because stack is a very limited -// resource, so we disable it. -#if 0 //defined (HAVE_DYNAMIC_AUTO_ARRAYS) +// If the compiler supports dynamic stack arrays, we can use the +// attached hack to place small buffer arrays on the stack. It may be +// even faster than our obstack-like optimization, but is dangerous +// because stack is a very limited resource, so we disable it. + +#if 0 // defined (HAVE_DYNAMIC_AUTO_ARRAYS) // Maximum buffer size (in bytes) to be placed on the stack. 
#define OCTAVE_LOCAL_BUFFER_MAX_STACK_SIZE 8192 -// If we have automatic arrays, we use an automatic array if the size is small -// enough. To avoid possibly evaluating `size' multiple times, we first cache -// it. Note that we always construct both the stack array and the -// octave_local_buffer object, but only one of them will be nonempty. +// If we have automatic arrays, we use an automatic array if the size +// is small enough. To avoid possibly evaluating `size' multiple +// times, we first cache it. Note that we always construct both the +// stack array and the octave_local_buffer object, but only one of +// them will be nonempty. #define OCTAVE_LOCAL_BUFFER(T, buf, size) \ const size_t _bufsize_ ## buf = size; \ const bool _lbufaut_ ## buf = _bufsize_ ## buf * sizeof (T) \ <= OCTAVE_LOCAL_BUFFER_MAX_STACK_SIZE; \ T _bufaut_ ## buf [_lbufaut_ ## buf ? _bufsize_ ## buf : 0]; \ - octave_local_buffer<T> _bufheap_ ## buf (!_lbufaut_ ## buf ? _bufsize_ ## buf : 0); \ - T *buf = _lbufaut_ ## buf ? _bufaut_ ## buf : static_cast<T *> (_bufheap_ ## buf) + octave_local_buffer<T> _bufheap_ ## buf \ + (!_lbufaut_ ## buf ? _bufsize_ ## buf : 0); \ + T *buf = _lbufaut_ ## buf \ + ? _bufaut_ ## buf : static_cast<T *> (_bufheap_ ## buf) #else -// If we don't have automatic arrays, we simply always use octave_local_buffer. +// If we don't have automatic arrays, we simply always use +// octave_local_buffer. #define OCTAVE_LOCAL_BUFFER(T, buf, size) \ octave_local_buffer<T> _buffer_ ## buf (size); \ @@ -176,13 +200,14 @@ #endif -// Yeah overloading macros would be nice. -// Note: we use weird variables in the for loop to avoid warnings about -// shadowed parameters. +// Note: we use weird variables in the for loop to avoid warnings +// about shadowed parameters. 
+ #define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value) \ OCTAVE_LOCAL_BUFFER(T, buf, size); \ for (size_t _buf_iter = 0, _buf_size = size; \ - _buf_iter < _buf_size; _buf_iter++) buf[_buf_iter] = value + _buf_iter < _buf_size; _buf_iter++) \ + buf[_buf_iter] = value #endif
--- a/src/toplev.cc +++ b/src/toplev.cc @@ -680,6 +680,8 @@ SAFE_CALL (sysdep_cleanup, ()) + SAFE_CALL (octave_chunk_buffer::clear, ()) + if (octave_exit) (*octave_exit) (retval == EOF ? 0 : retval); } @@ -709,9 +711,13 @@ if (! error_state) { - quitting_gracefully = true; + // Instead of simply calling exit, we simulate an interrupt + // with a request to exit cleanly so that no matter where the + // call to quit occurs, we will run the unwind_protect stack, + // clear the OCTAVE_LOCAL_BUFFER allocations, etc. before + // exiting. - // Simulate interrupt. + quitting_gracefully = true; octave_interrupt_state = -1;