commit - 1fb5a7cc560039198898eddd356f0feebfa356b1
commit + e9fc51d0e490283801a0f2f9768047b40260d547
blob - /dev/null
blob + 08a810ddc970fa6a60b238a4593a0fa756dd6c51 (mode 644)
--- /dev/null
+++ changelogs/unreleased/gh-10551-fix-index-crash-on-OOM-on-rollback.md
+## bugfix/memtx
+
+- Fixed a crash on transaction rollback if memory usage is close to the limit
+ (gh-10551).
blob - bc1241aca4399738b0edf90964601536aa1e4b5d
blob + c57fe518d622e3fa14362c75f46ebbddf7ae1ae6
--- src/box/memtx_engine.cc
+++ src/box/memtx_engine.cc
#include <small/small.h>
#include <small/mempool.h>
+#include "assoc.h"
#include "fiber.h"
#include "errinj.h"
#include "coio_file.h"
void *p = memtx->reserved_extents;
while (p != NULL) {
void *next = *(void **)p;
- mempool_free(&memtx->index_extent_pool, p);
+ memtx_index_extent_free(memtx, p);
p = next;
}
mempool_destroy(&memtx->index_extent_pool);
slab_cache_destroy(&memtx->index_slab_cache);
+ mh_ptr_delete(memtx->malloc_extents);
/*
* The order is vital: allocator destroy should take place before
* slab cache destroy!
MEMTX_ITERATOR_SIZE);
memtx->num_reserved_extents = 0;
memtx->reserved_extents = NULL;
+ memtx->malloc_extents = mh_ptr_new();
memtx->state = MEMTX_INITIALIZED;
memtx->max_tuple_size = MAX_TUPLE_SIZE;
memtx->reserved_extents = *(void **)memtx->reserved_extents;
return result;
}
- ERROR_INJECT(ERRINJ_INDEX_ALLOC, {
- /* same error as in mempool_alloc */
- diag_set(OutOfMemory, MEMTX_EXTENT_SIZE,
- "mempool", "new slab");
- return NULL;
- });
+ ERROR_INJECT(ERRINJ_INDEX_ALLOC, { goto fail; });
void *ret;
while ((ret = mempool_alloc(&memtx->index_extent_pool)) == NULL) {
bool stop;
break;
}
if (ret == NULL)
- diag_set(OutOfMemory, MEMTX_EXTENT_SIZE,
- "mempool", "new slab");
+ goto fail;
return ret;
+fail:
+ if (in_txn() != NULL && in_txn()->status == TXN_ABORTED) {
+ /*
+ * We cannot sanely reserve blocks for rollback because strictly
+ * speaking the whole index can change. We cannot tolerate
+ * allocation failure also. So just allocate outside of the
+ * memtx arena quota.
+ */
+ ret = xmalloc(MEMTX_EXTENT_SIZE);
+ mh_ptr_put(memtx->malloc_extents, (const void **)&ret,
+ NULL, NULL);
+ return ret;
+ }
+ diag_set(OutOfMemory, MEMTX_EXTENT_SIZE, "mempool", "new slab");
+ return NULL;
}
/**
memtx_index_extent_free(void *ctx, void *extent)
{
struct memtx_engine *memtx = (struct memtx_engine *)ctx;
- return mempool_free(&memtx->index_extent_pool, extent);
+ mh_int_t p = mh_ptr_find(memtx->malloc_extents, extent, NULL);
+ if (p != mh_end(memtx->malloc_extents)) {
+ mh_ptr_del(memtx->malloc_extents, p, NULL);
+ free(extent);
+ return;
+ }
+ mempool_free(&memtx->index_extent_pool, extent);
}
/**
blob - 5b001867cee0b8b98605c796161e4498e9ee9093
blob + 25ad39f533d25369161a04ba78a75b3080f9033e
--- src/box/memtx_engine.h
+++ src/box/memtx_engine.h
* Format used for allocating functional index keys.
*/
struct tuple_format *func_key_format;
+ /** Set of extents allocated using malloc. */
+ struct mh_ptr_t *malloc_extents;
};
struct memtx_gc_task;
blob - /dev/null
blob + b4772ee3f39d51677b6c7bcad15b818091e87441 (mode 644)
--- /dev/null
+++ test/box-luatest/gh_10551_massive_index_change_on_rollback_test.lua
+local server = require('luatest.server')
+local t = require('luatest')
+
+local g = t.group()
+
+-- Suite setup: start a fresh server. The test depends on the error
+-- injection API (box.error.injection), which exists only in debug builds.
+g.before_all(function(cg)
+ t.tarantool.skip_if_not_debug()
+ cg.server = server:new()
+ cg.server:start()
+end)
+
+-- Suite teardown: destroy the test server and its working directory.
+g.after_all(function(cg)
+ cg.server:drop()
+end)
+
+-- Per-test setup: create 10 memtx spaces ('test1'..'test10'), each with a
+-- primary index and a single tuple {1}, so that 10 independent transactions
+-- can later be rolled back concurrently.
+g.before_test('test_massive_index_change_on_rollback', function(cg)
+ cg.server:exec(function()
+ for i = 1,10 do
+ local s = box.schema.create_space('test' .. i)
+ s:create_index('pk')
+ s:insert({1})
+ end
+ end)
+end)
+
+-- Per-test teardown: drop the 10 test spaces; the nil-check keeps cleanup
+-- safe even if the test failed before all spaces were created.
+g.after_test('test_massive_index_change_on_rollback', function(cg)
+ cg.server:exec(function()
+ for i = 1,10 do
+ local s = box.space['test' .. i]
+ if s ~= nil then
+ s:drop()
+ end
+ end
+ end)
+end)
+
+-- Regression test for gh-10551: rolling back many transactions while index
+-- extent allocation is failing must not crash. It drives the malloc()
+-- fallback added to memtx_index_extent_alloc for aborted transactions.
+g.test_massive_index_change_on_rollback = function(cg)
+ cg.server:exec(function()
+ local fiber = require('fiber')
+ local errinj = box.error.injection
+ local fibers = {}
+ -- Stall the WAL so all 10 inserts below stay in flight at once.
+ errinj.set('ERRINJ_WAL_DELAY', true)
+ for i = 1,10 do
+ local f = fiber.new(function()
+ local s = box.space['test' .. i]
+ s:insert({2})
+ end)
+ fibers[i] = f
+ f:set_joinable(true)
+ f:wakeup()
+ -- Yield so the new fiber runs until it blocks on the stalled WAL.
+ fiber.yield()
+ end
+ fiber.create(function()
+ box.snapshot()
+ end)
+ -- Make index extent allocation fail and WAL writes error out, then
+ -- release the WAL: every pending transaction gets rolled back while
+ -- the regular extent allocator reports failure, forcing the rollback
+ -- path to allocate extents outside the memtx arena.
+ errinj.set('ERRINJ_INDEX_ALLOC', true)
+ errinj.set('ERRINJ_WAL_WRITE_DISK', true)
+ errinj.set('ERRINJ_WAL_DELAY', false)
+ -- Test rollback is correct.
+ for i = 1,10 do
+ fibers[i]:join()
+ local s = box.space['test' .. i]
+ t.assert_equals(s:count(), 1)
+ end
+ errinj.set('ERRINJ_INDEX_ALLOC', false)
+ errinj.set('ERRINJ_WAL_WRITE_DISK', false)
+ -- Now index memory is allocated using malloc. Do insertions
+ -- to allocate index blocks as usual and thus mix malloc and default
+ -- blocks. Check index works in this case.
+ for i = 1,10 do
+ local s = box.space['test' .. i]
+ for j = 2, 1000 do
+ s:insert({j})
+ end
+ for j = 1, 1000 do
+ s:delete({j})
+ end
+ end
+ end)
+end