IB, SQL: InnoDB partitioning [closes #118]
* native InnoDB partitioning for BY SYSTEM_TIME partitions.
This commit is contained in:
parent
fb801289f3
commit
fc7da4dd4f
527
include/priority_queue.h
Normal file
527
include/priority_queue.h
Normal file
@ -0,0 +1,527 @@
|
|||||||
|
/* Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; version 2 of the License.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with this program; if not, write to the Free Software
|
||||||
|
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
|
||||||
|
|
||||||
|
#ifndef PRIORITY_QUEUE_INCLUDED
|
||||||
|
#define PRIORITY_QUEUE_INCLUDED
|
||||||
|
|
||||||
|
#include "my_dbug.h"
|
||||||
|
|
||||||
|
#include <functional>
|
||||||
|
#include <utility>
|
||||||
|
#include <vector>
|
||||||
|
#include "template_utils.h"
|
||||||
|
|
||||||
|
#if defined(EXTRA_CODE_FOR_UNIT_TESTING)
|
||||||
|
#include <iostream>
|
||||||
|
#include <sstream>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef MY_ATTRIBUTE
|
||||||
|
#if defined(__GNUC__)
|
||||||
|
# define MY_ATTRIBUTE(A) __attribute__(A)
|
||||||
|
#else
|
||||||
|
# define MY_ATTRIBUTE(A)
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace priority_queue_unittest { class PriorityQueueTest; };
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
Implements a priority queue using a vector-based max-heap.
|
||||||
|
|
||||||
|
A priority queue is a container specifically designed such that its first
|
||||||
|
element is always the greatest of the elements it contains, according to
|
||||||
|
some strict weak ordering criterion.
|
||||||
|
|
||||||
|
For object locality, the implementation is vector-based, rather than
|
||||||
|
node-based.
|
||||||
|
|
||||||
|
The priority queue is mutable, which means that the priority of an element
|
||||||
|
can be changed. See increase/decrease/update member functions.
|
||||||
|
The typical use case is to change the value/priority of the root node.
|
||||||
|
|
||||||
|
We provide iterators, which can be used to visit all elements.
|
||||||
|
Iterators do not visit queue elements in priority order.
|
||||||
|
Iterators should not be used to change the priority of elements.
|
||||||
|
|
||||||
|
The underlying container must be
|
||||||
|
constructible from an iterator range, should provide const and
|
||||||
|
non-const random access iterators to access its elements, as well as
|
||||||
|
the following operations:
|
||||||
|
- size()
|
||||||
|
- empty()
|
||||||
|
- push_back()
|
||||||
|
- pop_back()
|
||||||
|
- swap()
|
||||||
|
- clear()
|
||||||
|
- capacity()
|
||||||
|
- reserve()
|
||||||
|
- max_size()
|
||||||
|
|
||||||
|
@tparam T Type of the elements of the priority queue.
|
||||||
|
@tparam Container Type of the underlying container object where elements
|
||||||
|
are stored. Its value_type shall be T.
|
||||||
|
@tparam Less A binary predicate that takes two elements (of type T)
|
||||||
|
and returns a bool. The expression less(a,b), where
|
||||||
|
less is an object of this type and a and b are elements
|
||||||
|
in the container, shall return true if a is considered
|
||||||
|
to go before b in the strict weak ordering the
|
||||||
|
function defines.
|
||||||
|
*/
|
||||||
|
template
|
||||||
|
<
|
||||||
|
typename T,
|
||||||
|
typename Container = std::vector<T>,
|
||||||
|
typename Less = std::less<typename Container::value_type>
|
||||||
|
>
|
||||||
|
class Priority_queue : public Less
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
typedef Container container_type;
|
||||||
|
typedef Less less_type;
|
||||||
|
typedef typename container_type::value_type value_type;
|
||||||
|
typedef typename container_type::size_type size_type;
|
||||||
|
typedef typename container_type::iterator iterator;
|
||||||
|
typedef typename container_type::const_iterator const_iterator;
|
||||||
|
typedef typename container_type::allocator_type allocator_type;
|
||||||
|
|
||||||
|
friend class priority_queue_unittest::PriorityQueueTest;
|
||||||
|
private:
|
||||||
|
// Deriving from Less allows empty base-class optimization in some cases.
|
||||||
|
typedef Less Base;
|
||||||
|
|
||||||
|
// Returns the index of the parent node of node i.
|
||||||
|
static size_type parent(size_type i)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(i != 0);
|
||||||
|
return (--i) >> 1; // (i - 1) / 2
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the index of the left child of node i.
|
||||||
|
static size_type left(size_type i)
|
||||||
|
{
|
||||||
|
return (i << 1) | 1; // 2 * i + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns the index of the right child of node i.
|
||||||
|
static size_type right(size_type i)
|
||||||
|
{
|
||||||
|
return (++i) << 1; // 2 * i + 2
|
||||||
|
}
|
||||||
|
|
||||||
|
void heapify(size_type i, size_type last)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(i < size());
|
||||||
|
size_type largest = i;
|
||||||
|
|
||||||
|
do
|
||||||
|
{
|
||||||
|
i = largest;
|
||||||
|
size_type l = left(i);
|
||||||
|
size_type r = right(i);
|
||||||
|
|
||||||
|
if (l < last && Base::operator()(m_container[i], m_container[l]))
|
||||||
|
{
|
||||||
|
largest = l;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (r < last && Base::operator()(m_container[largest], m_container[r]))
|
||||||
|
{
|
||||||
|
largest = r;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (largest != i)
|
||||||
|
{
|
||||||
|
std::swap(m_container[i], m_container[largest]);
|
||||||
|
}
|
||||||
|
} while (largest != i);
|
||||||
|
}
|
||||||
|
|
||||||
|
void heapify(size_type i)
|
||||||
|
{
|
||||||
|
heapify(i, m_container.size());
|
||||||
|
}
|
||||||
|
|
||||||
|
void reverse_heapify(size_type i)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(i < size());
|
||||||
|
while (i > 0 && !Base::operator()(m_container[i], m_container[parent(i)]))
|
||||||
|
{
|
||||||
|
std::swap(m_container[parent(i)], m_container[i]);
|
||||||
|
i = parent(i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sets the value of element i, and rebuilds the priority queue.
|
||||||
|
void decrease_key(size_type i, value_type const &x)
|
||||||
|
{
|
||||||
|
m_container[i] = x;
|
||||||
|
heapify(i);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sets the value of element i, and rebuilds the priority queue.
|
||||||
|
void increase_key(size_type i, value_type const &x)
|
||||||
|
{
|
||||||
|
m_container[i] = x;
|
||||||
|
reverse_heapify(i);
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
/// Constructs an empty priority queue.
|
||||||
|
Priority_queue(Less const &less = Less(),
|
||||||
|
const allocator_type& alloc = allocator_type())
|
||||||
|
: Base(less),
|
||||||
|
m_container(alloc)
|
||||||
|
{}
|
||||||
|
|
||||||
|
/// Constructs a heap of the objects between first and beyond.
|
||||||
|
template <typename Input_iterator>
|
||||||
|
Priority_queue(Input_iterator first, Input_iterator beyond,
|
||||||
|
Less const &less = Less(),
|
||||||
|
const allocator_type& alloc = allocator_type())
|
||||||
|
: Base(less),
|
||||||
|
m_container(first, beyond, alloc)
|
||||||
|
{
|
||||||
|
build_heap();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Constructs a heap based on input argument.
|
||||||
|
void assign(const container_type &container)
|
||||||
|
{
|
||||||
|
m_container= container;
|
||||||
|
build_heap();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Constructs a heap based on container contents.
|
||||||
|
Can also be used when many elements have changed.
|
||||||
|
*/
|
||||||
|
void build_heap()
|
||||||
|
{
|
||||||
|
if (m_container.size() > 1)
|
||||||
|
{
|
||||||
|
for (size_type i = parent(m_container.size() - 1); i > 0; --i)
|
||||||
|
{
|
||||||
|
heapify(i);
|
||||||
|
}
|
||||||
|
heapify(0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a const reference to the top element of the priority queue.
|
||||||
|
value_type const &top() const
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(!empty());
|
||||||
|
return m_container[0];
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a reference to the top element of the priority queue.
|
||||||
|
value_type& top()
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(!empty());
|
||||||
|
return m_container[0];
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Inserts an element in the priority queue.
|
||||||
|
|
||||||
|
@param x value to be pushed.
|
||||||
|
@retval true if out-of-memory, false otherwise.
|
||||||
|
*/
|
||||||
|
bool push(value_type const &x)
|
||||||
|
{
|
||||||
|
try
|
||||||
|
{
|
||||||
|
m_container.push_back(x);
|
||||||
|
}
|
||||||
|
catch(std::bad_alloc const &)
|
||||||
|
{
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
reverse_heapify(m_container.size() - 1);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Pops the top-most element in the priority queue.
|
||||||
|
void pop()
|
||||||
|
{
|
||||||
|
remove(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes the element at position i from the priority queue.
|
||||||
|
void remove(size_type i)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(i < size());
|
||||||
|
|
||||||
|
if (i == m_container.size() - 1)
|
||||||
|
{
|
||||||
|
m_container.pop_back();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
m_container[i] = m_container[m_container.size() - 1];
|
||||||
|
m_container.pop_back();
|
||||||
|
heapify(i);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Decreases the priority of the element at position i, where the
|
||||||
|
new priority is x.
|
||||||
|
*/
|
||||||
|
void decrease(size_type i, value_type const &x)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(i < size());
|
||||||
|
DBUG_ASSERT(!Base::operator()(m_container[i], x));
|
||||||
|
decrease_key(i, x);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Increases the priority of the element at position i, where the
|
||||||
|
new priority is x.
|
||||||
|
*/
|
||||||
|
void increase(size_type i, value_type const &x)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(i < size());
|
||||||
|
DBUG_ASSERT(!Base::operator()(x, m_container[i]));
|
||||||
|
increase_key(i, x);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Changes the priority of the element at position i, where the
|
||||||
|
new priority is x.
|
||||||
|
*/
|
||||||
|
void update(size_type i, value_type const &x)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(i < size());
|
||||||
|
if (Base::operator()(x, m_container[i]))
|
||||||
|
{
|
||||||
|
decrease_key(i, x);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
increase_key(i, x);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Assumes that the i-th element's value has increased
|
||||||
|
and rebuilds the priority queue.
|
||||||
|
*/
|
||||||
|
void increase(size_type i)
|
||||||
|
{
|
||||||
|
reverse_heapify(i);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Assumes that the i-th element's value has decreased
|
||||||
|
and rebuilds the priority queue.
|
||||||
|
*/
|
||||||
|
void decrease(size_type i)
|
||||||
|
{
|
||||||
|
heapify(i);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Assumes that the i-th element's value has changed
|
||||||
|
and rebuilds the priority queue.
|
||||||
|
*/
|
||||||
|
void update(size_type i)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(i < size());
|
||||||
|
if (i == 0 || Base::operator()(m_container[i], m_container[parent(i)]))
|
||||||
|
{
|
||||||
|
heapify(i);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
reverse_heapify(i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Assumes that the top element's value has changed
|
||||||
|
and rebuilds the priority queue.
|
||||||
|
*/
|
||||||
|
void update_top()
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(!empty());
|
||||||
|
heapify(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the number of elements of the priority queue
|
||||||
|
size_type size() const { return m_container.size(); }
|
||||||
|
|
||||||
|
/// Returns true if the priority queue is empty
|
||||||
|
bool empty() const { return m_container.empty(); }
|
||||||
|
|
||||||
|
/// Returns a const reference to the i-th element in the underlying container.
|
||||||
|
value_type const& operator[](size_type i) const
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(i < size());
|
||||||
|
return m_container[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a reference to the i-th element in the underlying container.
|
||||||
|
value_type& operator[](size_type i)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(i < size());
|
||||||
|
return m_container[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a const iterator to the first element of the underlying container.
|
||||||
|
const_iterator begin() const
|
||||||
|
{
|
||||||
|
return m_container.begin();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a const iterator to the end element of the underlying container.
|
||||||
|
const_iterator end() const
|
||||||
|
{
|
||||||
|
return m_container.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns an iterator to the first element of the underlying container.
|
||||||
|
iterator begin()
|
||||||
|
{
|
||||||
|
return m_container.begin();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns an iterator to the end element of the underlying container.
|
||||||
|
iterator end()
|
||||||
|
{
|
||||||
|
return m_container.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Swaps the contents of two priority queues.
|
||||||
|
void swap(Priority_queue& other)
|
||||||
|
{
|
||||||
|
std::swap(static_cast<Base&>(*this), static_cast<Base&>(other));
|
||||||
|
m_container.swap(other.m_container);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true if the priority queue has the heap property.
|
||||||
|
bool is_valid() const
|
||||||
|
{
|
||||||
|
for (size_type i = 1; i < m_container.size(); ++i)
|
||||||
|
{
|
||||||
|
if (Base::operator()(m_container[parent(i)], m_container[i]))
|
||||||
|
{
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Sorts the elements of the priority queue according to the strict
|
||||||
|
partial ordering defined by the object of type Less passed to
|
||||||
|
the priority queue.
|
||||||
|
|
||||||
|
The heap property of the priority queue is invalidated by this
|
||||||
|
operation.
|
||||||
|
*/
|
||||||
|
void sort()
|
||||||
|
{
|
||||||
|
if (!m_container.empty())
|
||||||
|
{
|
||||||
|
for (size_type i = m_container.size() - 1; i > 0; --i)
|
||||||
|
{
|
||||||
|
std::swap(m_container[i], m_container[0]);
|
||||||
|
heapify(0, i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clears the priority queue.
|
||||||
|
void clear()
|
||||||
|
{
|
||||||
|
m_container.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clears the priority queue, but deletes all elements first.
|
||||||
|
void delete_elements()
|
||||||
|
{
|
||||||
|
delete_container_pointers(m_container);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the capacity of the internal container.
|
||||||
|
size_type capacity() const
|
||||||
|
{
|
||||||
|
return m_container.capacity();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Reserves space for array elements.
|
||||||
|
|
||||||
|
@param n number of elements.
|
||||||
|
@retval true if out-of-memory, false otherwise.
|
||||||
|
*/
|
||||||
|
MY_ATTRIBUTE((warn_unused_result))
|
||||||
|
bool reserve(size_type n)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(n <= m_container.max_size());
|
||||||
|
try
|
||||||
|
{
|
||||||
|
m_container.reserve(n);
|
||||||
|
}
|
||||||
|
catch(std::bad_alloc const &)
|
||||||
|
{
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
container_type m_container;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
#if defined(EXTRA_CODE_FOR_UNIT_TESTING)
|
||||||
|
template <class T, class Container, class Less>
|
||||||
|
inline std::ostream&
|
||||||
|
operator<<(std::ostream& os,
|
||||||
|
Priority_queue<T, Container, Less> const& pq)
|
||||||
|
{
|
||||||
|
typedef typename Priority_queue<T, Container, Less>::size_type size_type;
|
||||||
|
|
||||||
|
for (size_type i = 0; i < pq.size(); i++)
|
||||||
|
{
|
||||||
|
os << pq[i] << " " << std::flush;
|
||||||
|
}
|
||||||
|
|
||||||
|
return os;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
template <class T, class Container, class Less>
|
||||||
|
inline std::stringstream&
|
||||||
|
operator<<(std::stringstream& ss,
|
||||||
|
Priority_queue<T, Container, Less> const& pq)
|
||||||
|
{
|
||||||
|
typedef typename Priority_queue<T, Container, Less>::size_type size_type;
|
||||||
|
|
||||||
|
for (size_type i = 0; i < pq.size(); i++)
|
||||||
|
{
|
||||||
|
ss << pq[i] << " ";;
|
||||||
|
}
|
||||||
|
|
||||||
|
return ss;
|
||||||
|
}
|
||||||
|
#endif // EXTRA_CODE_FOR_UNIT_TESTING
|
||||||
|
|
||||||
|
|
||||||
|
#endif // PRIORITY_QUEUE_INCLUDED
|
98
include/template_utils.h
Normal file
98
include/template_utils.h
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
/* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; version 2 of the License.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License
|
||||||
|
along with this program; if not, write to the Free Software
|
||||||
|
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
|
||||||
|
|
||||||
|
#ifndef TEMPLATE_UTILS_INCLUDED
|
||||||
|
#define TEMPLATE_UTILS_INCLUDED
|
||||||
|
|
||||||
|
/**
|
||||||
|
Clears a container, but deletes all objects that the elements point to first.
|
||||||
|
@tparam Container of pointers.
|
||||||
|
*/
|
||||||
|
template<typename Container_type>
|
||||||
|
void delete_container_pointers(Container_type &container)
|
||||||
|
{
|
||||||
|
typename Container_type::iterator it1= container.begin();
|
||||||
|
typename Container_type::iterator it2= container.end();
|
||||||
|
for (; it1 != it2; ++it1)
|
||||||
|
{
|
||||||
|
delete (*it1);
|
||||||
|
}
|
||||||
|
container.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Clears a container, but frees all objects that the elements point to first.
|
||||||
|
@tparam Container of pointers.
|
||||||
|
*/
|
||||||
|
template<typename Container_type>
|
||||||
|
void my_free_container_pointers(Container_type &container)
|
||||||
|
{
|
||||||
|
typename Container_type::iterator it1= container.begin();
|
||||||
|
typename Container_type::iterator it2= container.end();
|
||||||
|
for (; it1 != it2; ++it1)
|
||||||
|
{
|
||||||
|
my_free(*it1);
|
||||||
|
}
|
||||||
|
container.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
Casts from one pointer type, to another, without using
|
||||||
|
reinterpret_cast or C-style cast:
|
||||||
|
foo *f; bar *b= pointer_cast<bar*>(f);
|
||||||
|
This avoids having to do:
|
||||||
|
foo *f; bar *b= static_cast<b*>(static_cast<void*>(f));
|
||||||
|
*/
|
||||||
|
template<typename T>
|
||||||
|
inline T pointer_cast(void *p)
|
||||||
|
{
|
||||||
|
return static_cast<T>(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
template<typename T>
|
||||||
|
inline const T pointer_cast(const void *p)
|
||||||
|
{
|
||||||
|
return static_cast<const T>(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
Casts from one pointer type to another in a type hierarchy.
|
||||||
|
In debug mode, we verify the cast is indeed legal.
|
||||||
|
*/
|
||||||
|
template<typename Target, typename Source>
|
||||||
|
inline Target down_cast(Source arg)
|
||||||
|
{
|
||||||
|
DBUG_ASSERT(NULL != dynamic_cast<Target>(arg));
|
||||||
|
return static_cast<Target>(arg);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
Sometimes the compiler insists that types be the same and does not do any
|
||||||
|
implicit conversion. For example:
|
||||||
|
Derived1 *a;
|
||||||
|
Derived2 *b; // Derived1 and 2 are children classes of Base
|
||||||
|
Base *x= cond ? a : b; // Error, need to force a cast.
|
||||||
|
|
||||||
|
Use:
|
||||||
|
Base *x= cond ? implicit_cast<Base*>(a) : implicit_cast<Base*>(b);
|
||||||
|
static_cast would work too, but would be less safe (allows any
|
||||||
|
pointer-to-pointer conversion, not only up-casts).
|
||||||
|
*/
|
||||||
|
template<typename To>
|
||||||
|
inline To implicit_cast(To x) { return x; }
|
||||||
|
|
||||||
|
#endif // TEMPLATE_UTILS_INCLUDED
|
@ -1,6 +1,5 @@
|
|||||||
create table t1 (x int)
|
create table t1 (x int)
|
||||||
with system versioning
|
with system versioning
|
||||||
engine innodb
|
|
||||||
partition by range columns (x) (
|
partition by range columns (x) (
|
||||||
partition p0 values less than (100),
|
partition p0 values less than (100),
|
||||||
partition p1 values less than (1000));
|
partition p1 values less than (1000));
|
||||||
@ -78,14 +77,14 @@ show create table t1;
|
|||||||
Table Create Table
|
Table Create Table
|
||||||
t1 CREATE TABLE `t1` (
|
t1 CREATE TABLE `t1` (
|
||||||
`x` int(11) DEFAULT NULL,
|
`x` int(11) DEFAULT NULL,
|
||||||
`sys_trx_start` timestamp(6) GENERATED ALWAYS AS ROW START,
|
`sys_trx_start` ${SYS_TRX_TYPE} GENERATED ALWAYS AS ROW START,
|
||||||
`sys_trx_end` timestamp(6) GENERATED ALWAYS AS ROW END,
|
`sys_trx_end` ${SYS_TRX_TYPE} GENERATED ALWAYS AS ROW END,
|
||||||
PERIOD FOR SYSTEM_TIME (`sys_trx_start`, `sys_trx_end`)
|
PERIOD FOR SYSTEM_TIME (`sys_trx_start`, `sys_trx_end`)
|
||||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 WITH SYSTEM VERSIONING
|
) ENGINE=${INNODB_OR_MYISAM} DEFAULT CHARSET=latin1 WITH SYSTEM VERSIONING
|
||||||
PARTITION BY SYSTEM_TIME
|
PARTITION BY SYSTEM_TIME
|
||||||
(PARTITION p0 VERSIONING ENGINE = MyISAM,
|
(PARTITION p0 VERSIONING ENGINE = ${INNODB_OR_MYISAM},
|
||||||
PARTITION p1 VERSIONING ENGINE = MyISAM,
|
PARTITION p1 VERSIONING ENGINE = ${INNODB_OR_MYISAM},
|
||||||
PARTITION pn AS OF NOW ENGINE = MyISAM)
|
PARTITION pn AS OF NOW ENGINE = ${INNODB_OR_MYISAM})
|
||||||
alter table t1 drop partition pn;
|
alter table t1 drop partition pn;
|
||||||
ERROR HY000: Wrong parameters for `BY SYSTEM_TIME`: `AS OF NOW` partition can not be dropped
|
ERROR HY000: Wrong parameters for `BY SYSTEM_TIME`: `AS OF NOW` partition can not be dropped
|
||||||
alter table t1 drop partition p1;
|
alter table t1 drop partition p1;
|
||||||
@ -108,7 +107,6 @@ select * from t1 partition (pn) for system_time all;
|
|||||||
x
|
x
|
||||||
create or replace table t1 (x int)
|
create or replace table t1 (x int)
|
||||||
with system versioning
|
with system versioning
|
||||||
engine myisam
|
|
||||||
partition by system_time limit 1 (
|
partition by system_time limit 1 (
|
||||||
partition p0 versioning,
|
partition p0 versioning,
|
||||||
partition p1 versioning,
|
partition p1 versioning,
|
||||||
@ -137,7 +135,6 @@ x
|
|||||||
3
|
3
|
||||||
create or replace table t1 (x int)
|
create or replace table t1 (x int)
|
||||||
with system versioning
|
with system versioning
|
||||||
engine myisam
|
|
||||||
partition by system_time interval 1 second (
|
partition by system_time interval 1 second (
|
||||||
partition p0 versioning,
|
partition p0 versioning,
|
||||||
partition p1 versioning,
|
partition p1 versioning,
|
||||||
@ -163,7 +160,6 @@ x
|
|||||||
4
|
4
|
||||||
create or replace table t1 (x int)
|
create or replace table t1 (x int)
|
||||||
with system versioning
|
with system versioning
|
||||||
engine myisam
|
|
||||||
partition by system_time limit 1
|
partition by system_time limit 1
|
||||||
subpartition by key (x)
|
subpartition by key (x)
|
||||||
subpartitions 2 (
|
subpartitions 2 (
|
||||||
|
10
mysql-test/suite/versioning/t/partition.combinations
Normal file
10
mysql-test/suite/versioning/t/partition.combinations
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
[innodb]
|
||||||
|
innodb
|
||||||
|
partition
|
||||||
|
default-storage-engine=innodb
|
||||||
|
|
||||||
|
[myisam]
|
||||||
|
skip-innodb
|
||||||
|
partition
|
||||||
|
default-storage-engine=myisam
|
||||||
|
|
@ -1,11 +1,7 @@
|
|||||||
--source include/have_innodb.inc
|
### check System Versioning and conventional partitioning
|
||||||
--source include/have_partition.inc
|
|
||||||
|
|
||||||
### check InnoDB versioning and conventional partitioning
|
|
||||||
|
|
||||||
create table t1 (x int)
|
create table t1 (x int)
|
||||||
with system versioning
|
with system versioning
|
||||||
engine innodb
|
|
||||||
partition by range columns (x) (
|
partition by range columns (x) (
|
||||||
partition p0 values less than (100),
|
partition p0 values less than (100),
|
||||||
partition p1 values less than (1000));
|
partition p1 values less than (1000));
|
||||||
@ -78,6 +74,7 @@ alter table t1 add partition (
|
|||||||
alter table t1 add partition (
|
alter table t1 add partition (
|
||||||
partition p1 versioning);
|
partition p1 versioning);
|
||||||
|
|
||||||
|
--replace_result InnoDB ${INNODB_OR_MYISAM} MyISAM ${INNODB_OR_MYISAM} "bigint(20) unsigned" ${SYS_TRX_TYPE} timestamp(6) ${SYS_TRX_TYPE}
|
||||||
show create table t1;
|
show create table t1;
|
||||||
|
|
||||||
--error ER_VERS_WRONG_PARAMS
|
--error ER_VERS_WRONG_PARAMS
|
||||||
@ -99,7 +96,6 @@ select * from t1 partition (pn) for system_time all;
|
|||||||
# rotation by LIMIT
|
# rotation by LIMIT
|
||||||
create or replace table t1 (x int)
|
create or replace table t1 (x int)
|
||||||
with system versioning
|
with system versioning
|
||||||
engine myisam
|
|
||||||
partition by system_time limit 1 (
|
partition by system_time limit 1 (
|
||||||
partition p0 versioning,
|
partition p0 versioning,
|
||||||
partition p1 versioning,
|
partition p1 versioning,
|
||||||
@ -118,7 +114,6 @@ select * from t1 partition (p1) for system_time all;
|
|||||||
# rotation by INTERVAL
|
# rotation by INTERVAL
|
||||||
create or replace table t1 (x int)
|
create or replace table t1 (x int)
|
||||||
with system versioning
|
with system versioning
|
||||||
engine myisam
|
|
||||||
partition by system_time interval 1 second (
|
partition by system_time interval 1 second (
|
||||||
partition p0 versioning,
|
partition p0 versioning,
|
||||||
partition p1 versioning,
|
partition p1 versioning,
|
||||||
@ -137,7 +132,6 @@ select * from t1 partition (p1) for system_time all;
|
|||||||
# Subpartitions
|
# Subpartitions
|
||||||
create or replace table t1 (x int)
|
create or replace table t1 (x int)
|
||||||
with system versioning
|
with system versioning
|
||||||
engine myisam
|
|
||||||
partition by system_time limit 1
|
partition by system_time limit 1
|
||||||
subpartition by key (x)
|
subpartition by key (x)
|
||||||
subpartitions 2 (
|
subpartitions 2 (
|
||||||
@ -156,4 +150,3 @@ select * from t1 partition (p1sp0) for system_time all;
|
|||||||
select * from t1 partition (p1sp1) for system_time all;
|
select * from t1 partition (p1sp1) for system_time all;
|
||||||
|
|
||||||
drop table t1;
|
drop table t1;
|
||||||
|
|
||||||
|
@ -121,7 +121,7 @@ SET (SQL_SOURCE
|
|||||||
rpl_tblmap.cc sql_binlog.cc event_scheduler.cc event_data_objects.cc
|
rpl_tblmap.cc sql_binlog.cc event_scheduler.cc event_data_objects.cc
|
||||||
event_queue.cc event_db_repository.cc
|
event_queue.cc event_db_repository.cc
|
||||||
sql_tablespace.cc events.cc ../sql-common/my_user.c
|
sql_tablespace.cc events.cc ../sql-common/my_user.c
|
||||||
partition_info.cc rpl_utility.cc rpl_injector.cc sql_locale.cc
|
partition_info.cc partitioning/partition_handler.cc rpl_utility.cc rpl_injector.cc sql_locale.cc
|
||||||
rpl_rli.cc rpl_mi.cc sql_servers.cc sql_audit.cc
|
rpl_rli.cc rpl_mi.cc sql_servers.cc sql_audit.cc
|
||||||
sql_connect.cc scheduler.cc sql_partition_admin.cc
|
sql_connect.cc scheduler.cc sql_partition_admin.cc
|
||||||
sql_profile.cc event_parse_data.cc sql_alter.cc
|
sql_profile.cc event_parse_data.cc sql_alter.cc
|
||||||
@ -165,7 +165,7 @@ IF (CMAKE_SYSTEM_NAME MATCHES "Linux" OR
|
|||||||
|
|
||||||
ENDIF()
|
ENDIF()
|
||||||
|
|
||||||
MYSQL_ADD_PLUGIN(partition ha_partition.cc STORAGE_ENGINE DEFAULT STATIC_ONLY
|
MYSQL_ADD_PLUGIN(partition ha_partition.cc partitioning/partition_handler.cc STORAGE_ENGINE DEFAULT STATIC_ONLY
|
||||||
RECOMPILE_FOR_EMBEDDED)
|
RECOMPILE_FOR_EMBEDDED)
|
||||||
MYSQL_ADD_PLUGIN(sql_sequence ha_sequence.cc STORAGE_ENGINE MANDATORY STATIC_ONLY
|
MYSQL_ADD_PLUGIN(sql_sequence ha_sequence.cc STORAGE_ENGINE MANDATORY STATIC_ONLY
|
||||||
RECOMPILE_FOR_EMBEDDED)
|
RECOMPILE_FOR_EMBEDDED)
|
||||||
|
@ -160,9 +160,6 @@ static int partition_initialize(void *p)
|
|||||||
bool Partition_share::init(uint num_parts)
|
bool Partition_share::init(uint num_parts)
|
||||||
{
|
{
|
||||||
DBUG_ENTER("Partition_share::init");
|
DBUG_ENTER("Partition_share::init");
|
||||||
mysql_mutex_init(key_partition_auto_inc_mutex,
|
|
||||||
&auto_inc_mutex,
|
|
||||||
MY_MUTEX_INIT_FAST);
|
|
||||||
auto_inc_initialized= false;
|
auto_inc_initialized= false;
|
||||||
partition_name_hash_initialized= false;
|
partition_name_hash_initialized= false;
|
||||||
next_auto_inc_val= 0;
|
next_auto_inc_val= 0;
|
||||||
@ -1246,12 +1243,12 @@ int ha_partition::handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
|
|||||||
(modelled after mi_check_print_msg)
|
(modelled after mi_check_print_msg)
|
||||||
TODO: move this into the handler, or rewrite mysql_admin_table.
|
TODO: move this into the handler, or rewrite mysql_admin_table.
|
||||||
*/
|
*/
|
||||||
static bool print_admin_msg(THD* thd, uint len,
|
bool print_admin_msg(THD* thd, uint len,
|
||||||
const char* msg_type,
|
const char* msg_type,
|
||||||
const char* db_name, String &table_name,
|
const char* db_name, String &table_name,
|
||||||
const char* op_name, const char *fmt, ...)
|
const char* op_name, const char *fmt, ...)
|
||||||
ATTRIBUTE_FORMAT(printf, 7, 8);
|
ATTRIBUTE_FORMAT(printf, 7, 8);
|
||||||
static bool print_admin_msg(THD* thd, uint len,
|
bool print_admin_msg(THD* thd, uint len,
|
||||||
const char* msg_type,
|
const char* msg_type,
|
||||||
const char* db_name, String &table_name,
|
const char* db_name, String &table_name,
|
||||||
const char* op_name, const char *fmt, ...)
|
const char* op_name, const char *fmt, ...)
|
||||||
@ -5731,6 +5728,22 @@ int ha_partition::index_next_same(uchar *buf, const uchar *key, uint keylen)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int ha_partition::index_read_last_map(uchar *buf,
|
||||||
|
const uchar *key,
|
||||||
|
key_part_map keypart_map)
|
||||||
|
{
|
||||||
|
DBUG_ENTER("ha_partition::index_read_last_map");
|
||||||
|
|
||||||
|
m_ordered= true; // Safety measure
|
||||||
|
end_range= NULL;
|
||||||
|
m_index_scan_type= partition_index_read_last;
|
||||||
|
m_start_key.key= key;
|
||||||
|
m_start_key.keypart_map= keypart_map;
|
||||||
|
m_start_key.flag= HA_READ_PREFIX_LAST;
|
||||||
|
DBUG_RETURN(common_index_read(buf, true));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Read next record when performing index scan backwards
|
Read next record when performing index scan backwards
|
||||||
|
|
||||||
|
@ -77,43 +77,118 @@ public:
|
|||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
extern PSI_mutex_key key_partition_auto_inc_mutex;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
Partition specific Handler_share.
|
Partition specific Handler_share.
|
||||||
*/
|
*/
|
||||||
class Partition_share : public Handler_share
|
class Partition_share : public Handler_share
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
bool auto_inc_initialized;
|
Partition_share()
|
||||||
mysql_mutex_t auto_inc_mutex; /**< protecting auto_inc val */
|
: auto_inc_initialized(false),
|
||||||
ulonglong next_auto_inc_val; /**< first non reserved value */
|
next_auto_inc_val(0),
|
||||||
/**
|
partition_name_hash_initialized(false),
|
||||||
Hash of partition names. Initialized in the first ha_partition::open()
|
partitions_share_refs(NULL),
|
||||||
for the table_share. After that it is read-only, i.e. no locking required.
|
partition_names(NULL)
|
||||||
*/
|
{
|
||||||
bool partition_name_hash_initialized;
|
mysql_mutex_init(key_partition_auto_inc_mutex,
|
||||||
HASH partition_name_hash;
|
&auto_inc_mutex,
|
||||||
/** Storage for each partitions Handler_share */
|
MY_MUTEX_INIT_FAST);
|
||||||
Parts_share_refs *partitions_share_refs;
|
}
|
||||||
Partition_share() {}
|
|
||||||
~Partition_share()
|
~Partition_share()
|
||||||
{
|
{
|
||||||
DBUG_ENTER("Partition_share::~Partition_share");
|
|
||||||
mysql_mutex_destroy(&auto_inc_mutex);
|
mysql_mutex_destroy(&auto_inc_mutex);
|
||||||
|
if (partition_names)
|
||||||
|
{
|
||||||
|
my_free(partition_names);
|
||||||
|
}
|
||||||
if (partition_name_hash_initialized)
|
if (partition_name_hash_initialized)
|
||||||
|
{
|
||||||
my_hash_free(&partition_name_hash);
|
my_hash_free(&partition_name_hash);
|
||||||
|
}
|
||||||
if (partitions_share_refs)
|
if (partitions_share_refs)
|
||||||
delete partitions_share_refs;
|
delete partitions_share_refs;
|
||||||
DBUG_VOID_RETURN;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool init(uint num_parts);
|
bool init(uint num_parts);
|
||||||
void lock_auto_inc()
|
|
||||||
|
/** Set if auto increment is used an initialized. */
|
||||||
|
bool auto_inc_initialized;
|
||||||
|
/**
|
||||||
|
Mutex protecting next_auto_inc_val.
|
||||||
|
Initialized if table uses auto increment.
|
||||||
|
*/
|
||||||
|
mysql_mutex_t auto_inc_mutex;
|
||||||
|
/** First non reserved auto increment value. */
|
||||||
|
ulonglong next_auto_inc_val;
|
||||||
|
/**
|
||||||
|
Hash of partition names. Initialized by the first handler instance of a
|
||||||
|
table_share calling populate_partition_name_hash().
|
||||||
|
After that it is read-only, i.e. no locking required for reading.
|
||||||
|
*/
|
||||||
|
HASH partition_name_hash;
|
||||||
|
/** flag that the name hash is initialized, so it only will do it once. */
|
||||||
|
bool partition_name_hash_initialized;
|
||||||
|
|
||||||
|
/** Storage for each partitions Handler_share */
|
||||||
|
Parts_share_refs *partitions_share_refs;
|
||||||
|
|
||||||
|
/**
|
||||||
|
Release reserved auto increment values not used.
|
||||||
|
@param thd Thread.
|
||||||
|
@param table_share Table Share
|
||||||
|
@param next_insert_id Next insert id (first non used auto inc value).
|
||||||
|
@param max_reserved End of reserved auto inc range.
|
||||||
|
*/
|
||||||
|
void release_auto_inc_if_possible(THD *thd, TABLE_SHARE *table_share,
|
||||||
|
const ulonglong next_insert_id,
|
||||||
|
const ulonglong max_reserved);
|
||||||
|
|
||||||
|
/** lock mutex protecting auto increment value next_auto_inc_val. */
|
||||||
|
inline void lock_auto_inc()
|
||||||
{
|
{
|
||||||
mysql_mutex_lock(&auto_inc_mutex);
|
mysql_mutex_lock(&auto_inc_mutex);
|
||||||
}
|
}
|
||||||
void unlock_auto_inc()
|
/** unlock mutex protecting auto increment value next_auto_inc_val. */
|
||||||
|
inline void unlock_auto_inc()
|
||||||
{
|
{
|
||||||
mysql_mutex_unlock(&auto_inc_mutex);
|
mysql_mutex_unlock(&auto_inc_mutex);
|
||||||
}
|
}
|
||||||
|
/**
|
||||||
|
Populate partition_name_hash with partition and subpartition names
|
||||||
|
from part_info.
|
||||||
|
@param part_info Partition info containing all partitions metadata.
|
||||||
|
|
||||||
|
@return Operation status.
|
||||||
|
@retval false Success.
|
||||||
|
@retval true Failure.
|
||||||
|
*/
|
||||||
|
bool populate_partition_name_hash(partition_info *part_info);
|
||||||
|
/** Get partition name.
|
||||||
|
|
||||||
|
@param part_id Partition id (for subpartitioned table only subpartition
|
||||||
|
names will be returned.)
|
||||||
|
|
||||||
|
@return partition name or NULL if error.
|
||||||
|
*/
|
||||||
|
const char *get_partition_name(size_t part_id) const;
|
||||||
|
private:
|
||||||
|
const uchar **partition_names;
|
||||||
|
/**
|
||||||
|
Insert [sub]partition name into partition_name_hash
|
||||||
|
@param name Partition name.
|
||||||
|
@param part_id Partition id.
|
||||||
|
@param is_subpart True if subpartition else partition.
|
||||||
|
|
||||||
|
@return Operation status.
|
||||||
|
@retval false Success.
|
||||||
|
@retval true Failure.
|
||||||
|
*/
|
||||||
|
bool insert_partition_name_in_hash(const char *name,
|
||||||
|
uint part_id,
|
||||||
|
bool is_subpart);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
@ -605,6 +680,10 @@ public:
|
|||||||
virtual int index_last(uchar * buf);
|
virtual int index_last(uchar * buf);
|
||||||
virtual int index_next_same(uchar * buf, const uchar * key, uint keylen);
|
virtual int index_next_same(uchar * buf, const uchar * key, uint keylen);
|
||||||
|
|
||||||
|
int index_read_last_map(uchar *buf,
|
||||||
|
const uchar *key,
|
||||||
|
key_part_map keypart_map);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
read_first_row is virtual method but is only implemented by
|
read_first_row is virtual method but is only implemented by
|
||||||
handler.cc, no storage engine has implemented it so neither
|
handler.cc, no storage engine has implemented it so neither
|
||||||
@ -1086,7 +1165,6 @@ private:
|
|||||||
ulonglong nr= (((Field_num*) field)->unsigned_flag ||
|
ulonglong nr= (((Field_num*) field)->unsigned_flag ||
|
||||||
field->val_int() > 0) ? field->val_int() : 0;
|
field->val_int() > 0) ? field->val_int() : 0;
|
||||||
lock_auto_increment();
|
lock_auto_increment();
|
||||||
DBUG_ASSERT(part_share->auto_inc_initialized);
|
|
||||||
/* must check when the mutex is taken */
|
/* must check when the mutex is taken */
|
||||||
if (nr >= part_share->next_auto_inc_val)
|
if (nr >= part_share->next_auto_inc_val)
|
||||||
part_share->next_auto_inc_val= nr + 1;
|
part_share->next_auto_inc_val= nr + 1;
|
||||||
@ -1310,4 +1388,9 @@ public:
|
|||||||
friend int cmp_key_rowid_part_id(void *ptr, uchar *ref1, uchar *ref2);
|
friend int cmp_key_rowid_part_id(void *ptr, uchar *ref1, uchar *ref2);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
bool print_admin_msg(THD* thd, uint len,
|
||||||
|
const char* msg_type,
|
||||||
|
const char* db_name, String &table_name,
|
||||||
|
const char* op_name, const char *fmt, ...);
|
||||||
|
|
||||||
#endif /* HA_PARTITION_INCLUDED */
|
#endif /* HA_PARTITION_INCLUDED */
|
||||||
|
@ -2435,6 +2435,12 @@ LEX_STRING *handler::engine_name()
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void handler::ha_statistic_increment(ulong SSV::*offset) const
|
||||||
|
{
|
||||||
|
(table->in_use->status_var.*offset)++;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
double handler::keyread_time(uint index, uint ranges, ha_rows rows)
|
double handler::keyread_time(uint index, uint ranges, ha_rows rows)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
|
@ -1393,6 +1393,7 @@ struct handlerton
|
|||||||
bool (*vers_query_trx_id)(THD* thd, void *out, ulonglong trx_id, vtq_field_t field);
|
bool (*vers_query_trx_id)(THD* thd, void *out, ulonglong trx_id, vtq_field_t field);
|
||||||
bool (*vers_query_commit_ts)(THD* thd, void *out, const MYSQL_TIME &commit_ts, vtq_field_t field, bool backwards);
|
bool (*vers_query_commit_ts)(THD* thd, void *out, const MYSQL_TIME &commit_ts, vtq_field_t field, bool backwards);
|
||||||
bool (*vers_trx_sees)(THD *thd, bool &result, ulonglong trx_id1, ulonglong trx_id0, ulonglong commit_id1, uchar iso_level1, ulonglong commit_id0);
|
bool (*vers_trx_sees)(THD *thd, bool &result, ulonglong trx_id1, ulonglong trx_id0, ulonglong commit_id1, uchar iso_level1, ulonglong commit_id0);
|
||||||
|
handler *(*vers_upgrade_handler)(handler *hnd, MEM_ROOT *mem_root);
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
@ -3271,6 +3272,18 @@ protected:
|
|||||||
virtual int index_last(uchar * buf)
|
virtual int index_last(uchar * buf)
|
||||||
{ return HA_ERR_WRONG_COMMAND; }
|
{ return HA_ERR_WRONG_COMMAND; }
|
||||||
virtual int index_next_same(uchar *buf, const uchar *key, uint keylen);
|
virtual int index_next_same(uchar *buf, const uchar *key, uint keylen);
|
||||||
|
/**
|
||||||
|
@brief
|
||||||
|
The following functions works like index_read, but it find the last
|
||||||
|
row with the current key value or prefix.
|
||||||
|
@returns @see index_read_map().
|
||||||
|
*/
|
||||||
|
virtual int index_read_last_map(uchar * buf, const uchar * key,
|
||||||
|
key_part_map keypart_map)
|
||||||
|
{
|
||||||
|
uint key_len= calculate_key_len(table, active_index, key, keypart_map);
|
||||||
|
return index_read_last(buf, key, key_len);
|
||||||
|
}
|
||||||
virtual int close(void)=0;
|
virtual int close(void)=0;
|
||||||
inline void update_rows_read()
|
inline void update_rows_read()
|
||||||
{
|
{
|
||||||
@ -3350,7 +3363,7 @@ public:
|
|||||||
void ft_end() { ft_handler=NULL; }
|
void ft_end() { ft_handler=NULL; }
|
||||||
virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key)
|
virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key)
|
||||||
{ return NULL; }
|
{ return NULL; }
|
||||||
private:
|
public:
|
||||||
virtual int ft_read(uchar *buf) { return HA_ERR_WRONG_COMMAND; }
|
virtual int ft_read(uchar *buf) { return HA_ERR_WRONG_COMMAND; }
|
||||||
virtual int rnd_next(uchar *buf)=0;
|
virtual int rnd_next(uchar *buf)=0;
|
||||||
virtual int rnd_pos(uchar * buf, uchar *pos)=0;
|
virtual int rnd_pos(uchar * buf, uchar *pos)=0;
|
||||||
@ -4057,6 +4070,7 @@ public:
|
|||||||
TABLE_SHARE* get_table_share() { return table_share; }
|
TABLE_SHARE* get_table_share() { return table_share; }
|
||||||
protected:
|
protected:
|
||||||
/* Service methods for use by storage engines. */
|
/* Service methods for use by storage engines. */
|
||||||
|
void ha_statistic_increment(ulong SSV::*offset) const;
|
||||||
void **ha_data(THD *) const;
|
void **ha_data(THD *) const;
|
||||||
THD *ha_thd(void) const;
|
THD *ha_thd(void) const;
|
||||||
|
|
||||||
@ -4082,7 +4096,7 @@ protected:
|
|||||||
|
|
||||||
public:
|
public:
|
||||||
bool check_table_binlog_row_based(bool binlog_row);
|
bool check_table_binlog_row_based(bool binlog_row);
|
||||||
private:
|
|
||||||
/* Cache result to avoid extra calls */
|
/* Cache result to avoid extra calls */
|
||||||
inline void mark_trx_read_write()
|
inline void mark_trx_read_write()
|
||||||
{
|
{
|
||||||
@ -4092,6 +4106,8 @@ private:
|
|||||||
mark_trx_read_write_internal();
|
mark_trx_read_write_internal();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
void mark_trx_read_write_internal();
|
void mark_trx_read_write_internal();
|
||||||
bool check_table_binlog_row_based_internal(bool binlog_row);
|
bool check_table_binlog_row_based_internal(bool binlog_row);
|
||||||
|
|
||||||
@ -4210,6 +4226,11 @@ protected:
|
|||||||
virtual int index_read(uchar * buf, const uchar * key, uint key_len,
|
virtual int index_read(uchar * buf, const uchar * key, uint key_len,
|
||||||
enum ha_rkey_function find_flag)
|
enum ha_rkey_function find_flag)
|
||||||
{ return HA_ERR_WRONG_COMMAND; }
|
{ return HA_ERR_WRONG_COMMAND; }
|
||||||
|
virtual int index_read_last(uchar * buf, const uchar * key, uint key_len)
|
||||||
|
{
|
||||||
|
my_errno= HA_ERR_WRONG_COMMAND;
|
||||||
|
return HA_ERR_WRONG_COMMAND;
|
||||||
|
}
|
||||||
friend class ha_partition;
|
friend class ha_partition;
|
||||||
friend class ha_sequence;
|
friend class ha_sequence;
|
||||||
public:
|
public:
|
||||||
@ -4340,6 +4361,8 @@ public:
|
|||||||
{ DBUG_ASSERT(0); return false; }
|
{ DBUG_ASSERT(0); return false; }
|
||||||
virtual handler* part_handler(uint32 part_id)
|
virtual handler* part_handler(uint32 part_id)
|
||||||
{ DBUG_ASSERT(0); return NULL; }
|
{ DBUG_ASSERT(0); return NULL; }
|
||||||
|
virtual void update_partition(uint part_id)
|
||||||
|
{}
|
||||||
protected:
|
protected:
|
||||||
Handler_share *get_ha_share_ptr();
|
Handler_share *get_ha_share_ptr();
|
||||||
void set_ha_share_ptr(Handler_share *arg_ha_share);
|
void set_ha_share_ptr(Handler_share *arg_ha_share);
|
||||||
|
@ -208,6 +208,48 @@ bool partition_info::set_named_partition_bitmap(const char *part_name,
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
Prune away partitions not mentioned in the PARTITION () clause,
|
||||||
|
if used.
|
||||||
|
|
||||||
|
@param table_list Table list pointing to table to prune.
|
||||||
|
|
||||||
|
@return Operation status
|
||||||
|
@retval false Success
|
||||||
|
@retval true Failure
|
||||||
|
*/
|
||||||
|
bool partition_info::set_read_partitions(List<char> *partition_names)
|
||||||
|
{
|
||||||
|
DBUG_ENTER("partition_info::set_read_partitions");
|
||||||
|
if (!partition_names || !partition_names->elements)
|
||||||
|
{
|
||||||
|
DBUG_RETURN(true);
|
||||||
|
}
|
||||||
|
|
||||||
|
uint num_names= partition_names->elements;
|
||||||
|
List_iterator<char> partition_names_it(*partition_names);
|
||||||
|
uint i= 0;
|
||||||
|
/*
|
||||||
|
TODO: When adding support for FK in partitioned tables, the referenced
|
||||||
|
table must probably lock all partitions for read, and also write depending
|
||||||
|
of ON DELETE/UPDATE.
|
||||||
|
*/
|
||||||
|
bitmap_clear_all(&read_partitions);
|
||||||
|
|
||||||
|
/* No check for duplicate names or overlapping partitions/subpartitions. */
|
||||||
|
|
||||||
|
DBUG_PRINT("info", ("Searching through partition_name_hash"));
|
||||||
|
do
|
||||||
|
{
|
||||||
|
char *part_name= partition_names_it++;
|
||||||
|
if (add_named_partition(part_name, strlen(part_name)))
|
||||||
|
DBUG_RETURN(true);
|
||||||
|
} while (++i < num_names);
|
||||||
|
DBUG_RETURN(false);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
Prune away partitions not mentioned in the PARTITION () clause,
|
Prune away partitions not mentioned in the PARTITION () clause,
|
||||||
if used.
|
if used.
|
||||||
@ -989,13 +1031,22 @@ bool partition_info::vers_scan_min_max(THD *thd, partition_element *part)
|
|||||||
uint32 part_id= part->id * sub_factor;
|
uint32 part_id= part->id * sub_factor;
|
||||||
uint32 part_id_end= part_id + sub_factor;
|
uint32 part_id_end= part_id + sub_factor;
|
||||||
DBUG_ASSERT(part->empty);
|
DBUG_ASSERT(part->empty);
|
||||||
|
DBUG_ASSERT(part->type == partition_element::VERSIONING);
|
||||||
DBUG_ASSERT(table->s->stat_trx);
|
DBUG_ASSERT(table->s->stat_trx);
|
||||||
for (; part_id < part_id_end; ++part_id)
|
for (; part_id < part_id_end; ++part_id)
|
||||||
{
|
{
|
||||||
handler *file= table->file->part_handler(part_id);
|
handler *file= table->file->part_handler(part_id); // requires update_partition() for ha_innopart
|
||||||
int rc= file->ha_external_lock(thd, F_RDLCK);
|
int rc= file->ha_external_lock(thd, F_RDLCK); // requires ha_commit_trans() for ha_innobase
|
||||||
if (rc)
|
if (rc)
|
||||||
goto error;
|
{
|
||||||
|
file->update_partition(part_id);
|
||||||
|
goto lock_fail;
|
||||||
|
}
|
||||||
|
|
||||||
|
table->default_column_bitmaps();
|
||||||
|
bitmap_set_bit(table->read_set, table->vers_end_field()->field_index);
|
||||||
|
file->column_bitmaps_signal();
|
||||||
|
|
||||||
rc= file->ha_rnd_init(true);
|
rc= file->ha_rnd_init(true);
|
||||||
if (!rc)
|
if (!rc)
|
||||||
{
|
{
|
||||||
@ -1006,6 +1057,8 @@ bool partition_info::vers_scan_min_max(THD *thd, partition_element *part)
|
|||||||
if (thd->killed)
|
if (thd->killed)
|
||||||
{
|
{
|
||||||
file->ha_rnd_end();
|
file->ha_rnd_end();
|
||||||
|
file->update_partition(part_id);
|
||||||
|
ha_commit_trans(thd, false);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
if (rc)
|
if (rc)
|
||||||
@ -1014,18 +1067,44 @@ bool partition_info::vers_scan_min_max(THD *thd, partition_element *part)
|
|||||||
continue;
|
continue;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
vers_stat_trx(STAT_TRX_END, part).update_unguarded(table->vers_end_field());
|
if (table->vers_end_field()->is_max())
|
||||||
|
{
|
||||||
|
rc= HA_ERR_INTERNAL_ERROR;
|
||||||
|
push_warning_printf(thd,
|
||||||
|
Sql_condition::WARN_LEVEL_WARN,
|
||||||
|
WARN_VERS_PART_NON_HISTORICAL,
|
||||||
|
ER_THD(thd, WARN_VERS_PART_NON_HISTORICAL),
|
||||||
|
part->partition_name);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (table->versioned_by_engine())
|
||||||
|
{
|
||||||
|
uchar buf[8];
|
||||||
|
Field_timestampf fld(buf, NULL, 0, Field::NONE, table->vers_end_field()->field_name, NULL, 6);
|
||||||
|
if (!vers_trx_id_to_ts(thd, table->vers_end_field(), fld))
|
||||||
|
{
|
||||||
|
vers_stat_trx(STAT_TRX_END, part).update_unguarded(&fld);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
vers_stat_trx(STAT_TRX_END, part).update_unguarded(table->vers_end_field());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
file->ha_rnd_end();
|
file->ha_rnd_end();
|
||||||
}
|
}
|
||||||
file->ha_external_lock(thd, F_UNLCK);
|
file->ha_external_lock(thd, F_UNLCK);
|
||||||
|
file->update_partition(part_id);
|
||||||
if (rc != HA_ERR_END_OF_FILE)
|
if (rc != HA_ERR_END_OF_FILE)
|
||||||
{
|
{
|
||||||
error:
|
ha_commit_trans(thd, false);
|
||||||
my_error(ER_INTERNAL_ERROR, MYF(0), "partition/subpartition scan failed in versioned partitions setup");
|
lock_fail:
|
||||||
|
// TODO: print rc code
|
||||||
|
my_error(ER_INTERNAL_ERROR, MYF(0), "min/max scan failed in versioned partitions setup (see warnings)");
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
ha_commit_trans(thd, false);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1073,11 +1152,9 @@ bool partition_info::vers_setup_2(THD * thd, bool is_create_table_ind)
|
|||||||
DBUG_ASSERT(part_type == VERSIONING_PARTITION);
|
DBUG_ASSERT(part_type == VERSIONING_PARTITION);
|
||||||
DBUG_ASSERT(vers_info && vers_info->initialized(false));
|
DBUG_ASSERT(vers_info && vers_info->initialized(false));
|
||||||
DBUG_ASSERT(table && table->s);
|
DBUG_ASSERT(table && table->s);
|
||||||
if (!table->versioned_by_sql())
|
|
||||||
{
|
bool error= false;
|
||||||
my_error(ER_VERS_WRONG_PARAMS, MYF(0), table->s->table_name.str, "selected engine is not supported in `BY SYSTEM_TIME` partitioning");
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
mysql_mutex_lock(&table->s->LOCK_rotation);
|
mysql_mutex_lock(&table->s->LOCK_rotation);
|
||||||
if (table->s->busy_rotation)
|
if (table->s->busy_rotation)
|
||||||
{
|
{
|
||||||
@ -1124,8 +1201,19 @@ bool partition_info::vers_setup_2(THD * thd, bool is_create_table_ind)
|
|||||||
|
|
||||||
if (!is_create_table_ind)
|
if (!is_create_table_ind)
|
||||||
{
|
{
|
||||||
if (vers_scan_min_max(thd, el))
|
if (el->type == partition_element::AS_OF_NOW)
|
||||||
return true;
|
{
|
||||||
|
uchar buf[8];
|
||||||
|
Field_timestampf fld(buf, NULL, 0, Field::NONE, table->vers_end_field()->field_name, NULL, 6);
|
||||||
|
fld.set_max();
|
||||||
|
vers_stat_trx(STAT_TRX_END, el).update_unguarded(&fld);
|
||||||
|
el->empty= false;
|
||||||
|
}
|
||||||
|
else if (vers_scan_min_max(thd, el))
|
||||||
|
{
|
||||||
|
error= true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
if (!el->empty)
|
if (!el->empty)
|
||||||
{
|
{
|
||||||
vers_update_col_vals(thd, prev, el);
|
vers_update_col_vals(thd, prev, el);
|
||||||
@ -1151,7 +1239,7 @@ bool partition_info::vers_setup_2(THD * thd, bool is_create_table_ind)
|
|||||||
}
|
}
|
||||||
} // while
|
} // while
|
||||||
|
|
||||||
if (!dont_stat)
|
if (!error && !dont_stat)
|
||||||
{
|
{
|
||||||
if (col_val_updated)
|
if (col_val_updated)
|
||||||
table->s->stat_serial++;
|
table->s->stat_serial++;
|
||||||
@ -1165,7 +1253,7 @@ bool partition_info::vers_setup_2(THD * thd, bool is_create_table_ind)
|
|||||||
table->s->busy_rotation= false;
|
table->s->busy_rotation= false;
|
||||||
}
|
}
|
||||||
mysql_mutex_unlock(&table->s->LOCK_rotation);
|
mysql_mutex_unlock(&table->s->LOCK_rotation);
|
||||||
return false;
|
return error;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -3262,6 +3350,80 @@ bool partition_info::has_same_partitioning(partition_info *new_part_info)
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static bool has_same_column_order(List<Create_field> *create_list,
|
||||||
|
Field** field_array)
|
||||||
|
{
|
||||||
|
Field **f_ptr;
|
||||||
|
List_iterator_fast<Create_field> new_field_it;
|
||||||
|
Create_field *new_field= NULL;
|
||||||
|
new_field_it.init(*create_list);
|
||||||
|
|
||||||
|
for (f_ptr= field_array; *f_ptr; f_ptr++)
|
||||||
|
{
|
||||||
|
while ((new_field= new_field_it++))
|
||||||
|
{
|
||||||
|
if (new_field->field == *f_ptr)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (!new_field)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!new_field)
|
||||||
|
{
|
||||||
|
/* Not same order!*/
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool partition_info::vers_trx_id_to_ts(THD* thd, Field* in_trx_id, Field_timestamp& out_ts)
|
||||||
|
{
|
||||||
|
handlerton *hton= plugin_hton(table->s->db_plugin);
|
||||||
|
DBUG_ASSERT(hton);
|
||||||
|
ulonglong trx_id= in_trx_id->val_int();
|
||||||
|
MYSQL_TIME ts;
|
||||||
|
bool found= hton->vers_query_trx_id(thd, &ts, trx_id, VTQ_COMMIT_TS);
|
||||||
|
if (!found)
|
||||||
|
{
|
||||||
|
push_warning_printf(thd,
|
||||||
|
Sql_condition::WARN_LEVEL_WARN,
|
||||||
|
WARN_VERS_TRX_MISSING,
|
||||||
|
ER_THD(thd, WARN_VERS_TRX_MISSING),
|
||||||
|
trx_id);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
out_ts.store_time_dec(&ts, 6);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
Check if the partitioning columns are in the same order as the given list.
|
||||||
|
|
||||||
|
Used to see if INPLACE alter can be allowed or not. If the order is
|
||||||
|
different then the rows must be redistributed for KEY [sub]partitioning.
|
||||||
|
|
||||||
|
@param[in] create_list Column list after ALTER TABLE.
|
||||||
|
@return true is same order as before ALTER TABLE, else false.
|
||||||
|
*/
|
||||||
|
bool partition_info::same_key_column_order(List<Create_field> *create_list)
|
||||||
|
{
|
||||||
|
/* Only need to check for KEY [sub] partitioning. */
|
||||||
|
if (list_of_part_fields && !column_list)
|
||||||
|
{
|
||||||
|
if (!has_same_column_order(create_list, part_field_array))
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
if (list_of_subpart_fields)
|
||||||
|
{
|
||||||
|
if (!has_same_column_order(create_list, subpart_field_array))
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
void partition_info::print_debug(const char *str, uint *value)
|
void partition_info::print_debug(const char *str, uint *value)
|
||||||
{
|
{
|
||||||
DBUG_ENTER("print_debug");
|
DBUG_ENTER("print_debug");
|
||||||
|
@ -22,6 +22,7 @@
|
|||||||
|
|
||||||
#include "sql_class.h"
|
#include "sql_class.h"
|
||||||
#include "partition_element.h"
|
#include "partition_element.h"
|
||||||
|
#include "sql_partition.h"
|
||||||
|
|
||||||
class partition_info;
|
class partition_info;
|
||||||
struct TABLE_LIST;
|
struct TABLE_LIST;
|
||||||
@@ -382,6 +383,28 @@ public:
                         uint32 *part_id);
   void report_part_expr_error(bool use_subpart_expr);
   bool has_same_partitioning(partition_info *new_part_info);
+  inline bool is_partition_used(uint part_id) const
+  {
+    return bitmap_is_set(&read_partitions, part_id);
+  }
+  inline bool is_partition_locked(uint part_id) const
+  {
+    return bitmap_is_set(&lock_partitions, part_id);
+  }
+  inline uint num_partitions_used()
+  {
+    return bitmap_bits_set(&read_partitions);
+  }
+  inline uint get_first_used_partition() const
+  {
+    return bitmap_get_first_set(&read_partitions);
+  }
+  inline uint get_next_used_partition(uint part_id) const
+  {
+    return bitmap_get_next_set(&read_partitions, part_id);
+  }
+  bool same_key_column_order(List<Create_field> *create_list);
+
 private:
   static int list_part_cmp(const void* a, const void* b);
   bool set_up_default_partitions(THD *thd, handler *file, HA_CREATE_INFO *info,
@@ -392,9 +415,11 @@ private:
                                  uint start_no);
   char *create_default_subpartition_name(THD *thd, uint subpart_no,
                                          const char *part_name);
+  // FIXME: prune_partition_bitmaps() is duplicate of set_read_partitions()
   bool prune_partition_bitmaps(TABLE_LIST *table_list);
   bool add_named_partition(const char *part_name, uint length);
 public:
+  bool set_read_partitions(List<char> *partition_names);
   bool has_unique_name(partition_element *element);
 
   bool vers_init_info(THD *thd);
@@ -475,8 +500,8 @@ public:
       DBUG_ASSERT(vers_info->initialized());
       part= vers_hist_part();
     }
-    max_time-= vers_stat_trx(STAT_TRX_END, part).min_time();
-    return max_time > vers_info->interval;
+    my_time_t min_time= vers_stat_trx(STAT_TRX_END, part).min_time();
+    return max_time - min_time > vers_info->interval;
   }
   bool vers_interval_exceed(partition_element *part)
   {
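The corrected predicate above compares the age span of the history partition (max_time minus the oldest end-timestamp) against the configured rotation interval instead of mutating max_time in place. A hedged, self-contained sketch of that rotation test, with plain Unix timestamps standing in for the server's my_time_t and hypothetical parameter names:

// Illustration of the interval-exceeded rotation test; not the server code.
#include <ctime>
#include <iostream>

static bool vers_interval_exceed(std::time_t max_time,   // newest end-timestamp seen
                                 std::time_t min_time,   // oldest end-timestamp in partition
                                 std::time_t interval)   // configured INTERVAL in seconds
{
  // Rotate when the span covered by the history partition grows past the interval.
  return max_time - min_time > interval;
}

int main()
{
  std::time_t now= std::time(nullptr);
  std::cout << vers_interval_exceed(now, now - 7200, 3600) << std::endl;  // prints 1
  return 0;
}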
@@ -486,15 +511,31 @@ public:
   {
     return vers_interval_exceed(vers_hist_part());
   }
+  bool vers_trx_id_to_ts(THD *thd, Field *in_trx_id, Field_timestamp &out_ts);
   void vers_update_stats(THD *thd, partition_element *el)
   {
     DBUG_ASSERT(vers_info && vers_info->initialized());
     DBUG_ASSERT(table && table->s);
     DBUG_ASSERT(el && el->type == partition_element::VERSIONING);
+    bool updated;
     mysql_rwlock_wrlock(&table->s->LOCK_stat_serial);
     el->empty= false;
-    bool updated=
-      vers_stat_trx(STAT_TRX_END, el->id).update(table->vers_end_field());
+    if (table->versioned_by_engine())
+    {
+      // transaction is not yet pushed to VTQ, so we use now-time
+      my_time_t end_ts= my_time(0);
+
+      uchar buf[8];
+      Field_timestampf fld(buf, NULL, 0, Field::NONE, table->vers_end_field()->field_name, NULL, 6);
+      fld.store_TIME(end_ts, 0);
+      updated=
+        vers_stat_trx(STAT_TRX_END, el->id).update(&fld);
+    }
+    else
+    {
+      updated=
+        vers_stat_trx(STAT_TRX_END, el->id).update(table->vers_end_field());
+    }
     if (updated)
       table->s->stat_serial++;
     mysql_rwlock_unlock(&table->s->LOCK_stat_serial);
3746  sql/partitioning/partition_handler.cc  Normal file  (file diff suppressed because it is too large)
1113  sql/partitioning/partition_handler.h   Normal file  (file diff suppressed because it is too large)
@@ -7523,8 +7523,17 @@ WARN_VERS_PARAMETERS
 WARN_VERS_PART_ROTATION
         eng "Switching from partition %`s to %`s"
+
+WARN_VERS_TRX_MISSING
+        eng "VTQ missing transaction ID %lu"
+
+WARN_VERS_PART_NON_HISTORICAL
+        eng "Partition %`s contains non-historical data"
+
 ER_VERS_NOT_ALLOWED
         eng "%`s is not allowed for versioned table"
 
 ER_VERS_WRONG_QUERY_TYPE
         eng "%`s works only with %`s query type"
+
+ER_WRONG_TABLESPACE_NAME 42000
+        eng "Incorrect tablespace name `%-.192s`"
@@ -67,6 +67,7 @@
 #include "opt_range.h"                // store_key_image_to_rec
 #include "sql_alter.h"                // Alter_table_ctx
 #include "sql_select.h"
+#include "sql_tablespace.h"           // check_tablespace_name
 
 #include <algorithm>
 using std::max;
@@ -3458,7 +3459,10 @@ int vers_get_partition_id(partition_info *part_info,
     {
       table->s->busy_rotation= true;
       mysql_mutex_unlock(&table->s->LOCK_rotation);
-      if (part_info->vers_limit_exceed() || part_info->vers_interval_exceed(sys_trx_end->get_timestamp()))
+      // transaction is not yet pushed to VTQ, so we use now-time
+      my_time_t end_ts= sys_trx_end->table->versioned_by_engine() ?
+        my_time(0) : sys_trx_end->get_timestamp();
+      if (part_info->vers_limit_exceed() || part_info->vers_interval_exceed(end_ts))
       {
         part_info->vers_part_rotate(thd);
       }
@@ -7388,6 +7392,39 @@ err:
 }
 #endif
+
+
+/*
+  Prepare for calling val_int on partition function by setting fields to
+  point to the record where the values of the PF-fields are stored.
+
+  SYNOPSIS
+    set_field_ptr()
+    ptr                 Array of fields to change ptr
+    new_buf             New record pointer
+    old_buf             Old record pointer
+
+  DESCRIPTION
+    Set ptr in field objects of field array to refer to new_buf record
+    instead of previously old_buf. Used before calling val_int and after
+    it is used to restore pointers to table->record[0].
+    This routine is placed outside of partition code since it can be useful
+    also for other programs.
+*/
+
+void set_field_ptr(Field **ptr, const uchar *new_buf,
+                   const uchar *old_buf)
+{
+  my_ptrdiff_t diff= (new_buf - old_buf);
+  DBUG_ENTER("set_field_ptr");
+
+  do
+  {
+    (*ptr)->move_field_offset(diff);
+  } while (*(++ptr));
+  DBUG_VOID_RETURN;
+}
+
+
 /*
   Prepare for calling val_int on partition function by setting fields to
   point to the record where the values of the PF-fields are stored.
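set_field_ptr() works because each Field object keeps a raw pointer into a record buffer; shifting every field by the byte difference between two buffers retargets the whole field array without copying any data, and the same call with swapped arguments restores it. A standalone sketch of the pointer-offset trick (illustrative Field type, not the server's; note the cross-buffer pointer arithmetic mirrors the server code and is for illustration only):

// Standalone illustration of the record-buffer pointer shift.
#include <cstddef>
#include <cstdio>

struct Field
{
  const char* ptr;                                   // points into a record buffer
  void move_field_offset(std::ptrdiff_t diff) { ptr+= diff; }
};

// Shift a NULL-terminated field array from old_buf to new_buf.
static void set_field_ptr(Field** fields, const char* new_buf, const char* old_buf)
{
  std::ptrdiff_t diff= new_buf - old_buf;
  do
  {
    (*fields)->move_field_offset(diff);
  } while (*(++fields));
}

int main()
{
  char rec0[8]= "old";
  char rec1[8]= "new";
  Field f= { rec0 };
  Field* arr[]= { &f, nullptr };
  set_field_ptr(arr, rec1, rec0);   // f.ptr now points into rec1
  std::printf("%s\n", f.ptr);       // prints "new"
  return 0;
}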
@@ -7426,6 +7463,61 @@ void set_key_field_ptr(KEY *key_info, const uchar *new_buf,
 }
+
+
+/**
+  Append all fields in read_set to string
+
+  @param[in,out] str    String to append to.
+  @param[in]     row    Row to append.
+  @param[in]     table  Table containing read_set and fields for the row.
+*/
+void append_row_to_str(String &str, const uchar *row, TABLE *table)
+{
+  Field **fields, **field_ptr;
+  const uchar *rec;
+  uint num_fields= bitmap_bits_set(table->read_set);
+  uint curr_field_index= 0;
+  bool is_rec0= !row || row == table->record[0];
+  if (!row)
+    rec= table->record[0];
+  else
+    rec= row;
+
+  /* Create a new array of all read fields. */
+  fields= (Field**) my_malloc(sizeof(void*) * (num_fields + 1),
+                              MYF(0));
+  if (!fields)
+    return;
+  fields[num_fields]= NULL;
+  for (field_ptr= table->field;
+       *field_ptr;
+       field_ptr++)
+  {
+    if (!bitmap_is_set(table->read_set, (*field_ptr)->field_index))
+      continue;
+    fields[curr_field_index++]= *field_ptr;
+  }
+
+  if (!is_rec0)
+    set_field_ptr(fields, rec, table->record[0]);
+
+  for (field_ptr= fields;
+       *field_ptr;
+       field_ptr++)
+  {
+    Field *field= *field_ptr;
+    str.append(" ");
+    str.append(field->field_name);
+    str.append(":");
+    field_unpack(&str, field, rec, 0, false);
+  }
+
+  if (!is_rec0)
+    set_field_ptr(fields, table->record[0], rec);
+  my_free(fields);
+}
+
+
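append_row_to_str() is a debugging helper: it walks only the columns whose bits are set in table->read_set and appends "name:value" pairs, temporarily retargeting the field pointers when the row is not record[0]. A rough standalone equivalent of the read_set filtering (illustrative container types, not the server API):

// Standalone illustration of read_set-driven row formatting.
#include <bitset>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct Column { std::string name; std::string value; };

// Append " name:value" for every column whose bit is set in read_set.
static std::string append_row_to_str(const std::vector<Column>& row,
                                     const std::bitset<64>& read_set)
{
  std::string out;
  for (std::size_t i= 0; i < row.size(); i++)
  {
    if (!read_set.test(i))
      continue;
    out+= " " + row[i].name + ":" + row[i].value;
  }
  return out;
}

int main()
{
  std::vector<Column> row= { {"id", "1"}, {"x", "10"}, {"y", "20"} };
  std::bitset<64> read_set;
  read_set.set(0); read_set.set(2);                  // only id and y are read
  std::cout << append_row_to_str(row, read_set) << std::endl;  // " id:1 y:20"
  return 0;
}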
 /*
   SYNOPSIS
     mem_alloc_error()
@@ -8595,4 +8687,52 @@ uint get_partition_field_store_length(Field *field)
     store_length+= HA_KEY_BLOB_LENGTH;
   return store_length;
 }
+
+// FIXME: duplicate of ha_partition::set_up_table_before_create
+bool set_up_table_before_create(THD *thd,
+                                TABLE_SHARE *share,
+                                const char *partition_name_with_path,
+                                HA_CREATE_INFO *info,
+                                partition_element *part_elem)
+{
+  bool error= false;
+  const char *partition_name;
+  DBUG_ENTER("set_up_table_before_create");
+
+  DBUG_ASSERT(part_elem);
+
+  if (!part_elem)
+    DBUG_RETURN(true);
+  share->max_rows= part_elem->part_max_rows;
+  share->min_rows= part_elem->part_min_rows;
+  partition_name= strrchr(partition_name_with_path, FN_LIBCHAR);
+  if ((part_elem->index_file_name &&
+      (error= append_file_to_dir(thd,
+                                 const_cast<const char**>(&part_elem->index_file_name),
+                                 partition_name+1))) ||
+      (part_elem->data_file_name &&
+      (error= append_file_to_dir(thd,
+                                 const_cast<const char**>(&part_elem->data_file_name),
+                                 partition_name+1))))
+  {
+    DBUG_RETURN(error);
+  }
+  if (part_elem->index_file_name != NULL)
+  {
+    info->index_file_name= part_elem->index_file_name;
+  }
+  if (part_elem->data_file_name != NULL)
+  {
+    info->data_file_name= part_elem->data_file_name;
+  }
+  if (part_elem->tablespace_name != NULL)
+  {
+    if (check_tablespace_name(part_elem->tablespace_name) != IDENT_NAME_OK)
+    {
+      DBUG_RETURN(true);
+    }
+    info->tablespace= part_elem->tablespace_name;
+  }
+  DBUG_RETURN(error);
+}
 #endif
@@ -40,6 +40,7 @@ typedef struct st_key_range key_range;
 #define HA_CAN_UPDATE_PARTITION_KEY (1 << 1)
 #define HA_CAN_PARTITION_UNIQUE (1 << 2)
 #define HA_USE_AUTO_PARTITION (1 << 3)
+#define HA_ONLY_VERS_PARTITION (1 << 4)
 
 #define NORMAL_PART_NAME 0
 #define TEMP_PART_NAME 1
@@ -127,6 +128,14 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info,
 bool check_part_func_fields(Field **ptr, bool ok_with_charsets);
 bool field_is_partition_charset(Field *field);
 Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs);
+/**
+  Append all fields in read_set to string
+
+  @param[in,out] str    String to append to.
+  @param[in]     row    Row to append.
+  @param[in]     table  Table containing read_set and fields for the row.
+*/
+void append_row_to_str(String &str, const uchar *row, TABLE *table);
 void mem_alloc_error(size_t size);
 void truncate_partition_filename(char *path);
 
@@ -291,6 +300,31 @@ void create_subpartition_name(char *out, const char *in1,
 void set_key_field_ptr(KEY *key_info, const uchar *new_buf,
                        const uchar *old_buf);
+
+/** Set up table for creating a partition.
+Copy info from partition to the table share so the created partition
+has the correct info.
+@param thd       THD object
+@param share     Table share to be updated.
+@param info      Create info to be updated.
+@param part_elem partition_element containing the info.
+
+@return status
+  @retval TRUE  Error
+  @retval FALSE Success
+
+@details
+Set up
+1) Comment on partition
+2) MAX_ROWS, MIN_ROWS on partition
+3) Index file name on partition
+4) Data file name on partition
+*/
+bool set_up_table_before_create(THD *thd,
+                                TABLE_SHARE *share,
+                                const char *partition_name_with_path,
+                                HA_CREATE_INFO *info,
+                                partition_element *part_elem);
+
 extern const LEX_STRING partition_keywords[];
 
 #endif /* SQL_PARTITION_INCLUDED */
@@ -4492,7 +4492,10 @@ handler *mysql_create_frm_image(THD *thd,
     part_info->part_info_string= part_syntax_buf;
     part_info->part_info_len= syntax_len;
     if ((!(engine_type->partition_flags &&
-           engine_type->partition_flags() & HA_CAN_PARTITION)) ||
+           ((engine_type->partition_flags() & HA_CAN_PARTITION) ||
+            (part_info->part_type == VERSIONING_PARTITION &&
+             engine_type->partition_flags() & HA_ONLY_VERS_PARTITION))
+           )) ||
         create_info->db_type == partition_hton)
     {
       /*
@@ -22,6 +22,70 @@
 #include "sql_table.h"   // write_bin_log
 #include "sql_class.h"   // THD
+
+/**
+  Check if tablespace name is valid
+
+  @param tablespace_name  Name of the tablespace
+
+  @note Tablespace names are not reflected in the file system, so
+        character case conversion or consideration is not relevant.
+
+  @note Checking for path characters or ending space is not done.
+        The only checks are for identifier length, both in terms of
+        number of characters and number of bytes.
+
+  @retval IDENT_NAME_OK        Identifier name is ok (Success)
+  @retval IDENT_NAME_WRONG     Identifier name is wrong, if length == 0
+                               (ER_WRONG_TABLESPACE_NAME)
+  @retval IDENT_NAME_TOO_LONG  Identifier name is too long if it is greater
+                               than 64 characters (ER_TOO_LONG_IDENT)
+
+  @note In case of IDENT_NAME_TOO_LONG or IDENT_NAME_WRONG, the function
+        reports an error (using my_error()).
+*/
+
+enum_ident_name_check check_tablespace_name(const char *tablespace_name)
+{
+  size_t name_length= 0;          //< Length as number of bytes
+  size_t name_length_symbols= 0;  //< Length as number of symbols
+
+  // Name must be != NULL and length must be > 0
+  if (!tablespace_name || (name_length= strlen(tablespace_name)) == 0)
+  {
+    my_error(ER_WRONG_TABLESPACE_NAME, MYF(0), tablespace_name);
+    return IDENT_NAME_WRONG;
+  }
+
+  // If we do not have too many bytes, we must check the number of symbols,
+  // provided the system character set may use more than one byte per symbol.
+  if (name_length <= NAME_LEN && use_mb(system_charset_info))
+  {
+    const char *name= tablespace_name;    //< The actual tablespace name
+    const char *end= name + name_length;  //< Pointer to first byte after name
+
+    // Loop over all symbols as long as we don't have too many already
+    while (name != end && name_length_symbols <= NAME_CHAR_LEN)
+    {
+      int len= my_ismbchar(system_charset_info, name, end);
+      if (len)
+        name += len;
+      else
+        name++;
+
+      name_length_symbols++;
+    }
+  }
+
+  if (name_length_symbols > NAME_CHAR_LEN || name_length > NAME_LEN)
+  {
+    my_error(ER_TOO_LONG_IDENT, MYF(0), tablespace_name);
+    return IDENT_NAME_TOO_LONG;
+  }
+
+  return IDENT_NAME_OK;
+}
+
+
 int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info)
 {
   int error= HA_ADMIN_NOT_IMPLEMENTED;
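check_tablespace_name() enforces two limits at once: at most NAME_LEN bytes and at most NAME_CHAR_LEN (64) characters, which differ whenever the system character set is multibyte. A hedged standalone sketch of the same double limit for UTF-8 input; the constants and the byte-counting helper here are illustrative, not the server's:

// Standalone illustration of a bytes-plus-characters identifier length check.
#include <cstddef>
#include <cstring>
#include <iostream>

enum ident_check { IDENT_OK, IDENT_WRONG, IDENT_TOO_LONG };

static const std::size_t NAME_CHAR_LEN= 64;      // max identifier length in characters
static const std::size_t NAME_BYTE_LEN= 64 * 3;  // max identifier length in bytes (utf8mb3-style)

// Count UTF-8 characters by skipping continuation bytes (0b10xxxxxx).
static std::size_t utf8_length(const char* s, std::size_t bytes)
{
  std::size_t chars= 0;
  for (std::size_t i= 0; i < bytes; i++)
    if ((static_cast<unsigned char>(s[i]) & 0xC0) != 0x80)
      chars++;
  return chars;
}

static ident_check check_name(const char* name)
{
  std::size_t bytes= name ? std::strlen(name) : 0;
  if (bytes == 0)
    return IDENT_WRONG;                            // empty or NULL name
  if (bytes > NAME_BYTE_LEN || utf8_length(name, bytes) > NAME_CHAR_LEN)
    return IDENT_TOO_LONG;
  return IDENT_OK;
}

int main()
{
  std::cout << check_name("ts1") << " " << check_name("") << std::endl;  // 0 1
  return 0;
}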
@@ -19,6 +19,41 @@
 class THD;
 class st_alter_tablespace;
+
+/**
+  Enumerate possible status of a identifier name while determining
+  its validity
+*/
+enum enum_ident_name_check
+{
+  IDENT_NAME_OK,
+  IDENT_NAME_WRONG,
+  IDENT_NAME_TOO_LONG
+};
+
+/**
+  Check if tablespace name is valid
+
+  @param tablespace_name  Name of the tablespace
+
+  @note Tablespace names are not reflected in the file system, so
+        character case conversion or consideration is not relevant.
+
+  @note Checking for path characters or ending space is not done.
+        The only checks are for identifier length, both in terms of
+        number of characters and number of bytes.
+
+  @retval IDENT_NAME_OK        Identifier name is ok (Success)
+  @retval IDENT_NAME_WRONG     Identifier name is wrong, if length == 0
+                               (ER_WRONG_TABLESPACE_NAME)
+  @retval IDENT_NAME_TOO_LONG  Identifier name is too long if it is greater
+                               than 64 characters (ER_TOO_LONG_IDENT)
+
+  @note In case of IDENT_NAME_TOO_LONG or IDENT_NAME_WRONG, the function
+        reports an error (using my_error()).
+*/
+
+enum_ident_name_check check_tablespace_name(const char *tablespace_name);
+
 int mysql_alter_tablespace(THD* thd, st_alter_tablespace *ts_info);
 
 #endif /* SQL_TABLESPACE_INCLUDED */
14  sql/table.cc
@@ -3261,6 +3261,20 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
     }
     outparam->part_info->is_auto_partitioned= share->auto_partitioned;
     DBUG_PRINT("info", ("autopartitioned: %u", share->auto_partitioned));
+    if (outparam->part_info->part_type == VERSIONING_PARTITION &&
+        share->db_type()->vers_upgrade_handler)
+    {
+      outparam->file= share->db_type()->vers_upgrade_handler(
+        outparam->file, &outparam->mem_root);
+      if (!outparam->file)
+      {
+        thd->stmt_arena= backup_stmt_arena_ptr;
+        thd->restore_active_arena(&part_func_arena, &backup_arena);
+        my_error(ER_OUTOFMEMORY, MYF(0), 4095);
+        error_reported= TRUE;
+        goto err;
+      }
+    }
     /*
       We should perform the fix_partition_func in either local or
       caller's arena depending on work_part_info_used value.
@@ -74,7 +74,7 @@ SET(INNOBASE_SOURCES
   gis/gis0sea.cc
   fts/fts0plugin.cc
   handler/ha_innodb.cc
-# handler/ha_innopart.cc
+  handler/ha_innopart.cc
   handler/handler0alter.cc
   handler/i_s.cc
   ibuf/ibuf0ibuf.cc
@@ -147,10 +147,7 @@ TABLE *open_purge_table(THD *thd, const char *db, size_t dblen,
 #include <sstream>
 
 /* for ha_innopart, Native InnoDB Partitioning. */
-/* JAN: TODO: MySQL 5.7 Native InnoDB Partitioning */
-#ifdef HAVE_HA_INNOPART_H
 #include "ha_innopart.h"
-#endif
 
 #include <mysql/plugin.h>
 #include <mysql/service_wsrep.h>
@@ -1622,6 +1619,22 @@ innobase_create_handler(
   return(new (mem_root) ha_innobase(hton, table));
 }
+
+static
+handler*
+innobase_upgrade_handler(
+  handler*  hnd,
+  MEM_ROOT* mem_root)
+{
+  ha_innopart* file = new (mem_root) ha_innopart(
+    static_cast<ha_innobase *>(hnd));
+  if (file && file->init_partitioning(mem_root))
+  {
+    delete file;
+    return(NULL);
+  }
+  return file;
+}
+
 /* General functions */
 
 /** Check that a page_size is correct for InnoDB.
@@ -3737,10 +3750,7 @@ innobase_init_abort()
 /** Return partitioning flags. */
 static uint innobase_partition_flags()
 {
-  /* JAN: TODO: MYSQL 5.7
-  return(HA_CAN_EXCHANGE_PARTITION | HA_CANNOT_PARTITION_FK);
-  */
-  return (0);
+  return(HA_ONLY_VERS_PARTITION);
 }
 
 /** Deprecation message about InnoDB file format related parameters */
@@ -3893,6 +3903,7 @@ innobase_init(
   innobase_hton->vers_query_trx_id = vtq_query_trx_id;
   innobase_hton->vers_query_commit_ts = vtq_query_commit_ts;
   innobase_hton->vers_trx_sees = vtq_trx_sees;
+  innobase_hton->vers_upgrade_handler = innobase_upgrade_handler;
 
   innodb_remember_check_sysvar_funcs();
 
@@ -8522,6 +8533,7 @@ ha_innobase::write_row(
 
   trx_t*      trx = thd_to_trx(m_user_thd);
   TrxInInnoDB trx_in_innodb(trx);
+  ins_mode_t  vers_set_fields;
 
   if (trx_in_innodb.is_aborted()) {
 
@@ -8722,8 +8734,14 @@ no_commit:
 
   innobase_srv_conc_enter_innodb(m_prebuilt);
 
+  vers_set_fields = table->versioned() &&
+    (sql_command != SQLCOM_DELETE ||
+      (m_int_table_flags & HA_INNOPART_DISABLED_TABLE_FLAGS)) ?
+    ROW_INS_VERSIONED :
+    ROW_INS_NORMAL;
+
   /* Step-5: Execute insert graph that will result in actual insert. */
-  error = row_insert_for_mysql((byte*) record, m_prebuilt);
+  error = row_insert_for_mysql((byte*) record, m_prebuilt, vers_set_fields);
 
   DEBUG_SYNC(m_user_thd, "ib_after_row_insert");
 
@@ -9499,6 +9517,7 @@ ha_innobase::update_row(
 
   upd_t*      uvect = row_get_prebuilt_update_vector(m_prebuilt);
   ib_uint64_t autoinc;
+  bool        vers_set_fields;
 
   /* Build an update vector from the modified fields in the rows
   (uses m_upd_buf of the handle) */
@@ -9524,11 +9543,14 @@ ha_innobase::update_row(
 
   innobase_srv_conc_enter_innodb(m_prebuilt);
 
-  error = row_update_for_mysql((byte*) old_row, m_prebuilt);
+  vers_set_fields = m_prebuilt->upd_node->versioned &&
+    (m_int_table_flags & HA_INNOPART_DISABLED_TABLE_FLAGS);
+
+  error = row_update_for_mysql((byte*) old_row, m_prebuilt, vers_set_fields);
 
-  if (error == DB_SUCCESS && m_prebuilt->upd_node->versioned) {
+  if (error == DB_SUCCESS && vers_set_fields) {
     if (trx->id != static_cast<trx_id_t>(table->vers_start_field()->val_int()))
-      error = row_insert_for_mysql((byte*) old_row, m_prebuilt, true);
+      error = row_insert_for_mysql((byte*) old_row, m_prebuilt, ROW_INS_HISTORICAL);
   }
 
   if (error == DB_SUCCESS && autoinc) {
@@ -9644,11 +9666,13 @@ ha_innobase::delete_row(
 
   innobase_srv_conc_enter_innodb(m_prebuilt);
 
-  bool delete_history_row =
-    table->versioned() && !table->vers_end_field()->is_max();
+  bool vers_set_fields =
+    table->versioned() &&
+    (m_int_table_flags & HA_INNOPART_DISABLED_TABLE_FLAGS) &&
+    table->vers_end_field()->is_max();
 
   error = row_update_for_mysql(
-    (byte *)record, m_prebuilt, delete_history_row);
+    (byte *)record, m_prebuilt, vers_set_fields);
 
   innobase_srv_conc_exit_innodb(m_prebuilt);
 
@@ -14276,16 +14300,14 @@ These errors will abort the current query:
   case HA_ERR_QUERY_INTERRUPTED:
 For other error codes, the server will fall back to counting records. */
 
-#ifdef MYSQL_57_SELECT_COUNT_OPTIMIZATION
-int
-ha_innobase::records(
-/*==================*/
-  ha_rows*  num_rows) /*!< out: number of rows */
+ha_rows
+ha_innobase::records_new() /*!< out: number of rows */
 {
   DBUG_ENTER("ha_innobase::records()");
 
   dberr_t ret;
   ulint   n_rows = 0; /* Record count in this view */
+  ha_rows num_rows;
 
   update_thd();
 
@@ -14296,8 +14318,8 @@ ha_innobase::records(
       ER_TABLESPACE_DISCARDED,
       table->s->table_name.str);
 
-    *num_rows = HA_POS_ERROR;
-    DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
+    num_rows = HA_POS_ERROR;
+    DBUG_RETURN(num_rows);
 
   } else if (m_prebuilt->table->ibd_file_missing) {
     ib_senderrf(
@@ -14305,8 +14327,8 @@ ha_innobase::records(
       ER_TABLESPACE_MISSING,
       table->s->table_name.str);
 
-    *num_rows = HA_POS_ERROR;
-    DBUG_RETURN(HA_ERR_TABLESPACE_MISSING);
+    num_rows = HA_POS_ERROR;
+    DBUG_RETURN(num_rows);
 
   } else if (m_prebuilt->table->corrupted) {
     ib_errf(m_user_thd, IB_LOG_LEVEL_WARN,
@@ -14314,8 +14336,8 @@ ha_innobase::records(
       "Table '%s' is corrupt.",
       table->s->table_name.str);
 
-    *num_rows = HA_POS_ERROR;
-    DBUG_RETURN(HA_ERR_INDEX_CORRUPT);
+    num_rows = HA_POS_ERROR;
+    DBUG_RETURN(num_rows);
   }
 
   TrxInInnoDB trx_in_innodb(m_prebuilt->trx);
@@ -14330,8 +14352,8 @@ ha_innobase::records(
     m_prebuilt->trx, index);
 
   if (!m_prebuilt->index_usable) {
-    *num_rows = HA_POS_ERROR;
-    DBUG_RETURN(HA_ERR_TABLE_DEF_CHANGED);
+    num_rows = HA_POS_ERROR;
+    DBUG_RETURN(num_rows);
   }
 
   /* (Re)Build the m_prebuilt->mysql_template if it is null to use
@@ -14350,30 +14372,29 @@ ha_innobase::records(
   case DB_DEADLOCK:
   case DB_LOCK_TABLE_FULL:
   case DB_LOCK_WAIT_TIMEOUT:
-    *num_rows = HA_POS_ERROR;
-    DBUG_RETURN(convert_error_code_to_mysql(ret, 0, m_user_thd));
+    num_rows = HA_POS_ERROR;
+    DBUG_RETURN(num_rows);
   case DB_INTERRUPTED:
-    *num_rows = HA_POS_ERROR;
-    DBUG_RETURN(HA_ERR_QUERY_INTERRUPTED);
+    num_rows = HA_POS_ERROR;
+    DBUG_RETURN(num_rows);
   default:
     /* No other error besides the three below is returned from
     row_scan_index_for_mysql(). Make a debug catch. */
-    *num_rows = HA_POS_ERROR;
+    num_rows = HA_POS_ERROR;
     ut_ad(0);
-    DBUG_RETURN(-1);
+    DBUG_RETURN(num_rows);
   }
 
   m_prebuilt->trx->op_info = "";
 
   if (thd_killed(m_user_thd)) {
-    *num_rows = HA_POS_ERROR;
-    DBUG_RETURN(HA_ERR_QUERY_INTERRUPTED);
+    num_rows = HA_POS_ERROR;
+    DBUG_RETURN(num_rows);
  }
 
-  *num_rows= n_rows;
-  DBUG_RETURN(0);
+  num_rows= n_rows;
+  DBUG_RETURN(num_rows);
 }
-#endif /* MYSQL_57_SELECT_COUNT_OPTIMIZATION */
 
 /*********************************************************************//**
 Estimates the number of index records in a range.
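The rewritten counting function drops the int error code plus out-parameter convention in favour of returning the row count directly, with HA_POS_ERROR acting as the error sentinel that callers (including ha_innopart further down) test for. A small standalone sketch of that calling convention; the helper names and the failure simulation are hypothetical:

// Standalone illustration of the HA_POS_ERROR sentinel convention.
#include <cstdint>
#include <iostream>
#include <vector>

typedef uint64_t ha_rows;
static const ha_rows HA_POS_ERROR= ~(ha_rows) 0;   // error sentinel

// Pretend per-partition counter: a negative input simulates a failure.
static ha_rows records_for_partition(long long n)
{
  return n < 0 ? HA_POS_ERROR : (ha_rows) n;
}

// Sum partition counts, propagating the sentinel instead of an error code.
static ha_rows records(const std::vector<long long>& parts)
{
  ha_rows total= 0;
  for (long long p : parts)
  {
    ha_rows n= records_for_partition(p);
    if (n == HA_POS_ERROR)
      return HA_POS_ERROR;
    total+= n;
  }
  return total;
}

int main()
{
  std::cout << records({10, 20, 12}) << std::endl;               // 42
  std::cout << (records({10, -1}) == HA_POS_ERROR) << std::endl; // 1
  return 0;
}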
@@ -220,7 +220,7 @@ public:
   ha_rows estimate_rows_upper_bound();
 
   // JAN: TODO: MySQL 5.7
-  // int records(ha_rows* num_rows);
+  ha_rows records_new(); // FIXME: rename to records(), fix main.bug39022
 
   void update_create_info(HA_CREATE_INFO* create_info);
 
@@ -454,7 +454,7 @@ protected:
   void reset_template();
 
 protected:
-  inline void update_thd(THD* thd);
+  void update_thd(THD* thd);
   void update_thd();
 
   int general_fetch(uchar* buf, uint direction, uint match_mode);
@@ -54,6 +54,21 @@ Created Nov 22, 2013 Mattias Jonsson */
 #include "partition_info.h"
 #include "key.h"
+
+/********************************************************************//**
+Get the upper limit of the MySQL integral and floating-point type.
+@return maximum allowed value for the field */
+UNIV_INTERN
+ulonglong
+innobase_get_int_col_max_value(
+/*===========================*/
+  const Field*  field); /*!< in: MySQL field */
+
+static
+void set_my_errno(int err)
+{
+  errno = err;
+}
+
 #define INSIDE_HA_INNOPART_CC
 
 /* To be backwards compatible we also fold partition separator on windows. */
|
|||||||
innobase_build_v_templ(
|
innobase_build_v_templ(
|
||||||
table, ib_table,
|
table, ib_table,
|
||||||
m_table_parts[i]->vc_templ,
|
m_table_parts[i]->vc_templ,
|
||||||
NULL, true, name);
|
NULL, true);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -814,6 +829,25 @@ ha_innopart::ha_innopart(
   m_share = NULL;
 }
+
+ha_innopart::ha_innopart(
+  ha_innobase*  innobase)
+  :
+  ha_innobase(*innobase),
+  Partition_helper(this),
+  m_ins_node_parts(),
+  m_upd_node_parts(),
+  m_blob_heap_parts(),
+  m_trx_id_parts(),
+  m_row_read_type_parts(),
+  m_sql_stat_start_parts(),
+  m_pcur(),
+  m_clust_pcur(),
+  m_new_partitions()
+{
+  m_int_table_flags &= ~(HA_INNOPART_DISABLED_TABLE_FLAGS);
+  m_share = NULL;
+}
+
 /** Destruct ha_innopart handler. */
 ha_innopart::~ha_innopart()
 {}
@@ -845,7 +879,7 @@ ha_innopart::initialize_auto_increment(
 #ifndef DBUG_OFF
   if (table_share->tmp_table == NO_TMP_TABLE)
   {
-    mysql_mutex_assert_owner(m_part_share->auto_inc_mutex);
+    mysql_mutex_assert_owner(&m_part_share->auto_inc_mutex);
   }
 #endif
 
@@ -882,7 +916,7 @@ ha_innopart::initialize_auto_increment(
     my_error(ER_AUTOINC_READ_FAILED, MYF(0));
     error = HA_ERR_AUTOINC_READ_FAILED;
   } else {
-    ib_uint64_t col_max_value = field->get_max_int_value();
+    ib_uint64_t col_max_value = innobase_get_int_col_max_value(field);
 
     update_thd(ha_thd());
 
@@ -911,7 +945,6 @@ ha_innopart::initialize_auto_increment(
     }
   }
 
-done:
   m_part_share->next_auto_inc_val = auto_inc;
   m_part_share->auto_inc_initialized = true;
   return(error);
@@ -973,12 +1006,7 @@ share_error:
       || m_part_share->populate_partition_name_hash(m_part_info)) {
       goto share_error;
     }
-    if (m_part_share->auto_inc_mutex == NULL
-        && table->found_next_number_field != NULL) {
-      if (m_part_share->init_auto_inc_mutex(table_share)) {
-        goto share_error;
-      }
-    }
     unlock_shared_ha_data();
 
   /* Will be allocated if it is needed in ::update_row(). */
@@ -1098,7 +1126,7 @@ share_error:
     by printing a warning in addition to log a message
     in the errorlog. */
 
-    push_warning_printf(thd, Sql_condition::SL_WARNING,
+    push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
                         ER_NO_SUCH_INDEX,
                         "Table %s has a"
                         " primary key in InnoDB data"
@@ -1173,7 +1201,7 @@ share_error:
     by printing a warning in addition to log a message
     in the errorlog. */
 
-    push_warning_printf(thd, Sql_condition::SL_WARNING,
+    push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
                         ER_NO_SUCH_INDEX,
                         "InnoDB: Table %s has no"
                         " primary key in InnoDB data"
@@ -1288,7 +1316,7 @@ share_error:
   }
   info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
 
-  DBUG_RETURN(0);
+  DBUG_RETURN(init_record_priority_queue_for_parts(m_tot_parts));
 }
 
 /** Get a cloned ha_innopart handler.
@@ -1304,7 +1332,7 @@ ha_innopart::clone(
 
   DBUG_ENTER("ha_innopart::clone");
 
-  new_handler = dynamic_cast<ha_innopart*>(handler::clone(name,
+  new_handler = static_cast<ha_innopart*>(handler::clone(name,
               mem_root));
   if (new_handler != NULL) {
     ut_ad(new_handler->m_prebuilt != NULL);
@@ -1381,6 +1409,8 @@ ha_innopart::close()
     innobase_release_temporary_latches(ht, thd);
   }
 
+  destroy_record_priority_queue_for_parts();
+
   ut_ad(m_pcur_parts == NULL);
   ut_ad(m_clust_pcur_parts == NULL);
   close_partitioning();
@@ -1838,18 +1868,18 @@ ha_innopart::print_error(
 /** Can error be ignored.
 @param[in]  error Error code to check.
 @return true if ignorable else false. */
-bool
-ha_innopart::is_ignorable_error(
-  int error)
-{
-  if (ha_innobase::is_ignorable_error(error)
-      || error == HA_ERR_NO_PARTITION_FOUND
-      || error == HA_ERR_NOT_IN_LOCK_PARTITIONS) {
-
-    return(true);
-  }
-  return(false);
-}
+// bool
+// ha_innopart::is_ignorable_error(
+//   int error)
+// {
+//   if (ha_innobase::is_ignorable_error(error)
+//       || error == HA_ERR_NO_PARTITION_FOUND
+//       || error == HA_ERR_NOT_IN_LOCK_PARTITIONS) {
+//
+//     return(true);
+//   }
+//   return(false);
+// }
 
 /** Get the index for the current partition
 @param[in]  keynr MySQL index number.
@@ -1960,7 +1990,7 @@ ha_innopart::change_active_index(
         m_prebuilt->index->table->name.m_name);
 
       push_warning_printf(
-        m_user_thd, Sql_condition::SL_WARNING,
+        m_user_thd, Sql_condition::WARN_LEVEL_WARN,
         HA_ERR_INDEX_CORRUPT,
         "InnoDB: Index %s for table %s is"
         " marked as corrupted"
@@ -1969,7 +1999,7 @@ ha_innopart::change_active_index(
       DBUG_RETURN(HA_ERR_INDEX_CORRUPT);
     } else {
       push_warning_printf(
-        m_user_thd, Sql_condition::SL_WARNING,
+        m_user_thd, Sql_condition::WARN_LEVEL_WARN,
        HA_ERR_TABLE_DEF_CHANGED,
        "InnoDB: insufficient history for index %u",
        keynr);
@@ -2901,28 +2931,6 @@ int
 ha_innopart::extra(
   enum ha_extra_function  operation)
 {
-  if (operation == HA_EXTRA_SECONDARY_SORT_ROWID) {
-    /* index_init(sorted=true) must have been called! */
-    ut_ad(m_ordered);
-    ut_ad(m_ordered_rec_buffer != NULL);
-    /* No index_read call must have been done! */
-    ut_ad(m_queue->empty());
-
-    /* If not PK is set as secondary sort, do secondary sort by
-    rowid/ref. */
-
-    ut_ad(m_curr_key_info[1] != NULL
-          || m_prebuilt->clust_index_was_generated != 0
-          || m_curr_key_info[0]
-             == table->key_info + table->s->primary_key);
-
-    if (m_curr_key_info[1] == NULL
-        && m_prebuilt->clust_index_was_generated) {
-      m_ref_usage = Partition_helper::REF_USED_FOR_SORT;
-      m_queue->m_fun = key_and_rowid_cmp;
-    }
-    return(0);
-  }
   return(ha_innobase::extra(operation));
 }
 
@@ -3007,17 +3015,15 @@ ha_innopart::truncate()
 /** Total number of rows in all used partitions.
 Returns the exact number of records that this client can see using this
 handler object.
-@param[out] num_rows  Number of rows.
-@return 0 or error number. */
-int
-ha_innopart::records(
-  ha_rows*  num_rows)
+@return Number of rows. */
+ha_rows
+ha_innopart::records()
 {
+  ha_rows num_rows;
   ha_rows n_rows;
-  int err;
   DBUG_ENTER("ha_innopart::records()");
 
-  *num_rows = 0;
+  num_rows = 0;
 
   /* The index scan is probably so expensive, so the overhead
   of the rest of the function is neglectable for each partition.
@@ -3028,15 +3034,44 @@ ha_innopart::records(
        i = m_part_info->get_next_used_partition(i)) {
 
     set_partition(i);
-    err = ha_innobase::records(&n_rows);
+    n_rows = ha_innobase::records();
     update_partition(i);
-    if (err != 0) {
-      *num_rows = HA_POS_ERROR;
-      DBUG_RETURN(err);
+    if (n_rows == HA_POS_ERROR) {
+      DBUG_RETURN(HA_POS_ERROR);
     }
-    *num_rows += n_rows;
+    num_rows += n_rows;
  }
-  DBUG_RETURN(0);
+  DBUG_RETURN(num_rows);
+}
+
+ha_rows
+ha_innopart::part_recs_slow(void *_part_elem)
+{
+  partition_element *part_elem= reinterpret_cast<partition_element *>(_part_elem);
+  DBUG_ASSERT(m_part_info);
+  uint32 sub_factor= m_part_info->num_subparts ? m_part_info->num_subparts : 1;
+  uint32 part_id= part_elem->id * sub_factor;
+  uint32 part_id_end= part_id + sub_factor;
+  DBUG_ASSERT(part_id_end <= m_tot_parts);
+  ha_rows part_recs= 0;
+  uint last_part = m_last_part;
+  for (; part_id < part_id_end; ++part_id)
+  {
+    DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), part_id));
+    set_partition(part_id);
+    ha_rows n = ha_innobase::records_new();
+    update_partition(part_id);
+    if (n == HA_POS_ERROR) {
+      return HA_POS_ERROR;
+    }
+    part_recs += n;
+  }
+  if (m_last_part != last_part)
+  {
+    set_partition(last_part);
+    update_partition(last_part);
+  }
+  return part_recs;
 }
 
 /** Estimates the number of index records in a range.
@@ -3098,14 +3133,14 @@ ha_innopart::records_in_range(
     goto func_exit;
   }
 
-  heap = mem_heap_create(2 * (key->actual_key_parts * sizeof(dfield_t)
+  heap = mem_heap_create(2 * (key->user_defined_key_parts * sizeof(dfield_t)
             + sizeof(dtuple_t)));
 
-  range_start = dtuple_create(heap, key->actual_key_parts);
-  dict_index_copy_types(range_start, index, key->actual_key_parts);
+  range_start = dtuple_create(heap, key->user_defined_key_parts);
+  dict_index_copy_types(range_start, index, key->user_defined_key_parts);
 
-  range_end = dtuple_create(heap, key->actual_key_parts);
-  dict_index_copy_types(range_end, index, key->actual_key_parts);
+  range_end = dtuple_create(heap, key->user_defined_key_parts);
+  dict_index_copy_types(range_end, index, key->user_defined_key_parts);
 
   row_sel_convert_mysql_key_to_innobase(
     range_start,
@@ -3361,8 +3396,7 @@ ha_innopart::info_low(
         DBUG_RETURN(error);
       }
     }
-    set_if_bigger(stats.update_time,
-                  (ulong) ib_table->update_time);
+    set_if_bigger(stats.update_time, ib_table->update_time);
   }
 
   if (is_analyze || innobase_stats_on_metadata) {
@@ -3437,7 +3471,7 @@ ha_innopart::info_low(
 
       push_warning_printf(
         thd,
-        Sql_condition::SL_WARNING,
+        Sql_condition::WARN_LEVEL_WARN,
        ER_CANT_GET_STAT,
        "InnoDB: Trying to get the"
        " free space for partition %s"
@@ -3613,7 +3647,7 @@ ha_innopart::info_low(
 
       KEY*  key = &table->key_info[i];
       for (j = 0;
-           j < key->actual_key_parts;
+           j < key->user_defined_key_parts;
            j++) {
 
         if ((key->flags & HA_FULLTEXT) != 0) {
@@ -3638,32 +3672,6 @@ ha_innopart::info_low(
           break;
         }
 
-        /* innodb_rec_per_key() will use
-        index->stat_n_diff_key_vals[] and the value we
-        pass index->table->stat_n_rows. Both are
-        calculated by ANALYZE and by the background
-        stats gathering thread (which kicks in when too
-        much of the table has been changed). In
-        addition table->stat_n_rows is adjusted with
-        each DML (e.g. ++ on row insert). Those
-        adjustments are not MVCC'ed and not even
-        reversed on rollback. So,
-        index->stat_n_diff_key_vals[] and
-        index->table->stat_n_rows could have been
-        calculated at different time. This is
-        acceptable. */
-        const rec_per_key_t rec_per_key =
-          innodb_rec_per_key(
-            index, j,
-            max_rows);
-
-        key->set_records_per_key(j, rec_per_key);
-
-        /* The code below is legacy and should be
-        removed together with this comment once we
-        are sure the new floating point rec_per_key,
-        set via set_records_per_key(), works fine. */
-
         ulong rec_per_key_int = static_cast<ulong>(
           innodb_rec_per_key(index, j,
                  max_rows));
@@ -3851,7 +3859,7 @@ ha_innopart::repair(
   /* TODO: enable this warning to be clear about what is repaired.
   Currently disabled to generate smaller test diffs. */
 #ifdef ADD_WARNING_FOR_REPAIR_ONLY_PARTITION
-  push_warning_printf(thd, Sql_condition::SL_WARNING,
+  push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
                       ER_ILLEGAL_HA,
                       "Only moving rows from wrong partition to correct"
                       " partition is supported,"
@@ -23,6 +23,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
 #define ha_innopart_h
 
 #include "partitioning/partition_handler.h"
+#include "ha_partition.h"
 
 /* Forward declarations */
 class Altered_partitions;
@@ -185,14 +186,16 @@ truncate_partition.
 InnoDB specific functions related to partitioning is implemented here. */
 class ha_innopart:
   public ha_innobase,
-  public Partition_helper,
-  public Partition_handler
+  public Partition_helper
 {
 public:
   ha_innopart(
     handlerton*   hton,
     TABLE_SHARE*  table_arg);
 
+  ha_innopart(
+    ha_innobase*  innobase);
+
   ~ha_innopart();
 
   /** Clone this handler, used when needing more than one cursor
@@ -218,7 +221,7 @@ public:
   register_query_cache_table(
     THD*      thd,
     char*     table_key,
-    size_t    key_length,
+    uint      key_length,
     qc_engine_callback* call_back,
     ulonglong*    engine_data)
   {
@@ -350,9 +353,9 @@ public:
     int error,
     myf errflag);
 
-  bool
-  is_ignorable_error(
-    int error);
+  // bool
+  // is_ignorable_error(
+  //   int error);
 
   int
   start_stmt(
@@ -506,7 +509,7 @@ public:
   ft_init_ext_with_hints(
     uint    inx,
     String* key,
-    Ft_hints* hints)
+    void*   hints)
   {
     ut_ad(0);
     return(NULL);
@@ -596,13 +599,13 @@ public:
   /** See Partition_handler. */
   void
   get_dynamic_partition_info(
-    ha_statistics*  stat_info,
-    ha_checksum*    check_sum,
+    PARTITION_STATS*  stat_info,
     uint    part_id)
   {
+    ha_checksum check_sum;
     Partition_helper::get_dynamic_partition_info_low(
       stat_info,
-      check_sum,
+      &check_sum,
       part_id);
   }
 
@@ -614,18 +617,11 @@ public:
       | HA_FAST_CHANGE_PARTITION);
   }
 
-  Partition_handler*
-  get_partition_handler()
-  {
-    return(static_cast<Partition_handler*>(this));
-  }
-
   void
   set_part_info(
-    partition_info* part_info,
-    bool  early)
+    partition_info* part_info)
   {
-    Partition_helper::set_part_info_low(part_info, early);
+    Partition_helper::set_part_info_low(part_info, false);
   }
 
   void
@@ -752,10 +748,16 @@ private:
   /** Update active partition.
   Copies needed info from m_prebuilt into the partition specific memory.
   @param[in]  part_id Partition to set as active. */
-  void
+  virtual void
   update_partition(
     uint  part_id);
 
+  virtual handler* part_handler(uint32 part_id)
+  {
+    set_partition(part_id);
+    return this;
+  }
+
   /** Helpers needed by Partition_helper, @see partition_handler.h @{ */
 
   /** Set the autoinc column max value.
|
|||||||
delete_row(
|
delete_row(
|
||||||
const uchar* record)
|
const uchar* record)
|
||||||
{
|
{
|
||||||
|
ut_a(table);
|
||||||
|
if (table->versioned() && table->vers_end_field()->is_max()) {
|
||||||
|
int err = rnd_pos_by_record(const_cast<uchar *>(record));
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
trx_t* trx = thd_to_trx(ha_thd());
|
||||||
|
if (!trx->id)
|
||||||
|
trx_start_if_not_started_xa(trx, true);
|
||||||
|
ut_a(table->record[0] == record);
|
||||||
|
store_record(table, record[1]);
|
||||||
|
ut_a(trx->id);
|
||||||
|
table->vers_end_field()->store(trx->id, true);
|
||||||
|
return Partition_helper::ph_update_row(table->record[1], table->record[0]);
|
||||||
|
}
|
||||||
return(Partition_helper::ph_delete_row(record));
|
return(Partition_helper::ph_delete_row(record));
|
||||||
}
|
}
|
||||||
/** @} */
|
/** @} */
|
||||||
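For a versioned table the handler above turns DELETE of a current row into an UPDATE that closes the row's validity period: row_end is set to the deleting transaction's id and the old image stays behind as history. A schematic sketch of that soft-delete idea with illustrative, standalone types (not the handler API):

// Standalone illustration of versioned "delete by closing the row".
#include <cstdint>
#include <iostream>
#include <vector>

static const uint64_t TRX_ID_MAX= UINT64_MAX;   // "row is current" marker

struct Row { int id; int value; uint64_t row_start; uint64_t row_end; };

// Versioned delete: close the current row instead of physically removing it.
static void versioned_delete(std::vector<Row>& table, int id, uint64_t trx_id)
{
  for (Row& r : table)
    if (r.id == id && r.row_end == TRX_ID_MAX)
      r.row_end= trx_id;                        // row becomes history
}

int main()
{
  std::vector<Row> t= { {1, 10, 100, TRX_ID_MAX} };
  versioned_delete(t, 1, 200);
  std::cout << "row_end=" << t[0].row_end << std::endl;   // row_end=200
  return 0;
}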
@@ -1162,6 +1178,148 @@ private:
         deleted));
   }
 
+public:
+  /**
+  Truncate partitions.
+
+  Truncate all partitions matching table->part_info->read_partitions.
+  Handler level wrapper for truncating partitions, will ensure that
+  mark_trx_read_write() is called and also checks locking assertions.
+
+  @return Operation status.
+    @retval 0    Success.
+    @retval != 0 Error code.
+  */
+  int truncate_partition()
+  {
+    handler *file= get_handler();
+    if (!file)
+    {
+      return HA_ERR_WRONG_COMMAND;
+    }
+    DBUG_ASSERT(file->get_table_share()->tmp_table != NO_TMP_TABLE ||
+                file->get_lock_type() == F_WRLCK);
+    file->mark_trx_read_write();
+    return truncate_partition_low();
+  }
+  /**
+  Change partitions.
+
+  Change partitions according to their partition_element::part_state set up
+  in prep_alter_part_table(). Will create new partitions and copy requested
+  partitions there. Also updating part_state to reflect current state.
+
+  Handler level wrapper for changing partitions.
+  This is the reason for having Partition_handler a friend class of handler,
+  mark_trx_read_write() is called and also checks locking assertions.
+  to ensure that mark_trx_read_write() is called and checking the asserts.
+
+  @param[in]  create_info  Table create info.
+  @param[in]  path         Path including table name.
+  @param[out] copied       Number of rows copied.
+  @param[out] deleted      Number of rows deleted.
+  */
+  int change_partitions(HA_CREATE_INFO *create_info,
+                        const char *path,
+                        ulonglong * const copied,
+                        ulonglong * const deleted,
+                        const uchar *pack_frm_data,
+                        size_t pack_frm_len)
+  {
+    handler *file= get_handler();
+    if (!file)
+    {
+      my_error(ER_ILLEGAL_HA, MYF(0), create_info->alias);
+      return HA_ERR_WRONG_COMMAND;
+    }
+    DBUG_ASSERT(file->get_table_share()->tmp_table != NO_TMP_TABLE ||
                file->get_lock_type() != F_UNLCK);
+    file->mark_trx_read_write();
+    return change_partitions_low(create_info, path, copied, deleted);
+  }
+
+  // FIXME: duplicate of ha_partition::drop_partitions
+  int drop_partitions(const char *path)
+  {
+    List_iterator<partition_element> part_it(m_part_info->partitions);
+    char part_name_buff[FN_REFLEN];
+    uint num_parts= m_part_info->partitions.elements;
+    uint num_subparts= m_part_info->num_subparts;
+    uint i= 0;
+    uint name_variant;
+    int ret_error;
+    int error= 0;
+    DBUG_ENTER("ha_partition::drop_partitions");
+
+    /*
+      Assert that it works without HA_FILE_BASED and lower_case_table_name = 2.
+      We use m_file[0] as long as all partitions have the same storage engine.
+    */
+    DBUG_ASSERT(!strcmp(path, get_canonical_filename(this, path,
+                                                     part_name_buff)));
+    do
+    {
+      partition_element *part_elem= part_it++;
+      if (part_elem->part_state == PART_TO_BE_DROPPED)
+      {
+        handler *file = this;
+        /*
+          This part is to be dropped, meaning the part or all its subparts.
+        */
+        name_variant= NORMAL_PART_NAME;
+        if (m_is_sub_partitioned)
+        {
+          List_iterator<partition_element> sub_it(part_elem->subpartitions);
+          uint j= 0/*, part*/;
+          do
+          {
+            partition_element *sub_elem= sub_it++;
+            //part= i * num_subparts + j;
+            create_subpartition_name(part_name_buff, path,
+                                     part_elem->partition_name,
+                                     sub_elem->partition_name, name_variant);
+            // set_partition(part);
+            DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff));
+            if ((ret_error= file->ha_delete_table(part_name_buff)))
+              error= ret_error;
+            if (deactivate_ddl_log_entry(sub_elem->log_entry->entry_pos))
+              error= 1;
+            // update_partition(part);
+          } while (++j < num_subparts);
+        }
+        else
+        {
+          create_partition_name(part_name_buff, path,
+                                part_elem->partition_name, name_variant,
+                                TRUE);
+          // set_partition(i);
+          DBUG_PRINT("info", ("Drop partition %s", part_name_buff));
+          if ((ret_error= file->ha_delete_table(part_name_buff)))
+            error= ret_error;
+          if (deactivate_ddl_log_entry(part_elem->log_entry->entry_pos))
+            error= 1;
+          // update_partition(i);
+        }
+        if (part_elem->part_state == PART_IS_CHANGED)
|
||||||
|
part_elem->part_state= PART_NORMAL;
|
||||||
|
else
|
||||||
|
part_elem->part_state= PART_IS_DROPPED;
|
||||||
|
}
|
||||||
|
} while (++i < num_parts);
|
||||||
|
(void) sync_ddl_log();
|
||||||
|
DBUG_RETURN(error);
|
||||||
|
}
|
||||||
|
|
||||||
|
int rename_partitions(const char *path)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
virtual ha_rows
|
||||||
|
part_recs_slow(void *_part_elem);
|
||||||
|
|
||||||
|
|
||||||
|
private:
|
||||||
/** Access methods to protected areas in handler to avoid adding
|
/** Access methods to protected areas in handler to avoid adding
|
||||||
friend class Partition_helper in class handler.
|
friend class Partition_helper in class handler.
|
||||||
@see partition_handler.h @{ */
|
@see partition_handler.h @{ */
|
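truncate_partition() and change_partitions() above follow the same handler-level wrapper shape: check that a handler is available, assert the expected lock state, call mark_trx_read_write(), then delegate to the engine-specific *_low() hook. A compilable sketch of that shape, with made-up stand-ins for the handler state:

#include <iostream>

struct partition_wrapper_stub {
	bool have_handler   = true;   // stands in for get_handler() returning non-NULL
	bool write_locked   = true;   // stands in for the F_WRLCK / !F_UNLCK assertions
	bool trx_read_write = false;

	virtual ~partition_wrapper_stub() = default;

	// engine-specific part, overridden by the storage engine in the real code
	virtual int truncate_partition_low() { return 0; }

	int truncate_partition() {
		if (!have_handler) return -1;     // HA_ERR_WRONG_COMMAND in the hunk above
		if (!write_locked) return -1;     // the real code asserts instead of returning
		trx_read_write = true;            // stands in for mark_trx_read_write()
		return truncate_partition_low();  // delegate the actual work
	}
};

int main() {
	partition_wrapper_stub w;
	int err = w.truncate_partition();
	std::cout << err << " " << w.trx_read_write << "\n";  // prints: 0 1
}

Keeping the generic bookkeeping in the wrapper is what lets each storage engine implement only the *_low() part.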
@@ -1221,9 +1379,8 @@ protected:
 uchar* record,
 uchar* pos);

-int
-records(
-ha_rows* num_rows);
+ha_rows
+records();

 int
 index_next(
@@ -64,10 +64,9 @@ static const char *MSG_UNSUPPORTED_ALTER_ONLINE_ON_VIRTUAL_COLUMN=
 "combined with other ALTER TABLE actions";

 /* For supporting Native InnoDB Partitioning. */
-/* JAN: TODO: MySQL 5.7
 #include "partition_info.h"
 #include "ha_innopart.h"
-*/
 /** Operations for creating secondary indexes (no rebuild needed) */
 static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ONLINE_CREATE
 = Alter_inplace_info::ADD_INDEX
@@ -9230,9 +9229,6 @@ ha_innopart::prepare_inplace_alter_table(
 ctx_parts->prebuilt_array[i] = tmp_prebuilt;
 }

-const char* save_tablespace =
-ha_alter_info->create_info->tablespace;
-
 const char* save_data_file_name =
 ha_alter_info->create_info->data_file_name;

@@ -9242,15 +9238,6 @@ ha_innopart::prepare_inplace_alter_table(
 ha_alter_info->handler_ctx = ctx_parts->ctx_array[i];
 set_partition(i);

-/* Set the tablespace and data_file_name value of the
-alter_info to the tablespace value and data_file_name
-value that was existing for the partition originally,
-so that for ALTER TABLE the tablespace clause in create
-option is ignored for existing partitions, and later
-set it back to its old value */
-
-ha_alter_info->create_info->tablespace =
-m_prebuilt->table->tablespace;
 ha_alter_info->create_info->data_file_name =
 m_prebuilt->table->data_dir_path;

@@ -9266,7 +9253,6 @@ ha_innopart::prepare_inplace_alter_table(
 m_prebuilt_ptr = &m_prebuilt;
 ha_alter_info->handler_ctx = ctx_parts;
 ha_alter_info->group_commit_ctx = ctx_parts->ctx_array;
-ha_alter_info->create_info->tablespace = save_tablespace;
 ha_alter_info->create_info->data_file_name = save_data_file_name;
 DBUG_RETURN(res);
 }
@@ -40,13 +40,14 @@ class THD;
 // JAN: TODO missing features:
 #undef MYSQL_57_SELECT_COUNT_OPTIMIZATION
 #undef MYSQL_FT_INIT_EXT
-#undef MYSQL_INNODB_PARTITIONING
 #undef MYSQL_PFS
 #undef MYSQL_RENAME_INDEX
 #undef MYSQL_REPLACE_TRX_IN_THD
 #undef MYSQL_SPATIAL_INDEX
 #undef MYSQL_STORE_FTS_DOC_ID

+#define MYSQL_INNODB_PARTITIONING
+
 /*******************************************************************//**
 Formats the raw data in "data" (in InnoDB on-disk format) that is of
 type DATA_(CHAR|VARCHAR|MYSQL|VARMYSQL) using "charset_coll" and writes
@@ -234,6 +234,15 @@ row_lock_table_for_mysql(
 (ignored if table==NULL) */
 MY_ATTRIBUTE((nonnull(1)));

+/* System Versioning: row_insert_for_mysql() modes */
+enum ins_mode_t {
+ROW_INS_NORMAL = 0,
+// insert versioned row: sys_trx_start = TRX_ID, sys_trx_end = MAX
+ROW_INS_VERSIONED,
+// insert historical row: sys_trx_end = TRX_ID
+ROW_INS_HISTORICAL
+};
+
 /** Does an insert for MySQL.
 @param[in] mysql_rec row in the MySQL format
 @param[in,out] prebuilt prebuilt struct in MySQL handle
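The new ins_mode_t above replaces the old bool historical parameter with three explicit insert modes. A small caller-side sketch; the enum mirrors the hunk, while pick_mode() and row_insert_stub() are illustrative stand-ins rather than server functions:

#include <iostream>

// Mirrors the enum added in the hunk above.
enum ins_mode_t {
	ROW_INS_NORMAL = 0,
	ROW_INS_VERSIONED,   // sys_trx_start = TRX_ID, sys_trx_end = MAX
	ROW_INS_HISTORICAL   // sys_trx_end = TRX_ID
};

// Illustrative stand-in for the real row_insert_for_mysql() call.
int row_insert_stub(ins_mode_t mode) { return static_cast<int>(mode); }

ins_mode_t pick_mode(bool table_versioned, bool writing_history_row) {
	if (!table_versioned) return ROW_INS_NORMAL;
	return writing_history_row ? ROW_INS_HISTORICAL : ROW_INS_VERSIONED;
}

int main() {
	std::cout << row_insert_stub(pick_mode(true, false)) << "\n";  // prints: 1 (ROW_INS_VERSIONED)
}

An enum makes the versioned and historical cases mutually exclusive at the call site, instead of encoding them in a defaulted bool.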
@@ -242,9 +251,7 @@ dberr_t
 row_insert_for_mysql(
 const byte* mysql_rec,
 row_prebuilt_t* prebuilt,
-bool historical
-/*!< in: System Versioning, row is */
-= false) /* historical */
+ins_mode_t ins_mode)
 MY_ATTRIBUTE((warn_unused_result));

 /*********************************************************************//**
@@ -281,7 +288,7 @@ dberr_t
 row_update_for_mysql(
 const byte* mysql_rec,
 row_prebuilt_t* prebuilt,
-bool delete_history_row = false)
+bool vers_set_fields)
 MY_ATTRIBUTE((warn_unused_result));

 /** This can only be used when srv_locks_unsafe_for_binlog is TRUE or this
@@ -59,7 +59,11 @@ public:
 @param[in] id ID of the latch to track */
 Context(latch_id_t id)
 :
-latch_t(id)
+latch_t(id),
+m_mutex(),
+m_filename(),
+m_line(),
+m_thread_id(os_thread_id_t(ULINT_UNDEFINED))
 {
 /* No op */
 }
@@ -1407,7 +1407,7 @@ dberr_t
 row_insert_for_mysql(
 const byte* mysql_rec,
 row_prebuilt_t* prebuilt,
-bool historical)
+ins_mode_t ins_mode)
 {
 trx_savept_t savept;
 que_thr_t* thr;
@@ -1487,14 +1487,15 @@ row_insert_for_mysql(
 row_mysql_convert_row_to_innobase(node->row, prebuilt, mysql_rec,
 &blob_heap);

-if (DICT_TF2_FLAG_IS_SET(node->table, DICT_TF2_VERSIONED)) {
+if (ins_mode != ROW_INS_NORMAL)
+{
 ut_ad(table->vers_row_start != table->vers_row_end);
 /* Return back modified fields into mysql_rec, so that
 upper logic may benefit from it (f.ex. 'on duplicate key'). */
 const mysql_row_templ_t* t = &prebuilt->mysql_template[table->vers_row_end];
 ut_ad(t->mysql_col_len == 8);

-if (historical) {
+if (ins_mode == ROW_INS_HISTORICAL) {
 set_tuple_col_8(node->row, table->vers_row_end, trx->id, node->entry_sys_heap);
 int8store(&mysql_rec[t->mysql_col_offset], trx->id);
 }
@@ -1860,7 +1861,7 @@ dberr_t
 row_update_for_mysql_using_upd_graph(
 const byte* mysql_rec,
 row_prebuilt_t* prebuilt,
-bool delete_history_row)
+bool vers_set_fields)
 {
 trx_savept_t savept;
 dberr_t err;
@@ -1993,8 +1994,7 @@ row_update_for_mysql_using_upd_graph(
 thr->fk_cascade_depth = 0;

 run_again:
-if (DICT_TF2_FLAG_IS_SET(node->table, DICT_TF2_VERSIONED) &&
-(node->is_delete || node->versioned) && !delete_history_row)
+if (vers_set_fields)
 {
 /* System Versioning: modify update vector to set
 sys_trx_start (or sys_trx_end in case of DELETE)
@@ -2130,6 +2130,8 @@ run_again:
 cascade_upd_nodes->pop_front();
 thr->fk_cascade_depth++;
 prebuilt->m_mysql_table = NULL;
+vers_set_fields = DICT_TF2_FLAG_IS_SET(node->table, DICT_TF2_VERSIONED)
+&& (node->is_delete || node->versioned);

 goto run_again;
 }
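The two hunks above move the "should the versioning columns be stamped" decision out of the update path: callers now pass vers_set_fields in, and the cascade loop recomputes it per node from the table flags. A stub-only sketch of that split; the struct fields and helper names are assumptions, not the real server types:

#include <iostream>

struct node_stub {
	bool table_versioned;   // stands in for DICT_TF2_FLAG_IS_SET(..., DICT_TF2_VERSIONED)
	bool is_delete;
	bool versioned_update;  // stands in for node->versioned
};

bool compute_vers_set_fields(const node_stub& n) {
	return n.table_versioned && (n.is_delete || n.versioned_update);
}

int update_stub(const node_stub& n, bool vers_set_fields) {
	(void)n;
	if (vers_set_fields) {
		// would adjust the update vector: set sys_trx_start, or sys_trx_end on DELETE
		return 1;
	}
	return 0;
}

int main() {
	node_stub n{true, true, false};
	std::cout << update_stub(n, compute_vers_set_fields(n)) << "\n";  // prints: 1
}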
@@ -2259,11 +2261,11 @@ dberr_t
 row_update_for_mysql(
 const byte* mysql_rec,
 row_prebuilt_t* prebuilt,
-bool delete_history_row)
+bool vers_set_fields)
 {
 ut_a(prebuilt->template_type == ROW_MYSQL_WHOLE_ROW);
 return (row_update_for_mysql_using_upd_graph(
-mysql_rec, prebuilt, delete_history_row));
+mysql_rec, prebuilt, vers_set_fields));
 }

 /** This can only be used when srv_locks_unsafe_for_binlog is TRUE or this