Turns out that the bulk insert interface was not well documented. I fixed the issue in the archive engine and will update the comments in handler next. I should probably also look through the other engines and see whether this is an issue elsewhere as well.
sql/ha_archive.cc: Fixed for bulk_insert
commit a207c820e0
parent ff3079c8d0
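For context, a minimal self-contained sketch of the bulk-insert contract the patch below relies on. ToyEngine and its members are illustrative stand-ins, not the real handler interface from sql/handler.h: the SQL layer announces an estimated row count up front (0 meaning unknown), and the engine decides whether entering bulk mode is worth it.

// A minimal sketch under the assumptions above; names are illustrative only.
#include <cstdio>

#define MIN_ROWS_TO_USE_BULK_INSERT 2  // mirrors the new archive define

class ToyEngine
{
  bool bulk_insert;
public:
  ToyEngine() : bulk_insert(false) {}

  // rows == 0 means the caller could not estimate the row count.
  void start_bulk_insert(unsigned long rows)
  {
    if (!rows || rows >= MIN_ROWS_TO_USE_BULK_INSERT)
      bulk_insert= true;                 // defer per-row flushing
  }

  void write_row(const char *row)
  {
    printf("write %s (%s)\n", row, bulk_insert ? "buffered" : "flushed");
  }

  void end_bulk_insert()
  {
    if (bulk_insert)
      printf("flush once at the end\n"); // one flush instead of one per row
    bulk_insert= false;
  }
};

int main()
{
  ToyEngine engine;
  engine.start_bulk_insert(0);  // e.g. INSERT ... SELECT: row count unknown
  engine.write_row("a");
  engine.write_row("b");
  engine.end_bulk_insert();
  return 0;
}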
@@ -135,6 +135,13 @@ static HASH archive_open_tables;
 #define DATA_BUFFER_SIZE 2       // Size of the data used in the data file
 #define ARCHIVE_CHECK_HEADER 254 // The number we use to determine corruption
+
+/*
+  Number of rows that will force a bulk insert.
+*/
+#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2
+
+
 
 /* dummy handlerton - only to have something to return from archive_db_init */
 handlerton archive_hton = {
   "ARCHIVE",
@@ -1026,7 +1033,8 @@ void ha_archive::info(uint flag)
 void ha_archive::start_bulk_insert(ha_rows rows)
 {
   DBUG_ENTER("ha_archive::start_bulk_insert");
-  bulk_insert= TRUE;
+  if (!rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT)
+    bulk_insert= TRUE;
   DBUG_VOID_RETURN;
 }
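The effect of the change: previously bulk_insert was set unconditionally, so even a plain single-row INSERT took the bulk path. A quick standalone check of the new guard (the helper name is mine; the predicate is verbatim from the patch):

#include <cassert>

#define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2

// The guard from the patch, lifted into a helper so it is easy to exercise.
static bool should_use_bulk_insert(unsigned long rows)
{
  return !rows || rows >= ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT;
}

int main()
{
  assert(should_use_bulk_insert(0));    // unknown count: assume a big load
  assert(!should_use_bulk_insert(1));   // single-row INSERT: skip bulk mode
  assert(should_use_bulk_insert(2));    // at the threshold: use bulk mode
  assert(should_use_bulk_insert(500));  // large load: use bulk mode
  return 0;
}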