Merge lgrimmer@build.mysql.com:/home/bk/mysql-4.1
into mysql.com:/space/my/mysql-4.1
commit e7a70ed162
@@ -30,6 +30,7 @@ bk@admin.bk
 bk@mysql.r18.ru
 brian@avenger.(none)
 brian@brian-akers-computer.local
+brian@private-client-ip-101.oz.net
 carsten@tsort.bitbybit.dk
 davida@isil.mysql.com
 dlenev@brandersnatch.localdomain
[File diff suppressed because it is too large]
@@ -1297,4 +1297,6 @@ INSERT INTO t2 VALUES (2,011401,37,'breaking','dreaded','Steinberg','W');
 INSERT INTO t2 VALUES (3,011402,37,'Romans','scholastics','jarring','');
 INSERT INTO t2 VALUES (4,011403,37,'intercepted','audiology','tinily','');
 SELECT * FROM t2;
+OPTIMIZE TABLE t2;
+SELECT * FROM t2;
 drop table t1, t2;
@@ -229,10 +229,10 @@ LocalConfig::parseString(const char * connectString, char *line){
 
 bool LocalConfig::readFile(const char * filename, bool &fopenError)
 {
-  char line[150], line2[150];
+  char line[1024];
 
   fopenError = false;
 
   FILE * file = fopen(filename, "r");
   if(file == 0){
     snprintf(line, 150, "Unable to open local config file: %s", filename);
@@ -241,31 +241,33 @@ bool LocalConfig::readFile(const char * filename, bool &fopenError)
     return false;
   }
 
-  unsigned int sz = 1024;
-  char* theString = (char*)NdbMem_Allocate(sz);
-  theString[0] = 0;
-
-  fgets(theString, sz, file);
-  while (fgets(line+1, 100, file)) {
-    line[0] = ';';
-    while (strlen(theString) + strlen(line) >= sz) {
-      sz = sz*2;
-      char *newString = (char*)NdbMem_Allocate(sz);
-      strcpy(newString, theString);
-      free(theString);
-      theString = newString;
+  BaseString theString;
+
+  while(fgets(line, 1024, file)){
+    BaseString tmp(line);
+    tmp.trim(" \t\n\r");
+    if(tmp.length() > 0 && tmp.c_str()[0] != '#'){
+      theString.append(tmp);
+      break;
     }
-    strcat(theString, line);
   }
+  while (fgets(line, 1024, file)) {
+    BaseString tmp(line);
+    tmp.trim(" \t\n\r");
+    if(tmp.length() > 0 && tmp.c_str()[0] != '#'){
+      theString.append(";");
+      theString.append(tmp);
+    }
+  }
 
-  bool return_value = parseString(theString, line);
+  bool return_value = parseString(theString.c_str(), line);
 
   if (!return_value) {
-    snprintf(line2, 150, "Reading %s: %s", filename, line);
-    setError(0,line2);
+    BaseString tmp;
+    tmp.assfmt("Reading %s: %s", filename, line);
+    setError(0, tmp.c_str());
   }
 
-  free(theString);
   fclose(file);
   return return_value;
 }
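The rewritten readFile() above swaps the hand-rolled NdbMem_Allocate()/strcat() buffer growth for NDB's BaseString class: each line is trimmed, comment lines starting with '#' are skipped, and surviving entries are joined with ';' before the combined string is handed to parseString(). A minimal standalone sketch of the same approach, assuming std::string in place of BaseString (the helper name read_config_string is illustrative, not part of the patch):

#include <cstdio>
#include <string>

// Sketch: collect the non-comment lines of a config file into one
// semicolon-separated string, as the patched readFile() does.
static std::string read_config_string(FILE *file)
{
  char line[1024];
  std::string result;
  while (fgets(line, sizeof(line), file))
  {
    std::string tmp(line);
    // Trim leading/trailing whitespace, like BaseString::trim(" \t\n\r").
    const char *ws = " \t\n\r";
    std::string::size_type b = tmp.find_first_not_of(ws);
    tmp = (b == std::string::npos)
      ? std::string()
      : tmp.substr(b, tmp.find_last_not_of(ws) - b + 1);
    if (tmp.empty() || tmp[0] == '#')   // skip blanks and '#' comments
      continue;
    if (!result.empty())
      result += ';';                    // join entries with ';'
    result += tmp;
  }
  return result;
}

The sketch folds the patch's two loops into one; that is equivalent here because the first appended entry simply gets no leading ';'.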
@@ -70,7 +70,6 @@
   Allow users to set compression level.
   Add truncate table command.
   Implement versioning, should be easy.
-  Implement optimize so we can fix broken tables.
   Allow for errors, find a way to mark bad rows.
   See if during an optimize you can make the table smaller.
   Talk to the gzip guys, come up with a writable format so that updates are doable
@@ -88,6 +87,7 @@ static int archive_init= 0;
 
 /* The file extension */
 #define ARZ ".ARZ"
+#define ARN ".ARN"
 
 /*
   Used for hash table that tracks open tables.
@@ -117,7 +117,7 @@ static ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table)
   if (!archive_init)
   {
     VOID(pthread_mutex_init(&archive_mutex,MY_MUTEX_INIT_FAST));
-    if (!hash_init(&archive_open_tables,system_charset_info,32,0,0,
+    if (hash_init(&archive_open_tables,system_charset_info,32,0,0,
                    (hash_get_key) archive_get_key,0,0))
     {
       pthread_mutex_unlock(&LOCK_mysql_create_db);
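The one-character change above fixes an inverted error check: mysys-style initializers such as hash_init() return zero on success and nonzero on failure, so the old if (!hash_init(...)) entered the error branch precisely when initialization had succeeded. A tiny sketch of the convention, using a made-up stand-in rather than the real mysys call:

#include <stdio.h>

// Hypothetical stand-in for a mysys-style initializer:
// returns 0 on success, nonzero on failure.
static int init_something(int should_fail)
{
  return should_fail ? 1 : 0;
}

int main(void)
{
  // Correct form: the error branch runs only when the call fails.
  if (init_something(0))
    printf("error path\n");
  else
    printf("initialized ok\n");   // this is what prints
  return 0;
}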
@@ -205,7 +205,7 @@ static int free_share(ARCHIVE_SHARE *share)
   We just implement one additional file extension.
 */
 const char **ha_archive::bas_ext() const
-{ static const char *ext[]= { ARZ, NullS }; return ext; }
+{ static const char *ext[]= { ARZ, ARN, NullS }; return ext; }
 
 
 /*
@@ -322,6 +322,11 @@ err:
 /*
   Look at ha_archive::open() for an explanation of the row format.
   Here we just write out the row.
 
+  Wondering about start_bulk_insert()? We don't implement it for
+  archive since it optimizes for lots of writes. The only save
+  for implementing start_bulk_insert() is that we could skip
+  setting dirty to true each time.
 */
 int ha_archive::write_row(byte * buf)
 {
@@ -380,17 +385,7 @@ int ha_archive::rnd_init(bool scan)
   pthread_mutex_lock(&share->mutex);
   if (share->dirty == TRUE)
   {
-/* I was having problems with OSX, but it worked for 10.3 so I am wrapping this with and ifdef */
-#ifdef BROKEN_GZFLUSH
-    gzclose(share->archive_write);
-    if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
-    {
-      pthread_mutex_unlock(&share->mutex);
-      DBUG_RETURN(errno ? errno : -1);
-    }
-#else
     gzflush(share->archive_write, Z_SYNC_FLUSH);
-#endif
     share->dirty= FALSE;
   }
   pthread_mutex_unlock(&share->mutex);
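This hunk drops the BROKEN_GZFLUSH close-and-reopen workaround in favor of a plain gzflush() with Z_SYNC_FLUSH, which pushes all pending compressed output into the file while leaving the write stream open. A minimal sketch of the call pattern, assuming only zlib (the file name is illustrative):

#include <zlib.h>

int main(void)
{
  // Append a blob to a gzip stream, then flush so readers of the file
  // see a consistent compressed stream without closing the writer.
  gzFile out = gzopen("example.ARZ", "ab");   // illustrative name
  if (out == NULL)
    return 1;
  const char row[] = "some row data";
  gzwrite(out, row, sizeof(row));
  // Z_SYNC_FLUSH emits and byte-aligns all pending output, at some cost
  // in compression ratio; the stream remains open for further writes.
  gzflush(out, Z_SYNC_FLUSH);
  gzclose(out);
  return 0;
}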
@@ -504,6 +499,54 @@ int ha_archive::rnd_pos(byte * buf, byte *pos)
   DBUG_RETURN(get_row(buf));
 }
 
+/*
+  The table can become fragmented if data was inserted, read, and then
+  inserted again. What we do is open up the file and recompress it completely.
+*/
+int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
+{
+  DBUG_ENTER("ha_archive::optimize");
+  int read; // Bytes read, gzread() returns int
+  gzFile reader, writer;
+  char block[IO_SIZE];
+  char writer_filename[FN_REFLEN];
+
+  /* Lets create a file to contain the new data */
+  fn_format(writer_filename,share->table_name,"",ARN, MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+
+  /* Closing will cause all data waiting to be flushed, to be flushed */
+  gzclose(share->archive_write);
+
+  if ((reader= gzopen(share->data_file_name, "rb")) == NULL)
+    DBUG_RETURN(-1);
+
+  if ((writer= gzopen(writer_filename, "wb")) == NULL)
+  {
+    gzclose(reader);
+    DBUG_RETURN(-1);
+  }
+
+  while (read= gzread(reader, block, IO_SIZE))
+    gzwrite(writer, block, read);
+
+  gzclose(reader);
+  gzclose(writer);
+
+  my_rename(writer_filename,share->data_file_name,MYF(0));
+
+  /*
+    We reopen the file in case some IO is waiting to go through.
+    In theory the table is closed right after this operation,
+    but it is possible for IO to still happen.
+    I may be being a bit too paranoid right here.
+  */
+  if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
+    DBUG_RETURN(errno ? errno : -1);
+  share->dirty= FALSE;
+
+  DBUG_RETURN(0);
+}
+
 /******************************************************************************
 
   Everything below here is default, please look at ha_example.cc for
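The new optimize() above defragments an archive table by streaming the whole data file through zlib into a fresh .ARN file and renaming it over the original. A self-contained sketch of that copy loop, assuming only zlib (the helper name recompress_gz and the 4096-byte block size are illustrative choices, not the patch's constants):

#include <zlib.h>

// Sketch: decompress src_name and recompress it into dst_name,
// rewriting the data as one contiguous stream -- the defragmentation
// step performed by ha_archive::optimize(). Returns 0 on success.
static int recompress_gz(const char *src_name, const char *dst_name)
{
  char block[4096];
  int bytes;

  gzFile reader = gzopen(src_name, "rb");
  if (reader == NULL)
    return -1;

  gzFile writer = gzopen(dst_name, "wb");
  if (writer == NULL)
  {
    gzclose(reader);
    return -1;
  }

  // gzread() returns the number of uncompressed bytes read, 0 at
  // end of stream, and -1 on error.
  while ((bytes = gzread(reader, block, sizeof(block))) > 0)
    gzwrite(writer, block, bytes);

  gzclose(reader);
  gzclose(writer);
  // The caller would then rename dst_name over src_name, as the patch
  // does with my_rename().
  return (bytes < 0) ? -1 : 0;
}

Unlike the patch's while (read= gzread(...)) loop, this sketch distinguishes end-of-stream from a read error before reporting success.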
@@ -112,7 +112,7 @@ public:
   int external_lock(THD *thd, int lock_type);
   ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
   int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
+  int optimize(THD* thd, HA_CHECK_OPT* check_opt);
   THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
                              enum thr_lock_type lock_type);
 };
@@ -175,7 +175,7 @@ void ha_ndbcluster::records_update()
   DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
                       ((const NDBTAB *)m_table)->getTableId(),
                       info->no_uncommitted_rows_count));
-  if (info->records == ~(ha_rows)0)
+  //  if (info->records == ~(ha_rows)0)
   {
     Uint64 rows;
     if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
@@ -607,7 +607,7 @@ int ha_ndbcluster::get_metadata(const char *path)
   DBUG_ENTER("get_metadata");
   DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));
 
-  if (!(tab= dict->getTable(m_tabname, &m_table_info)))
+  if (!(tab= dict->getTable(m_tabname)))
     ERR_RETURN(dict->getNdbError());
   DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
 
@@ -655,8 +655,8 @@ int ha_ndbcluster::get_metadata(const char *path)
   if (error)
     DBUG_RETURN(error);
 
-  // All checks OK, lets use the table
-  m_table= (void*)tab;
+  m_table= NULL;
+  m_table_info= NULL;
 
   DBUG_RETURN(build_index_list(table, ILBP_OPEN));
 }
@@ -771,6 +771,7 @@ void ha_ndbcluster::release_metadata()
   DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));
 
   m_table= NULL;
+  m_table_info= NULL;
 
   // Release index list
   for (i= 0; i < MAX_KEY; i++)
@@ -2394,7 +2395,17 @@ void ha_ndbcluster::info(uint flag)
   if (flag & HA_STATUS_VARIABLE)
   {
     DBUG_PRINT("info", ("HA_STATUS_VARIABLE"));
-    records_update();
+    if (m_table_info)
+    {
+      records_update();
+    }
+    else
+    {
+      Uint64 rows;
+      if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
+        records= rows;
+      }
+    }
   }
   if (flag & HA_STATUS_ERRKEY)
   {
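The info() change above makes the row-count estimate conditional on whether per-statement table metadata was cached by external_lock(): if m_table_info is set, the locally maintained counters in records_update() are used; otherwise the handler queries the cluster via ndb_get_table_statistics(). A hedged sketch of that cache-or-query shape (every name below is a stand-in for the handler's members, not a literal excerpt):

#include <stdint.h>

typedef uint64_t Uint64;

// Hypothetical stand-in mirroring ndb_get_table_statistics():
// returns 0 on success and fills *rows.
static int get_table_statistics(const char *tabname, Uint64 *rows)
{
  *rows = 0;   // a real implementation would ask the cluster
  return 0;
}

// Sketch of the fallback: trust cached statement-local metadata when
// present, otherwise fetch an exact count from the storage layer.
static Uint64 estimate_records(const void *cached_table_info,
                               Uint64 cached_records,
                               const char *tabname)
{
  if (cached_table_info)
    return cached_records;      // fast path: records_update() analogue
  Uint64 rows;
  if (get_table_statistics(tabname, &rows) == 0)
    return rows;                // slow path: ask the cluster
  return cached_records;        // statistics call failed; keep old value
}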
@@ -2781,6 +2792,16 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
         // Start of transaction
         retrieve_all_fields= FALSE;
         ops_pending= 0;
+        {
+          NDBDICT *dict= m_ndb->getDictionary();
+          const NDBTAB *tab;
+          void *tab_info;
+          if (!(tab= dict->getTable(m_tabname, &tab_info)))
+            ERR_RETURN(dict->getNdbError());
+          DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
+          m_table= (void *)tab;
+          m_table_info= tab_info;
+        }
         no_uncommitted_rows_init(thd);
       }
       else
@@ -2803,6 +2824,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
         thd->transaction.stmt.ndb_tid= 0;
       }
     }
+    m_table= NULL;
+    m_table_info= NULL;
     if (m_active_trans)
       DBUG_PRINT("warning", ("m_active_trans != NULL"));
     if (m_active_cursor)
@@ -3288,6 +3311,7 @@ int ha_ndbcluster::alter_table_name(const char *from, const char *to)
     ERR_RETURN(dict->getNdbError());
 
   m_table= NULL;
+  m_table_info= NULL;
 
   DBUG_RETURN(0);
 }