Merge pnousiainen@bk-internal.mysql.com:/home/bk/mysql-4.1
into orca.ndb.mysql.com:/space_old/pekka/ndb/version/my41
commit c9db5f8063
@@ -1155,12 +1155,13 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend)
        */
        int search_result= (keyinfo->flag & HA_SPATIAL) ?
                rtree_find_first(info, key, info->lastkey, key_length,
-                                SEARCH_SAME) :
+                                MBR_EQUAL | MBR_DATA) :
                _mi_search(info,keyinfo,info->lastkey,key_length,
                           SEARCH_SAME, info->s->state.key_root[key]);
        if (search_result)
        {
-         mi_check_print_error(param,"Record at: %10s Can't find key for index: %2d",
+         mi_check_print_error(param,"Record at: %10s "
+                              "Can't find key for index: %2d",
                               llstr(start_recpos,llbuff),key+1);
          if (error++ > MAXERR || !(param->testflag & T_VERBOSE))
            goto err2;
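The hunk above is the CHECK TABLE side of the spatial-index fix: when verifying a spatial key, the search now passes MBR_EQUAL | MBR_DATA so that the row pointer stored in the key is compared as well as the bounding box. The following stand-alone sketch is not MyISAM code — the SpatialKey struct, the find() helper and the flag values are assumptions for illustration — but it shows why comparing only the MBR can report a key as present even though it belongs to a different row:

// Self-contained illustration; flag values and types are invented for the sketch.
#include <cstdint>
#include <iostream>
#include <vector>

enum { MBR_EQUAL = 1, MBR_DATA = 2 };   // assumed values, for the sketch only

struct SpatialKey { double mbr[4]; std::uint64_t rowid; };

static bool mbr_equal(const SpatialKey &a, const SpatialKey &b)
{
  for (int i = 0; i < 4; i++)
    if (a.mbr[i] != b.mbr[i]) return false;
  return true;
}

// Returns true when a matching key exists in the index under the given flags.
static bool find(const std::vector<SpatialKey> &index, const SpatialKey &key, int flags)
{
  for (const SpatialKey &k : index)
  {
    if (!mbr_equal(k, key)) continue;
    if ((flags & MBR_DATA) && k.rowid != key.rowid) continue; // also compare row pointer
    return true;
  }
  return false;
}

int main()
{
  // Two rows with identical geometry, as in the Bug#17877 test case further down.
  std::vector<SpatialKey> index = { { {1, 2, 3, 4}, 100 }, { {1, 2, 3, 4}, 200 } };
  SpatialKey key_of_missing_row = { {1, 2, 3, 4}, 300 };
  std::cout << find(index, key_of_missing_row, MBR_EQUAL) << "\n";            // 1: false positive
  std::cout << find(index, key_of_missing_row, MBR_EQUAL | MBR_DATA) << "\n"; // 0: correctly absent
}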
@@ -59,6 +59,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
   my_off_t key_root[MI_MAX_POSSIBLE_KEY],key_del[MI_MAX_KEY_BLOCK_SIZE];
   MI_CREATE_INFO tmp_create_info;
   DBUG_ENTER("mi_create");
+  DBUG_PRINT("enter", ("keys: %u columns: %u uniques: %u flags: %u",
+                       keys, columns, uniques, flags));

   if (!ci)
   {

@@ -447,6 +449,16 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
                  uniques * MI_UNIQUEDEF_SIZE +
                  (key_segs + unique_key_parts)*HA_KEYSEG_SIZE+
                  columns*MI_COLUMNDEF_SIZE);
+  DBUG_PRINT("info", ("info_length: %u", info_length));
+  /* There are only 16 bits for the total header length. */
+  if (info_length > 65535)
+  {
+    my_printf_error(0, "MyISAM table '%s' has too many columns and/or "
+                    "indexes and/or unique constraints.",
+                    MYF(0), name + dirname_length(name));
+    my_errno= HA_WRONG_CREATE_OPTION;
+    goto err;
+  }

   bmove(share.state.header.file_version,(byte*) myisam_file_magic,4);
   ci->old_options=options| (ci->old_options & HA_OPTION_TEMP_COMPRESS_RECORD ?

@@ -616,6 +628,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
     errpos=3;
   }

+  DBUG_PRINT("info", ("write state info and base info"));
   if (mi_state_info_write(file, &share.state, 2) ||
       mi_base_info_write(file, &share.base))
     goto err;

@@ -629,6 +642,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
 #endif

   /* Write key and keyseg definitions */
+  DBUG_PRINT("info", ("write key and keyseg definitions"));
   for (i=0 ; i < share.base.keys - uniques; i++)
   {
     uint sp_segs=(keydefs[i].flag & HA_SPATIAL) ? 2*SPDIMS : 0;

@@ -677,6 +691,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
   }

   /* Save unique definition */
+  DBUG_PRINT("info", ("write unique definitions"));
   for (i=0 ; i < share.state.header.uniques ; i++)
   {
     if (mi_uniquedef_write(file, &uniquedefs[i]))

@@ -687,6 +702,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
       goto err;
     }
   }
+  DBUG_PRINT("info", ("write field definitions"));
   for (i=0 ; i < share.base.fields ; i++)
     if (mi_recinfo_write(file, &recinfo[i]))
       goto err;

@@ -701,6 +717,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
 #endif

   /* Enlarge files */
+  DBUG_PRINT("info", ("enlarge to keystart: %lu", (ulong) share.base.keystart));
   if (my_chsize(file,(ulong) share.base.keystart,0,MYF(0)))
     goto err;

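The -447 hunk above adds a guard because only 16 bits are available for the total header length in the MyISAM file header; overflowing that field used to be silently accepted and produced a broken table. A minimal hedged sketch of the same idea, with stand-in types rather than the real mi_create() machinery:

// Sketch only: the buffer layout and error handling are simplified assumptions.
#include <cstdint>
#include <cstdio>

static bool write_header_length(std::uint32_t info_length, std::uint8_t out[2])
{
  if (info_length > 65535)              // only 16 bits in the on-disk header
  {
    std::fprintf(stderr, "too many columns and/or indexes and/or unique constraints\n");
    return false;                       // caller maps this to HA_WRONG_CREATE_OPTION
  }
  out[0] = std::uint8_t(info_length >> 8);    // stand-in for the real 2-byte store
  out[1] = std::uint8_t(info_length & 0xff);
  return true;
}

int main()
{
  std::uint8_t buf[2];
  bool small_ok = write_header_length(1024, buf);    // fits
  bool big_ok   = write_header_length(70000, buf);   // rejected instead of truncated
  return (small_ok && !big_ok) ? 0 : 1;
}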
@@ -34,12 +34,24 @@ int mi_delete_table(const char *name)
 #ifdef USE_RAID
   {
     MI_INFO *info;
-    /* we use 'open_for_repair' to be able to delete a crashed table */
-    if (!(info=mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR)))
-      DBUG_RETURN(my_errno);
-    raid_type = info->s->base.raid_type;
-    raid_chunks = info->s->base.raid_chunks;
-    mi_close(info);
+    /*
+      When built with RAID support, we need to determine if this table
+      makes use of the raid feature. If yes, we need to remove all raid
+      chunks. This is done with my_raid_delete(). Unfortunately it is
+      necessary to open the table just to check this. We use
+      'open_for_repair' to be able to open even a crashed table. If even
+      this open fails, we assume no raid configuration for this table
+      and try to remove the normal data file only. This may however
+      leave the raid chunks behind.
+    */
+    if (!(info= mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR)))
+      raid_type= 0;
+    else
+    {
+      raid_type= info->s->base.raid_type;
+      raid_chunks= info->s->base.raid_chunks;
+      mi_close(info);
+    }
   }
 #ifdef EXTRA_DEBUG
   check_table_is_closed(name,"delete");
@@ -1116,6 +1116,9 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf)
           info->rec_cache.pos_in_file <= block_info.next_filepos &&
           flush_io_cache(&info->rec_cache))
         goto err;
+      /* A corrupted table can have wrong pointers. (Bug# 19835) */
+      if (block_info.next_filepos == HA_OFFSET_ERROR)
+        goto panic;
       info->rec_cache.seek_not_done=1;
       if ((b_type=_mi_get_block_info(&block_info,file,
                                      block_info.next_filepos))
@@ -54,7 +54,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key,
       TODO: nulls processing
     */
 #ifdef HAVE_SPATIAL
-    return sp_make_key(info,keynr,key,record,filepos);
+    DBUG_RETURN(sp_make_key(info,keynr,key,record,filepos));
 #else
     DBUG_ASSERT(0); /* mi_open should check that this never happens*/
 #endif
@@ -68,6 +68,7 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len,

   if (fast_mi_readinfo(info))
     goto err;

   if (share->concurrent_insert)
     rw_rdlock(&share->key_root_lock[inx]);

@@ -89,24 +90,35 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len,
   case HA_KEY_ALG_BTREE:
   default:
     if (!_mi_search(info, keyinfo, key_buff, use_key_length,
                     myisam_read_vec[search_flag], info->s->state.key_root[inx]))
     {
-      while (info->lastpos >= info->state->data_file_length)
+      /*
+        If we are searching for an exact key (including the data pointer)
+        and this was added by an concurrent insert,
+        then the result is "key not found".
+      */
+      if ((search_flag == HA_READ_KEY_EXACT) &&
+          (info->lastpos >= info->state->data_file_length))
+      {
+        my_errno= HA_ERR_KEY_NOT_FOUND;
+        info->lastpos= HA_OFFSET_ERROR;
+      }
+      else while (info->lastpos >= info->state->data_file_length)
       {
         /*
           Skip rows that are inserted by other threads since we got a lock
           Note that this can only happen if we are not searching after an
           exact key, because the keys are sorted according to position
         */
         if (_mi_search_next(info, keyinfo, info->lastkey,
                             info->lastkey_length,
                             myisam_readnext_vec[search_flag],
                             info->s->state.key_root[inx]))
           break;
       }
     }
   }

   if (share->concurrent_insert)
     rw_unlock(&share->key_root_lock[inx]);

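The -89 hunk changes how mi_rkey() treats hits that lie beyond the data file length recorded when the key-root lock was taken: such rows were inserted concurrently, so an exact search (key plus data pointer) must report HA_ERR_KEY_NOT_FOUND instead of stepping to the next key. A simplified, self-contained sketch of that decision — all types, flags and values here are assumptions, not the MyISAM structures:

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

enum SearchFlag { HA_READ_KEY_EXACT, HA_READ_AFTER_KEY };   // stand-in flags

struct Hit { std::uint64_t pos; };                          // row position of a key hit

// Decide what to return for a key hit, given the data file length seen at lock time.
static std::optional<Hit> filter_hit(std::vector<Hit> hits, std::size_t idx,
                                     std::uint64_t data_file_length, SearchFlag flag)
{
  if (idx >= hits.size())
    return std::nullopt;
  if (flag == HA_READ_KEY_EXACT && hits[idx].pos >= data_file_length)
    return std::nullopt;                         // concurrently inserted row: "not found"
  while (idx < hits.size() && hits[idx].pos >= data_file_length)
    ++idx;                                       // non-exact search: skip the newer rows
  if (idx < hits.size())
    return hits[idx];
  return std::nullopt;
}

int main()
{
  std::vector<Hit> hits = { {900}, {1200} };     // 1200 lies past the locked length 1000
  std::cout << bool(filter_hit(hits, 1, 1000, HA_READ_KEY_EXACT)) << "\n";  // 0
  std::cout << bool(filter_hit(hits, 1, 1000, HA_READ_AFTER_KEY)) << "\n";  // 0 (none left)
  std::cout << bool(filter_hit(hits, 0, 1000, HA_READ_AFTER_KEY)) << "\n";  // 1
}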
@@ -183,9 +183,11 @@ int rtree_find_first(MI_INFO *info, uint keynr, uchar *key, uint key_length,
     return -1;
   }

-  /* Save searched key */
-  memcpy(info->first_mbr_key, key, keyinfo->keylength -
-         info->s->base.rec_reflength);
+  /*
+    Save searched key, include data pointer.
+    The data pointer is required if the search_flag contains MBR_DATA.
+  */
+  memcpy(info->first_mbr_key, key, keyinfo->keylength);
   info->last_rkey_length = key_length;

   info->rtree_recursion_depth = -1;
@@ -52,10 +52,14 @@
     if (EQUAL_CMP(amin, amax, bmin, bmax)) \
       return 1; \
   } \
-  else /* if (nextflag & MBR_DISJOINT) */ \
+  else if (nextflag & MBR_DISJOINT) \
   { \
     if (DISJOINT_CMP(amin, amax, bmin, bmax)) \
       return 1; \
   }\
+  else /* if unknown comparison operator */ \
+  { \
+    DBUG_ASSERT(0); \
+  }

 #define RT_CMP_KORR(type, korr_func, len, nextflag) \
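The macro change above replaces a catch-all else, which silently treated any unrecognized comparison flag as MBR_DISJOINT, with an explicit flag test plus an assertion. A minimal sketch of the same control-flow change, using invented stand-ins for the real comparison macros and flag set:

#include <cassert>

enum { MBR_EQUAL = 1, MBR_DISJOINT = 2 };   // assumed subset of the real flag set

static bool equal_cmp(double amin, double amax, double bmin, double bmax)
{ return amin == bmin && amax == bmax; }
static bool disjoint_cmp(double amin, double amax, double bmin, double bmax)
{ return amax < bmin || bmax < amin; }

static int rt_cmp(int nextflag, double amin, double amax, double bmin, double bmax)
{
  if (nextflag & MBR_EQUAL)
  {
    if (equal_cmp(amin, amax, bmin, bmax)) return 1;
  }
  else if (nextflag & MBR_DISJOINT)           // was: unconditional else
  {
    if (disjoint_cmp(amin, amax, bmin, bmax)) return 1;
  }
  else
  {
    assert(0 && "unknown comparison operator");  // new defensive default
  }
  return 0;
}

int main() { return rt_cmp(MBR_DISJOINT, 0, 1, 2, 3) ? 0 : 1; }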
mysql-test/include/have_case_sensitive_file_system.inc (new file, 4 lines)
@@ -0,0 +1,4 @@
+--require r/case_sensitive_file_system.require
+--disable_query_log
+show variables like "lower_case_file_system";
+--enable_query_log
mysql-test/r/case_sensitive_file_system.require (new file, 2 lines)
@@ -0,0 +1,2 @@
+Variable_name Value
+lower_case_file_system OFF
@@ -817,3 +817,43 @@ check table t1 extended;
 Table Op Msg_type Msg_text
 test.t1 check status OK
 drop table t1;
+CREATE TABLE t1 (
+c1 geometry NOT NULL default '',
+SPATIAL KEY i1 (c1(32))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+-18.6055555000 -66.8158332999,
+-18.7186111000 -66.8102777000,
+-18.7211111000 -66.9269443999,
+-18.6086111000 -66.9327777000))'));
+CHECK TABLE t1 EXTENDED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
+CREATE TABLE t1 (
+c1 geometry NOT NULL default '',
+SPATIAL KEY i1 (c1(32))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+-18.6055555000 -66.8158332999,
+-18.7186111000 -66.8102777000,
+-18.7211111000 -66.9269443999,
+-18.6086111000 -66.9327777000))'));
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-65.7402776999 -96.6686111000,
+-65.7372222000 -96.5516666000,
+-65.8502777000 -96.5461111000,
+-65.8527777000 -96.6627777000,
+-65.7402776999 -96.6686111000))'));
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+-18.6055555000 -66.8158332999,
+-18.7186111000 -66.8102777000,
+-18.7211111000 -66.9269443999,
+-18.6086111000 -66.9327777000))'));
+CHECK TABLE t1 EXTENDED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
mysql-test/r/lowercase_fs_off.result (new file, 11 lines)
@@ -0,0 +1,11 @@
+create database d1;
+grant all on d1.* to 'sample'@'localhost' identified by 'password';
+flush privileges;
+select database();
+database()
+d1
+create database d2;
+ERROR 42000: Access denied for user 'sample'@'localhost' to database 'd2'
+create database D1;
+ERROR 42000: Access denied for user 'sample'@'localhost' to database 'D1'
+drop database if exists d1;
@@ -875,3 +875,17 @@ select @@max_prepared_stmt_count, @@prepared_stmt_count;
 @@max_prepared_stmt_count @@prepared_stmt_count
 3 0
 set global max_prepared_stmt_count= @old_max_prepared_stmt_count;
+drop table if exists t1;
+create temporary table if not exists t1 (a1 int);
+prepare stmt from "delete t1 from t1 where (cast(a1/3 as unsigned) * 3) = a1";
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+execute stmt;
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+execute stmt;
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+execute stmt;
+drop temporary table t1;
+deallocate prepare stmt;
@@ -188,4 +188,48 @@ check table t1 extended;

 drop table t1;

+#
+# Bug#17877 - Corrupted spatial index
+#
+CREATE TABLE t1 (
+c1 geometry NOT NULL default '',
+SPATIAL KEY i1 (c1(32))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+-18.6055555000 -66.8158332999,
+-18.7186111000 -66.8102777000,
+-18.7211111000 -66.9269443999,
+-18.6086111000 -66.9327777000))'));
+# This showed a missing key.
+CHECK TABLE t1 EXTENDED;
+DROP TABLE t1;
+#
+CREATE TABLE t1 (
+c1 geometry NOT NULL default '',
+SPATIAL KEY i1 (c1(32))
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+-18.6055555000 -66.8158332999,
+-18.7186111000 -66.8102777000,
+-18.7211111000 -66.9269443999,
+-18.6086111000 -66.9327777000))'));
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-65.7402776999 -96.6686111000,
+-65.7372222000 -96.5516666000,
+-65.8502777000 -96.5461111000,
+-65.8527777000 -96.6627777000,
+-65.7402776999 -96.6686111000))'));
+# This is the same as the first insert to get a non-unique key.
+INSERT INTO t1 (c1) VALUES (
+PolygonFromText('POLYGON((-18.6086111000 -66.9327777000,
+-18.6055555000 -66.8158332999,
+-18.7186111000 -66.8102777000,
+-18.7211111000 -66.9269443999,
+-18.6086111000 -66.9327777000))'));
+# This showed (and still shows) OK.
+CHECK TABLE t1 EXTENDED;
+DROP TABLE t1;

 # End of 4.1 tests
mysql-test/t/lowercase_fs_off.test (new file, 27 lines)
@@ -0,0 +1,27 @@
+#
+# Specific tests for case sensitive file systems
+# i.e. lower_case_filesystem=OFF
+#
+-- source include/have_case_sensitive_file_system.inc
+
+connect (master,localhost,root,,);
+connection master;
+create database d1;
+grant all on d1.* to 'sample'@'localhost' identified by 'password';
+flush privileges;
+
+connect (sample,localhost,sample,password,d1);
+connection sample;
+select database();
+--error 1044
+create database d2;
+--error 1044
+create database D1;
+disconnect sample;
+
+connection master;
+drop database if exists d1;
+disconnect master;
+connection default;
+
+# End of 4.1 tests
@@ -926,4 +926,29 @@ select @@max_prepared_stmt_count, @@prepared_stmt_count;
 set global max_prepared_stmt_count= @old_max_prepared_stmt_count;
 --enable_ps_protocol

+#
+# Bug#19399 "Stored Procedures 'Lost Connection' when dropping/creating
+# tables"
+# Check that multi-delete tables are also cleaned up before re-execution.
+#
+--disable_warnings
+drop table if exists t1;
+create temporary table if not exists t1 (a1 int);
+--enable_warnings
+# exact delete syntax is essential
+prepare stmt from "delete t1 from t1 where (cast(a1/3 as unsigned) * 3) = a1";
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+# the server crashed on the next statement without the fix
+execute stmt;
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+# the problem was in memory corruption: repeat the test just in case
+execute stmt;
+drop temporary table t1;
+create temporary table if not exists t1 (a1 int);
+execute stmt;
+drop temporary table t1;
+deallocate prepare stmt;
+
 # End of 4.1 tests
@@ -232,6 +232,7 @@
 #define ZSCAN_MARKERS 18
 #define ZOPERATION_EVENT_REP 19
 #define ZPREP_DROP_TABLE 20
+#define ZENABLE_EXPAND_CHECK 21

 /* ------------------------------------------------------------------------- */
 /* NODE STATE DURING SYSTEM RESTART, VARIABLES CNODES_SR_STATE */
@@ -434,6 +434,33 @@ void Dblqh::execCONTINUEB(Signal* signal)
     checkDropTab(signal);
     return;
     break;
+  case ZENABLE_EXPAND_CHECK:
+  {
+    jam();
+    fragptr.i = signal->theData[1];
+    if (fragptr.i != RNIL)
+    {
+      jam();
+      ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
+      signal->theData[0] = fragptr.p->tabRef;
+      signal->theData[1] = fragptr.p->fragId;
+      sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
+
+      signal->theData[0] = ZENABLE_EXPAND_CHECK;
+      signal->theData[1] = fragptr.p->nextFrag;
+      sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB);
+      return;
+    }
+    else
+    {
+      jam();
+      StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
+      conf->startingNodeId = getOwnNodeId();
+      sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
+                 StartRecConf::SignalLength, JBB);
+      return;
+    }
+  }
   default:
     ndbrequire(false);
     break;

@@ -15503,20 +15530,21 @@ void Dblqh::srFourthComp(Signal* signal)
   } else if ((cstartType == NodeState::ST_NODE_RESTART) ||
              (cstartType == NodeState::ST_SYSTEM_RESTART)) {
     jam();
-    StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
-    conf->startingNodeId = getOwnNodeId();
-    sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
-               StartRecConf::SignalLength, JBB);
-
-    if(cstartType == NodeState::ST_SYSTEM_RESTART){
-      fragptr.i = c_redo_log_complete_frags;
-      while(fragptr.i != RNIL){
-        ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
-        signal->theData[0] = fragptr.p->tabRef;
-        signal->theData[1] = fragptr.p->fragId;
-        sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB);
-        fragptr.i = fragptr.p->nextFrag;
-      }
+    if(cstartType == NodeState::ST_SYSTEM_RESTART)
+    {
+      jam();
+      signal->theData[0] = ZENABLE_EXPAND_CHECK;
+      signal->theData[1] = c_redo_log_complete_frags;
+      sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB);
+    }
+    else
+    {
+      jam();
+      StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend();
+      conf->startingNodeId = getOwnNodeId();
+      sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal,
+                 StartRecConf::SignalLength, JBB);
+    }
   } else {
     ndbrequire(false);
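Both Dblqh hunks above implement the same scheduling change: instead of sending EXPANDCHECK2 for every fragment inside one srFourthComp() invocation, the block now processes one fragment per CONTINUEB signal and re-sends CONTINUEB to itself with the next fragment, confirming START_RECCONF only when the list is exhausted. A simplified, self-contained sketch of that pattern — the queue, fragment record and names below are invented; only the scheduling idea matches:

#include <cstdint>
#include <iostream>
#include <queue>
#include <vector>

static const std::uint32_t RNIL = 0xffffffff;     // "end of list", as in the diff

struct Frag { std::uint32_t next; };              // fragments linked by index
static std::vector<Frag> frags;
static std::queue<std::uint32_t> signal_queue;    // stand-in for the job buffer

static void execCONTINUEB(std::uint32_t fragIndex)
{
  if (fragIndex == RNIL)
  {
    std::cout << "all fragments expanded, sending START_RECCONF\n";
    return;
  }
  std::cout << "expand check on fragment " << fragIndex << "\n";
  signal_queue.push(frags[fragIndex].next);       // re-schedule with the next fragment
}

int main()
{
  frags = { {1}, {2}, {RNIL} };                   // 0 -> 1 -> 2 -> end
  signal_queue.push(0);
  while (!signal_queue.empty())                   // the real scheduler interleaves
  {                                               // other signals between iterations
    std::uint32_t i = signal_queue.front();
    signal_queue.pop();
    execCONTINUEB(i);
  }
}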
@@ -6978,6 +6978,18 @@ void Dbtc::checkScanActiveInFailedLqh(Signal* signal,
         found = true;
       }
     }
+
+    ScanFragList deliv(c_scan_frag_pool, scanptr.p->m_delivered_scan_frags);
+    for(deliv.first(ptr); !ptr.isNull(); deliv.next(ptr))
+    {
+      jam();
+      if (refToNode(ptr.p->lqhBlockref) == failedNodeId)
+      {
+        jam();
+        found = true;
+        break;
+      }
+    }
   }
   if(found){
     jam();
@@ -26,12 +26,12 @@ public:
   void init() { m_confs.clear(); m_nRefs = 0; }

   template<typename SignalClass>
-  void init(SafeCounterManager& mgr,
+  bool init(SafeCounterManager& mgr,
             NodeReceiverGroup rg, Uint16 GSN, Uint32 senderData)
   {
     init();
     SafeCounter tmp(mgr, m_sc);
-    tmp.init<SignalClass>(rg, GSN, senderData);
+    return tmp.init<SignalClass>(rg, GSN, senderData);
   }

   bool ignoreRef(SafeCounterManager& mgr, Uint32 nodeId)
@@ -230,10 +230,13 @@ inline
 bool
 SafeCounter::init(NodeReceiverGroup rg, Uint16 GSN, Uint32 senderData){

-  bool b = init<Ref>(rg.m_block, GSN, senderData);
-  m_nodes = rg.m_nodes;
-  m_count = m_nodes.count();
-  return b;
+  if (init<Ref>(rg.m_block, GSN, senderData))
+  {
+    m_nodes = rg.m_nodes;
+    m_count = m_nodes.count();
+    return true;
+  }
+  return false;
 }

 template<typename Ref>

@@ -241,10 +244,13 @@ inline
 bool
 SafeCounter::init(NodeReceiverGroup rg, Uint32 senderData){

-  bool b = init<Ref>(rg.m_block, Ref::GSN, senderData);
-  m_nodes = rg.m_nodes;
-  m_count = m_nodes.count();
-  return b;
+  if (init<Ref>(rg.m_block, Ref::GSN, senderData))
+  {
+    m_nodes = rg.m_nodes;
+    m_count = m_nodes.count();
+    return true;
+  }
+  return false;
 }

 inline
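The SafeCounter hunks change init() from returning void to returning bool, so a failure to seize a counter record is propagated to the caller instead of being dropped on the floor. A hedged sketch of the calling pattern this enables — the types below are simplified stand-ins, not the real NDB classes:

#include <iostream>

struct CounterPool { int free_records = 0; };   // invented stand-in

struct SafeCounterHandle
{
  bool active = false;
  // Returns false when no counter record could be seized.
  bool init(CounterPool &pool)
  {
    if (pool.free_records == 0)
      return false;
    --pool.free_records;
    active = true;
    return true;
  }
};

int main()
{
  CounterPool pool;                 // empty pool: init() must now fail visibly
  SafeCounterHandle handle;
  if (!handle.init(pool))
  {
    std::cout << "counter pool exhausted, caller must retry or abort\n";
    return 1;
  }
  return 0;
}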
@@ -30,6 +30,7 @@ extern my_bool opt_core;
 #define MAX_LINE_LENGTH 255
 #define KEY_INTERNAL 0
 #define MAX_INT_RNIL 0xfffffeff
+#define MAX_PORT_NO 65535

 #define _STR_VALUE(x) #x
 #define STR_VALUE(x) _STR_VALUE(x)

@@ -426,7 +427,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     UNDEFINED,
     "1",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },

   {
     CFG_DB_NO_REPLICAS,

@@ -1430,7 +1431,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     NDB_PORT,
     "0",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },

   {
     KEY_INTERNAL,

@@ -1442,7 +1443,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     UNDEFINED,
     "0",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },

   {
     CFG_NODE_ARBIT_RANK,

@@ -1573,7 +1574,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     MANDATORY,
     "0",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },

   {
     CFG_TCP_SEND_BUFFER_SIZE,

@@ -1679,7 +1680,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     MANDATORY,
     "0",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },

   {
     CFG_SHM_SIGNUM,

@@ -1879,7 +1880,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
     ConfigInfo::CI_INT,
     MANDATORY,
     "0",
-    STR_VALUE(MAX_INT_RNIL) },
+    STR_VALUE(MAX_PORT_NO) },

   {
     CFG_SCI_HOST1_ID_0,
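The config hunks cap port-like parameters at MAX_PORT_NO instead of MAX_INT_RNIL. The limit strings are built with the two-level _STR_VALUE/STR_VALUE macros from the first hunk; the extra level matters because the '#' operator stringifies its argument without expanding it first. A small self-contained example of that preprocessor behaviour (main() and the prints are mine; the macros mirror the diff):

#include <cstdio>

#define MAX_PORT_NO 65535

#define _STR_VALUE(x) #x
#define STR_VALUE(x) _STR_VALUE(x)

int main()
{
  std::puts(_STR_VALUE(MAX_PORT_NO));  // prints "MAX_PORT_NO" (no expansion before #)
  std::puts(STR_VALUE(MAX_PORT_NO));   // prints "65535" (expanded, then stringified)
}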
@@ -278,12 +278,15 @@ GlobalDictCache::invalidate_all()
     if (vers->size())
     {
       TableVersion * ver = & vers->back();
-      ver->m_impl->m_status = NdbDictionary::Object::Invalid;
-      ver->m_status = DROPPED;
-      if (ver->m_refCount == 0)
+      if (ver->m_status != RETREIVING)
       {
-        delete ver->m_impl;
-        vers->erase(vers->size() - 1);
+        ver->m_impl->m_status = NdbDictionary::Object::Invalid;
+        ver->m_status = DROPPED;
+        if (ver->m_refCount == 0)
+        {
+          delete ver->m_impl;
+          vers->erase(vers->size() - 1);
+        }
       }
     }
     curr = m_tableHash.getNext(curr);
@@ -294,6 +294,7 @@ int runRestarts(NDBT_Context* ctx, NDBT_Step* step){
     }
     i++;
   }
+  ctx->stopTest();
   return result;
 }

@@ -146,7 +146,9 @@ my_bool acl_init(bool dont_read_acl_tables)

   acl_cache= new hash_filo(ACL_CACHE_SIZE, 0, 0,
                            (hash_get_key) acl_entry_get_key,
-                           (hash_free_key) free, system_charset_info);
+                           (hash_free_key) free,
+                           lower_case_file_system ?
+                           system_charset_info : &my_charset_bin);
   if (dont_read_acl_tables)
   {
     DBUG_RETURN(0); /* purecov: tested */
@@ -822,7 +822,7 @@ bool select_send::send_data(List<Item> &items)
   Protocol *protocol= thd->protocol;
   char buff[MAX_FIELD_WIDTH];
   String buffer(buff, sizeof(buff), &my_charset_bin);
-  DBUG_ENTER("send_data");
+  DBUG_ENTER("select_send::send_data");

   protocol->prepare_for_resend();
   Item *item;

@@ -1033,7 +1033,7 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
 bool select_export::send_data(List<Item> &items)
 {

-  DBUG_ENTER("send_data");
+  DBUG_ENTER("select_export::send_data");
   char buff[MAX_FIELD_WIDTH],null_buff[2],space[MAX_FIELD_WIDTH];
   bool space_inited=0;
   String tmp(buff,sizeof(buff),&my_charset_bin),*res;

@@ -1190,7 +1190,7 @@ bool select_dump::send_data(List<Item> &items)
   String tmp(buff,sizeof(buff),&my_charset_bin),*res;
   tmp.length(0);
   Item *item;
-  DBUG_ENTER("send_data");
+  DBUG_ENTER("select_dump::send_data");

   if (unit->offset_limit_cnt)
   { // using limit offset,count
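The three send_data() hunks above only rename the DBUG_ENTER tags from a bare "send_data" to the qualified method names, so the implementations can be told apart in a debug trace. A toy tracer — a stand-in for illustration, not the real dbug library — showing what the qualified tags buy:

#include <cstdio>

struct TraceScope
{
  const char *tag;
  explicit TraceScope(const char *t) : tag(t) { std::printf(">%s\n", tag); }
  ~TraceScope() { std::printf("<%s\n", tag); }
};

static void select_send_send_data()   { TraceScope t("select_send::send_data"); }
static void select_export_send_data() { TraceScope t("select_export::send_data"); }

int main()
{
  select_send_send_data();     // the trace now distinguishes the two implementations
  select_export_send_data();
}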
@@ -125,6 +125,7 @@ void lex_start(THD *thd, uchar *buf,uint length)
   lex->value_list.empty();
   lex->update_list.empty();
   lex->param_list.empty();
+  lex->auxilliary_table_list.empty();
   lex->unit.next= lex->unit.master=
   lex->unit.link_next= lex->unit.return_to= 0;
   lex->unit.prev= lex->unit.link_prev= 0;
@@ -1727,14 +1727,9 @@ static void reset_stmt_for_execute(Prepared_statement *stmt)
        tables;
        tables= tables->next)
   {
-    /*
-      Reset old pointers to TABLEs: they are not valid since the tables
-      were closed in the end of previous prepare or execute call.
-    */
-    tables->table= 0;
-    tables->table_list= 0;
+    tables->reinit_before_use(thd);
   }

   {
     SELECT_LEX_UNIT *unit= sl->master_unit();
     unit->unclean();

@@ -1743,6 +1738,17 @@ static void reset_stmt_for_execute(Prepared_statement *stmt)
       unit->reinit_exec_mechanism();
     }
   }
+  /*
+    Cleanup of the special case of DELETE t1, t2 FROM t1, t2, t3 ...
+    (multi-delete). We do a full clean up, although at the moment all we
+    need to clean in the tables of MULTI-DELETE list is 'table' member.
+  */
+  for (TABLE_LIST *tables= (TABLE_LIST*) lex->auxilliary_table_list.first;
+       tables;
+       tables= tables->next)
+  {
+    tables->reinit_before_use(thd);
+  }
   lex->current_select= &lex->select_lex;
   if (lex->result)
     lex->result->cleanup();
sql/table.cc (17 lines changed)
@@ -1544,6 +1544,23 @@ db_type get_table_type(const char *name)
   DBUG_RETURN(ha_checktype((enum db_type) (uint) *(head+3)));
 }

+/*
+  Cleanup this table for re-execution.
+
+  SYNOPSIS
+    st_table_list::reinit_before_use()
+*/
+
+void st_table_list::reinit_before_use(THD * /* thd */)
+{
+  /*
+    Reset old pointers to TABLEs: they are not valid since the tables
+    were closed in the end of previous prepare or execute call.
+  */
+  table= 0;
+  table_list= 0;
+}
+

 /*****************************************************************************
 ** Instansiate templates
@@ -238,6 +238,11 @@ typedef struct st_table_list
   bool cacheable_table; /* stop PS caching */
   /* used in multi-upd privelege check */
   bool table_in_update_from_clause;
+  /*
+    Cleanup for re-execution in a prepared statement or a stored
+    procedure.
+  */
+  void reinit_before_use(THD *thd);
 } TABLE_LIST;

 typedef struct st_changed_table_list