From f3a286cc5466f04c4c72a81f15f80c2f03a175a3 Mon Sep 17 00:00:00 2001
From: unknown
Date: Wed, 14 Jun 2006 20:22:03 +0200
Subject: [PATCH] Bug #19493 NDB does not ignore duplicate keys when using
 LOAD DATA LOCAL

- correction of backport error
---
 sql/ha_ndbcluster.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 5be09f697a0..e3dca77ba2f 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -3001,7 +3001,7 @@ void ha_ndbcluster::start_bulk_insert(ha_rows rows)
   DBUG_PRINT("enter", ("rows: %d", (int)rows));
   m_rows_inserted= (ha_rows) 0;
-  if (!m_use_write && m_ignore_dup_key)
+  if (m_ignore_dup_key && table->primary_key != MAX_KEY)
   {
     /*
       compare if expression with that in write_row
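
For illustration only, a minimal sketch of what the changed guard is meant to do, using simplified stand-ins for the NDB handler and TABLE objects (NdbHandlerStub, TableStub and the MAX_KEY value below are placeholders, not the real MySQL definitions): bulk-insert batching is skipped when duplicate keys are to be ignored and the table has a primary key, mirroring the per-row check referred to by the "compare if expression with that in write_row" comment in the hunk.

    #include <cstdint>

    typedef uint64_t ha_rows;                  // stand-in for MySQL's ha_rows
    static const unsigned int MAX_KEY = ~0u;   // placeholder sentinel: "table has no primary key"

    struct TableStub {
      unsigned int primary_key;                // index number of the primary key, or MAX_KEY
    };

    struct NdbHandlerStub {
      bool m_ignore_dup_key;                   // assumed set when duplicate keys should be ignored
                                               // (e.g. LOAD DATA LOCAL)
      ha_rows m_rows_inserted;
      TableStub *table;

      void start_bulk_insert(ha_rows rows)
      {
        m_rows_inserted= (ha_rows) 0;
        // Condition after the fix: rows cannot be batched when duplicate keys
        // must be ignored and a primary key exists, because each row is then
        // checked individually (the same condition write_row evaluates).
        if (m_ignore_dup_key && table->primary_key != MAX_KEY)
          return;
        // ... otherwise set up batched inserts based on the row estimate ...
        (void) rows;
      }
    };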