=== modified file 'innobase/dict/dict0dict.c'
--- innobase/dict/dict0dict.c	2009-05-19 06:32:21 +0000
+++ innobase/dict/dict0dict.c	2009-12-25 15:01:15 +0000
@@ -754,12 +754,6 @@
 
 	mutex_exit(&(dict_sys->mutex));
 
-	if (table != NULL) {
-	        if (!table->stat_initialized) {
-			dict_update_statistics(table);
-		}
-	}
-	
 	return(table);
 }
 
@@ -789,12 +783,6 @@
 
 	mutex_exit(&(dict_sys->mutex));
 
-	if (table != NULL) {
-	        if (!table->stat_initialized && !table->ibd_file_missing) {
-			dict_update_statistics(table);
-		}
-	}
-	
 	return(table);
 }
 

=== modified file 'innobase/dict/dict0load.c'
--- innobase/dict/dict0load.c	2006-04-26 03:50:29 +0000
+++ innobase/dict/dict0load.c	2009-12-25 10:48:37 +0000
@@ -190,7 +190,7 @@
 			/* The table definition was corrupt if there
 			is no index */
 
-			if (dict_table_get_first_index(table)) {
+			if (!table->stat_initialized || dict_table_get_first_index(table)) {
 				dict_update_statistics_low(table, TRUE);
 			}
 

=== modified file 'innobase/row/row0mysql.c'
--- innobase/row/row0mysql.c	2009-06-25 09:50:26 +0000
+++ innobase/row/row0mysql.c	2009-12-25 10:49:12 +0000
@@ -886,7 +886,7 @@
 	We calculate statistics at most every 16th round, since we may have
 	a counter table which is very small and updated very often. */
 
-	if (counter > 2000000000
+	if (!table->stat_initialized || counter > 2000000000
 	    || ((ib_longlong)counter > 16 + table->stat_n_rows / 16)) {
 
 		dict_update_statistics(table);

=== modified file 'sql/ha_innodb.cc'
--- sql/ha_innodb.cc	2009-12-01 10:24:44 +0000
+++ sql/ha_innodb.cc	2009-12-25 15:09:00 +0000
@@ -2307,7 +2307,7 @@
 	/* Init table lock structure */
 	thr_lock_data_init(&share->lock,&lock,(void*) 0);
 
-  	info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
+  	// info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST);
 
   	DBUG_RETURN(0);
 }
@@ -3516,6 +3516,9 @@
 	int		error = 0;
 
 	DBUG_ENTER("ha_innobase::update_row");
+	
+	if (!prebuilt->table->stat_initialized)
+		dict_update_statistics(prebuilt->table);
 
 	ut_ad(prebuilt->trx ==
                 (trx_t*) current_thd->ha_data[innobase_hton.slot]);
@@ -3598,6 +3601,9 @@
 	int		error = 0;
 
 	DBUG_ENTER("ha_innobase::delete_row");
+	
+	if (!prebuilt->table->stat_initialized)
+		dict_update_statistics(prebuilt->table);
 
 	ut_ad(prebuilt->trx ==
                 (trx_t*) current_thd->ha_data[innobase_hton.slot]);
@@ -3829,6 +3835,9 @@
 
   	DBUG_ENTER("index_read");
 
+	if (!prebuilt->table->stat_initialized)
+		dict_update_statistics(prebuilt->table);
+
 	ut_ad(prebuilt->trx ==
                 (trx_t*) current_thd->ha_data[innobase_hton.slot]);
 
@@ -4192,6 +4201,9 @@
 
 	/* Store the active index value so that we can restore the original
 	value after a scan */
+	
+	if (!prebuilt->table->stat_initialized)
+		dict_update_statistics(prebuilt->table);
 
 	if (prebuilt->clust_index_was_generated) {
 		err = change_active_index(MAX_KEY);
@@ -4268,6 +4280,9 @@
 
 	statistic_increment(current_thd->status_var.ha_read_rnd_count,
 			    &LOCK_status);
+			
+	if (!prebuilt->table->stat_initialized)
+		dict_update_statistics(prebuilt->table);
 
 	ut_ad(prebuilt->trx ==
                 (trx_t*) current_thd->ha_data[innobase_hton.slot]);
@@ -4319,6 +4334,9 @@
 	row_prebuilt_t*	prebuilt = (row_prebuilt_t*) innobase_prebuilt;
 	uint		len;
 
+	if (!prebuilt->table->stat_initialized)
+		dict_update_statistics(prebuilt->table);
+
 	ut_ad(prebuilt->trx ==
                 (trx_t*) current_thd->ha_data[innobase_hton.slot]);
 
@@ -5188,6 +5206,9 @@
 
 	key = table->key_info + active_index;
 
+	if (!prebuilt->table->stat_initialized)
+		dict_update_statistics(prebuilt->table);
+
 	index = dict_table_get_index_noninline(prebuilt->table, key->name);
 
 	range_start = dtuple_create_for_mysql(&heap1, key->key_parts);
@@ -5278,6 +5299,9 @@
 
 	trx_search_latch_release_if_reserved(prebuilt->trx);
 
+	if (!prebuilt->table->stat_initialized)
+		dict_update_statistics(prebuilt->table);
+		
 	index = dict_table_get_first_index_noninline(prebuilt->table);
 
 	local_data_file_length = ((ulonglong) index->stat_n_leaf_pages)
@@ -5312,7 +5336,10 @@
 	searches, we pretend that a sequential read takes the same time
 	as a random disk read, that is, we do not divide the following
 	by 10, which would be physically realistic. */
-
+	
+	if (!prebuilt->table->stat_initialized)
+		dict_update_statistics(prebuilt->table);
+		
 	return((double) (prebuilt->table->stat_clustered_index_size));
 }
 
@@ -5328,9 +5355,13 @@
 	uint	ranges,	/* in: how many ranges */
 	ha_rows rows)	/* in: estimated number of rows in the ranges */
 {
+	row_prebuilt_t* prebuilt	= (row_prebuilt_t*) innobase_prebuilt;
 	ha_rows total_rows;
 	double  time_for_scan;
 
+	if (!prebuilt->table->stat_initialized)
+		dict_update_statistics(prebuilt->table);
+
 	if (index != table->s->primary_key) {
 		/* Not clustered */
 	  	return(handler::read_time(index, ranges, rows));
@@ -5402,7 +5433,7 @@
 
 	trx_search_latch_release_if_reserved(prebuilt->trx);
 
- 	ib_table = prebuilt->table;
+	ib_table = prebuilt->table;
 
  	if (flag & HA_STATUS_TIME) {
  		/* In sql_show we call with this flag: update then statistics
@@ -5428,6 +5459,9 @@
 		}
  	}
 
+	if (!ib_table->stat_initialized)
+		dict_update_statistics(ib_table);
+
 	if (flag & HA_STATUS_VARIABLE) {
 		n_rows = ib_table->stat_n_rows;
 
