// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/abi"
	"internal/goarch"
	"internal/testenv"
	"math"
	"os"
	"reflect"
	"runtime"
	"slices"
	"strconv"
	"strings"
	"sync"
	"testing"
	"unsafe"
)

func TestHmapSize(t *testing.T) {
	// The structure of hmap is defined in runtime/map.go
	// and in cmd/compile/internal/gc/reflect.go and must be in sync.
	// The size of hmap should be 48 bytes on 64 bit and 28 bytes on 32 bit platforms.
	var hmapSize = uintptr(8 + 5*goarch.PtrSize)
	if runtime.RuntimeHmapSize != hmapSize {
		t.Errorf("sizeof(runtime.hmap{})==%d, want %d", runtime.RuntimeHmapSize, hmapSize)
	}

}

// negative zero is a good test because:
//  1. 0 and -0 are equal, yet have distinct representations.
//  2. 0 is represented as all zeros, -0 isn't.
//
// I'm not sure the language spec actually requires this behavior,
// but it's what the current map implementation does.
func TestNegativeZero(t *testing.T) {
	m := make(map[float64]bool, 0)

	m[+0.0] = true
	m[math.Copysign(0.0, -1.0)] = true // should overwrite +0 entry

	if len(m) != 1 {
		t.Error("length wrong")
	}

	for k := range m {
		if math.Copysign(1.0, k) > 0 {
			t.Error("wrong sign")
		}
	}

	m = make(map[float64]bool, 0)
	m[math.Copysign(0.0, -1.0)] = true
	m[+0.0] = true // should overwrite -0.0 entry

	if len(m) != 1 {
		t.Error("length wrong")
	}

	for k := range m {
		if math.Copysign(1.0, k) < 0 {
			t.Error("wrong sign")
		}
	}
}

func testMapNan(t *testing.T, m map[float64]int) {
	if len(m) != 3 {
		t.Error("length wrong")
	}
	s := 0
	for k, v := range m {
		if k == k {
			t.Error("nan disappeared")
		}
		if (v & (v - 1)) != 0 {
			t.Error("value wrong")
		}
		s |= v
	}
	if s != 7 {
		t.Error("values wrong")
	}
}

// nan is a good test because nan != nan, and nan has
// a randomized hash value.
func TestMapAssignmentNan(t *testing.T) {
	m := make(map[float64]int, 0)
	nan := math.NaN()

	// Test assignment.
	m[nan] = 1
	m[nan] = 2
	m[nan] = 4
	testMapNan(t, m)
}

// nan is a good test because nan != nan, and nan has
// a randomized hash value.
func TestMapOperatorAssignmentNan(t *testing.T) {
	m := make(map[float64]int, 0)
	nan := math.NaN()

	// Test assignment operations.
	m[nan] += 1
	m[nan] += 2
	m[nan] += 4
	testMapNan(t, m)
}

func TestMapOperatorAssignment(t *testing.T) {
	m := make(map[int]int, 0)

	// "m[k] op= x" is rewritten into "m[k] = m[k] op x"
	// differently when op is / or % than when it isn't.
	// Simple test to make sure they all work as expected.
	m[0] = 12345
	m[0] += 67890
	m[0] /= 123
	m[0] %= 456

	const want = (12345 + 67890) / 123 % 456
	if got := m[0]; got != want {
		t.Errorf("got %d, want %d", got, want)
	}
}

var sinkAppend bool

func TestMapAppendAssignment(t *testing.T) {
	m := make(map[int][]int, 0)

	m[0] = nil
	m[0] = append(m[0], 12345)
	m[0] = append(m[0], 67890)
	sinkAppend, m[0] = !sinkAppend, append(m[0], 123, 456)
	a := []int{7, 8, 9, 0}
	m[0] = append(m[0], a...)

	want := []int{12345, 67890, 123, 456, 7, 8, 9, 0}
	if got := m[0]; !reflect.DeepEqual(got, want) {
		t.Errorf("got %v, want %v", got, want)
	}
}

// Maps aren't actually copied on assignment.
func TestAlias(t *testing.T) {
	m := make(map[int]int, 0)
	m[0] = 5
	n := m
	n[0] = 6
	if m[0] != 6 {
		t.Error("alias didn't work")
	}
}

func TestGrowWithNaN(t *testing.T) {
	m := make(map[float64]int, 4)
	nan := math.NaN()

	// Use both assignment and assignment operations as they may
	// behave differently.
	m[nan] = 1
	m[nan] = 2
	m[nan] += 4

	cnt := 0
	s := 0
	growflag := true
	for k, v := range m {
		if growflag {
			// force a hashtable resize
			for i := 0; i < 50; i++ {
				m[float64(i)] = i
			}
			for i := 50; i < 100; i++ {
				m[float64(i)] += i
			}
			growflag = false
		}
		if k != k {
			cnt++
			s |= v
		}
	}
	if cnt != 3 {
		t.Error("NaN keys lost during grow")
	}
	if s != 7 {
		t.Error("NaN values lost during grow")
	}
}

type FloatInt struct {
	x float64
	y int
}

func TestGrowWithNegativeZero(t *testing.T) {
	negzero := math.Copysign(0.0, -1.0)
	m := make(map[FloatInt]int, 4)
	m[FloatInt{0.0, 0}] = 1
	m[FloatInt{0.0, 1}] += 2
	m[FloatInt{0.0, 2}] += 4
	m[FloatInt{0.0, 3}] = 8
	growflag := true
	s := 0
	cnt := 0
	negcnt := 0
	// The first iteration should return the +0 key.
	// The subsequent iterations should return the -0 key.
	// I'm not really sure this is required by the spec,
	// but it makes sense.
	// TODO: are we allowed to get the first entry returned again???
	for k, v := range m {
		if v == 0 {
			continue
		} // ignore entries added to grow table
		cnt++
		if math.Copysign(1.0, k.x) < 0 {
			if v&16 == 0 {
				t.Error("key/value not updated together 1")
			}
			negcnt++
			s |= v & 15
		} else {
			if v&16 == 16 {
				t.Error("key/value not updated together 2", k, v)
			}
			s |= v
		}
		if growflag {
			// force a hashtable resize
			for i := 0; i < 100; i++ {
				m[FloatInt{3.0, i}] = 0
			}
			// then change all the entries
			// to negative zero
			m[FloatInt{negzero, 0}] = 1 | 16
			m[FloatInt{negzero, 1}] = 2 | 16
			m[FloatInt{negzero, 2}] = 4 | 16
			m[FloatInt{negzero, 3}] = 8 | 16
			growflag = false
		}
	}
	if s != 15 {
		t.Error("entry missing", s)
	}
	if cnt != 4 {
		t.Error("wrong number of entries returned by iterator", cnt)
	}
	if negcnt != 3 {
		t.Error("update to negzero missed by iteration", negcnt)
	}
}

func TestIterGrowAndDelete(t *testing.T) {
	m := make(map[int]int, 4)
	for i := 0; i < 100; i++ {
		m[i] = i
	}
	growflag := true
	for k := range m {
		if growflag {
			// grow the table
			for i := 100; i < 1000; i++ {
				m[i] = i
			}
			// delete all odd keys
			for i := 1; i < 1000; i += 2 {
				delete(m, i)
			}
			growflag = false
		} else {
			if k&1 == 1 {
				t.Error("odd value returned")
			}
		}
	}
}

// make sure old bucket arrays don't get GCd while
// an iterator is still using them.
func TestIterGrowWithGC(t *testing.T) {
	m := make(map[int]int, 4)
	for i := 0; i < 8; i++ {
		m[i] = i
	}
	for i := 8; i < 16; i++ {
		m[i] += i
	}
	growflag := true
	bitmask := 0
	for k := range m {
		if k < 16 {
			bitmask |= 1 << uint(k)
		}
		if growflag {
			// grow the table
			for i := 100; i < 1000; i++ {
				m[i] = i
			}
			// trigger a gc
			runtime.GC()
			growflag = false
		}
	}
	if bitmask != 1<<16-1 {
		t.Error("missing key", bitmask)
	}
}

func testConcurrentReadsAfterGrowth(t *testing.T, useReflect bool) {
	t.Parallel()
	if runtime.GOMAXPROCS(-1) == 1 {
		defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(16))
	}
	numLoop := 10
	numGrowStep := 250
	numReader := 16
	if testing.Short() {
		numLoop, numGrowStep = 2, 100
	}
	for i := 0; i < numLoop; i++ {
		m := make(map[int]int, 0)
		for gs := 0; gs < numGrowStep; gs++ {
			m[gs] = gs
			var wg sync.WaitGroup
			wg.Add(numReader * 2)
			for nr := 0; nr < numReader; nr++ {
				go func() {
					defer wg.Done()
					for range m {
					}
				}()
				go func() {
					defer wg.Done()
					for key := 0; key < gs; key++ {
						_ = m[key]
					}
				}()
				if useReflect {
					wg.Add(1)
					go func() {
						defer wg.Done()
						mv := reflect.ValueOf(m)
						keys := mv.MapKeys()
						for _, k := range keys {
							mv.MapIndex(k)
						}
					}()
				}
			}
			wg.Wait()
		}
	}
}

func TestConcurrentReadsAfterGrowth(t *testing.T) {
	testConcurrentReadsAfterGrowth(t, false)
}

func TestConcurrentReadsAfterGrowthReflect(t *testing.T) {
	testConcurrentReadsAfterGrowth(t, true)
}

func TestBigItems(t *testing.T) {
	var key [256]string
	for i := 0; i < 256; i++ {
		key[i] = "foo"
	}
	m := make(map[[256]string][256]string, 4)
	for i := 0; i < 100; i++ {
		key[37] = fmt.Sprintf("string%02d", i)
		m[key] = key
	}
	var keys [100]string
	var values [100]string
	i := 0
	for k, v := range m {
		keys[i] = k[37]
		values[i] = v[37]
		i++
	}
	slices.Sort(keys[:])
	slices.Sort(values[:])
	for i := 0; i < 100; i++ {
		if keys[i] != fmt.Sprintf("string%02d", i) {
			t.Errorf("#%d: missing key: %v", i, keys[i])
		}
		if values[i] != fmt.Sprintf("string%02d", i) {
			t.Errorf("#%d: missing value: %v", i, values[i])
		}
	}
}

func TestMapHugeZero(t *testing.T) {
	type T [4000]byte
	m := map[int]T{}
	x := m[0]
	if x != (T{}) {
		t.Errorf("map value not zero")
	}
	y, ok := m[0]
	if ok {
		t.Errorf("map value should be missing")
	}
	if y != (T{}) {
		t.Errorf("map value not zero")
	}
}

type empty struct {
}

func TestEmptyKeyAndValue(t *testing.T) {
	a := make(map[int]empty, 4)
	b := make(map[empty]int, 4)
	c := make(map[empty]empty, 4)
	a[0] = empty{}
	b[empty{}] = 0
	b[empty{}] = 1
	c[empty{}] = empty{}

	if len(a) != 1 {
		t.Errorf("empty value insert problem")
	}
	if b[empty{}] != 1 {
		t.Errorf("empty key returned wrong value")
	}
}

// Tests a map with a single bucket, with same-lengthed short keys
// ("quick keys") as well as long keys.
func TestSingleBucketMapStringKeys_DupLen(t *testing.T) {
	testMapLookups(t, map[string]string{
		"x":                      "x1val",
		"xx":                     "x2val",
		"foo":                    "fooval",
		"bar":                    "barval", // same key length as "foo"
		"xxxx":                   "x4val",
		strings.Repeat("x", 128): "longval1",
		strings.Repeat("y", 128): "longval2",
	})
}

// Tests a map with a single bucket, with all keys having different lengths.
func TestSingleBucketMapStringKeys_NoDupLen(t *testing.T) {
	testMapLookups(t, map[string]string{
		"x":                      "x1val",
		"xx":                     "x2val",
		"foo":                    "fooval",
		"xxxx":                   "x4val",
		"xxxxx":                  "x5val",
		"xxxxxx":                 "x6val",
		strings.Repeat("x", 128): "longval",
	})
}

func testMapLookups(t *testing.T, m map[string]string) {
	for k, v := range m {
		if m[k] != v {
			t.Fatalf("m[%q] = %q; want %q", k, m[k], v)
		}
	}
}

// Tests whether the iterator returns the right elements when
// started in the middle of a grow, when the keys are NaNs.
func TestMapNanGrowIterator(t *testing.T) {
	m := make(map[float64]int)
	nan := math.NaN()
	const nBuckets = 16
	// To fill nBuckets buckets takes LOAD * nBuckets keys.
	nKeys := int(nBuckets * runtime.HashLoad)

	// Get map to full point with nan keys.
	for i := 0; i < nKeys; i++ {
		m[nan] = i
	}
	// Trigger grow
	m[1.0] = 1
	delete(m, 1.0)

	// Run iterator
	found := make(map[int]struct{})
	for _, v := range m {
		if v != -1 {
			if _, repeat := found[v]; repeat {
				t.Fatalf("repeat of value %d", v)
			}
			found[v] = struct{}{}
		}
		if len(found) == nKeys/2 {
			// Halfway through iteration, finish grow.
			for i := 0; i < nBuckets; i++ {
				delete(m, 1.0)
			}
		}
	}
	if len(found) != nKeys {
		t.Fatalf("missing value")
	}
}

func TestMapIterOrder(t *testing.T) {
	sizes := []int{3, 7, 9, 15}
	if abi.MapBucketCountBits >= 5 {
		// it gets flaky (often only one iteration order) at size 3 when abi.MapBucketCountBits >=5.
		t.Fatalf("This test becomes flaky if abi.MapBucketCountBits(=%d) is 5 or larger", abi.MapBucketCountBits)
	}
	for _, n := range sizes {
		for i := 0; i < 1000; i++ {
			// Make m be {0: true, 1: true, ..., n-1: true}.
			m := make(map[int]bool)
			for i := 0; i < n; i++ {
				m[i] = true
			}
			// Check that iterating over the map produces at least two different orderings.
			ord := func() []int {
				var s []int
				for key := range m {
					s = append(s, key)
				}
				return s
			}
			first := ord()
			ok := false
			for try := 0; try < 100; try++ {
				if !reflect.DeepEqual(first, ord()) {
					ok = true
					break
				}
			}
			if !ok {
				t.Errorf("Map with n=%d elements had consistent iteration order: %v", n, first)
				break
			}
		}
	}
}

// Issue 8410
func TestMapSparseIterOrder(t *testing.T) {
	// Run several rounds to increase the probability
	// of failure. One is not enough.
NextRound:
	for round := 0; round < 10; round++ {
		m := make(map[int]bool)
		// Add 1000 items, remove 980.
		for i := 0; i < 1000; i++ {
			m[i] = true
		}
		for i := 20; i < 1000; i++ {
			delete(m, i)
		}

		var first []int
		for i := range m {
			first = append(first, i)
		}

		// 800 chances to get a different iteration order.
		// See bug 8736 for why we need so many tries.
		for n := 0; n < 800; n++ {
			idx := 0
			for i := range m {
				if i != first[idx] {
					// iteration order changed.
					continue NextRound
				}
				idx++
			}
		}
		t.Fatalf("constant iteration order on round %d: %v", round, first)
	}
}

func TestMapStringBytesLookup(t *testing.T) {
	// Use large string keys to avoid small-allocation coalescing,
	// which can cause AllocsPerRun to report lower counts than it should.
	m := map[string]int{
		"1000000000000000000000000000000000000000000000000": 1,
		"2000000000000000000000000000000000000000000000000": 2,
	}
	buf := []byte("1000000000000000000000000000000000000000000000000")
	if x := m[string(buf)]; x != 1 {
		t.Errorf(`m[string([]byte("1"))] = %d, want 1`, x)
	}
	buf[0] = '2'
	if x := m[string(buf)]; x != 2 {
		t.Errorf(`m[string([]byte("2"))] = %d, want 2`, x)
	}

	var x int
	n := testing.AllocsPerRun(100, func() {
		x += m[string(buf)]
	})
	if n != 0 {
		t.Errorf("AllocsPerRun for m[string(buf)] = %v, want 0", n)
	}

	x = 0
	n = testing.AllocsPerRun(100, func() {
		y, ok := m[string(buf)]
		if !ok {
			panic("!ok")
		}
		x += y
	})
	if n != 0 {
		t.Errorf("AllocsPerRun for x,ok = m[string(buf)] = %v, want 0", n)
	}
}

func TestMapLargeKeyNoPointer(t *testing.T) {
	const (
		I = 1000
		N = 64
	)
	type T [N]int
	m := make(map[T]int)
	for i := 0; i < I; i++ {
		var v T
		for j := 0; j < N; j++ {
			v[j] = i + j
		}
		m[v] = i
	}
	runtime.GC()
	for i := 0; i < I; i++ {
		var v T
		for j := 0; j < N; j++ {
			v[j] = i + j
		}
		if m[v] != i {
			t.Fatalf("corrupted map: want %+v, got %+v", i, m[v])
		}
	}
}

func TestMapLargeValNoPointer(t *testing.T) {
	const (
		I = 1000
		N = 64
	)
	type T [N]int
	m := make(map[int]T)
	for i := 0; i < I; i++ {
		var v T
		for j := 0; j < N; j++ {
			v[j] = i + j
		}
		m[i] = v
	}
	runtime.GC()
	for i := 0; i < I; i++ {
		var v T
		for j := 0; j < N; j++ {
			v[j] = i + j
		}
		v1 := m[i]
		for j := 0; j < N; j++ {
			if v1[j] != v[j] {
				t.Fatalf("corrupted map: want %+v, got %+v", v, v1)
			}
		}
	}
}

// Test that making a map with a large or invalid hint
// doesn't panic. (Issue 19926).
func TestIgnoreBogusMapHint(t *testing.T) {
	for _, hint := range []int64{-1, 1 << 62} {
		_ = make(map[int]int, hint)
	}
}

const bs = abi.MapBucketCount

// belowOverflow should be a pretty-full pair of buckets;
// atOverflow is 1/8 bs larger = 13/8 buckets or two buckets
// that are 13/16 full each, which is the overflow boundary.
// Adding one to that should ensure overflow to the next higher size.
const (
	belowOverflow = bs * 3 / 2           // 1.5 bs = 2 buckets @ 75%
	atOverflow    = belowOverflow + bs/8 // 2 buckets at 13/16 fill.
)

var mapBucketTests = [...]struct {
	n        int // n is the number of map elements
	noescape int // number of expected buckets for non-escaping map
	escape   int // number of expected buckets for escaping map
}{
	{-(1 << 30), 1, 1},
	{-1, 1, 1},
	{0, 1, 1},
	{1, 1, 1},
	{bs, 1, 1},
	{bs + 1, 2, 2},
	{belowOverflow, 2, 2},  // 1.5 bs = 2 buckets @ 75%
	{atOverflow + 1, 4, 4}, // 13/8 bs + 1 == overflow to 4

	{2 * belowOverflow, 4, 4}, // 3 bs = 4 buckets @75%
	{2*atOverflow + 1, 8, 8},  // 13/4 bs + 1 = overflow to 8

	{4 * belowOverflow, 8, 8},  // 6 bs = 8 buckets @ 75%
	{4*atOverflow + 1, 16, 16}, // 13/2 bs + 1 = overflow to 16
}

func TestMapBuckets(t *testing.T) {
	// Test that maps of different sizes have the right number of buckets.
	// Non-escaping maps with small buckets (like map[int]int) never
	// have a nil bucket pointer due to starting with preallocated buckets
	// on the stack. Escaping maps start with a non-nil bucket pointer if
	// hint size is above bucketCnt and thereby have more than one bucket.
	// These tests depend on bucketCnt and loadFactor* in map.go.
	t.Run("mapliteral", func(t *testing.T) {
		for _, tt := range mapBucketTests {
			localMap := map[int]int{}
			if runtime.MapBucketsPointerIsNil(localMap) {
				t.Errorf("no escape: buckets pointer is nil for non-escaping map")
			}
			for i := 0; i < tt.n; i++ {
				localMap[i] = i
			}
			if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
				t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
			}
			escapingMap := runtime.Escape(map[int]int{})
			if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
				t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
			}
			for i := 0; i < tt.n; i++ {
				escapingMap[i] = i
			}
			if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
				t.Errorf("escape n=%d want %d buckets, got %d", tt.n, tt.escape, got)
			}
		}
	})
	t.Run("nohint", func(t *testing.T) {
		for _, tt := range mapBucketTests {
			localMap := make(map[int]int)
			if runtime.MapBucketsPointerIsNil(localMap) {
				t.Errorf("no escape: buckets pointer is nil for non-escaping map")
			}
			for i := 0; i < tt.n; i++ {
				localMap[i] = i
			}
			if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
				t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
			}
			escapingMap := runtime.Escape(make(map[int]int))
			if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
				t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
			}
			for i := 0; i < tt.n; i++ {
				escapingMap[i] = i
			}
			if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
				t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
			}
		}
	})
	t.Run("makemap", func(t *testing.T) {
		for _, tt := range mapBucketTests {
			localMap := make(map[int]int, tt.n)
			if runtime.MapBucketsPointerIsNil(localMap) {
				t.Errorf("no escape: buckets pointer is nil for non-escaping map")
			}
			for i := 0; i < tt.n; i++ {
				localMap[i] = i
			}
			if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
				t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
			}
			escapingMap := runtime.Escape(make(map[int]int, tt.n))
			if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
				t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
			}
			for i := 0; i < tt.n; i++ {
				escapingMap[i] = i
			}
			if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
				t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
			}
		}
	})
	t.Run("makemap64", func(t *testing.T) {
		for _, tt := range mapBucketTests {
			localMap := make(map[int]int, int64(tt.n))
			if runtime.MapBucketsPointerIsNil(localMap) {
				t.Errorf("no escape: buckets pointer is nil for non-escaping map")
			}
			for i := 0; i < tt.n; i++ {
				localMap[i] = i
			}
			if got := runtime.MapBucketsCount(localMap); got != tt.noescape {
				t.Errorf("no escape: n=%d want %d buckets, got %d", tt.n, tt.noescape, got)
			}
			escapingMap := runtime.Escape(make(map[int]int, tt.n))
			if count := runtime.MapBucketsCount(escapingMap); count > 1 && runtime.MapBucketsPointerIsNil(escapingMap) {
				t.Errorf("escape: buckets pointer is nil for n=%d buckets", count)
			}
			for i := 0; i < tt.n; i++ {
				escapingMap[i] = i
			}
			if got := runtime.MapBucketsCount(escapingMap); got != tt.escape {
				t.Errorf("escape: n=%d want %d buckets, got %d", tt.n, tt.escape, got)
			}
		}
	})

}

func benchmarkMapPop(b *testing.B, n int) {
	m := map[int]int{}
	for i := 0; i < b.N; i++ {
		for j := 0; j < n; j++ {
			m[j] = j
		}
		for j := 0; j < n; j++ {
			// Use iterator to pop an element.
			// We want this to be fast, see issue 8412.
			for k := range m {
				delete(m, k)
				break
			}
		}
	}
}

func BenchmarkMapPop100(b *testing.B)   { benchmarkMapPop(b, 100) }
func BenchmarkMapPop1000(b *testing.B)  { benchmarkMapPop(b, 1000) }
func BenchmarkMapPop10000(b *testing.B) { benchmarkMapPop(b, 10000) }

var testNonEscapingMapVariable int = 8

func TestNonEscapingMap(t *testing.T) {
	n := testing.AllocsPerRun(1000, func() {
		m := map[int]int{}
		m[0] = 0
	})
	if n != 0 {
		t.Fatalf("mapliteral: want 0 allocs, got %v", n)
	}
	n = testing.AllocsPerRun(1000, func() {
		m := make(map[int]int)
		m[0] = 0
	})
	if n != 0 {
		t.Fatalf("no hint: want 0 allocs, got %v", n)
	}
	n = testing.AllocsPerRun(1000, func() {
		m := make(map[int]int, 8)
		m[0] = 0
	})
	if n != 0 {
		t.Fatalf("with small hint: want 0 allocs, got %v", n)
	}
	n = testing.AllocsPerRun(1000, func() {
		m := make(map[int]int, testNonEscapingMapVariable)
		m[0] = 0
	})
	if n != 0 {
		t.Fatalf("with variable hint: want 0 allocs, got %v", n)
	}

}

func benchmarkMapAssignInt32(b *testing.B, n int) {
	a := make(map[int32]int)
	for i := 0; i < b.N; i++ {
		a[int32(i&(n-1))] = i
	}
}

func benchmarkMapOperatorAssignInt32(b *testing.B, n int) {
	a := make(map[int32]int)
	for i := 0; i < b.N; i++ {
		a[int32(i&(n-1))] += i
	}
}

func benchmarkMapAppendAssignInt32(b *testing.B, n int) {
	a := make(map[int32][]int)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		key := int32(i & (n - 1))
		a[key] = append(a[key], i)
	}
}

func benchmarkMapDeleteInt32(b *testing.B, n int) {
	a := make(map[int32]int, n)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if len(a) == 0 {
			b.StopTimer()
			for j := i; j < i+n; j++ {
				a[int32(j)] = j
			}
			b.StartTimer()
		}
		delete(a, int32(i))
	}
}

func benchmarkMapAssignInt64(b *testing.B, n int) {
	a := make(map[int64]int)
	for i := 0; i < b.N; i++ {
		a[int64(i&(n-1))] = i
	}
}

func benchmarkMapOperatorAssignInt64(b *testing.B, n int) {
	a := make(map[int64]int)
	for i := 0; i < b.N; i++ {
		a[int64(i&(n-1))] += i
	}
}

func benchmarkMapAppendAssignInt64(b *testing.B, n int) {
	a := make(map[int64][]int)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		key := int64(i & (n - 1))
		a[key] = append(a[key], i)
	}
}

func benchmarkMapDeleteInt64(b *testing.B, n int) {
	a := make(map[int64]int, n)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if len(a) == 0 {
			b.StopTimer()
			for j := i; j < i+n; j++ {
				a[int64(j)] = j
			}
			b.StartTimer()
		}
		delete(a, int64(i))
	}
}

func benchmarkMapAssignStr(b *testing.B, n int) {
	k := make([]string, n)
	for i := 0; i < len(k); i++ {
		k[i] = strconv.Itoa(i)
	}
	b.ResetTimer()
	a := make(map[string]int)
	for i := 0; i < b.N; i++ {
		a[k[i&(n-1)]] = i
	}
}

func benchmarkMapOperatorAssignStr(b *testing.B, n int) {
	k := make([]string, n)
	for i := 0; i < len(k); i++ {
		k[i] = strconv.Itoa(i)
	}
	b.ResetTimer()
	a := make(map[string]string)
	for i := 0; i < b.N; i++ {
		key := k[i&(n-1)]
		a[key] += key
	}
}

func benchmarkMapAppendAssignStr(b *testing.B, n int) {
	k := make([]string, n)
	for i := 0; i < len(k); i++ {
		k[i] = strconv.Itoa(i)
	}
	a := make(map[string][]string)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		key := k[i&(n-1)]
		a[key] = append(a[key], key)
	}
}

func benchmarkMapDeleteStr(b *testing.B, n int) {
	i2s := make([]string, n)
	for i := 0; i < n; i++ {
		i2s[i] = strconv.Itoa(i)
	}
	a := make(map[string]int, n)
	b.ResetTimer()
	k := 0
	for i := 0; i < b.N; i++ {
		if len(a) == 0 {
			b.StopTimer()
			for j := 0; j < n; j++ {
				a[i2s[j]] = j
			}
			k = i
			b.StartTimer()
		}
		delete(a, i2s[i-k])
	}
}

func benchmarkMapDeletePointer(b *testing.B, n int) {
	i2p := make([]*int, n)
	for i := 0; i < n; i++ {
		i2p[i] = new(int)
	}
	a := make(map[*int]int, n)
	b.ResetTimer()
	k := 0
	for i := 0; i < b.N; i++ {
		if len(a) == 0 {
			b.StopTimer()
			for j := 0; j < n; j++ {
				a[i2p[j]] = j
			}
			k = i
			b.StartTimer()
		}
		delete(a, i2p[i-k])
	}
}

func runWith(f func(*testing.B, int), v ...int) func(*testing.B) {
	return func(b *testing.B) {
		for _, n := range v {
			b.Run(strconv.Itoa(n), func(b *testing.B) { f(b, n) })
		}
	}
}

func BenchmarkMapAssign(b *testing.B) {
	b.Run("Int32", runWith(benchmarkMapAssignInt32, 1<<8, 1<<16))
	b.Run("Int64", runWith(benchmarkMapAssignInt64, 1<<8, 1<<16))
	b.Run("Str", runWith(benchmarkMapAssignStr, 1<<8, 1<<16))
}

func BenchmarkMapOperatorAssign(b *testing.B) {
	b.Run("Int32", runWith(benchmarkMapOperatorAssignInt32, 1<<8, 1<<16))
	b.Run("Int64", runWith(benchmarkMapOperatorAssignInt64, 1<<8, 1<<16))
	b.Run("Str", runWith(benchmarkMapOperatorAssignStr, 1<<8, 1<<16))
}

func BenchmarkMapAppendAssign(b *testing.B) {
	b.Run("Int32", runWith(benchmarkMapAppendAssignInt32, 1<<8, 1<<16))
	b.Run("Int64", runWith(benchmarkMapAppendAssignInt64, 1<<8, 1<<16))
	b.Run("Str", runWith(benchmarkMapAppendAssignStr, 1<<8, 1<<16))
}

func BenchmarkMapDelete(b *testing.B) {
	b.Run("Int32", runWith(benchmarkMapDeleteInt32, 100, 1000, 10000))
	b.Run("Int64", runWith(benchmarkMapDeleteInt64, 100, 1000, 10000))
	b.Run("Str", runWith(benchmarkMapDeleteStr, 100, 1000, 10000))
	b.Run("Pointer", runWith(benchmarkMapDeletePointer, 100, 1000, 10000))
}

func TestDeferDeleteSlow(t *testing.T) {
	ks := []complex128{0, 1, 2, 3}

	m := make(map[any]int)
	for i, k := range ks {
		m[k] = i
	}
	if len(m) != len(ks) {
		t.Errorf("want %d elements, got %d", len(ks), len(m))
	}

	func() {
		for _, k := range ks {
			defer delete(m, k)
		}
	}()
	if len(m) != 0 {
		t.Errorf("want 0 elements, got %d", len(m))
	}
}

// TestIncrementAfterDeleteValueInt and the tests that follow exercise Issue 25936.
// Value types int, int32, int64 are affected. Value type string
// works as expected.
func TestIncrementAfterDeleteValueInt(t *testing.T) {
	const key1 = 12
	const key2 = 13

	m := make(map[int]int)
	m[key1] = 99
	delete(m, key1)
	m[key2]++
	if n2 := m[key2]; n2 != 1 {
		t.Errorf("incremented 0 to %d", n2)
	}
}

func TestIncrementAfterDeleteValueInt32(t *testing.T) {
	const key1 = 12
	const key2 = 13

	m := make(map[int]int32)
	m[key1] = 99
	delete(m, key1)
	m[key2]++
	if n2 := m[key2]; n2 != 1 {
		t.Errorf("incremented 0 to %d", n2)
	}
}

func TestIncrementAfterDeleteValueInt64(t *testing.T) {
	const key1 = 12
	const key2 = 13

	m := make(map[int]int64)
	m[key1] = 99
	delete(m, key1)
	m[key2]++
	if n2 := m[key2]; n2 != 1 {
		t.Errorf("incremented 0 to %d", n2)
	}
}

func TestIncrementAfterDeleteKeyStringValueInt(t *testing.T) {
	const key1 = ""
	const key2 = "x"

	m := make(map[string]int)
	m[key1] = 99
	delete(m, key1)
	m[key2] += 1
	if n2 := m[key2]; n2 != 1 {
		t.Errorf("incremented 0 to %d", n2)
	}
}

func TestIncrementAfterDeleteKeyValueString(t *testing.T) {
	const key1 = ""
	const key2 = "x"

	m := make(map[string]string)
	m[key1] = "99"
	delete(m, key1)
	m[key2] += "1"
	if n2 := m[key2]; n2 != "1" {
		t.Errorf("appended '1' to empty (nil) string, got %s", n2)
	}
}

// TestIncrementAfterBulkClearKeyStringValueInt tests that map bulk
// deletion (mapclear) still works as expected. Note that it was not
// affected by Issue 25936.
func TestIncrementAfterBulkClearKeyStringValueInt(t *testing.T) {
	const key1 = ""
	const key2 = "x"

	m := make(map[string]int)
	m[key1] = 99
	for k := range m {
		delete(m, k)
	}
	m[key2]++
	if n2 := m[key2]; n2 != 1 {
		t.Errorf("incremented 0 to %d", n2)
	}
}

func TestMapTombstones(t *testing.T) {
	m := map[int]int{}
	const N = 10000
	// Fill a map.
	for i := 0; i < N; i++ {
		m[i] = i
	}
	runtime.MapTombstoneCheck(m)
	// Delete half of the entries.
	for i := 0; i < N; i += 2 {
		delete(m, i)
	}
	runtime.MapTombstoneCheck(m)
	// Add new entries to fill in holes.
	for i := N; i < 3*N/2; i++ {
		m[i] = i
	}
	runtime.MapTombstoneCheck(m)
	// Delete everything.
	for i := 0; i < 3*N/2; i++ {
		delete(m, i)
	}
	runtime.MapTombstoneCheck(m)
}

type canString int

func (c canString) String() string {
	return fmt.Sprintf("%d", int(c))
}

func TestMapInterfaceKey(t *testing.T) {
	// Test all the special cases in runtime.typehash.
	type GrabBag struct {
		f32  float32
		f64  float64
		c64  complex64
		c128 complex128
		s    string
		i0   any
		i1   interface {
			String() string
		}
		a [4]string
	}

	m := map[any]bool{}
	// Put a bunch of data in m, so that a bad hash is likely to
	// lead to a bad bucket, which will lead to a missed lookup.
	for i := 0; i < 1000; i++ {
		m[i] = true
	}
	m[GrabBag{f32: 1.0}] = true
	if !m[GrabBag{f32: 1.0}] {
		panic("f32 not found")
	}
	m[GrabBag{f64: 1.0}] = true
	if !m[GrabBag{f64: 1.0}] {
		panic("f64 not found")
	}
	m[GrabBag{c64: 1.0i}] = true
	if !m[GrabBag{c64: 1.0i}] {
		panic("c64 not found")
	}
	m[GrabBag{c128: 1.0i}] = true
	if !m[GrabBag{c128: 1.0i}] {
		panic("c128 not found")
	}
	m[GrabBag{s: "foo"}] = true
	if !m[GrabBag{s: "foo"}] {
		panic("string not found")
	}
	m[GrabBag{i0: "foo"}] = true
	if !m[GrabBag{i0: "foo"}] {
		panic("interface{} not found")
	}
	m[GrabBag{i1: canString(5)}] = true
	if !m[GrabBag{i1: canString(5)}] {
		panic("interface{String() string} not found")
	}
	m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] = true
	if !m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] {
		panic("array not found")
	}
}

type panicStructKey struct {
	sli []int
}

func (p panicStructKey) String() string {
	return "panic"
}

type structKey struct {
}

func (structKey) String() string {
	return "structKey"
}

func TestEmptyMapWithInterfaceKey(t *testing.T) {
	var (
		b    bool
		i    int
		i8   int8
		i16  int16
		i32  int32
		i64  int64
		ui   uint
		ui8  uint8
		ui16 uint16
		ui32 uint32
		ui64 uint64
		uipt uintptr
		f32  float32
		f64  float64
		c64  complex64
		c128 complex128
		a    [4]string
		s    string
		p    *int
		up   unsafe.Pointer
		ch   chan int
		i0   any
		i1   interface {
			String() string
		}
		structKey structKey
		i0Panic   any = []int{}
		i1Panic   interface {
			String() string
		} = panicStructKey{}
		panicStructKey = panicStructKey{}
		sli            []int
		me             = map[any]struct{}{}
		mi             = map[interface {
			String() string
		}]struct{}{}
	)
	mustNotPanic := func(f func()) {
		f()
	}
	mustPanic := func(f func()) {
		defer func() {
			r := recover()
			if r == nil {
				t.Errorf("didn't panic")
			}
		}()
		f()
	}
	mustNotPanic(func() {
		_ = me[b]
	})
	mustNotPanic(func() {
		_ = me[i]
	})
	mustNotPanic(func() {
		_ = me[i8]
	})
	mustNotPanic(func() {
		_ = me[i16]
	})
	mustNotPanic(func() {
		_ = me[i32]
	})
	mustNotPanic(func() {
		_ = me[i64]
	})
	mustNotPanic(func() {
		_ = me[ui]
	})
	mustNotPanic(func() {
		_ = me[ui8]
	})
	mustNotPanic(func() {
		_ = me[ui16]
	})
	mustNotPanic(func() {
		_ = me[ui32]
	})
	mustNotPanic(func() {
		_ = me[ui64]
	})
	mustNotPanic(func() {
		_ = me[uipt]
	})
	mustNotPanic(func() {
		_ = me[f32]
	})
	mustNotPanic(func() {
		_ = me[f64]
	})
	mustNotPanic(func() {
		_ = me[c64]
	})
	mustNotPanic(func() {
		_ = me[c128]
	})
	mustNotPanic(func() {
		_ = me[a]
	})
	mustNotPanic(func() {
		_ = me[s]
	})
	mustNotPanic(func() {
		_ = me[p]
	})
	mustNotPanic(func() {
		_ = me[up]
	})
	mustNotPanic(func() {
		_ = me[ch]
	})
	mustNotPanic(func() {
		_ = me[i0]
	})
	mustNotPanic(func() {
		_ = me[i1]
	})
	mustNotPanic(func() {
		_ = me[structKey]
	})
	mustPanic(func() {
		_ = me[i0Panic]
	})
	mustPanic(func() {
		_ = me[i1Panic]
	})
	mustPanic(func() {
		_ = me[panicStructKey]
	})
	mustPanic(func() {
		_ = me[sli]
	})
	mustPanic(func() {
		_ = me[me]
	})

	mustNotPanic(func() {
		_ = mi[structKey]
	})
	mustPanic(func() {
		_ = mi[panicStructKey]
	})
}

func TestLoadFactor(t *testing.T) {
	for b := uint8(0); b < 20; b++ {
		count := 13 * (1 << b) / 2 // 6.5
		if b == 0 {
			count = 8
		}
		if runtime.OverLoadFactor(count, b) {
			t.Errorf("OverLoadFactor(%d,%d)=true, want false", count, b)
		}
		if !runtime.OverLoadFactor(count+1, b) {
			t.Errorf("OverLoadFactor(%d,%d)=false, want true", count+1, b)
		}
	}
}

func TestMapKeys(t *testing.T) {
	type key struct {
		s   string
		pad [128]byte // sizeof(key) > abi.MapMaxKeyBytes
	}
	m := map[key]int{{s: "a"}: 1, {s: "b"}: 2}
	keys := make([]key, 0, len(m))
	runtime.MapKeys(m, unsafe.Pointer(&keys))
	for _, k := range keys {
		if len(k.s) != 1 {
			t.Errorf("len(k.s) == %d, want 1", len(k.s))
		}
	}
}

func TestMapValues(t *testing.T) {
	type val struct {
		s   string
		pad [128]byte // sizeof(val) > abi.MapMaxElemBytes
	}
	m := map[int]val{1: {s: "a"}, 2: {s: "b"}}
	vals := make([]val, 0, len(m))
	runtime.MapValues(m, unsafe.Pointer(&vals))
	for _, v := range vals {
		if len(v.s) != 1 {
			t.Errorf("len(v.s) == %d, want 1", len(v.s))
		}
	}
}

func computeHash() uintptr {
	var v struct{}
	return runtime.MemHash(unsafe.Pointer(&v), 0, unsafe.Sizeof(v))
}

func subprocessHash(t *testing.T, env string) uintptr {
	t.Helper()

	cmd := testenv.CleanCmdEnv(testenv.Command(t, os.Args[0], "-test.run=^TestMemHashGlobalSeed$"))
	cmd.Env = append(cmd.Env, "GO_TEST_SUBPROCESS_HASH=1")
	if env != "" {
		cmd.Env = append(cmd.Env, env)
	}

	out, err := cmd.Output()
	if err != nil {
		t.Fatalf("cmd.Output got err %v want nil", err)
	}

	s := strings.TrimSpace(string(out))
	h, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		t.Fatalf("Parse output %q got err %v want nil", s, err)
	}
	return uintptr(h)
}

// memhash has unique per-process seeds, so hashes should differ across
// processes.
//
// Regression test for https://go.dev/issue/66885.
func TestMemHashGlobalSeed(t *testing.T) {
	if os.Getenv("GO_TEST_SUBPROCESS_HASH") != "" {
		fmt.Println(computeHash())
		os.Exit(0)
		return
	}

	testenv.MustHaveExec(t)

	// aeshash and memhashFallback use separate per-process seeds, so test
	// both.
1512 t.Run("aes", func(t *testing.T) { 1513 if !*runtime.UseAeshash { 1514 t.Skip("No AES") 1515 } 1516 1517 h1 := subprocessHash(t, "") 1518 t.Logf("%d", h1) 1519 h2 := subprocessHash(t, "") 1520 t.Logf("%d", h2) 1521 h3 := subprocessHash(t, "") 1522 t.Logf("%d", h3) 1523 1524 if h1 == h2 && h2 == h3 { 1525 t.Errorf("got duplicate hash %d want unique", h1) 1526 } 1527 }) 1528 1529 t.Run("noaes", func(t *testing.T) { 1530 env := "" 1531 if *runtime.UseAeshash { 1532 env = "GODEBUG=cpu.aes=off" 1533 } 1534 1535 h1 := subprocessHash(t, env) 1536 t.Logf("%d", h1) 1537 h2 := subprocessHash(t, env) 1538 t.Logf("%d", h2) 1539 h3 := subprocessHash(t, env) 1540 t.Logf("%d", h3) 1541 1542 if h1 == h2 && h2 == h3 { 1543 t.Errorf("got duplicate hash %d want unique", h1) 1544 } 1545 }) 1546} 1547