public virtual void CopyTo(Array array) {
    CopyTo(array, 0);
}

// Copies this ArrayList into array, which must be of a
// compatible array type.
//
public virtual void CopyTo(Array array, int arrayIndex) {
    if ((array != null) && (array.Rank != 1))
        throw new ArgumentException(Environment.GetResourceString("Arg_RankMultiDimNotSupported"));
    Contract.EndContractBlock();
    // Delegate rest of error checking to Array.Copy.
    Array.Copy(_items, 0, array, arrayIndex, _size);
}

// Copies a section of this list to the given array at the given index.
//
// The method uses the Array.Copy method to copy the elements.
//
public virtual void CopyTo(int index, Array array, int arrayIndex, int count) {
    if (_size - index < count)
        throw new ArgumentException(Environment.GetResourceString("Argument_InvalidOffLen"));
    if ((array != null) && (array.Rank != 1))
        throw new ArgumentException(Environment.GetResourceString("Arg_RankMultiDimNotSupported"));
    Contract.EndContractBlock();
    // Delegate rest of error checking to Array.Copy.
    Array.Copy(_items, index, array, arrayIndex, count);
}
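To make the three overloads concrete, here is a minimal usage sketch (my own demo, not part of the reference source) that exercises each one:

using System;
using System.Collections;

class CopyToDemo {
    static void Main() {
        ArrayList list = new ArrayList { "a", "b", "c", "d" };

        object[] all = new object[4];
        list.CopyTo(all);               // copy everything, starting at array index 0

        object[] shifted = new object[6];
        list.CopyTo(shifted, 2);        // copy everything, starting at array index 2

        object[] slice = new object[2];
        list.CopyTo(1, slice, 0, 2);    // copy elements 1..2 ("b", "c") into slice
        Console.WriteLine(string.Join(",", slice)); // prints: b,c
    }
}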
// Ensures that the capacity of this list is at least the given minimum
// value. If the current capacity of the list is less than min, the
// capacity is increased to twice the current capacity or to min,
// whichever is larger.
private void EnsureCapacity(int min) {
    if (_items.Length < min) {
        int newCapacity = _items.Length == 0 ? _defaultCapacity : _items.Length * 2;
        // Allow the list to grow to maximum possible capacity (~2G elements) before encountering overflow.
        // Note that this check works even when _items.Length * 2 overflowed thanks to the (uint) cast.
        if ((uint)newCapacity > Array.MaxArrayLength) newCapacity = Array.MaxArrayLength;
        if (newCapacity < min) newCapacity = min;
        Capacity = newCapacity;
    }
}
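The doubling policy is easy to observe from the outside through the Capacity property. A small sketch (the printed capacities assume the desktop CLR, where _defaultCapacity is 4 and a new ArrayList starts with an empty backing array):

using System;
using System.Collections;

class GrowthDemo {
    static void Main() {
        ArrayList list = new ArrayList();
        int lastCapacity = -1;
        for (int i = 0; i < 40; i++) {
            list.Add(i);
            if (list.Capacity != lastCapacity) {
                lastCapacity = list.Capacity;
                Console.WriteLine("Count={0,2}  Capacity={1}", list.Count, list.Capacity);
            }
        }
        // Prints capacities 4, 8, 16, 32, 64: each growth doubles the backing array.
    }
}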
    // Avoid awfully small sizes
    int hashsize = (rawsize > InitialSize) ? HashHelpers.GetPrime((int)rawsize) : InitialSize;
    buckets = new bucket[hashsize];

    loadsize = (int)(this.loadFactor * hashsize);
    isWriterInProgress = false;
    // Based on the current algorithm, loadsize must be less than hashsize.
    Contract.Assert(loadsize < hashsize, "Invalid hashtable loadsize!");
}
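A worked example of that computation, assuming the defaults from the reference source (the stored loadFactor is 0.72f for a user-supplied load factor of 1.0, and InitialSize is 3):

using System;

class LoadSizeDemo {
    static void Main() {
        float loadFactor = 0.72f;   // default stored load factor in the reference source
        int hashsize = 11;
        int loadsize = (int)(loadFactor * hashsize);
        Console.WriteLine(loadsize); // 7: the table grows once more than 7 slots are in use,
                                     // so occupancy always stays strictly below hashsize
    }
}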
// The hash table data.
// This cannot be serialised
private struct bucket {
    public Object key;
    public Object val;
    public int hash_coll;   // Store hash code; sign bit means there was a collision.
}
private bucket[] buckets;
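The sign-bit trick behind hash_coll can be shown in isolation. A minimal sketch (the constant names are mine, not the source's) that packs a 31-bit hash code and a "collision happened here" flag into a single int:

using System;

class HashCollDemo {
    const int HashMask = 0x7FFFFFFF;                     // low 31 bits: the hash code
    const int CollisionBit = unchecked((int)0x80000000); // sign bit: collision marker

    static void Main() {
        int hashColl = "some key".GetHashCode() & HashMask; // store the hash, flag clear
        Console.WriteLine(hashColl < 0);                    // False: no collision yet

        hashColl |= CollisionBit;                           // another key probed past this bucket
        Console.WriteLine(hashColl < 0);                    // True: sign bit set
        Console.WriteLine(hashColl & HashMask);             // original hash is still recoverable
    }
}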
When the number of collisions reaches the threshold, a new equality comparer is selected and the hashtable is rehashed.
Code:
#if FEATURE_RANDOMIZED_STRING_HASHING
#if !FEATURE_CORECLR
if (buckets.Length > HashHelpers.HashCollisionThreshold && HashHelpers.IsWellKnownEqualityComparer(_keycomparer))
{
    // PERF: We don't want to rehash if _keycomparer is already a RandomizedObjectEqualityComparer since in some
    // cases there may not be any strings in the hashtable and we wouldn't get any mixing.
    if (_keycomparer == null || !(_keycomparer is System.Collections.Generic.RandomizedObjectEqualityComparer))
    {
        _keycomparer = HashHelpers.GetRandomizedEqualityComparer(_keycomparer);
        rehash(buckets.Length, true);
    }
}
#endif // !FEATURE_CORECLR
#endif
// reset occupancy
occupancy = 0;

// Don't replace any internal state until we've finished adding to the
// new bucket[]. This serves two purposes:
//   1) Allow concurrent readers to see valid hashtable contents
//      at all times
//   2) Protect against an OutOfMemoryException while allocating this
//      new bucket[].
bucket[] newBuckets = new bucket[newsize];
// rehash table into new buckets
int nb;
for (nb = 0; nb < buckets.Length; nb++) {
    bucket oldb = buckets[nb];
    if ((oldb.key != null) && (oldb.key != buckets)) {
        int hashcode = ((forceNewHashCode ? GetHash(oldb.key) : oldb.hash_coll) & 0x7FFFFFFF);
        putEntry(newBuckets, oldb.key, oldb.val, hashcode);
    }
}

// New bucket[] is good to go - replace buckets and other internal state.
#if !FEATURE_CORECLR
Thread.BeginCriticalRegion();
#endif
isWriterInProgress = true;
buckets = newBuckets;
loadsize = (int)(loadFactor * newsize);
UpdateVersion();
isWriterInProgress = false;
#if !FEATURE_CORECLR
Thread.EndCriticalRegion();
#endif
// minimum size of hashtable is 3 now and maximum loadFactor is 0.72 now.
Contract.Assert(loadsize < newsize, "Our current implementation means this is not possible.");
return;
}
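The ordering here is the whole trick: the new bucket[] is fully populated before the single assignment buckets = newBuckets publishes it. A simplified sketch (hypothetical names, not the source's) of that publish pattern:

class PublishDemo {
    volatile object[] _buckets = new object[3];

    void Grow(int newSize) {
        // Build the replacement completely off to the side; if allocation or
        // copying throws, the live table is untouched.
        object[] oldBuckets = _buckets;
        object[] newBuckets = new object[newSize];
        for (int i = 0; i < oldBuckets.Length; i++)
            newBuckets[i] = oldBuckets[i];

        // A single reference write is atomic in the CLR, so a concurrent reader
        // sees either the old array or the finished new one, never a half-built table.
        _buckets = newBuckets;
    }
}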
// Returns size of hashtable to grow to.
public static int ExpandPrime(int oldSize) {
    int newSize = 2 * oldSize;
    // Allow the hashtables to grow to maximum possible size (~2G elements) before encountering capacity overflow.
    // Note that this check works even when newSize overflowed thanks to the (uint) cast.
    if ((uint)newSize > MaxPrimeArrayLength && MaxPrimeArrayLength > oldSize) {
        Contract.Assert(MaxPrimeArrayLength == GetPrime(MaxPrimeArrayLength), "Invalid MaxPrimeArrayLength");
        return MaxPrimeArrayLength;
    }
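Why the (uint) cast handles overflow can be verified directly. A standalone sketch (with MaxPrimeArrayLength inlined from its value in the reference source, 0x7FEFFFFD): when 2 * oldSize wraps to a negative int, the cast turns it into a huge unsigned value, so the clamp still fires.

using System;

class OverflowDemo {
    const int MaxPrimeArrayLength = 0x7FEFFFFD;

    static void Main() {
        int oldSize = 0x7F000000;              // large enough that doubling overflows
        int newSize = unchecked(2 * oldSize);  // wraps to a negative int
        Console.WriteLine(newSize);                              // negative
        Console.WriteLine((uint)newSize > MaxPrimeArrayLength);  // True: the clamp applies
    }
}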