A Deep Dive into the HashMap and ConcurrentHashMap Source Code

HashMap class structure diagram


HashMap source code walkthrough

    // The default initial capacity of HashMap is 2^4, i.e. 16
    static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16

    static final int MAXIMUM_CAPACITY = 1 << 30;

    // The load factor: once the number of stored entries exceeds capacity * load factor,
    // a resize is triggered
    static final float DEFAULT_LOAD_FACTOR = 0.75f;

    /**
     * Treeify threshold. In JDK 8 a HashMap is an array + linked list + red-black tree
     * (a self-balancing binary search tree) structure. On a hash collision the new entry
     * is appended to the tail of the list in that bucket; once a bucket holds this many
     * nodes, the list is converted into a tree to speed up lookups.
     */
    static final int TREEIFY_THRESHOLD = 8;

    // Untreeify threshold: when a tree bin shrinks to 6 nodes or fewer, it is converted
    // back into a linked list
    static final int UNTREEIFY_THRESHOLD = 6;

    // Minimum table capacity for treeification: bins are only treeified once the table
    // capacity reaches 64; below that, the map resizes instead
    static final int MIN_TREEIFY_CAPACITY = 64;
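
A quick way to see how these constants interact: with the default capacity of 16 and load factor of 0.75, the threshold works out to 12, so the 13th insertion triggers a resize, after which both the capacity and the threshold double. The sketch below only illustrates that arithmetic (the class name ThresholdDemo is made up).

public class ThresholdDemo {
    public static void main(String[] args) {
        int capacity = 16;                              // DEFAULT_INITIAL_CAPACITY
        float loadFactor = 0.75f;                       // DEFAULT_LOAD_FACTOR
        int threshold = (int) (capacity * loadFactor);  // 12

        System.out.println("resize triggers once size exceeds " + threshold);
        // After a resize, capacity doubles and so does the threshold.
        System.out.println("next capacity = " + (capacity << 1)
                + ", next threshold = " + (threshold << 1));
    }
}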

Every key-value pair in a HashMap is stored as a Node; this is the entry structure:
static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;
        final K key;
        V value;
        Node<K,V> next;

        Node(int hash, K key, V value, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.value = value;
            this.next = next;
        }
}
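
A minimal standalone sketch of how colliding entries are chained through the next reference (SimpleNode is a simplified stand-in, not the JDK's Node): "Aa" and "BB" are a classic pair of strings with the same hashCode (2112), so in a real HashMap they would land in the same bucket and be linked together.

public class NodeChainDemo {
    static final class SimpleNode<K, V> {
        final K key;
        V value;
        SimpleNode<K, V> next;
        SimpleNode(K key, V value, SimpleNode<K, V> next) {
            this.key = key; this.value = value; this.next = next;
        }
    }

    public static void main(String[] args) {
        // In JDK 8 a colliding entry is appended at the tail, so the older entry stays at the head.
        SimpleNode<String, Integer> tail = new SimpleNode<>("BB", 2, null);
        SimpleNode<String, Integer> head = new SimpleNode<>("Aa", 1, tail);

        for (SimpleNode<String, Integer> e = head; e != null; e = e.next)
            System.out.println(e.key + " -> " + e.value);
    }
}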
    // The array that holds the actual Node entries
    transient Node<K,V>[] table;

    transient Set<Map.Entry<K,V>> entrySet;

    /**
     * The number of key-value mappings currently stored in this HashMap
     */
    transient int size;

    /**
     * The structural-modification count. It is incremented on every insertion and
     * removal, so an iterator can detect that the map was structurally modified
     * during iteration and fail fast.
     */
    transient int modCount;

    /**
     * The threshold that triggers the next resize (capacity * load factor); once the
     * number of stored entries exceeds this value, the table is resized
     */
    int threshold;

    /**
     * The load factor
     */
    final float loadFactor;
// HashMap constructor taking an initial capacity and a load factor from the caller
public HashMap(int initialCapacity, float loadFactor) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException("Illegal initial capacity: " + initialCapacity);
        // If the requested capacity exceeds the maximum (2^30), clamp it to the maximum
        if (initialCapacity > MAXIMUM_CAPACITY)
            initialCapacity = MAXIMUM_CAPACITY;
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new IllegalArgumentException("Illegal load factor: " +
                                               loadFactor);
        this.loadFactor = loadFactor;
        // The threshold is set to the smallest power of two >= initialCapacity.
        // The load factor is not applied here; the real threshold is computed
        // during the first resize, when the table is actually allocated.
        this.threshold = tableSizeFor(initialCapacity);
    }
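
The tableSizeFor call above rounds the requested capacity up to the next power of two. A small sketch of that behavior: nextPowerOfTwo reproduces the bit-smearing trick, while the JDK version additionally clamps the result to MAXIMUM_CAPACITY (the class and method names here are only illustrative).

public class TableSizeForDemo {
    static int nextPowerOfTwo(int cap) {
        int n = cap - 1;
        n |= n >>> 1;  n |= n >>> 2;  n |= n >>> 4;
        n |= n >>> 8;  n |= n >>> 16;
        return (n < 0) ? 1 : n + 1;
    }

    public static void main(String[] args) {
        for (int cap : new int[] {1, 3, 13, 16, 17, 1000})
            System.out.println(cap + " -> " + nextPowerOfTwo(cap));
        // prints 1 -> 1, 3 -> 4, 13 -> 16, 16 -> 16, 17 -> 32, 1000 -> 1024
    }
}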

Inserting data into a HashMap

// Parameters: the hash of the key, plus the key and the value themselves

 final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
                   boolean evict) {
        Node<K,V>[] tab; Node<K,V> p; int n, i;
        // If table[] is null or empty, this is the first insertion: allocate the table by calling resize()
        if ((tab = table) == null || (n = tab.length) == 0)
            n = (tab = resize()).length;
        // If the bucket the hash maps to is empty, just place the new node there; nothing else to do
        if ((p = tab[i = (n - 1) & hash]) == null)
            tab[i] = newNode(hash, key, value, null);
        else {
        // A hash collision has occurred, so it must be resolved with chaining or a tree. Common collision-resolution strategies: chaining, open addressing, rehashing
            Node<K,V> e; K k;
            // The key already exists, so this is effectively an update: cache a reference to the existing node in e
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                e = p;
            // p is a TreeNode, i.e. this bin has already been treeified, so insert the mapping into the red-black tree
            else if (p instanceof TreeNode)
                e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value);
            // Otherwise (neither a tree nor equal to the first node) walk the linked list and append the new node at the tail;
            // if the bin now holds TREEIFY_THRESHOLD nodes, convert the list into a tree
            else {
                for (int binCount = 0; ; ++binCount) {
                    if ((e = p.next) == null) {
                        p.next = newNode(hash, key, value, null);
                        if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                            treeifyBin(tab, hash);
                        break;
                    }
                    // If a node with the same key is found while walking the list, grab that reference and stop
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        break;
                    p = e;
                }
            }
            if (e != null) { // existing mapping for key
                V oldValue = e.value;
                if (!onlyIfAbsent || oldValue == null)
                    e.value = value;
                afterNodeAccess(e);
                // Return the old value
                return oldValue;
            }
        }
        // Bump the structural-modification counter
        ++modCount;
        // If size now exceeds the resize threshold, resize the table
        if (++size > threshold)
            resize();
        afterNodeInsertion(evict);
        return null;
    }
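
A sketch of how putVal picks a bucket: HashMap.hash() in JDK 8 spreads the high bits of the key's hashCode into the low bits (h ^ (h >>> 16)), and because the table length n is always a power of two, (n - 1) & hash is a cheap equivalent of hash mod n. The class and helper names below are only illustrative.

public class BucketIndexDemo {
    // Mirrors the bit-spreading done by HashMap.hash() in JDK 8.
    static int spreadHash(Object key) {
        int h;
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    public static void main(String[] args) {
        int n = 16;                                   // table length, always a power of two
        for (String key : new String[] {"alpha", "beta", "gamma"}) {
            int hash = spreadHash(key);
            int index = (n - 1) & hash;               // same as Math.floorMod(hash, n), no division
            System.out.println(key + " -> bucket " + index);
        }
    }
}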

Next, the resize operation. The table is allocated lazily: the constructor allocates no memory, and the first insertion (plus any later insertion that pushes size past the threshold) triggers resize().

 final Node<K,V>[] resize() {
        Node<K,V>[] oldTab = table;
        int oldCap = (oldTab == null) ? 0 : oldTab.length;
        int oldThr = threshold;
        int newCap, newThr = 0;
        // oldCap is the old table's capacity and oldThr the old resize threshold.
        // If the table already holds data and its capacity has reached the upper limit,
        // it cannot grow any further: raise the threshold and return the old array as-is.
        if (oldCap > 0) {
            if (oldCap >= MAXIMUM_CAPACITY) {
                threshold = Integer.MAX_VALUE;
                return oldTab;
            }
            // If doubling the old capacity stays below the maximum and the old capacity
            // is at least the default (16), the threshold is simply doubled along with
            // the capacity; for smaller tables newThr stays 0 and is recomputed below
            // from newCap * loadFactor.
            else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                     oldCap >= DEFAULT_INITIAL_CAPACITY)
                newThr = oldThr << 1; // double threshold
        }
        // The table does not exist yet but a threshold was set, i.e. a constructor with
        // an initial capacity was used: that stored value (already a power of two from
        // tableSizeFor) becomes the new table's capacity.
        else if (oldThr > 0)
            newCap = oldThr;
        // Otherwise (no-arg constructor): use the default capacity, and the resize
        // threshold becomes default capacity * default load factor.
        else {               // zero initial threshold signifies using defaults
            newCap = DEFAULT_INITIAL_CAPACITY;
            newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
        }
        // If the new threshold was not set above (small table, or the capacity came from
        // the constructor), compute it here as newCap * loadFactor, capped at Integer.MAX_VALUE.
        if (newThr == 0) {
            float ft = (float)newCap * loadFactor;
            newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                      (int)ft : Integer.MAX_VALUE);
        }
        threshold = newThr;
        // Now allocate the new array and prepare to move all existing data into it.
        @SuppressWarnings({"rawtypes","unchecked"})
        Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap];
        table = newTab;
        if (oldTab != null) {
        // Walk the old array, traversing the linked list or tree in each bucket and remapping
        // every node into the new array. This is expensive, so resizing hurts performance;
        // choose a suitable initial capacity up front whenever possible.
            for (int j = 0; j < oldCap; ++j) {
                Node<K,V> e;
                if ((e = oldTab[j]) != null) {
                    oldTab[j] = null;
                    if (e.next == null)
                        newTab[e.hash & (newCap - 1)] = e;
                    else if (e instanceof TreeNode)
                        ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                    else { // preserve order
                        Node<K,V> loHead = null, loTail = null;
                        Node<K,V> hiHead = null, hiTail = null;
                        Node<K,V> next;
                        do {
                            next = e.next;
                            if ((e.hash & oldCap) == 0) {
                                if (loTail == null)
                                    loHead = e;
                                else
                                    loTail.next = e;
                                loTail = e;
                            }
                            else {
                                if (hiTail == null)
                                    hiHead = e;
                                else
                                    hiTail.next = e;
                                hiTail = e;
                            }
                        } while ((e = next) != null);
                        if (loTail != null) {
                            loTail.next = null;
                            newTab[j] = loHead;
                        }
                        if (hiTail != null) {
                            hiTail.next = null;
                            newTab[j + oldCap] = hiHead;
                        }
                    }
                }
            }
        }
        return newTab;
    }
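
The loop above splits each bucket without rehashing: when the capacity doubles, the single bit tested by (e.hash & oldCap) decides whether a node keeps its old index j (the "lo" list) or moves to j + oldCap (the "hi" list). A small illustrative sketch (ResizeSplitDemo is a made-up name):

public class ResizeSplitDemo {
    public static void main(String[] args) {
        int oldCap = 16, newCap = oldCap << 1;
        int[] hashes = {5, 21, 37, 53};               // all map to bucket 5 when n = 16

        for (int h : hashes) {
            int oldIndex = h & (oldCap - 1);
            int newIndex = h & (newCap - 1);          // what a full recompute would give
            String where = ((h & oldCap) == 0)
                    ? "stays at " + oldIndex
                    : "moves to " + (oldIndex + oldCap);
            System.out.println("hash " + h + ": " + where
                    + " (recompute gives " + newIndex + ")");
        }
    }
}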

Removing an element is comparatively simple: locate the node and unlink it. The cost is not constant, though: a bucket holding a single node is cheap, but if the key sits in a long linked list or a tree, the lookup before the unlink takes correspondingly longer.

 final Node<K,V> removeNode(int hash, Object key, Object value,
                               boolean matchValue, boolean movable) {
        Node<K,V>[] tab; Node<K,V> p; int n, index;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            (p = tab[index = (n - 1) & hash]) != null) {
            Node<K,V> node = null, e; K k; V v;
            if (p.hash == hash &&
                ((k = p.key) == key || (key != null && key.equals(k))))
                node = p;
            else if ((e = p.next) != null) {
                if (p instanceof TreeNode)
                    node = ((TreeNode<K,V>)p).getTreeNode(hash, key);
                else {
                    do {
                        if (e.hash == hash &&
                            ((k = e.key) == key ||
                             (key != null && key.equals(k)))) {
                            node = e;
                            break;
                        }
                        p = e;
                    } while ((e = e.next) != null);
                }
            }
            if (node != null && (!matchValue || (v = node.value) == value ||
                                 (value != null && value.equals(v)))) {
                if (node instanceof TreeNode)
                    ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable);
                else if (node == p)
                    tab[index] = node.next;
                else
                    p.next = node.next;
                ++modCount;
                --size;
                afterNodeRemoval(node);
                return node;
            }
        }
        return null;
    }
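
Usage sketch for the matchValue parameter above: the one-argument remove(key) calls removeNode with matchValue = false, while the two-argument remove(key, value) passes matchValue = true and only unlinks the node when the stored value also matches. RemoveDemo is just an illustrative class name.

import java.util.HashMap;
import java.util.Map;

public class RemoveDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("a", 1);
        map.put("b", 2);

        System.out.println(map.remove("a"));       // 1     (removed, old value returned)
        System.out.println(map.remove("b", 99));   // false (value does not match, nothing removed)
        System.out.println(map.remove("b", 2));    // true  (key and value match, entry removed)
    }
}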

Iterating over a HashMap

Iterating over the map's keys

  // KeySet is the inner class used to iterate over keys. It is only a view holding the
  // iteration logic: it stores no data of its own and still reads the outer HashMap's
  // table array.
  final class KeySet extends AbstractSet<K> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<K> iterator()     { return new KeyIterator(); }
        public final boolean contains(Object o) { return containsKey(o); }
        // Removing a key through this view delegates to removeNode on the outer map
        // (for removal while iterating, go through the iterator's remove instead)
        public final boolean remove(Object key) {
            return removeNode(hash(key), key, null, false, true) != null;
        }
        public final Spliterator<K> spliterator() {
            return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0);
        }
        // The iteration method: the lambda passed in as the action is invoked with every key
        public final void forEach(Consumer<? super K> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount;
                for (int i = 0; i < tab.length; ++i) {
                    for (Node<K,V> e = tab[i]; e != null; e = e.next)
                        action.accept(e.key);
                }
                // Do not remove entries through the ordinary map methods while iterating:
                // if modCount changed during the loop, the result cannot be trusted,
                // so an exception is thrown immediately
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }
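
The modCount comparison above is what makes iteration fail fast. A usage sketch (FailFastDemo is an illustrative name): removing through the key-set iterator is safe, while structurally modifying the map from inside forEach trips the check.

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class FailFastDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("a", 1);
        map.put("b", 2);
        map.put("c", 3);

        // Safe: remove through the iterator while walking the key set.
        Iterator<String> it = map.keySet().iterator();
        while (it.hasNext()) {
            if (it.next().equals("b"))
                it.remove();
        }
        System.out.println(map);                      // the remaining entries, e.g. {a=1, c=3}

        // Unsafe: removing via the map inside forEach changes modCount and fails fast.
        try {
            map.forEach((k, v) -> map.remove(k));
        } catch (java.util.ConcurrentModificationException expected) {
            System.out.println("fail-fast: " + expected);
        }
    }
}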

Iterating over all key-value pairs (EntrySet)

// The logic mirrors KeySet above: removal goes through removeNode, and forEach walks all entries
final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
        public final int size()                 { return size; }
        public final void clear()               { HashMap.this.clear(); }
        public final Iterator<Map.Entry<K,V>> iterator() {
            return new EntryIterator();
        }
        public final boolean contains(Object o) {
            if (!(o instanceof Map.Entry))
                return false;
            Map.Entry<?,?> e = (Map.Entry<?,?>) o;
            Object key = e.getKey();
            Node<K,V> candidate = getNode(hash(key), key);
            return candidate != null && candidate.equals(e);
        }
        public final boolean remove(Object o) {
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>) o;
                Object key = e.getKey();
                Object value = e.getValue();
                return removeNode(hash(key), key, value, true, true) != null;
            }
            return false;
        }
        public final Spliterator<Map.Entry<K,V>> spliterator() {
            return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0);
        }
        public final void forEach(Consumer<? super Map.Entry<K,V>> action) {
            Node<K,V>[] tab;
            if (action == null)
                throw new NullPointerException();
            if (size > 0 && (tab = table) != null) {
                int mc = modCount;
                for (int i = 0; i < tab.length; ++i) {
                    for (Node<K,V> e = tab[i]; e != null; e = e.next)
                        action.accept(e);
                }
                if (modCount != mc)
                    throw new ConcurrentModificationException();
            }
        }
    }
Calling HashMap's own forEach method directly iterates over the key-value pairs without going through entrySet.
// Compared with iterating entrySet, this method is more direct and convenient
  public void forEach(BiConsumer<? super K, ? super V> action) {
        Node<K,V>[] tab;
        if (action == null)
            throw new NullPointerException();
        if (size > 0 && (tab = table) != null) {
            int mc = modCount;
            for (int i = 0; i < tab.length; ++i) {
                for (Node<K,V> e = tab[i]; e != null; e = e.next)
                    action.accept(e.key, e.value);
            }
            if (modCount != mc)
                throw new ConcurrentModificationException();
        }
    }
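
A usage sketch comparing the three iteration styles discussed above: entrySet(), keySet() and the direct forEach(BiConsumer). IterationStylesDemo is just an illustrative name.

import java.util.HashMap;
import java.util.Map;

public class IterationStylesDemo {
    public static void main(String[] args) {
        Map<String, Integer> map = new HashMap<>();
        map.put("a", 1);
        map.put("b", 2);

        for (Map.Entry<String, Integer> e : map.entrySet())      // via the EntrySet view
            System.out.println(e.getKey() + "=" + e.getValue());

        for (String k : map.keySet())                            // via the KeySet view
            System.out.println(k + "=" + map.get(k));

        map.forEach((k, v) -> System.out.println(k + "=" + v));  // direct forEach
    }
}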

That covers the main HashMap methods. Next up is ConcurrentHashMap: its logic largely mirrors HashMap's, with the differences lying in how synchronization is handled.

ConcurrentHashMap

Here we focus mainly on the synchronization-related logic.

Inserting data

final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;
        // Prepare to insert: loop over the table until the insertion succeeds
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh;
            if (tab == null || (n = tab.length) == 0)
                // The table is empty, so initialize it and allocate space first
                tab = initTable();
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            // The bucket the hash maps to is empty, so just swing the reference in with a CAS.
            // CAS uses an atomic CPU instruction, which is cheaper than taking a lock.
                if (casTabAt(tab, i, null,
                             new Node<K,V>(hash, key, value, null)))
                    break;                   // no lock when adding to empty bin
            }
            // The bin's head is a forwarding node (MOVED): a resize is in progress, so help with the transfer
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);
            else {
                V oldVal = null;
                // Lock only the first node of this bucket: a fine-grained per-bin lock that scales better than locking the whole map
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        if (fh >= 0) {
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    ((ek = e.key) == key ||
                                     (ek != null && key.equals(ek)))) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    pred.next = new Node<K,V>(hash, key,
                                                              value, null);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                           value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal;
                    break;
                }
            }
        }
        addCount(1L, binCount);
        return null;
    }
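
A usage sketch of the concurrent insert path above: several threads update the same key through merge(), whose read-modify-write is atomic per key, so the final count is exact without any external locking. ConcurrentPutDemo is just an illustrative name.

import java.util.concurrent.ConcurrentHashMap;

public class ConcurrentPutDemo {
    public static void main(String[] args) throws InterruptedException {
        ConcurrentHashMap<String, Integer> counts = new ConcurrentHashMap<>();

        Runnable task = () -> {
            for (int i = 0; i < 10_000; i++)
                counts.merge("hits", 1, Integer::sum);   // atomic read-modify-write per key
        };

        Thread t1 = new Thread(task), t2 = new Thread(task);
        t1.start(); t2.start();
        t1.join();  t2.join();

        System.out.println(counts.get("hits"));          // always 20000
    }
}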

Helping with a resize

If the table is being migrated, the current thread pitches in: each thread takes responsibility for migrating a range of buckets, and the pieces are finally combined into the new table.

  final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
        Node<K,V>[] nextTab; int sc;
        if (tab != null && (f instanceof ForwardingNode) &&
            (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
            int rs = resizeStamp(tab.length) << RESIZE_STAMP_SHIFT;
            while (nextTab == nextTable && table == tab &&
                   (sc = sizeCtl) < 0) {
                if (sc == rs + MAX_RESIZERS || sc == rs + 1 ||
                    transferIndex <= 0)
                    break;
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) {
                    transfer(tab, nextTab);
                    break;
                }
            }
            return nextTab;
        }
        return table;
    }

Initializing the table array

The field private transient volatile int sizeCtl; tracks the table's initialization and resize state: a negative value means initialization or a resize is in progress, while a positive value holds the capacity to initialize with or, once the table exists, the next resize threshold.

private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null || tab.length == 0) {
            if ((sc = sizeCtl) < 0) // sizeCtl < 0 means another thread is already initializing or resizing, so just wait
                Thread.yield();
            // No other thread has claimed the job, so claim it by atomically CASing sizeCtl to -1
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if ((tab = table) == null || tab.length == 0) {
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        @SuppressWarnings("unchecked")
                        Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
                        table = tab = nt;
                        sc = n - (n >>> 2);
                    }
                } finally {
                    sizeCtl = sc;
                }
                break;
            }
        }
        return tab;
    }
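
A simplified sketch of the pattern initTable uses: one thread wins a CAS on a control field and performs the one-time initialization while the losers yield until the table becomes visible. This is only an analogy under assumed names (CasLazyInitDemo, ctl); the real sizeCtl additionally encodes the resize threshold and the number of resizing threads.

import java.util.concurrent.atomic.AtomicInteger;

public class CasLazyInitDemo {
    private static final AtomicInteger ctl = new AtomicInteger(0);  // 0 = uninitialized, -1 = initializing, 1 = done
    private static volatile Object[] table;

    static Object[] initTable() {
        Object[] tab;
        while ((tab = table) == null) {
            if (ctl.get() < 0) {
                Thread.yield();                       // another thread is initializing: back off
            } else if (ctl.compareAndSet(0, -1)) {    // claim the initialization
                try {
                    if ((tab = table) == null)
                        table = tab = new Object[16];
                } finally {
                    ctl.set(1);                       // publish "done"
                }
                break;
            }
        }
        return tab;
    }

    public static void main(String[] args) {
        System.out.println(initTable().length);       // 16
    }
}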
