Map源码解析之HashMap
Map源码解析之HashMap红黑树
Map源码解析之HashMap补充:集合、迭代器、compute、merge、replace
Map源码解析之LinkedHashMap
Map源码解析之TreeMap
Map源码解析之HashTable
HashMap和LinkedHashMap都不是线程安全的,而线程安全的HashTable和Collections#synchronizedMap(Map) 返回的SynchronizedMap两者都是通过synchronized锁实现同步的,当其中的一个同步方法被一个线程访问时,其它的同步方法也无法被其它线程访问,效率低下。
ConcurrentHashMap在jdk1.8中同之前的版本发生了比较大的改动,本篇博客在没有特别说明的情况下的源码及解析均基于jdk1.8版本。
ConcurrentHashMap的key和value值都不能为null。
ConcurrentHashMap中有着重要的内部类,我们针对部分重要的内部类先做一个简单介绍,以帮助分析源码。
Node类是ConcurrentHashMap的节点类,存储几点的key、value、hash和指向下一个节点的指针。Node在ConcurrentHashMap中还有子类ForwardingNode, ReservationNode, TreeNode和TreeBin,用来表示处于不同状态下的节点。
Traverser类用于封装对于containsValue等遍历方法的遍历,同时也是ConcurrentHashMap中迭代器的父类。其子类有BaseIterator, EntryIterator, EntrySpliterator, KeyIterator, KeySpliterator, ValueIterator, ValueSpliterator,用于针对不同情况下的迭代。
CollectionView类是ConcurrentHashMap的内部抽象类,表示ConcurrentHashMap的集合视图。其子类有EntrySetView, KeySetView, ValuesView表示不同目标的集合视图。
BulkTask类是ConcurrentHashMap的内部抽象类,用来帮助处理批量任务。其子类众多,具体如下图所示:
/**
 * The largest possible table capacity. This value must be
 * exactly 1<<30 to stay within Java array allocation and indexing
 * bounds for power of two table sizes, and is further required
 * because the top two bits of 32bit hash fields are used for
 * control purposes.
 */
// Maximum table capacity (1 << 30); the top two hash bits are reserved for control
private static final int MAXIMUM_CAPACITY = 1 << 30;
/**
 * The default initial table capacity. Must be a power of 2
 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
 */
// Default initial table capacity
private static final int DEFAULT_CAPACITY = 16;
/**
 * The largest possible (non-power of two) array size.
 * Needed by toArray and related methods.
 */
// Largest array size usable by toArray and related methods
static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
/**
 * The default concurrency level for this table. Unused but
 * defined for compatibility with previous versions of this class.
 */
// Default concurrency level; unused in JDK8, kept for serialization compatibility
private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
/**
 * The load factor for this table. Overrides of this value in
 * constructors affect only the initial table capacity. The
 * actual floating point value isn't normally used -- it is
 * simpler to use expressions such as {@code n - (n >>> 2)} for
 * the associated resizing threshold.
 */
// Load factor; thresholds are actually computed as n - (n >>> 2), i.e. 0.75 * n
private static final float LOAD_FACTOR = 0.75f;
/**
 * The bin count threshold for using a tree rather than list for a
 * bin. Bins are converted to trees when adding an element to a
 * bin with at least this many nodes. The value must be greater
 * than 2, and should be at least 8 to mesh with assumptions in
 * tree removal about conversion back to plain bins upon
 * shrinkage.
 */
// Bin size at which a list bin is converted to a tree
static final int TREEIFY_THRESHOLD = 8;
/**
 * The bin count threshold for untreeifying a (split) bin during a
 * resize operation. Should be less than TREEIFY_THRESHOLD, and at
 * most 6 to mesh with shrinkage detection under removal.
 */
// Bin size at which a split tree bin is converted back to a list during resize
static final int UNTREEIFY_THRESHOLD = 6;
/**
 * The smallest table capacity for which bins may be treeified.
 * (Otherwise the table is resized if too many nodes in a bin.)
 * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
 * conflicts between resizing and treeification thresholds.
 */
// Minimum table capacity before bins may be treeified (otherwise the table is resized)
static final int MIN_TREEIFY_CAPACITY = 64;
/**
 * Minimum number of rebinnings per transfer step. Ranges are
 * subdivided to allow multiple resizer threads. This value
 * serves as a lower bound to avoid resizers encountering
 * excessive memory contention. The value should be at least
 * DEFAULT_CAPACITY.
 */
// Minimum number of bins each resizing thread claims per transfer step
private static final int MIN_TRANSFER_STRIDE = 16;
/**
 * The number of bits used for generation stamp in sizeCtl.
 * Must be at least 6 for 32bit arrays.
 */
// Number of bits used for the resize generation stamp in sizeCtl
private static int RESIZE_STAMP_BITS = 16;
/**
 * The maximum number of threads that can help resize.
 * Must fit in 32 - RESIZE_STAMP_BITS bits.
 */
// Maximum number of threads allowed to help resize
private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
/**
 * The bit shift for recording size stamp in sizeCtl.
 */
// Shift for recording the resize stamp in the high bits of sizeCtl
private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
/*
 * Encodings for Node hash fields. See above for explanation.
 */
// Special node-hash encodings for non-data nodes
static final int MOVED = -1; // hash for forwarding nodes
static final int TREEBIN = -2; // hash for roots of trees
static final int RESERVED = -3; // hash for transient reservations
static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
/** Number of CPUS, to place bounds on some sizings */
// Number of available CPUs, used to bound some sizings
static final int NCPU = Runtime.getRuntime().availableProcessors();
/** For serialization compatibility. */
// Serialization compatibility with pre-JDK8 (segment-based) versions
private static final ObjectStreamField[] serialPersistentFields = {
new ObjectStreamField("segments", Segment[].class),
new ObjectStreamField("segmentMask", Integer.TYPE),
new ObjectStreamField("segmentShift", Integer.TYPE)
};
部分属性与HashMap一致,部分属性则用于实现ConcurrentHashMap的并发特性。
/* ---------------- Fields -------------- */
/**
 * The array of bins. Lazily initialized upon first insertion.
 * Size is always a power of two. Accessed directly by iterators.
 */
// Bin array, lazily initialized on first insertion.
// NOTE: the generic parameters below were lost during HTML extraction and are restored
// to match the JDK8 source.
transient volatile Node<K,V>[] table;
/**
 * The next table to use; non-null only while resizing.
 */
// Target table during a resize; non-null only while a transfer is in progress
private transient volatile Node<K,V>[] nextTable;
/**
 * Base counter value, used mainly when there is no contention,
 * but also as a fallback during table initialization
 * races. Updated via CAS.
 */
// Base counter value, updated via CAS; used when there is no contention,
// and as a fallback during counterCells initialization races
private transient volatile long baseCount;
/**
 * Table initialization and resizing control. When negative, the
 * table is being initialized or resized: -1 for initialization,
 * else -(1 + the number of active resizing threads). Otherwise,
 * when table is null, holds the initial table size to use upon
 * creation, or 0 for default. After initialization, holds the
 * next element count value upon which to resize the table.
 */
// Initialization/resize control word (a common j.u.c. idiom):
//   sizeCtl = -1          : a thread is initializing the table
//   sizeCtl < -1          : a resize is in progress
//   sizeCtl > 0, table null: initial capacity to use on creation
//   sizeCtl > 0, table set : next resize threshold (0.75 * capacity)
//   sizeCtl = 0           : default (use DEFAULT_CAPACITY)
// Note (adapted from https://blog.csdn.net/u011392897/article/details/60479937):
// during a resize the value is NOT literally -(1 + nThreads). The high
// RESIZE_STAMP_BITS bits hold a generation stamp unique to this resize round
// (so that distinct resizes cannot overlap), and the low bits count the active
// resizer threads: with n helpers the value is (stamp base + n). That is why an
// observed value such as -2145714174 appears instead of -2.
private transient volatile int sizeCtl;
/**
 * The next table index (plus one) to split while resizing.
 */
// Next table index (plus one) for a resizer thread to claim
private transient volatile int transferIndex;
/**
 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
 */
// Spinlock (acquired via CAS) guarding creation/resizing of counterCells
private transient volatile int cellsBusy;
/**
 * Table of counter cells. When non-null, size is a power of 2.
 */
// Striped counter cells; when non-null, length is a power of two
private transient volatile CounterCell[] counterCells;
// Lazily created views
private transient KeySetView<K,V> keySet;
private transient ValuesView<K,V> values;
private transient EntrySetView<K,V> entrySet;
创建一个新的空Map默认初始表大小(16)。
创建一个新的空Map,其初始表大小能够容纳指定数量的元素,从而无需动态扩容。
创建一个新的空Map基于给定的初始表的大小( initialCapacity)和初始表的元素数量密度( loadFactor)。
创建一个新的空Map初始表大小根据给定的初始容量( initialCapacity),负载因子( loadFactor)和并发级别( concurrencyLevel)。
/**
 * Creates a new, empty map sized for the given number of elements
 * ({@code initialCapacity}), table density ({@code loadFactor}) and
 * estimated number of concurrently updating threads
 * ({@code concurrencyLevel}). Only sizeCtl is set here; the table itself
 * is created lazily on first insertion.
 */
public ConcurrentHashMap(int initialCapacity,
float loadFactor, int concurrencyLevel) {
// Reject non-positive load factor / concurrency level and negative capacity.
if (initialCapacity < 0 || concurrencyLevel <= 0 || !(loadFactor > 0.0f))
throw new IllegalArgumentException();
// Use at least as many bins as estimated threads.
if (initialCapacity < concurrencyLevel)
initialCapacity = concurrencyLevel;
// Size so that initialCapacity elements fit without resizing, then round
// up to a power of two (capped at MAXIMUM_CAPACITY).
long size = (long)(1.0 + (long)initialCapacity / loadFactor);
this.sizeCtl = (size >= (long)MAXIMUM_CAPACITY)
? MAXIMUM_CAPACITY
: tableSizeFor((int)size);
}
此时ConcurrentHashMap的构造方法逻辑和HashMap基本一致,只是多了concurrencyLevel和SizeCtl。
创建一个新的映射相同的映射作为给定的Map。
/**
 * Creates a new map with the same mappings as the given map.
 * (The bounded wildcard {@code <? extends K, ? extends V>} was lost during
 * HTML extraction and is restored here to match the JDK8 source.)
 *
 * @param m the map whose mappings are copied into this map
 */
public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
this.sizeCtl = DEFAULT_CAPACITY;
putAll(m);
}
该方法的作用是初始化table。
初始化表通过sizeCtl实现同步效果,如果sizeCtl<0,表示已经有其他线程在操作表,此时通过线程让步实现自旋效果,否则根据sizeCtl值创建一个空的table数组,更新sizeCtl为数组长度的0.75倍。最后,如果table数组已经创建成功,返回。
private final Node[] initTable() {
Node[] tab; int sc;
while ((tab = table) == null || tab.length == 0) {
if ((sc = sizeCtl) < 0)
Thread.yield(); // lost initialization race; just spin
else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
try {
if ((tab = table) == null || tab.length == 0) {
int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
@SuppressWarnings("unchecked")
Node[] nt = (Node[])new Node,?>[n];
table = tab = nt;
sc = n - (n >>> 2);
}
} finally {
sizeCtl = sc;
}
break;
}
}
return tab;
}
该方法的目的是实现数组的转移,即ConcurrentHashMap的扩容逻辑。
在ConcurrentHashMap中,扩容虽然和HashMap一样,将Node数组的长度变为原来的两倍,但是为了保证多线程的同步性,ConcurrentHashMap引入了nextTable属性。在扩容过程中,大致可以分为三步:
private final void transfer(Node[] tab, Node[] nextTab) {
int n = tab.length, stride;
//确定stride
if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
stride = MIN_TRANSFER_STRIDE; // subdivide range
if (nextTab == null) { // initiating
//初始化,即使多个线程同时进入,也只不过是创建了多个Node[]数组nt,在赋值给nextTab时后者覆盖前者,线程必然安全
try {
@SuppressWarnings("unchecked")
Node[] nt = (Node[])new Node,?>[n << 1];
nextTab = nt;
} catch (Throwable ex) { // try to cope with OOME
sizeCtl = Integer.MAX_VALUE;
return;
}
nextTable = nextTab;
transferIndex = n;
}
int nextn = nextTab.length;
//数组元素复制结束的标志位
ForwardingNode fwd = new ForwardingNode(nextTab);
//advance表示该节点是否处理成功,处理成功后继续遍历,否则该节点再次处理(CAS)
boolean advance = true;
//循环是否接受的标志
boolean finishing = false; // to ensure sweep before committing nextTab
for (int i = 0, bound = 0;;) {
Node f; int fh;
while (advance) {
int nextIndex, nextBound;
if (--i >= bound || finishing)
advance = false;
else if ((nextIndex = transferIndex) <= 0) {
i = -1;
advance = false;
}
else if (U.compareAndSwapInt
(this, TRANSFERINDEX, nextIndex,
nextBound = (nextIndex > stride ?
nextIndex - stride : 0))) {
bound = nextBound;
i = nextIndex - 1;
advance = false;
}
}
if (i < 0 || i >= n || i + n >= nextn) {
int sc;
if (finishing) {
//数组复制结束后的操作
nextTable = null;
table = nextTab;
sizeCtl = (n << 1) - (n >>> 1); 原数组长度的1.75倍,即扩容后的0.75倍
return;
}
//利用CAS方法更新sizeCtl,在这里面sizectl值减一,说明新加入一个线程参与到扩容操作
if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
return;
finishing = advance = true;
i = n; // recheck before commit
}
}
//如果遍历到的节点为空 则放入ForwardingNode指针
else if ((f = tabAt(tab, i)) == null)
advance = casTabAt(tab, i, null, fwd);
//如果遍历到ForwardingNode节点 说明这个点已经被处理过了 直接跳过
else if ((fh = f.hash) == MOVED)
advance = true; // already processed
else {
//synchronized锁保证节点复制的线程安全
synchronized (f) {
if (tabAt(tab, i) == f) {
Node ln, hn;
//链表节点,头插法得到ln和hn两条链表,分别对应nextTable中下标i和n+i的元素
if (fh >= 0) {
int runBit = fh & n;
Node lastRun = f;
for (Node p = f.next; p != null; p = p.next) {
int b = p.hash & n;
if (b != runBit) {
runBit = b;
lastRun = p;
}
}
if (runBit == 0) {
ln = lastRun;
hn = null;
}
else {
hn = lastRun;
ln = null;
}
for (Node p = f; p != lastRun; p = p.next) {
int ph = p.hash; K pk = p.key; V pv = p.val;
if ((ph & n) == 0)
ln = new Node(ph, pk, pv, ln);
else
hn = new Node(ph, pk, pv, hn);
}
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd);
advance = true;
}
//红黑树节点,先尾插法得到由TreeNode组成的ln和hn两条链表,分别对应nextTable中下标i和n+i的元素,然后作为参数传入TreeBin的构造方法
else if (f instanceof TreeBin) {
TreeBin t = (TreeBin)f;
TreeNode lo = null, loTail = null;
TreeNode hi = null, hiTail = null;
int lc = 0, hc = 0;
for (Node e = t.first; e != null; e = e.next) {
int h = e.hash;
TreeNode p = new TreeNode
(h, e.key, e.val, null, null);
if ((h & n) == 0) {
if ((p.prev = loTail) == null)
lo = p;
else
loTail.next = p;
loTail = p;
++lc;
}
else {
if ((p.prev = hiTail) == null)
hi = p;
else
hiTail.next = p;
hiTail = p;
++hc;
}
}
ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
(hc != 0) ? new TreeBin(lo) : t;
hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
(lc != 0) ? new TreeBin(hi) : t;
setTabAt(nextTab, i, ln);
setTabAt(nextTab, i + n, hn);
setTabAt(tab, i, fwd);
advance = true;
}
}
}
}
}
}
if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
是用来保证只能有一个线程进入后续逻辑,是特别精髓的一个部分,放到最后再专门解析。
该方法用于根据key值查找对应的value值,逻辑并不复杂。
/**
 * Returns the value mapped to {@code key}, or null if absent.
 * Lock-free: reads the bin head via tabAt, then checks the head, delegates
 * to Node.find for special (negative-hash) nodes, or walks the list.
 * Generic parameters lost during HTML extraction are restored.
 *
 * @param key the key to look up (must not be null)
 * @return the mapped value, or null if none
 */
public V get(Object key) {
Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
int h = spread(key.hashCode());
if ((tab = table) != null && (n = tab.length) > 0 &&
(e = tabAt(tab, (n - 1) & h)) != null) {
// Check the bin head first.
if ((eh = e.hash) == h) {
if ((ek = e.key) == key || (ek != null && key.equals(ek)))
return e.val;
}
else if (eh < 0)
// Special node (ForwardingNode / TreeBin / ReservationNode):
// delegate to its own find implementation.
return (p = e.find(h, key)) != null ? p.val : null;
// Plain list: walk the remaining nodes.
while ((e = e.next) != null) {
if (e.hash == h &&
((ek = e.key) == key || (ek != null && key.equals(ek))))
return e.val;
}
}
return null;
}
当桶元素为特殊的Node(ForwardingNode, ReservationNode, TreeBin)时,分别进入对应的find方法进行查找。
由于ForwardingNode存在一个指向nextTable的指针,因而会重新在新数组中定位查找,其相当于一个递归的循环。
/**
 * Looks up key {@code k} with hash {@code h} by following this forwarding
 * node into nextTable. Uses a labeled loop instead of recursion so that
 * chains of forwarding nodes cannot cause arbitrarily deep recursion.
 * Generic parameters lost during HTML extraction are restored.
 */
Node<K,V> find(int h, Object k) {
// loop to avoid arbitrarily deep recursion on forwarding nodes
outer: for (Node<K,V>[] tab = nextTable;;) {
Node<K,V> e; int n;
if (k == null || tab == null || (n = tab.length) == 0 ||
(e = tabAt(tab, (n - 1) & h)) == null)
return null;
for (;;) {
int eh; K ek;
if ((eh = e.hash) == h &&
((ek = e.key) == k || (ek != null && k.equals(ek))))
return e;
if (eh < 0) {
if (e instanceof ForwardingNode) {
// Another resize is in progress: hop to its nextTable.
tab = ((ForwardingNode<K,V>)e).nextTable;
continue outer;
}
else
// TreeBin / ReservationNode: use its own find.
return e.find(h, k);
}
if ((e = e.next) == null)
return null;
}
}
}
ReservationNode只是占位节点,不会存储键值对,因此find固定返回null。
// A ReservationNode never holds data, so lookups always miss.
// (Return-type generics lost during HTML extraction are restored.)
Node<K,V> find(int h, Object k) {
return null;
}
TreeBin的相关逻辑后续展开解析
该方法用于加入新节点。
ConcurrentHashMap的节点的key和value值都不能为null。
ConcurrentHashMap通过如下方式保证put操作的线程安全;
(1)如果table数组为空,初始化table数组
(2)根据hash值确定节点在table数组的位置,如果这个位置为null,直接通过CAS放入
(3)如果该位置的节点为ForwardingNode,说明有其他线程在对ConcurrentHashMap进行扩容操作,此时该线程也需要尝试加入扩容操作。(该线程并不一定会参与扩容,在helpTransfer中会再次进行相关校验)
(4)如果节点是链表或红黑树,通过synchronized锁加入节点。
/**
 * Implementation for put and putIfAbsent. Null keys/values are rejected.
 * Thread safety: empty bins are filled via CAS without locking; forwarding
 * nodes trigger helpTransfer; list/tree bins are updated under the bin
 * head's monitor. Generic parameters lost during HTML extraction are
 * restored to match the JDK8 source.
 *
 * @param key          key (non-null)
 * @param value        value (non-null)
 * @param onlyIfAbsent if true, do not replace an existing value
 * @return the previous value, or null if none
 */
final V putVal(K key, V value, boolean onlyIfAbsent) {
if (key == null || value == null) throw new NullPointerException();
int hash = spread(key.hashCode());
int binCount = 0;
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
// Lazily initialize the table on first insertion.
if (tab == null || (n = tab.length) == 0)
tab = initTable();
// Empty bin: CAS in a new node, no lock needed.
else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
if (casTabAt(tab, i, null,
new Node<K,V>(hash, key, value, null)))
break; // no lock when adding to empty bin
}
// Forwarding node: a resize is in progress, try to help.
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
// List or tree bin: lock the bin head.
synchronized (f) {
if (tabAt(tab, i) == f) {
// Plain list: find the key or append at the tail.
if (fh >= 0) {
binCount = 1;
for (Node<K,V> e = f;; ++binCount) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
oldVal = e.val;
if (!onlyIfAbsent)
e.val = value;
break;
}
Node<K,V> pred = e;
if ((e = e.next) == null) {
pred.next = new Node<K,V>(hash, key,
value, null);
break;
}
}
}
// Tree bin: delegate insertion to the red-black tree.
else if (f instanceof TreeBin) {
Node<K,V> p;
binCount = 2;
if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
value)) != null) {
oldVal = p.val;
if (!onlyIfAbsent)
p.val = value;
}
}
}
}
if (binCount != 0) {
// Treeify the bin if the list grew past the threshold.
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
if (oldVal != null)
return oldVal;
break;
}
}
}
// Bump the size counter; may trigger a resize.
addCount(1L, binCount);
return null;
}
ConcurrentHashMap通过该方法实现节点的删除或替换。
逻辑和代码与ConcurrentHashMap#putVal(K, V, Boolean)很类似,通过以下方式保证线程安全
(1)如果table数组为空,结束
(2)根据hash值确定节点在table数组的位置,如果这个位置为null,结束
(3)如果该位置的节点为ForwardingNode,说明有其他线程在对ConcurrentHashMap进行扩容操作,此时该线程也需要尝试加入扩容操作。(该线程并不一定会参与扩容,在helpTransfer中会再次进行相关校验)
(4)如果节点是链表或红黑树,通过synchronized锁替换(或删除)节点。
/**
 * Implementation for remove and replace: substitutes {@code value} for the
 * mapping of {@code key} (or unlinks the node when value is null), but only
 * if the current value matches {@code cv} (cv == null means "any value").
 * Thread safety mirrors putVal: forwarding nodes trigger helpTransfer,
 * list/tree bins are updated under the bin head's monitor. Generic
 * parameters lost during HTML extraction are restored.
 *
 * @param key   the key
 * @param value the new value, or null to remove the node
 * @param cv    the expected current value, or null to match any
 * @return the previous value if a replacement/removal happened, else null
 */
final V replaceNode(Object key, V value, Object cv) {
int hash = spread(key.hashCode());
for (Node<K,V>[] tab = table;;) {
Node<K,V> f; int n, i, fh;
// Empty table or empty bin: nothing to replace.
if (tab == null || (n = tab.length) == 0 ||
(f = tabAt(tab, i = (n - 1) & hash)) == null)
break;
// Resize in progress: help, then retry on the new table.
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
V oldVal = null;
// Set once the bin was examined under the lock; allows exiting the loop.
boolean validated = false;
synchronized (f) {
if (tabAt(tab, i) == f) {
// Plain list: search, then replace or unlink.
if (fh >= 0) {
validated = true;
for (Node<K,V> e = f, pred = null;;) {
K ek;
if (e.hash == hash &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
V ev = e.val;
if (cv == null || cv == ev ||
(ev != null && cv.equals(ev))) {
oldVal = ev;
if (value != null)
e.val = value;
else if (pred != null)
pred.next = e.next;
else
setTabAt(tab, i, e.next);
}
break;
}
pred = e;
if ((e = e.next) == null)
break;
}
}
// Tree bin: search the tree, then replace or remove.
else if (f instanceof TreeBin) {
validated = true;
TreeBin<K,V> t = (TreeBin<K,V>)f;
TreeNode<K,V> r, p;
if ((r = t.root) != null &&
(p = r.findTreeNode(hash, key, null)) != null) {
V pv = p.val;
if (cv == null || cv == pv ||
(pv != null && cv.equals(pv))) {
oldVal = pv;
if (value != null)
p.val = value;
else if (t.removeTreeNode(p))
// Tree shrank enough: fall back to a list.
setTabAt(tab, i, untreeify(t.first));
}
}
}
}
}
if (validated) {
if (oldVal != null) {
// A removal decrements the size counter.
if (value == null)
addCount(-1L, -1);
return oldVal;
}
break;
}
}
}
return null;
}
该方法根据key值和BiFuncation的计算结果修改或者插入或者删除对应的节点
逻辑和代码与ConcurrentHashMap#putVal(K, V, Boolean)和ConcurrentHashMap#replaceNode(K, V, Object)很类似,通过以下方式保证线程安全
(1)如果table数组为空,结束
(2)根据hash值确定节点再table数组的位置,如果这个位置为null,新建ReservationNode作为synchronized锁的对象。
(3)如果该位置的节点为Forwarding,说明有其他线程在对ConcurrentHashMap进行扩容操作,此时该线程也需要参数加入扩容操作。(该线程并不一定会参与扩容,在helpTransfer中会再次进行相关校验)
(4)如果节点是链表或红黑树,通过synchronized锁替换(或删除或插入)节点。
public V compute(K key,
BiFunction super K, ? super V, ? extends V> remappingFunction) {
if (key == null || remappingFunction == null)
throw new NullPointerException();
int h = spread(key.hashCode());
V val = null;
int delta = 0;
int binCount = 0;
for (Node[] tab = table;;) {
Node f; int n, i, fh;
if (tab == null || (n = tab.length) == 0)
tab = initTable();
else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
//ReservationNode本身只是为了synchronized有加锁对象而创建的空的占位节点,本身没有任何意义,仅仅在compute和computeIfAbsent中使用
Node r = new ReservationNode();
synchronized (r) {
if (casTabAt(tab, i, null, r)) {
binCount = 1;
Node node = null;
try {
if ((val = remappingFunction.apply(key, null)) != null) {
delta = 1;
node = new Node(h, key, val, null);
}
} finally {
setTabAt(tab, i, node);
}
}
}
if (binCount != 0)
break;
}
else if ((fh = f.hash) == MOVED)
tab = helpTransfer(tab, f);
else {
synchronized (f) {
if (tabAt(tab, i) == f) {
if (fh >= 0) {
binCount = 1;
for (Node e = f, pred = null;; ++binCount) {
K ek;
if (e.hash == h &&
((ek = e.key) == key ||
(ek != null && key.equals(ek)))) {
val = remappingFunction.apply(key, e.val);
if (val != null)
e.val = val;
else {
delta = -1;
Node en = e.next;
if (pred != null)
pred.next = en;
else
setTabAt(tab, i, en);
}
break;
}
pred = e;
if ((e = e.next) == null) {
val = remappingFunction.apply(key, null);
if (val != null) {
delta = 1;
pred.next =
new Node(h, key, val, null);
}
break;
}
}
}
else if (f instanceof TreeBin) {
binCount = 1;
TreeBin t = (TreeBin)f;
TreeNode r, p;
if ((r = t.root) != null)
p = r.findTreeNode(h, key, null);
else
p = null;
V pv = (p == null) ? null : p.val;
val = remappingFunction.apply(key, pv);
if (val != null) {
if (p != null)
p.val = val;
else {
delta = 1;
t.putTreeVal(h, key, val);
}
}
else if (p != null) {
delta = -1;
if (t.removeTreeNode(p))
setTabAt(tab, i, untreeify(t.first));
}
}
}
}
if (binCount != 0) {
if (binCount >= TREEIFY_THRESHOLD)
treeifyBin(tab, i);
break;
}
}
}
if (delta != 0)
addCount((long)delta, binCount);
return val;
}
最后,介绍两篇个人感觉ConcurrentHashMap相关的不错的博客:
深入理解ConcurrentHashMap之源码分析(JDK8版本)
Java集合类框架学习 5.3—— ConcurrentHashMap(JDK1.8)