ConcurrentHashMap was introduced in JDK 1.5 as an alternative to Hashtable or a synchronized Map for
implementing a thread-safe Map (HashMap is not thread-safe).
ConcurrentHashMap is the better choice because not
only can it be used safely in a concurrent multi-threaded environment, but it also provides
better performance than Hashtable and a synchronized Map.
ConcurrentHashMap performs better than Hashtable
because it locks only a Segment of the Map.
Retrieval operations do not block
in ConcurrentHashMap, i.e. it allows
concurrent read operations and, at the same time, maintains integrity by synchronizing
write operations.
ConcurrentHashMap provides all the functionality
of Hashtable plus an additional feature called "concurrency level", which allows ConcurrentHashMap to partition the Map.
The default concurrency level is 16; accordingly,
the Map is divided into 16 parts and each part is governed by a
different lock.
This means 16 threads can
operate on the Map simultaneously, as long as they are operating on different parts of
the Map. This makes ConcurrentHashMap highly performant while keeping thread-safety
intact.
We can change the default
concurrency level of 16 by passing the desired level to a constructor while creating the ConcurrentHashMap.
In ConcurrentHashMap, multiple
readers are allowed to read concurrently without any blocking. This is achieved
by partitioning the Map into different parts based on the concurrency level.
Locking is used when we update
the Map.
Since retrieval operations are not synchronized with update operations like put(), remove(), putAll() or clear(), a concurrent retrieval may not reflect the most recent change to the Map.
ConcurrentHashMap also uses
ReentrantLock to internally lock its segments.
Like Hashtable but unlike
HashMap, it does not allow null to be used as a key or value.
// NOTE(review): abridged excerpt of the JDK implementation. Several members it
// relies on (hash(), segmentShift, segmentMask, HashEntry, rehash(), ...) are
// not shown here; the class body's opening/closing braces were lost in the paste.
public class ConcurrentHashMap<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V>, Serializable

/** Default initial capacity for this table. Can also be specified via a constructor. */
static final int DEFAULT_INITIAL_CAPACITY = 16;

/** Default load factor for this table. Can also be specified via a constructor. */
static final float DEFAULT_LOAD_FACTOR = 0.75f;

/** Default concurrency level (number of segments). Can also be specified via a constructor. */
static final int DEFAULT_CONCURRENCY_LEVEL = 16;

/**
 * Maximum capacity. MUST be a power of two <= 1<<30
 * to ensure that entries are indexable using ints.
 */
static final int MAXIMUM_CAPACITY = 1 << 30;

/** Maximum number of segments to allow. */
static final int MAX_SEGMENTS = 1 << 16; // slightly conservative

/** The segments, each of which is a specialized hash table. */
final Segment<K,V>[] segments;

// Lazily-created view collections; transient so they are rebuilt on demand
// after deserialization.
transient Set<K> keySet;
transient Set<Map.Entry<K,V>> entrySet;
transient Collection<V> values;
/**
 * Maps the specified key to the specified value.
 * Null values are rejected explicitly; a null key fails on the
 * key.hashCode() call below with the same NullPointerException.
 *
 * @return the previous value associated with key, or null if none
 */
public V put(K key, V value) {
    if (value == null)
        throw new NullPointerException();
    final int h = hash(key.hashCode());
    final Segment<K,V> seg = segmentFor(h);
    return seg.put(key, h, value, false);
}
/**
 * Returns the segment that should hold entries with the given hash:
 * the top bits of the hash, selected by segmentShift/segmentMask,
 * index into the segments array.
 */
final Segment<K,V> segmentFor(int hash) {
    final int idx = (hash >>> segmentShift) & segmentMask;
    return segments[idx];
}
/**
 * Segments are specialized versions of hash tables. This subclasses from
 * ReentrantLock just to simplify some locking and avoid separate construction.
 */
static final class Segment<K,V> extends ReentrantLock implements Serializable {

    /** The number of elements in this segment's region; volatile so unlocked readers can check it. */
    transient volatile int count;

    /**
     * Number of updates that alter the size of the table. If modCounts change
     * during a traversal of segments computing size or checking containsValue,
     * then we might have an inconsistent view of state so (usually) must retry.
     */
    transient int modCount;

    /**
     * The table is rehashed when its size exceeds this threshold.
     * threshold = (int)(capacity * loadFactor)
     */
    transient int threshold;

    /** The per-segment table; volatile so a new array is safely published to readers. */
    transient volatile HashEntry<K,V>[] table;

    /**
     * The load factor for the hash table. Even though this value is the same
     * for all segments, it is replicated to avoid needing links to the outer
     * object.
     */
    final float loadFactor;
/**
 * Creates a segment with the given initial table capacity and load factor.
 * loadFactor is assigned first because setTable() reads it to compute the
 * resize threshold.
 */
Segment(int initialCapacity, float lf) {
    this.loadFactor = lf;
    setTable(HashEntry.<K,V>newArray(initialCapacity));
}
/**
 * Creates a Segment array of the given length. The unchecked raw-type
 * creation is unavoidable: generic arrays cannot be instantiated directly.
 */
@SuppressWarnings("unchecked")
static final <K,V> Segment<K,V>[] newArray(int i) {
    Segment<K,V>[] result = new Segment[i];
    return result;
}
/**
 * Installs a new bucket array and recomputes the resize threshold.
 * threshold is written BEFORE the store to the volatile field table, so any
 * reader that sees the new table also sees a threshold consistent with it —
 * do not reorder these two statements.
 */
void setTable(HashEntry<K,V>[] newTable) {
    threshold = (int)(newTable.length * loadFactor);
    table = newTable;
}
/**
 * Reads the value field of an entry while holding the segment lock.
 * Called only if the value field ever appears to be null. This is possible
 * only if a compiler happens to reorder a HashEntry initialization with its
 * table assignment, which is legal under the memory model but is not known
 * to ever occur; locking forces a consistent read.
 */
V readValueUnderLock(HashEntry<K,V> e) {
    final V v;
    lock();
    try {
        v = e.value;
    } finally {
        unlock();
    }
    return v;
}
/* Specialized implementations of map methods. */

/**
 * Segment-local get: scans the bucket chain for the key without locking.
 * The read of the volatile count establishes visibility of the table; a
 * null value (possible only under an unlikely reordering) is re-read
 * under the lock.
 */
V get(Object key, int hash) {
    if (count != 0) { // read-volatile
        for (HashEntry<K,V> e = getFirst(hash); e != null; e = e.next) {
            if (e.hash == hash && key.equals(e.key)) {
                V v = e.value;
                return (v != null) ? v : readValueUnderLock(e); // recheck
            }
        }
    }
    return null;
}
/**
 * Segment-local containsKey: lock-free scan of the bucket chain for an
 * entry whose hash and key both match.
 */
boolean containsKey(Object key, int hash) {
    if (count == 0) // read-volatile; empty segment, nothing to scan
        return false;
    for (HashEntry<K,V> e = getFirst(hash); e != null; e = e.next) {
        if (e.hash == hash && key.equals(e.key))
            return true;
    }
    return false;
}
/**
 * Segment-local containsValue: lock-free scan of every bucket chain,
 * comparing each entry's value with value.equals(). A null entry value
 * (possible only under an unlikely reordering) is re-read under the lock.
 */
boolean containsValue(Object value) {
    if (count != 0) { // read-volatile
        for (HashEntry<K,V> bucket : table) {
            for (HashEntry<K,V> e = bucket; e != null; e = e.next) {
                V v = e.value;
                if (v == null) // recheck
                    v = readValueUnderLock(e);
                if (value.equals(v))
                    return true;
            }
        }
    }
    return false;
}
/**
 * Segment-local put; the whole operation runs under this segment's
 * ReentrantLock.
 *
 * @param onlyIfAbsent when true, an existing mapping is left untouched
 *                     (putIfAbsent semantics)
 * @return the previous value mapped to key, or null if there was none
 */
V put(K key, int hash, V value, boolean onlyIfAbsent) {
    lock();
    try {
        int c = count;
        if (c++ > threshold) // ensure capacity
            rehash();
        HashEntry<K,V>[] tab = table;
        int index = hash & (tab.length - 1); // table length is a power of two
        HashEntry<K,V> first = tab[index];
        // Walk the bucket chain looking for an entry with the same key.
        HashEntry<K,V> e = first;
        while (e != null && (e.hash != hash || !key.equals(e.key)))
            e = e.next;
        V oldValue;
        if (e != null) {
            // Key already present: overwrite unless caller asked for put-if-absent.
            oldValue = e.value;
            if (!onlyIfAbsent)
                e.value = value;
        }
        else {
            // New mapping: link a new entry at the head of the bucket, then
            // publish through the volatile write to count (declared volatile
            // above) so unlocked readers see the new entry. Keep the
            // tab[index] store before the count store.
            oldValue = null;
            ++modCount;
            tab[index] = new HashEntry<K,V>(key, hash, first, value);
            count = c; // write-volatile
        }
        return oldValue;
    } finally {
        unlock();
    }
}
/**
 * Segment-local remove; match on key only if value is null, else match both
 * key and value. Runs entirely under this segment's lock.
 *
 * @return the removed value, or null if no matching entry was found
 */
V remove(Object key, int hash, Object value) {
    lock();
    try {
        int c = count - 1;
        HashEntry<K,V>[] tab = table;
        int index = hash & (tab.length - 1);
        HashEntry<K,V> first = tab[index];
        // Walk the bucket chain looking for the matching entry.
        HashEntry<K,V> e = first;
        while (e != null && (e.hash != hash || !key.equals(e.key)))
            e = e.next;
        V oldValue = null;
        if (e != null) {
            V v = e.value;
            if (value == null || value.equals(v)) {
                oldValue = v;
                // All entries following removed node can stay
                // in list, but all preceding ones need to be
                // cloned (NOTE(review): presumably because HashEntry links
                // are immutable in the JDK original — HashEntry is not shown
                // here; confirm). The prefix is rebuilt in reverse order.
                ++modCount;
                HashEntry<K,V> newFirst = e.next;
                for (HashEntry<K,V> p = first; p != e; p = p.next)
                    newFirst = new HashEntry<K,V>(p.key, p.hash, newFirst, p.value);
                tab[index] = newFirst; // install rebuilt chain before count store
                count = c; // write-volatile
            }
        }
        return oldValue;
    } finally {
        unlock();
    }
}
/**
 * Segment-local clear: under the lock, nulls out every bucket, bumps
 * modCount, and finally publishes the empty state via the volatile write
 * to count.
 */
void clear() {
    if (count != 0) {
        lock();
        try {
            final HashEntry<K,V>[] tab = table;
            int i = tab.length;
            while (--i >= 0)
                tab[i] = null;
            ++modCount;
            count = 0; // write-volatile
        } finally {
            unlock();
        }
    }
}
No comments:
Post a Comment