1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19 package org.apache.hadoop.hbase.client;
20
21 import java.io.Closeable;
22 import java.io.IOException;
23 import java.io.InterruptedIOException;
24 import java.lang.reflect.UndeclaredThrowableException;
25 import java.util.ArrayList;
26 import java.util.Date;
27 import java.util.HashSet;
28 import java.util.LinkedHashMap;
29 import java.util.List;
30 import java.util.Map;
31 import java.util.Map.Entry;
32 import java.util.NavigableMap;
33 import java.util.Set;
34 import java.util.concurrent.ConcurrentHashMap;
35 import java.util.concurrent.ConcurrentMap;
36 import java.util.concurrent.ExecutorService;
37 import java.util.concurrent.LinkedBlockingQueue;
38 import java.util.concurrent.ThreadPoolExecutor;
39 import java.util.concurrent.TimeUnit;
40 import java.util.concurrent.atomic.AtomicBoolean;
41 import java.util.concurrent.atomic.AtomicInteger;
42
43 import org.apache.commons.logging.Log;
44 import org.apache.commons.logging.LogFactory;
45 import org.apache.hadoop.hbase.classification.InterfaceAudience;
46 import org.apache.hadoop.conf.Configuration;
47 import org.apache.hadoop.hbase.Chore;
48 import org.apache.hadoop.hbase.DoNotRetryIOException;
49 import org.apache.hadoop.hbase.HBaseConfiguration;
50 import org.apache.hadoop.hbase.HConstants;
51 import org.apache.hadoop.hbase.HRegionInfo;
52 import org.apache.hadoop.hbase.HRegionLocation;
53 import org.apache.hadoop.hbase.HTableDescriptor;
54 import org.apache.hadoop.hbase.MasterNotRunningException;
55 import org.apache.hadoop.hbase.MetaTableAccessor;
56 import org.apache.hadoop.hbase.RegionLocations;
57 import org.apache.hadoop.hbase.RegionTooBusyException;
58 import org.apache.hadoop.hbase.ServerName;
59 import org.apache.hadoop.hbase.Stoppable;
60 import org.apache.hadoop.hbase.TableName;
61 import org.apache.hadoop.hbase.TableNotEnabledException;
62 import org.apache.hadoop.hbase.TableNotFoundException;
63 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
64 import org.apache.hadoop.hbase.client.AsyncProcess.AsyncRequestFuture;
65 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitor;
66 import org.apache.hadoop.hbase.client.MetaScanner.MetaScannerVisitorBase;
67 import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicy;
68 import org.apache.hadoop.hbase.client.backoff.ClientBackoffPolicyFactory;
69 import org.apache.hadoop.hbase.client.coprocessor.Batch;
70 import org.apache.hadoop.hbase.exceptions.RegionMovedException;
71 import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
72 import org.apache.hadoop.hbase.ipc.RpcClient;
73 import org.apache.hadoop.hbase.ipc.RpcClientFactory;
74 import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
75 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
76 import org.apache.hadoop.hbase.protobuf.RequestConverter;
77 import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
78 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ClientService;
79 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceRequest;
80 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.CoprocessorServiceResponse;
81 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnRequest;
82 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AddColumnResponse;
83 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionRequest;
84 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.AssignRegionResponse;
85 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceRequest;
86 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.BalanceResponse;
87 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceRequest;
88 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateNamespaceResponse;
89 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableRequest;
90 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.CreateTableResponse;
91 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnRequest;
92 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteColumnResponse;
93 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceRequest;
94 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteNamespaceResponse;
95 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotRequest;
96 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteSnapshotResponse;
97 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableRequest;
98 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DeleteTableResponse;
99 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableRequest;
100 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DisableTableResponse;
101 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsRequest;
102 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.DispatchMergingRegionsResponse;
103 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorRequest;
104 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableCatalogJanitorResponse;
105 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableRequest;
106 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.EnableTableResponse;
107 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureRequest;
108 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ExecProcedureResponse;
109 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusRequest;
110 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetClusterStatusResponse;
111 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsRequest;
112 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetCompletedSnapshotsResponse;
113 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorRequest;
114 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetNamespaceDescriptorResponse;
115 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusRequest;
116 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetSchemaAlterStatusResponse;
117 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsRequest;
118 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableDescriptorsResponse;
119 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesRequest;
120 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.GetTableNamesResponse;
121 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledRequest;
122 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsCatalogJanitorEnabledResponse;
123 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningRequest;
124 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsMasterRunningResponse;
125 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
126 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
127 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneRequest;
128 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsRestoreSnapshotDoneResponse;
129 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
130 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
131 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsRequest;
132 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListNamespaceDescriptorsResponse;
133 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceRequest;
134 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableDescriptorsByNamespaceResponse;
135 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceRequest;
136 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ListTableNamesByNamespaceResponse;
137 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MasterService;
138 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnRequest;
139 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyColumnResponse;
140 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceRequest;
141 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyNamespaceResponse;
142 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableRequest;
143 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ModifyTableResponse;
144 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionRequest;
145 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.MoveRegionResponse;
146 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionRequest;
147 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.OfflineRegionResponse;
148 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotRequest;
149 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RestoreSnapshotResponse;
150 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRequest;
151 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanResponse;
152 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningRequest;
153 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SetBalancerRunningResponse;
154 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownRequest;
155 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.ShutdownResponse;
156 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotRequest;
157 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.SnapshotResponse;
158 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterRequest;
159 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableRequest;
160 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.StopMasterResponse;
161 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.TruncateTableResponse;
162 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionRequest;
163 import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.UnassignRegionResponse;
164 import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
165 import org.apache.hadoop.hbase.security.User;
166 import org.apache.hadoop.hbase.security.UserProvider;
167 import org.apache.hadoop.hbase.util.Bytes;
168 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
169 import org.apache.hadoop.hbase.util.ExceptionUtil;
170 import org.apache.hadoop.hbase.util.Threads;
171 import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
172 import org.apache.hadoop.hbase.zookeeper.ZKUtil;
173 import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
174 import org.apache.hadoop.ipc.RemoteException;
175 import org.apache.zookeeper.KeeperException;
176
177 import com.google.common.annotations.VisibleForTesting;
178 import com.google.protobuf.BlockingRpcChannel;
179 import com.google.protobuf.RpcController;
180 import com.google.protobuf.ServiceException;
181
182
183
184
185 @SuppressWarnings("serial")
186 @InterfaceAudience.Private
187
188 class ConnectionManager {
  static final Log LOG = LogFactory.getLog(ConnectionManager.class);

  /** Config key: track retries per server rather than globally. */
  public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server";
  /** Config key: enables client-side nonces for idempotent retry protection. */
  private static final String CLIENT_NONCES_ENABLED_KEY = "hbase.client.nonces.enabled";

  // LRU cache of shared ("managed") connections, keyed by configuration.
  // Initialized in the static block below; all access must synchronize on the
  // map itself.
  static final Map<HConnectionKey, HConnectionImplementation> CONNECTION_INSTANCES;

  // Cap on the number of cached connections; set once in the static block.
  public static final int MAX_CACHED_CONNECTION_INSTANCES;

  // Process-wide nonce generator shared by all connections; lazily created
  // under nonceGeneratorCreateLock (see HConnectionImplementation ctor).
  private static volatile NonceGenerator nonceGenerator = null;
  // Guards lazy creation of the shared nonce generator above.
  private static Object nonceGeneratorCreateLock = new Object();
208
  static {
    // Upper bound on cached connections: tied to the ZooKeeper max-client-
    // connections limit (+1) so the cache can never hold more live
    // connections than ZK would accept from this client anyway.
    MAX_CACHED_CONNECTION_INSTANCES = HBaseConfiguration.create().getInt(
      HConstants.ZOOKEEPER_MAX_CLIENT_CNXNS, HConstants.DEFAULT_ZOOKEPER_MAX_CLIENT_CNXNS) + 1;
    // Access-ordered LinkedHashMap (third ctor arg = true) acting as an LRU:
    // once the cap is exceeded, the least recently used entry is evicted.
    CONNECTION_INSTANCES = new LinkedHashMap<HConnectionKey, HConnectionImplementation>(
        (int) (MAX_CACHED_CONNECTION_INSTANCES / 0.75F) + 1, 0.75F, true) {
      @Override
      protected boolean removeEldestEntry(
          Map.Entry<HConnectionKey, HConnectionImplementation> eldest) {
        return size() > MAX_CACHED_CONNECTION_INSTANCES;
      }
    };
  }
225
226
  /**
   * No-op nonce generator used when client nonces are disabled via
   * {@code hbase.client.nonces.enabled}; always hands out
   * {@link HConstants#NO_NONCE}.
   */
  static class NoNonceGenerator implements NonceGenerator {
    @Override
    public long getNonceGroup() {
      return HConstants.NO_NONCE;
    }
    @Override
    public long newNonce() {
      return HConstants.NO_NONCE;
    }
  }
237
238
239
240
  /** Static utility holder; never instantiated. */
  private ConnectionManager() {
    super();
  }
244
245
246
247
248
249
  /**
   * Test hook: swap the nonce generator on a connection.
   * @param conn the connection to modify; must be an
   *   {@link HConnectionImplementation}
   * @param cnm the replacement nonce generator
   * @return the generator that was previously installed
   */
  @VisibleForTesting
  static NonceGenerator injectNonceGeneratorForTesting(
      ClusterConnection conn, NonceGenerator cnm) {
    HConnectionImplementation connImpl = (HConnectionImplementation)conn;
    NonceGenerator ng = connImpl.getNonceGenerator();
    LOG.warn("Nonce generator is being replaced by test code for " + cnm.getClass().getName());
    connImpl.nonceGenerator = cnm;
    return ng;
  }
259
260
261
262
263
264
265
266
267
268
  /**
   * Get a shared, reference-counted ("managed") connection for this
   * configuration, creating and caching one if needed.
   * @deprecated managed connections are deprecated; prefer
   *   {@link #createConnection(Configuration)} and close it yourself.
   */
  @Deprecated
  public static HConnection getConnection(final Configuration conf) throws IOException {
    return getConnectionInternal(conf);
  }
273
274
  /**
   * Look up (or create) the cached connection keyed by this configuration and
   * bump its reference count before returning it. A cached-but-closed entry is
   * discarded and replaced. All cache mutation happens while holding the
   * CONNECTION_INSTANCES monitor.
   */
  static ClusterConnection getConnectionInternal(final Configuration conf)
    throws IOException {
    HConnectionKey connectionKey = new HConnectionKey(conf);
    synchronized (CONNECTION_INSTANCES) {
      HConnectionImplementation connection = CONNECTION_INSTANCES.get(connectionKey);
      if (connection == null) {
        connection = (HConnectionImplementation)createConnection(conf, true);
        CONNECTION_INSTANCES.put(connectionKey, connection);
      } else if (connection.isClosed()) {
        // Stale entry: evict it, then build a fresh connection under the same key.
        ConnectionManager.deleteConnection(connectionKey, true);
        connection = (HConnectionImplementation)createConnection(conf, true);
        CONNECTION_INSTANCES.put(connectionKey, connection);
      }
      connection.incCount();
      return connection;
    }
  }
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
  /**
   * Create a fresh, unmanaged connection; the caller owns it and is
   * responsible for closing it.
   */
  public static HConnection createConnection(Configuration conf) throws IOException {
    return createConnectionInternal(conf);
  }

  // Same as createConnection(conf) but typed as ClusterConnection; runs as
  // the current process user.
  static ClusterConnection createConnectionInternal(Configuration conf) throws IOException {
    UserProvider provider = UserProvider.instantiate(conf);
    return createConnection(conf, false, null, provider.getCurrent());
  }
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
  /**
   * Create a fresh, unmanaged connection that uses the supplied thread pool
   * for batch operations; the caller owns the connection and must close it.
   * @param pool executor for batch operations (the connection will not shut it down)
   */
  public static HConnection createConnection(Configuration conf, ExecutorService pool)
      throws IOException {
    UserProvider provider = UserProvider.instantiate(conf);
    return createConnection(conf, false, pool, provider.getCurrent());
  }
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
  /**
   * Create a fresh, unmanaged connection authenticated as the given user;
   * the caller owns the connection and must close it.
   */
  public static HConnection createConnection(Configuration conf, User user)
  throws IOException {
    return createConnection(conf, false, null, user);
  }
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
  /**
   * Create a fresh, unmanaged connection for the given user, backed by the
   * supplied thread pool; the caller owns the connection and must close it.
   */
  public static HConnection createConnection(Configuration conf, ExecutorService pool, User user)
  throws IOException {
    return createConnection(conf, false, pool, user);
  }

  /** @deprecated the managed flag is internal/legacy; use ConnectionFactory instead. */
  @Deprecated
  static HConnection createConnection(final Configuration conf, final boolean managed)
      throws IOException {
    UserProvider provider = UserProvider.instantiate(conf);
    return createConnection(conf, managed, null, provider.getCurrent());
  }

  /** @deprecated the managed flag is internal/legacy; delegates to ConnectionFactory. */
  @Deprecated
  static ClusterConnection createConnection(final Configuration conf, final boolean managed,
      final ExecutorService pool, final User user)
  throws IOException {
    return (ClusterConnection) ConnectionFactory.createConnection(conf, managed, pool, user);
  }
412
413
414
415
416
417
418
419
420
  /**
   * Drop one reference to the cached connection for this configuration; the
   * connection is closed and evicted once its count reaches zero.
   * @deprecated cached/managed connections are deprecated.
   */
  @Deprecated
  public static void deleteConnection(Configuration conf) {
    deleteConnection(new HConnectionKey(conf), false);
  }
425
426
427
428
429
430
431
432
  /**
   * Forcibly close and evict the given cached connection regardless of its
   * remaining reference count (used when the connection is known to be bad).
   * @deprecated cached/managed connections are deprecated.
   */
  @Deprecated
  public static void deleteStaleConnection(HConnection connection) {
    deleteConnection(connection, true);
  }
437
438
439
440
441
442
443
444 @Deprecated
445 public static void deleteAllConnections(boolean staleConnection) {
446 synchronized (CONNECTION_INSTANCES) {
447 Set<HConnectionKey> connectionKeys = new HashSet<HConnectionKey>();
448 connectionKeys.addAll(CONNECTION_INSTANCES.keySet());
449 for (HConnectionKey connectionKey : connectionKeys) {
450 deleteConnection(connectionKey, staleConnection);
451 }
452 CONNECTION_INSTANCES.clear();
453 }
454 }
455
456
457
458
459
  /**
   * Delete all cached connections, honoring reference counts.
   * @deprecated kept for backward compatibility; equivalent to
   *   {@code deleteAllConnections(false)}.
   */
  @Deprecated
  public static void deleteAllConnections() {
    deleteAllConnections(false);
  }
464
465
  /**
   * Find the cache entry holding this exact connection instance (identity
   * comparison, not equals) and delete it; silently no-ops when the
   * connection is not in the cache.
   */
  @Deprecated
  private static void deleteConnection(HConnection connection, boolean staleConnection) {
    synchronized (CONNECTION_INSTANCES) {
      for (Entry<HConnectionKey, HConnectionImplementation> e: CONNECTION_INSTANCES.entrySet()) {
        if (e.getValue() == connection) {
          deleteConnection(e.getKey(), staleConnection);
          break;
        }
      }
    }
  }
477
  /**
   * Decrement the reference count of the cached connection under this key;
   * close and evict it once unreferenced, or immediately when
   * staleConnection is set. Logs an error if the key is unknown.
   */
  @Deprecated
  private static void deleteConnection(HConnectionKey connectionKey, boolean staleConnection) {
    synchronized (CONNECTION_INSTANCES) {
      HConnectionImplementation connection = CONNECTION_INSTANCES.get(connectionKey);
      if (connection != null) {
        connection.decCount();
        if (connection.isZeroReference() || staleConnection) {
          CONNECTION_INSTANCES.remove(connectionKey);
          connection.internalClose();
        }
      } else {
        LOG.error("Connection not found in the list, can't delete it "+
          "(connection key=" + connectionKey + "). May be the key was modified?", new Exception());
      }
    }
  }
494
495
496
497
498
499
500
501
502
503
504
505
  /**
   * Run the given callback against a shared connection for its configuration,
   * releasing the connection reference afterwards.
   * @return the callback's result, or null when connectable or its conf is null
   * @throws IOException from the callback itself, or from close() — but a
   *   close failure is only surfaced when the callback succeeded, so it never
   *   masks the original error
   */
  @InterfaceAudience.Private
  public static <T> T execute(HConnectable<T> connectable) throws IOException {
    if (connectable == null || connectable.conf == null) {
      return null;
    }
    Configuration conf = connectable.conf;
    HConnection connection = getConnection(conf);
    boolean connectSucceeded = false;
    try {
      T returnValue = connectable.connect(connection);
      connectSucceeded = true;
      return returnValue;
    } finally {
      try {
        connection.close();
      } catch (Exception e) {
        // An interrupt must always propagate.
        ExceptionUtil.rethrowIfInterrupt(e);
        if (connectSucceeded) {
          throw new IOException("The connection to " + connection
              + " could not be deleted.", e);
        }
      }
    }
  }
530
531
532 @edu.umd.cs.findbugs.annotations.SuppressWarnings(
533 value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
534 justification="Access to the conncurrent hash map is under a lock so should be fine.")
535 static class HConnectionImplementation implements ClusterConnection, Closeable {
    static final Log LOG = LogFactory.getLog(HConnectionImplementation.class);
    // Base pause (ms) between client retries.
    private final long pause;
    // Number of retry attempts for operations, from TableConfiguration.
    private final int numTries;
    // RPC timeout (ms) read from hbase.rpc.timeout.
    final int rpcTimeout;
    // Nonce generator for idempotent-retry protection; shared process-wide
    // unless nonces are disabled (then a per-connection NoNonceGenerator).
    private NonceGenerator nonceGenerator = null;
    private final AsyncProcess asyncProcess;
    // Per-server statistics used to inform client backoff.
    private final ServerStatisticTracker stats;

    private volatile boolean closed;
    private volatile boolean aborted;

    // Optional multicast listener for dead-server notifications; null when
    // status publishing is disabled.
    ClusterStatusListener clusterStatusListener;

    // Serializes meta region location lookups.
    private final Object metaRegionLock = new Object();

    // Guards master and ZooKeeper keep-alive connection state.
    private final Object masterAndZKLock = new Object();

    // Deadline after which the keep-alive ZK watcher may be closed; managed
    // together with delayedClosing below.
    private long keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE;
    private final DelayedClosing delayedClosing =
      DelayedClosing.createAndStart(this);

    // Shared executor for batch ops; lazily created in getBatchPool() when
    // the caller did not supply one. cleanupPool records whether we own it.
    private volatile ExecutorService batchPool = null;
    private volatile boolean cleanupPool = false;

    private final Configuration conf;

    // Client-side table settings derived from conf (retries, buffer sizes, ...).
    private final TableConfiguration tableConfig;

    // RPC client; initialized after the cluster id is known.
    private RpcClient rpcClient;

    // Cache of region locations.
    private MetaCache metaCache = new MetaCache();

    // Reference count for managed (shared/cached) connections.
    private int refCount;

    // True when lifecycle is controlled by the shared-connection cache.
    private boolean managed;

    private User user;

    private RpcRetryingCallerFactory rpcCallerFactory;

    private RpcControllerFactory rpcControllerFactory;

    private final RetryingCallerInterceptor interceptor;

    // Cluster registry (e.g. ZooKeeper-based) for cluster id, meta location
    // and table state.
    Registry registry;

    private final ClientBackoffPolicy backoffPolicy;
600
    /** Convenience constructor: no external pool, default (current) user. */
    HConnectionImplementation(Configuration conf, boolean managed) throws IOException {
      this(conf, managed, null, null);
    }
604
605
606
607
608
609
610
611
612
613
614
615
    /**
     * Full constructor: sets up the cluster registry, resolves the cluster id,
     * creates the RPC client and, when configured, a listener for dead
     * region-server notifications.
     * @param conf configuration for this connection
     * @param managed true when lifecycle is managed by the shared-connection cache
     * @param pool optional executor for batch operations (lazily created if null)
     * @param user user to run as (may be null)
     */
    HConnectionImplementation(Configuration conf, boolean managed,
        ExecutorService pool, User user) throws IOException {
      this(conf);
      this.user = user;
      this.batchPool = pool;
      this.managed = managed;
      this.registry = setupRegistry();
      retrieveClusterId();

      // The RPC client is created only after the cluster id is known.
      this.rpcClient = RpcClientFactory.createClient(this.conf, this.clusterId);
      this.rpcControllerFactory = RpcControllerFactory.instantiate(conf);

      // Optionally subscribe to cluster status so caches and in-flight RPCs
      // to newly-dead servers can be dropped promptly.
      boolean shouldListen = conf.getBoolean(HConstants.STATUS_PUBLISHED,
          HConstants.STATUS_PUBLISHED_DEFAULT);
      Class<? extends ClusterStatusListener.Listener> listenerClass =
          conf.getClass(ClusterStatusListener.STATUS_LISTENER_CLASS,
              ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS,
              ClusterStatusListener.Listener.class);
      if (shouldListen) {
        if (listenerClass == null) {
          LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
              ClusterStatusListener.STATUS_LISTENER_CLASS + " is not set - not listening status");
        } else {
          clusterStatusListener = new ClusterStatusListener(
              new ClusterStatusListener.DeadServerHandler() {
                @Override
                public void newDead(ServerName sn) {
                  clearCaches(sn);
                  rpcClient.cancelConnections(sn);
                }
              }, conf, listenerClass);
        }
      }
    }
651
652
653
654
    /**
     * Constructor used by subclasses/tests: initializes configuration-derived
     * state only (no registry, RPC client or cluster-id lookup).
     */
    protected HConnectionImplementation(Configuration conf) {
      this.conf = conf;
      this.tableConfig = new TableConfiguration(conf);
      this.closed = false;
      this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
          HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
      this.numTries = tableConfig.getRetriesNumber();
      this.rpcTimeout = conf.getInt(
          HConstants.HBASE_RPC_TIMEOUT_KEY,
          HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
      if (conf.getBoolean(CLIENT_NONCES_ENABLED_KEY, true)) {
        synchronized (nonceGeneratorCreateLock) {
          // One process-wide nonce generator, lazily created and then shared
          // by every connection in this JVM.
          if (ConnectionManager.nonceGenerator == null) {
            ConnectionManager.nonceGenerator = new PerClientRandomNonceGenerator();
          }
          this.nonceGenerator = ConnectionManager.nonceGenerator;
        }
      } else {
        this.nonceGenerator = new NoNonceGenerator();
      }
      stats = ServerStatisticTracker.create(conf);
      this.asyncProcess = createAsyncProcess(this.conf);
      this.interceptor = (new RetryingCallerInterceptorFactory(conf)).build();
      this.rpcCallerFactory = RpcRetryingCallerFactory.instantiate(conf, interceptor, this.stats);
      this.backoffPolicy = ClientBackoffPolicyFactory.create(conf);
    }
681
682 @Override
683 public HTableInterface getTable(String tableName) throws IOException {
684 return getTable(TableName.valueOf(tableName));
685 }
686
687 @Override
688 public HTableInterface getTable(byte[] tableName) throws IOException {
689 return getTable(TableName.valueOf(tableName));
690 }
691
692 @Override
693 public HTableInterface getTable(TableName tableName) throws IOException {
694 return getTable(tableName, getBatchPool());
695 }
696
697 @Override
698 public HTableInterface getTable(String tableName, ExecutorService pool) throws IOException {
699 return getTable(TableName.valueOf(tableName), pool);
700 }
701
702 @Override
703 public HTableInterface getTable(byte[] tableName, ExecutorService pool) throws IOException {
704 return getTable(TableName.valueOf(tableName), pool);
705 }
706
707 @Override
708 public HTableInterface getTable(TableName tableName, ExecutorService pool) throws IOException {
709 if (managed) {
710 throw new NeedUnmanagedConnectionException();
711 }
712 return new HTable(tableName, this, tableConfig, rpcCallerFactory, rpcControllerFactory, pool);
713 }
714
715 @Override
716 public BufferedMutator getBufferedMutator(BufferedMutatorParams params) {
717 if (params.getTableName() == null) {
718 throw new IllegalArgumentException("TableName cannot be null.");
719 }
720 if (params.getPool() == null) {
721 params.pool(HTable.getDefaultExecutor(getConfiguration()));
722 }
723 if (params.getWriteBufferSize() == BufferedMutatorParams.UNSET) {
724 params.writeBufferSize(tableConfig.getWriteBufferSize());
725 }
726 if (params.getMaxKeyValueSize() == BufferedMutatorParams.UNSET) {
727 params.maxKeyValueSize(tableConfig.getMaxKeyValueSize());
728 }
729 return new BufferedMutatorImpl(this, rpcCallerFactory, rpcControllerFactory, params);
730 }
731
    /** Get a BufferedMutator with all-default parameters for the given table. */
    @Override
    public BufferedMutator getBufferedMutator(TableName tableName) {
      return getBufferedMutator(new BufferedMutatorParams(tableName));
    }
736
737 @Override
738 public RegionLocator getRegionLocator(TableName tableName) throws IOException {
739 if (managed) {
740 throw new IOException("The connection has to be unmanaged.");
741 }
742 return new HTable(
743 tableName, this, tableConfig, rpcCallerFactory, rpcControllerFactory, getBatchPool());
744 }
745
    /**
     * Get an Admin backed by this connection.
     * @throws NeedUnmanagedConnectionException when this connection is managed
     */
    @Override
    public Admin getAdmin() throws IOException {
      if (managed) {
        throw new NeedUnmanagedConnectionException();
      }
      return new HBaseAdmin(this);
    }
753
    /**
     * Lazily create the shared batch pool (double-checked locking; batchPool
     * is a volatile field). Sizes, keep-alive and queue capacity come from
     * configuration; core threads may time out so an idle connection holds no
     * threads.
     */
    private ExecutorService getBatchPool() {
      if (batchPool == null) {
        // Re-check under the lock in case another thread created it first.
        synchronized (this) {
          if (batchPool == null) {
            int maxThreads = conf.getInt("hbase.hconnection.threads.max", 256);
            int coreThreads = conf.getInt("hbase.hconnection.threads.core", 256);
            // A configured value of 0 means "scale with available CPUs".
            if (maxThreads == 0) {
              maxThreads = Runtime.getRuntime().availableProcessors() * 8;
            }
            if (coreThreads == 0) {
              coreThreads = Runtime.getRuntime().availableProcessors() * 8;
            }
            long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60);
            // Bounded queue whose capacity scales with the max total
            // outstanding client tasks allowed.
            LinkedBlockingQueue<Runnable> workQueue =
              new LinkedBlockingQueue<Runnable>(maxThreads *
                  conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
                      HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS));
            ThreadPoolExecutor tpe = new ThreadPoolExecutor(
                coreThreads,
                maxThreads,
                keepAliveTime,
                TimeUnit.SECONDS,
                workQueue,
                Threads.newDaemonThreadFactory(toString() + "-shared-"));
            tpe.allowCoreThreadTimeOut(true);
            this.batchPool = tpe;
          }
          // The pool reached here was created by this connection, so we own
          // its shutdown (see shutdownBatchPool()).
          this.cleanupPool = true;
        }
      }
      return this.batchPool;
    }
787
    /** Return the batch pool as-is (may be null if not yet lazily created). */
    protected ExecutorService getCurrentBatchPool() {
      return batchPool;
    }
791
792 private void shutdownBatchPool() {
793 if (this.cleanupPool && this.batchPool != null && !this.batchPool.isShutdown()) {
794 this.batchPool.shutdown();
795 try {
796 if (!this.batchPool.awaitTermination(10, TimeUnit.SECONDS)) {
797 this.batchPool.shutdownNow();
798 }
799 } catch (InterruptedException e) {
800 this.batchPool.shutdownNow();
801 }
802 }
803 }
804
805
806
807
808
    /** Create the cluster registry (implementation chosen by RegistryFactory). */
    private Registry setupRegistry() throws IOException {
      return RegistryFactory.getRegistry(this);
    }
812
813
814
815
    /** Test hook: expose the underlying RPC client. */
    @VisibleForTesting
    RpcClient getRpcClient() {
      return rpcClient;
    }
820
821
822
823
824 @Override
825 public String toString(){
826 return "hconnection-0x" + Integer.toHexString(hashCode());
827 }
828
    // Cluster id fetched from the registry; defaulted when unavailable.
    protected String clusterId = null;

    /**
     * Populate clusterId from the registry, at most once; falls back to
     * {@link HConstants#CLUSTER_ID_DEFAULT} when the registry returns null.
     */
    void retrieveClusterId() {
      if (clusterId != null) return;
      this.clusterId = this.registry.getClusterId();
      if (clusterId == null) {
        clusterId = HConstants.CLUSTER_ID_DEFAULT;
        LOG.debug("clusterid came back null, using default " + clusterId);
      }
    }
839
    /** Return the configuration this connection was created with. */
    @Override
    public Configuration getConfiguration() {
      return this.conf;
    }
844
    /**
     * Verify the configured base znode exists in ZooKeeper; its absence means
     * no master has ever run against this 'zookeeper.znode.parent'.
     * @throws MasterNotRunningException when the node is missing or ZooKeeper
     *   cannot be reached
     */
    private void checkIfBaseNodeAvailable(ZooKeeperWatcher zkw)
      throws MasterNotRunningException {
      String errorMsg;
      try {
        if (ZKUtil.checkExists(zkw, zkw.baseZNode) == -1) {
          errorMsg = "The node " + zkw.baseZNode+" is not in ZooKeeper. "
            + "It should have been written by the master. "
            + "Check the value configured in 'zookeeper.znode.parent'. "
            + "There could be a mismatch with the one configured in the master.";
          LOG.error(errorMsg);
          throw new MasterNotRunningException(errorMsg);
        }
      } catch (KeeperException e) {
        errorMsg = "Can't get connection to ZooKeeper: " + e.getMessage();
        LOG.error(errorMsg);
        throw new MasterNotRunningException(errorMsg, e);
      }
    }
863
864
865
866
867
868
    /**
     * Probe the master by opening (and immediately releasing) a keep-alive
     * master connection. Never returns false: failure surfaces as an
     * exception from getKeepAliveMasterService().
     * @deprecated this has been deprecated without a replacement.
     */
    @Deprecated
    @Override
    public boolean isMasterRunning()
    throws MasterNotRunningException, ZooKeeperConnectionException {
      // getKeepAliveMasterService() throws when no master is reachable, so
      // arriving at the return statement means the master answered.
      MasterKeepAliveConnection m = getKeepAliveMasterService();
      m.close();
      return true;
    }
880
881 @Override
882 public HRegionLocation getRegionLocation(final TableName tableName,
883 final byte [] row, boolean reload)
884 throws IOException {
885 return reload? relocateRegion(tableName, row): locateRegion(tableName, row);
886 }
887
    /** Byte-array table-name overload of getRegionLocation(TableName, byte[], boolean). */
    @Override
    public HRegionLocation getRegionLocation(final byte[] tableName,
        final byte [] row, boolean reload)
    throws IOException {
      return getRegionLocation(TableName.valueOf(tableName), row, reload);
    }
894
    /** True when the registry reports the table's state as enabled. */
    @Override
    public boolean isTableEnabled(TableName tableName) throws IOException {
      return this.registry.isTableOnlineState(tableName, true);
    }

    /** Byte-array overload of {@link #isTableEnabled(TableName)}. */
    @Override
    public boolean isTableEnabled(byte[] tableName) throws IOException {
      return isTableEnabled(TableName.valueOf(tableName));
    }

    /** True when the registry reports the table's state as disabled. */
    @Override
    public boolean isTableDisabled(TableName tableName) throws IOException {
      return this.registry.isTableOnlineState(tableName, false);
    }

    /** Byte-array overload of {@link #isTableDisabled(TableName)}. */
    @Override
    public boolean isTableDisabled(byte[] tableName) throws IOException {
      return isTableDisabled(TableName.valueOf(tableName));
    }
914
    /**
     * A table is "available" when a meta scan finds at least one region for
     * it and every region found has an assigned server.
     */
    @Override
    public boolean isTableAvailable(final TableName tableName) throws IOException {
      final AtomicBoolean available = new AtomicBoolean(true);
      final AtomicInteger regionCount = new AtomicInteger(0);
      MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result row) throws IOException {
          HRegionInfo info = MetaScanner.getHRegionInfo(row);
          if (info != null && !info.isSplitParent()) {
            if (tableName.equals(info.getTable())) {
              ServerName server = HRegionInfo.getServerName(row);
              if (server == null) {
                // Unassigned region => table is not fully available; stop the scan.
                available.set(false);
                return false;
              }
              regionCount.incrementAndGet();
            } else if (tableName.compareTo(info.getTable()) < 0) {
              // Meta rows are sorted by table name; we've moved past our table.
              return false;
            }
          }
          return true;
        }
      };
      MetaScanner.metaScan(this, visitor, tableName);
      return available.get() && (regionCount.get() > 0);
    }

    /** Byte-array overload of {@link #isTableAvailable(TableName)}. */
    @Override
    public boolean isTableAvailable(final byte[] tableName) throws IOException {
      return isTableAvailable(TableName.valueOf(tableName));
    }
947
    /**
     * Availability check for a table created with explicit split keys: every
     * region must be assigned, and the count of regions whose start key is
     * empty or matches one of the supplied split keys must equal
     * {@code splitKeys.length + 1}.
     */
    @Override
    public boolean isTableAvailable(final TableName tableName, final byte[][] splitKeys)
        throws IOException {
      final AtomicBoolean available = new AtomicBoolean(true);
      final AtomicInteger regionCount = new AtomicInteger(0);
      MetaScannerVisitor visitor = new MetaScannerVisitorBase() {
        @Override
        public boolean processRow(Result row) throws IOException {
          HRegionInfo info = MetaScanner.getHRegionInfo(row);
          if (info != null && !info.isSplitParent()) {
            if (tableName.equals(info.getTable())) {
              ServerName server = HRegionInfo.getServerName(row);
              if (server == null) {
                // Unassigned region => not available; stop scanning.
                available.set(false);
                return false;
              }
              if (!Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) {
                // Count this region only when its start key is one of the
                // requested split keys (ignores regions from later splits).
                for (byte[] splitKey : splitKeys) {
                  if (Bytes.equals(info.getStartKey(), splitKey)) {
                    regionCount.incrementAndGet();
                    break;
                  }
                }
              } else {
                // The table's first region (empty start key) always counts.
                regionCount.incrementAndGet();
              }
            } else if (tableName.compareTo(info.getTable()) < 0) {
              // Past this table's rows in the sorted meta scan; stop.
              return false;
            }
          }
          return true;
        }
      };
      MetaScanner.metaScan(this, visitor, tableName);
      // n split keys produce n + 1 regions.
      return available.get() && (regionCount.get() == splitKeys.length + 1);
    }

    /** Byte-array overload of {@link #isTableAvailable(TableName, byte[][])}. */
    @Override
    public boolean isTableAvailable(final byte[] tableName, final byte[][] splitKeys)
        throws IOException {
      return isTableAvailable(TableName.valueOf(tableName), splitKeys);
    }
994
995 @Override
996 public HRegionLocation locateRegion(final byte[] regionName) throws IOException {
997 RegionLocations locations = locateRegion(HRegionInfo.getTable(regionName),
998 HRegionInfo.getStartKey(regionName), false, true);
999 return locations == null ? null : locations.getRegionLocation();
1000 }
1001
1002 @Override
1003 public boolean isDeadServer(ServerName sn) {
1004 if (clusterStatusListener == null) {
1005 return false;
1006 } else {
1007 return clusterStatusListener.isDeadServer(sn);
1008 }
1009 }
1010
1011 @Override
1012 public List<HRegionLocation> locateRegions(final TableName tableName)
1013 throws IOException {
1014 return locateRegions (tableName, false, true);
1015 }
1016
1017 @Override
1018 public List<HRegionLocation> locateRegions(final byte[] tableName)
1019 throws IOException {
1020 return locateRegions(TableName.valueOf(tableName));
1021 }
1022
@Override
public List<HRegionLocation> locateRegions(final TableName tableName,
    final boolean useCache, final boolean offlined) throws IOException {
  // Enumerate all regions of the table from hbase:meta, then resolve each
  // region's locations (all replicas) by its start key.
  // NOTE(review): the "offlined" parameter is not consulted in this method;
  // confirm whether offline filtering happens inside
  // MetaScanner.allTableRegions or is simply unimplemented here.
  NavigableMap<HRegionInfo, ServerName> regions = MetaScanner.allTableRegions(this, tableName);
  final List<HRegionLocation> locations = new ArrayList<HRegionLocation>();
  for (HRegionInfo regionInfo : regions.keySet()) {
    // retry=true for each per-region lookup.
    RegionLocations list = locateRegion(tableName, regionInfo.getStartKey(), useCache, true);
    if (list != null) {
      for (HRegionLocation loc : list.getRegionLocations()) {
        if (loc != null) {
          locations.add(loc);
        }
      }
    }
  }
  return locations;
}
1040
1041 @Override
1042 public List<HRegionLocation> locateRegions(final byte[] tableName,
1043 final boolean useCache, final boolean offlined) throws IOException {
1044 return locateRegions(TableName.valueOf(tableName), useCache, offlined);
1045 }
1046
1047 @Override
1048 public HRegionLocation locateRegion(
1049 final TableName tableName, final byte[] row) throws IOException{
1050 RegionLocations locations = locateRegion(tableName, row, true, true);
1051 return locations == null ? null : locations.getRegionLocation();
1052 }
1053
1054 @Override
1055 public HRegionLocation locateRegion(final byte[] tableName,
1056 final byte [] row)
1057 throws IOException{
1058 return locateRegion(TableName.valueOf(tableName), row);
1059 }
1060
1061 @Override
1062 public HRegionLocation relocateRegion(final TableName tableName,
1063 final byte [] row) throws IOException{
1064 RegionLocations locations = relocateRegion(tableName, row,
1065 RegionReplicaUtil.DEFAULT_REPLICA_ID);
1066 return locations == null ? null :
1067 locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID);
1068 }
1069
@Override
public RegionLocations relocateRegion(final TableName tableName,
    final byte [] row, int replicaId) throws IOException{
  // relocateRegion is called after a cache miss or stale location, so check
  // the table's state first: callers should get TableNotEnabledException
  // rather than retrying lookups against a disabled table. hbase:meta itself
  // can never be disabled, so skip the check for it.
  if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) {
    throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled.");
  }
  // useCache=false forces a fresh read of hbase:meta.
  return locateRegion(tableName, row, false, true, replicaId);
}
1082
1083 @Override
1084 public HRegionLocation relocateRegion(final byte[] tableName,
1085 final byte [] row) throws IOException {
1086 return relocateRegion(TableName.valueOf(tableName), row);
1087 }
1088
1089 @Override
1090 public RegionLocations locateRegion(final TableName tableName,
1091 final byte [] row, boolean useCache, boolean retry)
1092 throws IOException {
1093 return locateRegion(tableName, row, useCache, retry, RegionReplicaUtil.DEFAULT_REPLICA_ID);
1094 }
1095
1096 @Override
1097 public RegionLocations locateRegion(final TableName tableName,
1098 final byte [] row, boolean useCache, boolean retry, int replicaId)
1099 throws IOException {
1100 if (this.closed) throw new IOException(toString() + " closed");
1101 if (tableName== null || tableName.getName().length == 0) {
1102 throw new IllegalArgumentException(
1103 "table name cannot be null or zero length");
1104 }
1105 if (tableName.equals(TableName.META_TABLE_NAME)) {
1106 return locateMeta(tableName, useCache, replicaId);
1107 } else {
1108
1109 return locateRegionInMeta(tableName, row, useCache, retry, replicaId);
1110 }
1111 }
1112
private RegionLocations locateMeta(final TableName tableName,
    boolean useCache, int replicaId) throws IOException {
  // There is only one hbase:meta region, so its locations are cached under
  // the fixed empty-start-row key.
  byte[] metaCacheKey = HConstants.EMPTY_START_ROW;
  RegionLocations locations = null;
  if (useCache) {
    locations = getCachedLocation(tableName, metaCacheKey);
    if (locations != null) {
      return locations;
    }
  }

  // Serialize registry lookups so only one thread queries ZooKeeper at a time.
  synchronized (metaRegionLock) {
    // Double-checked: another thread may have populated the cache while we
    // were waiting for the lock.
    if (useCache) {
      locations = getCachedLocation(tableName, metaCacheKey);
      if (locations != null) {
        return locations;
      }
    }

    // Ask the cluster registry (e.g. ZooKeeper) for the meta location and
    // cache whatever we get back.
    locations = this.registry.getMetaRegionLocation();
    if (locations != null) {
      cacheLocation(tableName, locations);
    }
  }
  return locations;
}
1146
1147
1148
1149
1150
1151 private RegionLocations locateRegionInMeta(TableName tableName, byte[] row,
1152 boolean useCache, boolean retry, int replicaId) throws IOException {
1153
1154
1155
1156 if (useCache) {
1157 RegionLocations locations = getCachedLocation(tableName, row);
1158 if (locations != null && locations.getRegionLocation(replicaId) != null) {
1159 return locations;
1160 }
1161 }
1162
1163
1164
1165
1166 byte[] metaKey = HRegionInfo.createRegionName(tableName, row, HConstants.NINES, false);
1167
1168 Scan s = new Scan();
1169 s.setReversed(true);
1170 s.setStartRow(metaKey);
1171 s.setSmall(true);
1172 s.setCaching(1);
1173
1174 int localNumRetries = (retry ? numTries : 1);
1175
1176 for (int tries = 0; true; tries++) {
1177 if (tries >= localNumRetries) {
1178 throw new NoServerForRegionException("Unable to find region for "
1179 + Bytes.toStringBinary(row) + " in " + tableName +
1180 " after " + localNumRetries + " tries.");
1181 }
1182 if (useCache) {
1183 RegionLocations locations = getCachedLocation(tableName, row);
1184 if (locations != null && locations.getRegionLocation(replicaId) != null) {
1185 return locations;
1186 }
1187 } else {
1188
1189
1190 metaCache.clearCache(tableName, row);
1191 }
1192
1193
1194 try {
1195 Result regionInfoRow = null;
1196 ReversedClientScanner rcs = null;
1197 try {
1198 rcs = new ClientSmallReversedScanner(conf, s, TableName.META_TABLE_NAME, this,
1199 rpcCallerFactory, rpcControllerFactory, getBatchPool(), 0);
1200 regionInfoRow = rcs.next();
1201 } finally {
1202 if (rcs != null) {
1203 rcs.close();
1204 }
1205 }
1206
1207 if (regionInfoRow == null) {
1208 throw new TableNotFoundException(tableName);
1209 }
1210
1211
1212 RegionLocations locations = MetaTableAccessor.getRegionLocations(regionInfoRow);
1213 if (locations == null || locations.getRegionLocation(replicaId) == null) {
1214 throw new IOException("HRegionInfo was null in " +
1215 tableName + ", row=" + regionInfoRow);
1216 }
1217 HRegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegionInfo();
1218 if (regionInfo == null) {
1219 throw new IOException("HRegionInfo was null or empty in " +
1220 TableName.META_TABLE_NAME + ", row=" + regionInfoRow);
1221 }
1222
1223
1224 if (!regionInfo.getTable().equals(tableName)) {
1225 throw new TableNotFoundException(
1226 "Table '" + tableName + "' was not found, got: " +
1227 regionInfo.getTable() + ".");
1228 }
1229 if (regionInfo.isSplit()) {
1230 throw new RegionOfflineException("the only available region for" +
1231 " the required row is a split parent," +
1232 " the daughters should be online soon: " +
1233 regionInfo.getRegionNameAsString());
1234 }
1235 if (regionInfo.isOffline()) {
1236 throw new RegionOfflineException("the region is offline, could" +
1237 " be caused by a disable table call: " +
1238 regionInfo.getRegionNameAsString());
1239 }
1240
1241 ServerName serverName = locations.getRegionLocation(replicaId).getServerName();
1242 if (serverName == null) {
1243 throw new NoServerForRegionException("No server address listed " +
1244 "in " + TableName.META_TABLE_NAME + " for region " +
1245 regionInfo.getRegionNameAsString() + " containing row " +
1246 Bytes.toStringBinary(row));
1247 }
1248
1249 if (isDeadServer(serverName)){
1250 throw new RegionServerStoppedException("hbase:meta says the region "+
1251 regionInfo.getRegionNameAsString()+" is managed by the server " + serverName +
1252 ", but it is dead.");
1253 }
1254
1255 cacheLocation(tableName, locations);
1256 return locations;
1257 } catch (TableNotFoundException e) {
1258
1259
1260
1261 throw e;
1262 } catch (IOException e) {
1263 ExceptionUtil.rethrowIfInterrupt(e);
1264
1265 if (e instanceof RemoteException) {
1266 e = ((RemoteException)e).unwrapRemoteException();
1267 }
1268 if (tries < localNumRetries - 1) {
1269 if (LOG.isDebugEnabled()) {
1270 LOG.debug("locateRegionInMeta parentTable=" +
1271 TableName.META_TABLE_NAME + ", metaLocation=" +
1272 ", attempt=" + tries + " of " +
1273 localNumRetries + " failed; retrying after sleep of " +
1274 ConnectionUtils.getPauseTime(this.pause, tries) + " because: " + e.getMessage());
1275 }
1276 } else {
1277 throw e;
1278 }
1279
1280 if(!(e instanceof RegionOfflineException ||
1281 e instanceof NoServerForRegionException)) {
1282 relocateRegion(TableName.META_TABLE_NAME, metaKey, replicaId);
1283 }
1284 }
1285 try{
1286 Thread.sleep(ConnectionUtils.getPauseTime(this.pause, tries));
1287 } catch (InterruptedException e) {
1288 throw new InterruptedIOException("Giving up trying to location region in " +
1289 "meta: thread is interrupted.");
1290 }
1291 }
1292 }
1293
1294
1295
1296
1297
1298
// Puts (or merges) the given locations into the meta cache for the table.
private void cacheLocation(final TableName tableName, final RegionLocations location) {
  metaCache.cacheLocation(tableName, location);
}
1302
1303
1304
1305
1306
1307
1308
1309
1310
// Returns the cached locations for the region containing "row" in the given
// table, or null when nothing is cached.
RegionLocations getCachedLocation(final TableName tableName,
    final byte [] row) {
  return metaCache.getCachedLocation(tableName, row);
}
1315
// Evicts the cached location of the region containing "row" in "tableName".
public void clearRegionCache(final TableName tableName, byte[] row) {
  metaCache.clearCache(tableName, row);
}
1319
1320
1321
1322
@Override
// Evicts every cached location that points at the given server.
public void clearCaches(final ServerName serverName) {
  metaCache.clearCache(serverName);
}
1327
@Override
// Evicts the entire region location cache.
public void clearRegionCache() {
  metaCache.clearCache();
}
1332
@Override
// Evicts all cached locations for the given table.
public void clearRegionCache(final TableName tableName) {
  metaCache.clearCache(tableName);
}
1337
1338 @Override
1339 public void clearRegionCache(final byte[] tableName) {
1340 clearRegionCache(TableName.valueOf(tableName));
1341 }
1342
1343
1344
1345
1346
1347
1348
// Caches a single region location learned from "source" (e.g. from a
// moved-region response) for the given table.
private void cacheLocation(final TableName tableName, final ServerName source,
    final HRegionLocation location) {
  metaCache.cacheLocation(tableName, source, location);
}
1353
1354
// Cache of RPC service stubs (admin/client/master), keyed by
// "serviceName@host:port" — see getStubKey().
private final ConcurrentHashMap<String, Object> stubs =
  new ConcurrentHashMap<String, Object>();

// Canonical per-key lock objects used to serialize stub creation; the key
// string itself (inserted via putIfAbsent) is the monitor.
private final ConcurrentHashMap<String, String> connectionLock =
  new ConcurrentHashMap<String, String>();
1360
1361
1362
1363
1364 static class MasterServiceState {
1365 HConnection connection;
1366 MasterService.BlockingInterface stub;
1367 int userCount;
1368 long keepAliveUntil = Long.MAX_VALUE;
1369
1370 MasterServiceState (final HConnection connection) {
1371 super();
1372 this.connection = connection;
1373 }
1374
1375 @Override
1376 public String toString() {
1377 return "MasterService";
1378 }
1379
1380 Object getStub() {
1381 return this.stub;
1382 }
1383
1384 void clearStub() {
1385 this.stub = null;
1386 }
1387
1388 boolean isMasterRunning() throws ServiceException {
1389 IsMasterRunningResponse response =
1390 this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest());
1391 return response != null? response.getIsMasterRunning(): false;
1392 }
1393 }
1394
1395
1396
1397
1398
1399
/**
 * Builds a protobuf service stub for the currently-active master, resolving
 * the master's address through ZooKeeper. Subclasses supply the concrete
 * service; created stubs are cached in the connection-wide "stubs" map.
 */
abstract class StubMaker {
  // Name of the protobuf service this maker builds stubs for.
  protected abstract String getServiceName();

  // Wraps the RPC channel in the concrete service stub.
  protected abstract Object makeStub(final BlockingRpcChannel channel);

  // Pings the freshly-created stub; throws if the master is not running.
  protected abstract void isMasterRunning() throws ServiceException;

  // Single attempt at building (or fetching from cache) a master stub:
  // locate the active master via ZooKeeper, refuse known-dead masters, then
  // create and validate the stub under the per-key connection lock.
  private Object makeStubNoRetries() throws IOException, KeeperException, ServiceException {
    ZooKeeperKeepAliveConnection zkw;
    try {
      zkw = getKeepAliveZooKeeperWatcher();
    } catch (IOException e) {
      ExceptionUtil.rethrowIfInterrupt(e);
      throw new ZooKeeperConnectionException("Can't connect to ZooKeeper", e);
    }
    try {
      checkIfBaseNodeAvailable(zkw);
      ServerName sn = MasterAddressTracker.getMasterAddress(zkw);
      if (sn == null) {
        String msg = "ZooKeeper available but no active master location found";
        LOG.info(msg);
        throw new MasterNotRunningException(msg);
      }
      if (isDeadServer(sn)) {
        throw new MasterNotRunningException(sn + " is dead.");
      }
      // One stub per service per master address.
      String key = getStubKey(getServiceName(), sn.getHostAndPort());
      connectionLock.putIfAbsent(key, key);
      Object stub = null;
      synchronized (connectionLock.get(key)) {
        stub = stubs.get(key);
        if (stub == null) {
          BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout);
          stub = makeStub(channel);
          // Validate before caching so a dead master's stub is never stored.
          isMasterRunning();
          stubs.put(key, stub);
        }
      }
      return stub;
    } finally {
      // Release the keep-alive watcher reference taken above.
      zkw.close();
    }
  }

  // Builds the stub, translating any failure into MasterNotRunningException.
  // NOTE(review): despite the "NoRetries" naming of the helper, this method
  // itself makes exactly one attempt — confirm whether retrying was intended.
  Object makeStub() throws IOException {
    // Hold masterAndZKLock for the whole operation so concurrent callers do
    // not each create (and leak) their own master connection.
    synchronized (masterAndZKLock) {
      Exception exceptionCaught = null;
      if (!closed) {
        try {
          return makeStubNoRetries();
        } catch (IOException e) {
          exceptionCaught = e;
        } catch (KeeperException e) {
          exceptionCaught = e;
        } catch (ServiceException e) {
          exceptionCaught = e;
        }

        // Preserve the original failure as the cause.
        throw new MasterNotRunningException(exceptionCaught);
      } else {
        throw new DoNotRetryIOException("Connection was closed while trying to get master");
      }
    }
  }
}
1492
1493
1494
1495
/** StubMaker specialization that produces MasterService blocking stubs. */
class MasterServiceStubMaker extends StubMaker {
  private MasterService.BlockingInterface stub;
  @Override
  protected String getServiceName() {
    return MasterService.getDescriptor().getName();
  }

  // Covariant override narrowing the return type for convenience.
  @Override
  MasterService.BlockingInterface makeStub() throws IOException {
    return (MasterService.BlockingInterface)super.makeStub();
  }

  @Override
  protected Object makeStub(BlockingRpcChannel channel) {
    this.stub = MasterService.newBlockingStub(channel);
    return this.stub;
  }

  @Override
  protected void isMasterRunning() throws ServiceException {
    this.stub.isMasterRunning(null, RequestConverter.buildIsMasterRunningRequest());
  }
}
1519
1520 @Override
1521 public AdminService.BlockingInterface getAdmin(final ServerName serverName)
1522 throws IOException {
1523 return getAdmin(serverName, false);
1524 }
1525
@Override
// Returns (creating and caching on first use) the AdminService stub for the
// given server.
// NOTE(review): the "master" parameter is accepted but never read in this
// method; confirm whether callers still need it.
public AdminService.BlockingInterface getAdmin(final ServerName serverName,
    final boolean master)
    throws IOException {
  if (isDeadServer(serverName)) {
    // Fail fast rather than hand out a stub to a known-dead server.
    throw new RegionServerStoppedException(serverName + " is dead.");
  }
  String key = getStubKey(AdminService.BlockingInterface.class.getName(),
    serverName.getHostAndPort());
  // putIfAbsent guarantees a single canonical lock object per stub key.
  this.connectionLock.putIfAbsent(key, key);
  AdminService.BlockingInterface stub = null;
  synchronized (this.connectionLock.get(key)) {
    stub = (AdminService.BlockingInterface)this.stubs.get(key);
    if (stub == null) {
      BlockingRpcChannel channel =
        this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout);
      stub = AdminService.newBlockingStub(channel);
      this.stubs.put(key, stub);
    }
  }
  return stub;
}
1549
@Override
// Returns (creating and caching on first use) the ClientService stub for the
// given server.
public ClientService.BlockingInterface getClient(final ServerName sn)
    throws IOException {
  if (isDeadServer(sn)) {
    // Fail fast rather than hand out a stub to a known-dead server.
    throw new RegionServerStoppedException(sn + " is dead.");
  }
  String key = getStubKey(ClientService.BlockingInterface.class.getName(), sn.getHostAndPort());
  // putIfAbsent guarantees a single canonical lock object per stub key.
  this.connectionLock.putIfAbsent(key, key);
  ClientService.BlockingInterface stub = null;
  synchronized (this.connectionLock.get(key)) {
    stub = (ClientService.BlockingInterface)this.stubs.get(key);
    if (stub == null) {
      BlockingRpcChannel channel =
        this.rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout);
      stub = ClientService.newBlockingStub(channel);
      // In old days, after getting stub/proxy, we'd make a call. We are not
      // doing that here; validation happens on first real use.
      this.stubs.put(key, stub);
    }
  }
  return stub;
}
1572
1573 static String getStubKey(final String serviceName, final String rsHostnamePort) {
1574 return serviceName + "@" + rsHostnamePort;
1575 }
1576
// Shared keep-alive ZooKeeper watcher; lazily created under masterAndZKLock
// and reference-counted by keepAliveZookeeperUserCount.
private ZooKeeperKeepAliveConnection keepAliveZookeeper;
// Number of callers currently holding the keep-alive watcher.
private AtomicInteger keepAliveZookeeperUserCount = new AtomicInteger(0);
// Whether the delayed-closing chore is allowed to close the ZK watcher.
private boolean canCloseZKW = true;

// Keep-alive window (ms): idle ZK/master resources are closed this long
// after their last user releases them.
private static final long keepAlive = 5 * 60 * 1000;
1583
1584
1585
1586
1587
// Returns the shared keep-alive ZooKeeper watcher, lazily creating it on
// first use. Callers must pair this with releaseZooKeeperWatcher(); the user
// count keeps the delayed-closing chore from shutting the watcher down while
// it is in use.
ZooKeeperKeepAliveConnection getKeepAliveZooKeeperWatcher()
  throws IOException {
  synchronized (masterAndZKLock) {
    if (keepAliveZookeeper == null) {
      if (this.closed) {
        throw new IOException(toString() + " closed");
      }
      // Lazily create the watcher under the lock so only one is ever made.
      keepAliveZookeeper = new ZooKeeperKeepAliveConnection(conf, this.toString(), this);
    }
    keepAliveZookeeperUserCount.addAndGet(1);
    // Cancel any pending delayed close while there is at least one user.
    keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE;
    return keepAliveZookeeper;
  }
}
1604
1605 void releaseZooKeeperWatcher(final ZooKeeperWatcher zkw) {
1606 if (zkw == null){
1607 return;
1608 }
1609 if (keepAliveZookeeperUserCount.addAndGet(-1) <= 0 ){
1610 keepZooKeeperWatcherAliveUntil = System.currentTimeMillis() + keepAlive;
1611 }
1612 }
1613
1614
1615
1616
1617
1618
1619
1620
1621 private static class DelayedClosing extends Chore implements Stoppable {
1622 private HConnectionImplementation hci;
1623 Stoppable stoppable;
1624
1625 private DelayedClosing(
1626 HConnectionImplementation hci, Stoppable stoppable){
1627 super(
1628 "ZooKeeperWatcher and Master delayed closing for connection "+hci,
1629 60*1000,
1630 stoppable);
1631 this.hci = hci;
1632 this.stoppable = stoppable;
1633 }
1634
1635 static DelayedClosing createAndStart(HConnectionImplementation hci){
1636 Stoppable stoppable = new Stoppable() {
1637 private volatile boolean isStopped = false;
1638 @Override public void stop(String why) { isStopped = true;}
1639 @Override public boolean isStopped() {return isStopped;}
1640 };
1641
1642 return new DelayedClosing(hci, stoppable);
1643 }
1644
1645 protected void closeMasterProtocol(MasterServiceState protocolState) {
1646 if (System.currentTimeMillis() > protocolState.keepAliveUntil) {
1647 hci.closeMasterService(protocolState);
1648 protocolState.keepAliveUntil = Long.MAX_VALUE;
1649 }
1650 }
1651
1652 @Override
1653 protected void chore() {
1654 synchronized (hci.masterAndZKLock) {
1655 if (hci.canCloseZKW) {
1656 if (System.currentTimeMillis() >
1657 hci.keepZooKeeperWatcherAliveUntil) {
1658
1659 hci.closeZooKeeperWatcher();
1660 hci.keepZooKeeperWatcherAliveUntil = Long.MAX_VALUE;
1661 }
1662 }
1663 closeMasterProtocol(hci.masterServiceState);
1664 closeMasterProtocol(hci.masterServiceState);
1665 }
1666 }
1667
1668 @Override
1669 public void stop(String why) {
1670 stoppable.stop(why);
1671 }
1672
1673 @Override
1674 public boolean isStopped() {
1675 return stoppable.isStopped();
1676 }
1677 }
1678
// Unconditionally closes the keep-alive ZooKeeper watcher (if any) and
// resets its user count to zero.
private void closeZooKeeperWatcher() {
  synchronized (masterAndZKLock) {
    if (keepAliveZookeeper != null) {
      LOG.info("Closing zookeeper sessionid=0x" +
        Long.toHexString(
          keepAliveZookeeper.getRecoverableZooKeeper().getSessionId()));
      // internalClose bypasses the keep-alive reference counting.
      keepAliveZookeeper.internalClose();
      keepAliveZookeeper = null;
    }
    keepAliveZookeeperUserCount.set(0);
  }
}
1691
// Shared book-keeping state for the keep-alive master service stub.
final MasterServiceState masterServiceState = new MasterServiceState(this);
1693
@Override
// Returns the keep-alive master connection (callers should close() it).
public MasterService.BlockingInterface getMaster() throws MasterNotRunningException {
  return getKeepAliveMasterService();
}
1698
// Registers a new user of the master stub and cancels any pending delayed
// close. NOTE(review): despite the "reset" name, this increments userCount;
// the matching decrement happens in releaseMaster().
private void resetMasterServiceState(final MasterServiceState mss) {
  mss.userCount++;
  mss.keepAliveUntil = Long.MAX_VALUE;
}
1703
/**
 * Returns a keep-alive wrapper around the shared master RPC stub. The stub
 * is (re)created under masterAndZKLock when absent or no longer running.
 * Callers must close() the returned connection so the user count drops and
 * the stub can eventually be closed by the delayed-closing chore.
 */
@Override
public MasterKeepAliveConnection getKeepAliveMasterService()
    throws MasterNotRunningException {
  synchronized (masterAndZKLock) {
    if (!isKeepAliveMasterConnectedAndRunning(this.masterServiceState)) {
      MasterServiceStubMaker stubMaker = new MasterServiceStubMaker();
      try {
        this.masterServiceState.stub = stubMaker.makeStub();
      } catch (MasterNotRunningException ex) {
        throw ex;
      } catch (IOException e) {
        // Any other connection failure surfaces as MasterNotRunningException.
        throw new MasterNotRunningException(e);
      }
    }
    resetMasterServiceState(this.masterServiceState);
  }

  // Wrap the stub in a delegating anonymous class so close() can decrement
  // the user count; every service method simply forwards to the stub.
  final MasterService.BlockingInterface stub = this.masterServiceState.stub;
  return new MasterKeepAliveConnection() {
    MasterServiceState mss = masterServiceState;
    @Override
    public AddColumnResponse addColumn(RpcController controller, AddColumnRequest request)
    throws ServiceException {
      return stub.addColumn(controller, request);
    }

    @Override
    public DeleteColumnResponse deleteColumn(RpcController controller,
        DeleteColumnRequest request)
    throws ServiceException {
      return stub.deleteColumn(controller, request);
    }

    @Override
    public ModifyColumnResponse modifyColumn(RpcController controller,
        ModifyColumnRequest request)
    throws ServiceException {
      return stub.modifyColumn(controller, request);
    }

    @Override
    public MoveRegionResponse moveRegion(RpcController controller,
        MoveRegionRequest request) throws ServiceException {
      return stub.moveRegion(controller, request);
    }

    @Override
    public DispatchMergingRegionsResponse dispatchMergingRegions(
        RpcController controller, DispatchMergingRegionsRequest request)
        throws ServiceException {
      return stub.dispatchMergingRegions(controller, request);
    }

    @Override
    public AssignRegionResponse assignRegion(RpcController controller,
        AssignRegionRequest request) throws ServiceException {
      return stub.assignRegion(controller, request);
    }

    @Override
    public UnassignRegionResponse unassignRegion(RpcController controller,
        UnassignRegionRequest request) throws ServiceException {
      return stub.unassignRegion(controller, request);
    }

    @Override
    public OfflineRegionResponse offlineRegion(RpcController controller,
        OfflineRegionRequest request) throws ServiceException {
      return stub.offlineRegion(controller, request);
    }

    @Override
    public DeleteTableResponse deleteTable(RpcController controller,
        DeleteTableRequest request) throws ServiceException {
      return stub.deleteTable(controller, request);
    }

    @Override
    public TruncateTableResponse truncateTable(RpcController controller,
        TruncateTableRequest request) throws ServiceException {
      return stub.truncateTable(controller, request);
    }

    @Override
    public EnableTableResponse enableTable(RpcController controller,
        EnableTableRequest request) throws ServiceException {
      return stub.enableTable(controller, request);
    }

    @Override
    public DisableTableResponse disableTable(RpcController controller,
        DisableTableRequest request) throws ServiceException {
      return stub.disableTable(controller, request);
    }

    @Override
    public ModifyTableResponse modifyTable(RpcController controller,
        ModifyTableRequest request) throws ServiceException {
      return stub.modifyTable(controller, request);
    }

    @Override
    public CreateTableResponse createTable(RpcController controller,
        CreateTableRequest request) throws ServiceException {
      return stub.createTable(controller, request);
    }

    @Override
    public ShutdownResponse shutdown(RpcController controller,
        ShutdownRequest request) throws ServiceException {
      return stub.shutdown(controller, request);
    }

    @Override
    public StopMasterResponse stopMaster(RpcController controller,
        StopMasterRequest request) throws ServiceException {
      return stub.stopMaster(controller, request);
    }

    @Override
    public BalanceResponse balance(RpcController controller,
        BalanceRequest request) throws ServiceException {
      return stub.balance(controller, request);
    }

    @Override
    public SetBalancerRunningResponse setBalancerRunning(
        RpcController controller, SetBalancerRunningRequest request)
        throws ServiceException {
      return stub.setBalancerRunning(controller, request);
    }

    @Override
    public RunCatalogScanResponse runCatalogScan(RpcController controller,
        RunCatalogScanRequest request) throws ServiceException {
      return stub.runCatalogScan(controller, request);
    }

    @Override
    public EnableCatalogJanitorResponse enableCatalogJanitor(
        RpcController controller, EnableCatalogJanitorRequest request)
        throws ServiceException {
      return stub.enableCatalogJanitor(controller, request);
    }

    @Override
    public IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled(
        RpcController controller, IsCatalogJanitorEnabledRequest request)
        throws ServiceException {
      return stub.isCatalogJanitorEnabled(controller, request);
    }

    @Override
    public CoprocessorServiceResponse execMasterService(
        RpcController controller, CoprocessorServiceRequest request)
        throws ServiceException {
      return stub.execMasterService(controller, request);
    }

    @Override
    public SnapshotResponse snapshot(RpcController controller,
        SnapshotRequest request) throws ServiceException {
      return stub.snapshot(controller, request);
    }

    @Override
    public GetCompletedSnapshotsResponse getCompletedSnapshots(
        RpcController controller, GetCompletedSnapshotsRequest request)
        throws ServiceException {
      return stub.getCompletedSnapshots(controller, request);
    }

    @Override
    public DeleteSnapshotResponse deleteSnapshot(RpcController controller,
        DeleteSnapshotRequest request) throws ServiceException {
      return stub.deleteSnapshot(controller, request);
    }

    @Override
    public IsSnapshotDoneResponse isSnapshotDone(RpcController controller,
        IsSnapshotDoneRequest request) throws ServiceException {
      return stub.isSnapshotDone(controller, request);
    }

    @Override
    public RestoreSnapshotResponse restoreSnapshot(
        RpcController controller, RestoreSnapshotRequest request)
        throws ServiceException {
      return stub.restoreSnapshot(controller, request);
    }

    @Override
    public IsRestoreSnapshotDoneResponse isRestoreSnapshotDone(
        RpcController controller, IsRestoreSnapshotDoneRequest request)
        throws ServiceException {
      return stub.isRestoreSnapshotDone(controller, request);
    }

    @Override
    public ExecProcedureResponse execProcedure(
        RpcController controller, ExecProcedureRequest request)
        throws ServiceException {
      return stub.execProcedure(controller, request);
    }

    @Override
    public ExecProcedureResponse execProcedureWithRet(
        RpcController controller, ExecProcedureRequest request)
        throws ServiceException {
      return stub.execProcedureWithRet(controller, request);
    }

    @Override
    public IsProcedureDoneResponse isProcedureDone(RpcController controller,
        IsProcedureDoneRequest request) throws ServiceException {
      return stub.isProcedureDone(controller, request);
    }

    @Override
    public IsMasterRunningResponse isMasterRunning(
        RpcController controller, IsMasterRunningRequest request)
        throws ServiceException {
      return stub.isMasterRunning(controller, request);
    }

    @Override
    public ModifyNamespaceResponse modifyNamespace(RpcController controller,
        ModifyNamespaceRequest request)
    throws ServiceException {
      return stub.modifyNamespace(controller, request);
    }

    @Override
    public CreateNamespaceResponse createNamespace(
        RpcController controller, CreateNamespaceRequest request) throws ServiceException {
      return stub.createNamespace(controller, request);
    }

    @Override
    public DeleteNamespaceResponse deleteNamespace(
        RpcController controller, DeleteNamespaceRequest request) throws ServiceException {
      return stub.deleteNamespace(controller, request);
    }

    @Override
    public GetNamespaceDescriptorResponse getNamespaceDescriptor(RpcController controller,
        GetNamespaceDescriptorRequest request) throws ServiceException {
      return stub.getNamespaceDescriptor(controller, request);
    }

    @Override
    public ListNamespaceDescriptorsResponse listNamespaceDescriptors(RpcController controller,
        ListNamespaceDescriptorsRequest request) throws ServiceException {
      return stub.listNamespaceDescriptors(controller, request);
    }

    @Override
    public ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace(
        RpcController controller, ListTableDescriptorsByNamespaceRequest request)
        throws ServiceException {
      return stub.listTableDescriptorsByNamespace(controller, request);
    }

    @Override
    public ListTableNamesByNamespaceResponse listTableNamesByNamespace(
        RpcController controller, ListTableNamesByNamespaceRequest request)
        throws ServiceException {
      return stub.listTableNamesByNamespace(controller, request);
    }

    // Releases this caller's hold on the shared master stub.
    @Override
    public void close() {
      release(this.mss);
    }

    @Override
    public GetSchemaAlterStatusResponse getSchemaAlterStatus(
        RpcController controller, GetSchemaAlterStatusRequest request)
        throws ServiceException {
      return stub.getSchemaAlterStatus(controller, request);
    }

    @Override
    public GetTableDescriptorsResponse getTableDescriptors(
        RpcController controller, GetTableDescriptorsRequest request)
        throws ServiceException {
      return stub.getTableDescriptors(controller, request);
    }

    @Override
    public GetTableNamesResponse getTableNames(
        RpcController controller, GetTableNamesRequest request)
        throws ServiceException {
      return stub.getTableNames(controller, request);
    }

    @Override
    public GetClusterStatusResponse getClusterStatus(
        RpcController controller, GetClusterStatusRequest request)
        throws ServiceException {
      return stub.getClusterStatus(controller, request);
    }
  };
}
2009
2010
// Decrements the user count for the master stub tracked by the given state,
// allowing it to be closed after the keep-alive window if unused.
private static void release(MasterServiceState mss) {
  if (mss != null && mss.connection != null) {
    ((HConnectionImplementation)mss.connection).releaseMaster(mss);
  }
}
2016
    /**
     * Returns true if the cached master stub is still usable, i.e. the stub
     * exists and the master answers an isMasterRunning probe.
     */
    private boolean isKeepAliveMasterConnectedAndRunning(MasterServiceState mss) {
      // No stub means we never connected (or the stub was already cleared).
      if (mss.getStub() == null){
        return false;
      }
      try {
        return mss.isMasterRunning();
      } catch (UndeclaredThrowableException e) {
        // The probe goes through a reflective proxy, so connection failures
        // surface wrapped; treat any of them as "master gone".
        LOG.info("Master connection is not running anymore", e.getUndeclaredThrowable());
        return false;
      } catch (ServiceException se) {
        LOG.warn("Checking master connection", se);
        return false;
      }
    }
2033
    /**
     * Release one user of the shared master connection.  When the last user
     * releases, a keep-alive deadline is armed instead of closing immediately
     * so a quick re-acquire can reuse the connection.
     */
    void releaseMaster(MasterServiceState mss) {
      if (mss.getStub() == null) return;
      synchronized (masterAndZKLock) {
        --mss.userCount;
        if (mss.userCount <= 0) {
          // Start the keep-alive countdown; the delayed-closing chore will
          // actually close the connection once this deadline passes.
          mss.keepAliveUntil = System.currentTimeMillis() + keepAlive;
        }
      }
    }
2043
    /**
     * Drop the cached master stub and reset its user count.  Callers are
     * expected to hold masterAndZKLock.
     */
    private void closeMasterService(MasterServiceState mss) {
      if (mss.getStub() != null) {
        LOG.info("Closing master protocol: " + mss);
        mss.clearStub();
      }
      // Reset unconditionally so a stale count cannot keep a dead stub alive.
      mss.userCount = 0;
    }
2051
2052
2053
2054
2055
    /**
     * Immediately close the current master connection, regardless of its
     * keep-alive user count.  Synchronized on masterAndZKLock.
     */
    private void closeMaster() {
      synchronized (masterAndZKLock) {
        closeMasterService(masterServiceState);
      }
    }
2061
    /**
     * Replace the cached location of {@code hri} with {@code serverName} at
     * sequence number {@code seqNum}, attributing the update to {@code source}.
     */
    void updateCachedLocation(HRegionInfo hri, ServerName source,
                              ServerName serverName, long seqNum) {
      HRegionLocation newHrl = new HRegionLocation(hri, serverName, seqNum);
      cacheLocation(hri.getTable(), source, newHrl);
    }
2067
    @Override
    public void deleteCachedRegionLocation(final HRegionLocation location) {
      // Evict just this entry from the meta cache.
      metaCache.clearCache(location);
    }
2072
2073 @Override
2074 public void updateCachedLocations(final TableName tableName, byte[] rowkey,
2075 final Object exception, final HRegionLocation source) {
2076 assert source != null;
2077 updateCachedLocations(tableName, source.getRegionInfo().getRegionName()
2078 , rowkey, exception, source.getServerName());
2079 }
2080
2081
2082
2083
2084
2085
2086
2087
2088
2089 @Override
2090 public void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey,
2091 final Object exception, final ServerName source) {
2092 if (rowkey == null || tableName == null) {
2093 LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) +
2094 ", tableName=" + (tableName == null ? "null" : tableName));
2095 return;
2096 }
2097
2098 if (source == null) {
2099
2100 return;
2101 }
2102
2103 if (regionName == null) {
2104
2105 metaCache.clearCache(tableName, rowkey, source);
2106 return;
2107 }
2108
2109
2110 final RegionLocations oldLocations = getCachedLocation(tableName, rowkey);
2111 HRegionLocation oldLocation = null;
2112 if (oldLocations != null) {
2113 oldLocation = oldLocations.getRegionLocationByRegionName(regionName);
2114 }
2115 if (oldLocation == null || !source.equals(oldLocation.getServerName())) {
2116
2117
2118 return;
2119 }
2120
2121 HRegionInfo regionInfo = oldLocation.getRegionInfo();
2122 Throwable cause = findException(exception);
2123 if (cause != null) {
2124 if (cause instanceof RegionTooBusyException || cause instanceof RegionOpeningException) {
2125
2126 return;
2127 }
2128
2129 if (cause instanceof RegionMovedException) {
2130 RegionMovedException rme = (RegionMovedException) cause;
2131 if (LOG.isTraceEnabled()) {
2132 LOG.trace("Region " + regionInfo.getRegionNameAsString() + " moved to " +
2133 rme.getHostname() + ":" + rme.getPort() +
2134 " according to " + source.getHostAndPort());
2135 }
2136
2137
2138 updateCachedLocation(
2139 regionInfo, source, rme.getServerName(), rme.getLocationSeqNum());
2140 return;
2141 }
2142 }
2143
2144
2145
2146 metaCache.clearCache(regionInfo);
2147 }
2148
2149 @Override
2150 public void updateCachedLocations(final byte[] tableName, byte[] rowkey,
2151 final Object exception, final HRegionLocation source) {
2152 updateCachedLocations(TableName.valueOf(tableName), rowkey, exception, source);
2153 }
2154
    /**
     * Execute a batch of actions against the given table.
     * @deprecated kept for interface compatibility; delegates to
     *             {@link #processBatchCallback} with a null callback.
     * @throws IllegalArgumentException if results does not match list in size.
     */
    @Override
    @Deprecated
    public void processBatch(List<? extends Row> list,
        final TableName tableName,
        ExecutorService pool,
        Object[] results) throws IOException, InterruptedException {
      // Results array is filled in-place, so its size must match the input.
      if (results.length != list.size()) {
        throw new IllegalArgumentException(
          "argument results must be the same size as argument list");
      }
      processBatchCallback(list, tableName, pool, results, null);
    }
2170
2171 @Override
2172 @Deprecated
2173 public void processBatch(List<? extends Row> list,
2174 final byte[] tableName,
2175 ExecutorService pool,
2176 Object[] results) throws IOException, InterruptedException {
2177 processBatch(list, TableName.valueOf(tableName), pool, results);
2178 }
2179
2180
2181
2182
2183
2184
2185
2186
    /**
     * Execute a batch with a per-result callback: submit everything through
     * AsyncProcess, block until completion, then surface accumulated failures
     * as a single (retries-exhausted) exception.
     * @deprecated kept for interface compatibility.
     */
    @Override
    @Deprecated
    public <R> void processBatchCallback(
      List<? extends Row> list,
      TableName tableName,
      ExecutorService pool,
      Object[] results,
      Batch.Callback<R> callback)
      throws IOException, InterruptedException {

      AsyncRequestFuture ars = this.asyncProcess.submitAll(
          pool, tableName, list, callback, results);
      // Blocks until every action finished (successfully or not).
      ars.waitUntilDone();
      if (ars.hasError()) {
        throw ars.getErrors();
      }
    }
2204
2205 @Override
2206 @Deprecated
2207 public <R> void processBatchCallback(
2208 List<? extends Row> list,
2209 byte[] tableName,
2210 ExecutorService pool,
2211 Object[] results,
2212 Batch.Callback<R> callback)
2213 throws IOException, InterruptedException {
2214 processBatchCallback(list, TableName.valueOf(tableName), pool, results, callback);
2215 }
2216
2217
    /**
     * Build the AsyncProcess used by this connection for batched operations.
     * Overridable so tests can substitute their own implementation.
     */
    protected AsyncProcess createAsyncProcess(Configuration conf) {
      // No default pool: callers of AsyncProcess supply their own executor.
      return new AsyncProcess(this, conf, this.batchPool,
          RpcRetryingCallerFactory.instantiate(conf, this.getStatisticsTracker()), false,
          RpcControllerFactory.instantiate(conf));
    }
2224
    /** @return the shared AsyncProcess of this connection. */
    @Override
    public AsyncProcess getAsyncProcess() {
      return asyncProcess;
    }
2229
    /** @return the per-server statistics tracker, or null if not enabled. */
    @Override
    public ServerStatisticTracker getStatisticsTracker() {
      return this.stats;
    }
2234
    /** @return the client backoff policy configured for this connection. */
    @Override
    public ClientBackoffPolicy getBackoffPolicy() {
      return this.backoffPolicy;
    }
2239
2240
2241
2242
2243
    /** Test helper: number of region locations cached for the given table. */
    @VisibleForTesting
    int getNumberOfCachedRegionLocations(final TableName tableName) {
      return metaCache.getNumberOfCachedRegionLocations(tableName);
    }
2248
    /**
     * @deprecated region cache prefetch was removed; kept as a no-op only for
     *             interface compatibility.
     */
    @Override
    @Deprecated
    public void setRegionCachePrefetch(final TableName tableName, final boolean enable) {
    }
2253
    /**
     * @deprecated region cache prefetch was removed; kept as a no-op only for
     *             interface compatibility.
     */
    @Override
    @Deprecated
    public void setRegionCachePrefetch(final byte[] tableName,
        final boolean enable) {
    }
2259
    /**
     * @deprecated region cache prefetch was removed; always returns false.
     */
    @Override
    @Deprecated
    public boolean getRegionCachePrefetch(TableName tableName) {
      return false;
    }
2265
    /**
     * @deprecated region cache prefetch was removed; always returns false.
     */
    @Override
    @Deprecated
    public boolean getRegionCachePrefetch(byte[] tableName) {
      return false;
    }
2271
2272 @Override
2273 public void abort(final String msg, Throwable t) {
2274 if (t instanceof KeeperException.SessionExpiredException
2275 && keepAliveZookeeper != null) {
2276 synchronized (masterAndZKLock) {
2277 if (keepAliveZookeeper != null) {
2278 LOG.warn("This client just lost it's session with ZooKeeper," +
2279 " closing it." +
2280 " It will be recreated next time someone needs it", t);
2281 closeZooKeeperWatcher();
2282 }
2283 }
2284 } else {
2285 if (t != null) {
2286 LOG.fatal(msg, t);
2287 } else {
2288 LOG.fatal(msg);
2289 }
2290 this.aborted = true;
2291 close();
2292 this.closed = true;
2293 }
2294 }
2295
    /** @return true once this connection has been closed. */
    @Override
    public boolean isClosed() {
      return this.closed;
    }
2300
    /** @return true once this connection has been aborted. */
    @Override
    public boolean isAborted(){
      return this.aborted;
    }
2305
    /** @return the current number of region servers, per the registry. */
    @Override
    public int getCurrentNrHRS() throws IOException {
      return this.registry.getCurrentNrHRS();
    }
2310
2311
2312
2313
    // Increment the shared-connection reference count.  Not synchronized:
    // callers coordinate through ConnectionManager.
    void incCount() {
      ++refCount;
    }
2317
2318
2319
2320
    // Decrement the shared-connection reference count, never below zero.
    void decCount() {
      if (refCount > 0) {
        --refCount;
      }
    }
2326
2327
2328
2329
2330
2331
    // True when no one references this shared connection any more.
    boolean isZeroReference() {
      return refCount == 0;
    }
2335
    /**
     * Actually release all resources held by this connection.  Idempotent.
     * The shutdown order below is deliberate: stop the closing chore first,
     * then the master connection and batch pool, then ZK, and the RPC client
     * last so in-flight teardown calls can still go out.
     */
    void internalClose() {
      if (this.closed) {
        return;
      }
      delayedClosing.stop("Closing connection");
      closeMaster();
      shutdownBatchPool();
      this.closed = true;
      closeZooKeeperWatcher();
      this.stubs.clear();
      if (clusterStatusListener != null) {
        clusterStatusListener.close();
      }
      if (rpcClient != null) {
        rpcClient.close();
      }
    }
2353
2354 @Override
2355 public void close() {
2356 if (managed) {
2357 if (aborted) {
2358 ConnectionManager.deleteStaleConnection(this);
2359 } else {
2360 ConnectionManager.deleteConnection(this, false);
2361 }
2362 } else {
2363 internalClose();
2364 }
2365 }
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
    /**
     * Safety net: if the user leaked this connection, close it on GC.
     */
    @Override
    protected void finalize() throws Throwable {
      super.finalize();
      // Force the reference count to one so the close() below really tears
      // the connection down instead of just decrementing the count.
      refCount = 1;
      close();
    }
2385
2386
2387
2388
2389 @Deprecated
2390 @Override
2391 public HTableDescriptor[] listTables() throws IOException {
2392 MasterKeepAliveConnection master = getKeepAliveMasterService();
2393 try {
2394 GetTableDescriptorsRequest req =
2395 RequestConverter.buildGetTableDescriptorsRequest((List<TableName>)null);
2396 return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req));
2397 } catch (ServiceException se) {
2398 throw ProtobufUtil.getRemoteException(se);
2399 } finally {
2400 master.close();
2401 }
2402 }
2403
2404
2405
2406
2407 @Deprecated
2408 @Override
2409 public String[] getTableNames() throws IOException {
2410 TableName[] tableNames = listTableNames();
2411 String result[] = new String[tableNames.length];
2412 for (int i = 0; i < tableNames.length; i++) {
2413 result[i] = tableNames[i].getNameAsString();
2414 }
2415 return result;
2416 }
2417
2418
2419
2420
2421 @Deprecated
2422 @Override
2423 public TableName[] listTableNames() throws IOException {
2424 MasterKeepAliveConnection master = getKeepAliveMasterService();
2425 try {
2426 return ProtobufUtil.getTableNameArray(master.getTableNames(null,
2427 GetTableNamesRequest.newBuilder().build())
2428 .getTableNamesList());
2429 } catch (ServiceException se) {
2430 throw ProtobufUtil.getRemoteException(se);
2431 } finally {
2432 master.close();
2433 }
2434 }
2435
2436
2437
2438
    /**
     * Fetch descriptors for the given tables via the master.  Null or empty
     * input yields an empty array rather than a round trip.
     * @deprecated kept for interface compatibility.
     */
    @Deprecated
    @Override
    public HTableDescriptor[] getHTableDescriptorsByTableName(
        List<TableName> tableNames) throws IOException {
      if (tableNames == null || tableNames.isEmpty()) return new HTableDescriptor[0];
      MasterKeepAliveConnection master = getKeepAliveMasterService();
      try {
        GetTableDescriptorsRequest req =
          RequestConverter.buildGetTableDescriptorsRequest(tableNames);
        return ProtobufUtil.getHTableDescriptorArray(master.getTableDescriptors(null, req));
      } catch (ServiceException se) {
        throw ProtobufUtil.getRemoteException(se);
      } finally {
        // Always release the keep-alive master user count.
        master.close();
      }
    }
2455
2456
2457
2458
2459 @Deprecated
2460 @Override
2461 public HTableDescriptor[] getHTableDescriptors(
2462 List<String> names) throws IOException {
2463 List<TableName> tableNames = new ArrayList<TableName>(names.size());
2464 for(String name : names) {
2465 tableNames.add(TableName.valueOf(name));
2466 }
2467
2468 return getHTableDescriptorsByTableName(tableNames);
2469 }
2470
    /** @return the nonce generator used for idempotent retries. */
    @Override
    public NonceGenerator getNonceGenerator() {
      return this.nonceGenerator;
    }
2475
2476
2477
2478
2479
2480
2481
2482
    /**
     * Fetch the descriptor of a single table via the master.
     * @return the descriptor, or null when {@code tableName} is null.
     * @throws TableNotFoundException if the master knows no such table.
     * @deprecated kept for interface compatibility.
     */
    @Deprecated
    @Override
    public HTableDescriptor getHTableDescriptor(final TableName tableName)
    throws IOException {
      if (tableName == null) return null;
      MasterKeepAliveConnection master = getKeepAliveMasterService();
      GetTableDescriptorsResponse htds;
      try {
        GetTableDescriptorsRequest req =
          RequestConverter.buildGetTableDescriptorsRequest(tableName);
        htds = master.getTableDescriptors(null, req);
      } catch (ServiceException se) {
        throw ProtobufUtil.getRemoteException(se);
      } finally {
        // Always release the keep-alive master user count.
        master.close();
      }
      if (!htds.getTableSchemaList().isEmpty()) {
        return HTableDescriptor.convert(htds.getTableSchemaList().get(0));
      }
      throw new TableNotFoundException(tableName.getNameAsString());
    }
2504
2505
2506
2507
2508 @Deprecated
2509 @Override
2510 public HTableDescriptor getHTableDescriptor(final byte[] tableName)
2511 throws IOException {
2512 return getHTableDescriptor(TableName.valueOf(tableName));
2513 }
2514
    /**
     * Build a fresh retrying-caller factory wired with this connection's
     * interceptor and statistics tracker.
     */
    @Override
    public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) {
      return RpcRetryingCallerFactory
          .instantiate(conf, this.interceptor, this.getStatisticsTracker());
    }
2520
    /** @return true when this connection's lifecycle is managed (shared). */
    @Override
    public boolean isManaged() {
      return managed;
    }
2525 }
2526
2527
2528
2529
2530 static class ServerErrorTracker {
2531
2532 private final ConcurrentMap<ServerName, ServerErrors> errorsByServer =
2533 new ConcurrentHashMap<ServerName, ServerErrors>();
2534 private final long canRetryUntil;
2535 private final int maxRetries;
2536 private final long startTrackingTime;
2537
2538 public ServerErrorTracker(long timeout, int maxRetries) {
2539 this.maxRetries = maxRetries;
2540 this.canRetryUntil = EnvironmentEdgeManager.currentTime() + timeout;
2541 this.startTrackingTime = new Date().getTime();
2542 }
2543
2544
2545
2546
2547 boolean canRetryMore(int numRetry) {
2548
2549 return numRetry < maxRetries || (maxRetries > 1 &&
2550 EnvironmentEdgeManager.currentTime() < this.canRetryUntil);
2551 }
2552
2553
2554
2555
2556
2557
2558
2559
2560 long calculateBackoffTime(ServerName server, long basePause) {
2561 long result;
2562 ServerErrors errorStats = errorsByServer.get(server);
2563 if (errorStats != null) {
2564 result = ConnectionUtils.getPauseTime(basePause, errorStats.retries.get());
2565 } else {
2566 result = 0;
2567 }
2568 return result;
2569 }
2570
2571
2572
2573
2574
2575
2576 void reportServerError(ServerName server) {
2577 ServerErrors errors = errorsByServer.get(server);
2578 if (errors != null) {
2579 errors.addError();
2580 } else {
2581 errors = errorsByServer.putIfAbsent(server, new ServerErrors());
2582 if (errors != null){
2583 errors.addError();
2584 }
2585 }
2586 }
2587
2588 long getStartTrackingTime() {
2589 return startTrackingTime;
2590 }
2591
2592
2593
2594
2595 private static class ServerErrors {
2596 public final AtomicInteger retries = new AtomicInteger(0);
2597
2598 public void addError() {
2599 retries.incrementAndGet();
2600 }
2601 }
2602 }
2603
2604
2605
2606
2607
2608
2609
2610
2611
  /**
   * Walks the cause chain of {@code exception} looking for one of the
   * region-level exceptions the client reacts to specially
   * (RegionMovedException, RegionOpeningException, RegionTooBusyException),
   * unwrapping Hadoop RemoteExceptions along the way.
   *
   * @param exception the object received on failure; may be anything.
   * @return the matching region-level exception, or null if none was found.
   */
  public static Throwable findException(Object exception) {
    // Non-Throwable payloads (e.g. a plain result object) carry no exception.
    if (exception == null || !(exception instanceof Throwable)) {
      return null;
    }
    Throwable cur = (Throwable) exception;
    while (cur != null) {
      if (cur instanceof RegionMovedException || cur instanceof RegionOpeningException
          || cur instanceof RegionTooBusyException) {
        return cur;
      }
      if (cur instanceof RemoteException) {
        RemoteException re = (RemoteException) cur;
        cur = re.unwrapRemoteException(
            RegionOpeningException.class, RegionMovedException.class,
            RegionTooBusyException.class);
        if (cur == null) {
          cur = re.unwrapRemoteException();
        }
        // unwrapRemoteException can return the remote exception itself when
        // it cannot materialize the local type; no further unwrapping is
        // possible then, so stop rather than loop forever.
        if (cur == re) {
          return null;
        }
      } else {
        cur = cur.getCause();
      }
    }

    return null;
  }
2643 }