package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseFileSystem;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.executor.EventHandler.EventType;
import org.apache.hadoop.hbase.executor.RegionTransitionData;
import org.apache.hadoop.hbase.io.Reference.Range;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;

import com.google.common.util.concurrent.ThreadFactoryBuilder;
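
/**
 * Executes a region split as a multi-step "transaction": call {@link #prepare()}
 * to set the transaction up, {@link #execute(Server, RegionServerServices)} to run
 * it, and {@link #rollback(Server, RegionServerServices)} to clean up if execute
 * fails.
 *
 * <p>A minimal usage sketch (caller-side names such as <code>parent</code>,
 * <code>midKey</code>, <code>server</code>, <code>services</code> and
 * <code>myAbortable</code> are illustrative only):
 * <pre>
 *  SplitTransaction st = new SplitTransaction(parent, midKey);
 *  if (!st.prepare()) return;
 *  try {
 *    st.execute(server, services);
 *  } catch (IOException ioe) {
 *    try {
 *      st.rollback(server, services);
 *    } catch (RuntimeException e) {
 *      // Rollback itself failed; abort rather than continue with an
 *      // inconsistent region state.
 *      myAbortable.abort("Failed split, abort");
 *    }
 *  }
 * </pre>
 *
 * <p>Note that {@link #rollback(Server, RegionServerServices)} returns
 * <code>false</code> once the transaction has passed its point of no return
 * (the {@link JournalEntry#PONR} journal entry); in that case the caller must
 * abort the region server so the split can be completed by server shutdown
 * processing.
 */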
public class SplitTransaction {
  private static final Log LOG = LogFactory.getLog(SplitTransaction.class);
  private static final String SPLITDIR = ".splits";

  private final HRegion parent;
  private HRegionInfo hri_a;
  private HRegionInfo hri_b;
  private Path splitdir;
  private long fileSplitTimeout = 30000;
  private int znodeVersion = -1;

  /** Row to split around. */
  private final byte [] splitrow;

  /**
   * Types of entry added to the transaction journal.  Each value is one step
   * in the split transaction; on failure the journal is replayed in reverse to
   * decide how much has to be rolled back.
   */
  enum JournalEntry {
    /** Set the parent region as splitting in ZK. */
    SET_SPLITTING_IN_ZK,
    /** Created the temporary split data directory. */
    CREATE_SPLIT_DIR,
    /** Closed the parent region. */
    CLOSED_PARENT_REGION,
    /** Removed the parent region from the server's online regions list. */
    OFFLINED_PARENT,
    /** Started creation of daughter region A. */
    STARTED_REGION_A_CREATION,
    /** Started creation of daughter region B. */
    STARTED_REGION_B_CREATION,
    /**
     * Point of no return.  Once this entry has been journaled the split can no
     * longer be rolled back in-process.
     */
    PONR
  }

  /** Journal of the steps the split transaction has taken so far. */
  private final List<JournalEntry> journal = new ArrayList<JournalEntry>();
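
  /**
   * Constructor.
   * @param r Region to split.
   * @param splitrow Row to split around.
   */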
  public SplitTransaction(final HRegion r, final byte [] splitrow) {
    this.parent = r;
    this.splitrow = splitrow;
    this.splitdir = getSplitDir(this.parent);
  }
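
  /**
   * Does checks on the split inputs.
   * @return <code>true</code> if the region is splittable and the split row is
   * inside the region's key range (and not equal to its start key), in which
   * case the daughter region infos have been created; <code>false</code> if the
   * split cannot proceed.
   */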
  public boolean prepare() {
    if (!this.parent.isSplittable()) return false;

    if (this.splitrow == null) return false;
    HRegionInfo hri = this.parent.getRegionInfo();
    parent.prepareToSplit();

    byte [] startKey = hri.getStartKey();
    byte [] endKey = hri.getEndKey();
    if (Bytes.equals(startKey, splitrow) ||
        !this.parent.getRegionInfo().containsRow(splitrow)) {
      LOG.info("Split row is not inside region key range or is equal to " +
          "startkey: " + Bytes.toStringBinary(this.splitrow));
      return false;
    }
    long rid = getDaughterRegionIdTimestamp(hri);
    this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow,
        false, rid);
    this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey,
        false, rid);
    return true;
  }

  /**
   * Calculates the region id (timestamp) to use for the daughter regions.
   * @param hri Parent {@link HRegionInfo}.
   * @return Daughter region id to use.
   */
  private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) {
    long rid = EnvironmentEdgeManager.currentTimeMillis();
    // The region id is a timestamp.  It must not be less than the parent's id,
    // or the daughter regions would sort into the wrong position in .META.
    if (rid < hri.getRegionId()) {
      LOG.warn("Clock skew; parent regions id is " + hri.getRegionId() +
          " but current time here is " + rid);
      rid = hri.getRegionId() + 1;
    }
    return rid;
  }

  private static IOException closedByOtherException = new IOException(
      "Failed to close region: already closed by another thread");
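
  /**
   * Runs the split up to and including the point of no return: marks the parent
   * as splitting in ZK, creates the split directory, closes and offlines the
   * parent, splits its store files, creates the two daughter regions, and
   * finally offlines the parent in .META.
   * @param server Hosting server instance.  Can be null when testing (no ZK or
   * .META. updates are attempted with a null server).
   * @param services Used to online/offline regions.  Can be null when testing.
   * @return The two daughter regions (not yet opened).
   * @throws IOException If thrown, the transaction failed; call
   * {@link #rollback(Server, RegionServerServices)}.
   */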
  PairOfSameType<HRegion> createDaughters(final Server server,
      final RegionServerServices services) throws IOException {
    LOG.info("Starting split of region " + this.parent);
    if ((server != null && server.isStopped()) ||
        (services != null && services.isStopping())) {
      throw new IOException("Server is stopped or stopping");
    }
    assert !this.parent.lock.writeLock().isHeldByCurrentThread() :
      "Unsafe to hold write lock while performing RPCs";

    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
      this.parent.getCoprocessorHost().preSplit();
    }

    // If true, there is no cluster to write meta edits to or to coordinate
    // with through zookeeper.
    boolean testing = server == null ? true :
      server.getConfiguration().getBoolean("hbase.testing.nocluster", false);
    this.fileSplitTimeout = testing ? this.fileSplitTimeout :
      server.getConfiguration().getLong("hbase.regionserver.fileSplitTimeout",
        this.fileSplitTimeout);

    // Set the ephemeral SPLITTING znode up in zk.
    if (server != null && server.getZooKeeper() != null) {
      try {
        createNodeSplitting(server.getZooKeeper(),
          this.parent.getRegionInfo(), server.getServerName());
      } catch (KeeperException e) {
        throw new IOException("Failed creating SPLITTING znode on " +
          this.parent.getRegionNameAsString(), e);
      }
    }
    this.journal.add(JournalEntry.SET_SPLITTING_IN_ZK);
    if (server != null && server.getZooKeeper() != null) {
      try {
        // Transition the node from SPLITTING to SPLITTING to pick up the znode
        // version; subsequent transitions of the node must supply it.
        this.znodeVersion = transitionNodeSplitting(server.getZooKeeper(),
          this.parent.getRegionInfo(), server.getServerName(), -1);
      } catch (KeeperException e) {
        throw new IOException("Failed setting SPLITTING znode on "
            + this.parent.getRegionNameAsString(), e);
      }
    }
    createSplitDir(this.parent.getFilesystem(), this.splitdir);
    this.journal.add(JournalEntry.CREATE_SPLIT_DIR);

    List<StoreFile> hstoreFilesToSplit = null;
    Exception exceptionToThrow = null;
    try {
      hstoreFilesToSplit = this.parent.close(false);
    } catch (Exception e) {
      exceptionToThrow = e;
    }
    if (exceptionToThrow == null && hstoreFilesToSplit == null) {
      // The region was closed by a concurrent thread.  We can't continue with
      // the split, so abandon it; the region has probably already been moved
      // off this server or is in the process of moving.
      exceptionToThrow = closedByOtherException;
    }
    if (exceptionToThrow != closedByOtherException) {
      this.journal.add(JournalEntry.CLOSED_PARENT_REGION);
    }
    if (exceptionToThrow != null) {
      if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow;
      throw new IOException(exceptionToThrow);
    }

    if (!testing) {
      services.removeFromOnlineRegions(this.parent.getRegionInfo().getEncodedName());
    }
    this.journal.add(JournalEntry.OFFLINED_PARENT);

    // Split each of the parent's store files into a pair of reference files,
    // one per daughter, under the split directory.  Nothing to unroll here on
    // failure; cleanup of CREATE_SPLIT_DIR removes anything written below it.
    splitStoreFiles(this.splitdir, hstoreFilesToSplit);

    // Journal that we are starting creation of region A BEFORE we actually
    // create it, so a failure partway through still gets cleaned up on
    // rollback.
    this.journal.add(JournalEntry.STARTED_REGION_A_CREATION);
    HRegion a = createDaughterRegion(this.hri_a, this.parent.rsServices);

    // Ditto for region B.
    this.journal.add(JournalEntry.STARTED_REGION_B_CREATION);
    HRegion b = createDaughterRegion(this.hri_b, this.parent.rsServices);

    // This is the point of no return.  If a subsequent step fails, the split
    // can no longer be rolled back in-process; the region server must crash
    // out and let server shutdown processing finish the split.
    this.journal.add(JournalEntry.PONR);

    // Edit the parent in meta: offline the parent region and add splitA and splitB.
    if (!testing) {
      MetaEditor.offlineParentInMeta(server.getCatalogTracker(),
        this.parent.getRegionInfo(), a.getRegionInfo(), b.getRegionInfo());
    }
    return new PairOfSameType<HRegion>(a, b);
  }
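
  /**
   * Opens the two daughter regions in parallel and, when region server services
   * are supplied, runs their post-open deploy tasks and adds them to the
   * server's online regions.  Does nothing if the server is stopping or stopped.
   * @param server Hosting server instance.  Can be null when testing.
   * @param services Used to online the daughter regions.  Can be null when testing.
   * @param a First daughter region.
   * @param b Second daughter region.
   * @throws IOException If opening either daughter fails.
   */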
  void openDaughters(final Server server,
      final RegionServerServices services, HRegion a, HRegion b)
      throws IOException {
    boolean stopped = server != null && server.isStopped();
    boolean stopping = services != null && services.isStopping();
    if (stopped || stopping) {
      LOG.info("Not opening daughters " +
          b.getRegionInfo().getRegionNameAsString() +
          " and " +
          a.getRegionInfo().getRegionNameAsString() +
          " because stopping=" + stopping + ", stopped=" + stopped);
    } else {
      // Open daughters in parallel.
      DaughterOpener aOpener = new DaughterOpener(server, a);
      DaughterOpener bOpener = new DaughterOpener(server, b);
      aOpener.start();
      bOpener.start();
      try {
        aOpener.join();
        bOpener.join();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted " + e.getMessage());
      }
      if (aOpener.getException() != null) {
        throw new IOException("Failed " +
            aOpener.getName(), aOpener.getException());
      }
      if (bOpener.getException() != null) {
        throw new IOException("Failed " +
            bOpener.getName(), bOpener.getException());
      }
      if (services != null) {
        try {
          // Run post-open deploy tasks for each daughter, then add it to the
          // server's online regions.
          services.postOpenDeployTasks(b, server.getCatalogTracker(), true);
          services.addToOnlineRegions(b);
          services.postOpenDeployTasks(a, server.getCatalogTracker(), true);
          services.addToOnlineRegions(a);
        } catch (KeeperException ke) {
          throw new IOException(ke);
        }
      }
    }
  }
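
  /**
   * Transitions the parent region's znode from SPLITTING to SPLIT and keeps
   * "tickling" the node until the master has processed the split (signalled by
   * the node's deletion), then runs the postSplit coprocessor hook.
   * @param server Hosting server instance.  Can be null when testing.
   * @param services Used to check whether this region server is stopping.
   * @param a First daughter region.
   * @param b Second daughter region.
   * @throws IOException If telling the master about the split fails.
   */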
  void transitionZKNode(final Server server,
      final RegionServerServices services, HRegion a, HRegion b)
      throws IOException {
    // Tell the master about the split by updating zk.
    if (server != null && server.getZooKeeper() != null) {
      try {
        this.znodeVersion = transitionNodeSplit(server.getZooKeeper(),
          parent.getRegionInfo(), a.getRegionInfo(), b.getRegionInfo(),
          server.getServerName(), this.znodeVersion);

        int spins = 0;
        // Wait for the master to process the split; keep tickling the node so
        // the master can see this region server is still alive.  A returned
        // version of -1 means the node can no longer be transitioned, which is
        // our signal that the master has processed the split.
        do {
          if (spins % 10 == 0) {
            LOG.debug("Still waiting on the master to process the split for " +
                this.parent.getRegionInfo().getEncodedName());
          }
          Thread.sleep(100);
          this.znodeVersion = tickleNodeSplit(server.getZooKeeper(),
            parent.getRegionInfo(), a.getRegionInfo(), b.getRegionInfo(),
            server.getServerName(), this.znodeVersion);
          spins++;
        } while (this.znodeVersion != -1 && !server.isStopped()
            && !services.isStopping());
      } catch (Exception e) {
        if (e instanceof InterruptedException) {
          Thread.currentThread().interrupt();
        }
        throw new IOException("Failed telling master about split", e);
      }
    }

    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
      this.parent.getCoprocessorHost().postSplit(a, b);
    }
  }
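
  /**
   * Runs the whole transaction: creates the daughter regions, opens them, and
   * transitions the znode so the master learns of the split.
   * @param server Hosting server instance.  Can be null when testing.
   * @param services Used to online/offline regions.  Can be null when testing.
   * @return The two opened daughter regions.
   * @throws IOException If thrown, the transaction failed; call
   * {@link #rollback(Server, RegionServerServices)}.
   */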
  public PairOfSameType<HRegion> execute(final Server server,
      final RegionServerServices services)
      throws IOException {
    PairOfSameType<HRegion> regions = createDaughters(server, services);
    openDaughters(server, services, regions.getFirst(), regions.getSecond());
    transitionZKNode(server, services, regions.getFirst(), regions.getSecond());
    return regions;
  }

  /**
   * Thread that opens one daughter region, recording any throwable so the
   * caller can check it after join().
   */
  class DaughterOpener extends HasThread {
    private final Server server;
    private final HRegion r;
    private Throwable t = null;

    DaughterOpener(final Server s, final HRegion r) {
      super((s == null ? "null-services" : s.getServerName()) +
        "-daughterOpener=" + r.getRegionInfo().getEncodedName());
      setDaemon(true);
      this.server = s;
      this.r = r;
    }

    /**
     * @return Null if the open succeeded, else the throwable raised while
     * opening the daughter region.
     */
    Throwable getException() {
      return this.t;
    }

    @Override
    public void run() {
      try {
        openDaughterRegion(this.server, r);
      } catch (Throwable t) {
        this.t = t;
      }
    }
  }
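
  /**
   * Opens a daughter region, reporting progress through a
   * {@link LoggingProgressable} so a long-running open is periodically logged.
   * @param server Hosting server instance; used only for its configuration and
   * may be null when testing.
   * @param daughter Daughter region to open.
   * @throws IOException
   * @throws KeeperException
   */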
  void openDaughterRegion(final Server server, final HRegion daughter)
      throws IOException, KeeperException {
    HRegionInfo hri = daughter.getRegionInfo();
    LoggingProgressable reporter = server == null ? null :
      new LoggingProgressable(hri, server.getConfiguration());
    daughter.openHRegion(reporter);
  }

  static class LoggingProgressable implements CancelableProgressable {
    private final HRegionInfo hri;
    private long lastLog = -1;
    private final long interval;

    LoggingProgressable(final HRegionInfo hri, final Configuration c) {
      this.hri = hri;
      this.interval = c.getLong("hbase.regionserver.split.daughter.open.log.interval",
        10000);
    }

    @Override
    public boolean progress() {
      long now = System.currentTimeMillis();
      if (now - lastLog > this.interval) {
        LOG.info("Opening " + this.hri.getRegionNameAsString());
        this.lastLog = now;
      }
      return true;
    }
  }

  private static Path getSplitDir(final HRegion r) {
    return new Path(r.getRegionDir(), SPLITDIR);
  }
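
  /**
   * Creates the temporary split directory, first deleting any directory left
   * over from a previous failed split attempt.
   * @param fs Filesystem to use.
   * @param splitdir Directory to create.
   * @throws IOException If deletion or creation of the directory fails.
   */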
  void createSplitDir(final FileSystem fs, final Path splitdir)
      throws IOException {
    if (fs.exists(splitdir)) {
      LOG.info("The " + splitdir
          + " directory exists. Hence deleting it to recreate it");
      if (!HBaseFileSystem.deleteDirFromFileSystem(fs, splitdir)) {
        throw new IOException("Failed deletion of " + splitdir
            + " before creating them again.");
      }
    }
    if (!HBaseFileSystem.makeDirOnFileSystem(fs, splitdir))
      throw new IOException("Failed create of " + splitdir);
  }

  private static void cleanupSplitDir(final FileSystem fs, final Path splitdir)
      throws IOException {
    // The split dir may already have been cleaned up; it is not required to exist.
    deleteDir(fs, splitdir, false);
  }

  /**
   * @param fs Filesystem to use.
   * @param dir Directory to delete.
   * @param mustPreExist If true, the directory must already exist.
   * @throws IOException Thrown if <code>dir</code> is required to exist but
   * does not, or if its deletion fails.
   */
  private static void deleteDir(final FileSystem fs, final Path dir,
      final boolean mustPreExist)
      throws IOException {
    if (!fs.exists(dir)) {
      if (mustPreExist) throw new IOException(dir.toString() + " does not exist!");
    } else if (!HBaseFileSystem.deleteDirFromFileSystem(fs, dir)) {
      throw new IOException("Failed delete of " + dir);
    }
  }

  private void splitStoreFiles(final Path splitdir,
      final List<StoreFile> hstoreFilesToSplit)
      throws IOException {
    if (hstoreFilesToSplit == null) {
      // The parent region's close should have returned its store files; treat
      // null as fatal.
      throw new IOException("Close returned empty list of StoreFiles");
    }
    // Set up a thread pool with one slot per file to split, submit all the
    // splits, wait for completion, then check each task for an exception.
    int nbFiles = hstoreFilesToSplit.size();
    if (nbFiles == 0) {
      // No store files to split; nothing to do.
      return;
    }
    LOG.info("Preparing to split " + nbFiles + " storefiles for region " + this.parent);
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool =
      (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
    List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);

    // Split each store file.
    for (StoreFile sf : hstoreFilesToSplit) {
      StoreFileSplitter sfs = new StoreFileSplitter(sf, splitdir);
      futures.add(threadPool.submit(sfs));
    }
    // Shut down the pool; no new tasks will be accepted.
    threadPool.shutdown();

    // Wait for all the tasks to finish.
    try {
      boolean stillRunning = !threadPool.awaitTermination(
          this.fileSplitTimeout, TimeUnit.MILLISECONDS);
      if (stillRunning) {
        threadPool.shutdownNow();
        // Wait for the thread pool to shut down completely.
        while (!threadPool.isTerminated()) {
          Thread.sleep(50);
        }
        throw new IOException("Took too long to split the" +
            " files and create the references, aborting split");
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IOException("Interrupted while waiting for file splitters", e);
    }

    // Check every task for an exception.
    for (Future<Void> future : futures) {
      try {
        future.get();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException(
            "Interrupted while trying to get the results of file splitters", e);
      } catch (ExecutionException e) {
        throw new IOException(e);
      }
    }
  }

  private void splitStoreFile(final StoreFile sf, final Path splitdir)
      throws IOException {
    FileSystem fs = this.parent.getFilesystem();
    byte [] family = sf.getFamily();
    String encoded = this.hri_a.getEncodedName();
    Path storedir = Store.getStoreHomedir(splitdir, encoded, family);
    StoreFile.split(fs, storedir, sf, this.splitrow, Range.bottom);
    encoded = this.hri_b.getEncodedName();
    storedir = Store.getStoreHomedir(splitdir, encoded, family);
    StoreFile.split(fs, storedir, sf, this.splitrow, Range.top);
  }

  /**
   * Utility class used to do the store file splitting / reference writing
   * in parallel instead of sequentially.
   */
  class StoreFileSplitter implements Callable<Void> {

    private final StoreFile sf;
    private final Path splitdir;

    /**
     * Constructor that takes what it needs to split.
     * @param sf The store file to split.
     * @param splitdir Directory the references are written under.
     */
    public StoreFileSplitter(final StoreFile sf, final Path splitdir) {
      this.sf = sf;
      this.splitdir = splitdir;
    }

    public Void call() throws IOException {
      splitStoreFile(sf, splitdir);
      return null;
    }
  }
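
  /**
   * Creates a daughter region from the reference files written under the split
   * directory: instantiates the region, seeds its read/write request counters
   * with half of the parent's, and moves the files into the daughter's own
   * region directory.
   * @param hri Daughter region info.
   * @param rsServices Region server services, passed through to the new region.
   * @return The new daughter {@link HRegion} instance (not opened).
   * @throws IOException
   */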
  HRegion createDaughterRegion(final HRegionInfo hri,
      final RegionServerServices rsServices)
      throws IOException {
    FileSystem fs = this.parent.getFilesystem();
    Path regionDir = getSplitDirForDaughter(this.parent.getFilesystem(),
      this.splitdir, hri);
    HRegion r = HRegion.newHRegion(this.parent.getTableDir(),
      this.parent.getLog(), fs, this.parent.getBaseConf(),
      hri, this.parent.getTableDesc(), rsServices);
    long halfParentReadRequestCount = this.parent.getReadRequestsCount() / 2;
    r.readRequestsCount.set(halfParentReadRequestCount);
    r.setOpMetricsReadRequestCount(halfParentReadRequestCount);
    long halfParentWriteRequest = this.parent.getWriteRequestsCount() / 2;
    r.writeRequestsCount.set(halfParentWriteRequest);
    r.setOpMetricsWriteRequestCount(halfParentWriteRequest);
    HRegion.moveInitialFilesIntoPlace(fs, regionDir, r.getRegionDir());
    return r;
  }

  private static void cleanupDaughterRegion(final FileSystem fs,
      final Path tabledir, final String encodedName)
      throws IOException {
    Path regiondir = HRegion.getRegionDir(tabledir, encodedName);
    // The daughter region dir may not exist; that is fine.
    deleteDir(fs, regiondir, false);
  }

  /**
   * @param fs Filesystem to use.
   * @param splitdir Parent region's temporary split directory.
   * @param hri Daughter region info.
   * @return Path to the daughter's directory under the split directory, i.e.
   * <code>splitdir/&lt;daughter encoded name&gt;</code>.
   * @throws IOException
   */
  private static Path getSplitDirForDaughter(final FileSystem fs,
      final Path splitdir, final HRegionInfo hri)
      throws IOException {
    return new Path(splitdir, hri.getEncodedName());
  }
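
  /**
   * Rolls back a failed transaction by replaying the journal in reverse order.
   * @param server Hosting server instance.  Can be null when testing.
   * @param services Used to online/offline regions.  Can be null when testing.
   * @return <code>true</code> if the rollback restored the pre-split state;
   * <code>false</code> if the point of no return had already been passed, in
   * which case the caller should abort the region server.
   * @throws IOException If cleanup of filesystem state fails.
   */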
  public boolean rollback(final Server server, final RegionServerServices services)
      throws IOException {
    boolean result = true;
    FileSystem fs = this.parent.getFilesystem();
    ListIterator<JournalEntry> iterator =
      this.journal.listIterator(this.journal.size());
    // Replay the journal in reverse order.
    while (iterator.hasPrevious()) {
      JournalEntry je = iterator.previous();
      switch (je) {

      case SET_SPLITTING_IN_ZK:
        if (server != null && server.getZooKeeper() != null) {
          cleanZK(server, this.parent.getRegionInfo());
        }
        break;

      case CREATE_SPLIT_DIR:
        this.parent.writestate.writesEnabled = true;
        cleanupSplitDir(fs, this.splitdir);
        break;

      case CLOSED_PARENT_REGION:
        try {
          // Reopen the parent region that was closed earlier in the transaction.
          this.parent.initialize();
        } catch (IOException e) {
          LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region " +
              this.parent.getRegionNameAsString(), e);
          throw new RuntimeException(e);
        }
        break;

      case STARTED_REGION_A_CREATION:
        cleanupDaughterRegion(fs, this.parent.getTableDir(),
          this.hri_a.getEncodedName());
        break;

      case STARTED_REGION_B_CREATION:
        cleanupDaughterRegion(fs, this.parent.getTableDir(),
          this.hri_b.getEncodedName());
        break;

      case OFFLINED_PARENT:
        if (services != null) services.addToOnlineRegions(this.parent);
        break;

      case PONR:
        // We have passed the point of no return.  The split can no longer be
        // rolled back in-process, so return false and leave the daughter
        // regions in place for the recovery that follows.
        return false;

      default:
        throw new RuntimeException("Unhandled journal entry: " + je);
      }
    }
    return result;
  }

  HRegionInfo getFirstDaughter() {
    return hri_a;
  }

  HRegionInfo getSecondDaughter() {
    return hri_b;
  }

  /** @return The split directory used by this transaction. */
  Path getSplitDir() {
    return this.splitdir;
  }

  /**
   * Cleans up any split detritus that may have been left around from a
   * previous failed split attempt: partially created daughter region
   * directories and the temporary split directory itself.
   * @param r The region to check for detritus.
   * @throws IOException
   */
  static void cleanupAnySplitDetritus(final HRegion r) throws IOException {
    Path splitdir = getSplitDir(r);
    FileSystem fs = r.getFilesystem();
    if (!fs.exists(splitdir)) return;
    // Each directory under splitdir is named for a daughter region that was
    // being created when the split failed.  Remove any corresponding daughter
    // region directories under the table dir, then remove the split dir itself.
    FileStatus [] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs));
    for (int i = 0; i < daughters.length; i++) {
      cleanupDaughterRegion(fs, r.getTableDir(),
        daughters[i].getPath().getName());
    }
    cleanupSplitDir(r.getFilesystem(), splitdir);
    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
  }

  private static void cleanZK(final Server server, final HRegionInfo hri) {
    try {
      // Only delete the znode if it is still in the SPLITTING state we created it in.
      ZKAssign.deleteNode(server.getZooKeeper(), hri.getEncodedName(),
        EventType.RS_ZK_REGION_SPLITTING);
    } catch (KeeperException e) {
      server.abort("Failed cleanup of " + hri.getRegionNameAsString(), e);
    }
  }
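
  /**
   * Creates a new ephemeral znode in the SPLITTING state for the specified
   * region.  Callers subsequently transition the node (see
   * {@link #transitionNodeSplitting(ZooKeeperWatcher, HRegionInfo, ServerName, int)})
   * to obtain the znode version used by later transitions.
   * @param zkw ZooKeeper watcher.
   * @param region Region to put into the SPLITTING state.
   * @param serverName Server the event originates from.
   * @throws KeeperException
   * @throws IOException If the ephemeral node cannot be created.
   */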
  void createNodeSplitting(final ZooKeeperWatcher zkw, final HRegionInfo region,
      final ServerName serverName) throws KeeperException, IOException {
    LOG.debug(zkw.prefix("Creating ephemeral node for " +
      region.getEncodedName() + " in SPLITTING state"));
    RegionTransitionData data =
      new RegionTransitionData(EventType.RS_ZK_REGION_SPLITTING,
        region.getRegionName(), serverName);

    String node = ZKAssign.getNodeName(zkw, region.getEncodedName());
    if (!ZKUtil.createEphemeralNodeAndWatch(zkw, node, data.getBytes())) {
      throw new IOException("Failed create of ephemeral " + node);
    }
  }
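
  /**
   * Transitions the parent region's znode from SPLITTING to SPLIT, carrying the
   * serialized daughter region infos as the payload.
   * @param zkw ZooKeeper watcher.
   * @param parent Parent region being split.
   * @param a First daughter region.
   * @param b Second daughter region.
   * @param serverName Server the event originates from.
   * @param znodeVersion Expected current version of the znode.
   * @return The new version of the znode after the transition.
   * @throws KeeperException
   * @throws IOException
   */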
  private static int transitionNodeSplit(ZooKeeperWatcher zkw,
      HRegionInfo parent, HRegionInfo a, HRegionInfo b, ServerName serverName,
      final int znodeVersion)
      throws KeeperException, IOException {
    byte [] payload = Writables.getBytes(a, b);
    return ZKAssign.transitionNode(zkw, parent, serverName,
      EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLIT,
      znodeVersion, payload);
  }

  /**
   * Transitions the SPLITTING znode to SPLITTING again, bumping its version.
   * Called with a version of -1 right after node creation to pick up the
   * version that later transitions must supply.
   */
  int transitionNodeSplitting(final ZooKeeperWatcher zkw, final HRegionInfo parent,
      final ServerName serverName, final int version) throws KeeperException, IOException {
    return ZKAssign.transitionNode(zkw, parent, serverName,
      EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
  }

  private static int tickleNodeSplit(ZooKeeperWatcher zkw,
      HRegionInfo parent, HRegionInfo a, HRegionInfo b, ServerName serverName,
      final int znodeVersion)
      throws KeeperException, IOException {
    byte [] payload = Writables.getBytes(a, b);
    return ZKAssign.transitionNode(zkw, parent, serverName,
      EventType.RS_ZK_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT,
      znodeVersion, payload);
  }
}