/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.regionserver;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.ListIterator;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseFileSystem;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.catalog.MetaEditor;
import org.apache.hadoop.hbase.executor.EventHandler.EventType;
import org.apache.hadoop.hbase.executor.RegionTransitionData;
import org.apache.hadoop.hbase.io.Reference.Range;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NodeExistsException;

import com.google.common.util.concurrent.ThreadFactoryBuilder;
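/**
 * Executes a region split as a "transaction".  Call {@link #prepare()} to set
 * the transaction up, {@link #execute(Server, RegionServerServices)} to run it
 * and {@link #rollback(Server, RegionServerServices)} to clean up if execute
 * fails.
 *
 * <p>A rough sketch of intended usage (the <code>server</code>,
 * <code>services</code> and <code>myAbortable</code> references below are
 * placeholders for whatever the caller has in scope):
 * <pre>
 *  SplitTransaction st = new SplitTransaction(parent, midKey);
 *  if (!st.prepare()) return;
 *  try {
 *    st.execute(server, services);
 *  } catch (IOException ioe) {
 *    try {
 *      st.rollback(server, services);
 *      return;
 *    } catch (RuntimeException e) {
 *      // Rollback itself failed; drastic action is required.
 *      myAbortable.abort("Failed split, abort");
 *    }
 *  }
 * </pre>
 *
 * <p>This class is not thread safe; the caller must ensure a split is run by
 * one thread only.
 */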
public class SplitTransaction {
  private static final Log LOG = LogFactory.getLog(SplitTransaction.class);
  private static final String SPLITDIR = ".splits";

  /*
   * Region to split
   */
  private final HRegion parent;
  private HRegionInfo hri_a;
  private HRegionInfo hri_b;
  private Path splitdir;
  private long fileSplitTimeout = 30000;
  private int znodeVersion = -1;

  /*
   * Row to split around
   */
  private final byte [] splitrow;
  /*
   * Types to add to the transaction journal.  Each enum is a step in the
   * split transaction; the journal records how far we got so rollback knows
   * how much to undo.
   */
  enum JournalEntry {
    /**
     * Set region as in transition, set it into SPLITTING state in zk.
     */
    SET_SPLITTING_IN_ZK,
    /**
     * We created the temporary split data directory.
     */
    CREATE_SPLIT_DIR,
    /**
     * Closed the parent region.
     */
    CLOSED_PARENT_REGION,
    /**
     * The parent has been taken out of the server's online regions list.
     */
    OFFLINED_PARENT,
    /**
     * Started in on creation of the first daughter region.
     */
    STARTED_REGION_A_CREATION,
    /**
     * Started in on creation of the second daughter region.
     */
    STARTED_REGION_B_CREATION,
    /**
     * Point of no return.  If we got here, the transaction is not recoverable
     * other than by crashing out the regionserver.
     */
    PONR
  }

  /*
   * Journal of how far the split transaction has progressed.
   */
  private final List<JournalEntry> journal = new ArrayList<JournalEntry>();
  /**
   * Constructor
   * @param r Region to split
   * @param splitrow Row to split around
   */
  public SplitTransaction(final HRegion r, final byte [] splitrow) {
    this.parent = r;
    this.splitrow = splitrow;
    this.splitdir = getSplitDir(this.parent);
  }
  /**
   * Does checks on split inputs.
   * @return <code>true</code> if the region is splittable else
   * <code>false</code> if it is not (e.g. it is not splittable, the split row
   * is null, or the split row is not inside the region's key range).
   */
  public boolean prepare() {
    if (!this.parent.isSplittable()) return false;
    // Can't split without a split row.
    if (this.splitrow == null) return false;
    HRegionInfo hri = this.parent.getRegionInfo();
    parent.prepareToSplit();
    // Check splitrow.
    byte [] startKey = hri.getStartKey();
    byte [] endKey = hri.getEndKey();
    if (Bytes.equals(startKey, splitrow) ||
        !this.parent.getRegionInfo().containsRow(splitrow)) {
      LOG.info("Split row is not inside region key range or is equal to " +
          "startkey: " + Bytes.toStringBinary(this.splitrow));
      return false;
    }
    long rid = getDaughterRegionIdTimestamp(hri);
    this.hri_a = new HRegionInfo(hri.getTableName(), startKey, this.splitrow,
      false, rid);
    this.hri_b = new HRegionInfo(hri.getTableName(), this.splitrow, endKey,
      false, rid);
    return true;
  }
  /**
   * Calculate daughter regionid to use.
   * @param hri Parent {@link HRegionInfo}
   * @return Daughter region id (timestamp) to use.
   */
  private static long getDaughterRegionIdTimestamp(final HRegionInfo hri) {
    long rid = EnvironmentEdgeManager.currentTimeMillis();
    // Region id is a timestamp.  Don't let it be less than the parent's id,
    // else the daughters can sort to the wrong place in .META.
    if (rid < hri.getRegionId()) {
      LOG.warn("Clock skew; parent regions id is " + hri.getRegionId() +
        " but current time here is " + rid);
      rid = hri.getRegionId() + 1;
    }
    return rid;
  }

  private static IOException closedByOtherException = new IOException(
      "Failed to close region: already closed by another thread");
  /**
   * Prepare the daughter regions and region files: mark the parent as
   * splitting in zk, close and offline it, split its store files and create
   * the two daughter regions.
   * @param server Hosting server instance.  Can be null when testing (won't
   * try and update in zk if a null server)
   * @param services Used to online/offline regions.
   * @return Daughter regions; they are not yet open.
   * @throws IOException If thrown, transaction failed.  Call
   * {@link #rollback(Server, RegionServerServices)}
   */
  PairOfSameType<HRegion> createDaughters(final Server server,
      final RegionServerServices services) throws IOException {
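    // Overall flow, each step journaled so rollback() knows how far to undo:
    // set the SPLITTING znode, create the temporary split dir, close and
    // offline the parent, write reference files for each store file, create
    // the daughter regions, then (past the point of no return) edit .META.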
    LOG.info("Starting split of region " + this.parent);
    if ((server != null && server.isStopped()) ||
        (services != null && services.isStopping())) {
      throw new IOException("Server is stopped or stopping");
    }
    assert !this.parent.lock.writeLock().isHeldByCurrentThread():
      "Unsafe to hold write lock while performing RPCs";

    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
      this.parent.getCoprocessorHost().preSplit();
    }

    // If true, there is no cluster to write meta edits to or zk to coordinate with.
    boolean testing = server == null? true:
      server.getConfiguration().getBoolean("hbase.testing.nocluster", false);
    this.fileSplitTimeout = testing ? this.fileSplitTimeout :
      server.getConfiguration().getLong("hbase.regionserver.fileSplitTimeout",
        this.fileSplitTimeout);

    // Set ephemeral SPLITTING znode up in zk.  Mocked servers sometimes don't
    // have zookeeper, so skip the znode dance in that case.
    if (server != null && server.getZooKeeper() != null) {
      try {
        createNodeSplitting(server.getZooKeeper(),
          this.parent.getRegionInfo(), server.getServerName());
      } catch (KeeperException e) {
        throw new IOException("Failed creating SPLITTING znode on " +
          this.parent.getRegionNameAsString(), e);
      }
    }
    this.journal.add(JournalEntry.SET_SPLITTING_IN_ZK);
    if (server != null && server.getZooKeeper() != null) {
      try {
        // Transition node from SPLITTING to SPLITTING and pick up the znode
        // version; it is used later when we transition to SPLIT.
        this.znodeVersion = transitionNodeSplitting(server.getZooKeeper(),
          this.parent.getRegionInfo(), server.getServerName(), -1);
      } catch (KeeperException e) {
        throw new IOException("Failed setting SPLITTING znode on "
          + this.parent.getRegionNameAsString(), e);
      }
    }
    createSplitDir(this.parent.getFilesystem(), this.splitdir);
    this.journal.add(JournalEntry.CREATE_SPLIT_DIR);

    List<StoreFile> hstoreFilesToSplit = null;
    Exception exceptionToThrow = null;
    try {
      hstoreFilesToSplit = this.parent.close(false);
    } catch (Exception e) {
      exceptionToThrow = e;
    }
    if (exceptionToThrow == null && hstoreFilesToSplit == null) {
      // The region was closed by a concurrent thread.  We can't continue
      // with the split, instead we must just abandon the split.  If we
      // reopen or split this could cause problems because the region has
      // probably already been moved to a different server, or is in the
      // process of moving to a different server.
      exceptionToThrow = closedByOtherException;
    }
    if (exceptionToThrow != closedByOtherException) {
      this.journal.add(JournalEntry.CLOSED_PARENT_REGION);
    }
    if (exceptionToThrow != null) {
      if (exceptionToThrow instanceof IOException) throw (IOException)exceptionToThrow;
      throw new IOException(exceptionToThrow);
    }

    if (!testing) {
      services.removeFromOnlineRegions(this.parent.getRegionInfo().getEncodedName());
    }
    this.journal.add(JournalEntry.OFFLINED_PARENT);

    // splitStoreFiles creates daughter region dirs under the parent splits dir.
    // Nothing to unroll here on failure; cleanup of CREATE_SPLIT_DIR will
    // clean this up.
    splitStoreFiles(this.splitdir, hstoreFilesToSplit);

    // Log to the journal that we are creating region A, the first daughter
    // region.  We could fail halfway through.  If we do, we could have left
    // stuff in the fs that needs cleanup -- a storefile or two.  That is why
    // we add the entry to the journal BEFORE rather than AFTER the change.
    this.journal.add(JournalEntry.STARTED_REGION_A_CREATION);
    HRegion a = createDaughterRegion(this.hri_a, this.parent.rsServices);

    // Ditto for region B.
    this.journal.add(JournalEntry.STARTED_REGION_B_CREATION);
    HRegion b = createDaughterRegion(this.hri_b, this.parent.rsServices);

    // This is the point of no return.  Once PONR is in the journal, rollback()
    // refuses to undo anything and subsequent failures have to be resolved by
    // crashing out this regionserver so the master's shutdown handling can fix
    // up the half-done split.  PONR is journaled BEFORE the .META. edit below
    // on purpose: if the edit times out but actually goes through, we must not
    // roll back the daughter regions.
    this.journal.add(JournalEntry.PONR);

    // Edit parent in meta.  Offlines parent region and adds splita and splitb.
    if (!testing) {
      MetaEditor.offlineParentInMeta(server.getCatalogTracker(),
        this.parent.getRegionInfo(), a.getRegionInfo(), b.getRegionInfo());
    }
    return new PairOfSameType<HRegion>(a, b);
  }
  /**
   * Open the daughter regions on this server and update .META.
   * @param server Hosting server instance.  Can be null when testing.
   * @param services Used to online/offline regions.  Can be null when testing.
   * @param a first daughter region
   * @param b second daughter region
   * @throws IOException If thrown, transaction failed.  Call
   * {@link #rollback(Server, RegionServerServices)}
   */
  void openDaughters(final Server server,
      final RegionServerServices services, HRegion a, HRegion b)
      throws IOException {
    boolean stopped = server != null && server.isStopped();
    boolean stopping = services != null && services.isStopping();
    if (stopped || stopping) {
      LOG.info("Not opening daughters " +
          b.getRegionInfo().getRegionNameAsString() +
          " and " +
          a.getRegionInfo().getRegionNameAsString() +
          " because stopping=" + stopping + ", stopped=" + stopped);
    } else {
      // Open daughters in parallel.
      DaughterOpener aOpener = new DaughterOpener(server, a);
      DaughterOpener bOpener = new DaughterOpener(server, b);
      aOpener.start();
      bOpener.start();
      try {
        aOpener.join();
        bOpener.join();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Interrupted " + e.getMessage());
      }
      if (aOpener.getException() != null) {
        throw new IOException("Failed " +
          aOpener.getName(), aOpener.getException());
      }
      if (bOpener.getException() != null) {
        throw new IOException("Failed " +
          bOpener.getName(), bOpener.getException());
      }
      if (services != null) {
        try {
          // Add the daughters to .META. and then to this server's set of
          // online regions.
          services.postOpenDeployTasks(b, server.getCatalogTracker(), true);
          services.addToOnlineRegions(b);
          services.postOpenDeployTasks(a, server.getCatalogTracker(), true);
          services.addToOnlineRegions(a);
        } catch (KeeperException ke) {
          throw new IOException(ke);
        }
      }
    }
  }
  /**
   * Finish off the split transaction: transition the split znode to SPLIT,
   * wait on the master to process it, and run the postSplit coprocessor hook.
   * @param server Hosting server instance.  Can be null when testing.
   * @param services Used to online/offline regions.  Can be null when testing.
   * @param a first daughter region
   * @param b second daughter region
   * @throws IOException If thrown, transaction failed.  Call
   * {@link #rollback(Server, RegionServerServices)}
   */
  void transitionZKNode(final Server server,
      final RegionServerServices services, HRegion a, HRegion b)
      throws IOException {
    // Tell master about split by updating zk.  If we fail, abort.
    if (server != null && server.getZooKeeper() != null) {
      try {
        this.znodeVersion = transitionNodeSplit(server.getZooKeeper(),
          parent.getRegionInfo(), a.getRegionInfo(), b.getRegionInfo(),
          server.getServerName(), this.znodeVersion);

        int spins = 0;
        // Now wait for the master to process the split.  We know it is done
        // when the tickle below returns -1 (the znode is gone), or when this
        // server is stopping.
        do {
          if (spins % 10 == 0) {
            LOG.debug("Still waiting on the master to process the split for " +
                this.parent.getRegionInfo().getEncodedName());
          }
          Thread.sleep(100);
          // -1 means the transition failed, typically because the master has
          // deleted the znode after processing the split.
          this.znodeVersion = tickleNodeSplit(server.getZooKeeper(),
            parent.getRegionInfo(), a.getRegionInfo(), b.getRegionInfo(),
            server.getServerName(), this.znodeVersion);
          spins++;
        } while (this.znodeVersion != -1 && !server.isStopped()
            && !services.isStopping());
      } catch (Exception e) {
        if (e instanceof InterruptedException) {
          Thread.currentThread().interrupt();
        }
        throw new IOException("Failed telling master about split", e);
      }
    }

    // Coprocessor callback
    if (this.parent.getCoprocessorHost() != null) {
      this.parent.getCoprocessorHost().postSplit(a,b);
    }

    // Leave the split dir in place; it is cleaned up later along with the
    // parent region directory once the daughters no longer reference it.
  }
  /**
   * Run the transaction.
   * @param server Hosting server instance.  Can be null when testing (won't
   * try and update in zk if a null server)
   * @param services Used to online/offline regions.
   * @return Regions created
   * @throws IOException If thrown, transaction failed.  Call
   * {@link #rollback(Server, RegionServerServices)}
   * @see #rollback(Server, RegionServerServices)
   */
  public PairOfSameType<HRegion> execute(final Server server,
      final RegionServerServices services)
      throws IOException {
    PairOfSameType<HRegion> regions = createDaughters(server, services);
    openDaughters(server, services, regions.getFirst(), regions.getSecond());
    transitionZKNode(server, services, regions.getFirst(), regions.getSecond());
    return regions;
  }
  /*
   * Opens a daughter region in its own thread.  Any failure is recorded and
   * can be retrieved afterwards via getException().
   */
  class DaughterOpener extends HasThread {
    private final Server server;
    private final HRegion r;
    private Throwable t = null;

    DaughterOpener(final Server s, final HRegion r) {
      super((s == null? "null-services": s.getServerName()) +
        "-daughterOpener=" + r.getRegionInfo().getEncodedName());
      setDaemon(true);
      this.server = s;
      this.r = r;
    }

    /**
     * @return Null if the open succeeded, else the exception that caused it
     * to fail.  Call after the thread has finished or you may get a stale view
     * of the result.
     */
    Throwable getException() {
      return this.t;
    }

    @Override
    public void run() {
      try {
        openDaughterRegion(this.server, r);
      } catch (Throwable t) {
        this.t = t;
      }
    }
  }
  /**
   * Open a daughter region.  Passes a {@link LoggingProgressable} so a
   * long-running open periodically reports progress.
   * @param server Hosting server instance.  Can be null when testing.
   * @param daughter daughter region to open
   * @throws IOException
   * @throws KeeperException
   */
  void openDaughterRegion(final Server server, final HRegion daughter)
      throws IOException, KeeperException {
    HRegionInfo hri = daughter.getRegionInfo();
    LoggingProgressable reporter = server == null? null:
      new LoggingProgressable(hri, server.getConfiguration());
    daughter.openHRegion(reporter);
  }
  static class LoggingProgressable implements CancelableProgressable {
    private final HRegionInfo hri;
    private long lastLog = -1;
    private final long interval;

    LoggingProgressable(final HRegionInfo hri, final Configuration c) {
      this.hri = hri;
      this.interval = c.getLong("hbase.regionserver.split.daughter.open.log.interval",
        10000);
    }

    @Override
    public boolean progress() {
      long now = System.currentTimeMillis();
      if (now - lastLog > this.interval) {
        LOG.info("Opening " + this.hri.getRegionNameAsString());
        this.lastLog = now;
      }
      return true;
    }
  }

  private static Path getSplitDir(final HRegion r) {
    return new Path(r.getRegionDir(), SPLITDIR);
  }
  /**
   * Create the temporary directory split data is written into.  If it already
   * exists (e.g. left over from a previous failed split), it is deleted and
   * recreated.
   * @param fs Filesystem to use
   * @param splitdir Directory to store temporary split data in
   * @throws IOException If we fail to delete or create <code>splitdir</code>
   * @see #cleanupSplitDir(FileSystem, Path)
   */
  void createSplitDir(final FileSystem fs, final Path splitdir)
      throws IOException {
    if (fs.exists(splitdir)) {
      LOG.info("The " + splitdir
          + " directory exists. Hence deleting it to recreate it");
      if (!HBaseFileSystem.deleteDirFromFileSystem(fs, splitdir)) {
        throw new IOException("Failed deletion of " + splitdir
            + " before creating them again.");
      }
    }
    if (!HBaseFileSystem.makeDirOnFileSystem(fs, splitdir))
      throw new IOException("Failed create of " + splitdir);
  }
  private static void cleanupSplitDir(final FileSystem fs, final Path splitdir)
      throws IOException {
    // The split dir may already be gone; don't require it to exist.
    deleteDir(fs, splitdir, false);
  }
  /**
   * @param fs Filesystem to use
   * @param dir Directory to delete
   * @param mustPreExist If true, we'll throw exception if <code>dir</code>
   * does not preexist, else we'll just pass.
   * @throws IOException Thrown if we fail to delete passed <code>dir</code>
   */
  private static void deleteDir(final FileSystem fs, final Path dir,
      final boolean mustPreExist)
      throws IOException {
    if (!fs.exists(dir)) {
      if (mustPreExist) throw new IOException(dir.toString() + " does not exist!");
    } else if (!HBaseFileSystem.deleteDirFromFileSystem(fs, dir)) {
      throw new IOException("Failed delete of " + dir);
    }
  }
  private void splitStoreFiles(final Path splitdir,
      final List<StoreFile> hstoreFilesToSplit)
      throws IOException {
    if (hstoreFilesToSplit == null) {
      // Could be null because close didn't succeed; treat it as fatal.
      throw new IOException("Close returned empty list of StoreFiles");
    }
    // Set up a thread pool with one slot per file to split, fire everything
    // off, wait for completion, then check for any exception.
    int nbFiles = hstoreFilesToSplit.size();
    if (nbFiles == 0) {
      // No files to split.
      return;
    }
    ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
    builder.setNameFormat("StoreFileSplitter-%1$d");
    ThreadFactory factory = builder.build();
    ThreadPoolExecutor threadPool =
      (ThreadPoolExecutor) Executors.newFixedThreadPool(nbFiles, factory);
    List<Future<Void>> futures = new ArrayList<Future<Void>>(nbFiles);

    // Split each store file.
    for (StoreFile sf: hstoreFilesToSplit) {
      StoreFileSplitter sfs = new StoreFileSplitter(sf, splitdir);
      futures.add(threadPool.submit(sfs));
    }
    // Shutdown the pool
    threadPool.shutdown();

    // Wait for all the tasks to finish, up to fileSplitTimeout milliseconds.
    try {
      boolean stillRunning = !threadPool.awaitTermination(
          this.fileSplitTimeout, TimeUnit.MILLISECONDS);
      if (stillRunning) {
        threadPool.shutdownNow();
        // Wait for the thread pool to shut down completely.
        while (!threadPool.isTerminated()) {
          Thread.sleep(50);
        }
        throw new IOException("Took too long to split the" +
            " files and create the references, aborting split");
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw new IOException("Interrupted while waiting for file splitters", e);
    }

    // Look for any exception.
    for (Future<Void> future: futures) {
      try {
        future.get();
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException(
            "Interrupted while trying to get the results of file splitters", e);
      } catch (ExecutionException e) {
        throw new IOException(e);
      }
    }
  }
  private void splitStoreFile(final StoreFile sf, final Path splitdir)
      throws IOException {
    FileSystem fs = this.parent.getFilesystem();
    byte [] family = sf.getFamily();
    // Write a bottom-half reference for daughter A and a top-half reference
    // for daughter B under the split dir; the store file data itself is not
    // rewritten.
    String encoded = this.hri_a.getEncodedName();
    Path storedir = Store.getStoreHomedir(splitdir, encoded, family);
    StoreFile.split(fs, storedir, sf, this.splitrow, Range.bottom);
    encoded = this.hri_b.getEncodedName();
    storedir = Store.getStoreHomedir(splitdir, encoded, family);
    StoreFile.split(fs, storedir, sf, this.splitrow, Range.top);
  }
  /**
   * Utility class used to do the file splitting / reference writing
   * in parallel instead of sequentially.
   */
  class StoreFileSplitter implements Callable<Void> {

    private final StoreFile sf;
    private final Path splitdir;

    /**
     * Constructor that takes what it needs to split
     * @param sf which file
     * @param splitdir where the splitting is done
     */
    public StoreFileSplitter(final StoreFile sf, final Path splitdir) {
      this.sf = sf;
      this.splitdir = splitdir;
    }

    public Void call() throws IOException {
      splitStoreFile(sf, splitdir);
      return null;
    }
  }
  /**
   * @param hri Spec. for daughter region to open.
   * @param rsServices RegionServerServices this region should use.
   * @return Created daughter HRegion.
   * @throws IOException
   * @see #cleanupDaughterRegion(FileSystem, Path, String)
   */
  HRegion createDaughterRegion(final HRegionInfo hri,
      final RegionServerServices rsServices)
      throws IOException {
    FileSystem fs = this.parent.getFilesystem();
    Path regionDir = getSplitDirForDaughter(this.parent.getFilesystem(),
      this.splitdir, hri);
    HRegion r = HRegion.newHRegion(this.parent.getTableDir(),
      this.parent.getLog(), fs, this.parent.getBaseConf(),
      hri, this.parent.getTableDesc(), rsServices);
    // Seed each daughter with roughly half of the parent's request metrics.
    long halfParentReadRequestCount = this.parent.getReadRequestsCount() / 2;
    r.readRequestsCount.set(halfParentReadRequestCount);
    r.setOpMetricsReadRequestCount(halfParentReadRequestCount);
    long halfParentWriteRequest = this.parent.getWriteRequestsCount() / 2;
    r.writeRequestsCount.set(halfParentWriteRequest);
    r.setOpMetricsWriteRequestCount(halfParentWriteRequest);
    // Move the files written under the split dir into the daughter's own dir.
    HRegion.moveInitialFilesIntoPlace(fs, regionDir, r.getRegionDir());
    return r;
  }
  private static void cleanupDaughterRegion(final FileSystem fs,
      final Path tabledir, final String encodedName)
      throws IOException {
    Path regiondir = HRegion.getRegionDir(tabledir, encodedName);
    // Dir may not preexist.
    deleteDir(fs, regiondir, false);
  }
  /*
   * Get the directory a daughter's split data is written into under the
   * parent's split dir.
   * @param fs Filesystem to use
   * @param splitdir Parent region split data directory
   * @param hri Daughter region
   * @return Path to the daughter's working directory under <code>splitdir</code>
   * @throws IOException
   */
  private static Path getSplitDirForDaughter(final FileSystem fs,
      final Path splitdir, final HRegionInfo hri)
      throws IOException {
    return new Path(splitdir, hri.getEncodedName());
  }
  /**
   * Roll back a failed transaction by undoing the journaled steps in reverse.
   * @param server Hosting server instance (May be null when testing).
   * @param services Used to online/offline regions.  Can be null when testing.
   * @return <code>true</code> if we successfully rolled back; <code>false</code>
   * if the transaction got past the point of no return and the caller now
   * needs to abort the server to minimize damage.
   * @throws IOException If thrown, rollback failed.  Take drastic action.
   */
  public boolean rollback(final Server server, final RegionServerServices services)
      throws IOException {
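    // Walk the journal in reverse, undoing each step that completed.  Cleanup
    // is best effort: if we hit the PONR entry the split can no longer be
    // undone and the caller must abort the server instead.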
    boolean result = true;
    FileSystem fs = this.parent.getFilesystem();
    ListIterator<JournalEntry> iterator =
      this.journal.listIterator(this.journal.size());
    // Iterate in reverse.
    while (iterator.hasPrevious()) {
      JournalEntry je = iterator.previous();
      switch(je) {

      case SET_SPLITTING_IN_ZK:
        if (server != null && server.getZooKeeper() != null) {
          cleanZK(server, this.parent.getRegionInfo());
        }
        break;

      case CREATE_SPLIT_DIR:
        this.parent.writestate.writesEnabled = true;
        cleanupSplitDir(fs, this.splitdir);
        break;

      case CLOSED_PARENT_REGION:
        try {
          // The parent was closed earlier in the transaction; reinitialize it
          // so it can be put back online.
          this.parent.initialize();
        } catch (IOException e) {
          LOG.error("Failed rollbacking CLOSED_PARENT_REGION of region " +
            this.parent.getRegionNameAsString(), e);
          throw new RuntimeException(e);
        }
        break;

      case STARTED_REGION_A_CREATION:
        cleanupDaughterRegion(fs, this.parent.getTableDir(),
          this.hri_a.getEncodedName());
        break;

      case STARTED_REGION_B_CREATION:
        cleanupDaughterRegion(fs, this.parent.getTableDir(),
          this.hri_b.getEncodedName());
        break;

      case OFFLINED_PARENT:
        if (services != null) services.addToOnlineRegions(this.parent);
        break;

      case PONR:
        // We got to the point-of-no-return; cleanup is no longer an option.
        // Return false so the caller takes drastic action (aborts the server);
        // the resulting server shutdown processing finishes up the split.
        return false;

      default:
        throw new RuntimeException("Unhandled journal entry: " + je);
      }
    }
    return result;
  }
  HRegionInfo getFirstDaughter() {
    return hri_a;
  }

  HRegionInfo getSecondDaughter() {
    return hri_b;
  }

  Path getSplitDir() {
    return this.splitdir;
  }
  /**
   * Clean up any split detritus (the temporary split dir and any half-created
   * daughter region dirs) left over by a previously failed split of the
   * passed region.
   * @param r the region to clean up after
   * @throws IOException
   */
  static void cleanupAnySplitDetritus(final HRegion r) throws IOException {
    Path splitdir = getSplitDir(r);
    FileSystem fs = r.getFilesystem();
    if (!fs.exists(splitdir)) return;
    // The split dir holds one subdirectory per daughter region we started to
    // create.  Remove any matching daughter region dirs under the table dir
    // first, then remove the split dir itself.
    FileStatus [] daughters = fs.listStatus(splitdir, new FSUtils.DirFilter(fs));
    for (int i = 0; i < daughters.length; i++) {
      cleanupDaughterRegion(fs, r.getTableDir(),
        daughters[i].getPath().getName());
    }
    cleanupSplitDir(r.getFilesystem(), splitdir);
    LOG.info("Cleaned up old failed split transaction detritus: " + splitdir);
  }
  private static void cleanZK(final Server server, final HRegionInfo hri) {
    try {
      // Only delete if the znode is in the expected SPLITTING state; it may
      // have been hijacked.
      ZKAssign.deleteNode(server.getZooKeeper(), hri.getEncodedName(),
        EventType.RS_ZK_REGION_SPLITTING);
    } catch (KeeperException e) {
      server.abort("Failed cleanup of " + hri.getRegionNameAsString(), e);
    }
  }
  /**
   * Creates a new ephemeral node in the SPLITTING state for the parent region.
   * The node is ephemeral so it goes away if the regionserver dies mid-split.
   *
   * <p>Does not transition nodes from other states.  If a node already exists
   * for this region, the create fails and an IOException is thrown.
   *
   * @param zkw zk reference
   * @param region region to be put into SPLITTING state
   * @param serverName server event originates from
   * @throws KeeperException
   * @throws IOException if the znode already exists or cannot be created
   */
  void createNodeSplitting(final ZooKeeperWatcher zkw, final HRegionInfo region,
      final ServerName serverName) throws KeeperException, IOException {
    LOG.debug(zkw.prefix("Creating ephemeral node for " +
      region.getEncodedName() + " in SPLITTING state"));
    RegionTransitionData data =
      new RegionTransitionData(EventType.RS_ZK_REGION_SPLITTING,
        region.getRegionName(), serverName);

    String node = ZKAssign.getNodeName(zkw, region.getEncodedName());
    if (!ZKUtil.createEphemeralNodeAndWatch(zkw, node, data.getBytes())) {
      throw new IOException("Failed create of ephemeral " + node);
    }
  }
  /**
   * Transitions the existing node for the parent region, currently in the
   * SPLITTING state, to the SPLIT state.  The SPLIT znode carries the
   * serialized daughter regions as payload; the master removes the SPLIT
   * znode once it has processed the split.
   *
   * <p>Does not transition nodes from other states.  If the node could not be
   * transitioned, -1 is returned; otherwise the version of the node after the
   * transition is returned.
   *
   * @param zkw zk reference
   * @param parent region being split
   * @param a first daughter region
   * @param b second daughter region
   * @param serverName server event originates from
   * @param znodeVersion expected version of the znode
   * @return version of node after transition, -1 if unsuccessful transition
   * @throws KeeperException if unexpected zookeeper exception
   * @throws IOException if the daughter regions cannot be serialized
   */
  private static int transitionNodeSplit(ZooKeeperWatcher zkw,
      HRegionInfo parent, HRegionInfo a, HRegionInfo b, ServerName serverName,
      final int znodeVersion)
      throws KeeperException, IOException {
    byte [] payload = Writables.getBytes(a, b);
    return ZKAssign.transitionNode(zkw, parent, serverName,
      EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLIT,
      znodeVersion, payload);
  }
  /**
   * Re-asserts the SPLITTING state on the parent's znode (a SPLITTING to
   * SPLITTING transition) and returns the resulting znode version, which later
   * transitions use as their expected version.
   * @param zkw zk reference
   * @param parent region being split
   * @param serverName server event originates from
   * @param version expected version of the znode, or -1 if there is none
   * @return version of node after transition, -1 if unsuccessful transition
   * @throws KeeperException
   * @throws IOException
   */
  int transitionNodeSplitting(final ZooKeeperWatcher zkw, final HRegionInfo parent,
      final ServerName serverName, final int version) throws KeeperException, IOException {
    return ZKAssign.transitionNode(zkw, parent, serverName,
      EventType.RS_ZK_REGION_SPLITTING, EventType.RS_ZK_REGION_SPLITTING, version);
  }
  /*
   * Re-asserts the SPLIT state on the parent's znode (a SPLIT to SPLIT
   * transition).  Returns -1 once the znode is gone, which is how
   * transitionZKNode() knows the master has processed the split.
   */
  private static int tickleNodeSplit(ZooKeeperWatcher zkw,
      HRegionInfo parent, HRegionInfo a, HRegionInfo b, ServerName serverName,
      final int znodeVersion)
      throws KeeperException, IOException {
    byte [] payload = Writables.getBytes(a, b);
    return ZKAssign.transitionNode(zkw, parent, serverName,
      EventType.RS_ZK_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT,
      znodeVersion, payload);
  }
}