package org.apache.hadoop.hbase.regionserver.wal;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

import java.io.IOException;
import java.lang.reflect.Method;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.io.SequenceFile;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;

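/** JUnit tests for HLog, the HBase write-ahead log, against a mini DFS cluster. */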
public class TestHLog {
  private static final Log LOG = LogFactory.getLog(TestHLog.class);
  {
    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger)HLog.LOG).getLogger().setLevel(Level.ALL);
  }

  private static Configuration conf;
  private static FileSystem fs;
  private static Path dir;
  private static MiniDFSCluster cluster;
  private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  private static Path hbaseDir;
  private static Path oldLogDir;

  @Before
  public void setUp() throws Exception {
    // Start each test from an empty filesystem root.
    FileStatus[] entries = fs.listStatus(new Path("/"));
    for (FileStatus entry : entries) {
      fs.delete(entry.getPath(), true);
    }
  }

  @After
  public void tearDown() throws Exception {
  }

  @BeforeClass
  public static void setUpBeforeClass() throws Exception {
    // Use a small block size so the tests can force block-boundary crossings
    // and log rolls quickly.
    TEST_UTIL.getConfiguration().setInt("dfs.blocksize", 1024 * 1024);
    TEST_UTIL.getConfiguration().setInt(
        "hbase.regionserver.flushlogentries", 1);
    // Append/sync support is needed by the WAL sync and recovery tests.
    TEST_UTIL.getConfiguration().setBoolean("dfs.support.append", true);
    // Shorten heartbeat and socket timeouts so datanode failures are noticed fast.
    TEST_UTIL.getConfiguration().setInt("heartbeat.recheck.interval", 5000);
    TEST_UTIL.getConfiguration().setInt("dfs.heartbeat.interval", 1);
    TEST_UTIL.getConfiguration().setInt("dfs.socket.timeout", 5000);
    // Fail fast rather than retrying during the recovery tests.
    TEST_UTIL.getConfiguration().setInt("ipc.client.connect.max.retries", 1);
    TEST_UTIL.getConfiguration().setInt(
        "dfs.client.block.recovery.retries", 1);
    TEST_UTIL.startMiniCluster(3);

    conf = TEST_UTIL.getConfiguration();
    cluster = TEST_UTIL.getDFSCluster();
    fs = cluster.getFileSystem();

    hbaseDir = new Path(TEST_UTIL.getConfiguration().get("hbase.rootdir"));
    oldLogDir = new Path(hbaseDir, ".oldlogs");
    dir = new Path(hbaseDir, getName());
  }

  private static String getName() {
    // Currently hard-coded; all tests share the same table/directory name.
    return "TestHLog";
  }

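  /**
   * Write edits for several regions, rolling the log between batches, then
   * split the log directory and verify each region gets its share of the edits.
   */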
  @Test
  public void testSplit() throws IOException {
    final byte [] tableName = Bytes.toBytes(getName());
    final byte [] rowName = tableName;
    Path logdir = new Path(hbaseDir, HConstants.HREGION_LOGDIR_NAME);
    HLog log = new HLog(fs, logdir, oldLogDir, conf);
    final int howmany = 3;
    HRegionInfo[] infos = new HRegionInfo[howmany];
    Path tabledir = new Path(hbaseDir, getName());
    fs.mkdirs(tabledir);
    for (int i = 0; i < howmany; i++) {
      infos[i] = new HRegionInfo(new HTableDescriptor(tableName),
          Bytes.toBytes("" + i), Bytes.toBytes("" + (i + 1)), false);
      fs.mkdirs(new Path(tabledir, infos[i].getEncodedName()));
      LOG.info("allo " + new Path(tabledir, infos[i].getEncodedName()).toString());
    }
    // Add edits for the three regions, rolling the log after each batch.
    try {
      for (int ii = 0; ii < howmany; ii++) {
        for (int i = 0; i < howmany; i++) {
          for (int j = 0; j < howmany; j++) {
            WALEdit edit = new WALEdit();
            byte [] family = Bytes.toBytes("column");
            byte [] qualifier = Bytes.toBytes(Integer.toString(j));
            byte [] column = Bytes.toBytes("column:" + Integer.toString(j));
            edit.add(new KeyValue(rowName, family, qualifier,
                System.currentTimeMillis(), column));
            LOG.info("Region " + i + ": " + edit);
            log.append(infos[i], tableName, edit,
                System.currentTimeMillis());
          }
        }
        log.rollWriter();
      }
      log.close();
      HLogSplitter logSplitter = HLogSplitter.createLogSplitter(conf,
          hbaseDir, logdir, oldLogDir, fs);
      List<Path> splits = logSplitter.splitLog();
      verifySplits(splits, howmany);
      log = null;
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
    }
  }

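  /**
   * Exercise WAL sync: append edits, sync, and confirm a reader opened on the
   * live log sees them, including after writing values larger than the DFS
   * block size. The "Broken_" prefix in the name marks this as a
   * known-problematic test of the underlying append/sync support.
   */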
  @Test
  public void Broken_testSync() throws Exception {
    byte [] bytes = Bytes.toBytes(getName());
    // First verify that plain stream write/sync/read works.
    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(bytes);
    out.sync();
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(bytes.length, read);
    out.close();
    in.close();
    Path subdir = new Path(dir, "hlogdir");
    HLog wal = new HLog(fs, subdir, oldLogDir, conf);
    final int total = 20;

    HRegionInfo info = new HRegionInfo(new HTableDescriptor(bytes),
        null, null, false);

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
      wal.append(info, bytes, kvs, System.currentTimeMillis());
    }
    // Sync, then open a reader and check all appended edits are visible.
    wal.sync();
    Path walPath = wal.computeFilename();
    HLog.Reader reader = HLog.getReader(fs, walPath, conf);
    int count = 0;
    HLog.Entry entry = new HLog.Entry();
    while ((entry = reader.next(entry)) != null) count++;
    assertEquals(total, count);
    reader.close();

    // Append more edits without syncing; a fresh reader should see at least
    // the first batch.
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), bytes, bytes));
      wal.append(info, bytes, kvs, System.currentTimeMillis());
    }
    reader = HLog.getReader(fs, walPath, conf);
    count = 0;
    while ((entry = reader.next(entry)) != null) count++;
    assertTrue(count >= total);
    reader.close();

    // After a sync, a reader should see both batches.
    wal.sync();
    reader = HLog.getReader(fs, walPath, conf);
    count = 0;
    while ((entry = reader.next(entry)) != null) count++;
    assertEquals(total * 2, count);
    reader.close();

    // Now write values larger than the 1MB dfs block size so the log crosses
    // a block boundary, then sync and read back.
    final byte [] value = new byte[1025 * 1024];
    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), bytes, value));
      wal.append(info, bytes, kvs, System.currentTimeMillis());
    }
    wal.sync();
    reader = HLog.getReader(fs, walPath, conf);
    count = 0;
    while ((entry = reader.next(entry)) != null) count++;
    assertEquals(total * 3, count);
    reader.close();

    // Close the WAL and confirm a reader still sees the same count of edits.
    wal.close();
    reader = HLog.getReader(fs, walPath, conf);
    count = 0;
    while ((entry = reader.next(entry)) != null) count++;
    assertEquals(total * 3, count);
    reader.close();
  }

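  /**
   * Verify that HLog.findMemstoresWithEditsEqualOrOlderThan returns exactly
   * the regions whose last sequence id is less than or equal to the passed id.
   */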
  @Test
  public void testFindMemstoresWithEditsEqualOrOlderThan() throws IOException {
    Map<byte [], Long> regionsToSeqids = new HashMap<byte [], Long>();
    for (int i = 0; i < 10; i++) {
      Long l = Long.valueOf(i);
      regionsToSeqids.put(l.toString().getBytes(), l);
    }
    byte [][] regions =
      HLog.findMemstoresWithEditsEqualOrOlderThan(1, regionsToSeqids);
    assertEquals(2, regions.length);
    assertTrue(Bytes.equals(regions[0], "0".getBytes()) ||
        Bytes.equals(regions[0], "1".getBytes()));
    regions = HLog.findMemstoresWithEditsEqualOrOlderThan(3, regionsToSeqids);
    int count = 4;
    assertEquals(count, regions.length);
    for (int i = 0; i < count; i++) {
      assertTrue(Bytes.equals(regions[i], "0".getBytes()) ||
          Bytes.equals(regions[i], "1".getBytes()) ||
          Bytes.equals(regions[i], "2".getBytes()) ||
          Bytes.equals(regions[i], "3".getBytes()));
    }
  }

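  /**
   * Read back each split file and check that it contains edits for exactly one
   * region, in increasing sequence-id order, with the expected number of entries.
   */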
  private void verifySplits(List<Path> splits, final int howmany)
      throws IOException {
    assertEquals(howmany, splits.size());
    for (int i = 0; i < splits.size(); i++) {
      LOG.info("Verifying=" + splits.get(i));
      HLog.Reader reader = HLog.getReader(fs, splits.get(i), conf);
      try {
        int count = 0;
        String previousRegion = null;
        long seqno = -1;
        HLog.Entry entry = new HLog.Entry();
        while ((entry = reader.next(entry)) != null) {
          HLogKey key = entry.getKey();
          String region = Bytes.toString(key.getEncodedRegionName());
          // All edits in a split file should be for the same region.
          if (previousRegion != null) {
            assertEquals(previousRegion, region);
          }
          LOG.info("oldseqno=" + seqno + ", newseqno=" + key.getLogSeqNum());
          assertTrue(seqno < key.getLogSeqNum());
          seqno = key.getLogSeqNum();
          previousRegion = region;
          count++;
        }
        assertEquals(howmany * howmany, count);
      } finally {
        reader.close();
      }
    }
  }

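  /**
   * Write edits to a WAL, then shut down the mini DFS cluster without closing
   * the log cleanly. Restart the cluster, recover the file's lease, and verify
   * that all appended edits can still be read back.
   */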
  @Test
  public void testAppendClose() throws Exception {
    byte [] tableName = Bytes.toBytes(getName());
    HRegionInfo regioninfo = new HRegionInfo(new HTableDescriptor(tableName),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false);
    Path subdir = new Path(dir, "hlogdir");
    Path archdir = new Path(dir, "hlogdir_archive");
    HLog wal = new HLog(fs, subdir, archdir, conf);
    final int total = 20;

    for (int i = 0; i < total; i++) {
      WALEdit kvs = new WALEdit();
      kvs.add(new KeyValue(Bytes.toBytes(i), tableName, tableName));
      wal.append(regioninfo, tableName, kvs, System.currentTimeMillis());
    }
    // Sync so the edits reach the datanodes before the cluster is killed.
    wal.sync();
    int namenodePort = cluster.getNameNodePort();
    final Path walPath = wal.computeFilename();

    // Stop the cluster while the log is still open, simulating a crash.
    try {
      cluster.getNameNode().setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      cluster.shutdown();
      try {
        // Close may throw because the DFS is already down; that is tolerated.
        wal.close();
      } catch (IOException e) {
        LOG.info(e);
      }
      fs.close();
      LOG.info("STOPPED first instance of the cluster");
    } finally {
      // Wait until the cluster is fully down before starting a second instance
      // on the same namenode port.
      while (cluster.isClusterUp()) {
        LOG.error("Waiting for cluster to go down");
        Thread.sleep(1000);
      }
      cluster = new MiniDFSCluster(namenodePort, conf, 5, false, true, true,
          null, null, null, null);
      cluster.waitActive();
      fs = cluster.getFileSystem();
      LOG.info("START second instance.");
    }

    // Shorten the lease period (via reflection, since the setter is not public)
    // so the namenode quickly expires the dead writer's lease.
    Method setLeasePeriod = cluster.getClass()
        .getDeclaredMethod("setLeasePeriod", new Class[]{Long.TYPE, Long.TYPE});
    setLeasePeriod.setAccessible(true);
    setLeasePeriod.invoke(cluster, new Object[]{new Long(1000), new Long(1000)});
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      LOG.info(e);
    }

    // Now try recovering the lease on the open WAL file.
    final FileSystem recoveredFs = fs;
    final Configuration rlConf = conf;

    class RecoverLogThread extends Thread {
      public Exception exception = null;
      public void run() {
        try {
          FSUtils.recoverFileLease(recoveredFs, walPath, rlConf);
        } catch (IOException e) {
          exception = e;
        }
      }
    }

    RecoverLogThread t = new RecoverLogThread();
    t.start();
    // Give recovery up to 60 seconds; fail the test if it has not finished.
    t.join(60 * 1000);
    if (t.isAlive()) {
      t.interrupt();
      throw new Exception("Timed out waiting for HLog.recoverLog()");
    }

    if (t.exception != null)
      throw t.exception;

    // Make sure all the content written before the crash can be read back.
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, walPath, conf);
    int count = 0;
    HLogKey key = HLog.newKey(conf);
    WALEdit val = new WALEdit();
    while (reader.next(key, val)) {
      count++;
      assertTrue("Should be one KeyValue per WALEdit",
          val.getKeyValues().size() == 1);
    }
    assertEquals(total, count);
    reader.close();
  }

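  /**
   * Append a single multi-column edit plus a cache-flush marker, then read the
   * log back and verify both the edit and the COMPLETE_CACHE_FLUSH entry.
   */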
  @Test
  public void testEditAdd() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog.Reader reader = null;
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      // Write columns named 0, 1, 2... with single-byte values '0', '1', '2'...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo info = new HRegionInfo(new HTableDescriptor(tableName),
          row, Bytes.toBytes(Bytes.toString(row) + "1"), false);
      log.append(info, tableName, cols, System.currentTimeMillis());
      long logSeqId = log.startCacheFlush();
      log.completeCacheFlush(info.getEncodedNameAsBytes(), tableName, logSeqId,
          info.isMetaRegion());
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert the append worked.
      reader = HLog.getReader(fs, filename, conf);
      // All columns were added in one append, so a single WAL entry holds them.
      for (int i = 0; i < 1; i++) {
        HLog.Entry entry = reader.next(null);
        if (entry == null) break;
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(),
            key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(row, kv.getRow()));
        assertEquals((byte)(i + '0'), kv.getValue()[0]);
        System.out.println(key + " " + val);
      }
      // The remaining entries should all be the cache-flush marker.
      HLog.Entry entry = null;
      while ((entry = reader.next(null)) != null) {
        HLogKey key = entry.getKey();
        WALEdit val = entry.getEdit();
        assertTrue(Bytes.equals(info.getEncodedNameAsBytes(),
            key.getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        KeyValue kv = val.getKeyValues().get(0);
        assertTrue(Bytes.equals(HLog.METAROW, kv.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, kv.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
            val.getKeyValues().get(0).getValue()));
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

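  /**
   * Like testEditAdd, but reads the appended edit back as a whole WALEdit:
   * one entry with COL_COUNT KeyValues, followed by the cache-flush marker.
   */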
  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      // Write columns named 0, 1, 2... with single-byte values '0', '1', '2'...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
            Bytes.toBytes(Integer.toString(i)),
            timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      log.append(hri, tableName, cols, System.currentTimeMillis());
      long logSeqId = log.startCacheFlush();
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert the append worked.
      reader = HLog.getReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
            entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }

      // The second entry is the cache-flush marker.
      entry = reader.next();
      assertEquals(1, entry.getEdit().size());
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
            entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
            val.getValue()));
        System.out.println(entry.getKey() + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

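  /**
   * Register a WALObserver, append COL_COUNT edits and check the observer saw
   * every one; then unregister it and confirm further appends are not counted.
   */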
  @Test
  public void testVisitors() throws Exception {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    DumbWALObserver visitor = new DumbWALObserver();
    log.registerWALActionsListener(visitor);
    long timestamp = System.currentTimeMillis();
    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    for (int i = 0; i < COL_COUNT; i++) {
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[]{ (byte)(i + '0') }));
      log.append(hri, tableName, cols, System.currentTimeMillis());
    }
    assertEquals(COL_COUNT, visitor.increments);
    log.unregisterWALActionsListener(visitor);
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, Bytes.toBytes("column"),
        Bytes.toBytes(Integer.toString(11)),
        timestamp, new byte[]{ (byte)(11 + '0') }));
    log.append(hri, tableName, cols, System.currentTimeMillis());
    // The observer was unregistered, so the last append must not be counted.
    assertEquals(COL_COUNT, visitor.increments);
  }

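  /**
   * Verify that rolled log files are cleaned up as regions are flushed: once
   * every region with edits in the old logs has been flushed, the number of
   * live log files should drop to zero.
   */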
  @Test
  public void testLogCleaning() throws Exception {
    LOG.info("testLogCleaning");

    final byte [] tableName = Bytes.toBytes("testLogCleaning");
    final byte [] tableName2 = Bytes.toBytes("testLogCleaning2");

    HLog log = new HLog(fs, dir, oldLogDir, conf);
    HRegionInfo hri = new HRegionInfo(new HTableDescriptor(tableName),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
    HRegionInfo hri2 = new HRegionInfo(new HTableDescriptor(tableName2),
        HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);

    // Add a single edit and roll; the rolled log is kept because the region
    // still has unflushed edits in it.
    addEdits(log, hri, tableName, 1);
    log.rollWriter();
    assertEquals(1, log.getNumLogFiles());

    // Add more edits and roll again; both rolled logs still hold unflushed
    // edits, so both are kept.
    addEdits(log, hri, tableName, 2);
    log.rollWriter();
    assertEquals(2, log.getNumLogFiles());

    // Now add edits for a second table as well and roll; three logs are live.
    addEdits(log, hri, tableName, 1);
    addEdits(log, hri2, tableName2, 1);
    addEdits(log, hri, tableName, 1);
    addEdits(log, hri2, tableName2, 1);
    log.rollWriter();
    assertEquals(3, log.getNumLogFiles());

    // Flush the first region and roll; logs containing only its edits can be
    // cleaned, leaving two files.
    long seqId = log.startCacheFlush();
    log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, seqId, false);
    log.rollWriter();
    assertEquals(2, log.getNumLogFiles());

    // Flush the second region too; after the roll nothing is left to replay,
    // so all log files can be cleaned up.
    seqId = log.startCacheFlush();
    log.completeCacheFlush(hri2.getEncodedNameAsBytes(), tableName2, seqId, false);
    log.rollWriter();
    assertEquals(0, log.getNumLogFiles());
  }

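  /**
   * Append <code>times</code> single-KeyValue edits for the given region to the log.
   */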
  private void addEdits(HLog log, HRegionInfo hri, byte [] tableName,
      int times) throws IOException {
    final byte [] row = Bytes.toBytes("row");
    for (int i = 0; i < times; i++) {
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      cols.add(new KeyValue(row, row, row, timestamp, row));
      log.append(hri, tableName, cols, timestamp);
    }
  }

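  /**
   * A minimal WALObserver that counts visitLogEntryBeforeWrite calls and
   * ignores all other notifications.
   */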
  static class DumbWALObserver implements WALObserver {
    int increments = 0;

    @Override
    public void visitLogEntryBeforeWrite(HRegionInfo info, HLogKey logKey,
        WALEdit logEdit) {
      increments++;
    }

    @Override
    public void logRolled(Path newFile) {
      // Not interested
    }

    @Override
    public void logRollRequested() {
      // Not interested
    }

    @Override
    public void logCloseRequested() {
      // Not interested
    }
  }
}