/**
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.catalog;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.catalog.MetaReader.Visitor;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * A tool to migrate the catalog tables (-ROOT- and .META.) from Writable
 * serialization of HRegionInfo to protobuf (PB) serialization. Rows that
 * already carry the PB magic prefix are left untouched.
 */
@Deprecated
public class MetaMigrationConvertingToPB {

  private static final Log LOG = LogFactory.getLog(MetaMigrationConvertingToPB.class);

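  /**
   * Visitor that rewrites the info:regioninfo, info:splitA and info:splitB cells
   * of each catalog row it sees using protobuf serialization.
   */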
  private static class ConvertToPBMetaVisitor implements Visitor {
    private final MasterServices services;
    private long numMigratedRows;

    public ConvertToPBMetaVisitor(MasterServices services) {
      this.services = services;
      numMigratedRows = 0;
    }

    @Override
    public boolean visit(Result r) throws IOException {
      if (r == null || r.isEmpty()) return true;
      // Look at info:regioninfo first; if it already carries the PB magic
      // prefix, this row has been migrated and can be skipped.
      byte [] hriBytes = getBytes(r, HConstants.REGIONINFO_QUALIFIER);
      if (isMigrated(hriBytes)) return true;

      // Parse the Writable-serialized HRegionInfo so it can be written back
      // in its protobuf form.
      HRegionInfo hri = parseFrom(hriBytes);

      // Build a Put carrying the PB serialization of the region info.
      Put p = MetaEditor.makePutFromRegionInfo(hri);

      // Convert the daughter-region cells (info:splitA and info:splitB) as
      // well, if they are present and not yet migrated.
      migrateSplitIfNecessary(r, p, HConstants.SPLITA_QUALIFIER);
      migrateSplitIfNecessary(r, p, HConstants.SPLITB_QUALIFIER);

      MetaEditor.putToCatalogTable(this.services.getCatalogTracker(), p);
      if (LOG.isDebugEnabled()) {
        LOG.debug("Migrated " + Bytes.toString(p.getRow()));
      }
      numMigratedRows++;
      return true;
    }
  }

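  /**
   * Rewrite the given split qualifier (info:splitA or info:splitB) on
   * <code>p</code> in PB form if the cell exists in <code>r</code> and has not
   * been migrated yet.
   */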
  static void migrateSplitIfNecessary(final Result r, final Put p, final byte [] which)
      throws IOException {
    byte [] hriSplitBytes = getBytes(r, which);
    if (!isMigrated(hriSplitBytes)) {
      // Parse the Writable serialization of the daughter region and write it
      // back in PB form.
      HRegionInfo hri = parseFrom(hriSplitBytes);
      p.add(HConstants.CATALOG_FAMILY, which, hri.toByteArray());
    }
  }

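  /**
   * Parse an HRegionInfo from the given bytes, rethrowing any
   * {@link DeserializationException} as an {@link IOException}.
   */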
  static HRegionInfo parseFrom(byte[] hriBytes) throws IOException {
    try {
      return HRegionInfo.parseFrom(hriBytes);
    } catch (DeserializationException ex) {
      throw new IOException(ex);
    }
  }

  /**
   * @param r Result to dig in.
   * @param qualifier Qualifier to look at in the passed <code>r</code>.
   * @return Bytes for an HRegionInfo or null if no bytes or empty bytes found.
   */
  static byte [] getBytes(final Result r, final byte [] qualifier) {
    byte [] hriBytes = r.getValue(HConstants.CATALOG_FAMILY, qualifier);
    if (hriBytes == null || hriBytes.length <= 0) return null;
    return hriBytes;
  }

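  /**
   * @return True if <code>hriBytes</code> is null, empty, or already carries
   * the PB magic prefix, i.e. the cell needs no migration.
   */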
  static boolean isMigrated(final byte [] hriBytes) {
    // A missing or empty cell has nothing to migrate.
    if (hriBytes == null || hriBytes.length <= 0) return true;

    return ProtobufUtil.isPBMagicPrefix(hriBytes);
  }

  /**
   * Migrate ROOT and META to PB serialization if they still hold Writable
   * serializations; a no-op when the catalog tables are already up to date.
   * @param services MasterServices used to reach the catalog tables
   * @return the number of rows migrated
   * @throws IOException if the migration fails
   */
  public static long updateRootAndMetaIfNecessary(final MasterServices services)
      throws IOException {
    if (isMetaHRIUpdated(services.getCatalogTracker())) {
      LOG.info("ROOT/META already up to date with PB serialization");
      return 0;
    }
    LOG.info("ROOT/META has Writable serializations, migrating ROOT and META to PB serialization");
    try {
      long rows = updateRootAndMeta(services);
      LOG.info("ROOT and META updated with PB serialization. Total rows updated: " + rows);
      return rows;
    } catch (IOException e) {
      LOG.warn("Update ROOT/META with PB serialization failed. " +
        "Master startup aborted.");
      throw e;
    }
  }

  /**
   * Run the migration against both ROOT and META.
   * @return the total number of rows migrated
   */
  static long updateRootAndMeta(final MasterServices masterServices)
      throws IOException {
    long rows = updateRoot(masterServices);
    rows += updateMeta(masterServices);
    return rows;
  }

  /**
   * Migrate the rows kept in ROOT to PB serialization.
   * @return the number of rows migrated
   */
  static long updateRoot(final MasterServices masterServices)
      throws IOException {
    LOG.info("Starting update of ROOT");
    ConvertToPBMetaVisitor v = new ConvertToPBMetaVisitor(masterServices);
    MetaReader.fullScan(masterServices.getCatalogTracker(), v, null);
    LOG.info("Finished update of ROOT. Total rows updated: " + v.numMigratedRows);
    return v.numMigratedRows;
  }

  /**
   * Migrate the rows kept in META to PB serialization and record the new meta
   * version once the scan completes.
   * @return the number of rows migrated
   */
  static long updateMeta(final MasterServices masterServices) throws IOException {
    LOG.info("Starting update of META");
    ConvertToPBMetaVisitor v = new ConvertToPBMetaVisitor(masterServices);
    MetaReader.fullScan(masterServices.getCatalogTracker(), v);
    // Record that META now carries PB serializations so the migration is not
    // rerun on the next master startup.
    updateRootWithMetaMigrationStatus(masterServices.getCatalogTracker());
    LOG.info("Finished update of META. Total rows updated: " + v.numMigratedRows);
    return v.numMigratedRows;
  }

  /**
   * Record {@link HConstants#META_VERSION} against the META region's row so
   * that later startups can tell the catalog has already been migrated.
   * @param catalogTracker the catalog tracker to write through
   * @throws IOException if the write fails
   */
  static void updateRootWithMetaMigrationStatus(final CatalogTracker catalogTracker)
      throws IOException {
    Put p = new Put(HRegionInfo.FIRST_META_REGIONINFO.getRegionName());
    p.add(HConstants.CATALOG_FAMILY, HConstants.META_VERSION_QUALIFIER,
        Bytes.toBytes(HConstants.META_VERSION));
    // Apply the version cell; without this write the migration status would
    // never be persisted.
    MetaEditor.putToCatalogTable(catalogTracker, p);
    LOG.info("Updated -ROOT- meta version=" + HConstants.META_VERSION);
  }

  /**
   * @param catalogTracker the catalog tracker to read through
   * @return True if the stored meta version is at least
   * {@link HConstants#META_VERSION}, i.e. the migration has already run.
   * @throws IOException if the catalog scan fails
   */
  static boolean isMetaHRIUpdated(final CatalogTracker catalogTracker) throws IOException {
    List<Result> results = MetaReader.fullScanOfMeta(catalogTracker);
    if (results == null || results.isEmpty()) {
      LOG.info(".META. is not migrated");
      return false;
    }
    // Read the version cell from the first row returned.
    Result r = results.get(0);
    byte [] value = r.getValue(HConstants.CATALOG_FAMILY,
        HConstants.META_VERSION_QUALIFIER);
    short version = value == null || value.length <= 0 ? -1 : Bytes.toShort(value);

    boolean migrated = version >= HConstants.META_VERSION;
    LOG.info("Meta version=" + version + "; migrated=" + migrated);
    return migrated;
  }
}