// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: StorageClusterStatusMessage.proto
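//
// The .proto schema, reconstructed here for reference from the field
// comments in the generated code below (a convenience sketch, not part
// of the original protoc output):
//
//   message StorageClusterStatus {
//     message Region {
//       required bytes name = 1;
//       optional int32 stores = 2;
//       optional int32 storefiles = 3;
//       optional int32 storefileSizeMB = 4;
//       optional int32 memstoreSizeMB = 5;
//       optional int32 storefileIndexSizeMB = 6;
//     }
//     message Node {
//       required string name = 1;
//       optional int64 startCode = 2;
//       optional int32 requests = 3;
//       optional int32 heapSizeMB = 4;
//       optional int32 maxHeapSizeMB = 5;
//       repeated Region regions = 6;
//     }
//     repeated Node liveNodes = 1;
//     repeated string deadNodes = 2;
//     optional int32 regions = 3;
//     optional int32 requests = 4;
//     optional double averageLoad = 5;
//   }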

package org.apache.hadoop.hbase.rest.protobuf.generated;

public final class StorageClusterStatusMessage {
  private StorageClusterStatusMessage() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public static final class StorageClusterStatus extends
      com.google.protobuf.GeneratedMessage {
    // Use StorageClusterStatus.newBuilder() to construct.
    private StorageClusterStatus() {
      initFields();
    }
    private StorageClusterStatus(boolean noInit) {}

    private static final StorageClusterStatus defaultInstance;
    public static StorageClusterStatus getDefaultInstance() {
      return defaultInstance;
    }

    public StorageClusterStatus getDefaultInstanceForType() {
      return defaultInstance;
    }

    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
    }

    public static final class Region extends
        com.google.protobuf.GeneratedMessage {
      // Use Region.newBuilder() to construct.
      private Region() {
        initFields();
      }
      private Region(boolean noInit) {}

      private static final Region defaultInstance;
      public static Region getDefaultInstance() {
        return defaultInstance;
      }

      public Region getDefaultInstanceForType() {
        return defaultInstance;
      }

      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable;
      }

      // required bytes name = 1;
      public static final int NAME_FIELD_NUMBER = 1;
      private boolean hasName;
      private com.google.protobuf.ByteString name_ = com.google.protobuf.ByteString.EMPTY;
      public boolean hasName() { return hasName; }
      public com.google.protobuf.ByteString getName() { return name_; }

      // optional int32 stores = 2;
      public static final int STORES_FIELD_NUMBER = 2;
      private boolean hasStores;
      private int stores_ = 0;
      public boolean hasStores() { return hasStores; }
      public int getStores() { return stores_; }

      // optional int32 storefiles = 3;
      public static final int STOREFILES_FIELD_NUMBER = 3;
      private boolean hasStorefiles;
      private int storefiles_ = 0;
      public boolean hasStorefiles() { return hasStorefiles; }
      public int getStorefiles() { return storefiles_; }

      // optional int32 storefileSizeMB = 4;
      public static final int STOREFILESIZEMB_FIELD_NUMBER = 4;
      private boolean hasStorefileSizeMB;
      private int storefileSizeMB_ = 0;
      public boolean hasStorefileSizeMB() { return hasStorefileSizeMB; }
      public int getStorefileSizeMB() { return storefileSizeMB_; }

      // optional int32 memstoreSizeMB = 5;
      public static final int MEMSTORESIZEMB_FIELD_NUMBER = 5;
      private boolean hasMemstoreSizeMB;
      private int memstoreSizeMB_ = 0;
      public boolean hasMemstoreSizeMB() { return hasMemstoreSizeMB; }
      public int getMemstoreSizeMB() { return memstoreSizeMB_; }

      // optional int32 storefileIndexSizeMB = 6;
      public static final int STOREFILEINDEXSIZEMB_FIELD_NUMBER = 6;
      private boolean hasStorefileIndexSizeMB;
      private int storefileIndexSizeMB_ = 0;
      public boolean hasStorefileIndexSizeMB() { return hasStorefileIndexSizeMB; }
      public int getStorefileIndexSizeMB() { return storefileIndexSizeMB_; }

      private void initFields() {
      }
      public final boolean isInitialized() {
        if (!hasName) return false;
        return true;
      }

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (hasName()) {
          output.writeBytes(1, getName());
        }
        if (hasStores()) {
          output.writeInt32(2, getStores());
        }
        if (hasStorefiles()) {
          output.writeInt32(3, getStorefiles());
        }
        if (hasStorefileSizeMB()) {
          output.writeInt32(4, getStorefileSizeMB());
        }
        if (hasMemstoreSizeMB()) {
          output.writeInt32(5, getMemstoreSizeMB());
        }
        if (hasStorefileIndexSizeMB()) {
          output.writeInt32(6, getStorefileIndexSizeMB());
        }
        getUnknownFields().writeTo(output);
      }

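      // -1 is a sentinel meaning "size not yet computed". The result is
      // cached because writeTo() calls getSerializedSize() up front and
      // callers typically query the size again when framing the message.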
      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        if (hasName()) {
          size += com.google.protobuf.CodedOutputStream
            .computeBytesSize(1, getName());
        }
        if (hasStores()) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(2, getStores());
        }
        if (hasStorefiles()) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(3, getStorefiles());
        }
        if (hasStorefileSizeMB()) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(4, getStorefileSizeMB());
        }
        if (hasMemstoreSizeMB()) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(5, getMemstoreSizeMB());
        }
        if (hasStorefileIndexSizeMB()) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(6, getStorefileIndexSizeMB());
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        Builder builder = newBuilder();
        if (builder.mergeDelimitedFrom(input)) {
          return builder.buildParsed();
        } else {
          return null;
        }
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        Builder builder = newBuilder();
        if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
          return builder.buildParsed();
        } else {
          return null;
        }
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input, extensionRegistry)
                 .buildParsed();
      }

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }

      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder> {
        private org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region result;

        // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder()
        private Builder() {}

        private static Builder create() {
          Builder builder = new Builder();
          builder.result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region();
          return builder;
        }

        protected org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region internalGetResult() {
          return result;
        }

        public Builder clear() {
          if (result == null) {
            throw new IllegalStateException(
              "Cannot call clear() after build().");
          }
          result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region();
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(result);
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDescriptor();
        }

        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getDefaultInstanceForType() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance();
        }

        public boolean isInitialized() {
          return result.isInitialized();
        }
        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region build() {
          if (result != null && !isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return buildPartial();
        }

        private org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildParsed()
            throws com.google.protobuf.InvalidProtocolBufferException {
          if (!isInitialized()) {
            throw newUninitializedMessageException(
              result).asInvalidProtocolBufferException();
          }
          return buildPartial();
        }

        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region buildPartial() {
          if (result == null) {
            throw new IllegalStateException(
              "build() has already been called on this Builder.");
          }
          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region returnMe = result;
          result = null;
          return returnMe;
        }

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region) {
            return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region other) {
          if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.getDefaultInstance()) return this;
          if (other.hasName()) {
            setName(other.getName());
          }
          if (other.hasStores()) {
            setStores(other.getStores());
          }
          if (other.hasStorefiles()) {
            setStorefiles(other.getStorefiles());
          }
          if (other.hasStorefileSizeMB()) {
            setStorefileSizeMB(other.getStorefileSizeMB());
          }
          if (other.hasMemstoreSizeMB()) {
            setMemstoreSizeMB(other.getMemstoreSizeMB());
          }
          if (other.hasStorefileIndexSizeMB()) {
            setStorefileIndexSizeMB(other.getStorefileIndexSizeMB());
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder(
              this.getUnknownFields());
          while (true) {
            int tag = input.readTag();
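            // readTag() returns (field_number << 3) | wire_type, so for this
            // message 10 = field 1 as length-delimited bytes, and 16, 24, 32,
            // 40, 48 = fields 2-6 as varints; 0 means end of input. protoc
            // emits the default branch before the field cases, which is fine:
            // Java switch semantics make the case order irrelevant.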
            switch (tag) {
              case 0:
                this.setUnknownFields(unknownFields.build());
                return this;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  this.setUnknownFields(unknownFields.build());
                  return this;
                }
                break;
              }
              case 10: {
                setName(input.readBytes());
                break;
              }
              case 16: {
                setStores(input.readInt32());
                break;
              }
              case 24: {
                setStorefiles(input.readInt32());
                break;
              }
              case 32: {
                setStorefileSizeMB(input.readInt32());
                break;
              }
              case 40: {
                setMemstoreSizeMB(input.readInt32());
                break;
              }
              case 48: {
                setStorefileIndexSizeMB(input.readInt32());
                break;
              }
            }
          }
        }

        // required bytes name = 1;
        public boolean hasName() {
          return result.hasName();
        }
        public com.google.protobuf.ByteString getName() {
          return result.getName();
        }
        public Builder setName(com.google.protobuf.ByteString value) {
          if (value == null) {
            throw new NullPointerException();
          }
          result.hasName = true;
          result.name_ = value;
          return this;
        }
        public Builder clearName() {
          result.hasName = false;
          result.name_ = getDefaultInstance().getName();
          return this;
        }

        // optional int32 stores = 2;
        public boolean hasStores() {
          return result.hasStores();
        }
        public int getStores() {
          return result.getStores();
        }
        public Builder setStores(int value) {
          result.hasStores = true;
          result.stores_ = value;
          return this;
        }
        public Builder clearStores() {
          result.hasStores = false;
          result.stores_ = 0;
          return this;
        }

        // optional int32 storefiles = 3;
        public boolean hasStorefiles() {
          return result.hasStorefiles();
        }
        public int getStorefiles() {
          return result.getStorefiles();
        }
        public Builder setStorefiles(int value) {
          result.hasStorefiles = true;
          result.storefiles_ = value;
          return this;
        }
        public Builder clearStorefiles() {
          result.hasStorefiles = false;
          result.storefiles_ = 0;
          return this;
        }

        // optional int32 storefileSizeMB = 4;
        public boolean hasStorefileSizeMB() {
          return result.hasStorefileSizeMB();
        }
        public int getStorefileSizeMB() {
          return result.getStorefileSizeMB();
        }
        public Builder setStorefileSizeMB(int value) {
          result.hasStorefileSizeMB = true;
          result.storefileSizeMB_ = value;
          return this;
        }
        public Builder clearStorefileSizeMB() {
          result.hasStorefileSizeMB = false;
          result.storefileSizeMB_ = 0;
          return this;
        }

        // optional int32 memstoreSizeMB = 5;
        public boolean hasMemstoreSizeMB() {
          return result.hasMemstoreSizeMB();
        }
        public int getMemstoreSizeMB() {
          return result.getMemstoreSizeMB();
        }
        public Builder setMemstoreSizeMB(int value) {
          result.hasMemstoreSizeMB = true;
          result.memstoreSizeMB_ = value;
          return this;
        }
        public Builder clearMemstoreSizeMB() {
          result.hasMemstoreSizeMB = false;
          result.memstoreSizeMB_ = 0;
          return this;
        }

        // optional int32 storefileIndexSizeMB = 6;
        public boolean hasStorefileIndexSizeMB() {
          return result.hasStorefileIndexSizeMB();
        }
        public int getStorefileIndexSizeMB() {
          return result.getStorefileIndexSizeMB();
        }
        public Builder setStorefileIndexSizeMB(int value) {
          result.hasStorefileIndexSizeMB = true;
          result.storefileIndexSizeMB_ = value;
          return this;
        }
        public Builder clearStorefileIndexSizeMB() {
          result.hasStorefileIndexSizeMB = false;
          result.storefileIndexSizeMB_ = 0;
          return this;
        }

        // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region)
      }

      static {
        defaultInstance = new Region(true);
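        // The call below forces the outer class's static initializer (and
        // with it the descriptor setup) to run before the default instance's
        // fields are initialized.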
        org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internalForceInit();
        defaultInstance.initFields();
      }

      // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region)
    }

    public static final class Node extends
        com.google.protobuf.GeneratedMessage {
      // Use Node.newBuilder() to construct.
      private Node() {
        initFields();
      }
      private Node(boolean noInit) {}

      private static final Node defaultInstance;
      public static Node getDefaultInstance() {
        return defaultInstance;
      }

      public Node getDefaultInstanceForType() {
        return defaultInstance;
      }

      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable;
      }

      // required string name = 1;
      public static final int NAME_FIELD_NUMBER = 1;
      private boolean hasName;
      private java.lang.String name_ = "";
      public boolean hasName() { return hasName; }
      public java.lang.String getName() { return name_; }

      // optional int64 startCode = 2;
      public static final int STARTCODE_FIELD_NUMBER = 2;
      private boolean hasStartCode;
      private long startCode_ = 0L;
      public boolean hasStartCode() { return hasStartCode; }
      public long getStartCode() { return startCode_; }

      // optional int32 requests = 3;
      public static final int REQUESTS_FIELD_NUMBER = 3;
      private boolean hasRequests;
      private int requests_ = 0;
      public boolean hasRequests() { return hasRequests; }
      public int getRequests() { return requests_; }

      // optional int32 heapSizeMB = 4;
      public static final int HEAPSIZEMB_FIELD_NUMBER = 4;
      private boolean hasHeapSizeMB;
      private int heapSizeMB_ = 0;
      public boolean hasHeapSizeMB() { return hasHeapSizeMB; }
      public int getHeapSizeMB() { return heapSizeMB_; }

      // optional int32 maxHeapSizeMB = 5;
      public static final int MAXHEAPSIZEMB_FIELD_NUMBER = 5;
      private boolean hasMaxHeapSizeMB;
      private int maxHeapSizeMB_ = 0;
      public boolean hasMaxHeapSizeMB() { return hasMaxHeapSizeMB; }
      public int getMaxHeapSizeMB() { return maxHeapSizeMB_; }

      // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
      public static final int REGIONS_FIELD_NUMBER = 6;
      private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> regions_ =
        java.util.Collections.emptyList();
      public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
        return regions_;
      }
      public int getRegionsCount() { return regions_.size(); }
      public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
        return regions_.get(index);
      }

      private void initFields() {
      }
      public final boolean isInitialized() {
        if (!hasName) return false;
        for (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region element : getRegionsList()) {
          if (!element.isInitialized()) return false;
        }
        return true;
      }

      public void writeTo(com.google.protobuf.CodedOutputStream output)
                          throws java.io.IOException {
        getSerializedSize();
        if (hasName()) {
          output.writeString(1, getName());
        }
        if (hasStartCode()) {
          output.writeInt64(2, getStartCode());
        }
        if (hasRequests()) {
          output.writeInt32(3, getRequests());
        }
        if (hasHeapSizeMB()) {
          output.writeInt32(4, getHeapSizeMB());
        }
        if (hasMaxHeapSizeMB()) {
          output.writeInt32(5, getMaxHeapSizeMB());
        }
        for (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region element : getRegionsList()) {
          output.writeMessage(6, element);
        }
        getUnknownFields().writeTo(output);
      }

      private int memoizedSerializedSize = -1;
      public int getSerializedSize() {
        int size = memoizedSerializedSize;
        if (size != -1) return size;

        size = 0;
        if (hasName()) {
          size += com.google.protobuf.CodedOutputStream
            .computeStringSize(1, getName());
        }
        if (hasStartCode()) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt64Size(2, getStartCode());
        }
        if (hasRequests()) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(3, getRequests());
        }
        if (hasHeapSizeMB()) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(4, getHeapSizeMB());
        }
        if (hasMaxHeapSizeMB()) {
          size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(5, getMaxHeapSizeMB());
        }
        for (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region element : getRegionsList()) {
          size += com.google.protobuf.CodedOutputStream
            .computeMessageSize(6, element);
        }
        size += getUnknownFields().getSerializedSize();
        memoizedSerializedSize = size;
        return size;
      }

      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          com.google.protobuf.ByteString data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          com.google.protobuf.ByteString data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(byte[] data)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          byte[] data,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return newBuilder().mergeFrom(data, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(java.io.InputStream input)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input, extensionRegistry)
                 .buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(java.io.InputStream input)
          throws java.io.IOException {
        Builder builder = newBuilder();
        if (builder.mergeDelimitedFrom(input)) {
          return builder.buildParsed();
        } else {
          return null;
        }
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseDelimitedFrom(
          java.io.InputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        Builder builder = newBuilder();
        if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
          return builder.buildParsed();
        } else {
          return null;
        }
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          com.google.protobuf.CodedInputStream input)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input).buildParsed();
      }
      public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node parseFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        return newBuilder().mergeFrom(input, extensionRegistry)
                 .buildParsed();
      }

      public static Builder newBuilder() { return Builder.create(); }
      public Builder newBuilderForType() { return newBuilder(); }
      public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node prototype) {
        return newBuilder().mergeFrom(prototype);
      }
      public Builder toBuilder() { return newBuilder(this); }

      public static final class Builder extends
          com.google.protobuf.GeneratedMessage.Builder<Builder> {
        private org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node result;

        // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder()
        private Builder() {}

        private static Builder create() {
          Builder builder = new Builder();
          builder.result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node();
          return builder;
        }

        protected org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node internalGetResult() {
          return result;
        }

        public Builder clear() {
          if (result == null) {
            throw new IllegalStateException(
              "Cannot call clear() after build().");
          }
          result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node();
          return this;
        }

        public Builder clone() {
          return create().mergeFrom(result);
        }

        public com.google.protobuf.Descriptors.Descriptor
            getDescriptorForType() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDescriptor();
        }

        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getDefaultInstanceForType() {
          return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance();
        }

        public boolean isInitialized() {
          return result.isInitialized();
        }
        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node build() {
          if (result != null && !isInitialized()) {
            throw newUninitializedMessageException(result);
          }
          return buildPartial();
        }

        private org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildParsed()
            throws com.google.protobuf.InvalidProtocolBufferException {
          if (!isInitialized()) {
            throw newUninitializedMessageException(
              result).asInvalidProtocolBufferException();
          }
          return buildPartial();
        }

        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node buildPartial() {
          if (result == null) {
            throw new IllegalStateException(
              "build() has already been called on this Builder.");
          }
          if (result.regions_ != java.util.Collections.EMPTY_LIST) {
            result.regions_ =
              java.util.Collections.unmodifiableList(result.regions_);
          }
          org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node returnMe = result;
          result = null;
          return returnMe;
        }

        public Builder mergeFrom(com.google.protobuf.Message other) {
          if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node) {
            return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node)other);
          } else {
            super.mergeFrom(other);
            return this;
          }
        }

        public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node other) {
          if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.getDefaultInstance()) return this;
          if (other.hasName()) {
            setName(other.getName());
          }
          if (other.hasStartCode()) {
            setStartCode(other.getStartCode());
          }
          if (other.hasRequests()) {
            setRequests(other.getRequests());
          }
          if (other.hasHeapSizeMB()) {
            setHeapSizeMB(other.getHeapSizeMB());
          }
          if (other.hasMaxHeapSizeMB()) {
            setMaxHeapSizeMB(other.getMaxHeapSizeMB());
          }
          if (!other.regions_.isEmpty()) {
            if (result.regions_.isEmpty()) {
              result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
            }
            result.regions_.addAll(other.regions_);
          }
          this.mergeUnknownFields(other.getUnknownFields());
          return this;
        }

        public Builder mergeFrom(
            com.google.protobuf.CodedInputStream input,
            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
            throws java.io.IOException {
          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
            com.google.protobuf.UnknownFieldSet.newBuilder(
              this.getUnknownFields());
          while (true) {
            int tag = input.readTag();
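            // Same tag scheme as in Region.Builder.mergeFrom() above; the one
            // new case is 50 = field 6 with wire type 2, i.e. an embedded
            // Region message parsed through a sub-builder.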
            switch (tag) {
              case 0:
                this.setUnknownFields(unknownFields.build());
                return this;
              default: {
                if (!parseUnknownField(input, unknownFields,
                                       extensionRegistry, tag)) {
                  this.setUnknownFields(unknownFields.build());
                  return this;
                }
                break;
              }
              case 10: {
                setName(input.readString());
                break;
              }
              case 16: {
                setStartCode(input.readInt64());
                break;
              }
              case 24: {
                setRequests(input.readInt32());
                break;
              }
              case 32: {
                setHeapSizeMB(input.readInt32());
                break;
              }
              case 40: {
                setMaxHeapSizeMB(input.readInt32());
                break;
              }
              case 50: {
                org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder subBuilder = org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.newBuilder();
                input.readMessage(subBuilder, extensionRegistry);
                addRegions(subBuilder.buildPartial());
                break;
              }
            }
          }
        }

        // required string name = 1;
        public boolean hasName() {
          return result.hasName();
        }
        public java.lang.String getName() {
          return result.getName();
        }
        public Builder setName(java.lang.String value) {
          if (value == null) {
            throw new NullPointerException();
          }
          result.hasName = true;
          result.name_ = value;
          return this;
        }
        public Builder clearName() {
          result.hasName = false;
          result.name_ = getDefaultInstance().getName();
          return this;
        }

        // optional int64 startCode = 2;
        public boolean hasStartCode() {
          return result.hasStartCode();
        }
        public long getStartCode() {
          return result.getStartCode();
        }
        public Builder setStartCode(long value) {
          result.hasStartCode = true;
          result.startCode_ = value;
          return this;
        }
        public Builder clearStartCode() {
          result.hasStartCode = false;
          result.startCode_ = 0L;
          return this;
        }

        // optional int32 requests = 3;
        public boolean hasRequests() {
          return result.hasRequests();
        }
        public int getRequests() {
          return result.getRequests();
        }
        public Builder setRequests(int value) {
          result.hasRequests = true;
          result.requests_ = value;
          return this;
        }
        public Builder clearRequests() {
          result.hasRequests = false;
          result.requests_ = 0;
          return this;
        }

        // optional int32 heapSizeMB = 4;
        public boolean hasHeapSizeMB() {
          return result.hasHeapSizeMB();
        }
        public int getHeapSizeMB() {
          return result.getHeapSizeMB();
        }
        public Builder setHeapSizeMB(int value) {
          result.hasHeapSizeMB = true;
          result.heapSizeMB_ = value;
          return this;
        }
        public Builder clearHeapSizeMB() {
          result.hasHeapSizeMB = false;
          result.heapSizeMB_ = 0;
          return this;
        }

        // optional int32 maxHeapSizeMB = 5;
        public boolean hasMaxHeapSizeMB() {
          return result.hasMaxHeapSizeMB();
        }
        public int getMaxHeapSizeMB() {
          return result.getMaxHeapSizeMB();
        }
        public Builder setMaxHeapSizeMB(int value) {
          result.hasMaxHeapSizeMB = true;
          result.maxHeapSizeMB_ = value;
          return this;
        }
        public Builder clearMaxHeapSizeMB() {
          result.hasMaxHeapSizeMB = false;
          result.maxHeapSizeMB_ = 0;
          return this;
        }

        // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Region regions = 6;
        public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> getRegionsList() {
          return java.util.Collections.unmodifiableList(result.regions_);
        }
        public int getRegionsCount() {
          return result.getRegionsCount();
        }
        public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region getRegions(int index) {
          return result.getRegions(index);
        }
        public Builder setRegions(int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
          if (value == null) {
            throw new NullPointerException();
          }
          result.regions_.set(index, value);
          return this;
        }
        public Builder setRegions(int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
          result.regions_.set(index, builderForValue.build());
          return this;
        }
        public Builder addRegions(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region value) {
          if (value == null) {
            throw new NullPointerException();
          }
          if (result.regions_.isEmpty()) {
            result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
          }
          result.regions_.add(value);
          return this;
        }
        public Builder addRegions(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder builderForValue) {
          if (result.regions_.isEmpty()) {
            result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
          }
          result.regions_.add(builderForValue.build());
          return this;
        }
        public Builder addAllRegions(
            java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region> values) {
          if (result.regions_.isEmpty()) {
            result.regions_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region>();
          }
          super.addAll(values, result.regions_);
          return this;
        }
        public Builder clearRegions() {
          result.regions_ = java.util.Collections.emptyList();
          return this;
        }

        // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node)
      }

      static {
        defaultInstance = new Node(true);
        org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internalForceInit();
        defaultInstance.initFields();
      }

      // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node)
    }

    // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
    public static final int LIVENODES_FIELD_NUMBER = 1;
    private java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> liveNodes_ =
      java.util.Collections.emptyList();
    public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> getLiveNodesList() {
      return liveNodes_;
    }
    public int getLiveNodesCount() { return liveNodes_.size(); }
    public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index) {
      return liveNodes_.get(index);
    }

    // repeated string deadNodes = 2;
    public static final int DEADNODES_FIELD_NUMBER = 2;
    private java.util.List<java.lang.String> deadNodes_ =
      java.util.Collections.emptyList();
    public java.util.List<java.lang.String> getDeadNodesList() {
      return deadNodes_;
    }
    public int getDeadNodesCount() { return deadNodes_.size(); }
    public java.lang.String getDeadNodes(int index) {
      return deadNodes_.get(index);
    }

    // optional int32 regions = 3;
    public static final int REGIONS_FIELD_NUMBER = 3;
    private boolean hasRegions;
    private int regions_ = 0;
    public boolean hasRegions() { return hasRegions; }
    public int getRegions() { return regions_; }

    // optional int32 requests = 4;
    public static final int REQUESTS_FIELD_NUMBER = 4;
    private boolean hasRequests;
    private int requests_ = 0;
    public boolean hasRequests() { return hasRequests; }
    public int getRequests() { return requests_; }

    // optional double averageLoad = 5;
    public static final int AVERAGELOAD_FIELD_NUMBER = 5;
    private boolean hasAverageLoad;
    private double averageLoad_ = 0D;
    public boolean hasAverageLoad() { return hasAverageLoad; }
    public double getAverageLoad() { return averageLoad_; }

    private void initFields() {
    }
    public final boolean isInitialized() {
      for (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node element : getLiveNodesList()) {
        if (!element.isInitialized()) return false;
      }
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      for (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node element : getLiveNodesList()) {
        output.writeMessage(1, element);
      }
      for (java.lang.String element : getDeadNodesList()) {
        output.writeString(2, element);
      }
      if (hasRegions()) {
        output.writeInt32(3, getRegions());
      }
      if (hasRequests()) {
        output.writeInt32(4, getRequests());
      }
      if (hasAverageLoad()) {
        output.writeDouble(5, getAverageLoad());
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      for (org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node element : getLiveNodesList()) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, element);
      }
      {
        int dataSize = 0;
        for (java.lang.String element : getDeadNodesList()) {
          dataSize += com.google.protobuf.CodedOutputStream
            .computeStringSizeNoTag(element);
        }
        size += dataSize;
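        // Each deadNodes entry also carries a one-byte tag
        // ((2 << 3) | 2 = 0x12), hence one extra byte per element.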
1146         size += 1 * getDeadNodesList().size();
1147       }
1148       if (hasRegions()) {
1149         size += com.google.protobuf.CodedOutputStream
1150           .computeInt32Size(3, getRegions());
1151       }
1152       if (hasRequests()) {
1153         size += com.google.protobuf.CodedOutputStream
1154           .computeInt32Size(4, getRequests());
1155       }
1156       if (hasAverageLoad()) {
1157         size += com.google.protobuf.CodedOutputStream
1158           .computeDoubleSize(5, getAverageLoad());
1159       }
1160       size += getUnknownFields().getSerializedSize();
1161       memoizedSerializedSize = size;
1162       return size;
1163     }
1164     
1165     public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1166         com.google.protobuf.ByteString data)
1167         throws com.google.protobuf.InvalidProtocolBufferException {
1168       return newBuilder().mergeFrom(data).buildParsed();
1169     }
1170     public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1171         com.google.protobuf.ByteString data,
1172         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1173         throws com.google.protobuf.InvalidProtocolBufferException {
1174       return newBuilder().mergeFrom(data, extensionRegistry)
1175                .buildParsed();
1176     }
1177     public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(byte[] data)
1178         throws com.google.protobuf.InvalidProtocolBufferException {
1179       return newBuilder().mergeFrom(data).buildParsed();
1180     }
1181     public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1182         byte[] data,
1183         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1184         throws com.google.protobuf.InvalidProtocolBufferException {
1185       return newBuilder().mergeFrom(data, extensionRegistry)
1186                .buildParsed();
1187     }
1188     public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(java.io.InputStream input)
1189         throws java.io.IOException {
1190       return newBuilder().mergeFrom(input).buildParsed();
1191     }
1192     public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1193         java.io.InputStream input,
1194         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1195         throws java.io.IOException {
1196       return newBuilder().mergeFrom(input, extensionRegistry)
1197                .buildParsed();
1198     }
1199     public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseDelimitedFrom(java.io.InputStream input)
1200         throws java.io.IOException {
1201       Builder builder = newBuilder();
1202       if (builder.mergeDelimitedFrom(input)) {
1203         return builder.buildParsed();
1204       } else {
1205         return null;
1206       }
1207     }
1208     public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseDelimitedFrom(
1209         java.io.InputStream input,
1210         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1211         throws java.io.IOException {
1212       Builder builder = newBuilder();
1213       if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
1214         return builder.buildParsed();
1215       } else {
1216         return null;
1217       }
1218     }
1219     public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1220         com.google.protobuf.CodedInputStream input)
1221         throws java.io.IOException {
1222       return newBuilder().mergeFrom(input).buildParsed();
1223     }
1224     public static org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus parseFrom(
1225         com.google.protobuf.CodedInputStream input,
1226         com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1227         throws java.io.IOException {
1228       return newBuilder().mergeFrom(input, extensionRegistry)
1229                .buildParsed();
1230     }
1231     
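    // A minimal parse sketch (the byte source is assumed, e.g. the body of an
    // HBase REST response requested with Accept: application/x-protobuf):
    //
    //   byte[] data = fetchStatusBytes();  // hypothetical helper
    //   StorageClusterStatus status = StorageClusterStatus.parseFrom(data);
    //   System.out.println(status.getRegions() + " regions, average load "
    //       + status.getAverageLoad());
    //
    // parseFrom throws InvalidProtocolBufferException on malformed input or on
    // missing required fields (here, Node.name and Region.name).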
1232     public static Builder newBuilder() { return Builder.create(); }
1233     public Builder newBuilderForType() { return newBuilder(); }
1234     public static Builder newBuilder(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus prototype) {
1235       return newBuilder().mergeFrom(prototype);
1236     }
1237     public Builder toBuilder() { return newBuilder(this); }
1238     
1239     public static final class Builder extends
1240         com.google.protobuf.GeneratedMessage.Builder<Builder> {
1241       private org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus result;
1242       
1243       // Construct using org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.newBuilder()
1244       private Builder() {}
1245       
1246       private static Builder create() {
1247         Builder builder = new Builder();
1248         builder.result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus();
1249         return builder;
1250       }
1251       
1252       protected org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus internalGetResult() {
1253         return result;
1254       }
1255       
1256       public Builder clear() {
1257         if (result == null) {
1258           throw new IllegalStateException(
1259             "Cannot call clear() after build().");
1260         }
1261         result = new org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus();
1262         return this;
1263       }
1264       
1265       public Builder clone() {
1266         return create().mergeFrom(result);
1267       }
1268       
1269       public com.google.protobuf.Descriptors.Descriptor
1270           getDescriptorForType() {
1271         return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDescriptor();
1272       }
1273       
1274       public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus getDefaultInstanceForType() {
1275         return org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDefaultInstance();
1276       }
1277       
1278       public boolean isInitialized() {
1279         return result.isInitialized();
1280       }
1281       public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus build() {
1282         if (result != null && !isInitialized()) {
1283           throw newUninitializedMessageException(result);
1284         }
1285         return buildPartial();
1286       }
1287       
1288       private org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus buildParsed()
1289           throws com.google.protobuf.InvalidProtocolBufferException {
1290         if (!isInitialized()) {
1291           throw newUninitializedMessageException(
1292             result).asInvalidProtocolBufferException();
1293         }
1294         return buildPartial();
1295       }
1296       
1297       public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus buildPartial() {
1298         if (result == null) {
1299           throw new IllegalStateException(
1300             "build() has already been called on this Builder.");
1301         }
1302         if (result.liveNodes_ != java.util.Collections.EMPTY_LIST) {
1303           result.liveNodes_ =
1304             java.util.Collections.unmodifiableList(result.liveNodes_);
1305         }
1306         if (result.deadNodes_ != java.util.Collections.EMPTY_LIST) {
1307           result.deadNodes_ =
1308             java.util.Collections.unmodifiableList(result.deadNodes_);
1309         }
1310         org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus returnMe = result;
1311         result = null;
1312         return returnMe;
1313       }
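        // buildPartial() hands the internal result object to the caller and
        // nulls the field, so a second build()/buildPartial() or a clear() on
        // this Builder throws IllegalStateException instead of mutating a
        // message that has already escaped.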
1314       
1315       public Builder mergeFrom(com.google.protobuf.Message other) {
1316         if (other instanceof org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus) {
1317           return mergeFrom((org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus)other);
1318         } else {
1319           super.mergeFrom(other);
1320           return this;
1321         }
1322       }
1323       
1324       public Builder mergeFrom(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus other) {
1325         if (other == org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.getDefaultInstance()) return this;
1326         if (!other.liveNodes_.isEmpty()) {
1327           if (result.liveNodes_.isEmpty()) {
1328             result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
1329           }
1330           result.liveNodes_.addAll(other.liveNodes_);
1331         }
1332         if (!other.deadNodes_.isEmpty()) {
1333           if (result.deadNodes_.isEmpty()) {
1334             result.deadNodes_ = new java.util.ArrayList<java.lang.String>();
1335           }
1336           result.deadNodes_.addAll(other.deadNodes_);
1337         }
1338         if (other.hasRegions()) {
1339           setRegions(other.getRegions());
1340         }
1341         if (other.hasRequests()) {
1342           setRequests(other.getRequests());
1343         }
1344         if (other.hasAverageLoad()) {
1345           setAverageLoad(other.getAverageLoad());
1346         }
1347         this.mergeUnknownFields(other.getUnknownFields());
1348         return this;
1349       }
1350       
1351       public Builder mergeFrom(
1352           com.google.protobuf.CodedInputStream input,
1353           com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1354           throws java.io.IOException {
1355         com.google.protobuf.UnknownFieldSet.Builder unknownFields =
1356           com.google.protobuf.UnknownFieldSet.newBuilder(
1357             this.getUnknownFields());
1358         while (true) {
1359           int tag = input.readTag();
1360           switch (tag) {
1361             case 0:
1362               this.setUnknownFields(unknownFields.build());
1363               return this;
1364             default: {
1365               if (!parseUnknownField(input, unknownFields,
1366                                      extensionRegistry, tag)) {
1367                 this.setUnknownFields(unknownFields.build());
1368                 return this;
1369               }
1370               break;
1371             }
1372             case 10: {
1373               org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder subBuilder = org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.newBuilder();
1374               input.readMessage(subBuilder, extensionRegistry);
1375               addLiveNodes(subBuilder.buildPartial());
1376               break;
1377             }
1378             case 18: {
1379               addDeadNodes(input.readString());
1380               break;
1381             }
1382             case 24: {
1383               setRegions(input.readInt32());
1384               break;
1385             }
1386             case 32: {
1387               setRequests(input.readInt32());
1388               break;
1389             }
1390             case 41: {
1391               setAverageLoad(input.readDouble());
1392               break;
1393             }
1394           }
1395         }
1396       }
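      // The case labels in the switch above are protobuf wire-format tags,
      // tag = (field_number << 3) | wire_type: 10 = field 1, length-delimited
      // (liveNodes); 18 = field 2, length-delimited (deadNodes); 24 = field 3,
      // varint (regions); 32 = field 4, varint (requests); 41 = field 5,
      // 64-bit fixed (averageLoad). Tag 0 marks end of input.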
1397       
1398       
1399       // repeated .org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus.Node liveNodes = 1;
1400       public java.util.List<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> getLiveNodesList() {
1401         return java.util.Collections.unmodifiableList(result.liveNodes_);
1402       }
1403       public int getLiveNodesCount() {
1404         return result.getLiveNodesCount();
1405       }
1406       public org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node getLiveNodes(int index) {
1407         return result.getLiveNodes(index);
1408       }
1409       public Builder setLiveNodes(int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
1410         if (value == null) {
1411           throw new NullPointerException();
1412         }
1413         result.liveNodes_.set(index, value);
1414         return this;
1415       }
1416       public Builder setLiveNodes(int index, org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
1417         result.liveNodes_.set(index, builderForValue.build());
1418         return this;
1419       }
1420       public Builder addLiveNodes(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node value) {
1421         if (value == null) {
1422           throw new NullPointerException();
1423         }
1424         if (result.liveNodes_.isEmpty()) {
1425           result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
1426         }
1427         result.liveNodes_.add(value);
1428         return this;
1429       }
1430       public Builder addLiveNodes(org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder builderForValue) {
1431         if (result.liveNodes_.isEmpty()) {
1432           result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
1433         }
1434         result.liveNodes_.add(builderForValue.build());
1435         return this;
1436       }
1437       public Builder addAllLiveNodes(
1438           java.lang.Iterable<? extends org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node> values) {
1439         if (result.liveNodes_.isEmpty()) {
1440           result.liveNodes_ = new java.util.ArrayList<org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node>();
1441         }
1442         super.addAll(values, result.liveNodes_);
1443         return this;
1444       }
1445       public Builder clearLiveNodes() {
1446         result.liveNodes_ = java.util.Collections.emptyList();
1447         return this;
1448       }
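      // Repeated-field pattern: liveNodes_ starts out as the shared immutable
      // Collections.EMPTY_LIST and is only swapped for a private ArrayList on
      // the first add, so empty messages allocate nothing; buildPartial() later
      // wraps any populated list unmodifiable. deadNodes_ below follows the
      // same scheme.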
1449       
1450       // repeated string deadNodes = 2;
1451       public java.util.List<java.lang.String> getDeadNodesList() {
1452         return java.util.Collections.unmodifiableList(result.deadNodes_);
1453       }
1454       public int getDeadNodesCount() {
1455         return result.getDeadNodesCount();
1456       }
1457       public java.lang.String getDeadNodes(int index) {
1458         return result.getDeadNodes(index);
1459       }
1460       public Builder setDeadNodes(int index, java.lang.String value) {
1461         if (value == null) {
1462           throw new NullPointerException();
1463         }
1464         result.deadNodes_.set(index, value);
1465         return this;
1466       }
1467       public Builder addDeadNodes(java.lang.String value) {
1468         if (value == null) {
1469           throw new NullPointerException();
1470         }
1471         if (result.deadNodes_.isEmpty()) {
1472           result.deadNodes_ = new java.util.ArrayList<java.lang.String>();
1473         }
1474         result.deadNodes_.add(value);
1475         return this;
1476       }
1477       public Builder addAllDeadNodes(
1478           java.lang.Iterable<? extends java.lang.String> values) {
1479         if (result.deadNodes_.isEmpty()) {
1480           result.deadNodes_ = new java.util.ArrayList<java.lang.String>();
1481         }
1482         super.addAll(values, result.deadNodes_);
1483         return this;
1484       }
1485       public Builder clearDeadNodes() {
1486         result.deadNodes_ = java.util.Collections.emptyList();
1487         return this;
1488       }
1489       
1490       // optional int32 regions = 3;
1491       public boolean hasRegions() {
1492         return result.hasRegions();
1493       }
1494       public int getRegions() {
1495         return result.getRegions();
1496       }
1497       public Builder setRegions(int value) {
1498         result.hasRegions = true;
1499         result.regions_ = value;
1500         return this;
1501       }
1502       public Builder clearRegions() {
1503         result.hasRegions = false;
1504         result.regions_ = 0;
1505         return this;
1506       }
1507       
1508       // optional int32 requests = 4;
1509       public boolean hasRequests() {
1510         return result.hasRequests();
1511       }
1512       public int getRequests() {
1513         return result.getRequests();
1514       }
1515       public Builder setRequests(int value) {
1516         result.hasRequests = true;
1517         result.requests_ = value;
1518         return this;
1519       }
1520       public Builder clearRequests() {
1521         result.hasRequests = false;
1522         result.requests_ = 0;
1523         return this;
1524       }
1525       
1526       // optional double averageLoad = 5;
1527       public boolean hasAverageLoad() {
1528         return result.hasAverageLoad();
1529       }
1530       public double getAverageLoad() {
1531         return result.getAverageLoad();
1532       }
1533       public Builder setAverageLoad(double value) {
1534         result.hasAverageLoad = true;
1535         result.averageLoad_ = value;
1536         return this;
1537       }
1538       public Builder clearAverageLoad() {
1539         result.hasAverageLoad = false;
1540         result.averageLoad_ = 0D;
1541         return this;
1542       }
1543       
1544       // @@protoc_insertion_point(builder_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus)
1545     }
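    // A minimal builder sketch (hostnames and numbers are illustrative only;
    // Node.Builder.setName is the standard generated setter for its required
    // string field):
    //
    //   StorageClusterStatus status = StorageClusterStatus.newBuilder()
    //       .addLiveNodes(StorageClusterStatus.Node.newBuilder()
    //           .setName("rs1.example.com:60030")
    //           .build())
    //       .addDeadNodes("rs2.example.com:60030")
    //       .setRegions(2)
    //       .setAverageLoad(2.0)
    //       .build();
    //
    // build() validates required fields (Node.name here); buildPartial() skips
    // the check and may return an uninitialized message.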
1546     
1547     static {
1548       defaultInstance = new StorageClusterStatus(true);
1549       org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.internalForceInit();
1550       defaultInstance.initFields();
1551     }
1552     
1553     // @@protoc_insertion_point(class_scope:org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatus)
1554   }
1555   
1556   private static com.google.protobuf.Descriptors.Descriptor
1557     internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor;
1558   private static
1559     com.google.protobuf.GeneratedMessage.FieldAccessorTable
1560       internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable;
1561   private static com.google.protobuf.Descriptors.Descriptor
1562     internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor;
1563   private static
1564     com.google.protobuf.GeneratedMessage.FieldAccessorTable
1565       internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable;
1566   private static com.google.protobuf.Descriptors.Descriptor
1567     internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor;
1568   private static
1569     com.google.protobuf.GeneratedMessage.FieldAccessorTable
1570       internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable;
1571   
1572   public static com.google.protobuf.Descriptors.FileDescriptor
1573       getDescriptor() {
1574     return descriptor;
1575   }
1576   private static com.google.protobuf.Descriptors.FileDescriptor
1577       descriptor;
1578   static {
1579     java.lang.String[] descriptorData = {
1580       "\n!StorageClusterStatusMessage.proto\022/org" +
1581       ".apache.hadoop.hbase.rest.protobuf.gener" +
1582       "ated\"\222\004\n\024StorageClusterStatus\022]\n\tliveNod" +
1583       "es\030\001 \003(\0132J.org.apache.hadoop.hbase.rest." +
1584       "protobuf.generated.StorageClusterStatus." +
1585       "Node\022\021\n\tdeadNodes\030\002 \003(\t\022\017\n\007regions\030\003 \001(\005" +
1586       "\022\020\n\010requests\030\004 \001(\005\022\023\n\013averageLoad\030\005 \001(\001\032" +
1587       "\211\001\n\006Region\022\014\n\004name\030\001 \002(\014\022\016\n\006stores\030\002 \001(\005" +
1588       "\022\022\n\nstorefiles\030\003 \001(\005\022\027\n\017storefileSizeMB\030" +
1589       "\004 \001(\005\022\026\n\016memstoreSizeMB\030\005 \001(\005\022\034\n\024storefi",
1590       "leIndexSizeMB\030\006 \001(\005\032\303\001\n\004Node\022\014\n\004name\030\001 \002" +
1591       "(\t\022\021\n\tstartCode\030\002 \001(\003\022\020\n\010requests\030\003 \001(\005\022" +
1592       "\022\n\nheapSizeMB\030\004 \001(\005\022\025\n\rmaxHeapSizeMB\030\005 \001" +
1593       "(\005\022]\n\007regions\030\006 \003(\0132L.org.apache.hadoop." +
1594       "hbase.rest.protobuf.generated.StorageClu" +
1595       "sterStatus.Region"
1596     };
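    // descriptorData above is the compiled form of
    // StorageClusterStatusMessage.proto: a serialized FileDescriptorProto
    // embedded as Java string literals, where each octal escape is one raw
    // byte. internalBuildGeneratedFileFrom() below decodes it at class-load
    // time, and the assigner wires up the descriptors and accessor tables.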
1597     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
1598       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
1599         public com.google.protobuf.ExtensionRegistry assignDescriptors(
1600             com.google.protobuf.Descriptors.FileDescriptor root) {
1601           descriptor = root;
1602           internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor =
1603             getDescriptor().getMessageTypes().get(0);
1604           internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_fieldAccessorTable = new
1605             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
1606               internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor,
1607               new java.lang.String[] { "LiveNodes", "DeadNodes", "Regions", "Requests", "AverageLoad", },
1608               org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.class,
1609               org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Builder.class);
1610           internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor =
1611             internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(0);
1612           internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_fieldAccessorTable = new
1613             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
1614               internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Region_descriptor,
1615               new java.lang.String[] { "Name", "Stores", "Storefiles", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", },
1616               org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.class,
1617               org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Region.Builder.class);
1618           internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor =
1619             internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_descriptor.getNestedTypes().get(1);
1620           internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_fieldAccessorTable = new
1621             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
1622               internal_static_org_apache_hadoop_hbase_rest_protobuf_generated_StorageClusterStatus_Node_descriptor,
1623               new java.lang.String[] { "Name", "StartCode", "Requests", "HeapSizeMB", "MaxHeapSizeMB", "Regions", },
1624               org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.class,
1625               org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus.Node.Builder.class);
1626           return null;
1627         }
1628       };
1629     com.google.protobuf.Descriptors.FileDescriptor
1630       .internalBuildGeneratedFileFrom(descriptorData,
1631         new com.google.protobuf.Descriptors.FileDescriptor[] {
1632         }, assigner);
1633   }
1634   
1635   public static void internalForceInit() {}
1636   
1637   // @@protoc_insertion_point(outer_class_scope)
1638 }
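
A minimal end-to-end sketch of consuming this class. It assumes an HBase REST
gateway at localhost:8080 serving /status/cluster with protobuf content
negotiation; the host, port, and class name are placeholders, not part of the
generated file above.

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus;

public class ClusterStatusClient {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8080/status/cluster");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // Ask the gateway for the protobuf representation rather than XML/JSON.
    conn.setRequestProperty("Accept", "application/x-protobuf");
    try (InputStream in = conn.getInputStream()) {
      StorageClusterStatus status = StorageClusterStatus.parseFrom(in);
      System.out.printf("%d live, %d dead, %d regions, avg load %.2f%n",
          status.getLiveNodesCount(), status.getDeadNodesCount(),
          status.getRegions(), status.getAverageLoad());
    } finally {
      conn.disconnect();
    }
  }
}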