Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.apache.hadoop.hbase.regionserver.StoreFileWriter;
import org.apache.hadoop.hbase.regionserver.StoreUtils;
import org.apache.hadoop.hbase.security.access.AbstractReadOnlyController;
import org.apache.hadoop.hbase.util.CommonFSUtils;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HFileArchiveUtil;
Expand Down Expand Up @@ -84,29 +85,39 @@ protected StoreFileTrackerBase(Configuration conf, boolean isPrimaryReplica, Sto
this.ctx = ctx;
}

/** Returns whether the cluster-wide read-only switch is on in the current configuration. */
private boolean isReadOnlyEnabled() {
  // Falls back to the compiled-in default when the key is unset.
  final boolean enabled = conf.getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
    HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
  return enabled;
}

/**
 * Returns true when writes to this store must be blocked: global read-only mode is
 * enabled AND this tracker's table is not on the writable allow-list.
 */
private boolean isNonWritableTableWhenReadOnlyMode() {
  if (!isReadOnlyEnabled()) {
    // Read-only mode off: never block.
    return false;
  }
  return !AbstractReadOnlyController.isWritableInReadOnlyMode(ctx.getTableName());
}

@Override
public final List<StoreFileInfo> load() throws IOException {
  // Fix: the diff residue kept both the pre- and post-change return statements;
  // only the post-change version belongs here.
  // Load read-only (no cleanup) for secondary replicas, and likewise when the
  // table is not writable in read-only mode.
  return doLoadStoreFiles(!isPrimaryReplica || isNonWritableTableWhenReadOnlyMode());
}

@Override
public final void add(Collection<StoreFileInfo> newFiles) throws IOException {
if (isPrimaryReplica && !isReadOnlyEnabled()) {
if (isPrimaryReplica && !isNonWritableTableWhenReadOnlyMode()) {
doAddNewStoreFiles(newFiles);
}
}

@Override
public final void replace(Collection<StoreFileInfo> compactedFiles,
Collection<StoreFileInfo> newFiles) throws IOException {
if (isPrimaryReplica && !isReadOnlyEnabled()) {
if (isPrimaryReplica && !isNonWritableTableWhenReadOnlyMode()) {
doAddCompactionResults(compactedFiles, newFiles);
}
}

@Override
public final void set(List<StoreFileInfo> files) throws IOException {
if (isPrimaryReplica && !isReadOnlyEnabled()) {
if (isPrimaryReplica && !isNonWritableTableWhenReadOnlyMode()) {
doSetStoreFiles(files);
}
}
Expand Down Expand Up @@ -141,7 +152,7 @@ private HFileContext createFileContext(Compression.Algorithm compression,

@Override
public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) throws IOException {
if (!isPrimaryReplica || isReadOnlyEnabled()) {
if (!isPrimaryReplica || isNonWritableTableWhenReadOnlyMode()) {
throw new IllegalStateException(
"Should not call create writer on secondary replicas or in read-only mode");
}
Expand Down Expand Up @@ -387,11 +398,6 @@ protected void archiveStoreFiles(List<HStoreFile> storeFiles) throws IOException
storeFiles);
}

/**
 * NOTE(review): this copy appears as a REMOVED line in the diff — the helper was
 * relocated to the top of the class alongside isNonWritableTableWhenReadOnlyMode.
 * Returns whether the cluster-wide read-only switch is enabled in the configuration.
 */
private boolean isReadOnlyEnabled() {
return conf.getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
}

/**
* For primary replica, we will call load once when opening a region, and the implementation could
* choose to do some cleanup work. So here we use {@code readOnly} to indicate that whether you
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Set;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
Expand All @@ -29,18 +30,32 @@
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.master.MasterFileSystem;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.region.MasterRegionFactory;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
public abstract class AbstractReadOnlyController implements Coprocessor {
private MasterServices masterServices;
private static final Logger LOG = LoggerFactory.getLogger(AbstractReadOnlyController.class);

private static final Set<TableName> writableTables =
Set.of(TableName.META_TABLE_NAME, MasterRegionFactory.TABLE_NAME);

/**
 * Returns true when the region this observer context belongs to is on the
 * writable allow-list ({@code writableTables}) and may be written even while
 * read-only mode is active.
 */
public static boolean
  isWritableInReadOnlyMode(final ObserverContext<? extends RegionCoprocessorEnvironment> c) {
  final TableName table = c.getEnvironment().getRegionInfo().getTable();
  return writableTables.contains(table);
}

/** Returns true when {@code tableName} is exempt from the read-only guard. */
public static boolean isWritableInReadOnlyMode(final TableName tableName) {
  final boolean writable = writableTables.contains(tableName);
  return writable;
}

/**
 * Unconditionally rejects the current operation while the cluster is in read-only mode.
 * @throws DoNotRetryIOException always — DoNotRetry so clients fail fast instead of retrying.
 */
protected void internalReadOnlyGuard() throws DoNotRetryIOException {
throw new DoNotRetryIOException("Operation not allowed in Read-Only Mode");
}
Expand Down Expand Up @@ -76,10 +91,10 @@ public static void manageActiveClusterIdFile(boolean readOnlyEnabled, MasterFile
+ "Actual data: {}, Expected data: {}",
new String(actualClusterFileData), new String(expectedClusterFileData));
}
} catch (FileNotFoundException e) {
} catch (FileNotFoundException e) {
LOG.debug("Active cluster file does not exist at: {}. No need to delete.",
activeClusterFile);
} catch (IOException e) {
} catch (IOException e) {
LOG.error(
"Failed to delete active cluster file: {}. "
+ "Read-only flag will be updated, but file system state is inconsistent.",
Expand All @@ -89,7 +104,8 @@ public static void manageActiveClusterIdFile(boolean readOnlyEnabled, MasterFile
// DISABLING READ-ONLY (true -> false), create the active cluster file id file
int wait = mfs.getConfiguration().getInt(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
if (!fs.exists(activeClusterFile)) {
FSUtils.setActiveClusterSuffix(fs, rootDir, mfs.computeAndSetSuffixFileDataToWrite(), wait);
FSUtils.setActiveClusterSuffix(fs, rootDir, mfs.computeAndSetSuffixFileDataToWrite(),
wait);
} else {
LOG.debug("Active cluster file already exists at: {}. No need to create it again.",
activeClusterFile);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,6 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.CheckAndMutateResult;
Expand Down Expand Up @@ -60,10 +59,6 @@
public class RegionReadOnlyController extends AbstractReadOnlyController
implements RegionCoprocessor, RegionObserver {

/**
 * NOTE(review): shown as REMOVED in this diff — callers now use
 * AbstractReadOnlyController.isWritableInReadOnlyMode, whose allow-list also
 * covers the master-local region table, not only hbase:meta.
 * Returns true when the observed region belongs to the meta table.
 */
private boolean isOnMeta(final ObserverContext<? extends RegionCoprocessorEnvironment> c) {
return TableName.isMetaTableName(c.getEnvironment().getRegionInfo().getTable());
}

@Override
public Optional<RegionObserver> getRegionObserver() {
return Optional.of(this);
Expand All @@ -72,7 +67,7 @@ public Optional<RegionObserver> getRegionObserver() {
@Override
public void preFlushScannerOpen(ObserverContext<? extends RegionCoprocessorEnvironment> c,
Store store, ScanOptions options, FlushLifeCycleTracker tracker) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
RegionObserver.super.preFlushScannerOpen(c, store, options, tracker);
Expand All @@ -81,7 +76,7 @@ public void preFlushScannerOpen(ObserverContext<? extends RegionCoprocessorEnvir
@Override
public void preFlush(final ObserverContext<? extends RegionCoprocessorEnvironment> c,
FlushLifeCycleTracker tracker) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
RegionObserver.super.preFlush(c, tracker);
Expand All @@ -90,7 +85,7 @@ public void preFlush(final ObserverContext<? extends RegionCoprocessorEnvironmen
@Override
public InternalScanner preFlush(ObserverContext<? extends RegionCoprocessorEnvironment> c,
Store store, InternalScanner scanner, FlushLifeCycleTracker tracker) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
return RegionObserver.super.preFlush(c, store, scanner, tracker);
Expand Down Expand Up @@ -123,7 +118,7 @@ public InternalScanner preMemStoreCompactionCompact(
public void preCompactSelection(ObserverContext<? extends RegionCoprocessorEnvironment> c,
Store store, List<? extends StoreFile> candidates, CompactionLifeCycleTracker tracker)
throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
RegionObserver.super.preCompactSelection(c, store, candidates, tracker);
Expand All @@ -133,7 +128,7 @@ public void preCompactSelection(ObserverContext<? extends RegionCoprocessorEnvir
public void preCompactScannerOpen(ObserverContext<? extends RegionCoprocessorEnvironment> c,
Store store, ScanType scanType, ScanOptions options, CompactionLifeCycleTracker tracker,
CompactionRequest request) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
RegionObserver.super.preCompactScannerOpen(c, store, scanType, options, tracker, request);
Expand All @@ -143,7 +138,7 @@ public void preCompactScannerOpen(ObserverContext<? extends RegionCoprocessorEnv
public InternalScanner preCompact(ObserverContext<? extends RegionCoprocessorEnvironment> c,
Store store, InternalScanner scanner, ScanType scanType, CompactionLifeCycleTracker tracker,
CompactionRequest request) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
return RegionObserver.super.preCompact(c, store, scanner, scanType, tracker, request);
Expand All @@ -152,7 +147,7 @@ public InternalScanner preCompact(ObserverContext<? extends RegionCoprocessorEnv
@Override
public void prePut(ObserverContext<? extends RegionCoprocessorEnvironment> c, Put put,
WALEdit edit, Durability durability) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
RegionObserver.super.prePut(c, put, edit, durability);
Expand All @@ -161,7 +156,7 @@ public void prePut(ObserverContext<? extends RegionCoprocessorEnvironment> c, Pu
@Override
public void prePut(ObserverContext<? extends RegionCoprocessorEnvironment> c, Put put,
WALEdit edit) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
RegionObserver.super.prePut(c, put, edit);
Expand All @@ -170,7 +165,7 @@ public void prePut(ObserverContext<? extends RegionCoprocessorEnvironment> c, Pu
@Override
public void preDelete(ObserverContext<? extends RegionCoprocessorEnvironment> c, Delete delete,
WALEdit edit, Durability durability) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
RegionObserver.super.preDelete(c, delete, edit, durability);
Expand All @@ -179,7 +174,7 @@ public void preDelete(ObserverContext<? extends RegionCoprocessorEnvironment> c,
@Override
public void preDelete(ObserverContext<? extends RegionCoprocessorEnvironment> c, Delete delete,
WALEdit edit) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
RegionObserver.super.preDelete(c, delete, edit);
Expand All @@ -188,7 +183,7 @@ public void preDelete(ObserverContext<? extends RegionCoprocessorEnvironment> c,
@Override
public void preBatchMutate(ObserverContext<? extends RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
RegionObserver.super.preBatchMutate(c, miniBatchOp);
Expand All @@ -198,7 +193,7 @@ public void preBatchMutate(ObserverContext<? extends RegionCoprocessorEnvironmen
public boolean preCheckAndPut(ObserverContext<? extends RegionCoprocessorEnvironment> c,
byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator,
Put put, boolean result) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
return RegionObserver.super.preCheckAndPut(c, row, family, qualifier, op, comparator, put,
Expand All @@ -208,7 +203,7 @@ public boolean preCheckAndPut(ObserverContext<? extends RegionCoprocessorEnviron
@Override
public boolean preCheckAndPut(ObserverContext<? extends RegionCoprocessorEnvironment> c,
byte[] row, Filter filter, Put put, boolean result) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
return RegionObserver.super.preCheckAndPut(c, row, filter, put, result);
Expand All @@ -219,7 +214,7 @@ public boolean preCheckAndPutAfterRowLock(
ObserverContext<? extends RegionCoprocessorEnvironment> c, byte[] row, byte[] family,
byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Put put, boolean result)
throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
return RegionObserver.super.preCheckAndPutAfterRowLock(c, row, family, qualifier, op,
Expand All @@ -230,7 +225,7 @@ public boolean preCheckAndPutAfterRowLock(
public boolean preCheckAndPutAfterRowLock(
ObserverContext<? extends RegionCoprocessorEnvironment> c, byte[] row, Filter filter, Put put,
boolean result) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
return RegionObserver.super.preCheckAndPutAfterRowLock(c, row, filter, put, result);
Expand All @@ -240,7 +235,7 @@ public boolean preCheckAndPutAfterRowLock(
public boolean preCheckAndDelete(ObserverContext<? extends RegionCoprocessorEnvironment> c,
byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator,
Delete delete, boolean result) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
return RegionObserver.super.preCheckAndDelete(c, row, family, qualifier, op, comparator, delete,
Expand All @@ -250,7 +245,7 @@ public boolean preCheckAndDelete(ObserverContext<? extends RegionCoprocessorEnvi
@Override
public boolean preCheckAndDelete(ObserverContext<? extends RegionCoprocessorEnvironment> c,
byte[] row, Filter filter, Delete delete, boolean result) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
return RegionObserver.super.preCheckAndDelete(c, row, filter, delete, result);
Expand All @@ -261,7 +256,7 @@ public boolean preCheckAndDeleteAfterRowLock(
ObserverContext<? extends RegionCoprocessorEnvironment> c, byte[] row, byte[] family,
byte[] qualifier, CompareOperator op, ByteArrayComparable comparator, Delete delete,
boolean result) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
return RegionObserver.super.preCheckAndDeleteAfterRowLock(c, row, family, qualifier, op,
Expand All @@ -272,7 +267,7 @@ public boolean preCheckAndDeleteAfterRowLock(
public boolean preCheckAndDeleteAfterRowLock(
ObserverContext<? extends RegionCoprocessorEnvironment> c, byte[] row, Filter filter,
Delete delete, boolean result) throws IOException {
if (!isOnMeta(c)) {
if (!isWritableInReadOnlyMode(c)) {
internalReadOnlyGuard();
}
return RegionObserver.super.preCheckAndDeleteAfterRowLock(c, row, filter, delete, result);
Expand Down Expand Up @@ -339,7 +334,7 @@ public Result preIncrementAfterRowLock(ObserverContext<? extends RegionCoprocess
@Override
public void preReplayWALs(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
RegionInfo info, Path edits) throws IOException {
if (!isOnMeta(ctx)) {
if (!isWritableInReadOnlyMode(ctx)) {
internalReadOnlyGuard();
}
RegionObserver.super.preReplayWALs(ctx, info, edits);
Expand All @@ -362,8 +357,9 @@ public void preCommitStoreFile(ObserverContext<? extends RegionCoprocessorEnviro
@Override
public void preWALAppend(ObserverContext<? extends RegionCoprocessorEnvironment> ctx, WALKey key,
WALEdit edit) throws IOException {
// Only allow this operation for meta table
if (!TableName.isMetaTableName(key.getTableName())) {
// Only allow this operation for whitelisted table.
// See {@link writableTables set} for details.
if (!isWritableInReadOnlyMode(key.getTableName())) {
internalReadOnlyGuard();
}
RegionObserver.super.preWALAppend(ctx, key, edit);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,16 +22,41 @@
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.regionserver.HRegionFileSystem;
import org.apache.hadoop.hbase.regionserver.StoreContext;
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
import org.mockito.Mockito;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DummyStoreFileTrackerForReadOnlyMode extends StoreFileTrackerBase {
private static final Logger LOG =
LoggerFactory.getLogger(DummyStoreFileTrackerForReadOnlyMode.class);

private boolean readOnlyUsed = false;
private boolean compactionExecuted = false;
private boolean addExecuted = false;
private boolean setExecuted = false;

public DummyStoreFileTrackerForReadOnlyMode(Configuration conf, boolean isPrimaryReplica) {
super(conf, isPrimaryReplica, null);
/**
 * Builds a minimal StoreContext for tests: a mocked HRegionFileSystem that reports
 * the region info for {@code tableName} and the FileSystem resolved from {@code conf}.
 */
private static StoreContext buildStoreContext(Configuration conf, TableName tableName) {
  RegionInfo regionInfo = RegionInfoBuilder.newBuilder(tableName).build();
  HRegionFileSystem hfs = Mockito.mock(HRegionFileSystem.class);
  // Stubbing getRegionInfo cannot throw; keep it outside the try block.
  Mockito.when(hfs.getRegionInfo()).thenReturn(regionInfo);
  try {
    Mockito.when(hfs.getFileSystem()).thenReturn(FileSystem.get(conf));
  } catch (IOException e) {
    // Fix: the original logged and swallowed this exception, returning a context
    // whose getFileSystem() stub is unset (null) and deferring the failure to a
    // confusing later point. For a test helper, fail fast instead.
    throw new IllegalStateException("Failed to get FileSystem for StoreContext creation", e);
  }
  return StoreContext.getBuilder().withRegionFileSystem(hfs).build();
}

/**
 * Creates a tracker backed by a synthesized StoreContext for {@code tableName},
 * so table-name-based read-only allow-list checks in StoreFileTrackerBase can be
 * exercised without a real region.
 */
public DummyStoreFileTrackerForReadOnlyMode(Configuration conf, boolean isPrimaryReplica,
TableName tableName) {
super(conf, isPrimaryReplica, buildStoreContext(conf, tableName));
}

@Override
Expand Down
Loading