We can create a Phoenix table on top of an existing HBase table. Internally, Phoenix attaches its coprocessors to the HBase table, and queries against the Phoenix table go through them.
Yes, the data already stored in the HBase table will be visible through the Phoenix table.
See the steps below:
create 'DEV_HBASE:TestPhoneixIntegration','cf'
describe 'DEV_HBASE:TestPhoneixIntegration'
Table DEV_HBASE:TestPhoneixIntegration is ENABLED
DEV_HBASE:TestPhoneixIntegration
COLUMN FAMILIES DESCRIPTION
{NAME => 'cf', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE => 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}
1 row(s) in 0.0630 seconds
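
The SELECT further down returns one row, so the HBase table needs at least one record before that point. A put along these lines would produce the row shown in that result (the exact command is an assumption; only the row key 'rowkey1' and value 'John' are taken from the output below):

put 'DEV_HBASE:TestPhoneixIntegration', 'rowkey1', 'cf:name', 'John'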
------------------------------------------------------
create table "DEV_HBASE"."TestPhoneixIntegration"(ROWKEY VARCHAR PRIMARY KEY , "cf"."name" VARCHAR);
SELECT * FROM "DEV_HBASE"."TestPhoneixIntegration";
+----------+-------+
| ROWKEY | name |
+----------+-------+
| rowkey1 | John |
+----------+-------+
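
Writes also work in the other direction: an UPSERT issued through Phoenix lands in the same underlying HBase table. A sketch (the row key 'rowkey2' and value 'Jane' are made up for illustration; issue a commit afterwards if your client has autocommit disabled):

UPSERT INTO "DEV_HBASE"."TestPhoneixIntegration"(ROWKEY, "name") VALUES ('rowkey2', 'Jane');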
--------------------------------------------------------
describe 'DEV_HBASE:TestPhoneixIntegration'
Table DEV_HBASE:TestPhoneixIntegration is ENABLED
DEV_HBASE:TestPhoneixIntegration, {TABLE_ATTRIBUTES => {
coprocessor$1 => '|org.apache.phoenix.coprocessor.ScanRegionObserver|805306366|',
coprocessor$2 => '|org.apache.phoenix.coprocessor.UngroupedAggregateRegionObserver|805306366|',
coprocessor$3 => '|org.apache.phoenix.coprocessor.GroupedAggregateRegionObserver|805306366|',
coprocessor$4 => '|org.apache.phoenix.coprocessor.ServerCachingEndpointImpl|805306366|',
coprocessor$5 => '|org.apache.phoenix.hbase.index.Indexer|805306366|org.apache.hadoop.hbase.index.codec.class=org.apache.phoenix.index.PhoenixIndexCodec,index.builder=org.apache.phoenix.index.PhoenixIndexBuilder'}}
COLUMN FAMILIES DESCRIPTION
{NAME => 'cf', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE => 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}
1 row(s) in 0.0580 seconds
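
To confirm that rows written through Phoenix really end up in the same HBase table, you can run a plain scan from the HBase shell (note that Phoenix typically adds an extra empty cell, usually with column qualifier _0, to each row it writes):

scan 'DEV_HBASE:TestPhoneixIntegration'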