ATLAS-4901: updated JanusGraph to 1.0.0
This commit is contained in:
parent
88f829f6ce
commit
1e66c8b243
|
|
@ -37,6 +37,7 @@ atlas.graph.index.search.backend=solr
|
||||||
|
|
||||||
#Berkeley storage directory
|
#Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
#hbase
|
#hbase
|
||||||
#For standalone mode , specify localhost
|
#For standalone mode , specify localhost
|
||||||
|
|
|
||||||
|
|
@ -37,6 +37,7 @@ atlas.graph.index.search.backend=solr
|
||||||
|
|
||||||
#Berkeley storage directory
|
#Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
#hbase
|
#hbase
|
||||||
#For standalone mode , specify localhost
|
#For standalone mode , specify localhost
|
||||||
|
|
|
||||||
|
|
@ -611,7 +611,7 @@ public class HiveHookIT extends HiveITBase {
|
||||||
}
|
}
|
||||||
|
|
||||||
private String createTestDFSFile(String path) throws Exception {
|
private String createTestDFSFile(String path) throws Exception {
|
||||||
return "pfile://" + file(path);
|
return "file://" + file(path);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
|
@ -1213,7 +1213,7 @@ public class HiveHookIT extends HiveITBase {
|
||||||
Assert.assertNotNull(ddlQueries);
|
Assert.assertNotNull(ddlQueries);
|
||||||
Assert.assertEquals(ddlQueries.size(), 1);
|
Assert.assertEquals(ddlQueries.size(), 1);
|
||||||
|
|
||||||
String filename = "pfile://" + mkdir("export");
|
String filename = "file://" + mkdir("export");
|
||||||
|
|
||||||
query = "export table " + tableName + " to \"" + filename + "\"";
|
query = "export table " + tableName + " to \"" + filename + "\"";
|
||||||
|
|
||||||
|
|
@ -1272,7 +1272,7 @@ public class HiveHookIT extends HiveITBase {
|
||||||
Assert.assertNotEquals(processEntity1.getGuid(), processEntity2.getGuid());
|
Assert.assertNotEquals(processEntity1.getGuid(), processEntity2.getGuid());
|
||||||
|
|
||||||
//Export should update same process
|
//Export should update same process
|
||||||
filename = "pfile://" + mkdir("export2");
|
filename = "file://" + mkdir("export2");
|
||||||
query = "export table " + tableName + " to \"" + filename + "\"";
|
query = "export table " + tableName + " to \"" + filename + "\"";
|
||||||
|
|
||||||
runCommand(query);
|
runCommand(query);
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,7 @@ atlas.graph.index.search.backend=solr
|
||||||
|
|
||||||
#Berkeley storage directory
|
#Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
#hbase
|
#hbase
|
||||||
#For standalone mode , specify localhost
|
#For standalone mode , specify localhost
|
||||||
|
|
@ -122,4 +123,4 @@ atlas.authentication.method.file=true
|
||||||
atlas.authentication.method.ldap.type=none
|
atlas.authentication.method.ldap.type=none
|
||||||
atlas.authentication.method.kerberos=false
|
atlas.authentication.method.kerberos=false
|
||||||
# atlas.authentication.method.file.filename=users-credentials.properties
|
# atlas.authentication.method.file.filename=users-credentials.properties
|
||||||
atlas.hook.hive.hs2.ignore.ddl.operations=false
|
atlas.hook.hive.hs2.ignore.ddl.operations=false
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,7 @@ atlas.graph.index.search.backend=solr
|
||||||
|
|
||||||
#Berkeley storage directory
|
#Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
#hbase
|
#hbase
|
||||||
#For standalone mode , specify localhost
|
#For standalone mode , specify localhost
|
||||||
|
|
|
||||||
|
|
@ -37,6 +37,7 @@ atlas.graph.index.search.backend=solr
|
||||||
|
|
||||||
#Berkeley storage directory
|
#Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
#hbase
|
#hbase
|
||||||
#For standalone mode , specify localhost
|
#For standalone mode , specify localhost
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,7 @@ atlas.graph.index.search.backend=solr
|
||||||
|
|
||||||
#Berkeley storage directory
|
#Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
#hbase
|
#hbase
|
||||||
#For standalone mode , specify localhost
|
#For standalone mode , specify localhost
|
||||||
|
|
|
||||||
|
|
@ -38,6 +38,7 @@ atlas.graph.index.search.backend=solr
|
||||||
|
|
||||||
#Berkeley storage directory
|
#Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
#hbase
|
#hbase
|
||||||
#For standalone mode , specify localhost
|
#For standalone mode , specify localhost
|
||||||
|
|
|
||||||
|
|
@ -53,6 +53,7 @@ atlas.graph.index.search.backend=${graph.index.backend}
|
||||||
|
|
||||||
#Berkeley storage directory
|
#Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
#hbase
|
#hbase
|
||||||
#For standalone mode , specify localhost
|
#For standalone mode , specify localhost
|
||||||
|
|
|
||||||
|
|
@ -44,5 +44,10 @@
|
||||||
<groupId>org.apache.atlas</groupId>
|
<groupId>org.apache.atlas</groupId>
|
||||||
<artifactId>atlas-intg</artifactId>
|
<artifactId>atlas-intg</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>javax.xml.bind</groupId>
|
||||||
|
<artifactId>jaxb-api</artifactId>
|
||||||
|
</dependency>
|
||||||
</dependencies>
|
</dependencies>
|
||||||
</project>
|
</project>
|
||||||
|
|
|
||||||
|
|
@ -19,8 +19,8 @@ ATLAS_SERVER_JAVA_VERSION=8
|
||||||
|
|
||||||
ATLAS_VERSION=3.0.0-SNAPSHOT
|
ATLAS_VERSION=3.0.0-SNAPSHOT
|
||||||
UBUNTU_VERSION=20.04
|
UBUNTU_VERSION=20.04
|
||||||
HADOOP_VERSION=3.3.0
|
HADOOP_VERSION=3.3.6
|
||||||
HBASE_VERSION=2.3.3
|
HBASE_VERSION=2.6.0
|
||||||
KAFKA_VERSION=2.8.1
|
KAFKA_VERSION=2.8.2
|
||||||
HIVE_VERSION=3.1.2
|
HIVE_VERSION=3.1.3
|
||||||
HIVE_HADOOP_VERSION=3.1.1
|
HIVE_HADOOP_VERSION=3.1.1
|
||||||
|
|
|
||||||
|
|
@ -14,4 +14,4 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
|
|
||||||
FROM zookeeper:3.5.9
|
FROM zookeeper:3.9.2
|
||||||
|
|
|
||||||
|
|
@ -61,6 +61,11 @@
|
||||||
</exclusion>
|
</exclusion>
|
||||||
</exclusions>
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.tinkerpop</groupId>
|
||||||
|
<artifactId>gremlin-util</artifactId>
|
||||||
|
<version>${tinkerpop.version}</version>
|
||||||
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.commons</groupId>
|
<groupId>org.apache.commons</groupId>
|
||||||
<artifactId>commons-text</artifactId>
|
<artifactId>commons-text</artifactId>
|
||||||
|
|
|
||||||
|
|
@ -1,132 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<!--
|
|
||||||
~ Licensed to the Apache Software Foundation (ASF) under one
|
|
||||||
~ or more contributor license agreements. See the NOTICE file
|
|
||||||
~ distributed with this work for additional information
|
|
||||||
~ regarding copyright ownership. The ASF licenses this file
|
|
||||||
~ to you under the Apache License, Version 2.0 (the
|
|
||||||
~ "License"); you may not use this file except in compliance
|
|
||||||
~ with the License. You may obtain a copy of the License at
|
|
||||||
~
|
|
||||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
~
|
|
||||||
~ Unless required by applicable law or agreed to in writing, software
|
|
||||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
~ See the License for the specific language governing permissions and
|
|
||||||
~ limitations under the License.
|
|
||||||
-->
|
|
||||||
|
|
||||||
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
|
||||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
|
|
||||||
<modelVersion>4.0.0</modelVersion>
|
|
||||||
<parent>
|
|
||||||
<artifactId>atlas-graphdb</artifactId>
|
|
||||||
<groupId>org.apache.atlas</groupId>
|
|
||||||
<version>3.0.0-SNAPSHOT</version>
|
|
||||||
</parent>
|
|
||||||
<artifactId>atlas-janusgraph-hbase2</artifactId>
|
|
||||||
<description>Apache Atlas JanusGraph-HBase2 Module</description>
|
|
||||||
<name>Apache Atlas JanusGraph-HBase2 Module</name>
|
|
||||||
<packaging>jar</packaging>
|
|
||||||
|
|
||||||
<dependencies>
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.janusgraph</groupId>
|
|
||||||
<artifactId>janusgraph-core</artifactId>
|
|
||||||
<version>${janusgraph.version}</version>
|
|
||||||
<exclusions>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>com.codahale.metrics</groupId>
|
|
||||||
<artifactId>*</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.noggit</groupId>
|
|
||||||
<artifactId>noggit</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.apache.tinkerpop</groupId>
|
|
||||||
<artifactId>gremlin-shaded</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.apache.tinkerpop</groupId>
|
|
||||||
<artifactId>gremlin-server</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.apache.tinkerpop</groupId>
|
|
||||||
<artifactId>gremlin-groovy</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.apache.tinkerpop</groupId>
|
|
||||||
<artifactId>gremlin-core</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.apache.tinkerpop</groupId>
|
|
||||||
<artifactId>gremlin-driver</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.apache.tinkerpop</groupId>
|
|
||||||
<artifactId>tinkergraph-gremlin</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.apache.commons</groupId>
|
|
||||||
<artifactId>commons-configuration2</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.apache.commons</groupId>
|
|
||||||
<artifactId>commons-text</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>com.rabbitmq</groupId>
|
|
||||||
<artifactId>amqp-client</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
</exclusions>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.apache.hadoop</groupId>
|
|
||||||
<artifactId>hadoop-common</artifactId>
|
|
||||||
<version>${hadoop.version}</version>
|
|
||||||
<scope>provided</scope>
|
|
||||||
<exclusions>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.apache.commons</groupId>
|
|
||||||
<artifactId>commons-configuration2</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.apache.commons</groupId>
|
|
||||||
<artifactId>commons-text</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
</exclusions>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.apache.hbase</groupId>
|
|
||||||
<artifactId>hbase-shaded-client</artifactId>
|
|
||||||
<version>${hbase.version}</version>
|
|
||||||
<optional>true</optional>
|
|
||||||
<exclusions>
|
|
||||||
<exclusion>
|
|
||||||
<artifactId>avro</artifactId>
|
|
||||||
<groupId>org.apache.avro</groupId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<artifactId>jruby-complete</artifactId>
|
|
||||||
<groupId>org.jruby</groupId>
|
|
||||||
</exclusion>
|
|
||||||
<exclusion>
|
|
||||||
<artifactId>asm</artifactId>
|
|
||||||
<groupId>asm</groupId>
|
|
||||||
</exclusion>
|
|
||||||
</exclusions>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.apache.commons</groupId>
|
|
||||||
<artifactId>commons-text</artifactId>
|
|
||||||
<version>${commons-text.version}</version>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
</dependencies>
|
|
||||||
|
|
||||||
</project>
|
|
||||||
|
|
@ -1,74 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Copyright DataStax, Inc.
|
|
||||||
* <p>
|
|
||||||
* Please see the included license file for details.
|
|
||||||
*/
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import org.apache.hadoop.hbase.ClusterStatus;
|
|
||||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
|
||||||
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
|
|
||||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
|
||||||
import org.apache.hadoop.hbase.client.TableDescriptor;
|
|
||||||
|
|
||||||
import java.io.Closeable;
|
|
||||||
import java.io.IOException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This interface hides ABI/API breaking changes that HBase has made to its Admin/HBaseAdmin over the course
|
|
||||||
* of development from 0.94 to 1.0 and beyond.
|
|
||||||
*/
|
|
||||||
public interface AdminMask extends Closeable
|
|
||||||
{
|
|
||||||
|
|
||||||
void clearTable(String tableName, long timestamp) throws IOException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Drop given table. Table can be either enabled or disabled.
|
|
||||||
* @param tableName Name of the table to delete
|
|
||||||
* @throws IOException
|
|
||||||
*/
|
|
||||||
void dropTable(String tableName) throws IOException;
|
|
||||||
|
|
||||||
TableDescriptor getTableDescriptor(String tableName) throws TableNotFoundException, IOException;
|
|
||||||
|
|
||||||
boolean tableExists(String tableName) throws IOException;
|
|
||||||
|
|
||||||
void createTable(TableDescriptor desc) throws IOException;
|
|
||||||
|
|
||||||
void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) throws IOException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Estimate the number of regionservers in the HBase cluster.
|
|
||||||
*
|
|
||||||
* This is usually implemented by calling
|
|
||||||
* {@link HBaseAdmin#getClusterStatus()} and then
|
|
||||||
* {@link ClusterStatus#getServers()} and finally {@code size()} on the
|
|
||||||
* returned server list.
|
|
||||||
*
|
|
||||||
* @return the number of servers in the cluster or -1 if it could not be determined
|
|
||||||
*/
|
|
||||||
int getEstimatedRegionServerCount();
|
|
||||||
|
|
||||||
void disableTable(String tableName) throws IOException;
|
|
||||||
|
|
||||||
void enableTable(String tableName) throws IOException;
|
|
||||||
|
|
||||||
boolean isTableDisabled(String tableName) throws IOException;
|
|
||||||
|
|
||||||
void addColumn(String tableName, ColumnFamilyDescriptor columnDescriptor) throws IOException;
|
|
||||||
}
|
|
||||||
|
|
@ -1,55 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Copyright DataStax, Inc.
|
|
||||||
* <p>
|
|
||||||
* Please see the included license file for details.
|
|
||||||
*/
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import org.apache.hadoop.hbase.HRegionLocation;
|
|
||||||
|
|
||||||
import java.io.Closeable;
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.List;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This interface hides ABI/API breaking changes that HBase has made to its (H)Connection class over the course
|
|
||||||
* of development from 0.94 to 1.0 and beyond.
|
|
||||||
*/
|
|
||||||
public interface ConnectionMask extends Closeable
|
|
||||||
{
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Retrieve the TableMask compatibility layer object for the supplied table name.
|
|
||||||
* @return The TableMask for the specified table.
|
|
||||||
* @throws IOException in the case of backend exceptions.
|
|
||||||
*/
|
|
||||||
TableMask getTable(String name) throws IOException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Retrieve the AdminMask compatibility layer object for this Connection.
|
|
||||||
* @return The AdminMask for this Connection
|
|
||||||
* @throws IOException in the case of backend exceptions.
|
|
||||||
*/
|
|
||||||
AdminMask getAdmin() throws IOException;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Retrieve the RegionLocations for the supplied table name.
|
|
||||||
* @return A map of HRegionInfo to ServerName that describes the storage regions for the named table.
|
|
||||||
* @throws IOException in the case of backend exceptions.
|
|
||||||
*/
|
|
||||||
List<HRegionLocation> getRegionLocations(String tablename) throws IOException;
|
|
||||||
}
|
|
||||||
|
|
@ -1,167 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import org.apache.hadoop.hbase.TableName;
|
|
||||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
|
||||||
import org.apache.hadoop.hbase.client.Admin;
|
|
||||||
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
|
|
||||||
import org.apache.hadoop.hbase.client.Delete;
|
|
||||||
import org.apache.hadoop.hbase.client.Result;
|
|
||||||
import org.apache.hadoop.hbase.client.ResultScanner;
|
|
||||||
import org.apache.hadoop.hbase.client.Scan;
|
|
||||||
import org.apache.hadoop.hbase.client.Table;
|
|
||||||
import org.apache.hadoop.hbase.client.TableDescriptor;
|
|
||||||
import org.slf4j.Logger;
|
|
||||||
import org.slf4j.LoggerFactory;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.Iterator;
|
|
||||||
import java.util.List;
|
|
||||||
|
|
||||||
public class HBaseAdmin2_0 implements AdminMask
|
|
||||||
{
|
|
||||||
|
|
||||||
private static final Logger log = LoggerFactory.getLogger(HBaseAdmin2_0.class);
|
|
||||||
|
|
||||||
private final Admin adm;
|
|
||||||
|
|
||||||
public HBaseAdmin2_0(Admin adm)
|
|
||||||
{
|
|
||||||
this.adm = adm;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Delete all rows from the given table. This method is intended only for development and testing use.
|
|
||||||
* @param tableString
|
|
||||||
* @param timestamp
|
|
||||||
* @throws IOException
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
public void clearTable(String tableString, long timestamp) throws IOException
|
|
||||||
{
|
|
||||||
TableName tableName = TableName.valueOf(tableString);
|
|
||||||
|
|
||||||
if (!adm.tableExists(tableName)) {
|
|
||||||
log.debug("Attempted to clear table {} before it exists (noop)", tableString);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unfortunately, linear scanning and deleting rows is faster in HBase when running integration tests than
|
|
||||||
// disabling and deleting/truncating tables.
|
|
||||||
final Scan scan = new Scan();
|
|
||||||
scan.setCacheBlocks(false);
|
|
||||||
scan.setCaching(2000);
|
|
||||||
scan.setTimeRange(0, Long.MAX_VALUE);
|
|
||||||
scan.readVersions(1);
|
|
||||||
|
|
||||||
try (final Table table = adm.getConnection().getTable(tableName);
|
|
||||||
final ResultScanner scanner = table.getScanner(scan)) {
|
|
||||||
final Iterator<Result> iterator = scanner.iterator();
|
|
||||||
final int batchSize = 1000;
|
|
||||||
final List<Delete> deleteList = new ArrayList<>();
|
|
||||||
while (iterator.hasNext()) {
|
|
||||||
deleteList.add(new Delete(iterator.next().getRow(), timestamp));
|
|
||||||
if (!iterator.hasNext() || deleteList.size() == batchSize) {
|
|
||||||
table.delete(deleteList);
|
|
||||||
deleteList.clear();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void dropTable(String tableString) throws IOException {
|
|
||||||
final TableName tableName = TableName.valueOf(tableString);
|
|
||||||
|
|
||||||
if (!adm.tableExists(tableName)) {
|
|
||||||
log.debug("Attempted to drop table {} before it exists (noop)", tableString);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (adm.isTableEnabled(tableName)) {
|
|
||||||
adm.disableTable(tableName);
|
|
||||||
}
|
|
||||||
adm.deleteTable(tableName);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public TableDescriptor getTableDescriptor(String tableString) throws TableNotFoundException, IOException
|
|
||||||
{
|
|
||||||
return adm.getDescriptor(TableName.valueOf(tableString));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean tableExists(String tableString) throws IOException
|
|
||||||
{
|
|
||||||
return adm.tableExists(TableName.valueOf(tableString));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void createTable(TableDescriptor desc) throws IOException
|
|
||||||
{
|
|
||||||
adm.createTable(desc);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) throws IOException
|
|
||||||
{
|
|
||||||
adm.createTable(desc, startKey, endKey, numRegions);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public int getEstimatedRegionServerCount()
|
|
||||||
{
|
|
||||||
int serverCount = -1;
|
|
||||||
try {
|
|
||||||
serverCount = adm.getClusterStatus().getServers().size();
|
|
||||||
log.debug("Read {} servers from HBase ClusterStatus", serverCount);
|
|
||||||
} catch (IOException e) {
|
|
||||||
log.debug("Unable to retrieve HBase cluster status", e);
|
|
||||||
}
|
|
||||||
return serverCount;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void disableTable(String tableString) throws IOException
|
|
||||||
{
|
|
||||||
adm.disableTable(TableName.valueOf(tableString));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void enableTable(String tableString) throws IOException
|
|
||||||
{
|
|
||||||
adm.enableTable(TableName.valueOf(tableString));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean isTableDisabled(String tableString) throws IOException
|
|
||||||
{
|
|
||||||
return adm.isTableDisabled(TableName.valueOf(tableString));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void addColumn(String tableString, ColumnFamilyDescriptor columnDescriptor) throws IOException
|
|
||||||
{
|
|
||||||
adm.addColumnFamily(TableName.valueOf(tableString), columnDescriptor);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void close() throws IOException
|
|
||||||
{
|
|
||||||
adm.close();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,58 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import org.apache.hadoop.conf.Configuration;
|
|
||||||
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
|
|
||||||
import org.apache.hadoop.hbase.client.Delete;
|
|
||||||
import org.apache.hadoop.hbase.client.TableDescriptor;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
|
|
||||||
public interface HBaseCompat {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Configure the compression scheme {@code algo} on a column family
|
|
||||||
* descriptor {@code cd}. The {@code algo} parameter is a string value
|
|
||||||
* corresponding to one of the values of HBase's Compression enum. The
|
|
||||||
* Compression enum has moved between packages as HBase has evolved, which
|
|
||||||
* is why this method has a String argument in the signature instead of the
|
|
||||||
* enum itself.
|
|
||||||
* @param cd
|
|
||||||
* column family to configure
|
|
||||||
* @param algo
|
|
||||||
*/
|
|
||||||
public ColumnFamilyDescriptor setCompression(ColumnFamilyDescriptor cd, String algo);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Create and return a HTableDescriptor instance with the given name. The
|
|
||||||
* constructors on this method have remained stable over HBase development
|
|
||||||
* so far, but the old HTableDescriptor(String) constructor & byte[] friends
|
|
||||||
* are now marked deprecated and may eventually be removed in favor of the
|
|
||||||
* HTableDescriptor(TableName) constructor. That constructor (and the
|
|
||||||
* TableName type) only exists in newer HBase versions. Hence this method.
|
|
||||||
*
|
|
||||||
* @param tableName
|
|
||||||
* HBase table name
|
|
||||||
* @return a new table descriptor instance
|
|
||||||
*/
|
|
||||||
public TableDescriptor newTableDescriptor(String tableName);
|
|
||||||
|
|
||||||
ConnectionMask createConnection(Configuration conf) throws IOException;
|
|
||||||
|
|
||||||
TableDescriptor addColumnFamilyToTableDescriptor(TableDescriptor tdesc, ColumnFamilyDescriptor cdesc);
|
|
||||||
|
|
||||||
void setTimestamp(Delete d, long timestamp);
|
|
||||||
}
|
|
||||||
|
|
@ -1,61 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import org.apache.hadoop.conf.Configuration;
|
|
||||||
import org.apache.hadoop.hbase.TableName;
|
|
||||||
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
|
|
||||||
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
|
|
||||||
import org.apache.hadoop.hbase.client.ConnectionFactory;
|
|
||||||
import org.apache.hadoop.hbase.client.Delete;
|
|
||||||
import org.apache.hadoop.hbase.client.TableDescriptor;
|
|
||||||
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
|
|
||||||
import org.apache.hadoop.hbase.io.compress.Compression;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
|
|
||||||
public class HBaseCompat2_0 implements HBaseCompat {
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ColumnFamilyDescriptor setCompression(ColumnFamilyDescriptor cd, String algo) {
|
|
||||||
return ColumnFamilyDescriptorBuilder.newBuilder(cd).setCompressionType(Compression.Algorithm.valueOf(algo)).build();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public TableDescriptor newTableDescriptor(String tableName) {
|
|
||||||
TableName tn = TableName.valueOf(tableName);
|
|
||||||
|
|
||||||
return TableDescriptorBuilder.newBuilder(tn).build();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ConnectionMask createConnection(Configuration conf) throws IOException
|
|
||||||
{
|
|
||||||
return new HConnection2_0(ConnectionFactory.createConnection(conf));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public TableDescriptor addColumnFamilyToTableDescriptor(TableDescriptor tdesc, ColumnFamilyDescriptor cdesc)
|
|
||||||
{
|
|
||||||
return TableDescriptorBuilder.newBuilder(tdesc).addColumnFamily(cdesc).build();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void setTimestamp(Delete d, long timestamp)
|
|
||||||
{
|
|
||||||
d.setTimestamp(timestamp);
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
@ -1,90 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import org.apache.hadoop.hbase.util.VersionInfo;
|
|
||||||
import org.slf4j.Logger;
|
|
||||||
import org.slf4j.LoggerFactory;
|
|
||||||
|
|
||||||
public class HBaseCompatLoader {
|
|
||||||
|
|
||||||
private static final Logger log = LoggerFactory.getLogger(HBaseCompatLoader.class);
|
|
||||||
|
|
||||||
private static final String DEFAULT_HBASE_COMPAT_VERSION = "1.2";
|
|
||||||
|
|
||||||
private static final String HBASE_VERSION_2_STRING = "2.";
|
|
||||||
|
|
||||||
private static final String DEFAULT_HBASE_COMPAT_CLASS_NAME =
|
|
||||||
"org.janusgraph.diskstorage.hbase2.HBaseCompat2_0";
|
|
||||||
|
|
||||||
private static final String[] HBASE_SUPPORTED_VERSIONS =
|
|
||||||
new String[] { "0.98", "1.0", "1.1", "1.2", "1.3", "2.0" };
|
|
||||||
|
|
||||||
private static HBaseCompat cachedCompat;
|
|
||||||
|
|
||||||
public synchronized static HBaseCompat getCompat(String classOverride) {
|
|
||||||
|
|
||||||
if (null != cachedCompat) {
|
|
||||||
log.debug("Returning cached HBase compatibility layer: {}", cachedCompat);
|
|
||||||
return cachedCompat;
|
|
||||||
}
|
|
||||||
|
|
||||||
HBaseCompat compat;
|
|
||||||
String className = null;
|
|
||||||
String classNameSource = null;
|
|
||||||
|
|
||||||
if (null != classOverride) {
|
|
||||||
className = classOverride;
|
|
||||||
classNameSource = "from explicit configuration";
|
|
||||||
} else {
|
|
||||||
String hbaseVersion = VersionInfo.getVersion();
|
|
||||||
for (String supportedVersion : HBASE_SUPPORTED_VERSIONS) {
|
|
||||||
if (hbaseVersion.startsWith(supportedVersion + ".")) {
|
|
||||||
if (hbaseVersion.startsWith(HBASE_VERSION_2_STRING)) {
|
|
||||||
// All HBase 2.x maps to HBaseCompat2_0.
|
|
||||||
className = DEFAULT_HBASE_COMPAT_CLASS_NAME;
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
className = "org.janusgraph.diskstorage.hbase2.HBaseCompat" + supportedVersion.replaceAll("\\.", "_");
|
|
||||||
}
|
|
||||||
classNameSource = "supporting runtime HBase version " + hbaseVersion;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (null == className) {
|
|
||||||
log.info("The HBase version {} is not explicitly supported by JanusGraph. " +
|
|
||||||
"Loading JanusGraph's compatibility layer for its most recent supported HBase version ({})",
|
|
||||||
hbaseVersion, DEFAULT_HBASE_COMPAT_VERSION);
|
|
||||||
className = DEFAULT_HBASE_COMPAT_CLASS_NAME;
|
|
||||||
classNameSource = " by default";
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
final String errTemplate = " when instantiating HBase compatibility class " + className;
|
|
||||||
|
|
||||||
try {
|
|
||||||
compat = (HBaseCompat)Class.forName(className).newInstance();
|
|
||||||
log.info("Instantiated HBase compatibility layer {}: {}", classNameSource, compat.getClass().getCanonicalName());
|
|
||||||
} catch (IllegalAccessException e) {
|
|
||||||
throw new RuntimeException(e.getClass().getSimpleName() + errTemplate, e);
|
|
||||||
} catch (InstantiationException e) {
|
|
||||||
throw new RuntimeException(e.getClass().getSimpleName() + errTemplate, e);
|
|
||||||
} catch (ClassNotFoundException e) {
|
|
||||||
throw new RuntimeException(e.getClass().getSimpleName() + errTemplate, e);
|
|
||||||
}
|
|
||||||
|
|
||||||
return cachedCompat = compat;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,391 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import com.google.common.base.Preconditions;
|
|
||||||
import com.google.common.collect.ImmutableMap;
|
|
||||||
import com.google.common.collect.Iterables;
|
|
||||||
import com.google.common.collect.Iterators;
|
|
||||||
import org.apache.hadoop.hbase.client.Get;
|
|
||||||
import org.apache.hadoop.hbase.client.Result;
|
|
||||||
import org.apache.hadoop.hbase.client.ResultScanner;
|
|
||||||
import org.apache.hadoop.hbase.client.Scan;
|
|
||||||
import org.apache.hadoop.hbase.filter.ColumnPaginationFilter;
|
|
||||||
import org.apache.hadoop.hbase.filter.ColumnRangeFilter;
|
|
||||||
import org.apache.hadoop.hbase.filter.Filter;
|
|
||||||
import org.apache.hadoop.hbase.filter.FilterList;
|
|
||||||
import org.apache.hadoop.hbase.util.Bytes;
|
|
||||||
import org.janusgraph.diskstorage.BackendException;
|
|
||||||
import org.janusgraph.diskstorage.Entry;
|
|
||||||
import org.janusgraph.diskstorage.EntryList;
|
|
||||||
import org.janusgraph.diskstorage.EntryMetaData;
|
|
||||||
import org.janusgraph.diskstorage.PermanentBackendException;
|
|
||||||
import org.janusgraph.diskstorage.StaticBuffer;
|
|
||||||
import org.janusgraph.diskstorage.TemporaryBackendException;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KCVMutation;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KCVSUtil;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KeyColumnValueStore;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KeyIterator;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KeyRangeQuery;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KeySliceQuery;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KeySlicesIterator;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.SliceQuery;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.MultiSlicesQuery;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction;
|
|
||||||
import org.janusgraph.diskstorage.util.RecordIterator;
|
|
||||||
import org.janusgraph.diskstorage.util.StaticArrayBuffer;
|
|
||||||
import org.janusgraph.diskstorage.util.StaticArrayEntry;
|
|
||||||
import org.janusgraph.diskstorage.util.StaticArrayEntryList;
|
|
||||||
import org.janusgraph.util.system.IOUtils;
|
|
||||||
import org.slf4j.Logger;
|
|
||||||
import org.slf4j.LoggerFactory;
|
|
||||||
|
|
||||||
import javax.annotation.Nullable;
|
|
||||||
import java.io.Closeable;
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.io.InterruptedIOException;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.Arrays;
|
|
||||||
import java.util.HashMap;
|
|
||||||
import java.util.Iterator;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.NavigableMap;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Here are some areas that might need work:
|
|
||||||
* <p/>
|
|
||||||
* - batching? (consider HTable#batch, HTable#setAutoFlush(false)
|
|
||||||
* - tuning HTable#setWriteBufferSize (?)
|
|
||||||
* - writing a server-side filter to replace ColumnCountGetFilter, which drops
|
|
||||||
* all columns on the row where it reaches its limit. This requires getSlice,
|
|
||||||
* currently, to impose its limit on the client side. That obviously won't
|
|
||||||
* scale.
|
|
||||||
* - RowMutations for combining Puts+Deletes (need a newer HBase than 0.92 for this)
|
|
||||||
* - (maybe) fiddle with HTable#setRegionCachePrefetch and/or #prewarmRegionCache
|
|
||||||
* <p/>
|
|
||||||
* There may be other problem areas. These are just the ones of which I'm aware.
|
|
||||||
*/
|
|
||||||
public class HBaseKeyColumnValueStore implements KeyColumnValueStore {
|
|
||||||
|
|
||||||
private static final Logger logger = LoggerFactory.getLogger(HBaseKeyColumnValueStore.class);
|
|
||||||
|
|
||||||
private final String tableName;
|
|
||||||
private final HBaseStoreManager storeManager;
|
|
||||||
|
|
||||||
// When using shortened CF names, columnFamily is the shortname and storeName is the longname
|
|
||||||
// When not using shortened CF names, they are the same
|
|
||||||
//private final String columnFamily;
|
|
||||||
private final String storeName;
|
|
||||||
// This is columnFamily.getBytes()
|
|
||||||
private final byte[] columnFamilyBytes;
|
|
||||||
private final HBaseGetter entryGetter;
|
|
||||||
|
|
||||||
private final ConnectionMask cnx;
|
|
||||||
|
|
||||||
HBaseKeyColumnValueStore(HBaseStoreManager storeManager, ConnectionMask cnx, String tableName, String columnFamily, String storeName) {
|
|
||||||
this.storeManager = storeManager;
|
|
||||||
this.cnx = cnx;
|
|
||||||
this.tableName = tableName;
|
|
||||||
//this.columnFamily = columnFamily;
|
|
||||||
this.storeName = storeName;
|
|
||||||
this.columnFamilyBytes = Bytes.toBytes(columnFamily);
|
|
||||||
this.entryGetter = new HBaseGetter(storeManager.getMetaDataSchema(storeName));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void close() throws BackendException {
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public EntryList getSlice(KeySliceQuery query, StoreTransaction txh) throws BackendException {
|
|
||||||
Map<StaticBuffer, EntryList> result = getHelper(Arrays.asList(query.getKey()), getFilter(query));
|
|
||||||
return Iterables.getOnlyElement(result.values(), EntryList.EMPTY_LIST);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Map<StaticBuffer,EntryList> getSlice(List<StaticBuffer> keys, SliceQuery query, StoreTransaction txh) throws BackendException {
|
|
||||||
return getHelper(keys, getFilter(query));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void mutate(StaticBuffer key, List<Entry> additions, List<StaticBuffer> deletions, StoreTransaction txh) throws BackendException {
|
|
||||||
Map<StaticBuffer, KCVMutation> mutations = ImmutableMap.of(key, new KCVMutation(additions, deletions));
|
|
||||||
mutateMany(mutations, txh);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void acquireLock(StaticBuffer key,
|
|
||||||
StaticBuffer column,
|
|
||||||
StaticBuffer expectedValue,
|
|
||||||
StoreTransaction txh) throws BackendException {
|
|
||||||
throw new UnsupportedOperationException();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public KeyIterator getKeys(KeyRangeQuery query, StoreTransaction txh) throws BackendException {
|
|
||||||
return executeKeySliceQuery(query.getKeyStart().as(StaticBuffer.ARRAY_FACTORY),
|
|
||||||
query.getKeyEnd().as(StaticBuffer.ARRAY_FACTORY),
|
|
||||||
new FilterList(FilterList.Operator.MUST_PASS_ALL),
|
|
||||||
query);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public String getName() {
|
|
||||||
return storeName;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public KeyIterator getKeys(SliceQuery query, StoreTransaction txh) throws BackendException {
|
|
||||||
return executeKeySliceQuery(new FilterList(FilterList.Operator.MUST_PASS_ALL), query);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public KeySlicesIterator getKeys(MultiSlicesQuery queries, StoreTransaction txh) throws BackendException {
|
|
||||||
throw new UnsupportedOperationException();
|
|
||||||
}
|
|
||||||
|
|
||||||
public static Filter getFilter(SliceQuery query) {
|
|
||||||
byte[] colStartBytes = query.getSliceStart().length() > 0 ? query.getSliceStart().as(StaticBuffer.ARRAY_FACTORY) : null;
|
|
||||||
byte[] colEndBytes = query.getSliceEnd().length() > 0 ? query.getSliceEnd().as(StaticBuffer.ARRAY_FACTORY) : null;
|
|
||||||
|
|
||||||
Filter filter = new ColumnRangeFilter(colStartBytes, true, colEndBytes, false);
|
|
||||||
|
|
||||||
if (query.hasLimit()) {
|
|
||||||
filter = new FilterList(FilterList.Operator.MUST_PASS_ALL,
|
|
||||||
filter,
|
|
||||||
new ColumnPaginationFilter(query.getLimit(), 0));
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.debug("Generated HBase Filter {}", filter);
|
|
||||||
|
|
||||||
return filter;
|
|
||||||
}
|
|
||||||
|
|
||||||
private Map<StaticBuffer,EntryList> getHelper(List<StaticBuffer> keys, Filter getFilter) throws BackendException {
|
|
||||||
List<Get> requests = new ArrayList<Get>(keys.size());
|
|
||||||
{
|
|
||||||
for (StaticBuffer key : keys) {
|
|
||||||
Get g = new Get(key.as(StaticBuffer.ARRAY_FACTORY)).addFamily(columnFamilyBytes).setFilter(getFilter);
|
|
||||||
try {
|
|
||||||
g.setTimeRange(0, Long.MAX_VALUE);
|
|
||||||
} catch (IOException e) {
|
|
||||||
throw new PermanentBackendException(e);
|
|
||||||
}
|
|
||||||
requests.add(g);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Map<StaticBuffer,EntryList> resultMap = new HashMap<StaticBuffer,EntryList>(keys.size());
|
|
||||||
|
|
||||||
try {
|
|
||||||
TableMask table = null;
|
|
||||||
Result[] results = null;
|
|
||||||
|
|
||||||
try {
|
|
||||||
table = cnx.getTable(tableName);
|
|
||||||
results = table.get(requests);
|
|
||||||
} finally {
|
|
||||||
IOUtils.closeQuietly(table);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (results == null)
|
|
||||||
return KCVSUtil.emptyResults(keys);
|
|
||||||
|
|
||||||
assert results.length==keys.size();
|
|
||||||
|
|
||||||
for (int i = 0; i < results.length; i++) {
|
|
||||||
Result result = results[i];
|
|
||||||
NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> f = result.getMap();
|
|
||||||
|
|
||||||
if (f == null) { // no result for this key
|
|
||||||
resultMap.put(keys.get(i), EntryList.EMPTY_LIST);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
// actual key with <timestamp, value>
|
|
||||||
NavigableMap<byte[], NavigableMap<Long, byte[]>> r = f.get(columnFamilyBytes);
|
|
||||||
resultMap.put(keys.get(i), (r == null)
|
|
||||||
? EntryList.EMPTY_LIST
|
|
||||||
: StaticArrayEntryList.ofBytes(r.entrySet(), entryGetter));
|
|
||||||
}
|
|
||||||
|
|
||||||
return resultMap;
|
|
||||||
} catch (InterruptedIOException e) {
|
|
||||||
// added to support traversal interruption
|
|
||||||
Thread.currentThread().interrupt();
|
|
||||||
throw new PermanentBackendException(e);
|
|
||||||
} catch (IOException e) {
|
|
||||||
throw new TemporaryBackendException(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private void mutateMany(Map<StaticBuffer, KCVMutation> mutations, StoreTransaction txh) throws BackendException {
|
|
||||||
storeManager.mutateMany(ImmutableMap.of(storeName, mutations), txh);
|
|
||||||
}
|
|
||||||
|
|
||||||
private KeyIterator executeKeySliceQuery(FilterList filters, @Nullable SliceQuery columnSlice) throws BackendException {
|
|
||||||
return executeKeySliceQuery(null, null, filters, columnSlice);
|
|
||||||
}
|
|
||||||
|
|
||||||
private KeyIterator executeKeySliceQuery(@Nullable byte[] startKey,
|
|
||||||
@Nullable byte[] endKey,
|
|
||||||
FilterList filters,
|
|
||||||
@Nullable SliceQuery columnSlice) throws BackendException {
|
|
||||||
Scan scan = new Scan().addFamily(columnFamilyBytes);
|
|
||||||
|
|
||||||
try {
|
|
||||||
scan.setTimeRange(0, Long.MAX_VALUE);
|
|
||||||
} catch (IOException e) {
|
|
||||||
throw new PermanentBackendException(e);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (startKey != null)
|
|
||||||
scan.withStartRow(startKey);
|
|
||||||
|
|
||||||
if (endKey != null)
|
|
||||||
scan.withStopRow(endKey);
|
|
||||||
|
|
||||||
if (columnSlice != null) {
|
|
||||||
filters.addFilter(getFilter(columnSlice));
|
|
||||||
}
|
|
||||||
|
|
||||||
TableMask table = null;
|
|
||||||
|
|
||||||
try {
|
|
||||||
table = cnx.getTable(tableName);
|
|
||||||
return new RowIterator(table, table.getScanner(scan.setFilter(filters)), columnFamilyBytes);
|
|
||||||
} catch (IOException e) {
|
|
||||||
IOUtils.closeQuietly(table);
|
|
||||||
throw new PermanentBackendException(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private class RowIterator implements KeyIterator {
|
|
||||||
private final Closeable table;
|
|
||||||
private final Iterator<Result> rows;
|
|
||||||
private final byte[] columnFamilyBytes;
|
|
||||||
|
|
||||||
private Result currentRow;
|
|
||||||
private boolean isClosed;
|
|
||||||
|
|
||||||
public RowIterator(Closeable table, ResultScanner rows, byte[] columnFamilyBytes) {
|
|
||||||
this.table = table;
|
|
||||||
this.columnFamilyBytes = Arrays.copyOf(columnFamilyBytes, columnFamilyBytes.length);
|
|
||||||
this.rows = Iterators.filter(rows.iterator(), result -> null != result && null != result.getRow());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public RecordIterator<Entry> getEntries() {
|
|
||||||
ensureOpen();
|
|
||||||
|
|
||||||
return new RecordIterator<Entry>() {
|
|
||||||
private final Iterator<Map.Entry<byte[], NavigableMap<Long, byte[]>>> kv;
|
|
||||||
{
|
|
||||||
final Map<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = currentRow.getMap();
|
|
||||||
Preconditions.checkNotNull(map);
|
|
||||||
kv = map.get(columnFamilyBytes).entrySet().iterator();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean hasNext() {
|
|
||||||
ensureOpen();
|
|
||||||
return kv.hasNext();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Entry next() {
|
|
||||||
ensureOpen();
|
|
||||||
return StaticArrayEntry.ofBytes(kv.next(), entryGetter);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void close() {
|
|
||||||
isClosed = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void remove() {
|
|
||||||
throw new UnsupportedOperationException();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean hasNext() {
|
|
||||||
ensureOpen();
|
|
||||||
return rows.hasNext();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public StaticBuffer next() {
|
|
||||||
ensureOpen();
|
|
||||||
|
|
||||||
currentRow = rows.next();
|
|
||||||
return StaticArrayBuffer.of(currentRow.getRow());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void close() {
|
|
||||||
IOUtils.closeQuietly(table);
|
|
||||||
isClosed = true;
|
|
||||||
logger.debug("RowIterator closed table {}", table);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void remove() {
|
|
||||||
throw new UnsupportedOperationException();
|
|
||||||
}
|
|
||||||
|
|
||||||
private void ensureOpen() {
|
|
||||||
if (isClosed)
|
|
||||||
throw new IllegalStateException("Iterator has been closed.");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private static class HBaseGetter implements StaticArrayEntry.GetColVal<Map.Entry<byte[], NavigableMap<Long, byte[]>>, byte[]> {
|
|
||||||
|
|
||||||
private final EntryMetaData[] schema;
|
|
||||||
|
|
||||||
private HBaseGetter(EntryMetaData[] schema) {
|
|
||||||
this.schema = schema;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public byte[] getColumn(Map.Entry<byte[], NavigableMap<Long, byte[]>> element) {
|
|
||||||
return element.getKey();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public byte[] getValue(Map.Entry<byte[], NavigableMap<Long, byte[]>> element) {
|
|
||||||
return element.getValue().lastEntry().getValue();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public EntryMetaData[] getMetaSchema(Map.Entry<byte[], NavigableMap<Long, byte[]>> element) {
|
|
||||||
return schema;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Object getMetaData(Map.Entry<byte[], NavigableMap<Long, byte[]>> element, EntryMetaData meta) {
|
|
||||||
switch(meta) {
|
|
||||||
case TIMESTAMP:
|
|
||||||
return element.getValue().lastEntry().getKey();
|
|
||||||
default:
|
|
||||||
throw new UnsupportedOperationException("Unsupported meta data: " + meta);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,988 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import com.google.common.annotations.VisibleForTesting;
|
|
||||||
import com.google.common.base.Joiner;
|
|
||||||
import com.google.common.base.Preconditions;
|
|
||||||
import com.google.common.collect.BiMap;
|
|
||||||
import com.google.common.collect.ImmutableBiMap;
|
|
||||||
import com.google.common.collect.ImmutableMap;
|
|
||||||
import com.google.common.collect.Sets;
|
|
||||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
|
||||||
import org.apache.hadoop.hbase.HRegionInfo;
|
|
||||||
import org.apache.hadoop.hbase.HRegionLocation;
|
|
||||||
import org.apache.hadoop.hbase.MasterNotRunningException;
|
|
||||||
import org.apache.hadoop.hbase.ServerName;
|
|
||||||
import org.apache.hadoop.hbase.TableNotEnabledException;
|
|
||||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
|
||||||
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
|
|
||||||
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
|
|
||||||
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
|
|
||||||
import org.apache.hadoop.hbase.client.Delete;
|
|
||||||
import org.apache.hadoop.hbase.client.Mutation;
|
|
||||||
import org.apache.hadoop.hbase.client.Put;
|
|
||||||
import org.apache.hadoop.hbase.client.Row;
|
|
||||||
import org.apache.hadoop.hbase.client.TableDescriptor;
|
|
||||||
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
|
|
||||||
import org.apache.hadoop.hbase.util.Bytes;
|
|
||||||
import org.apache.hadoop.hbase.util.Pair;
|
|
||||||
import org.apache.hadoop.hbase.util.VersionInfo;
|
|
||||||
import org.janusgraph.core.JanusGraphException;
|
|
||||||
import org.janusgraph.diskstorage.BackendException;
|
|
||||||
import org.janusgraph.diskstorage.BaseTransactionConfig;
|
|
||||||
import org.janusgraph.diskstorage.Entry;
|
|
||||||
import org.janusgraph.diskstorage.EntryMetaData;
|
|
||||||
import org.janusgraph.diskstorage.PermanentBackendException;
|
|
||||||
import org.janusgraph.diskstorage.StaticBuffer;
|
|
||||||
import org.janusgraph.diskstorage.StoreMetaData;
|
|
||||||
import org.janusgraph.diskstorage.TemporaryBackendException;
|
|
||||||
import org.janusgraph.diskstorage.common.DistributedStoreManager;
|
|
||||||
import org.janusgraph.diskstorage.configuration.ConfigElement;
|
|
||||||
import org.janusgraph.diskstorage.configuration.ConfigNamespace;
|
|
||||||
import org.janusgraph.diskstorage.configuration.ConfigOption;
|
|
||||||
import org.janusgraph.diskstorage.configuration.Configuration;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KCVMutation;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KeyColumnValueStore;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KeyColumnValueStoreManager;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.KeyRange;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.StandardStoreFeatures;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.StoreFeatures;
|
|
||||||
import org.janusgraph.diskstorage.keycolumnvalue.StoreTransaction;
|
|
||||||
import org.janusgraph.diskstorage.util.BufferUtil;
|
|
||||||
import org.janusgraph.diskstorage.util.StaticArrayBuffer;
|
|
||||||
import org.janusgraph.diskstorage.util.time.TimestampProviders;
|
|
||||||
import org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration;
|
|
||||||
import org.janusgraph.graphdb.configuration.PreInitializeConfigOptions;
|
|
||||||
import org.janusgraph.util.system.IOUtils;
|
|
||||||
import org.janusgraph.util.system.NetworkUtil;
|
|
||||||
import org.slf4j.Logger;
|
|
||||||
import org.slf4j.LoggerFactory;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.nio.ByteBuffer;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.Collection;
|
|
||||||
import java.util.HashMap;
|
|
||||||
import java.util.LinkedList;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.concurrent.ConcurrentHashMap;
|
|
||||||
import java.util.concurrent.ConcurrentMap;
|
|
||||||
|
|
||||||
import static org.janusgraph.diskstorage.Backend.EDGESTORE_NAME;
|
|
||||||
import static org.janusgraph.diskstorage.Backend.INDEXSTORE_NAME;
|
|
||||||
import static org.janusgraph.diskstorage.Backend.LOCK_STORE_SUFFIX;
|
|
||||||
import static org.janusgraph.diskstorage.Backend.SYSTEM_MGMT_LOG_NAME;
|
|
||||||
import static org.janusgraph.diskstorage.Backend.SYSTEM_TX_LOG_NAME;
|
|
||||||
import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.DROP_ON_CLEAR;
|
|
||||||
import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.GRAPH_NAME;
|
|
||||||
import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.IDS_STORE_NAME;
|
|
||||||
import static org.janusgraph.graphdb.configuration.GraphDatabaseConfiguration.SYSTEM_PROPERTIES_STORE_NAME;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Storage Manager for HBase
|
|
||||||
*/
|
|
||||||
@PreInitializeConfigOptions
|
|
||||||
public class HBaseStoreManager extends DistributedStoreManager implements KeyColumnValueStoreManager {
|
|
||||||
|
|
||||||
private static final Logger logger = LoggerFactory.getLogger(HBaseStoreManager.class);
|
|
||||||
|
|
||||||
public static final ConfigNamespace HBASE_NS =
|
|
||||||
new ConfigNamespace(GraphDatabaseConfiguration.STORAGE_NS, "hbase", "HBase storage options");
|
|
||||||
|
|
||||||
public static final ConfigOption<Boolean> SHORT_CF_NAMES =
|
|
||||||
new ConfigOption<>(HBASE_NS, "short-cf-names",
|
|
||||||
"Whether to shorten the names of JanusGraph's column families to one-character mnemonics " +
|
|
||||||
"to conserve storage space", ConfigOption.Type.FIXED, true);
|
|
||||||
|
|
||||||
public static final String COMPRESSION_DEFAULT = "-DEFAULT-";
|
|
||||||
|
|
||||||
public static final ConfigOption<String> COMPRESSION =
|
|
||||||
new ConfigOption<>(HBASE_NS, "compression-algorithm",
|
|
||||||
"An HBase Compression.Algorithm enum string which will be applied to newly created column families. " +
|
|
||||||
"The compression algorithm must be installed and available on the HBase cluster. JanusGraph cannot install " +
|
|
||||||
"and configure new compression algorithms on the HBase cluster by itself.",
|
|
||||||
ConfigOption.Type.MASKABLE, "SNAPPY");
|
|
||||||
|
|
||||||
public static final ConfigOption<Boolean> SKIP_SCHEMA_CHECK =
|
|
||||||
new ConfigOption<>(HBASE_NS, "skip-schema-check",
|
|
||||||
"Assume that JanusGraph's HBase table and column families already exist. " +
|
|
||||||
"When this is true, JanusGraph will not check for the existence of its table/CFs, " +
|
|
||||||
"nor will it attempt to create them under any circumstances. This is useful " +
|
|
||||||
"when running JanusGraph without HBase admin privileges.",
|
|
||||||
ConfigOption.Type.MASKABLE, false);
|
|
||||||
|
|
||||||
public static final ConfigOption<String> HBASE_TABLE =
|
|
||||||
new ConfigOption<>(HBASE_NS, "table",
|
|
||||||
"The name of the table JanusGraph will use. When " + ConfigElement.getPath(SKIP_SCHEMA_CHECK) +
|
|
||||||
" is false, JanusGraph will automatically create this table if it does not already exist." +
|
|
||||||
" If this configuration option is not provided but graph.graphname is, the table will be set" +
|
|
||||||
" to that value.",
|
|
||||||
ConfigOption.Type.LOCAL, "janusgraph");
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Related bug fixed in 0.98.0, 0.94.7, 0.95.0:
|
|
||||||
*
|
|
||||||
* https://issues.apache.org/jira/browse/HBASE-8170
|
|
||||||
*/
|
|
||||||
public static final int MIN_REGION_COUNT = 3;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The total number of HBase regions to create with JanusGraph's table. This
|
|
||||||
* setting only effects table creation; this normally happens just once when
|
|
||||||
* JanusGraph connects to an HBase backend for the first time.
|
|
||||||
*/
|
|
||||||
public static final ConfigOption<Integer> REGION_COUNT =
|
|
||||||
new ConfigOption<Integer>(HBASE_NS, "region-count",
|
|
||||||
"The number of initial regions set when creating JanusGraph's HBase table",
|
|
||||||
ConfigOption.Type.MASKABLE, Integer.class, input -> null != input && MIN_REGION_COUNT <= input);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This setting is used only when {@link #REGION_COUNT} is unset.
|
|
||||||
* <p/>
|
|
||||||
* If JanusGraph's HBase table does not exist, then it will be created with total
|
|
||||||
* region count = (number of servers reported by ClusterStatus) * (this
|
|
||||||
* value).
|
|
||||||
* <p/>
|
|
||||||
* The Apache HBase manual suggests an order-of-magnitude range of potential
|
|
||||||
* values for this setting:
|
|
||||||
*
|
|
||||||
* <ul>
|
|
||||||
* <li>
|
|
||||||
* <a href="https://hbase.apache.org/book/important_configurations.html#disable.splitting">2.5.2.7. Managed Splitting</a>:
|
|
||||||
* <blockquote>
|
|
||||||
* What's the optimal number of pre-split regions to create? Mileage will
|
|
||||||
* vary depending upon your application. You could start low with 10
|
|
||||||
* pre-split regions / server and watch as data grows over time. It's
|
|
||||||
* better to err on the side of too little regions and rolling split later.
|
|
||||||
* </blockquote>
|
|
||||||
* </li>
|
|
||||||
* <li>
|
|
||||||
* <a href="https://hbase.apache.org/book/regions.arch.html">9.7 Regions</a>:
|
|
||||||
* <blockquote>
|
|
||||||
* In general, HBase is designed to run with a small (20-200) number of
|
|
||||||
* relatively large (5-20Gb) regions per server... Typically you want to
|
|
||||||
* keep your region count low on HBase for numerous reasons. Usually
|
|
||||||
* right around 100 regions per RegionServer has yielded the best results.
|
|
||||||
* </blockquote>
|
|
||||||
* </li>
|
|
||||||
* </ul>
|
|
||||||
*
|
|
||||||
* These considerations may differ for other HBase implementations (e.g. MapR).
|
|
||||||
*/
|
|
||||||
public static final ConfigOption<Integer> REGIONS_PER_SERVER =
|
|
||||||
new ConfigOption<>(HBASE_NS, "regions-per-server",
|
|
||||||
"The number of regions per regionserver to set when creating JanusGraph's HBase table",
|
|
||||||
ConfigOption.Type.MASKABLE, Integer.class);
|
|
||||||
|
|
||||||
/**
|
|
||||||
* If this key is present in either the JVM system properties or the process
|
|
||||||
* environment (checked in the listed order, first hit wins), then its value
|
|
||||||
* must be the full package and class name of an implementation of
|
|
||||||
* {@link HBaseCompat} that has a no-arg public constructor.
|
|
||||||
* <p>
|
|
||||||
* When this <b>is not</b> set, JanusGraph attempts to automatically detect the
|
|
||||||
* HBase runtime version by calling {@link VersionInfo#getVersion()}. JanusGraph
|
|
||||||
* then checks the returned version string against a hard-coded list of
|
|
||||||
* supported version prefixes and instantiates the associated compat layer
|
|
||||||
* if a match is found.
|
|
||||||
* <p>
|
|
||||||
* When this <b>is</b> set, JanusGraph will not call
|
|
||||||
* {@code VersionInfo.getVersion()} or read its hard-coded list of supported
|
|
||||||
* version prefixes. JanusGraph will instead attempt to instantiate the class
|
|
||||||
* specified (via the no-arg constructor which must exist) and then attempt
|
|
||||||
* to cast it to HBaseCompat and use it as such. JanusGraph will assume the
|
|
||||||
* supplied implementation is compatible with the runtime HBase version and
|
|
||||||
* make no attempt to verify that assumption.
|
|
||||||
* <p>
|
|
||||||
* Setting this key incorrectly could cause runtime exceptions at best or
|
|
||||||
* silent data corruption at worst. This setting is intended for users
|
|
||||||
* running exotic HBase implementations that don't support VersionInfo or
|
|
||||||
* implementations which return values from {@code VersionInfo.getVersion()}
|
|
||||||
* that are inconsistent with Apache's versioning convention. It may also be
|
|
||||||
* useful to users who want to run against a new release of HBase that JanusGraph
|
|
||||||
* doesn't yet officially support.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
public static final ConfigOption<String> COMPAT_CLASS =
|
|
||||||
new ConfigOption<>(HBASE_NS, "compat-class",
|
|
||||||
"The package and class name of the HBaseCompat implementation. HBaseCompat masks version-specific HBase API differences. " +
|
|
||||||
"When this option is unset, JanusGraph calls HBase's VersionInfo.getVersion() and loads the matching compat class " +
|
|
||||||
"at runtime. Setting this option forces JanusGraph to instead reflectively load and instantiate the specified class.",
|
|
||||||
ConfigOption.Type.MASKABLE, String.class);
|
|
||||||
|
|
||||||
public static final int PORT_DEFAULT = 9160;
|
|
||||||
|
|
||||||
public static final TimestampProviders PREFERRED_TIMESTAMPS = TimestampProviders.MICRO;
|
|
||||||
|
|
||||||
public static final ConfigNamespace HBASE_CONFIGURATION_NAMESPACE =
|
|
||||||
new ConfigNamespace(HBASE_NS, "ext", "Overrides for hbase-{site,default}.xml options", true);
|
|
||||||
|
|
||||||
private static final StaticBuffer FOUR_ZERO_BYTES = BufferUtil.zeroBuffer(4);
|
|
||||||
|
|
||||||
// Immutable instance fields
|
|
||||||
private final BiMap<String, String> shortCfNameMap;
|
|
||||||
private final String tableName;
|
|
||||||
private final String compression;
|
|
||||||
private final int regionCount;
|
|
||||||
private final int regionsPerServer;
|
|
||||||
private final ConnectionMask cnx;
|
|
||||||
private final org.apache.hadoop.conf.Configuration hconf;
|
|
||||||
private final boolean shortCfNames;
|
|
||||||
private final boolean skipSchemaCheck;
|
|
||||||
private final String compatClass;
|
|
||||||
private final HBaseCompat compat;
|
|
||||||
// Cached return value of getDeployment() as requesting it can be expensive.
|
|
||||||
private Deployment deployment = null;
|
|
||||||
|
|
||||||
private static final ConcurrentHashMap<HBaseStoreManager, Throwable> openManagers = new ConcurrentHashMap<>();
|
|
||||||
|
|
||||||
// Mutable instance state
|
|
||||||
private final ConcurrentMap<String, HBaseKeyColumnValueStore> openStores;
|
|
||||||
|
|
||||||
public HBaseStoreManager(org.janusgraph.diskstorage.configuration.Configuration config) throws BackendException {
|
|
||||||
super(config, PORT_DEFAULT);
|
|
||||||
|
|
||||||
shortCfNameMap = createShortCfMap(config);
|
|
||||||
|
|
||||||
Preconditions.checkArgument(null != shortCfNameMap);
|
|
||||||
Collection<String> shorts = shortCfNameMap.values();
|
|
||||||
Preconditions.checkArgument(Sets.newHashSet(shorts).size() == shorts.size());
|
|
||||||
|
|
||||||
checkConfigDeprecation(config);
|
|
||||||
|
|
||||||
this.tableName = determineTableName(config);
|
|
||||||
this.compression = config.get(COMPRESSION);
|
|
||||||
this.regionCount = config.has(REGION_COUNT) ? config.get(REGION_COUNT) : -1;
|
|
||||||
this.regionsPerServer = config.has(REGIONS_PER_SERVER) ? config.get(REGIONS_PER_SERVER) : -1;
|
|
||||||
this.skipSchemaCheck = config.get(SKIP_SCHEMA_CHECK);
|
|
||||||
this.compatClass = config.has(COMPAT_CLASS) ? config.get(COMPAT_CLASS) : null;
|
|
||||||
this.compat = HBaseCompatLoader.getCompat(compatClass);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Specifying both region count options is permitted but may be
|
|
||||||
* indicative of a misunderstanding, so issue a warning.
|
|
||||||
*/
|
|
||||||
if (config.has(REGIONS_PER_SERVER) && config.has(REGION_COUNT)) {
|
|
||||||
logger.warn("Both {} and {} are set in JanusGraph's configuration, but "
|
|
||||||
+ "the former takes precedence and the latter will be ignored.",
|
|
||||||
REGION_COUNT, REGIONS_PER_SERVER);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* This static factory calls HBaseConfiguration.addHbaseResources(),
|
|
||||||
* which in turn applies the contents of hbase-default.xml and then
|
|
||||||
* applies the contents of hbase-site.xml.
|
|
||||||
*/
|
|
||||||
this.hconf = HBaseConfiguration.create();
|
|
||||||
|
|
||||||
// Copy a subset of our commons config into a Hadoop config
|
|
||||||
int keysLoaded=0;
|
|
||||||
Map<String,Object> configSub = config.getSubset(HBASE_CONFIGURATION_NAMESPACE);
|
|
||||||
for (Map.Entry<String,Object> entry : configSub.entrySet()) {
|
|
||||||
logger.info("HBase configuration: setting {}={}", entry.getKey(), entry.getValue());
|
|
||||||
if (entry.getValue()==null) continue;
|
|
||||||
hconf.set(entry.getKey(), entry.getValue().toString());
|
|
||||||
keysLoaded++;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special case for STORAGE_HOSTS
|
|
||||||
if (config.has(GraphDatabaseConfiguration.STORAGE_HOSTS)) {
|
|
||||||
String zkQuorumKey = "hbase.zookeeper.quorum";
|
|
||||||
String csHostList = Joiner.on(",").join(config.get(GraphDatabaseConfiguration.STORAGE_HOSTS));
|
|
||||||
hconf.set(zkQuorumKey, csHostList);
|
|
||||||
logger.info("Copied host list from {} to {}: {}", GraphDatabaseConfiguration.STORAGE_HOSTS, zkQuorumKey, csHostList);
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.debug("HBase configuration: set a total of {} configuration values", keysLoaded);
|
|
||||||
|
|
||||||
this.shortCfNames = config.get(SHORT_CF_NAMES);
|
|
||||||
|
|
||||||
try {
|
|
||||||
//this.cnx = HConnectionManager.createConnection(hconf);
|
|
||||||
this.cnx = compat.createConnection(hconf);
|
|
||||||
} catch (IOException e) {
|
|
||||||
throw new PermanentBackendException(e);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (logger.isTraceEnabled()) {
|
|
||||||
openManagers.put(this, new Throwable("Manager Opened"));
|
|
||||||
dumpOpenManagers();
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.debug("Dumping HBase config key=value pairs");
|
|
||||||
for (Map.Entry<String, String> entry : hconf) {
|
|
||||||
logger.debug("[HBaseConfig] " + entry.getKey() + "=" + entry.getValue());
|
|
||||||
}
|
|
||||||
logger.debug("End of HBase config key=value pairs");
|
|
||||||
|
|
||||||
openStores = new ConcurrentHashMap<>();
|
|
||||||
}
|
|
||||||
|
|
||||||
public static BiMap<String, String> createShortCfMap(Configuration config) {
|
|
||||||
return ImmutableBiMap.<String, String>builder()
|
|
||||||
.put(INDEXSTORE_NAME, "g")
|
|
||||||
.put(INDEXSTORE_NAME + LOCK_STORE_SUFFIX, "h")
|
|
||||||
.put(config.get(IDS_STORE_NAME), "i")
|
|
||||||
.put(EDGESTORE_NAME, "e")
|
|
||||||
.put(EDGESTORE_NAME + LOCK_STORE_SUFFIX, "f")
|
|
||||||
.put(SYSTEM_PROPERTIES_STORE_NAME, "s")
|
|
||||||
.put(SYSTEM_PROPERTIES_STORE_NAME + LOCK_STORE_SUFFIX, "t")
|
|
||||||
.put(SYSTEM_MGMT_LOG_NAME, "m")
|
|
||||||
.put(SYSTEM_TX_LOG_NAME, "l")
|
|
||||||
.build();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Deployment getDeployment() {
|
|
||||||
if (null != deployment) {
|
|
||||||
return deployment;
|
|
||||||
}
|
|
||||||
|
|
||||||
List<KeyRange> local;
|
|
||||||
try {
|
|
||||||
local = getLocalKeyPartition();
|
|
||||||
deployment = null != local && !local.isEmpty() ? Deployment.LOCAL : Deployment.REMOTE;
|
|
||||||
} catch (BackendException e) {
|
|
||||||
throw new RuntimeException(e);
|
|
||||||
}
|
|
||||||
return deployment;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public String toString() {
|
|
||||||
return "hbase[" + tableName + "@" + super.toString() + "]";
|
|
||||||
}
|
|
||||||
|
|
||||||
public void dumpOpenManagers() {
|
|
||||||
int estimatedSize = openManagers.size();
|
|
||||||
logger.trace("---- Begin open HBase store manager list ({} managers) ----", estimatedSize);
|
|
||||||
for (HBaseStoreManager m : openManagers.keySet()) {
|
|
||||||
logger.trace("Manager {} opened at:", m, openManagers.get(m));
|
|
||||||
}
|
|
||||||
logger.trace("---- End open HBase store manager list ({} managers) ----", estimatedSize);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void close() {
|
|
||||||
openStores.clear();
|
|
||||||
if (logger.isTraceEnabled())
|
|
||||||
openManagers.remove(this);
|
|
||||||
IOUtils.closeQuietly(cnx);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public StoreFeatures getFeatures() {
|
|
||||||
|
|
||||||
Configuration c = GraphDatabaseConfiguration.buildGraphConfiguration();
|
|
||||||
|
|
||||||
StandardStoreFeatures.Builder fb = new StandardStoreFeatures.Builder()
|
|
||||||
.orderedScan(true).unorderedScan(true).batchMutation(true)
|
|
||||||
.multiQuery(true).distributed(true).keyOrdered(true).storeTTL(true)
|
|
||||||
.cellTTL(true).timestamps(true).preferredTimestamps(PREFERRED_TIMESTAMPS)
|
|
||||||
.optimisticLocking(true).keyConsistent(c);
|
|
||||||
|
|
||||||
try {
|
|
||||||
fb.localKeyPartition(getDeployment() == Deployment.LOCAL);
|
|
||||||
} catch (Exception e) {
|
|
||||||
logger.warn("Unexpected exception during getDeployment()", e);
|
|
||||||
}
|
|
||||||
|
|
||||||
return fb.build();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void mutateMany(Map<String, Map<StaticBuffer, KCVMutation>> mutations, StoreTransaction txh) throws BackendException {
|
|
||||||
final MaskedTimestamp commitTime = new MaskedTimestamp(txh);
|
|
||||||
// In case of an addition and deletion with identical timestamps, the
|
|
||||||
// deletion tombstone wins.
|
|
||||||
// http://hbase.apache.org/book/versions.html#d244e4250
|
|
||||||
final Map<StaticBuffer, Pair<List<Put>, Delete>> commandsPerKey =
|
|
||||||
convertToCommands(
|
|
||||||
mutations,
|
|
||||||
commitTime.getAdditionTime(times),
|
|
||||||
commitTime.getDeletionTime(times));
|
|
||||||
|
|
||||||
final List<Row> batch = new ArrayList<>(commandsPerKey.size()); // actual batch operation
|
|
||||||
|
|
||||||
// convert sorted commands into representation required for 'batch' operation
|
|
||||||
for (Pair<List<Put>, Delete> commands : commandsPerKey.values()) {
|
|
||||||
if (commands.getFirst() != null && !commands.getFirst().isEmpty())
|
|
||||||
batch.addAll(commands.getFirst());
|
|
||||||
|
|
||||||
if (commands.getSecond() != null)
|
|
||||||
batch.add(commands.getSecond());
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
TableMask table = null;
|
|
||||||
|
|
||||||
try {
|
|
||||||
table = cnx.getTable(tableName);
|
|
||||||
table.batch(batch, new Object[batch.size()]);
|
|
||||||
} finally {
|
|
||||||
IOUtils.closeQuietly(table);
|
|
||||||
}
|
|
||||||
} catch (IOException e) {
|
|
||||||
throw new TemporaryBackendException(e);
|
|
||||||
} catch (InterruptedException e) {
|
|
||||||
throw new TemporaryBackendException(e);
|
|
||||||
}
|
|
||||||
|
|
||||||
this.sleepAfterWrite(commitTime);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public KeyColumnValueStore openDatabase(String longName, StoreMetaData.Container metaData) throws BackendException {
|
|
||||||
// HBase does not support retrieving cell-level TTL by the client.
|
|
||||||
Preconditions.checkArgument(!storageConfig.has(GraphDatabaseConfiguration.STORE_META_TTL, longName)
|
|
||||||
|| !storageConfig.get(GraphDatabaseConfiguration.STORE_META_TTL, longName));
|
|
||||||
|
|
||||||
HBaseKeyColumnValueStore store = openStores.get(longName);
|
|
||||||
|
|
||||||
if (store == null) {
|
|
||||||
final String cfName = getCfNameForStoreName(longName);
|
|
||||||
|
|
||||||
HBaseKeyColumnValueStore newStore = new HBaseKeyColumnValueStore(this, cnx, tableName, cfName, longName);
|
|
||||||
|
|
||||||
store = openStores.putIfAbsent(longName, newStore); // nothing bad happens if we loose to other thread
|
|
||||||
|
|
||||||
if (store == null) {
|
|
||||||
if (!skipSchemaCheck) {
|
|
||||||
int cfTTLInSeconds = -1;
|
|
||||||
if (metaData.contains(StoreMetaData.TTL)) {
|
|
||||||
cfTTLInSeconds = metaData.get(StoreMetaData.TTL);
|
|
||||||
}
|
|
||||||
ensureColumnFamilyExists(tableName, cfName, cfTTLInSeconds);
|
|
||||||
}
|
|
||||||
|
|
||||||
store = newStore;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return store;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public StoreTransaction beginTransaction(final BaseTransactionConfig config) throws BackendException {
|
|
||||||
return new HBaseTransaction(config);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public String getName() {
|
|
||||||
return tableName;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Deletes the specified table with all its columns.
|
|
||||||
* ATTENTION: Invoking this method will delete the table if it exists and therefore causes data loss.
|
|
||||||
*/
|
|
||||||
@Override
|
|
||||||
public void clearStorage() throws BackendException {
|
|
||||||
try (AdminMask adm = getAdminInterface()) {
|
|
||||||
if (this.storageConfig.get(DROP_ON_CLEAR)) {
|
|
||||||
adm.dropTable(tableName);
|
|
||||||
} else {
|
|
||||||
adm.clearTable(tableName, times.getTime(times.getTime()));
|
|
||||||
}
|
|
||||||
} catch (IOException e)
|
|
||||||
{
|
|
||||||
throw new TemporaryBackendException(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean exists() throws BackendException {
|
|
||||||
try (final AdminMask adm = getAdminInterface()) {
|
|
||||||
return adm.tableExists(tableName);
|
|
||||||
} catch (IOException e) {
|
|
||||||
throw new TemporaryBackendException(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public List<KeyRange> getLocalKeyPartition() throws BackendException {
|
|
||||||
List<KeyRange> result = new LinkedList<>();
|
|
||||||
try {
|
|
||||||
ensureTableExists(
|
|
||||||
tableName, getCfNameForStoreName(GraphDatabaseConfiguration.SYSTEM_PROPERTIES_STORE_NAME), 0);
|
|
||||||
Map<KeyRange, ServerName> normed = normalizeKeyBounds(cnx.getRegionLocations(tableName));
|
|
||||||
|
|
||||||
for (Map.Entry<KeyRange, ServerName> e : normed.entrySet()) {
|
|
||||||
if (NetworkUtil.isLocalConnection(e.getValue().getHostname())) {
|
|
||||||
result.add(e.getKey());
|
|
||||||
logger.debug("Found local key/row partition {} on host {}", e.getKey(), e.getValue());
|
|
||||||
} else {
|
|
||||||
logger.debug("Discarding remote {}", e.getValue());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} catch (MasterNotRunningException e) {
|
|
||||||
logger.warn("Unexpected MasterNotRunningException", e);
|
|
||||||
} catch (ZooKeeperConnectionException e) {
|
|
||||||
logger.warn("Unexpected ZooKeeperConnectionException", e);
|
|
||||||
} catch (IOException e) {
|
|
||||||
logger.warn("Unexpected IOException", e);
|
|
||||||
}
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* each key from an {@link HRegionInfo} to a {@link KeyRange} expressing the
|
|
||||||
* region's start and end key bounds using JanusGraph-partitioning-friendly
|
|
||||||
* conventions (start inclusive, end exclusive, zero bytes appended where
|
|
||||||
* necessary to make all keys at least 4 bytes long).
|
|
||||||
* <p/>
|
|
||||||
* This method iterates over the entries in its map parameter and performs
|
|
||||||
* the following conditional conversions on its keys. "Require" below means
|
|
||||||
* either a {@link Preconditions} invocation or an assertion. HRegionInfo
|
|
||||||
* sometimes returns start and end keys of zero length; this method replaces
|
|
||||||
* zero length keys with null before doing any of the checks described
|
|
||||||
* below. The parameter map and the values it contains are only read and
|
|
||||||
* never modified.
|
|
||||||
*
|
|
||||||
* <ul>
|
|
||||||
* <li>If an entry's HRegionInfo has null start and end keys, then first
|
|
||||||
* require that the parameter map is a singleton, and then return a
|
|
||||||
* single-entry map whose {@code KeyRange} has start and end buffers that
|
|
||||||
* are both four bytes of zeros.</li>
|
|
||||||
* <li>If the entry has a null end key (but non-null start key), put an
|
|
||||||
* equivalent entry in the result map with a start key identical to the
|
|
||||||
* input, except that zeros are appended to values less than 4 bytes long,
|
|
||||||
* and an end key that is four bytes of zeros.
|
|
||||||
* <li>If the entry has a null start key (but non-null end key), put an
|
|
||||||
* equivalent entry in the result map where the start key is four bytes of
|
|
||||||
* zeros, and the end key has zeros appended, if necessary, to make it at
|
|
||||||
* least 4 bytes long, after which one is added to the padded value in
|
|
||||||
* unsigned 32-bit arithmetic with overflow allowed.</li>
|
|
||||||
* <li>Any entry which matches none of the above criteria results in an
|
|
||||||
* equivalent entry in the returned map, except that zeros are appended to
|
|
||||||
* both keys to make each at least 4 bytes long, and the end key is then
|
|
||||||
* incremented as described in the last bullet point.</li>
|
|
||||||
* </ul>
|
|
||||||
*
|
|
||||||
* After iterating over the parameter map, this method checks that it either
|
|
||||||
* saw no entries with null keys, one entry with a null start key and a
|
|
||||||
* different entry with a null end key, or one entry with both start and end
|
|
||||||
* keys null. If any null keys are observed besides these three cases, the
|
|
||||||
* method will die with a precondition failure.
|
|
||||||
*
|
|
||||||
* @param locations A list of HRegionInfo
|
|
||||||
* @return JanusGraph-friendly expression of each region's rowkey boundaries
|
|
||||||
*/
|
|
||||||
private Map<KeyRange, ServerName> normalizeKeyBounds(List<HRegionLocation> locations) {
|
|
||||||
|
|
||||||
HRegionLocation nullStart = null;
|
|
||||||
HRegionLocation nullEnd = null;
|
|
||||||
|
|
||||||
ImmutableMap.Builder<KeyRange, ServerName> b = ImmutableMap.builder();
|
|
||||||
|
|
||||||
for (HRegionLocation location : locations) {
|
|
||||||
HRegionInfo regionInfo = location.getRegionInfo();
|
|
||||||
ServerName serverName = location.getServerName();
|
|
||||||
byte startKey[] = regionInfo.getStartKey();
|
|
||||||
byte endKey[] = regionInfo.getEndKey();
|
|
||||||
|
|
||||||
if (0 == startKey.length) {
|
|
||||||
startKey = null;
|
|
||||||
logger.trace("Converted zero-length HBase startKey byte array to null");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (0 == endKey.length) {
|
|
||||||
endKey = null;
|
|
||||||
logger.trace("Converted zero-length HBase endKey byte array to null");
|
|
||||||
}
|
|
||||||
|
|
||||||
if (null == startKey && null == endKey) {
|
|
||||||
Preconditions.checkState(1 == locations.size());
|
|
||||||
logger.debug("HBase table {} has a single region {}", tableName, regionInfo);
|
|
||||||
// Choose arbitrary shared value = startKey = endKey
|
|
||||||
return b.put(new KeyRange(FOUR_ZERO_BYTES, FOUR_ZERO_BYTES), serverName).build();
|
|
||||||
} else if (null == startKey) {
|
|
||||||
logger.debug("Found HRegionInfo with null startKey on server {}: {}", serverName, regionInfo);
|
|
||||||
Preconditions.checkState(null == nullStart);
|
|
||||||
nullStart = location;
|
|
||||||
// I thought endBuf would be inclusive from the HBase javadoc, but in practice it is exclusive
|
|
||||||
StaticBuffer endBuf = StaticArrayBuffer.of(zeroExtend(endKey));
|
|
||||||
// Replace null start key with zeroes
|
|
||||||
b.put(new KeyRange(FOUR_ZERO_BYTES, endBuf), serverName);
|
|
||||||
} else if (null == endKey) {
|
|
||||||
logger.debug("Found HRegionInfo with null endKey on server {}: {}", serverName, regionInfo);
|
|
||||||
Preconditions.checkState(null == nullEnd);
|
|
||||||
nullEnd = location;
|
|
||||||
// Replace null end key with zeroes
|
|
||||||
b.put(new KeyRange(StaticArrayBuffer.of(zeroExtend(startKey)), FOUR_ZERO_BYTES), serverName);
|
|
||||||
} else {
|
|
||||||
Preconditions.checkState(null != startKey);
|
|
||||||
Preconditions.checkState(null != endKey);
|
|
||||||
|
|
||||||
// Convert HBase's inclusive end keys into exclusive JanusGraph end keys
|
|
||||||
StaticBuffer startBuf = StaticArrayBuffer.of(zeroExtend(startKey));
|
|
||||||
StaticBuffer endBuf = StaticArrayBuffer.of(zeroExtend(endKey));
|
|
||||||
|
|
||||||
KeyRange kr = new KeyRange(startBuf, endBuf);
|
|
||||||
b.put(kr, serverName);
|
|
||||||
logger.debug("Found HRegionInfo with non-null end and start keys on server {}: {}", serverName, regionInfo);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Require either no null key bounds or a pair of them
|
|
||||||
Preconditions.checkState(!(null == nullStart ^ null == nullEnd));
|
|
||||||
|
|
||||||
// Check that every key in the result is at least 4 bytes long
|
|
||||||
Map<KeyRange, ServerName> result = b.build();
|
|
||||||
for (KeyRange kr : result.keySet()) {
|
|
||||||
Preconditions.checkState(4 <= kr.getStart().length());
|
|
||||||
Preconditions.checkState(4 <= kr.getEnd().length());
|
|
||||||
}
|
|
||||||
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* If the parameter is shorter than 4 bytes, then create and return a new 4
|
|
||||||
* byte array with the input array's bytes followed by zero bytes. Otherwise
|
|
||||||
* return the parameter.
|
|
||||||
*
|
|
||||||
* @param dataToPad non-null but possibly zero-length byte array
|
|
||||||
* @return either the parameter or a new array
|
|
||||||
*/
|
|
||||||
private final byte[] zeroExtend(byte[] dataToPad) {
|
|
||||||
assert null != dataToPad;
|
|
||||||
|
|
||||||
final int targetLength = 4;
|
|
||||||
|
|
||||||
if (targetLength <= dataToPad.length)
|
|
||||||
return dataToPad;
|
|
||||||
|
|
||||||
byte padded[] = new byte[targetLength];
|
|
||||||
|
|
||||||
for (int i = 0; i < dataToPad.length; i++)
|
|
||||||
padded[i] = dataToPad[i];
|
|
||||||
|
|
||||||
for (int i = dataToPad.length; i < padded.length; i++)
|
|
||||||
padded[i] = (byte)0;
|
|
||||||
|
|
||||||
return padded;
|
|
||||||
}
|
|
||||||
|
|
||||||
public static String shortenCfName(BiMap<String, String> shortCfNameMap, String longName) throws PermanentBackendException {
|
|
||||||
final String s;
|
|
||||||
if (shortCfNameMap.containsKey(longName)) {
|
|
||||||
s = shortCfNameMap.get(longName);
|
|
||||||
Preconditions.checkNotNull(s);
|
|
||||||
logger.debug("Substituted default CF name \"{}\" with short form \"{}\" to reduce HBase KeyValue size", longName, s);
|
|
||||||
} else {
|
|
||||||
if (shortCfNameMap.containsValue(longName)) {
|
|
||||||
String fmt = "Must use CF long-form name \"%s\" instead of the short-form name \"%s\" when configured with %s=true";
|
|
||||||
String msg = String.format(fmt, shortCfNameMap.inverse().get(longName), longName, SHORT_CF_NAMES.getName());
|
|
||||||
throw new PermanentBackendException(msg);
|
|
||||||
}
|
|
||||||
s = longName;
|
|
||||||
logger.debug("Kept default CF name \"{}\" because it has no associated short form", s);
|
|
||||||
}
|
|
||||||
return s;
|
|
||||||
}
|
|
||||||
|
|
||||||
private TableDescriptor ensureTableExists(String tableName, String initialCFName, int ttlInSeconds) throws BackendException {
|
|
||||||
AdminMask adm = null;
|
|
||||||
|
|
||||||
TableDescriptor desc;
|
|
||||||
|
|
||||||
try { // Create our table, if necessary
|
|
||||||
adm = getAdminInterface();
|
|
||||||
/*
|
|
||||||
* Some HBase versions/impls respond badly to attempts to create a
|
|
||||||
* table without at least one CF. See #661. Creating a CF along with
|
|
||||||
* the table avoids HBase carping.
|
|
||||||
*/
|
|
||||||
if (adm.tableExists(tableName)) {
|
|
||||||
desc = adm.getTableDescriptor(tableName);
|
|
||||||
// Check and warn if long and short cf names are mixedly used for the same table.
|
|
||||||
if (shortCfNames && initialCFName.equals(shortCfNameMap.get(SYSTEM_PROPERTIES_STORE_NAME))) {
|
|
||||||
String longCFName = shortCfNameMap.inverse().get(initialCFName);
|
|
||||||
if (desc.getColumnFamily(Bytes.toBytes(longCFName)) != null) {
|
|
||||||
logger.warn("Configuration {}=true, but the table \"{}\" already has column family with long name \"{}\".",
|
|
||||||
SHORT_CF_NAMES.getName(), tableName, longCFName);
|
|
||||||
logger.warn("Check {} configuration.", SHORT_CF_NAMES.getName());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else if (!shortCfNames && initialCFName.equals(SYSTEM_PROPERTIES_STORE_NAME)) {
|
|
||||||
String shortCFName = shortCfNameMap.get(initialCFName);
|
|
||||||
if (desc.getColumnFamily(Bytes.toBytes(shortCFName)) != null) {
|
|
||||||
logger.warn("Configuration {}=false, but the table \"{}\" already has column family with short name \"{}\".",
|
|
||||||
SHORT_CF_NAMES.getName(), tableName, shortCFName);
|
|
||||||
logger.warn("Check {} configuration.", SHORT_CF_NAMES.getName());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
desc = createTable(tableName, initialCFName, ttlInSeconds, adm);
|
|
||||||
}
|
|
||||||
} catch (IOException e) {
|
|
||||||
throw new TemporaryBackendException(e);
|
|
||||||
} finally {
|
|
||||||
IOUtils.closeQuietly(adm);
|
|
||||||
}
|
|
||||||
|
|
||||||
return desc;
|
|
||||||
}
|
|
||||||
|
|
||||||
private TableDescriptor createTable(String tableName, String cfName, int ttlInSeconds, AdminMask adm) throws IOException {
|
|
||||||
TableDescriptor desc = compat.newTableDescriptor(tableName);
|
|
||||||
|
|
||||||
ColumnFamilyDescriptor cdesc = ColumnFamilyDescriptorBuilder.of(cfName);
|
|
||||||
cdesc = setCFOptions(cdesc, ttlInSeconds);
|
|
||||||
|
|
||||||
desc = compat.addColumnFamilyToTableDescriptor(desc, cdesc);
|
|
||||||
|
|
||||||
int count; // total regions to create
|
|
||||||
String src;
|
|
||||||
|
|
||||||
if (MIN_REGION_COUNT <= (count = regionCount)) {
|
|
||||||
src = "region count configuration";
|
|
||||||
} else if (0 < regionsPerServer &&
|
|
||||||
MIN_REGION_COUNT <= (count = regionsPerServer * adm.getEstimatedRegionServerCount())) {
|
|
||||||
src = "ClusterStatus server count";
|
|
||||||
} else {
|
|
||||||
count = -1;
|
|
||||||
src = "default";
|
|
||||||
}
|
|
||||||
|
|
||||||
if (MIN_REGION_COUNT < count) {
|
|
||||||
adm.createTable(desc, getStartKey(count), getEndKey(count), count);
|
|
||||||
logger.debug("Created table {} with region count {} from {}", tableName, count, src);
|
|
||||||
} else {
|
|
||||||
adm.createTable(desc);
|
|
||||||
logger.debug("Created table {} with default start key, end key, and region count", tableName);
|
|
||||||
}
|
|
||||||
|
|
||||||
return desc;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* <p/>
|
|
||||||
* From the {@code createTable} javadoc:
|
|
||||||
* "The start key specified will become the end key of the first region of
|
|
||||||
* the table, and the end key specified will become the start key of the
|
|
||||||
* last region of the table (the first region has a null start key and
|
|
||||||
* the last region has a null end key)"
|
|
||||||
* <p/>
|
|
||||||
* To summarize, the {@code createTable} argument called "startKey" is
|
|
||||||
* actually the end key of the first region.
|
|
||||||
*/
|
|
||||||
private byte[] getStartKey(int regionCount) {
|
|
||||||
ByteBuffer regionWidth = ByteBuffer.allocate(4);
|
|
||||||
regionWidth.putInt((int)(((1L << 32) - 1L) / regionCount)).flip();
|
|
||||||
return StaticArrayBuffer.of(regionWidth).getBytes(0, 4);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Companion to {@link #getStartKey(int)}. See its javadoc for details.
|
|
||||||
*/
|
|
||||||
private byte[] getEndKey(int regionCount) {
|
|
||||||
ByteBuffer regionWidth = ByteBuffer.allocate(4);
|
|
||||||
regionWidth.putInt((int)(((1L << 32) - 1L) / regionCount * (regionCount - 1))).flip();
|
|
||||||
return StaticArrayBuffer.of(regionWidth).getBytes(0, 4);
|
|
||||||
}
|
|
||||||
|
|
||||||
private void ensureColumnFamilyExists(String tableName, String columnFamily, int ttlInSeconds) throws BackendException {
|
|
||||||
AdminMask adm = null;
|
|
||||||
try {
|
|
||||||
adm = getAdminInterface();
|
|
||||||
TableDescriptor desc = ensureTableExists(tableName, columnFamily, ttlInSeconds);
|
|
||||||
|
|
||||||
Preconditions.checkNotNull(desc);
|
|
||||||
|
|
||||||
ColumnFamilyDescriptor cf = desc.getColumnFamily(Bytes.toBytes(columnFamily));
|
|
||||||
|
|
||||||
// Create our column family, if necessary
|
|
||||||
if (cf == null) {
|
|
||||||
try {
|
|
||||||
if (!adm.isTableDisabled(tableName)) {
|
|
||||||
adm.disableTable(tableName);
|
|
||||||
}
|
|
||||||
} catch (TableNotEnabledException e) {
|
|
||||||
logger.debug("Table {} already disabled", tableName);
|
|
||||||
} catch (IOException e) {
|
|
||||||
throw new TemporaryBackendException(e);
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
ColumnFamilyDescriptor cdesc = ColumnFamilyDescriptorBuilder.of(columnFamily);
|
|
||||||
|
|
||||||
cdesc = setCFOptions(cdesc, ttlInSeconds);
|
|
||||||
|
|
||||||
adm.addColumn(tableName, cdesc);
|
|
||||||
|
|
||||||
try {
|
|
||||||
logger.debug("Added HBase ColumnFamily {}, waiting for 1 sec. to propogate.", columnFamily);
|
|
||||||
Thread.sleep(1000L);
|
|
||||||
} catch (InterruptedException ie) {
|
|
||||||
throw new TemporaryBackendException(ie);
|
|
||||||
}
|
|
||||||
|
|
||||||
adm.enableTable(tableName);
|
|
||||||
} catch (TableNotFoundException ee) {
|
|
||||||
logger.error("TableNotFoundException", ee);
|
|
||||||
throw new PermanentBackendException(ee);
|
|
||||||
} catch (org.apache.hadoop.hbase.TableExistsException ee) {
|
|
||||||
logger.debug("Swallowing exception {}", ee);
|
|
||||||
} catch (IOException ee) {
|
|
||||||
throw new TemporaryBackendException(ee);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} finally {
|
|
||||||
IOUtils.closeQuietly(adm);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private ColumnFamilyDescriptor setCFOptions(ColumnFamilyDescriptor cdesc, int ttlInSeconds) {
|
|
||||||
ColumnFamilyDescriptor ret = null;
|
|
||||||
|
|
||||||
if (null != compression && !compression.equals(COMPRESSION_DEFAULT)) {
|
|
||||||
cdesc = ColumnFamilyDescriptorBuilder.newBuilder(cdesc).setDataBlockEncoding( DataBlockEncoding.FAST_DIFF).build();
|
|
||||||
ret = compat.setCompression(cdesc, compression);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ttlInSeconds > 0) {
|
|
||||||
ret = ColumnFamilyDescriptorBuilder.newBuilder(cdesc).setTimeToLive(ttlInSeconds).build();
|
|
||||||
}
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Convert JanusGraph internal Mutation representation into HBase native commands.
|
|
||||||
*
|
|
||||||
* @param mutations Mutations to convert into HBase commands.
|
|
||||||
* @param putTimestamp The timestamp to use for Put commands.
|
|
||||||
* @param delTimestamp The timestamp to use for Delete commands.
|
|
||||||
* @return Commands sorted by key converted from JanusGraph internal representation.
|
|
||||||
* @throws org.janusgraph.diskstorage.PermanentBackendException
|
|
||||||
*/
|
|
||||||
@VisibleForTesting
|
|
||||||
Map<StaticBuffer, Pair<List<Put>, Delete>> convertToCommands(Map<String, Map<StaticBuffer, KCVMutation>> mutations,
|
|
||||||
final long putTimestamp,
|
|
||||||
final long delTimestamp) throws PermanentBackendException {
|
|
||||||
// A map of rowkey to commands (list of Puts, Delete)
|
|
||||||
final Map<StaticBuffer, Pair<List<Put>, Delete>> commandsPerKey = new HashMap<>();
|
|
||||||
|
|
||||||
for (Map.Entry<String, Map<StaticBuffer, KCVMutation>> entry : mutations.entrySet()) {
|
|
||||||
|
|
||||||
String cfString = getCfNameForStoreName(entry.getKey());
|
|
||||||
byte[] cfName = Bytes.toBytes(cfString);
|
|
||||||
|
|
||||||
for (Map.Entry<StaticBuffer, KCVMutation> m : entry.getValue().entrySet()) {
|
|
||||||
final byte[] key = m.getKey().as(StaticBuffer.ARRAY_FACTORY);
|
|
||||||
KCVMutation mutation = m.getValue();
|
|
||||||
|
|
||||||
Pair<List<Put>, Delete> commands = commandsPerKey.get(m.getKey());
|
|
||||||
|
|
||||||
// The firt time we go through the list of input <rowkey, KCVMutation>,
|
|
||||||
// create the holder for a particular rowkey
|
|
||||||
if (commands == null) {
|
|
||||||
commands = new Pair<>();
|
|
||||||
// List of all the Puts for this rowkey, including the ones without TTL and with TTL.
|
|
||||||
final List<Put> putList = new ArrayList<>();
|
|
||||||
commands.setFirst(putList);
|
|
||||||
commandsPerKey.put(m.getKey(), commands);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (mutation.hasDeletions()) {
|
|
||||||
if (commands.getSecond() == null) {
|
|
||||||
Delete d = new Delete(key);
|
|
||||||
compat.setTimestamp(d, delTimestamp);
|
|
||||||
commands.setSecond(d);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (StaticBuffer b : mutation.getDeletions()) {
|
|
||||||
// commands.getSecond() is a Delete for this rowkey.
|
|
||||||
commands.getSecond().addColumns(cfName, b.as(StaticBuffer.ARRAY_FACTORY), delTimestamp);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (mutation.hasAdditions()) {
|
|
||||||
// All the entries (column cells) with the rowkey use this one Put, except the ones with TTL.
|
|
||||||
final Put putColumnsWithoutTtl = new Put(key, putTimestamp);
|
|
||||||
// At the end of this loop, there will be one Put entry in the commands.getFirst() list that
|
|
||||||
// contains all additions without TTL set, and possible multiple Put entries for columns
|
|
||||||
// that have TTL set.
|
|
||||||
for (Entry e : mutation.getAdditions()) {
|
|
||||||
|
|
||||||
// Deal with TTL within the entry (column cell) first
|
|
||||||
// HBase cell level TTL is actually set at the Mutation/Put level.
|
|
||||||
// Therefore we need to construct a new Put for each entry (column cell) with TTL.
|
|
||||||
// We can not combine them because column cells within the same rowkey may:
|
|
||||||
// 1. have no TTL
|
|
||||||
// 2. have TTL
|
|
||||||
// 3. have different TTL
|
|
||||||
final Integer ttl = (Integer) e.getMetaData().get(EntryMetaData.TTL);
|
|
||||||
if (null != ttl && ttl > 0) {
|
|
||||||
// Create a new Put
|
|
||||||
Put putColumnWithTtl = new Put(key, putTimestamp);
|
|
||||||
addColumnToPut(putColumnWithTtl, cfName, putTimestamp, e);
|
|
||||||
// Convert ttl from second (JanusGraph TTL) to millisec (HBase TTL)
|
|
||||||
// @see JanusGraphManagement#setTTL(JanusGraphSchemaType, Duration)
|
|
||||||
// Cast Put to Mutation for backward compatibility with HBase 0.98.x
|
|
||||||
// HBase supports cell-level TTL for versions 0.98.6 and above.
|
|
||||||
((Mutation) putColumnWithTtl).setTTL(ttl * 1000);
|
|
||||||
// commands.getFirst() is the list of Puts for this rowkey. Add this
|
|
||||||
// Put column with TTL to the list.
|
|
||||||
commands.getFirst().add(putColumnWithTtl);
|
|
||||||
} else {
|
|
||||||
addColumnToPut(putColumnsWithoutTtl, cfName, putTimestamp, e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If there were any mutations without TTL set, add them to commands.getFirst()
|
|
||||||
if (!putColumnsWithoutTtl.isEmpty()) {
|
|
||||||
commands.getFirst().add(putColumnsWithoutTtl);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return commandsPerKey;
|
|
||||||
}
|
|
||||||
|
|
||||||
private void addColumnToPut(Put p, byte[] cfName, long putTimestamp, Entry e) {
|
|
||||||
p.addColumn(cfName, e.getColumnAs(StaticBuffer.ARRAY_FACTORY), putTimestamp,
|
|
||||||
e.getValueAs(StaticBuffer.ARRAY_FACTORY));
|
|
||||||
}
|
|
||||||
|
|
||||||
private String getCfNameForStoreName(String storeName) throws PermanentBackendException {
|
|
||||||
return shortCfNames ? shortenCfName(shortCfNameMap, storeName) : storeName;
|
|
||||||
}
|
|
||||||
|
|
||||||
private void checkConfigDeprecation(org.janusgraph.diskstorage.configuration.Configuration config) {
|
|
||||||
if (config.has(GraphDatabaseConfiguration.STORAGE_PORT)) {
|
|
||||||
logger.warn("The configuration property {} is ignored for HBase. Set hbase.zookeeper.property.clientPort in hbase-site.xml or {}.hbase.zookeeper.property.clientPort in JanusGraph's configuration file.",
|
|
||||||
ConfigElement.getPath(GraphDatabaseConfiguration.STORAGE_PORT), ConfigElement.getPath(HBASE_CONFIGURATION_NAMESPACE));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private AdminMask getAdminInterface() {
|
|
||||||
try {
|
|
||||||
return cnx.getAdmin();
|
|
||||||
} catch (IOException e) {
|
|
||||||
throw new JanusGraphException(e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private String determineTableName(org.janusgraph.diskstorage.configuration.Configuration config) {
|
|
||||||
if ((!config.has(HBASE_TABLE)) && (config.has(GRAPH_NAME))) {
|
|
||||||
return config.get(GRAPH_NAME);
|
|
||||||
}
|
|
||||||
return config.get(HBASE_TABLE);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,31 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import org.janusgraph.diskstorage.BaseTransactionConfig;
|
|
||||||
import org.janusgraph.diskstorage.common.AbstractStoreTransaction;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This class overrides and adds nothing compared with
|
|
||||||
* {@link org.janusgraph.diskstorage.locking.consistentkey.ExpectedValueCheckingTransaction}; however, it creates a transaction type specific
|
|
||||||
* to HBase, which lets us check for user errors like passing a Cassandra
|
|
||||||
* transaction into a HBase method.
|
|
||||||
*/
|
|
||||||
public class HBaseTransaction extends AbstractStoreTransaction {
|
|
||||||
|
|
||||||
public HBaseTransaction(final BaseTransactionConfig config) {
|
|
||||||
super(config);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,58 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import org.apache.hadoop.hbase.HRegionLocation;
|
|
||||||
import org.apache.hadoop.hbase.TableName;
|
|
||||||
import org.apache.hadoop.hbase.client.Connection;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.List;
|
|
||||||
|
|
||||||
public class HConnection2_0 implements ConnectionMask
|
|
||||||
{
|
|
||||||
|
|
||||||
private final Connection cnx;
|
|
||||||
|
|
||||||
public HConnection2_0(Connection cnx)
|
|
||||||
{
|
|
||||||
this.cnx = cnx;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public TableMask getTable(String name) throws IOException
|
|
||||||
{
|
|
||||||
return new HTable2_0(cnx.getTable(TableName.valueOf(name)));
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public AdminMask getAdmin() throws IOException
|
|
||||||
{
|
|
||||||
return new HBaseAdmin2_0(cnx.getAdmin());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void close() throws IOException
|
|
||||||
{
|
|
||||||
cnx.close();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public List<HRegionLocation> getRegionLocations(String tableName)
|
|
||||||
throws IOException
|
|
||||||
{
|
|
||||||
return this.cnx.getRegionLocator(TableName.valueOf(tableName)).getAllRegionLocations();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,60 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import org.apache.hadoop.hbase.client.Get;
|
|
||||||
import org.apache.hadoop.hbase.client.Result;
|
|
||||||
import org.apache.hadoop.hbase.client.ResultScanner;
|
|
||||||
import org.apache.hadoop.hbase.client.Row;
|
|
||||||
import org.apache.hadoop.hbase.client.Scan;
|
|
||||||
import org.apache.hadoop.hbase.client.Table;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.List;
|
|
||||||
|
|
||||||
public class HTable2_0 implements TableMask
|
|
||||||
{
|
|
||||||
private final Table table;
|
|
||||||
|
|
||||||
public HTable2_0(Table table)
|
|
||||||
{
|
|
||||||
this.table = table;
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public ResultScanner getScanner(Scan filter) throws IOException
|
|
||||||
{
|
|
||||||
return table.getScanner(filter);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Result[] get(List<Get> gets) throws IOException
|
|
||||||
{
|
|
||||||
return table.get(gets);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void batch(List<Row> writes, Object[] results) throws IOException, InterruptedException
|
|
||||||
{
|
|
||||||
table.batch(writes, results);
|
|
||||||
/* table.flushCommits(); not needed anymore */
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void close() throws IOException
|
|
||||||
{
|
|
||||||
table.close();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
@ -1,45 +0,0 @@
|
||||||
// Copyright 2017 JanusGraph Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Copyright DataStax, Inc.
|
|
||||||
* <p>
|
|
||||||
* Please see the included license file for details.
|
|
||||||
*/
|
|
||||||
package org.janusgraph.diskstorage.hbase2;
|
|
||||||
|
|
||||||
import org.apache.hadoop.hbase.client.Get;
|
|
||||||
import org.apache.hadoop.hbase.client.Result;
|
|
||||||
import org.apache.hadoop.hbase.client.ResultScanner;
|
|
||||||
import org.apache.hadoop.hbase.client.Row;
|
|
||||||
import org.apache.hadoop.hbase.client.Scan;
|
|
||||||
|
|
||||||
import java.io.Closeable;
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.List;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This interface hides ABI/API breaking changes that HBase has made to its Table/HTableInterface over the course
|
|
||||||
* of development from 0.94 to 1.0 and beyond.
|
|
||||||
*/
|
|
||||||
public interface TableMask extends Closeable
|
|
||||||
{
|
|
||||||
|
|
||||||
ResultScanner getScanner(Scan filter) throws IOException;
|
|
||||||
|
|
||||||
Result[] get(List<Get> gets) throws IOException;
|
|
||||||
|
|
||||||
void batch(List<Row> writes, Object[] results) throws IOException, InterruptedException;
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
@ -51,12 +51,6 @@
|
||||||
<scope>provided</scope>
|
<scope>provided</scope>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.apache.atlas</groupId>
|
|
||||||
<artifactId>atlas-janusgraph-hbase2</artifactId>
|
|
||||||
<version>${project.version}</version>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.atlas</groupId>
|
<groupId>org.apache.atlas</groupId>
|
||||||
<artifactId>atlas-testtools</artifactId>
|
<artifactId>atlas-testtools</artifactId>
|
||||||
|
|
@ -106,6 +100,10 @@
|
||||||
<groupId>org.apache.tinkerpop</groupId>
|
<groupId>org.apache.tinkerpop</groupId>
|
||||||
<artifactId>gremlin-driver</artifactId>
|
<artifactId>gremlin-driver</artifactId>
|
||||||
</exclusion>
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.noggit</groupId>
|
||||||
|
<artifactId>noggit</artifactId>
|
||||||
|
</exclusion>
|
||||||
</exclusions>
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
|
@ -220,6 +218,10 @@
|
||||||
<groupId>org.codehaus.woodstox</groupId>
|
<groupId>org.codehaus.woodstox</groupId>
|
||||||
<artifactId>woodstox-core-asl</artifactId>
|
<artifactId>woodstox-core-asl</artifactId>
|
||||||
</exclusion>
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.zookeeper</groupId>
|
||||||
|
<artifactId>zookeeper-jute</artifactId>
|
||||||
|
</exclusion>
|
||||||
</exclusions>
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
<!-- CVE Overrides for Lucene -->
|
<!-- CVE Overrides for Lucene -->
|
||||||
|
|
@ -262,6 +264,12 @@
|
||||||
</exclusions>
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.tinkerpop</groupId>
|
||||||
|
<artifactId>gremlin-util</artifactId>
|
||||||
|
<version>${tinkerpop.version}</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.tinkerpop</groupId>
|
<groupId>org.apache.tinkerpop</groupId>
|
||||||
<artifactId>gremlin-groovy</artifactId>
|
<artifactId>gremlin-groovy</artifactId>
|
||||||
|
|
|
||||||
|
|
@ -39,6 +39,7 @@ import org.janusgraph.core.schema.JanusGraphManagement;
|
||||||
import org.janusgraph.diskstorage.StandardIndexProvider;
|
import org.janusgraph.diskstorage.StandardIndexProvider;
|
||||||
import org.janusgraph.diskstorage.StandardStoreManager;
|
import org.janusgraph.diskstorage.StandardStoreManager;
|
||||||
import org.janusgraph.diskstorage.es.ElasticSearch7Index;
|
import org.janusgraph.diskstorage.es.ElasticSearch7Index;
|
||||||
|
import org.janusgraph.diskstorage.hbase.HBaseStoreManager;
|
||||||
import org.janusgraph.diskstorage.solr.Solr6Index;
|
import org.janusgraph.diskstorage.solr.Solr6Index;
|
||||||
import org.janusgraph.graphdb.database.serialize.attribute.SerializableSerializer;
|
import org.janusgraph.graphdb.database.serialize.attribute.SerializableSerializer;
|
||||||
import org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry;
|
import org.janusgraph.graphdb.tinkerpop.JanusGraphIoRegistry;
|
||||||
|
|
@ -85,7 +86,7 @@ public class AtlasJanusGraphDatabase implements GraphDatabase<AtlasJanusVertex,
|
||||||
|
|
||||||
public AtlasJanusGraphDatabase() {
|
public AtlasJanusGraphDatabase() {
|
||||||
//update registry
|
//update registry
|
||||||
GraphSONMapper.build().addRegistry(JanusGraphIoRegistry.getInstance()).create();
|
GraphSONMapper.build().addRegistry(JanusGraphIoRegistry.instance()).create();
|
||||||
}
|
}
|
||||||
|
|
||||||
public static Configuration getConfiguration() throws AtlasException {
|
public static Configuration getConfiguration() throws AtlasException {
|
||||||
|
|
@ -136,11 +137,11 @@ public class AtlasJanusGraphDatabase implements GraphDatabase<AtlasJanusVertex,
|
||||||
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
|
modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL);
|
||||||
|
|
||||||
Map<String, String> customMap = new HashMap<>(StandardStoreManager.getAllManagerClasses());
|
Map<String, String> customMap = new HashMap<>(StandardStoreManager.getAllManagerClasses());
|
||||||
customMap.put("hbase2", org.janusgraph.diskstorage.hbase2.HBaseStoreManager.class.getName());
|
customMap.put("hbase2", HBaseStoreManager.class.getName());
|
||||||
ImmutableMap<String, String> immap = ImmutableMap.copyOf(customMap);
|
ImmutableMap<String, String> immap = ImmutableMap.copyOf(customMap);
|
||||||
field.set(null, immap);
|
field.set(null, immap);
|
||||||
|
|
||||||
LOG.debug("Injected HBase2 support - {}", org.janusgraph.diskstorage.hbase2.HBaseStoreManager.class.getName());
|
LOG.debug("Injected HBase2 support - {}", HBaseStoreManager.class.getName());
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
throw new RuntimeException(e);
|
throw new RuntimeException(e);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -45,7 +45,7 @@ public class AtlasJanusIndexQuery implements AtlasIndexQuery<AtlasJanusVertex, A
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Iterator<Result<AtlasJanusVertex, AtlasJanusEdge>> vertices() {
|
public Iterator<Result<AtlasJanusVertex, AtlasJanusEdge>> vertices() {
|
||||||
Iterator<JanusGraphIndexQuery.Result<JanusGraphVertex>> results = query.vertices().iterator();
|
Iterator<JanusGraphIndexQuery.Result<JanusGraphVertex>> results = query.vertexStream().iterator();
|
||||||
|
|
||||||
Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
||||||
new Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
new Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
||||||
|
|
@ -66,7 +66,7 @@ public class AtlasJanusIndexQuery implements AtlasIndexQuery<AtlasJanusVertex, A
|
||||||
Iterator<JanusGraphIndexQuery.Result<JanusGraphVertex>> results = query
|
Iterator<JanusGraphIndexQuery.Result<JanusGraphVertex>> results = query
|
||||||
.offset(offset)
|
.offset(offset)
|
||||||
.limit(limit)
|
.limit(limit)
|
||||||
.vertices().iterator();
|
.vertexStream().iterator();
|
||||||
|
|
||||||
Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
||||||
new Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
new Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
||||||
|
|
@ -89,7 +89,7 @@ public class AtlasJanusIndexQuery implements AtlasIndexQuery<AtlasJanusVertex, A
|
||||||
.orderBy(sortBy, sortOrder)
|
.orderBy(sortBy, sortOrder)
|
||||||
.offset(offset)
|
.offset(offset)
|
||||||
.limit(limit)
|
.limit(limit)
|
||||||
.vertices().iterator();
|
.vertexStream().iterator();
|
||||||
|
|
||||||
Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
||||||
new Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
new Function<JanusGraphIndexQuery.Result<JanusGraphVertex>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
||||||
|
|
@ -115,7 +115,7 @@ public class AtlasJanusIndexQuery implements AtlasIndexQuery<AtlasJanusVertex, A
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public Iterator<Result<AtlasJanusVertex, AtlasJanusEdge>> edges() {
|
public Iterator<Result<AtlasJanusVertex, AtlasJanusEdge>> edges() {
|
||||||
Iterator<JanusGraphIndexQuery.Result<JanusGraphEdge>> results = query.edges().iterator();
|
Iterator<JanusGraphIndexQuery.Result<JanusGraphEdge>> results = query.edgeStream().iterator();
|
||||||
|
|
||||||
Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
||||||
new Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
new Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
||||||
|
|
@ -136,7 +136,7 @@ public class AtlasJanusIndexQuery implements AtlasIndexQuery<AtlasJanusVertex, A
|
||||||
Iterator<JanusGraphIndexQuery.Result<JanusGraphEdge>> results = query
|
Iterator<JanusGraphIndexQuery.Result<JanusGraphEdge>> results = query
|
||||||
.offset(offset)
|
.offset(offset)
|
||||||
.limit(limit)
|
.limit(limit)
|
||||||
.edges().iterator();
|
.edgeStream().iterator();
|
||||||
|
|
||||||
Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
||||||
new Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
new Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
||||||
|
|
@ -159,7 +159,7 @@ public class AtlasJanusIndexQuery implements AtlasIndexQuery<AtlasJanusVertex, A
|
||||||
.orderBy(sortBy, sortOrder)
|
.orderBy(sortBy, sortOrder)
|
||||||
.offset(offset)
|
.offset(offset)
|
||||||
.limit(limit)
|
.limit(limit)
|
||||||
.edges().iterator();
|
.edgeStream().iterator();
|
||||||
|
|
||||||
Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>> function =
|
||||||
new Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
new Function<JanusGraphIndexQuery.Result<JanusGraphEdge>, Result<AtlasJanusVertex, AtlasJanusEdge>>() {
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
|
@ -32,6 +32,7 @@ atlas.graph.index.search.backend=${graph.index.backend}
|
||||||
|
|
||||||
# Berkeley storage directory
|
# Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkeley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkeley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
# HBase
|
# HBase
|
||||||
# For standalone mode , specify localhost
|
# For standalone mode , specify localhost
|
||||||
|
|
|
||||||
|
|
@ -36,7 +36,6 @@
|
||||||
<module>api</module>
|
<module>api</module>
|
||||||
<module>common</module>
|
<module>common</module>
|
||||||
<module>graphdb-impls</module>
|
<module>graphdb-impls</module>
|
||||||
<module>janus-hbase2</module>
|
|
||||||
<module>janus</module>
|
<module>janus</module>
|
||||||
</modules>
|
</modules>
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -53,6 +53,7 @@ atlas.graph.index.search.backend=${graph.index.backend}
|
||||||
|
|
||||||
#Berkeley storage directory
|
#Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
#hbase
|
#hbase
|
||||||
#For standalone mode , specify localhost
|
#For standalone mode , specify localhost
|
||||||
|
|
|
||||||
|
|
@ -100,7 +100,7 @@ public abstract class AtlasHook {
|
||||||
|
|
||||||
String failedMessageFile = atlasProperties.getString(ATLAS_NOTIFICATION_FAILED_MESSAGES_FILENAME_KEY, ATLAS_HOOK_FAILED_MESSAGES_LOG_DEFAULT_NAME);
|
String failedMessageFile = atlasProperties.getString(ATLAS_NOTIFICATION_FAILED_MESSAGES_FILENAME_KEY, ATLAS_HOOK_FAILED_MESSAGES_LOG_DEFAULT_NAME);
|
||||||
|
|
||||||
logFailedMessages = atlasProperties.getBoolean(ATLAS_NOTIFICATION_LOG_FAILED_MESSAGES_ENABLED_KEY, true);
|
logFailedMessages = atlasProperties.getBoolean(ATLAS_NOTIFICATION_LOG_FAILED_MESSAGES_ENABLED_KEY, false);
|
||||||
|
|
||||||
if (logFailedMessages) {
|
if (logFailedMessages) {
|
||||||
failedMessagesLogger = new FailedMessagesLogger(failedMessageFile);
|
failedMessagesLogger = new FailedMessagesLogger(failedMessageFile);
|
||||||
|
|
|
||||||
37
pom.xml
37
pom.xml
|
|
@ -628,7 +628,7 @@
|
||||||
<entity.repository.impl>org.apache.atlas.repository.audit.HBaseBasedAuditRepository
|
<entity.repository.impl>org.apache.atlas.repository.audit.HBaseBasedAuditRepository
|
||||||
</entity.repository.impl>
|
</entity.repository.impl>
|
||||||
<graph.index.backend>solr</graph.index.backend>
|
<graph.index.backend>solr</graph.index.backend>
|
||||||
<graph.storage.backend>hbase2</graph.storage.backend>
|
<graph.storage.backend>hbase</graph.storage.backend>
|
||||||
<graph.storage.hostname>localhost</graph.storage.hostname>
|
<graph.storage.hostname>localhost</graph.storage.hostname>
|
||||||
<solr.zk.address>localhost:9983</solr.zk.address>
|
<solr.zk.address>localhost:9983</solr.zk.address>
|
||||||
</properties>
|
</properties>
|
||||||
|
|
@ -730,7 +730,7 @@
|
||||||
<curator.version>4.3.0</curator.version>
|
<curator.version>4.3.0</curator.version>
|
||||||
<doxia.version>1.8</doxia.version>
|
<doxia.version>1.8</doxia.version>
|
||||||
<dropwizard-metrics>3.2.2</dropwizard-metrics>
|
<dropwizard-metrics>3.2.2</dropwizard-metrics>
|
||||||
<elasticsearch.version>7.6.0</elasticsearch.version>
|
<elasticsearch.version>7.17.8</elasticsearch.version>
|
||||||
<entity.repository.impl>org.apache.atlas.repository.audit.InMemoryEntityAuditRepository</entity.repository.impl>
|
<entity.repository.impl>org.apache.atlas.repository.audit.InMemoryEntityAuditRepository</entity.repository.impl>
|
||||||
<enunciate-maven-plugin.version>2.13.2</enunciate-maven-plugin.version>
|
<enunciate-maven-plugin.version>2.13.2</enunciate-maven-plugin.version>
|
||||||
<failsafe.version>2.18.1</failsafe.version>
|
<failsafe.version>2.18.1</failsafe.version>
|
||||||
|
|
@ -742,17 +742,18 @@
|
||||||
<guava.version>25.1-jre</guava.version>
|
<guava.version>25.1-jre</guava.version>
|
||||||
<guice.version>4.1.0</guice.version>
|
<guice.version>4.1.0</guice.version>
|
||||||
<hadoop.hdfs-client.version>${hadoop.version}</hadoop.hdfs-client.version>
|
<hadoop.hdfs-client.version>${hadoop.version}</hadoop.hdfs-client.version>
|
||||||
<hadoop.version>3.3.0</hadoop.version>
|
<hadoop.version>3.3.6</hadoop.version>
|
||||||
<hbase.version>2.3.3</hbase.version>
|
<hbase.version>2.6.0</hbase.version>
|
||||||
<hive.version>3.1.2</hive.version>
|
<hive.version>3.1.3</hive.version>
|
||||||
<hppc.version>0.8.1</hppc.version>
|
<hppc.version>0.8.1</hppc.version>
|
||||||
<httpcomponents-httpclient.version>4.5.13</httpcomponents-httpclient.version>
|
<httpcomponents-httpclient.version>4.5.13</httpcomponents-httpclient.version>
|
||||||
<httpcomponents-httpcore.version>4.4.13</httpcomponents-httpcore.version>
|
<httpcomponents-httpcore.version>4.4.13</httpcomponents-httpcore.version>
|
||||||
<ivy.version>2.5.2</ivy.version>
|
<ivy.version>2.5.2</ivy.version>
|
||||||
<jackson.databind.version>2.11.3</jackson.databind.version>
|
<jackson.databind.version>2.12.7</jackson.databind.version>
|
||||||
<jackson.version>2.11.3</jackson.version>
|
<jackson.version>2.12.7</jackson.version>
|
||||||
<janusgraph.version>0.6.4</janusgraph.version>
|
<janusgraph.version>1.0.0</janusgraph.version>
|
||||||
<janusgraph.cassandra.version>0.5.3</janusgraph.cassandra.version>
|
<janusgraph.cassandra.version>0.5.3</janusgraph.cassandra.version>
|
||||||
|
<jaxb.api.version>2.3.1</jaxb.api.version>
|
||||||
<javax-inject.version>1</javax-inject.version>
|
<javax-inject.version>1</javax-inject.version>
|
||||||
<javax.servlet.version>3.1.0</javax.servlet.version>
|
<javax.servlet.version>3.1.0</javax.servlet.version>
|
||||||
<java.version.required>1.8</java.version.required>
|
<java.version.required>1.8</java.version.required>
|
||||||
|
|
@ -762,18 +763,18 @@
|
||||||
<jersey.version>1.19</jersey.version>
|
<jersey.version>1.19</jersey.version>
|
||||||
<jettison.version>1.5.4</jettison.version>
|
<jettison.version>1.5.4</jettison.version>
|
||||||
<jetty-maven-plugin.stopWait>10</jetty-maven-plugin.stopWait>
|
<jetty-maven-plugin.stopWait>10</jetty-maven-plugin.stopWait>
|
||||||
<jetty.version>9.4.31.v20200723</jetty.version>
|
<jetty.version>9.4.53.v20231009</jetty.version>
|
||||||
<joda-time.version>2.10.6</joda-time.version>
|
<joda-time.version>2.10.6</joda-time.version>
|
||||||
<json.version>3.2.11</json.version>
|
<json.version>3.2.11</json.version>
|
||||||
<json-simple.version>1.1.1</json-simple.version>
|
<json-simple.version>1.1.1</json-simple.version>
|
||||||
<jsr.version>1.1</jsr.version>
|
<jsr.version>1.1</jsr.version>
|
||||||
<junit.version>4.13.2</junit.version>
|
<junit.version>4.13.2</junit.version>
|
||||||
<kafka.scala.binary.version>2.12</kafka.scala.binary.version>
|
<kafka.scala.binary.version>2.12</kafka.scala.binary.version>
|
||||||
<kafka.version>2.8.1</kafka.version>
|
<kafka.version>2.8.2</kafka.version>
|
||||||
<keycloak.version>6.0.1</keycloak.version>
|
<keycloak.version>6.0.1</keycloak.version>
|
||||||
<log4j.version>1.2.17</log4j.version>
|
<log4j.version>1.2.17</log4j.version>
|
||||||
<log4j2.version>2.17.1</log4j2.version>
|
<log4j2.version>2.17.1</log4j2.version>
|
||||||
<lucene-solr.version>8.6.3</lucene-solr.version>
|
<lucene-solr.version>8.11.3</lucene-solr.version>
|
||||||
<maven-site-plugin.version>3.7</maven-site-plugin.version>
|
<maven-site-plugin.version>3.7</maven-site-plugin.version>
|
||||||
<MaxPermGen>512m</MaxPermGen>
|
<MaxPermGen>512m</MaxPermGen>
|
||||||
<netty.version>4.1.100.Final</netty.version>
|
<netty.version>4.1.100.Final</netty.version>
|
||||||
|
|
@ -798,8 +799,8 @@
|
||||||
<skipTests>false</skipTests>
|
<skipTests>false</skipTests>
|
||||||
<skipUTs>false</skipUTs>
|
<skipUTs>false</skipUTs>
|
||||||
<slf4j.version>1.7.30</slf4j.version>
|
<slf4j.version>1.7.30</slf4j.version>
|
||||||
<solr-test-framework.version>8.6.3</solr-test-framework.version>
|
<solr-test-framework.version>8.11.3</solr-test-framework.version>
|
||||||
<solr.version>8.6.3</solr.version>
|
<solr.version>8.11.3</solr.version>
|
||||||
<spray.version>1.3.1</spray.version>
|
<spray.version>1.3.1</spray.version>
|
||||||
<spring.security.version>5.8.11</spring.security.version>
|
<spring.security.version>5.8.11</spring.security.version>
|
||||||
<spring.version>5.3.27</spring.version>
|
<spring.version>5.3.27</spring.version>
|
||||||
|
|
@ -808,9 +809,9 @@
|
||||||
<surefire.forkCount>2C</surefire.forkCount>
|
<surefire.forkCount>2C</surefire.forkCount>
|
||||||
<surefire.version>3.0.0-M5</surefire.version>
|
<surefire.version>3.0.0-M5</surefire.version>
|
||||||
<testng.version>7.0.0</testng.version>
|
<testng.version>7.0.0</testng.version>
|
||||||
<tinkerpop.version>3.5.7</tinkerpop.version>
|
<tinkerpop.version>3.7.0</tinkerpop.version>
|
||||||
<woodstox-core.version>5.0.3</woodstox-core.version>
|
<woodstox-core.version>5.0.3</woodstox-core.version>
|
||||||
<zookeeper.version>3.5.7</zookeeper.version>
|
<zookeeper.version>3.9.2</zookeeper.version>
|
||||||
</properties>
|
</properties>
|
||||||
|
|
||||||
<modules>
|
<modules>
|
||||||
|
|
@ -949,6 +950,12 @@
|
||||||
<version>${slf4j.version}</version>
|
<version>${slf4j.version}</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
<dependency>
|
||||||
|
<groupId>javax.xml.bind</groupId>
|
||||||
|
<artifactId>jaxb-api</artifactId>
|
||||||
|
<version>${jaxb.api.version}</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>log4j</groupId>
|
<groupId>log4j</groupId>
|
||||||
<artifactId>log4j</artifactId>
|
<artifactId>log4j</artifactId>
|
||||||
|
|
|
||||||
|
|
@ -373,23 +373,26 @@ public class AtlasDiscoveryServiceTest extends BasicTestSetup {
|
||||||
SearchParameters params = new SearchParameters();
|
SearchParameters params = new SearchParameters();
|
||||||
params.setTypeName(HIVE_TABLE_TYPE);
|
params.setTypeName(HIVE_TABLE_TYPE);
|
||||||
params.setExcludeDeletedEntities(true);
|
params.setExcludeDeletedEntities(true);
|
||||||
|
params.setMarker(SearchContext.MarkerUtil.MARKER_START);
|
||||||
|
|
||||||
|
int totalCount = discoveryService.searchWithParameters(params).getEntities().size();
|
||||||
|
|
||||||
params.setMarker(SearchContext.MarkerUtil.MARKER_START);
|
params.setMarker(SearchContext.MarkerUtil.MARKER_START);
|
||||||
params.setLimit(5);
|
params.setLimit(5);
|
||||||
AtlasSearchResult searchResult = discoveryService.searchWithParameters(params);
|
|
||||||
|
AtlasSearchResult searchResult = discoveryService.searchWithParameters(params);
|
||||||
List<AtlasEntityHeader> entityHeaders = searchResult.getEntities();
|
List<AtlasEntityHeader> entityHeaders = searchResult.getEntities();
|
||||||
|
|
||||||
Assert.assertTrue(CollectionUtils.isNotEmpty(entityHeaders));
|
Assert.assertTrue(CollectionUtils.isNotEmpty(entityHeaders));
|
||||||
assertEquals(entityHeaders.size(), 5);
|
assertEquals(entityHeaders.size(), 5);
|
||||||
Assert.assertTrue(StringUtils.isNotEmpty(searchResult.getNextMarker()));
|
Assert.assertTrue(StringUtils.isNotEmpty(searchResult.getNextMarker()));
|
||||||
|
|
||||||
long maxEntities = searchResult.getApproximateCount();
|
|
||||||
|
|
||||||
//get next marker and set in marker of subsequent request
|
//get next marker and set in marker of subsequent request
|
||||||
params.setMarker(SearchContext.MarkerUtil.MARKER_START);
|
params.setMarker(SearchContext.MarkerUtil.MARKER_START);
|
||||||
params.setLimit((int)maxEntities + 10);
|
params.setLimit(totalCount + 10);
|
||||||
AtlasSearchResult nextsearchResult = discoveryService.searchWithParameters(params);
|
AtlasSearchResult nextsearchResult = discoveryService.searchWithParameters(params);
|
||||||
|
|
||||||
Assert.assertTrue(nextsearchResult.getNextMarker().equals("-1"));
|
Assert.assertEquals(nextsearchResult.getNextMarker(), "-1");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -53,7 +53,7 @@ public class TraversalComposerTest extends BaseDSLComposer {
|
||||||
"[JanusGraphStep([],[__typeName.eq(DB)]), DedupGlobalStep(null,null)@[d], RangeGlobalStep(0,25)]");
|
"[JanusGraphStep([],[__typeName.eq(DB)]), DedupGlobalStep(null,null)@[d], RangeGlobalStep(0,25)]");
|
||||||
|
|
||||||
verify("Table groupby(owner) select name, owner, clusterName orderby name",
|
verify("Table groupby(owner) select name, owner, clusterName orderby name",
|
||||||
"[JanusGraphStep([],[__typeName.eq(Table), Table.owner.neq]), GroupStep(value([CoalesceStep([value(Table.owner), (null)])]),[FoldStep]), DedupGlobalStep(null,null), RangeGlobalStep(0,25)]");
|
"[JanusGraphStep([],[__typeName.eq(Table), Table.owner.neq]), GroupStep(value(Table.owner),[FoldStep]), DedupGlobalStep(null,null), RangeGlobalStep(0,25)]");
|
||||||
|
|
||||||
verify("hive_column where name = 'test'",
|
verify("hive_column where name = 'test'",
|
||||||
"[JanusGraphStep([],[__typeName.eq(hive_column), hive_column.name.eq(test)]), DedupGlobalStep(null,null), RangeGlobalStep(0,25)]");
|
"[JanusGraphStep([],[__typeName.eq(hive_column), hive_column.name.eq(test)]), DedupGlobalStep(null,null), RangeGlobalStep(0,25)]");
|
||||||
|
|
@ -71,7 +71,7 @@ public class TraversalComposerTest extends BaseDSLComposer {
|
||||||
"[JanusGraphStep([],[__typeName.eq(hive_column), hive_column.name.eq(test_limit)]), DedupGlobalStep(null,null), DedupGlobalStep(null,null), RangeGlobalStep(4,6)]");
|
"[JanusGraphStep([],[__typeName.eq(hive_column), hive_column.name.eq(test_limit)]), DedupGlobalStep(null,null), DedupGlobalStep(null,null), RangeGlobalStep(4,6)]");
|
||||||
|
|
||||||
verify("hive_db where owner != 'hdfs'",
|
verify("hive_db where owner != 'hdfs'",
|
||||||
"[JanusGraphStep([],[__typeName.eq(hive_db)]), OrStep([[HasStep([hive_db.owner.neq(hdfs)])], [NotStep([JanusGraphPropertiesStep([hive_db.owner],property)])]]), DedupGlobalStep(null,null), RangeGlobalStep(0,25)]");
|
"[JanusGraphStep([],[__typeName.eq(hive_db)]), JanusGraphMultiQueryStep, NoOpBarrierStep(2500), OrStep([[JanusGraphHasStep([hive_db.owner.neq(hdfs)])], [NotStep([JanusGraphPropertiesStep([hive_db.owner],property)])]]), DedupGlobalStep(null,null), RangeGlobalStep(0,25)]");
|
||||||
}
|
}
|
||||||
|
|
||||||
private void verify(String dsl, String expected) {
|
private void verify(String dsl, String expected) {
|
||||||
|
|
|
||||||
|
|
@ -68,8 +68,17 @@
|
||||||
<groupId>org.apache.commons</groupId>
|
<groupId>org.apache.commons</groupId>
|
||||||
<artifactId>commons-configuration2</artifactId>
|
<artifactId>commons-configuration2</artifactId>
|
||||||
</exclusion>
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.zookeeper</groupId>
|
||||||
|
<artifactId>zookeeper-jute</artifactId>
|
||||||
|
</exclusion>
|
||||||
</exclusions>
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.zookeeper</groupId>
|
||||||
|
<artifactId>zookeeper-jute</artifactId>
|
||||||
|
<version>${zookeeper.version}</version>
|
||||||
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>com.vividsolutions</groupId>
|
<groupId>com.vividsolutions</groupId>
|
||||||
|
|
|
||||||
|
|
@ -59,6 +59,10 @@
|
||||||
<groupId>com.rabbitmq</groupId>
|
<groupId>com.rabbitmq</groupId>
|
||||||
<artifactId>amqp-client</artifactId>
|
<artifactId>amqp-client</artifactId>
|
||||||
</exclusion>
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.noggit</groupId>
|
||||||
|
<artifactId>noggit</artifactId>
|
||||||
|
</exclusion>
|
||||||
</exclusions>
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -151,18 +151,6 @@
|
||||||
<artifactId>atlas-intg</artifactId>
|
<artifactId>atlas-intg</artifactId>
|
||||||
</dependency>
|
</dependency>
|
||||||
|
|
||||||
<dependency>
|
|
||||||
<groupId>org.apache.atlas</groupId>
|
|
||||||
<artifactId>atlas-janusgraph-hbase2</artifactId>
|
|
||||||
<version>${project.version}</version>
|
|
||||||
<exclusions>
|
|
||||||
<exclusion>
|
|
||||||
<groupId>org.noggit</groupId>
|
|
||||||
<artifactId>noggit</artifactId>
|
|
||||||
</exclusion>
|
|
||||||
</exclusions>
|
|
||||||
</dependency>
|
|
||||||
|
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-common</artifactId>
|
<artifactId>hadoop-common</artifactId>
|
||||||
|
|
|
||||||
|
|
@ -19,6 +19,7 @@ package org.apache.atlas.web.service;
|
||||||
|
|
||||||
import org.apache.atlas.web.model.DebugMetrics;
|
import org.apache.atlas.web.model.DebugMetrics;
|
||||||
import org.apache.commons.lang.StringUtils;
|
import org.apache.commons.lang.StringUtils;
|
||||||
|
import org.apache.hadoop.hbase.shaded.org.apache.commons.configuration2.SubsetConfiguration;
|
||||||
import org.apache.hadoop.metrics2.AbstractMetric;
|
import org.apache.hadoop.metrics2.AbstractMetric;
|
||||||
import org.apache.hadoop.metrics2.MetricsRecord;
|
import org.apache.hadoop.metrics2.MetricsRecord;
|
||||||
import org.apache.hadoop.metrics2.MetricsSink;
|
import org.apache.hadoop.metrics2.MetricsSink;
|
||||||
|
|
@ -59,7 +60,7 @@ public class AtlasDebugMetricsSink implements MetricsSink {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void init(org.apache.commons.configuration2.SubsetConfiguration subsetConfiguration) {
|
public void init(SubsetConfiguration subsetConfiguration) {
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
|
|
|
||||||
|
|
@ -29,7 +29,7 @@ import javax.ws.rs.HttpMethod;
|
||||||
import javax.ws.rs.core.Response;
|
import javax.ws.rs.core.Response;
|
||||||
import java.util.HashMap;
|
import java.util.HashMap;
|
||||||
|
|
||||||
import static org.testng.Assert.assertEquals;
|
import static org.testng.Assert.assertTrue;
|
||||||
import static org.testng.Assert.fail;
|
import static org.testng.Assert.fail;
|
||||||
|
|
||||||
public class DebugMetricsIT extends BaseResourceIT {
|
public class DebugMetricsIT extends BaseResourceIT {
|
||||||
|
|
@ -71,7 +71,7 @@ public class DebugMetricsIT extends BaseResourceIT {
|
||||||
if(newCreateOrUpdateDTO != null) {
|
if(newCreateOrUpdateDTO != null) {
|
||||||
newCreateOrUpdateCount = newCreateOrUpdateDTO.getNumops();
|
newCreateOrUpdateCount = newCreateOrUpdateDTO.getNumops();
|
||||||
}
|
}
|
||||||
assertEquals(newCreateOrUpdateCount, (currentCreateOrUpdateCount + 2), "Count didn't increase after making API call");
|
assertTrue(newCreateOrUpdateCount > currentCreateOrUpdateCount, "Count didn't increase after making API call: expected [" + (currentCreateOrUpdateCount + 2) + "] but found [" + newCreateOrUpdateCount + "]");
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
fail("Caught exception while running the test: " + e.getMessage(), e);
|
fail("Caught exception while running the test: " + e.getMessage(), e);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -36,6 +36,7 @@ atlas.graph.index.search.backend=solr
|
||||||
|
|
||||||
#Berkeley storage directory
|
#Berkeley storage directory
|
||||||
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
atlas.graph.storage.directory=${sys:atlas.data}/berkley
|
||||||
|
atlas.graph.storage.transactions=true
|
||||||
|
|
||||||
#hbase
|
#hbase
|
||||||
#For standalone mode , specify localhost
|
#For standalone mode , specify localhost
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue