BUG-32766 Code cleanup and refactoring. Contributed by Venkatesh Seetharam
|
|
@ -17,14 +17,16 @@
|
|||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<parent>
|
||||
<artifactId>metadata-governance</artifactId>
|
||||
<groupId>org.apache.hadoop.metadata</groupId>
|
||||
<version>0.1-incubating-SNAPSHOT</version>
|
||||
</parent>
|
||||
<!--
|
||||
<parent>
|
||||
<artifactId>metadata-governance</artifactId>
|
||||
<groupId>org.apache.hadoop.metadata</groupId>
|
||||
<version>0.1-incubating-SNAPSHOT</version>
|
||||
</parent>
|
||||
-->
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<artifactId>metadata-falcontypes</artifactId>
|
||||
|
|
@ -6,9 +6,9 @@
|
|||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -34,11 +34,11 @@ import org.apache.hadoop.metadata.MetadataException;
|
|||
import org.apache.hadoop.metadata.Referenceable;
|
||||
import org.apache.hadoop.metadata.Struct;
|
||||
import org.apache.hadoop.metadata.repository.MetadataRepository;
|
||||
import org.apache.hadoop.metadata.types.EnumType;
|
||||
import org.apache.hadoop.metadata.types.Multiplicity;
|
||||
import org.apache.hadoop.metadata.types.StructType;
|
||||
import org.apache.hadoop.metadata.types.TraitType;
|
||||
import org.apache.hadoop.metadata.types.TypeSystem;
|
||||
import org.apache.hadoop.metadata.typesystem.types.EnumType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.Multiplicity;
|
||||
import org.apache.hadoop.metadata.typesystem.types.StructType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.TraitType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.TypeSystem;
|
||||
import org.parboiled.common.StringUtils;
|
||||
|
||||
import javax.xml.bind.JAXBException;
|
||||
|
|
@ -6,9 +6,9 @@
|
|||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -20,16 +20,17 @@ package org.apache.metadata.falcon;
|
|||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import org.apache.hadoop.metadata.MetadataException;
|
||||
import org.apache.hadoop.metadata.types.AttributeDefinition;
|
||||
import org.apache.hadoop.metadata.types.ClassType;
|
||||
import org.apache.hadoop.metadata.types.DataTypes;
|
||||
import org.apache.hadoop.metadata.types.EnumTypeDefinition;
|
||||
import org.apache.hadoop.metadata.types.EnumValue;
|
||||
import org.apache.hadoop.metadata.types.HierarchicalTypeDefinition;
|
||||
import org.apache.hadoop.metadata.types.Multiplicity;
|
||||
import org.apache.hadoop.metadata.types.StructTypeDefinition;
|
||||
import org.apache.hadoop.metadata.types.TraitType;
|
||||
import org.apache.hadoop.metadata.types.TypeSystem;
|
||||
import org.apache.hadoop.metadata.typesystem.types.AttributeDefinition;
|
||||
import org.apache.hadoop.metadata.typesystem.types.ClassType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.DataTypes;
|
||||
import org.apache.hadoop.metadata.typesystem.types.EnumTypeDefinition;
|
||||
import org.apache.hadoop.metadata.typesystem.types.EnumValue;
|
||||
import org.apache.hadoop.metadata.typesystem.types.HierarchicalTypeDefinition;
|
||||
import org.apache.hadoop.metadata.typesystem.types.IDataType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.Multiplicity;
|
||||
import org.apache.hadoop.metadata.typesystem.types.StructTypeDefinition;
|
||||
import org.apache.hadoop.metadata.typesystem.types.TraitType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.TypeSystem;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
|
@ -38,31 +39,28 @@ import java.util.List;
|
|||
|
||||
public class FalconTypeSystem {
|
||||
public static final Logger LOG = LoggerFactory.getLogger(FalconTypeSystem.class);
|
||||
|
||||
private static FalconTypeSystem INSTANCE;
|
||||
public static final TypeSystem TYPE_SYSTEM = TypeSystem.getInstance();
|
||||
|
||||
private List<StructTypeDefinition> structTypeDefinitions = new ArrayList<>();
|
||||
private List<HierarchicalTypeDefinition<TraitType>> traitTypeDefinitions = new ArrayList<>();
|
||||
|
||||
public static FalconTypeSystem getInstance() throws MetadataException {
|
||||
if (INSTANCE == null) {
|
||||
synchronized(LOG) {
|
||||
if (INSTANCE == null) {
|
||||
INSTANCE = new FalconTypeSystem();
|
||||
}
|
||||
}
|
||||
}
|
||||
return INSTANCE;
|
||||
}
|
||||
|
||||
private FalconTypeSystem() throws MetadataException {
|
||||
HierarchicalTypeDefinition<ClassType> cluster = defineCluster();
|
||||
//TODO define feed and process
|
||||
|
||||
TYPE_SYSTEM.defineTypes(ImmutableList.copyOf(structTypeDefinitions), ImmutableList.copyOf(traitTypeDefinitions),
|
||||
ImmutableList.of(cluster));
|
||||
}
|
||||
|
||||
public static FalconTypeSystem getInstance() throws MetadataException {
|
||||
if (INSTANCE == null) {
|
||||
synchronized (LOG) {
|
||||
if (INSTANCE == null) {
|
||||
INSTANCE = new FalconTypeSystem();
|
||||
}
|
||||
}
|
||||
}
|
||||
return INSTANCE;
|
||||
}
|
||||
|
||||
private HierarchicalTypeDefinition<ClassType> defineCluster() throws MetadataException {
|
||||
|
|
@ -80,7 +78,8 @@ public class FalconTypeSystem {
|
|||
new AttributeDefinition("properties", TYPE_SYSTEM.defineMapType(DataTypes.STRING_TYPE, DataTypes.STRING_TYPE).getName(), Multiplicity.OPTIONAL, false, null),
|
||||
};
|
||||
HierarchicalTypeDefinition<ClassType> cluster =
|
||||
new HierarchicalTypeDefinition<>(ClassType.class, DefinedTypes.CLUSTER.name(), ImmutableList.<String>of(), attributeDefinitions);
|
||||
new HierarchicalTypeDefinition<>(ClassType.class, DefinedTypes.CLUSTER.name(),
|
||||
ImmutableList.<String>of(), attributeDefinitions);
|
||||
LOG.debug("Created definition for " + DefinedTypes.CLUSTER.name());
|
||||
return cluster;
|
||||
}
|
||||
|
|
@ -109,8 +108,10 @@ public class FalconTypeSystem {
|
|||
TYPE_SYSTEM.defineEnumType(locationType);
|
||||
|
||||
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
|
||||
new AttributeDefinition("type", DefinedTypes.CLUSTER_LOCATION_TYPE.name(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("path", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("type", DefinedTypes.CLUSTER_LOCATION_TYPE.name(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("path", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
};
|
||||
LOG.debug("Created definition for " + DefinedTypes.CLUSTER_LOCATION.name());
|
||||
StructTypeDefinition location = new StructTypeDefinition(DefinedTypes.CLUSTER_LOCATION.name(), attributeDefinitions);
|
||||
|
|
@ -133,9 +134,12 @@ public class FalconTypeSystem {
|
|||
TYPE_SYSTEM.defineEnumType(interfaceType);
|
||||
|
||||
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
|
||||
new AttributeDefinition("type", DefinedTypes.CLUSTER_INTERFACE_TYPE.name(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("endpoint", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("version", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("type", DefinedTypes.CLUSTER_INTERFACE_TYPE.name(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("endpoint", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("version", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
};
|
||||
LOG.debug("Created definition for " + DefinedTypes.CLUSTER_INTERFACE.name());
|
||||
StructTypeDefinition interfaceEntity = new StructTypeDefinition(DefinedTypes.CLUSTER_INTERFACE.name(), attributeDefinitions);
|
||||
|
|
@ -156,9 +160,12 @@ public class FalconTypeSystem {
|
|||
|
||||
private StructTypeDefinition defineACL() {
|
||||
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
|
||||
new AttributeDefinition("owner", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("group", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("permission", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("owner", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("group", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("permission", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
};
|
||||
LOG.debug("Created definition for " + DefinedTypes.ACL.name());
|
||||
StructTypeDefinition acl = new StructTypeDefinition(DefinedTypes.ACL.name(), attributeDefinitions);
|
||||
|
|
@ -6,9 +6,9 @@
|
|||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -6,9 +6,9 @@
|
|||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -17,14 +17,16 @@
|
|||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<parent>
|
||||
<artifactId>metadata-governance</artifactId>
|
||||
<groupId>org.apache.hadoop.metadata</groupId>
|
||||
<version>0.1-incubating-SNAPSHOT</version>
|
||||
</parent>
|
||||
<!--
|
||||
<parent>
|
||||
<artifactId>metadata-governance</artifactId>
|
||||
<groupId>org.apache.hadoop.metadata</groupId>
|
||||
<version>0.1-incubating-SNAPSHOT</version>
|
||||
</parent>
|
||||
-->
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
<artifactId>metadata-hivetypes</artifactId>
|
||||
|
|
@ -41,14 +43,14 @@
|
|||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.calcite</groupId>
|
||||
<artifactId>calcite-avatica</artifactId>
|
||||
</dependency>
|
||||
<groupId>org.apache.calcite</groupId>
|
||||
<artifactId>calcite-avatica</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.calcite</groupId>
|
||||
<artifactId>calcite-core</artifactId>
|
||||
</dependency>
|
||||
|
||||
<groupId>org.apache.calcite</groupId>
|
||||
<artifactId>calcite-core</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.google.code.gson</groupId>
|
||||
<artifactId>gson</artifactId>
|
||||
|
|
@ -6,9 +6,9 @@
|
|||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -66,27 +66,21 @@ import java.util.concurrent.TimeUnit;
|
|||
public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHook {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(HiveHook.class.getName());
|
||||
private static ExecutorService executor;
|
||||
private HiveTypeSystem hiveTypeSystem;
|
||||
private static final String dgcDumpDir = "/tmp/dgcfiles";
|
||||
// wait time determines how long we wait before we exit the jvm on
|
||||
// shutdown. Pending requests after that will not be sent.
|
||||
private static final int WAIT_TIME = 3;
|
||||
Connection connection = null;
|
||||
PreparedStatement insertStatement = null;
|
||||
PreparedStatement updateStatement = null;
|
||||
private static final String dbHost = "10.11.4.125";
|
||||
private static final String url = "jdbc:postgres://" + dbHost + "/dgctest";
|
||||
private static final String user = "postgres";
|
||||
private static final String password = "postgres";
|
||||
|
||||
|
||||
private static final String insertQuery =
|
||||
"insert into query_info(query_id, query_text, query_plan, start_time, user_name, query_graph) "
|
||||
+ "values (?, ?, ?, ?, ?, ?";
|
||||
|
||||
"insert into query_info(query_id, query_text, query_plan, start_time, user_name, " +
|
||||
"query_graph) "
|
||||
+ "values (?, ?, ?, ?, ?, ?";
|
||||
private static final String updateQuery =
|
||||
"update query_info set end_time = ? where query_id = ?";
|
||||
private static ExecutorService executor;
|
||||
|
||||
static {
|
||||
// anything shared should be initialized here and destroyed in the
|
||||
|
|
@ -124,31 +118,23 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
LOG.info("Created DgiHook");
|
||||
}
|
||||
|
||||
public HiveHook() {
|
||||
Connection connection = null;
|
||||
PreparedStatement insertStatement = null;
|
||||
PreparedStatement updateStatement = null;
|
||||
private HiveTypeSystem hiveTypeSystem;
|
||||
|
||||
public HiveHook() {
|
||||
try {
|
||||
File dgcDumpFile = new File(dgcDumpDir);
|
||||
dgcDumpFile.mkdirs();
|
||||
connection = DriverManager.getConnection(url, user, password);
|
||||
insertStatement = connection.prepareStatement(insertQuery);
|
||||
updateStatement = connection.prepareStatement(updateQuery);
|
||||
}
|
||||
catch(Exception e) {
|
||||
} catch (Exception e) {
|
||||
LOG.error("Exception initializing HiveHook " + e);
|
||||
}
|
||||
}
|
||||
|
||||
private class MySemanticAnaylzer extends BaseSemanticAnalyzer {
|
||||
public MySemanticAnaylzer(HiveConf conf) throws SemanticException {
|
||||
super(conf);
|
||||
}
|
||||
public void analyzeInternal(ASTNode ast) throws SemanticException {
|
||||
throw new RuntimeException("Not implemented");
|
||||
}
|
||||
public void setInputs(HashSet<ReadEntity> inputs) {
|
||||
this.inputs = inputs;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run(final HookContext hookContext) throws Exception {
|
||||
if (executor == null) {
|
||||
|
|
@ -188,10 +174,12 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
String query = plan.getQueryStr();
|
||||
List<Task<?>> rootTasks = plan.getRootTasks();
|
||||
|
||||
//We need to somehow get the sem associated with the plan and use it here.
|
||||
//We need to somehow get the sem associated with the plan and
|
||||
// use it here.
|
||||
//MySemanticAnaylzer sem = new MySemanticAnaylzer(conf);
|
||||
//sem.setInputs(plan.getInputs());
|
||||
//ExplainWork ew = new ExplainWork(null, null, rootTasks, plan.getFetchTask(), null, sem,
|
||||
//ExplainWork ew = new ExplainWork(null, null, rootTasks,
|
||||
// plan.getFetchTask(), null, sem,
|
||||
// false, true, false, false, false);
|
||||
//JSONObject explainPlan =
|
||||
// explain.getJSONLogicalPlan(null, ew);
|
||||
|
|
@ -205,15 +193,17 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
fireAndForget(conf,
|
||||
createPreHookEvent(queryId, query,
|
||||
explainPlan, queryStartTime,
|
||||
user, inputs, outputs, graph));
|
||||
user, inputs, outputs, graph));
|
||||
break;
|
||||
case POST_EXEC_HOOK: // command succeeded successfully
|
||||
fireAndForget(conf, createPostHookEvent(queryId, currentTime, user,
|
||||
true, inputs, outputs));
|
||||
fireAndForget(conf,
|
||||
createPostHookEvent(queryId, currentTime, user,
|
||||
true, inputs, outputs));
|
||||
break;
|
||||
case ON_FAILURE_HOOK: // command failed
|
||||
fireAndForget(conf, createPostHookEvent(queryId, currentTime, user,
|
||||
false, inputs, outputs));
|
||||
fireAndForget(conf,
|
||||
createPostHookEvent(queryId, currentTime, user,
|
||||
false, inputs, outputs));
|
||||
break;
|
||||
default:
|
||||
//ignore
|
||||
|
|
@ -230,7 +220,7 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
|
||||
private void appendEntities(JSONObject obj, String key,
|
||||
Set<? extends Entity> entities)
|
||||
throws JSONException {
|
||||
throws JSONException {
|
||||
|
||||
for (Entity e : entities) {
|
||||
if (e != null) {
|
||||
|
|
@ -245,8 +235,9 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
|
||||
private JSONObject createPreHookEvent(String queryId, String query,
|
||||
JSONObject explainPlan, long startTime, String user,
|
||||
Set<ReadEntity> inputs, Set<WriteEntity> outputs, String graph)
|
||||
throws JSONException {
|
||||
Set<ReadEntity> inputs, Set<WriteEntity> outputs,
|
||||
String graph)
|
||||
throws JSONException {
|
||||
|
||||
JSONObject queryObj = new JSONObject();
|
||||
|
||||
|
|
@ -271,7 +262,7 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
private JSONObject createPostHookEvent(String queryId, long stopTime,
|
||||
String user, boolean success, Set<ReadEntity> inputs,
|
||||
Set<WriteEntity> outputs)
|
||||
throws JSONException {
|
||||
throws JSONException {
|
||||
|
||||
JSONObject completionObj = new JSONObject();
|
||||
|
||||
|
|
@ -295,18 +286,19 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
|
||||
LOG.info("Submitting: " + obj.toString(2));
|
||||
|
||||
String queryId = (String)obj.get("queryId");
|
||||
String queryId = (String) obj.get("queryId");
|
||||
|
||||
try {
|
||||
BufferedWriter fw = new BufferedWriter(new FileWriter(new File(dgcDumpDir, queryId), true));
|
||||
BufferedWriter fw = new BufferedWriter(
|
||||
new FileWriter(new File(dgcDumpDir, queryId), true));
|
||||
fw.write(obj.toString(2));
|
||||
fw.flush();
|
||||
fw.close();
|
||||
}
|
||||
catch (Exception e) {
|
||||
} catch (Exception e) {
|
||||
LOG.error("Unable to log logical plan to file", e);
|
||||
}
|
||||
}
|
||||
|
||||
private void analyzeHiveParseTree(ASTNode ast) {
|
||||
String astStr = ast.dump();
|
||||
Tree tab = ast.getChild(0);
|
||||
|
|
@ -326,12 +318,13 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
|
||||
try {
|
||||
|
||||
BufferedWriter fw = new BufferedWriter(new FileWriter(new File(dgcDumpDir, "ASTDump"), true));
|
||||
BufferedWriter fw = new BufferedWriter(
|
||||
new FileWriter(new File(dgcDumpDir, "ASTDump"), true));
|
||||
|
||||
fw.write("Full AST Dump" + astStr);
|
||||
|
||||
|
||||
switch(ast.getToken().getType()) {
|
||||
switch (ast.getToken().getType()) {
|
||||
case HiveParser.TOK_CREATETABLE:
|
||||
|
||||
|
||||
|
|
@ -340,13 +333,15 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
LOG.error("Ignoring malformed Create table statement");
|
||||
}
|
||||
if (tab.getChildCount() == 2) {
|
||||
String dbName = BaseSemanticAnalyzer.unescapeIdentifier(tab.getChild(0).getText());
|
||||
String tableName = BaseSemanticAnalyzer.unescapeIdentifier(tab.getChild(1).getText());
|
||||
String dbName = BaseSemanticAnalyzer
|
||||
.unescapeIdentifier(tab.getChild(0).getText());
|
||||
String tableName = BaseSemanticAnalyzer
|
||||
.unescapeIdentifier(tab.getChild(1).getText());
|
||||
|
||||
fullTableName = dbName + "." + tableName;
|
||||
}
|
||||
else {
|
||||
fullTableName = BaseSemanticAnalyzer.unescapeIdentifier(tab.getChild(0).getText());
|
||||
} else {
|
||||
fullTableName = BaseSemanticAnalyzer
|
||||
.unescapeIdentifier(tab.getChild(0).getText());
|
||||
}
|
||||
LOG.info("Creating table " + fullTableName);
|
||||
int numCh = ast.getChildCount();
|
||||
|
|
@ -361,14 +356,18 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
"Incomplete specification of File Format. " +
|
||||
"You must provide InputFormat, OutputFormat.");
|
||||
}
|
||||
inputFormat = BaseSemanticAnalyzer.unescapeSQLString(child.getChild(0).getText());
|
||||
outputFormat = BaseSemanticAnalyzer.unescapeSQLString(child.getChild(1).getText());
|
||||
inputFormat = BaseSemanticAnalyzer
|
||||
.unescapeSQLString(child.getChild(0).getText());
|
||||
outputFormat = BaseSemanticAnalyzer
|
||||
.unescapeSQLString(child.getChild(1).getText());
|
||||
if (child.getChildCount() == 3) {
|
||||
serde = BaseSemanticAnalyzer.unescapeSQLString(child.getChild(2).getText());
|
||||
serde = BaseSemanticAnalyzer
|
||||
.unescapeSQLString(child.getChild(2).getText());
|
||||
}
|
||||
break;
|
||||
case HiveParser.TOK_STORAGEHANDLER:
|
||||
storageHandler = BaseSemanticAnalyzer.unescapeSQLString(child.getChild(0).getText());
|
||||
storageHandler = BaseSemanticAnalyzer
|
||||
.unescapeSQLString(child.getChild(0).getText());
|
||||
if (child.getChildCount() == 2) {
|
||||
BaseSemanticAnalyzer.readProps(
|
||||
(ASTNode) (child.getChild(1).getChild(0)),
|
||||
|
|
@ -377,7 +376,8 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
break;
|
||||
case HiveParser.TOK_FILEFORMAT_GENERIC:
|
||||
ASTNode grandChild = (ASTNode) child.getChild(0);
|
||||
String name = (grandChild == null ? "" : grandChild.getText()).trim().toUpperCase();
|
||||
String name = (grandChild == null ? "" : grandChild.getText())
|
||||
.trim().toUpperCase();
|
||||
if (name.isEmpty()) {
|
||||
LOG.error("File format in STORED AS clause is empty");
|
||||
break;
|
||||
|
|
@ -395,14 +395,16 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
break;
|
||||
case HiveParser.TOK_LIKETABLE:
|
||||
if (child.getChildCount() > 0) {
|
||||
likeTableName = BaseSemanticAnalyzer.getUnescapedName((ASTNode) child.getChild(0));
|
||||
likeTableName = BaseSemanticAnalyzer
|
||||
.getUnescapedName((ASTNode) child.getChild(0));
|
||||
}
|
||||
break;
|
||||
case HiveParser.TOK_QUERY:
|
||||
ctasNode = child;
|
||||
break;
|
||||
case HiveParser.TOK_TABLECOMMENT:
|
||||
comment = BaseSemanticAnalyzer.unescapeSQLString(child.getChild(0).getText());
|
||||
comment = BaseSemanticAnalyzer
|
||||
.unescapeSQLString(child.getChild(0).getText());
|
||||
break;
|
||||
case HiveParser.TOK_TABLEPARTCOLS:
|
||||
case HiveParser.TOK_TABCOLLIST:
|
||||
|
|
@ -412,13 +414,15 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
rowFormatNode = child;
|
||||
break;
|
||||
case HiveParser.TOK_TABLELOCATION:
|
||||
location = BaseSemanticAnalyzer.unescapeSQLString(child.getChild(0).getText());
|
||||
location = BaseSemanticAnalyzer
|
||||
.unescapeSQLString(child.getChild(0).getText());
|
||||
break;
|
||||
case HiveParser.TOK_TABLEPROPERTIES:
|
||||
break;
|
||||
case HiveParser.TOK_TABLESERIALIZER:
|
||||
child = (ASTNode) child.getChild(0);
|
||||
serde = BaseSemanticAnalyzer.unescapeSQLString(child.getChild(0).getText());
|
||||
serde = BaseSemanticAnalyzer
|
||||
.unescapeSQLString(child.getChild(0).getText());
|
||||
break;
|
||||
case HiveParser.TOK_TABLESKEWED:
|
||||
break;
|
||||
|
|
@ -455,22 +459,23 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
sb.append("\tctasNode: ").append(((ASTNode) ctasNode).dump());
|
||||
}
|
||||
if (rowFormatNode != null) {
|
||||
sb.append("\trowFormatNode: ").append(((ASTNode)rowFormatNode).dump());
|
||||
sb.append("\trowFormatNode: ").append(((ASTNode) rowFormatNode).dump());
|
||||
}
|
||||
fw.write(sb.toString());
|
||||
}
|
||||
fw.flush();
|
||||
fw.close();
|
||||
}
|
||||
catch (Exception e) {
|
||||
} catch (Exception e) {
|
||||
LOG.error("Unable to log logical plan to file", e);
|
||||
}
|
||||
}
|
||||
|
||||
private void parseQuery(String sqlText) throws Exception {
|
||||
ParseDriver parseDriver = new ParseDriver();
|
||||
ASTNode node = parseDriver.parse(sqlText);
|
||||
analyzeHiveParseTree(node);
|
||||
}
|
||||
|
||||
/**
|
||||
* This is an attempt to use the parser. Sematnic issues are not handled here.
|
||||
*
|
||||
|
|
@ -484,13 +489,29 @@ public class HiveHook implements ExecuteWithHookContext, HiveSemanticAnalyzerHoo
|
|||
* @throws SemanticException
|
||||
*/
|
||||
@Override
|
||||
public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) throws SemanticException {
|
||||
public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast)
|
||||
throws SemanticException {
|
||||
analyzeHiveParseTree(ast);
|
||||
return ast;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void postAnalyze(HiveSemanticAnalyzerHookContext context, List<Task<? extends Serializable>> rootTasks) throws SemanticException {
|
||||
public void postAnalyze(HiveSemanticAnalyzerHookContext context,
|
||||
List<Task<? extends Serializable>> rootTasks) throws SemanticException {
|
||||
|
||||
}
|
||||
|
||||
private class MySemanticAnaylzer extends BaseSemanticAnalyzer {
|
||||
public MySemanticAnaylzer(HiveConf conf) throws SemanticException {
|
||||
super(conf);
|
||||
}
|
||||
|
||||
public void analyzeInternal(ASTNode ast) throws SemanticException {
|
||||
throw new RuntimeException("Not implemented");
|
||||
}
|
||||
|
||||
public void setInputs(HashSet<ReadEntity> inputs) {
|
||||
this.inputs = inputs;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -6,9 +6,9 @@
|
|||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -34,12 +34,12 @@ import org.apache.hadoop.metadata.MetadataException;
|
|||
import org.apache.hadoop.metadata.Referenceable;
|
||||
import org.apache.hadoop.metadata.Struct;
|
||||
import org.apache.hadoop.metadata.repository.MetadataRepository;
|
||||
import org.apache.hadoop.metadata.storage.IRepository;
|
||||
import org.apache.hadoop.metadata.storage.Id;
|
||||
import org.apache.hadoop.metadata.storage.RepositoryException;
|
||||
import org.apache.hadoop.metadata.types.IDataType;
|
||||
import org.apache.hadoop.metadata.types.Multiplicity;
|
||||
import org.apache.hadoop.metadata.types.StructType;
|
||||
import org.apache.hadoop.metadata.typesystem.persistence.IRepository;
|
||||
import org.apache.hadoop.metadata.typesystem.persistence.Id;
|
||||
import org.apache.hadoop.metadata.typesystem.persistence.RepositoryException;
|
||||
import org.apache.hadoop.metadata.typesystem.types.IDataType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.Multiplicity;
|
||||
import org.apache.hadoop.metadata.typesystem.types.StructType;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
|
@ -48,10 +48,9 @@ import java.util.List;
|
|||
|
||||
public class HiveImporter {
|
||||
|
||||
private final HiveMetaStoreClient hiveMetastoreClient;
|
||||
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(HiveImporter.class);
|
||||
private final HiveMetaStoreClient hiveMetastoreClient;
|
||||
private IRepository repository;
|
||||
private MetadataRepository graphRepository;
|
||||
private HiveTypeSystem hiveTypeSystem;
|
||||
|
|
@ -64,33 +63,8 @@ public class HiveImporter {
|
|||
private List<Id> processInstances;
|
||||
|
||||
|
||||
private class Pair<L, R> {
|
||||
final L left;
|
||||
final R right;
|
||||
|
||||
public Pair(L left, R right) {
|
||||
this.left = left;
|
||||
this.right = right;
|
||||
}
|
||||
|
||||
public L left() {
|
||||
return this.left;
|
||||
}
|
||||
|
||||
public R right() {
|
||||
return this.right;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
private class InstancePair extends Pair<ITypedReferenceableInstance, Referenceable> {
|
||||
public InstancePair(ITypedReferenceableInstance left, Referenceable right) {
|
||||
super(left, right);
|
||||
}
|
||||
}
|
||||
|
||||
public HiveImporter( MetadataRepository repo, HiveTypeSystem hts, HiveMetaStoreClient hmc)
|
||||
throws RepositoryException {
|
||||
public HiveImporter(MetadataRepository repo, HiveTypeSystem hts, HiveMetaStoreClient hmc)
|
||||
throws RepositoryException {
|
||||
this(hts, hmc);
|
||||
|
||||
if (repo == null) {
|
||||
|
|
@ -102,8 +76,9 @@ public class HiveImporter {
|
|||
|
||||
}
|
||||
|
||||
|
||||
public HiveImporter(IRepository repo, HiveTypeSystem hts, HiveMetaStoreClient hmc)
|
||||
throws RepositoryException {
|
||||
throws RepositoryException {
|
||||
this(hts, hmc);
|
||||
|
||||
if (repo == null) {
|
||||
|
|
@ -174,7 +149,7 @@ public class HiveImporter {
|
|||
}
|
||||
|
||||
private InstancePair createInstance(Referenceable ref)
|
||||
throws MetadataException {
|
||||
throws MetadataException {
|
||||
if (usingMemRepository()) {
|
||||
return new InstancePair(repository.create(ref), null);
|
||||
} else {
|
||||
|
|
@ -187,7 +162,8 @@ public class HiveImporter {
|
|||
System.out.println("creating instance of type " + typeName + " dataType " + dataType
|
||||
+ ", guid: " + guid);
|
||||
|
||||
return new InstancePair(null, new Referenceable(guid, ref.getTypeName(), ref.getValuesMap()));
|
||||
return new InstancePair(null,
|
||||
new Referenceable(guid, ref.getTypeName(), ref.getValuesMap()));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -200,7 +176,6 @@ public class HiveImporter {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
private void importDatabase(String db) throws MetadataException {
|
||||
try {
|
||||
LOG.info("Importing objects from database : " + db);
|
||||
|
|
@ -235,13 +210,15 @@ public class HiveImporter {
|
|||
}
|
||||
}
|
||||
|
||||
private void importTable(String db, String table, InstancePair dbRefTyped) throws MetadataException {
|
||||
private void importTable(String db, String table, InstancePair dbRefTyped)
|
||||
throws MetadataException {
|
||||
try {
|
||||
LOG.info("Importing objects from " + db + "." + table);
|
||||
|
||||
Table hiveTable = hiveMetastoreClient.getTable(db, table);
|
||||
|
||||
Referenceable tableRef = new Referenceable(HiveTypeSystem.DefinedTypes.HIVE_TABLE.name());
|
||||
Referenceable tableRef = new Referenceable(
|
||||
HiveTypeSystem.DefinedTypes.HIVE_TABLE.name());
|
||||
setReferenceInstanceAttribute(tableRef, "dbName", dbRefTyped);
|
||||
tableRef.set("tableName", hiveTable.getTableName());
|
||||
tableRef.set("owner", hiveTable.getOwner());
|
||||
|
|
@ -309,9 +286,10 @@ public class HiveImporter {
|
|||
|
||||
private void importPartitions(String db, String table, InstancePair dbRefTyped,
|
||||
InstancePair tableRefTyped, InstancePair sdRefTyped)
|
||||
throws MetadataException {
|
||||
throws MetadataException {
|
||||
try {
|
||||
List<Partition> tableParts = hiveMetastoreClient.listPartitions(db, table, Short.MAX_VALUE);
|
||||
List<Partition> tableParts = hiveMetastoreClient
|
||||
.listPartitions(db, table, Short.MAX_VALUE);
|
||||
if (tableParts.size() > 0) {
|
||||
for (Partition hivePart : tableParts) {
|
||||
importPartition(hivePart, dbRefTyped, tableRefTyped, sdRefTyped);
|
||||
|
|
@ -322,19 +300,21 @@ public class HiveImporter {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
private void importPartition(Partition hivePart,
|
||||
InstancePair dbRefTyped, InstancePair tableRefTyped, InstancePair sdRefTyped)
|
||||
throws MetadataException {
|
||||
InstancePair dbRefTyped, InstancePair tableRefTyped,
|
||||
InstancePair sdRefTyped)
|
||||
throws MetadataException {
|
||||
try {
|
||||
Referenceable partRef = new Referenceable(HiveTypeSystem.DefinedTypes.HIVE_PARTITION.name());
|
||||
Referenceable partRef = new Referenceable(
|
||||
HiveTypeSystem.DefinedTypes.HIVE_PARTITION.name());
|
||||
partRef.set("values", hivePart.getValues());
|
||||
setReferenceInstanceAttribute(partRef, "dbName", dbRefTyped);
|
||||
setReferenceInstanceAttribute(partRef, "tableName", tableRefTyped);
|
||||
partRef.set("createTime", hivePart.getCreateTime());
|
||||
partRef.set("lastAccessTime", hivePart.getLastAccessTime());
|
||||
//sdStruct = fillStorageDescStruct(hivePart.getSd());
|
||||
// Instead of creating copies of the sdstruct for partitions we are reusing existing ones
|
||||
// Instead of creating copies of the sdstruct for partitions we are reusing existing
|
||||
// ones
|
||||
// will fix to identify partitions with differing schema.
|
||||
setReferenceInstanceAttribute(partRef, "sd", sdRefTyped);
|
||||
partRef.set("parameters", hivePart.getParameters());
|
||||
|
|
@ -347,9 +327,9 @@ public class HiveImporter {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
private void importIndexes(String db, String table, InstancePair dbRefTyped, Referenceable tableRef)
|
||||
throws MetadataException {
|
||||
private void importIndexes(String db, String table, InstancePair dbRefTyped,
|
||||
Referenceable tableRef)
|
||||
throws MetadataException {
|
||||
try {
|
||||
List<Index> indexes = hiveMetastoreClient.listIndexes(db, table, Short.MAX_VALUE);
|
||||
if (indexes.size() > 0) {
|
||||
|
|
@ -364,9 +344,10 @@ public class HiveImporter {
|
|||
|
||||
private void importIndex(Index index,
|
||||
InstancePair dbRefTyped, Referenceable tableRef)
|
||||
throws MetadataException {
|
||||
throws MetadataException {
|
||||
try {
|
||||
Referenceable indexRef = new Referenceable(HiveTypeSystem.DefinedTypes.HIVE_INDEX.name());
|
||||
Referenceable indexRef = new Referenceable(
|
||||
HiveTypeSystem.DefinedTypes.HIVE_INDEX.name());
|
||||
indexRef.set("indexName", index.getIndexName());
|
||||
indexRef.set("indexHandlerClass", index.getIndexHandlerClass());
|
||||
setReferenceInstanceAttribute(indexRef, "dbName", dbRefTyped);
|
||||
|
|
@ -389,7 +370,8 @@ public class HiveImporter {
|
|||
}
|
||||
|
||||
private InstancePair fillStorageDescStruct(StorageDescriptor storageDesc) throws Exception {
|
||||
Referenceable sdRef = new Referenceable(HiveTypeSystem.DefinedTypes.HIVE_STORAGEDESC.name());
|
||||
Referenceable sdRef = new Referenceable(
|
||||
HiveTypeSystem.DefinedTypes.HIVE_STORAGEDESC.name());
|
||||
|
||||
SerDeInfo serdeInfo = storageDesc.getSerdeInfo();
|
||||
// SkewedInfo skewedInfo = storageDesc.getSkewedInfo();
|
||||
|
|
@ -421,7 +403,8 @@ public class HiveImporter {
|
|||
//if (skewedInfo.getSkewedColNames().size() > 0) {
|
||||
// skewedInfoStruct.set("skewedColNames", skewedInfo.getSkewedColNames());
|
||||
// skewedInfoStruct.set("skewedColValues", skewedInfo.getSkewedColValues());
|
||||
// skewedInfoStruct.set("skewedColValueLocationMaps", skewedInfo.getSkewedColValueLocationMaps());
|
||||
// skewedInfoStruct.set("skewedColValueLocationMaps", skewedInfo
|
||||
// .getSkewedColValueLocationMaps());
|
||||
// StructType skewedInfotype = (StructType) hiveTypeSystem.getDataType(skewedInfoName);
|
||||
// ITypedStruct skewedInfoStructTyped =
|
||||
// skewedInfotype.convert(skewedInfoStruct, Multiplicity.OPTIONAL);
|
||||
|
|
@ -487,4 +470,28 @@ public class HiveImporter {
|
|||
|
||||
return sdRefTyped;
|
||||
}
|
||||
|
||||
private class Pair<L, R> {
|
||||
final L left;
|
||||
final R right;
|
||||
|
||||
public Pair(L left, R right) {
|
||||
this.left = left;
|
||||
this.right = right;
|
||||
}
|
||||
|
||||
public L left() {
|
||||
return this.left;
|
||||
}
|
||||
|
||||
public R right() {
|
||||
return this.right;
|
||||
}
|
||||
}
|
||||
|
||||
private class InstancePair extends Pair<ITypedReferenceableInstance, Referenceable> {
|
||||
public InstancePair(ITypedReferenceableInstance left, Referenceable right) {
|
||||
super(left, right);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -6,9 +6,9 @@
|
|||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -20,18 +20,18 @@ package org.apache.hadoop.metadata.hivetypes;
|
|||
|
||||
import com.google.common.collect.ImmutableList;
|
||||
import org.apache.hadoop.metadata.MetadataException;
|
||||
import org.apache.hadoop.metadata.types.AttributeDefinition;
|
||||
import org.apache.hadoop.metadata.types.ClassType;
|
||||
import org.apache.hadoop.metadata.types.DataTypes;
|
||||
import org.apache.hadoop.metadata.types.EnumTypeDefinition;
|
||||
import org.apache.hadoop.metadata.types.EnumValue;
|
||||
import org.apache.hadoop.metadata.types.HierarchicalType;
|
||||
import org.apache.hadoop.metadata.types.HierarchicalTypeDefinition;
|
||||
import org.apache.hadoop.metadata.types.IDataType;
|
||||
import org.apache.hadoop.metadata.types.Multiplicity;
|
||||
import org.apache.hadoop.metadata.types.StructTypeDefinition;
|
||||
import org.apache.hadoop.metadata.types.TraitType;
|
||||
import org.apache.hadoop.metadata.types.TypeSystem;
|
||||
import org.apache.hadoop.metadata.typesystem.types.AttributeDefinition;
|
||||
import org.apache.hadoop.metadata.typesystem.types.ClassType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.DataTypes;
|
||||
import org.apache.hadoop.metadata.typesystem.types.EnumTypeDefinition;
|
||||
import org.apache.hadoop.metadata.typesystem.types.EnumValue;
|
||||
import org.apache.hadoop.metadata.typesystem.types.HierarchicalType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.HierarchicalTypeDefinition;
|
||||
import org.apache.hadoop.metadata.typesystem.types.IDataType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.Multiplicity;
|
||||
import org.apache.hadoop.metadata.typesystem.types.StructTypeDefinition;
|
||||
import org.apache.hadoop.metadata.typesystem.types.TraitType;
|
||||
import org.apache.hadoop.metadata.typesystem.types.TypeSystem;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
|
@ -45,56 +45,14 @@ public class HiveTypeSystem {
|
|||
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(HiveTypeSystem.class);
|
||||
public static final class Holder {
|
||||
public static final HiveTypeSystem instance = new HiveTypeSystem();
|
||||
}
|
||||
|
||||
private boolean valid = false;
|
||||
|
||||
public enum DefinedTypes {
|
||||
|
||||
// Enums
|
||||
HIVE_OBJECTTYPE,
|
||||
HIVE_PRINCIPALTYPE,
|
||||
HIVE_RESOURCETYPE,
|
||||
HIVE_FUNCTIONTYPE,
|
||||
|
||||
// Structs
|
||||
HIVE_SERDE,
|
||||
HIVE_SKEWEDINFO,
|
||||
HIVE_ORDER,
|
||||
HIVE_RESOURCEURI,
|
||||
|
||||
|
||||
// Classes
|
||||
HIVE_DB,
|
||||
HIVE_STORAGEDESC,
|
||||
HIVE_TABLE,
|
||||
HIVE_COLUMN,
|
||||
HIVE_PARTITION,
|
||||
HIVE_INDEX,
|
||||
HIVE_FUNCTION,
|
||||
HIVE_ROLE,
|
||||
HIVE_TYPE,
|
||||
HIVE_PROCESS,
|
||||
//HIVE_VIEW,
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
private Map<String, HierarchicalTypeDefinition<ClassType>> classTypeDefinitions;
|
||||
private Map<String, EnumTypeDefinition> enumTypeDefinitionMap;
|
||||
private Map<String, StructTypeDefinition> structTypeDefinitionMap;
|
||||
|
||||
private DataTypes.MapType mapStrToStrMap;
|
||||
private DataTypes.ArrayType strArrayType;
|
||||
private Map<String, IDataType> typeMap;
|
||||
private List<IDataType> enumTypes;
|
||||
|
||||
|
||||
// private static Multiplicity ZeroOrMore = new Multiplicity(0, Integer.MAX_VALUE, true);
|
||||
|
||||
private HiveTypeSystem() {
|
||||
classTypeDefinitions = new HashMap<>();
|
||||
enumTypeDefinitionMap = new HashMap<>();
|
||||
|
|
@ -103,6 +61,19 @@ public class HiveTypeSystem {
|
|||
enumTypes = new ArrayList<>();
|
||||
}
|
||||
|
||||
public synchronized static HiveTypeSystem getInstance() throws MetadataException {
|
||||
HiveTypeSystem hs = Holder.instance;
|
||||
if (hs.valid) {
|
||||
LOG.info("Returning pre-initialized HiveTypeSystem singleton");
|
||||
return hs;
|
||||
}
|
||||
hs.initialize();
|
||||
return hs;
|
||||
}
|
||||
|
||||
|
||||
// private static Multiplicity ZeroOrMore = new Multiplicity(0, Integer.MAX_VALUE, true);
|
||||
|
||||
private void initialize() throws MetadataException {
|
||||
|
||||
LOG.info("Initializing the Hive Typesystem");
|
||||
|
|
@ -151,17 +122,6 @@ public class HiveTypeSystem {
|
|||
return typeMap;
|
||||
}
|
||||
|
||||
public synchronized static HiveTypeSystem getInstance() throws MetadataException {
|
||||
HiveTypeSystem hs = Holder.instance;
|
||||
if (hs.valid) {
|
||||
LOG.info("Returning pre-initialized HiveTypeSystem singleton");
|
||||
return hs;
|
||||
}
|
||||
hs.initialize();
|
||||
return hs;
|
||||
}
|
||||
|
||||
|
||||
public IDataType getDataType(String typeName) {
|
||||
return typeMap.get(typeName);
|
||||
}
|
||||
|
|
@ -188,12 +148,10 @@ public class HiveTypeSystem {
|
|||
return ImmutableList.copyOf(enumTypeDefinitionMap.values());
|
||||
}
|
||||
|
||||
|
||||
public ImmutableList<StructTypeDefinition> getStructTypeDefinitions() {
|
||||
return ImmutableList.copyOf(structTypeDefinitionMap.values());
|
||||
}
|
||||
|
||||
|
||||
public ImmutableList<HierarchicalTypeDefinition<ClassType>> getClassTypeDefinitions() {
|
||||
return ImmutableList.copyOf(classTypeDefinitions.values());
|
||||
}
|
||||
|
|
@ -224,7 +182,7 @@ public class HiveTypeSystem {
|
|||
new EnumValue("GROUP", 3),
|
||||
};
|
||||
|
||||
EnumTypeDefinition definition = new EnumTypeDefinition(
|
||||
EnumTypeDefinition definition = new EnumTypeDefinition(
|
||||
DefinedTypes.HIVE_PRINCIPALTYPE.name(), values);
|
||||
|
||||
enumTypeDefinitionMap.put(DefinedTypes.HIVE_PRINCIPALTYPE.name(), definition);
|
||||
|
|
@ -237,7 +195,7 @@ public class HiveTypeSystem {
|
|||
new EnumValue("JAVA", 1),
|
||||
};
|
||||
|
||||
EnumTypeDefinition definition = new EnumTypeDefinition(
|
||||
EnumTypeDefinition definition = new EnumTypeDefinition(
|
||||
DefinedTypes.HIVE_FUNCTIONTYPE.name(), values);
|
||||
enumTypeDefinitionMap.put(DefinedTypes.HIVE_FUNCTIONTYPE.name(), definition);
|
||||
LOG.debug("Created definition for " + DefinedTypes.HIVE_FUNCTIONTYPE.name());
|
||||
|
|
@ -250,104 +208,136 @@ public class HiveTypeSystem {
|
|||
new EnumValue("FILE", 2),
|
||||
new EnumValue("ARCHIVE", 3),
|
||||
};
|
||||
EnumTypeDefinition definition = new EnumTypeDefinition(
|
||||
EnumTypeDefinition definition = new EnumTypeDefinition(
|
||||
DefinedTypes.HIVE_RESOURCETYPE.name(), values);
|
||||
enumTypeDefinitionMap.put(DefinedTypes.HIVE_RESOURCETYPE.name(), definition);
|
||||
LOG.debug("Created definition for " + DefinedTypes.HIVE_RESOURCETYPE.name());
|
||||
|
||||
}
|
||||
|
||||
|
||||
private void createSerDeStruct() throws MetadataException {
|
||||
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
|
||||
new AttributeDefinition("name", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("serializationLib", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("parameters", mapStrToStrMap.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("name", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("serializationLib", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("parameters", mapStrToStrMap.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
};
|
||||
StructTypeDefinition definition = new StructTypeDefinition(DefinedTypes.HIVE_SERDE.name(), attributeDefinitions);
|
||||
StructTypeDefinition definition = new StructTypeDefinition(DefinedTypes.HIVE_SERDE.name(),
|
||||
attributeDefinitions);
|
||||
structTypeDefinitionMap.put(DefinedTypes.HIVE_SERDE.name(), definition);
|
||||
|
||||
LOG.debug("Created definition for " + DefinedTypes.HIVE_SERDE.name());
|
||||
|
||||
}
|
||||
|
||||
/** Revisit later after nested array types are handled by the typesystem **/
|
||||
/**
|
||||
private void createSkewedInfoStruct() throws MetadataException {
|
||||
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
|
||||
new AttributeDefinition("skewedColNames", String.format("array<%s>", DataTypes.STRING_TYPE.getName()),
|
||||
ZeroOrMore, false, null),
|
||||
new AttributeDefinition("skewedColValues", String.format("array<%s>", strArrayType.getName()),
|
||||
ZeroOrMore, false, null),
|
||||
new AttributeDefinition("skewedColValueLocationMaps", mapStrToStrMap.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
};
|
||||
StructTypeDefinition definition = new StructTypeDefinition(DefinedTypes.HIVE_SKEWEDINFO.name(), attributeDefinitions);
|
||||
private void createSkewedInfoStruct() throws MetadataException {
|
||||
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
|
||||
new AttributeDefinition("skewedColNames", String.format("array<%s>", DataTypes.STRING_TYPE
|
||||
.getName()),
|
||||
ZeroOrMore, false, null),
|
||||
new AttributeDefinition("skewedColValues", String.format("array<%s>", strArrayType.getName()),
|
||||
ZeroOrMore, false, null),
|
||||
new AttributeDefinition("skewedColValueLocationMaps", mapStrToStrMap.getName(), Multiplicity
|
||||
.OPTIONAL, false, null),
|
||||
};
|
||||
StructTypeDefinition definition = new StructTypeDefinition(DefinedTypes.HIVE_SKEWEDINFO.name
|
||||
(), attributeDefinitions);
|
||||
|
||||
structTypeDefinitionMap.put(DefinedTypes.HIVE_SKEWEDINFO.name(), definition);
|
||||
LOG.debug("Created definition for " + DefinedTypes.HIVE_SKEWEDINFO.name());
|
||||
structTypeDefinitionMap.put(DefinedTypes.HIVE_SKEWEDINFO.name(), definition);
|
||||
LOG.debug("Created definition for " + DefinedTypes.HIVE_SKEWEDINFO.name());
|
||||
|
||||
}
|
||||
**/
|
||||
}
|
||||
**/
|
||||
|
||||
private void createOrderStruct() throws MetadataException {
|
||||
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
|
||||
new AttributeDefinition("col", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("order", DataTypes.INT_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("col", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("order", DataTypes.INT_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
};
|
||||
|
||||
StructTypeDefinition definition = new StructTypeDefinition(DefinedTypes.HIVE_ORDER.name(), attributeDefinitions);
|
||||
StructTypeDefinition definition = new StructTypeDefinition(DefinedTypes.HIVE_ORDER.name(),
|
||||
attributeDefinitions);
|
||||
|
||||
structTypeDefinitionMap.put(DefinedTypes.HIVE_ORDER.name(), definition);
|
||||
LOG.debug("Created definition for " + DefinedTypes.HIVE_ORDER.name());
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
||||
private void createStorageDescClass() throws MetadataException {
|
||||
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
|
||||
new AttributeDefinition("cols", String.format("array<%s>", DefinedTypes.HIVE_COLUMN.name()), Multiplicity.COLLECTION, false, null),
|
||||
new AttributeDefinition("location", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("inputFormat", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("outputFormat", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("compressed", DataTypes.BOOLEAN_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("numBuckets", DataTypes.INT_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("serdeInfo", DefinedTypes.HIVE_SERDE.name(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("bucketCols", String.format("array<%s>", DataTypes.STRING_TYPE.getName()), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("sortCols", String.format("array<%s>", DefinedTypes.HIVE_ORDER.name()), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("parameters", mapStrToStrMap.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
//new AttributeDefinition("skewedInfo", DefinedTypes.HIVE_SKEWEDINFO.name(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("storedAsSubDirectories", DataTypes.BOOLEAN_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("cols",
|
||||
String.format("array<%s>", DefinedTypes.HIVE_COLUMN.name()),
|
||||
Multiplicity.COLLECTION, false, null),
|
||||
new AttributeDefinition("location", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("inputFormat", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("outputFormat", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("compressed", DataTypes.BOOLEAN_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("numBuckets", DataTypes.INT_TYPE.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("serdeInfo", DefinedTypes.HIVE_SERDE.name(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("bucketCols",
|
||||
String.format("array<%s>", DataTypes.STRING_TYPE.getName()),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("sortCols",
|
||||
String.format("array<%s>", DefinedTypes.HIVE_ORDER.name()),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("parameters", mapStrToStrMap.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
//new AttributeDefinition("skewedInfo", DefinedTypes.HIVE_SKEWEDINFO.name(),
|
||||
// Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("storedAsSubDirectories", DataTypes.BOOLEAN_TYPE.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
|
||||
};
|
||||
HierarchicalTypeDefinition<ClassType> definition =
|
||||
new HierarchicalTypeDefinition<>(ClassType.class, DefinedTypes.HIVE_STORAGEDESC.name(),
|
||||
new HierarchicalTypeDefinition<>(ClassType.class,
|
||||
DefinedTypes.HIVE_STORAGEDESC.name(),
|
||||
null, attributeDefinitions);
|
||||
classTypeDefinitions.put(DefinedTypes.HIVE_STORAGEDESC.name(), definition);
|
||||
LOG.debug("Created definition for " + DefinedTypes.HIVE_STORAGEDESC.name());
|
||||
|
||||
}
|
||||
|
||||
/** Revisit later after nested array types are handled by the typesystem **/
|
||||
|
||||
private void createResourceUriStruct() throws MetadataException {
|
||||
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
|
||||
new AttributeDefinition("resourceType", DefinedTypes.HIVE_RESOURCETYPE.name(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("uri", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("resourceType", DefinedTypes.HIVE_RESOURCETYPE.name(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("uri", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
};
|
||||
StructTypeDefinition definition = new StructTypeDefinition(DefinedTypes.HIVE_RESOURCEURI.name(), attributeDefinitions);
|
||||
StructTypeDefinition definition = new StructTypeDefinition(
|
||||
DefinedTypes.HIVE_RESOURCEURI.name(), attributeDefinitions);
|
||||
structTypeDefinitionMap.put(DefinedTypes.HIVE_RESOURCEURI.name(), definition);
|
||||
LOG.debug("Created definition for " + DefinedTypes.HIVE_RESOURCEURI.name());
|
||||
|
||||
}
|
||||
|
||||
|
||||
private void createDBClass() throws MetadataException {
|
||||
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
|
||||
new AttributeDefinition("name", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("description", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("locationUri", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("parameters", mapStrToStrMap.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("ownerName", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("ownerType", DefinedTypes.HIVE_PRINCIPALTYPE.name(), Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("name", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("description", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("locationUri", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.REQUIRED, false, null),
|
||||
new AttributeDefinition("parameters", mapStrToStrMap.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("ownerName", DataTypes.STRING_TYPE.getName(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
new AttributeDefinition("ownerType", DefinedTypes.HIVE_PRINCIPALTYPE.name(),
|
||||
Multiplicity.OPTIONAL, false, null),
|
||||
};
|
||||
|
||||
HierarchicalTypeDefinition<ClassType> definition =
|
||||
|
|
@ -361,9 +351,12 @@ public class HiveTypeSystem {
|
|||
|
||||
private void createTypeClass() throws MetadataException {
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
new AttributeDefinition("name", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("type1", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("type2", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("name", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("type1", DataTypes.STRING_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("type2", DataTypes.STRING_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("fields", String.format("array<%s>",
DefinedTypes.HIVE_COLUMN.name()), Multiplicity.OPTIONAL, false, null),
};
@@ -376,13 +369,16 @@ public class HiveTypeSystem {
}


private void createColumnClass() throws MetadataException {
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
new AttributeDefinition("name", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
//new AttributeDefinition("type", DefinedTypes.HIVE_TYPE.name(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("type", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("comment", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("name", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
//new AttributeDefinition("type", DefinedTypes.HIVE_TYPE.name(), Multiplicity
// .REQUIRED, false, null),
new AttributeDefinition("type", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("comment", DataTypes.STRING_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
};
HierarchicalTypeDefinition<ClassType> definition =
new HierarchicalTypeDefinition<>(ClassType.class, DefinedTypes.HIVE_COLUMN.name(),
@@ -394,23 +390,31 @@ public class HiveTypeSystem {
}


private void createPartitionClass() throws MetadataException {

AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
new AttributeDefinition("values", DataTypes.STRING_TYPE.getName(), Multiplicity.COLLECTION, false, null),
new AttributeDefinition("dbName", DefinedTypes.HIVE_DB.name(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("tableName", DefinedTypes.HIVE_TABLE.name(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("createTime", DataTypes.INT_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("lastAccessTime", DataTypes.INT_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("sd", DefinedTypes.HIVE_STORAGEDESC.name(), Multiplicity.REQUIRED, false, null),
//new AttributeDefinition("columns", String.format("array<%s>", DefinedTypes.HIVE_COLUMN.name()),
new AttributeDefinition("values", DataTypes.STRING_TYPE.getName(),
Multiplicity.COLLECTION, false, null),
new AttributeDefinition("dbName", DefinedTypes.HIVE_DB.name(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("tableName", DefinedTypes.HIVE_TABLE.name(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("createTime", DataTypes.INT_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("lastAccessTime", DataTypes.INT_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("sd", DefinedTypes.HIVE_STORAGEDESC.name(),
Multiplicity.REQUIRED, false, null),
//new AttributeDefinition("columns", String.format("array<%s>", DefinedTypes
// .HIVE_COLUMN.name()),
// Multiplicity.COLLECTION, true, null),
new AttributeDefinition("parameters", mapStrToStrMap.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("parameters", mapStrToStrMap.getName(),
Multiplicity.OPTIONAL, false, null),

};
HierarchicalTypeDefinition<ClassType> definition =
new HierarchicalTypeDefinition<>(ClassType.class, DefinedTypes.HIVE_PARTITION.name(),
new HierarchicalTypeDefinition<>(ClassType.class,
DefinedTypes.HIVE_PARTITION.name(),
null, attributeDefinitions);
classTypeDefinitions.put(DefinedTypes.HIVE_PARTITION.name(), definition);
LOG.debug("Created definition for " + DefinedTypes.HIVE_PARTITION.name());
@@ -420,22 +424,36 @@ public class HiveTypeSystem {
private void createTableClass() throws MetadataException {
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
new AttributeDefinition("tableName", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("dbName", DefinedTypes.HIVE_DB.name(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("owner", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("createTime", DataTypes.INT_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("lastAccessTime", DataTypes.INT_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("retention", DataTypes.INT_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("sd", DefinedTypes.HIVE_STORAGEDESC.name(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("partitionKeys", String.format("array<%s>", DefinedTypes.HIVE_COLUMN.name()),
new AttributeDefinition("tableName", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("dbName", DefinedTypes.HIVE_DB.name(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("owner", DataTypes.STRING_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
//new AttributeDefinition("columns", String.format("array<%s>", DefinedTypes.HIVE_COLUMN.name()),
new AttributeDefinition("createTime", DataTypes.INT_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("lastAccessTime", DataTypes.INT_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("retention", DataTypes.INT_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("sd", DefinedTypes.HIVE_STORAGEDESC.name(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("partitionKeys",
String.format("array<%s>", DefinedTypes.HIVE_COLUMN.name()),
Multiplicity.OPTIONAL, false, null),
//new AttributeDefinition("columns", String.format("array<%s>", DefinedTypes
// .HIVE_COLUMN.name()),
// Multiplicity.COLLECTION, true, null),
new AttributeDefinition("parameters", mapStrToStrMap.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("viewOriginalText", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("viewExpandedText", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("tableType", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("temporary", DataTypes.BOOLEAN_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("parameters", mapStrToStrMap.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("viewOriginalText", DataTypes.STRING_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("viewExpandedText", DataTypes.STRING_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("tableType", DataTypes.STRING_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("temporary", DataTypes.BOOLEAN_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
};
HierarchicalTypeDefinition<ClassType> definition =
new HierarchicalTypeDefinition<>(ClassType.class, DefinedTypes.HIVE_TABLE.name(),
@@ -447,16 +465,26 @@ public class HiveTypeSystem {
private void createIndexClass() throws MetadataException {
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
new AttributeDefinition("indexName", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("indexHandlerClass", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("dbName", DefinedTypes.HIVE_DB.name(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("createTime", DataTypes.INT_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("lastAccessTime", DataTypes.INT_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("origTableName", DefinedTypes.HIVE_TABLE.name(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("indexTableName", DefinedTypes.HIVE_TABLE.name(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("sd", DefinedTypes.HIVE_STORAGEDESC.name(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("parameters", mapStrToStrMap.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("deferredRebuild", DataTypes.BOOLEAN_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("indexName", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("indexHandlerClass", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("dbName", DefinedTypes.HIVE_DB.name(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("createTime", DataTypes.INT_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("lastAccessTime", DataTypes.INT_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("origTableName", DefinedTypes.HIVE_TABLE.name(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("indexTableName", DefinedTypes.HIVE_TABLE.name(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("sd", DefinedTypes.HIVE_STORAGEDESC.name(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("parameters", mapStrToStrMap.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("deferredRebuild", DataTypes.BOOLEAN_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
};

HierarchicalTypeDefinition<ClassType> definition =
@@ -469,14 +497,22 @@ public class HiveTypeSystem {
private void createFunctionClass() throws MetadataException {
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
new AttributeDefinition("functionName", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("dbName", DefinedTypes.HIVE_DB.name(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("className", DataTypes.INT_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("ownerName", DataTypes.INT_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("ownerType", DefinedTypes.HIVE_PRINCIPALTYPE.name(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("createTime", DataTypes.INT_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("functionType", DefinedTypes.HIVE_FUNCTIONTYPE.name(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("resourceUris", DefinedTypes.HIVE_RESOURCEURI.name(), Multiplicity.COLLECTION, false, null),
new AttributeDefinition("functionName", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("dbName", DefinedTypes.HIVE_DB.name(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("className", DataTypes.INT_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("ownerName", DataTypes.INT_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("ownerType", DefinedTypes.HIVE_PRINCIPALTYPE.name(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("createTime", DataTypes.INT_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("functionType", DefinedTypes.HIVE_FUNCTIONTYPE.name(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("resourceUris", DefinedTypes.HIVE_RESOURCEURI.name(),
Multiplicity.COLLECTION, false, null),
};

HierarchicalTypeDefinition<ClassType> definition =
@@ -489,9 +525,12 @@ public class HiveTypeSystem {
private void createRoleClass() throws MetadataException {
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
new AttributeDefinition("roleName", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("createTime", DataTypes.INT_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("ownerName", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("roleName", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("createTime", DataTypes.INT_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("ownerName", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
};
HierarchicalTypeDefinition<ClassType> definition =
new HierarchicalTypeDefinition<>(ClassType.class, DefinedTypes.HIVE_ROLE.name(),
@@ -502,19 +541,30 @@ public class HiveTypeSystem {
}


private void createProcessClass() throws MetadataException {
AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
new AttributeDefinition("processName", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("startTime", DataTypes.INT_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("endTime", DataTypes.INT_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("userName", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("sourceTableNames", String.format("array<%s>", DefinedTypes.HIVE_TABLE.name()), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("targetTableNames", String.format("array<%s>", DefinedTypes.HIVE_TABLE.name()), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("queryText", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("queryPlan", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("queryId", DataTypes.STRING_TYPE.getName(), Multiplicity.REQUIRED, false, null),
new AttributeDefinition("queryGraph", DataTypes.STRING_TYPE.getName(), Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("processName", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("startTime", DataTypes.INT_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("endTime", DataTypes.INT_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("userName", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("sourceTableNames",
String.format("array<%s>", DefinedTypes.HIVE_TABLE.name()),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("targetTableNames",
String.format("array<%s>", DefinedTypes.HIVE_TABLE.name()),
Multiplicity.OPTIONAL, false, null),
new AttributeDefinition("queryText", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("queryPlan", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("queryId", DataTypes.STRING_TYPE.getName(),
Multiplicity.REQUIRED, false, null),
new AttributeDefinition("queryGraph", DataTypes.STRING_TYPE.getName(),
Multiplicity.OPTIONAL, false, null),
};
HierarchicalTypeDefinition<ClassType> definition =
new HierarchicalTypeDefinition<>(ClassType.class, DefinedTypes.HIVE_PROCESS.name(),
@@ -525,4 +575,38 @@ public class HiveTypeSystem {
}

public enum DefinedTypes {

// Enums
HIVE_OBJECTTYPE,
HIVE_PRINCIPALTYPE,
HIVE_RESOURCETYPE,
HIVE_FUNCTIONTYPE,

// Structs
HIVE_SERDE,
HIVE_SKEWEDINFO,
HIVE_ORDER,
HIVE_RESOURCEURI,


// Classes
HIVE_DB,
HIVE_STORAGEDESC,
HIVE_TABLE,
HIVE_COLUMN,
HIVE_PARTITION,
HIVE_INDEX,
HIVE_FUNCTION,
HIVE_ROLE,
HIVE_TYPE,
HIVE_PROCESS,
//HIVE_VIEW,

}

public static final class Holder {
public static final HiveTypeSystem instance = new HiveTypeSystem();
}

}
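For reference, a minimal sketch of another struct definition written in the wrapped-argument style this patch standardizes on. It is illustrative only: it reuses the constructors, fields (structTypeDefinitionMap, LOG) and helper types already visible in this diff, and the attribute names "col" and "order" for HIVE_ORDER are assumptions, not taken from the patch.

// Hypothetical sketch: a HIVE_ORDER struct definition in the new wrapped style.
// Attribute names are assumed; constructor shapes mirror the methods above.
private void createOrderStruct() throws MetadataException {
    AttributeDefinition[] attributeDefinitions = new AttributeDefinition[]{
            new AttributeDefinition("col", DataTypes.STRING_TYPE.getName(),
                    Multiplicity.REQUIRED, false, null),
            new AttributeDefinition("order", DataTypes.INT_TYPE.getName(),
                    Multiplicity.REQUIRED, false, null),
    };
    StructTypeDefinition definition = new StructTypeDefinition(
            DefinedTypes.HIVE_ORDER.name(), attributeDefinitions);
    structTypeDefinitionMap.put(DefinedTypes.HIVE_ORDER.name(), definition);
    LOG.debug("Created definition for " + DefinedTypes.HIVE_ORDER.name());
}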
@@ -0,0 +1,137 @@
<!--
~ Licensed to the Apache Software Foundation (ASF) under one
~ or more contributor license agreements. See the NOTICE file
~ distributed with this work for additional information
~ regarding copyright ownership. The ASF licenses this file
~ to you under the Apache License, Version 2.0 (the
~ "License"); you may not use this file except in compliance
~ with the License. You may obtain a copy of the License at
~
~ http://www.apache.org/licenses/LICENSE-2.0
~
~ Unless required by applicable law or agreed to in writing, software
~ distributed under the License is distributed on an "AS IS" BASIS,
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~ See the License for the specific language governing permissions and
~ limitations under the License.
-->

<!--Wed Feb 4 03:23:58 2015-->
<configuration>

<property>
<name>fs.defaultFS</name>
<value>hdfs://c6501.ambari.apache.org:8020</value>
<final>true</final>
</property>

<property>
<name>fs.trash.interval</name>
<value>360</value>
</property>

<property>
<name>hadoop.http.authentication.simple.anonymous.allowed</name>
<value>true</value>
</property>

<property>
<name>hadoop.proxyuser.hcat.groups</name>
<value>users</value>
</property>

<property>
<name>hadoop.proxyuser.hcat.hosts</name>
<value>c6501.ambari.apache.org</value>
</property>

<property>
<name>hadoop.proxyuser.hive.groups</name>
<value>users</value>
</property>

<property>
<name>hadoop.proxyuser.hive.hosts</name>
<value>c6501.ambari.apache.org</value>
</property>

<property>
<name>hadoop.proxyuser.oozie.groups</name>
<value>*</value>
</property>

<property>
<name>hadoop.proxyuser.oozie.hosts</name>
<value>c6501.ambari.apache.org</value>
</property>

<property>
<name>hadoop.security.auth_to_local</name>
<value>
RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
RULE:[2:$1@$0](hm@.*)s/.*/hbase/
RULE:[2:$1@$0](rs@.*)s/.*/hbase/
DEFAULT
</value>
</property>

<property>
<name>hadoop.security.authentication</name>
<value>simple</value>
</property>

<property>
<name>hadoop.security.authorization</name>
<value>false</value>
</property>

<property>
<name>io.compression.codecs</name>
<value>
org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.SnappyCodec
</value>
</property>

<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>

<property>
<name>io.serializations</name>
<value>org.apache.hadoop.io.serializer.WritableSerialization</value>
</property>

<property>
<name>ipc.client.connect.max.retries</name>
<value>50</value>
</property>

<property>
<name>ipc.client.connection.maxidletime</name>
<value>30000</value>
</property>

<property>
<name>ipc.client.idlethreshold</name>
<value>8000</value>
</property>

<property>
<name>ipc.server.tcpnodelay</name>
<value>true</value>
</property>

<property>
<name>mapreduce.jobtracker.webinterface.trusted</name>
<value>false</value>
</property>

<property>
<name>proxyuser_group</name>
<value>users</value>
</property>

</configuration>
@@ -0,0 +1,259 @@
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one
|
||||
~ or more contributor license agreements. See the NOTICE file
|
||||
~ distributed with this work for additional information
|
||||
~ regarding copyright ownership. The ASF licenses this file
|
||||
~ to you under the Apache License, Version 2.0 (the
|
||||
~ "License"); you may not use this file except in compliance
|
||||
~ with the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!--Wed Feb 4 03:23:35 2015-->
|
||||
<configuration>
|
||||
|
||||
<property>
|
||||
<name>dfs.block.access.token.enable</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.blockreport.initialDelay</name>
|
||||
<value>120</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.blocksize</name>
|
||||
<value>134217728</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.client.read.shortcircuit</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.client.read.shortcircuit.streams.cache.size</name>
|
||||
<value>4096</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.cluster.administrators</name>
|
||||
<value>hdfs</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.address</name>
|
||||
<value>0.0.0.0:50010</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.balance.bandwidthPerSec</name>
|
||||
<value>6250000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.data.dir</name>
|
||||
<value>/hadoop/hdfs/data</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.data.dir.perm</name>
|
||||
<value>750</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.du.reserved</name>
|
||||
<value>1073741824</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.failed.volumes.tolerated</name>
|
||||
<value>0</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.http.address</name>
|
||||
<value>0.0.0.0:50075</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.https.address</name>
|
||||
<value>0.0.0.0:50475</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.ipc.address</name>
|
||||
<value>0.0.0.0:8010</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.datanode.max.transfer.threads</name>
|
||||
<value>4096</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.domain.socket.path</name>
|
||||
<value>/var/lib/hadoop-hdfs/dn_socket</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.heartbeat.interval</name>
|
||||
<value>3</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.hosts.exclude</name>
|
||||
<value>/etc/hadoop/conf/dfs.exclude</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.http.policy</name>
|
||||
<value>HTTP_ONLY</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.https.port</name>
|
||||
<value>50470</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.journalnode.edits.dir</name>
|
||||
<value>/hadoop/hdfs/journalnode</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.journalnode.http-address</name>
|
||||
<value>0.0.0.0:8480</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.accesstime.precision</name>
|
||||
<value>0</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.avoid.read.stale.datanode</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.avoid.write.stale.datanode</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.checkpoint.dir</name>
|
||||
<value>/hadoop/hdfs/namesecondary</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.checkpoint.edits.dir</name>
|
||||
<value>${dfs.namenode.checkpoint.dir}</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.checkpoint.period</name>
|
||||
<value>21600</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.checkpoint.txns</name>
|
||||
<value>1000000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.handler.count</name>
|
||||
<value>40</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.http-address</name>
|
||||
<value>c6501.ambari.apache.org:50070</value>
|
||||
<final>true</final>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.https-address</name>
|
||||
<value>c6501.ambari.apache.org:50470</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.name.dir</name>
|
||||
<value>/hadoop/hdfs/namenode</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.name.dir.restore</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.safemode.threshold-pct</name>
|
||||
<value>1.0f</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.secondary.http-address</name>
|
||||
<value>c6501.ambari.apache.org:50090</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.stale.datanode.interval</name>
|
||||
<value>30000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.startup.delay.block.deletion.sec</name>
|
||||
<value>3600</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.namenode.write.stale.datanode.ratio</name>
|
||||
<value>1.0f</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.permissions.enabled</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.permissions.superusergroup</name>
|
||||
<value>hdfs</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.replication</name>
|
||||
<value>3</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.replication.max</name>
|
||||
<value>50</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.support.append</name>
|
||||
<value>true</value>
|
||||
<final>true</final>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>dfs.webhdfs.enabled</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.permissions.umask-mode</name>
|
||||
<value>022</value>
|
||||
</property>
|
||||
|
||||
</configuration>
|
||||
@@ -0,0 +1,806 @@
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one
|
||||
~ or more contributor license agreements. See the NOTICE file
|
||||
~ distributed with this work for additional information
|
||||
~ regarding copyright ownership. The ASF licenses this file
|
||||
~ to you under the Apache License, Version 2.0 (the
|
||||
~ "License"); you may not use this file except in compliance
|
||||
~ with the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!--Wed Feb 4 03:19:28 2015-->
|
||||
<configuration>
|
||||
|
||||
<property>
|
||||
<name>ambari.hive.db.schema.name</name>
|
||||
<value>hive</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>datanucleus.cache.level2.type</name>
|
||||
<value>none</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.auto.convert.join</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.auto.convert.join.noconditionaltask</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.auto.convert.join.noconditionaltask.size</name>
|
||||
<value>238026752</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.auto.convert.sortmerge.join</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.cbo.enable</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.cli.print.header</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.cluster.delegation.token.store.class</name>
|
||||
<value>org.apache.hadoop.hive.thrift.ZooKeeperTokenStore</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
|
||||
<value>c6501.ambari.apache.org:2181</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.cluster.delegation.token.store.zookeeper.znode</name>
|
||||
<value>/hive/cluster/delegation</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.compactor.abortedtxn.threshold</name>
|
||||
<value>1000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.compactor.check.interval</name>
|
||||
<value>300L</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.compactor.delta.num.threshold</name>
|
||||
<value>10</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.compactor.delta.pct.threshold</name>
|
||||
<value>0.1f</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.compactor.initiator.on</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.compactor.worker.threads</name>
|
||||
<value>0</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.compactor.worker.timeout</name>
|
||||
<value>86400L</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.compute.query.using.stats</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.conf.restricted.list</name>
|
||||
<value>
|
||||
hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.convert.join.bucket.mapjoin.tez</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.enforce.bucketing</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.enforce.sorting</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.enforce.sortmergebucketmapjoin</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.compress.intermediate</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.compress.output</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.dynamic.partition</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.dynamic.partition.mode</name>
|
||||
<value>nonstrict</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.failure.hooks</name>
|
||||
<value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.max.created.files</name>
|
||||
<value>100000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.max.dynamic.partitions</name>
|
||||
<value>5000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.max.dynamic.partitions.pernode</name>
|
||||
<value>2000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.orc.compression.strategy</name>
|
||||
<value>SPEED</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.orc.default.compress</name>
|
||||
<value>ZLIB</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.orc.default.stripe.size</name>
|
||||
<value>67108864</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.parallel</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.parallel.thread.number</name>
|
||||
<value>8</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.post.hooks</name>
|
||||
<value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.pre.hooks</name>
|
||||
<value>org.apache.hadoop.hive.ql.hooks.ATSHook</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.reducers.bytes.per.reducer</name>
|
||||
<value>67108864</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.reducers.max</name>
|
||||
<value>1009</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.scratchdir</name>
|
||||
<value>/tmp/hive</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.submit.local.task.via.child</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.exec.submitviachild</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.execution.engine</name>
|
||||
<value>mr</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.fetch.task.aggr</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.fetch.task.conversion</name>
|
||||
<value>more</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.fetch.task.conversion.threshold</name>
|
||||
<value>1073741824</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.heapsize</name>
|
||||
<value>1024</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.limit.optimize.enable</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.limit.pushdown.memory.usage</name>
|
||||
<value>0.04</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.map.aggr</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.map.aggr.hash.force.flush.memory.threshold</name>
|
||||
<value>0.9</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.map.aggr.hash.min.reduction</name>
|
||||
<value>0.5</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.map.aggr.hash.percentmemory</name>
|
||||
<value>0.5</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.mapjoin.bucket.cache.size</name>
|
||||
<value>10000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.mapjoin.optimized.hashtable</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.mapred.reduce.tasks.speculative.execution</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.merge.mapfiles</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.merge.mapredfiles</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.merge.orcfile.stripe.level</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.merge.rcfile.block.level</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.merge.size.per.task</name>
|
||||
<value>256000000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.merge.smallfiles.avgsize</name>
|
||||
<value>16000000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.merge.tezfiles</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.authorization.storage.checks</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.cache.pinobjtypes</name>
|
||||
<value>Table,Database,Type,FieldSchema,Order</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.client.connect.retry.delay</name>
|
||||
<value>5s</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.client.socket.timeout</name>
|
||||
<value>1800s</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.connect.retries</name>
|
||||
<value>24</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.execute.setugi</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.failure.retries</name>
|
||||
<value>24</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.kerberos.keytab.file</name>
|
||||
<value>/etc/security/keytabs/hive.service.keytab</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.kerberos.principal</name>
|
||||
<value>hive/_HOST@EXAMPLE.COM</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.pre.event.listeners</name>
|
||||
<value>org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.sasl.enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.server.max.threads</name>
|
||||
<value>100000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.uris</name>
|
||||
<value>thrift://c6501.ambari.apache.org:9083</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.metastore.warehouse.dir</name>
|
||||
<value>/apps/hive/warehouse</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.optimize.bucketmapjoin</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.optimize.bucketmapjoin.sortedmerge</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.optimize.constant.propagation</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.optimize.index.filter</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.optimize.mapjoin.mapreduce</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.optimize.metadataonly</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.optimize.null.scan</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.optimize.reducededuplication</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.optimize.reducededuplication.min.reducer</name>
|
||||
<value>4</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.optimize.sort.dynamic.partition</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.orc.compute.splits.num.threads</name>
|
||||
<value>10</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.orc.splits.include.file.footer</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.prewarm.enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.prewarm.numcontainers</name>
|
||||
<value>10</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.security.authenticator.manager</name>
|
||||
<value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.security.authorization.enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.security.authorization.manager</name>
|
||||
<value>
|
||||
org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdConfOnlyAuthorizerFactory
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.security.metastore.authenticator.manager</name>
|
||||
<value>org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.security.metastore.authorization.auth.reads</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.security.metastore.authorization.manager</name>
|
||||
<value>
|
||||
org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider,org.apache.hadoop.hive.ql.security.authorization.MetaStoreAuthzAPIAuthorizerEmbedOnly
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.allow.user.substitution</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.authentication</name>
|
||||
<value>NONE</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.authentication.spnego.keytab</name>
|
||||
<value>HTTP/_HOST@EXAMPLE.COM</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.authentication.spnego.principal</name>
|
||||
<value>/etc/security/keytabs/spnego.service.keytab</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.enable.doAs</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.enable.impersonation</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.logging.operation.enabled</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.logging.operation.log.location</name>
|
||||
<value>${system:java.io.tmpdir}/${system:user.name}/operation_logs</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.support.dynamic.service.discovery</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.table.type.mapping</name>
|
||||
<value>CLASSIC</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.tez.default.queues</name>
|
||||
<value>default</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.tez.initialize.default.sessions</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.tez.sessions.per.default.queue</name>
|
||||
<value>1</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.thrift.http.path</name>
|
||||
<value>cliservice</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.thrift.http.port</name>
|
||||
<value>10001</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.thrift.max.worker.threads</name>
|
||||
<value>500</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.thrift.port</name>
|
||||
<value>10000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.thrift.sasl.qop</name>
|
||||
<value>auth</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.transport.mode</name>
|
||||
<value>binary</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.use.SSL</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.server2.zookeeper.namespace</name>
|
||||
<value>hiveserver2</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.smbjoin.cache.rows</name>
|
||||
<value>10000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.stats.autogather</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.stats.dbclass</name>
|
||||
<value>fs</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.stats.fetch.column.stats</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.stats.fetch.partition.stats</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.support.concurrency</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.auto.reducer.parallelism</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.container.size</name>
|
||||
<value>682</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.cpu.vcores</name>
|
||||
<value>-1</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.dynamic.partition.pruning</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.dynamic.partition.pruning.max.data.size</name>
|
||||
<value>104857600</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.dynamic.partition.pruning.max.event.size</name>
|
||||
<value>1048576</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.input.format</name>
|
||||
<value>org.apache.hadoop.hive.ql.io.HiveInputFormat</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.java.opts</name>
|
||||
<value>-server -Xmx546m -Djava.net.preferIPv4Stack=true -XX:NewRatio=8 -XX:+UseNUMA
|
||||
-XX:+UseParallelGC -XX:+PrintGCDetails -verbose:gc -XX:+PrintGCTimeStamps
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.log.level</name>
|
||||
<value>INFO</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.max.partition.factor</name>
|
||||
<value>2.0</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.min.partition.factor</name>
|
||||
<value>0.25</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.tez.smb.number.waves</name>
|
||||
<value>0.5</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.txn.manager</name>
|
||||
<value>org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.txn.max.open.batch</name>
|
||||
<value>1000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.txn.timeout</name>
|
||||
<value>300</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.user.install.directory</name>
|
||||
<value>/user/</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.vectorized.execution.enabled</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.vectorized.execution.reduce.enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.vectorized.groupby.checkinterval</name>
|
||||
<value>4096</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.vectorized.groupby.flush.percent</name>
|
||||
<value>0.1</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.vectorized.groupby.maxentries</name>
|
||||
<value>100000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.zookeeper.client.port</name>
|
||||
<value>2181</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.zookeeper.namespace</name>
|
||||
<value>hive_zookeeper_namespace</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hive.zookeeper.quorum</name>
|
||||
<value>c6501.ambari.apache.org:2181</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>javax.jdo.option.ConnectionDriverName</name>
|
||||
<value>com.mysql.jdbc.Driver</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>javax.jdo.option.ConnectionURL</name>
|
||||
<value>jdbc:mysql://c6501.ambari.apache.org/hive?createDatabaseIfNotExist=true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>javax.jdo.option.ConnectionUserName</name>
|
||||
<value>hive</value>
|
||||
</property>
|
||||
|
||||
</configuration>
|
||||
@@ -0,0 +1,243 @@
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one
|
||||
~ or more contributor license agreements. See the NOTICE file
|
||||
~ distributed with this work for additional information
|
||||
~ regarding copyright ownership. The ASF licenses this file
|
||||
~ to you under the Apache License, Version 2.0 (the
|
||||
~ "License"); you may not use this file except in compliance
|
||||
~ with the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!--Wed Feb 4 03:23:58 2015-->
|
||||
<configuration>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.admin.map.child.java.opts</name>
|
||||
<value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.admin.reduce.child.java.opts</name>
|
||||
<value>-server -XX:NewRatio=8 -Djava.net.preferIPv4Stack=true -Dhdp.version=${hdp.version}
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.admin.user.env</name>
|
||||
<value>
|
||||
LD_LIBRARY_PATH=/usr/hdp/${hdp.version}/hadoop/lib/native:/usr/hdp/${hdp.version}/hadoop/lib/native/Linux-amd64-64
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.am.max-attempts</name>
|
||||
<value>2</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.application.classpath</name>
|
||||
<value>
|
||||
$PWD/mr-framework/hadoop/share/hadoop/mapreduce/*:$PWD/mr-framework/hadoop/share/hadoop/mapreduce/lib/*:$PWD/mr-framework/hadoop/share/hadoop/common/*:$PWD/mr-framework/hadoop/share/hadoop/common/lib/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/*:$PWD/mr-framework/hadoop/share/hadoop/yarn/lib/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/*:$PWD/mr-framework/hadoop/share/hadoop/hdfs/lib/*:/usr/hdp/${hdp.version}/hadoop/lib/hadoop-lzo-0.6.0.${hdp.version}.jar:/etc/hadoop/conf/secure
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.application.framework.path</name>
|
||||
<value>/hdp/apps/${hdp.version}/mapreduce/mapreduce.tar.gz#mr-framework</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.cluster.administrators</name>
|
||||
<value>hadoop</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.framework.name</name>
|
||||
<value>yarn</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.job.emit-timeline-data</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.job.reduce.slowstart.completedmaps</name>
|
||||
<value>0.05</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.jobhistory.address</name>
|
||||
<value>c6501.ambari.apache.org:10020</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.jobhistory.bind-host</name>
|
||||
<value>0.0.0.0</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.jobhistory.done-dir</name>
|
||||
<value>/mr-history/done</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.jobhistory.intermediate-done-dir</name>
|
||||
<value>/mr-history/tmp</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.jobhistory.webapp.address</name>
|
||||
<value>c6501.ambari.apache.org:19888</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.map.java.opts</name>
|
||||
<value>-Xmx546m</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.map.log.level</name>
|
||||
<value>INFO</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.map.memory.mb</name>
|
||||
<value>682</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.map.output.compress</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.map.sort.spill.percent</name>
|
||||
<value>0.7</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.map.speculative</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.output.fileoutputformat.compress</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.output.fileoutputformat.compress.type</name>
|
||||
<value>BLOCK</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.input.buffer.percent</name>
|
||||
<value>0.0</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.java.opts</name>
|
||||
<value>-Xmx546m</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.log.level</name>
|
||||
<value>INFO</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.memory.mb</name>
|
||||
<value>682</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.shuffle.fetch.retry.enabled</name>
|
||||
<value>1</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.shuffle.fetch.retry.interval-ms</name>
|
||||
<value>1000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.shuffle.fetch.retry.timeout-ms</name>
|
||||
<value>30000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.shuffle.input.buffer.percent</name>
|
||||
<value>0.7</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.shuffle.merge.percent</name>
|
||||
<value>0.66</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.shuffle.parallelcopies</name>
|
||||
<value>30</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.reduce.speculative</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.shuffle.port</name>
|
||||
<value>13562</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.task.io.sort.factor</name>
|
||||
<value>100</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.task.io.sort.mb</name>
|
||||
<value>273</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>mapreduce.task.timeout</name>
|
||||
<value>300000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.app.mapreduce.am.admin-command-opts</name>
|
||||
<value>-Dhdp.version=${hdp.version}</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.app.mapreduce.am.command-opts</name>
|
||||
<value>-Xmx546m -Dhdp.version=${hdp.version}</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.app.mapreduce.am.log.level</name>
|
||||
<value>INFO</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.app.mapreduce.am.resource.mb</name>
|
||||
<value>682</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.app.mapreduce.am.staging-dir</name>
|
||||
<value>/user</value>
|
||||
</property>
|
||||
|
||||
</configuration>
|
||||
|
|
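The configuration block that just ended reads like a MapReduce (mapred-site style) snapshot. A minimal, hedged sketch of loading and inspecting it with Hadoop's Configuration API follows; the local file name "mapred-site.xml" is an illustrative assumption, not something stated in this change.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class MapredSiteProbe {
    public static void main(String[] args) {
        // Assumption: the XML above has been saved locally as mapred-site.xml.
        Configuration conf = new Configuration(false);   // start empty, skip built-in defaults
        conf.addResource(new Path("mapred-site.xml"));

        // Read back a few of the properties defined in the snapshot.
        int mapMb = conf.getInt("mapreduce.map.memory.mb", -1);      // 682 in the snapshot
        int sortMb = conf.getInt("mapreduce.task.io.sort.mb", -1);   // 273 in the snapshot
        String amOpts = conf.get("yarn.app.mapreduce.am.command-opts");

        System.out.println("map container MB : " + mapMb);
        System.out.println("io.sort.mb       : " + sortMb);
        // ${hdp.version} stays literal unless that variable is defined elsewhere.
        System.out.println("AM command opts  : " + amOpts);
    }
}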
@ -0,0 +1,507 @@
|
|||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one
|
||||
~ or more contributor license agreements. See the NOTICE file
|
||||
~ distributed with this work for additional information
|
||||
~ regarding copyright ownership. The ASF licenses this file
|
||||
~ to you under the Apache License, Version 2.0 (the
|
||||
~ "License"); you may not use this file except in compliance
|
||||
~ with the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<!--Wed Feb 4 03:23:58 2015-->
|
||||
<configuration>
|
||||
|
||||
<property>
|
||||
<name>hadoop.registry.rm.enabled</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.registry.zk.quorum</name>
|
||||
<value>c6501.ambari.apache.org:2181</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.acl.enable</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.admin.acl</name>
|
||||
<value></value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.application.classpath</name>
|
||||
<value>
|
||||
$HADOOP_CONF_DIR,/usr/hdp/current/hadoop-client/*,/usr/hdp/current/hadoop-client/lib/*,/usr/hdp/current/hadoop-hdfs-client/*,/usr/hdp/current/hadoop-hdfs-client/lib/*,/usr/hdp/current/hadoop-yarn-client/*,/usr/hdp/current/hadoop-yarn-client/lib/*
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.client.nodemanager-connect.max-wait-ms</name>
|
||||
<value>900000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.client.nodemanager-connect.retry-interval-ms</name>
|
||||
<value>10000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.log-aggregation-enable</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.log-aggregation.retain-seconds</name>
|
||||
<value>2592000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.log.server.url</name>
|
||||
<value>http://c6501.ambari.apache.org:19888/jobhistory/logs</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.node-labels.fs-store.retry-policy-spec</name>
|
||||
<value>2000, 500</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.node-labels.fs-store.root-dir</name>
|
||||
<value>/system/yarn/node-labels</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.node-labels.manager-class</name>
|
||||
<value>org.apache.hadoop.yarn.server.resourcemanager.nodelabels.MemoryRMNodeLabelsManager
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.address</name>
|
||||
<value>0.0.0.0:45454</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.admin-env</name>
|
||||
<value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.aux-services</name>
|
||||
<value>mapreduce_shuffle</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
|
||||
<value>org.apache.hadoop.mapred.ShuffleHandler</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.bind-host</name>
|
||||
<value>0.0.0.0</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.container-executor.class</name>
|
||||
<value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.container-monitor.interval-ms</name>
|
||||
<value>3000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.delete.debug-delay-sec</name>
|
||||
<value>0</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage</name>
|
||||
<value>90</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb</name>
|
||||
<value>1000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
|
||||
<value>0.25</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.health-checker.interval-ms</name>
|
||||
<value>135000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.health-checker.script.timeout-ms</name>
|
||||
<value>60000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
|
||||
<value>hadoop-yarn</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.linux-container-executor.group</name>
|
||||
<value>hadoop</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
|
||||
<value>org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.local-dirs</name>
|
||||
<value>/hadoop/yarn/local</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.log-aggregation.compression-type</name>
|
||||
<value>gz</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.log-aggregation.debug-enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.log-aggregation.num-log-files-per-app</name>
|
||||
<value>30</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds</name>
|
||||
<value>-1</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.log-dirs</name>
|
||||
<value>/hadoop/yarn/log</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.log.retain-second</name>
|
||||
<value>604800</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.recovery.dir</name>
|
||||
<value>/var/log/hadoop-yarn/nodemanager/recovery-state</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.recovery.enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.remote-app-log-dir</name>
|
||||
<value>/app-logs</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.remote-app-log-dir-suffix</name>
|
||||
<value>logs</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.resource.cpu-vcores</name>
|
||||
<value>1</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.resource.memory-mb</name>
|
||||
<value>2048</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
|
||||
<value>100</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.vmem-check-enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.nodemanager.vmem-pmem-ratio</name>
|
||||
<value>2.1</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.address</name>
|
||||
<value>c6501.ambari.apache.org:8050</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.admin.address</name>
|
||||
<value>c6501.ambari.apache.org:8141</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.am.max-attempts</name>
|
||||
<value>2</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.bind-host</name>
|
||||
<value>0.0.0.0</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.connect.max-wait.ms</name>
|
||||
<value>900000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.connect.retry-interval.ms</name>
|
||||
<value>30000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.fs.state-store.retry-policy-spec</name>
|
||||
<value>2000, 500</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.fs.state-store.uri</name>
|
||||
<value></value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.ha.enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.hostname</name>
|
||||
<value>c6501.ambari.apache.org</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.nodes.exclude-path</name>
|
||||
<value>/etc/hadoop/conf/yarn.exclude</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.recovery.enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.resource-tracker.address</name>
|
||||
<value>c6501.ambari.apache.org:8025</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.scheduler.address</name>
|
||||
<value>c6501.ambari.apache.org:8030</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.scheduler.class</name>
|
||||
<value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.state-store.max-completed-applications</name>
|
||||
<value>${yarn.resourcemanager.max-completed-applications}</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.store.class</name>
|
||||
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size</name>
|
||||
<value>10</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.system-metrics-publisher.enabled</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.webapp.address</name>
|
||||
<value>c6501.ambari.apache.org:8088</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.work-preserving-recovery.enabled</name>
|
||||
<value>false</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms</name>
|
||||
<value>10000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.zk-acl</name>
|
||||
<value>world:anyone:rwcda</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.zk-address</name>
|
||||
<value>localhost:2181</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.zk-num-retries</name>
|
||||
<value>1000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.zk-retry-interval-ms</name>
|
||||
<value>1000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.zk-state-store.parent-path</name>
|
||||
<value>/rmstore</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.resourcemanager.zk-timeout-ms</name>
|
||||
<value>10000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.scheduler.maximum-allocation-mb</name>
|
||||
<value>2048</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.scheduler.minimum-allocation-mb</name>
|
||||
<value>682</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.address</name>
|
||||
<value>c6501.ambari.apache.org:10200</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.bind-host</name>
|
||||
<value>0.0.0.0</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.client.max-retries</name>
|
||||
<value>30</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.client.retry-interval-ms</name>
|
||||
<value>1000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.enabled</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.generic-application-history.store-class</name>
|
||||
<value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore
|
||||
</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.http-authentication.simple.anonymous.allowed</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.http-authentication.type</name>
|
||||
<value>simple</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.leveldb-timeline-store.path</name>
|
||||
<value>/hadoop/yarn/timeline</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.leveldb-timeline-store.read-cache-size</name>
|
||||
<value>104857600</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size</name>
|
||||
<value>10000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size</name>
|
||||
<value>10000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms</name>
|
||||
<value>300000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.store-class</name>
|
||||
<value>org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.ttl-enable</name>
|
||||
<value>true</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.ttl-ms</name>
|
||||
<value>2678400000</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.webapp.address</name>
|
||||
<value>c6501.ambari.apache.org:8188</value>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>yarn.timeline-service.webapp.https.address</name>
|
||||
<value>c6501.ambari.apache.org:8190</value>
|
||||
</property>
|
||||
|
||||
</configuration>
|
||||
|
|
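An observation on the numbers, not something stated in the change itself: the YARN allocation limits above and the MapReduce container sizes earlier follow the usual Ambari sizing pattern, where task heaps (-Xmx546m) are roughly 0.8 of the container size (682 MB) and a 2048 MB NodeManager fits three such containers. A throwaway arithmetic sketch:

public class YarnSizingCheck {
    public static void main(String[] args) {
        // Values copied from the two configuration snapshots above.
        int minAllocMb = 682;     // yarn.scheduler.minimum-allocation-mb
        int maxAllocMb = 2048;    // yarn.scheduler.maximum-allocation-mb
        int nodeMemMb  = 2048;    // yarn.nodemanager.resource.memory-mb
        int mapMb      = 682;     // mapreduce.map.memory.mb
        int heapMb     = 546;     // -Xmx546m in mapreduce.map.java.opts

        // Requests are rounded up to multiples of the minimum allocation,
        // so a 2048 MB node fits 2048 / 682 = 3 such containers.
        System.out.println("containers per node : " + nodeMemMb / minAllocMb);   // 3
        System.out.println("heap / container    : " + (double) heapMb / mapMb);  // ~0.8
        System.out.println("map fits in max     : " + (mapMb <= maxAllocMb));    // true
    }
}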
@ -6,9 +6,9 @@
|
|||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -43,17 +43,16 @@ import java.io.File;
|
|||
import java.io.FileWriter;
|
||||
import java.util.List;
|
||||
|
||||
@Test (enabled = false)
|
||||
@Test(enabled = false)
|
||||
public class HiveGraphRepositoryTest {
|
||||
|
||||
public static final String HIVE_L4J_PROPS = "target/hive-log4j.properties";
|
||||
public static final String HIVE_EXEC_L4J_PROPS = "target/hive-exec-log4j.properties";
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(HiveGraphRepositoryTest.class);
|
||||
protected HiveTypeSystem hts;
|
||||
private GraphBackedMetadataRepository repository;
|
||||
private GraphService gs;
|
||||
public static final String HIVE_L4J_PROPS = "target/hive-log4j.properties";
|
||||
public static final String HIVE_EXEC_L4J_PROPS = "target/hive-exec-log4j.properties";
|
||||
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(HiveGraphRepositoryTest.class);
|
||||
|
||||
@BeforeClass
|
||||
public void setup() throws ConfigurationException, MetadataException {
|
||||
|
|
@ -79,7 +78,7 @@ public class HiveGraphRepositoryTest {
|
|||
System.out.println("*******************Graph Dump****************************");
|
||||
}
|
||||
|
||||
@Test (enabled = false)
|
||||
@Test(enabled = false)
|
||||
public void testHiveImport() throws Exception {
|
||||
HiveConf conf = new HiveConf();
|
||||
HiveMetaStoreClient hiveMetaStoreClient;
|
||||
|
|
@ -6,9 +6,9 @@
|
|||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
|
|
@ -23,9 +23,9 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
|
|||
import org.apache.hadoop.hive.metastore.api.MetaException;
|
||||
import org.apache.hadoop.metadata.ITypedReferenceableInstance;
|
||||
import org.apache.hadoop.metadata.MetadataException;
|
||||
import org.apache.hadoop.metadata.storage.Id;
|
||||
import org.apache.hadoop.metadata.storage.memory.MemRepository;
|
||||
import org.apache.hadoop.metadata.types.TypeSystem;
|
||||
import org.apache.hadoop.metadata.typesystem.persistence.Id;
|
||||
import org.apache.hadoop.metadata.typesystem.persistence.memory.MemRepository;
|
||||
import org.apache.hadoop.metadata.typesystem.types.TypeSystem;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import org.testng.annotations.BeforeClass;
|
||||
|
|
@ -39,15 +39,15 @@ import java.sql.Connection;
|
|||
import java.sql.DriverManager;
|
||||
import java.sql.Statement;
|
||||
|
||||
@Test (enabled = true)
|
||||
@Test(enabled = true)
|
||||
public class HiveTypeSystemTest {
|
||||
|
||||
protected MemRepository mr;
|
||||
protected HiveTypeSystem hts;
|
||||
private static final String hiveHost = "c6501.ambari.apache.org";
|
||||
private static final short hivePort = 10000;
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(HiveTypeSystemTest.class);
|
||||
protected MemRepository mr;
|
||||
protected HiveTypeSystem hts;
|
||||
|
||||
@BeforeClass
|
||||
public void setup() throws MetadataException {
|
||||
|
|
@ -58,7 +58,7 @@ public class HiveTypeSystemTest {
|
|||
hts = HiveTypeSystem.getInstance();
|
||||
}
|
||||
|
||||
@Test (enabled = true)
|
||||
@Test(enabled = true)
|
||||
public void testHiveImport() throws MetaException, MetadataException, IOException {
|
||||
HiveConf conf = new HiveConf();
|
||||
HiveMetaStoreClient hiveMetaStoreClient;
|
||||
|
|
@ -69,7 +69,8 @@ public class HiveTypeSystemTest {
|
|||
LOG.info("Defined DB instances");
|
||||
File f = new File("./target/logs/hiveobjs.txt");
|
||||
f.getParentFile().mkdirs();
|
||||
FileWriter fw = new FileWriter(f); BufferedWriter bw = new BufferedWriter(fw);
|
||||
FileWriter fw = new FileWriter(f);
|
||||
BufferedWriter bw = new BufferedWriter(fw);
|
||||
for (Id id : hImporter.getDBInstances()) {
|
||||
ITypedReferenceableInstance instance = mr.get(id);
|
||||
bw.write(instance.toString());
|
||||
|
|
@ -103,7 +104,7 @@ public class HiveTypeSystemTest {
|
|||
bw.close();
|
||||
}
|
||||
|
||||
@Test (enabled = true)
|
||||
@Test(enabled = true)
|
||||
public void testHiveLineage() throws MetaException, MetadataException, IOException, Exception {
|
||||
Class.forName("org.apache.hive.jdbc.HiveDriver");
|
||||
String url = "jdbc:hive2://" + hiveHost + ":" + hivePort;
|
||||
|
|
@ -117,5 +118,4 @@ public class HiveTypeSystemTest {
|
|||
}
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
|
@ -0,0 +1,17 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
|
@ -0,0 +1,22 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* <p/>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p/>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
drop table if exists t;
|
||||
create table t(a int, b string);
|
||||
drop table if exists t2;
|
||||
create table t2 as select * from t;
|
||||
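The four-statement script above is the input for the lineage test shown earlier: the final CREATE TABLE ... AS SELECT is what produces the table-level lineage (t2 derived from t). A hedged sketch of running it over Hive JDBC, reusing the driver, host and port that appear in HiveTypeSystemTest; adjust them for a real cluster.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class RunLineageScript {
    public static void main(String[] args) throws Exception {
        // Driver, host and port mirror HiveTypeSystemTest above; illustrative only.
        Class.forName("org.apache.hive.jdbc.HiveDriver");
        String url = "jdbc:hive2://c6501.ambari.apache.org:10000";

        try (Connection connection = DriverManager.getConnection(url, "", "");
             Statement statement = connection.createStatement()) {
            statement.execute("drop table if exists t");
            statement.execute("create table t(a int, b string)");
            statement.execute("drop table if exists t2");
            // The CTAS below is what the lineage test inspects.
            statement.execute("create table t2 as select * from t");
        }
    }
}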
common/pom.xml (120 lines, file deleted)
|
|
@ -1,120 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one
|
||||
~ or more contributor license agreements. See the NOTICE file
|
||||
~ distributed with this work for additional information
|
||||
~ regarding copyright ownership. The ASF licenses this file
|
||||
~ to you under the Apache License, Version 2.0 (the
|
||||
~ "License"); you may not use this file except in compliance
|
||||
~ with the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<project xmlns="http://maven.apache.org/POM/4.0.0"
|
||||
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
|
||||
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
<parent>
|
||||
<groupId>org.apache.hadoop.metadata</groupId>
|
||||
<artifactId>metadata-governance</artifactId>
|
||||
<version>0.1-incubating-SNAPSHOT</version>
|
||||
</parent>
|
||||
<artifactId>metadata-common</artifactId>
|
||||
<description>Apache Metadata Common Module</description>
|
||||
<name>Apache Metadata Commons</name>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-client</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>commons-el</groupId>
|
||||
<artifactId>commons-el</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>javax.servlet.jsp</groupId>
|
||||
<artifactId>jsp-api</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.testng</groupId>
|
||||
<artifactId>testng</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.slf4j</groupId>
|
||||
<artifactId>slf4j-log4j12</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.slf4j</groupId>
|
||||
<artifactId>slf4j-api</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.codehaus.jettison</groupId>
|
||||
<artifactId>jettison</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.mockito</groupId>
|
||||
<artifactId>mockito-all</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>net.sourceforge.findbugs</groupId>
|
||||
<artifactId>annotations</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.googlecode.json-simple</groupId>
|
||||
<artifactId>json-simple</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.tinkerpop.blueprints</groupId>
|
||||
<artifactId>blueprints-core</artifactId>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>com.thinkaurelius.titan</groupId>
|
||||
<artifactId>titan-core</artifactId>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
|
||||
<build>
|
||||
<plugins>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-compiler-plugin</artifactId>
|
||||
<configuration>
|
||||
<source>1.7</source>
|
||||
<target>1.7</target>
|
||||
</configuration>
|
||||
</plugin>
|
||||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-jar-plugin</artifactId>
|
||||
<version>2.4</version>
|
||||
<configuration>
|
||||
<excludes>
|
||||
<exclude>**/log4j.xml</exclude>
|
||||
</excludes>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
</build>
|
||||
</project>
|
||||
|
|
@ -1,69 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.metadata;
|
||||
|
||||
public class MetadataException extends Exception {
|
||||
/**
|
||||
* Constructs a new exception with the specified detail message. The
|
||||
* cause is not initialized, and may subsequently be initialized by
|
||||
* a call to {@link #initCause}.
|
||||
*
|
||||
* @param message the detail message. The detail message is saved for
|
||||
* later retrieval by the {@link #getMessage()} method.
|
||||
*/
|
||||
public MetadataException(String message) {
|
||||
super(message);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a new exception with the specified detail message and
|
||||
* cause. <p>Note that the detail message associated with
|
||||
* {@code cause} is <i>not</i> automatically incorporated in
|
||||
* this exception's detail message.
|
||||
*
|
||||
* @param message the detail message (which is saved for later retrieval
|
||||
* by the {@link #getMessage()} method).
|
||||
* @param cause the cause (which is saved for later retrieval by the
|
||||
* {@link #getCause()} method). (A <tt>null</tt> value is
|
||||
* permitted, and indicates that the cause is nonexistent or
|
||||
* unknown.)
|
||||
* @since 1.4
|
||||
*/
|
||||
public MetadataException(String message, Throwable cause) {
|
||||
super(message, cause);
|
||||
}
|
||||
|
||||
/**
|
||||
* Constructs a new exception with the specified cause and a detail
|
||||
* message of <tt>(cause==null ? null : cause.toString())</tt> (which
|
||||
* typically contains the class and detail message of <tt>cause</tt>).
|
||||
* This constructor is useful for exceptions that are little more than
|
||||
* wrappers for other throwables (for example, {@link
|
||||
* java.security.PrivilegedActionException}).
|
||||
*
|
||||
* @param cause the cause (which is saved for later retrieval by the
|
||||
* {@link #getCause()} method). (A <tt>null</tt> value is
|
||||
* permitted, and indicates that the cause is nonexistent or
|
||||
* unknown.)
|
||||
* @since 1.4
|
||||
*/
|
||||
public MetadataException(Throwable cause) {
|
||||
super(cause);
|
||||
}
|
||||
}
|
||||
|
|
@ -1,51 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.metadata.service;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Service interface that's initialized at startup.
|
||||
*/
|
||||
//todo: needs to be removed, as it serves no purpose now with Guice
|
||||
@Deprecated
|
||||
public interface Service extends Closeable {
|
||||
|
||||
/**
|
||||
* Starts the service. This method blocks until the service has completely started.
|
||||
*
|
||||
* @throws Exception
|
||||
*/
|
||||
void start() throws Exception;
|
||||
|
||||
/**
|
||||
* Stops the service. This method blocks until the service has completely shut down.
|
||||
*/
|
||||
void stop();
|
||||
|
||||
/**
|
||||
* A version of stop() that is designed to be usable in Java7 closure
|
||||
* clauses.
|
||||
* Implementation classes MUST relay this directly to {@link #stop()}
|
||||
* @throws java.io.IOException never
|
||||
* @throws RuntimeException on any failure during the stop operation
|
||||
*/
|
||||
void close() throws IOException;
|
||||
}
|
||||
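For orientation, a minimal sketch of what an implementation of the (now removed) Service contract above looks like; the class name and the boolean bookkeeping are illustrative assumptions.

import java.io.IOException;

import org.apache.hadoop.metadata.service.Service;

public class NoOpService implements Service {

    private volatile boolean started;

    @Override
    public void start() throws Exception {
        started = true;     // must block until the service has completely started
    }

    @Override
    public void stop() {
        started = false;    // must block until the service has completely shut down
    }

    @Override
    public void close() throws IOException {
        stop();             // the contract requires close() to relay directly to stop()
    }
}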
|
|
@ -1,68 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.metadata.util;
|
||||
|
||||
import org.apache.hadoop.metadata.MetadataException;
|
||||
|
||||
import java.lang.reflect.Constructor;
|
||||
import java.lang.reflect.Method;
|
||||
|
||||
/**
|
||||
* Helper methods for class instantiation through reflection.
|
||||
*/
|
||||
public final class ReflectionUtils {
|
||||
|
||||
private ReflectionUtils() {}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public static <T> T getInstanceByClassName(String clazzName) throws MetadataException {
|
||||
try {
|
||||
Class<T> clazz = (Class<T>) ReflectionUtils.class.getClassLoader().loadClass(clazzName);
|
||||
try {
|
||||
return clazz.newInstance();
|
||||
} catch (IllegalAccessException e) {
|
||||
Method method = clazz.getMethod("get");
|
||||
return (T) method.invoke(null);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new MetadataException("Unable to get instance for " + clazzName, e);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Invokes constructor with one argument.
|
||||
* @param clazzName - classname
|
||||
* @param argCls - Class of the argument
|
||||
* @param arg - constructor argument
|
||||
* @param <T> - instance type
|
||||
* @return Class instance
|
||||
* @throws MetadataException
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public static <T> T getInstanceByClassName(String clazzName, Class<?> argCls,
|
||||
Object arg) throws MetadataException {
|
||||
try {
|
||||
Class<T> clazz = (Class<T>) ReflectionUtils.class.getClassLoader().loadClass(clazzName);
|
||||
Constructor<T> constructor = clazz.getConstructor(argCls);
|
||||
return constructor.newInstance(arg);
|
||||
} catch (Exception e) {
|
||||
throw new MetadataException("Unable to get instance for " + clazzName, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
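For reference, a hedged usage sketch of the (removed) ReflectionUtils helper above, using only JDK classes so nothing project-specific is assumed.

import org.apache.hadoop.metadata.MetadataException;
import org.apache.hadoop.metadata.util.ReflectionUtils;

public class ReflectionUtilsExample {
    public static void main(String[] args) throws MetadataException {
        // No-arg path: loads the class and calls newInstance(),
        // falling back to a static get() factory on IllegalAccessException.
        java.util.List<String> list =
                ReflectionUtils.getInstanceByClassName("java.util.ArrayList");

        // One-arg path: resolves a constructor taking the given argument type.
        StringBuilder sb =
                ReflectionUtils.getInstanceByClassName("java.lang.StringBuilder",
                                                       String.class, "seed");

        System.out.println(list.size() + " / " + sb);   // prints: 0 / seed
    }
}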
|
|
@ -1,20 +0,0 @@
|
|||
var DgcApp = angular.module('DgcApp', [
|
||||
'ngRoute',
|
||||
'DgcControllers',
|
||||
'ui.bootstrap'
|
||||
]);
|
||||
|
||||
DgcApp.config(['$routeProvider', function($routeProvider) {
|
||||
$routeProvider.
|
||||
when('/Search', {
|
||||
templateUrl: 'partials/search.html',
|
||||
controller: 'ListController'
|
||||
}).
|
||||
when('/Search/:Id', {
|
||||
templateUrl: 'partials/wiki.html',
|
||||
controller: 'DefinitionController'
|
||||
}).
|
||||
otherwise({
|
||||
redirectTo: '/Search'
|
||||
});
|
||||
}]);
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
<h4 class="titletxt2">{{key}}:</h4>
|
||||
<p class="sm-txt2">{{value | date:'medium'}}</p>
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
<div class="container" ng-controller="LineageController">
|
||||
<div class="lineage"></div>
|
||||
<svg ng-attr-width="{{width}}"
|
||||
ng-attr-height="{{height}}">
|
||||
|
||||
</svg>
|
||||
|
||||
</div>
|
||||
|
|
@ -1,22 +0,0 @@
|
|||
<div class="content">
|
||||
<div class="container">
|
||||
<div class="col-lg-12">
|
||||
<h4 class="txt1">{{Guid}}</h4>
|
||||
<br/>
|
||||
|
||||
<div ng-controller="LineageController" style="border-bottom: 2px solid #060;margin-bottom: 30px;">
|
||||
<div class="lineage"></div>
|
||||
<svg ng-attr-width="{{width}}"
|
||||
ng-attr-height="{{height}}">
|
||||
|
||||
</svg>
|
||||
</div>
|
||||
|
||||
<div ng-repeat="(key,value) in details" ng-if="isString(value)" ng-include="'partials/attribute.html'"></div>
|
||||
|
||||
<!--<tab heading="Raw"><pre>{{details}}</pre></tab>-->
|
||||
|
||||
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
.DS_Store
|
||||
.bower-*/
|
||||
.idea/
|
||||
node_modules/
|
||||
lib/
|
||||
public/lib
|
||||
public/dist
|
||||
*.log
|
||||
*.tgz
|
||||
|
|
@ -1,28 +0,0 @@
|
|||
.d3-tip {
|
||||
line-height: 1;
|
||||
font-weight: bold;
|
||||
padding: 12px;
|
||||
background: rgba(0, 0, 0, 0.8);
|
||||
color: #fff;
|
||||
border-radius: 2px;
|
||||
}
|
||||
|
||||
/* Creates a small triangle extender for the tooltip */
|
||||
.d3-tip:after {
|
||||
box-sizing: border-box;
|
||||
display: inline;
|
||||
font-size: 10px;
|
||||
width: 100%;
|
||||
line-height: 1;
|
||||
color: rgba(0, 0, 0, 0.8);
|
||||
content: "\25BC";
|
||||
position: absolute;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
/* Style northward tooltips differently */
|
||||
.d3-tip.n:after {
|
||||
margin: -1px 0 0 0;
|
||||
top: 100%;
|
||||
left: 0;
|
||||
}
|
||||
|
|
@ -1,30 +0,0 @@
|
|||
g circle {
|
||||
cursor: pointer;
|
||||
stroke: green;
|
||||
stroke-width: 2px;
|
||||
fill: url(#process-image);
|
||||
}
|
||||
|
||||
g circle.empty {
|
||||
fill: #90ef96;
|
||||
}
|
||||
|
||||
.link {
|
||||
fill: none;
|
||||
stroke: green;
|
||||
stroke-width: 2px;
|
||||
}
|
||||
|
||||
g text {
|
||||
pointer-events: none;
|
||||
text-shadow: 0 1px 0 #fff, 1px 0 0 #fff, 0 -1px 0 #fff, -1px 0 0 #fff;
|
||||
}
|
||||
|
||||
.d3-tip pre {
|
||||
max-width: 400px;
|
||||
}
|
||||
|
||||
div.lineage {
|
||||
border-bottom: 2px solid #006600;
|
||||
margin-bottom: 30px;
|
||||
}
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.element(document).ready(function() {
|
||||
/* Fixing facebook bug with redirect */
|
||||
if (window.location.hash === '#_=_') window.location.hash = '#!';
|
||||
|
||||
//Then init the app
|
||||
angular.bootstrap(document, ['dgc'], {
|
||||
strictDi: true
|
||||
});
|
||||
});
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
//Setting up route
|
||||
angular.module('dgc').config(['$locationProvider', '$urlRouterProvider',
|
||||
function($locationProvider, $urlRouterProvider) {
|
||||
$locationProvider.hashPrefix('!');
|
||||
// For unmatched routes:
|
||||
$urlRouterProvider.otherwise('/');
|
||||
}
|
||||
]);
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.details').controller('DetailsController', ['$scope', '$stateParams', 'DetailsResource',
|
||||
function($scope, $stateParams, DetailsResource) {
|
||||
|
||||
$scope.details = DetailsResource.get({
|
||||
id: $stateParams.id
|
||||
});
|
||||
|
||||
$scope.isString = angular.isString;
|
||||
}
|
||||
]);
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.details', ['dgc.lineage']);
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.details').factory('DetailsResource', ['$resource', function($resource) {
|
||||
return $resource('/api/metadata/entities/definition/:id', {}, {
|
||||
get: {
|
||||
method: 'GET',
|
||||
transformResponse: function(data) {
|
||||
if (data) {
|
||||
return angular.fromJson(data.definition);
|
||||
}
|
||||
},
|
||||
responseType: 'json'
|
||||
}
|
||||
});
|
||||
}]);
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.details').config(['$stateProvider',
|
||||
function($stateProvider) {
|
||||
|
||||
// states for my app
|
||||
$stateProvider.state('details', {
|
||||
url: '/details/:id',
|
||||
templateUrl: '/modules/details/views/details.html'
|
||||
});
|
||||
}
|
||||
]);
|
||||
|
|
@ -1,2 +0,0 @@
|
|||
<h4>{{key}}:</h4>
|
||||
<p>{{value | date:'medium'}}</p>
|
||||
|
|
@ -1,9 +0,0 @@
|
|||
<div class="container details" data-ng-controller="DetailsController">
|
||||
<div class="col-lg-12">
|
||||
<h3>{{details.$id$.id}}</h3>
|
||||
<div class="lineage" data-ng-include="'/modules/lineage/views/lineage.html'"></div>
|
||||
<div class="wiki">
|
||||
<section data-ng-repeat="(key,value) in details" data-ng-if="isString(value)" data-ng-include="'/modules/details/views/attribute.html'"></section>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.home').controller('HeaderController', ['$scope', function($scope) {
|
||||
|
||||
$scope.menu = [];
|
||||
|
||||
$scope.isCollapsed = true;
|
||||
$scope.isLoggedIn = function() {
|
||||
return true;
|
||||
};
|
||||
}]);
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.home', ['dgc.home.routes']);
|
||||
|
|
@ -1,13 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
//Setting up route
|
||||
angular.module('dgc.home.routes', []).config(['$stateProvider',
|
||||
function($stateProvider) {
|
||||
|
||||
// states for my app
|
||||
$stateProvider.state('home', {
|
||||
url: '/',
|
||||
templateUrl: '/modules/home/views/home.html'
|
||||
});
|
||||
}
|
||||
]);
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
<section class="text-center">
|
||||
<img data-ui-sref="search" data-ui-sref="search" class="pointer" src="modules/home/img/splash.png"/>
|
||||
</section>
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.lineage', []);
|
||||
|
|
@ -1,8 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.lineage').factory('LineageResource', ['$resource', function($resource) {
|
||||
return $resource('/api/metadata/discovery/search/relationships/:id', {
|
||||
depth: 3,
|
||||
edgesToFollow: 'HiveLineage.sourceTables.0,HiveLineage.sourceTables.1,HiveLineage.sourceTables.2,HiveLineage.tableName'
|
||||
});
|
||||
}]);
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
<div data-ng-controller="LineageController">
|
||||
<svg></svg>
|
||||
</div>
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.system.notification').controller('NotificationController', ['$scope', 'NotificationService',
|
||||
function($scope, NotificationService) {
|
||||
|
||||
$scope.getNotifications = NotificationService.getAll;
|
||||
|
||||
$scope.close = function(notification) {
|
||||
NotificationService.close(notification);
|
||||
};
|
||||
}
|
||||
]);
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.system.notification', ['ui.router']).constant('ColorCoding', {
|
||||
info: 'success',
|
||||
error: 'danger'
|
||||
}).run(['$rootScope', 'NotificationService', function($rootScope, NotificationService) {
|
||||
$rootScope.$on('$locationChangeSuccess', function(evt, from, to) {
|
||||
if (from !== to) {
|
||||
NotificationService.reset();
|
||||
}
|
||||
});
|
||||
}]);
|
||||
|
|
@ -1,3 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.search', ['dgc.details']);
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
angular.module('dgc.search').factory('SearchResource', ['$resource', function($resource) {
|
||||
return $resource('/api/metadata/discovery/search/fulltext', {}, {
|
||||
search: {
|
||||
'method': 'GET',
|
||||
'responseType': 'json',
|
||||
'isArray': true,
|
||||
'transformResponse': function(data) {
|
||||
var results = [];
|
||||
angular.forEach(data && data.vertices, function(val) {
|
||||
results.push(val);
|
||||
});
|
||||
return results;
|
||||
}
|
||||
}
|
||||
});
|
||||
}]);
|
||||
|
|
@ -1,16 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
//Setting up route
|
||||
angular.module('dgc.search').config(['$stateProvider',
|
||||
function($stateProvider) {
|
||||
|
||||
// states for my app
|
||||
$stateProvider.state('search', {
|
||||
url: '/search',
|
||||
templateUrl: '/modules/search/views/search.html'
|
||||
}).state('search.results', {
|
||||
url: '/?',
|
||||
templateUrl: '/modules/search/views/searchResult.html'
|
||||
});
|
||||
}
|
||||
]);
|
||||
|
|
@ -1,7 +0,0 @@
|
|||
<h4>{{results.length}} results matching your search query "{{query}}" were found</h4>
|
||||
<ul class="list-unstyled">
|
||||
<li ng-repeat="result in results">
|
||||
<div data-ng-if="typeAvailable()" data-ng-include="'/modules/search/views/types/'+result.type.toLowerCase()+'.html'"></div>
|
||||
<div data-ng-if="!typeAvailable()" data-ng-include="'/modules/search/views/types/guid.html'"></div>
|
||||
</li>
|
||||
</ul>
|
||||
|
|
@ -1 +0,0 @@
|
|||
<a data-ui-sref="details({id:result.guid})">{{result.guid}}</a>
|
||||
|
|
@ -1 +0,0 @@
|
|||
<a data-ui-sref="details({id:result.guid})">{{result.guid}}</a>
|
||||
|
|
@ -1 +0,0 @@
|
|||
<a data-ui-sref="details({id:result.guid})">{{result["hive_table.name"]}}</a>
|
||||
|
|
@ -1,21 +0,0 @@
|
|||
<!doctype html>
|
||||
<html lang="en" xmlns="http://www.w3.org/1999/xhtml" itemscope="itemscope" itemtype="http://schema.org/Product">
|
||||
{% include 'includes/head.html' %}
|
||||
|
||||
<body>
|
||||
<header class="navbar navbar-static-top navbar-top" data-role="navigation">
|
||||
<div class="container" data-ng-include="'/modules/home/views/header.html'"></div>
|
||||
</header>
|
||||
<div class="content">
|
||||
<div data-ng-include="'/modules/notification/views/notifications.html'"></div>
|
||||
<div data-ui-view></div>
|
||||
</div>
|
||||
<footer class="footer navbar-bottom">
|
||||
<div class="container">
|
||||
<p align="right">Powered by<img src="modules/home/img/logo-green.png"></p>
|
||||
</div>
|
||||
</footer>
|
||||
{% include 'includes/foot.html' %}
|
||||
</body>
|
||||
|
||||
</html>
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
'use strict';
|
||||
|
||||
module.exports = function(app) {
|
||||
|
||||
app.get('/', function(req, res) {
|
||||
res.render('index', {
|
||||
renderErrors: {}, //req.flash('error')
|
||||
app: app.config.app
|
||||
});
|
||||
});
|
||||
};
|
||||
|
|
@ -1,4 +1,22 @@
|
|||
<!DOCTYPE html>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one
|
||||
~ or more contributor license agreements. See the NOTICE file
|
||||
~ distributed with this work for additional information
|
||||
~ regarding copyright ownership. The ASF licenses this file
|
||||
~ to you under the Apache License, Version 2.0 (the
|
||||
~ "License"); you may not use this file except in compliance
|
||||
~ with the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<html ng-app="DgcApp" lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
|
|
@ -1,3 +1,21 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
.bg-top2{
|
||||
background: #eeeded; /* for non-css3 browsers */
|
||||
|
||||
|
[Binary image assets in this change: 19 files whose width, height, and size are identical before and after (62 KiB, 7.1 KiB, 6.6 KiB, 5.2 KiB, 180 KiB, 8.6 KiB, 12 KiB, 38 KiB, 16 KiB, 9.0 KiB, 370 KiB, 141 KiB, 4.7 KiB, 2.1 KiB, 27 KiB, 20 KiB, 3.1 KiB, 154 KiB, 4.1 KiB).]
|
|
@ -1,4 +1,22 @@
|
|||
<!DOCTYPE html>
|
||||
<!--
|
||||
~ Licensed to the Apache Software Foundation (ASF) under one
|
||||
~ or more contributor license agreements. See the NOTICE file
|
||||
~ distributed with this work for additional information
|
||||
~ regarding copyright ownership. The ASF licenses this file
|
||||
~ to you under the Apache License, Version 2.0 (the
|
||||
~ "License"); you may not use this file except in compliance
|
||||
~ with the License. You may obtain a copy of the License at
|
||||
~
|
||||
~ http://www.apache.org/licenses/LICENSE-2.0
|
||||
~
|
||||
~ Unless required by applicable law or agreed to in writing, software
|
||||
~ distributed under the License is distributed on an "AS IS" BASIS,
|
||||
~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
~ See the License for the specific language governing permissions and
|
||||
~ limitations under the License.
|
||||
-->
|
||||
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
var DgcApp = angular.module('DgcApp', [
|
||||
'ngRoute',
|
||||
'DgcControllers',
|
||||
'ui.bootstrap'
|
||||
]);
|
||||
|
||||
DgcApp.config(['$routeProvider', function($routeProvider) {
|
||||
$routeProvider.
|
||||
when('/Search', {
|
||||
templateUrl: 'partials/search.html',
|
||||
controller: 'ListController'
|
||||
}).
|
||||
when('/Search/:Id', {
|
||||
templateUrl: 'partials/wiki.html',
|
||||
controller: 'DefinitionController'
|
||||
}).
|
||||
otherwise({
|
||||
redirectTo: '/Search'
|
||||
});
|
||||
}]);
|
||||
|
|
@ -1,3 +1,21 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
var DgcControllers = angular.module("DgcControllers", []);
|
||||
|
||||
DgcControllers.controller("ListController", ['$scope','$http', function($scope, $http)
|
||||