ATLAS-3939: added profile berkeley-solr
This commit is contained in:
parent
eccc37623e
commit
7690e51d10
|
|
@ -0,0 +1,20 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Maven
|
||||
elasticsearch
|
||||
hbase
|
||||
zk
|
||||
|
|
@ -150,6 +150,102 @@ atlas.graph.index.search.solr.wait-searcher=true
|
|||
</build>
|
||||
</profile>
|
||||
|
||||
<!-- profile to configure berkeley and solr with the distribution -->
|
||||
<profile>
    <id>berkeley-solr</id>
    <!-- disabled by default; enable explicitly with -Pberkeley-solr -->
    <activation>
        <activeByDefault>false</activeByDefault>
    </activation>
    <properties>
        <graph.storage.backend>berkeleyje</graph.storage.backend>
        <!-- literal property lines substituted into the generated configuration;
             kept unindented so no stray leading whitespace ends up in the file
             (TODO confirm the template preserves leading whitespace) -->
        <graph.storage.properties>#Berkeley
atlas.graph.storage.directory=${sys:atlas.home}/data/berkeley
atlas.graph.storage.lock.clean-expired=true
atlas.graph.storage.lock.expiry-time=500
atlas.graph.storage.lock.wait-time=300
        </graph.storage.properties>
        <graph.index.backend>solr</graph.index.backend>
        <!-- Solr cloud mode is active; http mode lines are provided commented-out -->
        <graph.index.properties>#Solr
#Solr cloud mode properties
atlas.graph.index.search.solr.mode=cloud
atlas.graph.index.search.solr.zookeeper-url=localhost:2181
atlas.graph.index.search.solr.zookeeper-connect-timeout=60000
atlas.graph.index.search.solr.zookeeper-session-timeout=60000
atlas.graph.index.search.solr.wait-searcher=true

#Solr http mode properties
#atlas.graph.index.search.solr.mode=http
#atlas.graph.index.search.solr.http-urls=http://localhost:8983/solr
        </graph.index.properties>
        <entity.repository.properties>atlas.EntityAuditRepository.impl=org.apache.atlas.repository.audit.NoopEntityAuditRepository</entity.repository.properties>

        <!-- only Solr is embedded in this profile; HBase and Cassandra are not bundled -->
        <cassandra.embedded>false</cassandra.embedded>
        <hbase.embedded>false</hbase.embedded>
        <solr.embedded>true</solr.embedded>

        <!-- Solr distribution is downloaded at build time (see antrun target below)
             and staged under target/solr -->
        <solr.dir>${project.build.directory}/solr</solr.dir>
        <solr.tar>https://archive.apache.org/dist/lucene/solr/${solr.version}/solr-${solr.version}.tgz</solr.tar>
        <solr.folder>solr-${solr.version}</solr.folder>

        <!-- ZooKeeper distribution is likewise downloaded and staged under target/zk -->
        <zk.dir>${project.build.directory}/zk</zk.dir>
        <zk.tar>https://archive.apache.org/dist/zookeeper/zookeeper-${zookeeper.version}/zookeeper-${zookeeper.version}.tar.gz</zk.tar>
        <zk.folder>zookeeper-${zookeeper.version}</zk.folder>
    </properties>
    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-antrun-plugin</artifactId>
                <version>1.7</version>
                <executions>
                    <!-- package solr -->
                    <execution>
                        <id>solr</id>
                        <phase>generate-resources</phase>
                        <goals>
                            <goal>run</goal>
                        </goals>
                        <configuration>
                            <target name="Download SOLR">
                                <mkdir dir="${solr.dir}" />
                                <!-- tarball is cached in the source tree (skipexisting/usetimestamp)
                                     so repeated builds do not re-download -->
                                <mkdir dir="${project.basedir}/solr" />
                                <get src="${solr.tar}" dest="${project.basedir}/solr/${solr.folder}.tgz" usetimestamp="true" verbose="true" skipexisting="true" />
                                <untar src="${project.basedir}/solr/${solr.folder}.tgz" dest="${project.build.directory}/solr.temp" compression="gzip" />
                                <!-- copy strips the versioned top-level folder so the layout is target/solr/** -->
                                <copy todir="${solr.dir}">
                                    <fileset dir="${project.build.directory}/solr.temp/${solr.folder}">
                                        <include name="**/*" />
                                    </fileset>
                                </copy>
                            </target>
                        </configuration>
                    </execution>
                    <!-- package zookeeper -->
                    <execution>
                        <id>zk</id>
                        <phase>generate-resources</phase>
                        <goals>
                            <goal>run</goal>
                        </goals>
                        <configuration>
                            <target name="Download zookeeper">
                                <mkdir dir="${zk.dir}" />
                                <!-- same download/cache/unpack pattern as the Solr execution above -->
                                <mkdir dir="${project.basedir}/zk" />
                                <get src="${zk.tar}" dest="${project.basedir}/zk/${zk.folder}.tgz" usetimestamp="true" verbose="true" skipexisting="true" />
                                <untar src="${project.basedir}/zk/${zk.folder}.tgz" dest="${project.build.directory}/zk.temp" compression="gzip" />
                                <copy todir="${zk.dir}">
                                    <fileset dir="${project.build.directory}/zk.temp/${zk.folder}">
                                        <include name="**/*" />
                                    </fileset>
                                </copy>
                            </target>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>
</profile>
|
||||
|
||||
<!-- profile to configure berkeley and elasticsearch with the distribution -->
|
||||
<profile>
|
||||
<id>berkeley-elasticsearch</id>
|
||||
|
|
|
|||
|
|
@ -406,6 +406,13 @@ def wait_for_shutdown(pid, msg, wait):
|
|||
|
||||
sys.stdout.write('\n')
|
||||
|
||||
def is_berkelydb(confdir):
    """Return True when the configured graph storage backend is BerkeleyDB ('berkeleyje').

    Reads STORAGE_BACKEND_CONF from the Atlas configuration file under *confdir*.
    (Name spelled 'berkelydb' by the original author; kept for caller compatibility.)
    """
    conf_file = os.path.join(confdir, CONF_FILE)
    backend = getConfig(conf_file, STORAGE_BACKEND_CONF)
    if backend is None:
        return False
    # whitespace around the property value is ignored
    return backend.strip() == 'berkeleyje'
|
||||
|
||||
def is_hbase(confdir):
|
||||
confFile = os.path.join(confdir, CONF_FILE)
|
||||
storageBackEnd = getConfig(confFile, STORAGE_BACKEND_CONF)
|
||||
|
|
@ -470,6 +477,9 @@ def is_elasticsearch_local():
|
|||
|
||||
return True
|
||||
|
||||
def is_zookeeper_local(confdir):
    """Return truthy when a local ZooKeeper should be managed alongside Atlas.

    That is the case for the BerkeleyDB storage backend and for embedded Cassandra.
    """
    # preserve the short-circuit `a or b` result exactly
    result = is_berkelydb(confdir)
    if not result:
        result = is_cassandra_local(confdir)
    return result
|
||||
|
||||
def get_solr_zk_url(confdir):
    """Return the Solr ZooKeeper URL (SOLR_INDEX_ZK_URL) from the Atlas config.

    :param confdir: directory containing the Atlas configuration file
    :return: the configured value, or whatever getConfig returns when unset
    """
    # Fix: the original reassigned the `confdir` parameter to hold the file
    # path, shadowing its meaning; use a distinct local name instead.
    conf_file = os.path.join(confdir, CONF_FILE)
    return getConfig(conf_file, SOLR_INDEX_ZK_URL)
|
||||
|
|
|
|||
|
|
@ -122,6 +122,8 @@ def main():
|
|||
if mc.is_cassandra_local(confdir):
|
||||
print "Cassandra embedded configured."
|
||||
mc.configure_cassandra(atlas_home)
|
||||
|
||||
if mc.is_zookeeper_local(confdir):
|
||||
mc.configure_zookeeper(atlas_home)
|
||||
mc.run_zookeeper(mc.zookeeperBinDir(atlas_home), "start", logdir)
|
||||
print "zookeeper started."
|
||||
|
|
|
|||
|
|
@ -68,10 +68,9 @@ def main():
|
|||
|
||||
# stop solr
|
||||
if mc.is_solr_local(confdir):
|
||||
|
||||
mc.run_solr(mc.solrBinDir(atlas_home), "stop", None, mc.solrPort(), None, True)
|
||||
|
||||
if mc.is_cassandra_local(confdir):
|
||||
if mc.is_zookeeper_local(confdir):
|
||||
mc.run_zookeeper(mc.zookeeperBinDir(atlas_home), "stop")
|
||||
|
||||
# stop elasticsearch
|
||||
|
|
|
|||
|
|
@ -55,6 +55,17 @@ Using the embedded-hbase-solr profile will configure Apache Atlas so that an Apa
|
|||
|
||||
>NOTE: This distribution profile is only intended to be used for single node development not in production.
|
||||
|
||||
### Packaging Apache Atlas with BerkeleyDB & Apache Solr
|
||||
To create an Apache Atlas package that includes BerkeleyDB and Apache Solr, build with the berkeley-solr profile as shown below:
|
||||
|
||||
<SyntaxHighlighter wrapLines={true} language="powershell" style={theme.dark}>
|
||||
{`mvn clean -DskipTests package -Pdist,berkeley-solr`}
|
||||
</SyntaxHighlighter>
|
||||
|
||||
Using the berkeley-solr profile will configure Apache Atlas so that instances of Apache Solr and Apache Zookeeper will be started and stopped along with the Apache Atlas server.
|
||||
|
||||
>NOTE: This distribution profile is only intended to be used for single-node development, not in production.
|
||||
|
||||
### Packaging Apache Atlas with embedded Apache Cassandra & Apache Solr
|
||||
To create Apache Atlas package that includes Apache Cassandra and Apache Solr, build with the embedded-cassandra-solr profile as shown below:
|
||||
|
||||
|
|
|
|||
|
|
@ -30,6 +30,14 @@ export MANAGE_LOCAL_SOLR=true
|
|||
bin/atlas_start.py`}
|
||||
</SyntaxHighlighter>
|
||||
|
||||
#### Running Apache Atlas with BerkeleyDB & Apache Solr
|
||||
To run Apache Atlas with BerkeleyDB and local instances of Apache Solr and Apache Zookeeper, run the following commands:
|
||||
|
||||
<SyntaxHighlighter wrapLines={true} language="powershell" style={theme.dark}>
|
||||
{`export MANAGE_LOCAL_SOLR=true
|
||||
bin/atlas_start.py`}
|
||||
</SyntaxHighlighter>
|
||||
|
||||
#### Using Apache Atlas
|
||||
|
||||
* To verify if Apache Atlas server is up and running, run curl command as shown below:
|
||||
|
|
|
|||
|
|
@ -58,6 +58,16 @@
|
|||
</properties>
|
||||
</profile>
|
||||
|
||||
<profile>
    <id>berkeley-solr</id>
    <!-- disabled by default; activated together with the distribution's berkeley-solr profile -->
    <activation>
        <activeByDefault>false</activeByDefault>
    </activation>
    <properties>
        <!-- NOTE(review): presumably consumed by the war/assembly to drop dom4j
             from WEB-INF/lib for this profile — confirm against the plugin that
             reads packages.to.exclude -->
        <packages.to.exclude>WEB-INF/lib/dom4j-*.jar</packages.to.exclude>
    </properties>
</profile>
|
||||
|
||||
<profile>
|
||||
<id>Windows</id>
|
||||
<activation>
|
||||
|
|
|
|||
Loading…
Reference in New Issue