diff --git a/README.md b/README.md
index 7198173b25d9e6bbe1a77c6a88156826c59914d7..d104ad7e407083aaed33c1a3d3d9559a892cd4f3 100644
--- a/README.md
+++ b/README.md
@@ -9,6 +9,7 @@ Proof of concept project within the Microservice initiative, the Foundation look
 1. Installed and configured JDK 1.8+
 1. Apache Maven 3.5.3+
 1. Running instance of MariaDB (Docker instructions below)
+1. Running instance of Apache Solr (version 5.5.5 currently supported)
 1. GraalVM (for compilation of native-image)
 
 ### Optional requirements
@@ -25,6 +26,8 @@ This section will outline configuration values that need to be checked and updat
 1. Create a copy of `./config/sample.secret.properties` named `secret.properties` in a location of your choosing on the system, with the config folder in the project root being default configured. If changed, keep this path as it is needed to start the environment later.
 1. Update `quarkus.datasource.password` to be the password for the MariaDB user in the newly created `secret.properties` file.
 1. Log in to the MariaDB instance and ensure that the database defined in the JDBC string exists. By default, the name of the database is `mpc_db`. This database can be created using the command `CREATE DATABASE mpc_db;`. 
+1. When using the Solr search engine, a couple of properties need to be added to the properties and `secret.properties` files: the Solr host and the Solr core. The host property (`eclipse.solr.host`) should be the root URL of your Solr instance (e.g. http://localhost:8093/solr) to allow connections for search indexing. The core property (`eclipse.solr.core`) should be the name of the core that will store the marketplace indexes. If a core does not exist yet, create one through the admin panel of the Solr server and update the core value if needed. A sample of these properties is shown after this list.
+1. To enable the core to work with this application, the core's configuration on the Solr server needs to be updated. Copy the contents of `./config/mpc_dev` into your core's configuration folder; an example path for this folder is `/opt/solr/server/solr/marketplace`, though this may change based on how the server is installed and configured. Ensure that the copied files have the same ownership as the other files in this location, otherwise the Solr core may not work as intended (see the shell sketch after this list).
 1. By default, this application binds to port 8090. If port 8090 is occupied by another service, the value of `quarkus.http.port` can be modified to designate a different port. 
 1. In order to protect endpoints for write operations, an introspection endpoint has been configured to validate OAuth tokens. This introspection endpoint should match the requirements set out by the OAuth group for such endpoints. The URL should be set in `quarkus.oauth2.introspection-url`.  
     * A property meant for development purposes has been added to this stack to bypass OAuth calls. If set, all calls will return as if authenticated as an admin. The property and value `eclipse.oauth.override=true` can be set in the `application.properties` file to enable this feature.
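+
+For example, assuming the host URL and example core path shown above, with a core named `marketplace` (placeholder values; substitute the URL and core name of your own Solr installation), the two entries would look like this:
+
+```properties
+# Placeholder values - point these at your own Solr server and core
+eclipse.solr.host=http://localhost:8093/solr
+eclipse.solr.core=marketplace
+```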
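+
+The copy step above can be sketched as follows, assuming a Solr installation under `/opt/solr`, a core named `marketplace`, and a `solr` service user; adjust the paths, core name, and user to match your environment:
+
+```bash
+# Copy the provided core configuration into the core's instance directory (example path)
+cp -r ./config/mpc_dev/* /opt/solr/server/solr/marketplace/
+# Match the ownership of the existing core files so Solr can read the new configuration
+chown -R solr:solr /opt/solr/server/solr/marketplace/
+# Reload the core so the updated configuration is picked up
+curl "http://localhost:8093/solr/admin/cores?action=RELOAD&core=marketplace"
+```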
diff --git a/config/mpc_dev/conf/elevate.xml b/config/mpc_dev/conf/elevate.xml
new file mode 100755
index 0000000000000000000000000000000000000000..71ea0006cf12153977aa618d36761e7141071d31
--- /dev/null
+++ b/config/mpc_dev/conf/elevate.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+
+<!--
+ This file allows you to boost certain search items to the top of search
+ results. You can find out an item's ID by searching directly on the Solr
+ server. The item IDs are in general constructed as follows:
+   Search API:
+     $document->id = $index_id . '-' . $item_id;
+   Apache Solr Search Integration:
+     $document->id = $site_hash . '/' . $entity_type . '/' . $entity->id;
+
+ If you want this file to be automatically re-loaded when a Solr commit takes
+ place (e.g., if you have an automatic script active which updates elevate.xml
+ according to newly-indexed data), place it into Solr's data/ directory.
+ Otherwise, place it with the other configuration files into the conf/
+ directory.
+
+ See http://wiki.apache.org/solr/QueryElevationComponent for more information.
+-->
+
+<elevate>
+<!-- Example for ranking the node #1 first in searches for "example query": -->
+<!--
+ <query text="example query">
+  <doc id="default_node_index-1" />
+  <doc id="7v3jsc/node/1" />
+ </query>
+-->
+<!-- Multiple <query> elements can be specified, contained in one <elevate>. -->
+<!-- <query text="...">...</query> -->
+</elevate>
diff --git a/config/mpc_dev/conf/lang/stopwords_en.txt b/config/mpc_dev/conf/lang/stopwords_en.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2c164c0b2a1e57254b60ad2c0b1d113bad743911
--- /dev/null
+++ b/config/mpc_dev/conf/lang/stopwords_en.txt
@@ -0,0 +1,54 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# a couple of test stopwords to test that the words are really being
+# configured from this file:
+stopworda
+stopwordb
+
+# Standard english stop words taken from Lucene's StopAnalyzer
+a
+an
+and
+are
+as
+at
+be
+but
+by
+for
+if
+in
+into
+is
+it
+no
+not
+of
+on
+or
+such
+that
+the
+their
+then
+there
+these
+they
+this
+to
+was
+will
+with
diff --git a/config/mpc_dev/conf/mapping-ISOLatin1Accent.txt b/config/mpc_dev/conf/mapping-ISOLatin1Accent.txt
new file mode 100755
index 0000000000000000000000000000000000000000..ede7742581be89111d98d43192c6589226456b1a
--- /dev/null
+++ b/config/mpc_dev/conf/mapping-ISOLatin1Accent.txt
@@ -0,0 +1,246 @@
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Syntax:
+#   "source" => "target"
+#     "source".length() > 0 (source cannot be empty.)
+#     "target".length() >= 0 (target can be empty.)
+
+# example:
+#   "À" => "A"
+#   "\u00C0" => "A"
+#   "\u00C0" => "\u0041"
+#   "ß" => "ss"
+#   "\t" => " "
+#   "\n" => ""
+
+# À => A
+"\u00C0" => "A"
+
+# Á => A
+"\u00C1" => "A"
+
+# Â => A
+"\u00C2" => "A"
+
+# Ã => A
+"\u00C3" => "A"
+
+# Ä => A
+"\u00C4" => "A"
+
+# Å => A
+"\u00C5" => "A"
+
+# Æ => AE
+"\u00C6" => "AE"
+
+# Ç => C
+"\u00C7" => "C"
+
+# È => E
+"\u00C8" => "E"
+
+# É => E
+"\u00C9" => "E"
+
+# Ê => E
+"\u00CA" => "E"
+
+# Ë => E
+"\u00CB" => "E"
+
+# Ì => I
+"\u00CC" => "I"
+
+# Í => I
+"\u00CD" => "I"
+
+# Î => I
+"\u00CE" => "I"
+
+# Ï => I
+"\u00CF" => "I"
+
+# IJ => IJ
+"\u0132" => "IJ"
+
+# Ð => D
+"\u00D0" => "D"
+
+# Ñ => N
+"\u00D1" => "N"
+
+# Ò => O
+"\u00D2" => "O"
+
+# Ó => O
+"\u00D3" => "O"
+
+# Ô => O
+"\u00D4" => "O"
+
+# Õ => O
+"\u00D5" => "O"
+
+# Ö => O
+"\u00D6" => "O"
+
+# Ø => O
+"\u00D8" => "O"
+
+# Œ => OE
+"\u0152" => "OE"
+
+# Þ
+"\u00DE" => "TH"
+
+# Ù => U
+"\u00D9" => "U"
+
+# Ú => U
+"\u00DA" => "U"
+
+# Û => U
+"\u00DB" => "U"
+
+# Ü => U
+"\u00DC" => "U"
+
+# Ý => Y
+"\u00DD" => "Y"
+
+# Ÿ => Y
+"\u0178" => "Y"
+
+# à => a
+"\u00E0" => "a"
+
+# á => a
+"\u00E1" => "a"
+
+# â => a
+"\u00E2" => "a"
+
+# ã => a
+"\u00E3" => "a"
+
+# ä => a
+"\u00E4" => "a"
+
+# å => a
+"\u00E5" => "a"
+
+# æ => ae
+"\u00E6" => "ae"
+
+# ç => c
+"\u00E7" => "c"
+
+# è => e
+"\u00E8" => "e"
+
+# é => e
+"\u00E9" => "e"
+
+# ê => e
+"\u00EA" => "e"
+
+# ë => e
+"\u00EB" => "e"
+
+# ì => i
+"\u00EC" => "i"
+
+# í => i
+"\u00ED" => "i"
+
+# î => i
+"\u00EE" => "i"
+
+# ï => i
+"\u00EF" => "i"
+
+# ij => ij
+"\u0133" => "ij"
+
+# ð => d
+"\u00F0" => "d"
+
+# ñ => n
+"\u00F1" => "n"
+
+# ò => o
+"\u00F2" => "o"
+
+# ó => o
+"\u00F3" => "o"
+
+# ô => o
+"\u00F4" => "o"
+
+# õ => o
+"\u00F5" => "o"
+
+# ö => o
+"\u00F6" => "o"
+
+# ø => o
+"\u00F8" => "o"
+
+# œ => oe
+"\u0153" => "oe"
+
+# ß => ss
+"\u00DF" => "ss"
+
+# þ => th
+"\u00FE" => "th"
+
+# ù => u
+"\u00F9" => "u"
+
+# ú => u
+"\u00FA" => "u"
+
+# û => u
+"\u00FB" => "u"
+
+# ü => u
+"\u00FC" => "u"
+
+# ý => y
+"\u00FD" => "y"
+
+# ÿ => y
+"\u00FF" => "y"
+
+# ff => ff
+"\uFB00" => "ff"
+
+# fi => fi
+"\uFB01" => "fi"
+
+# fl => fl
+"\uFB02" => "fl"
+
+# ffi => ffi
+"\uFB03" => "ffi"
+
+# ffl => ffl
+"\uFB04" => "ffl"
+
+# ſt => ft
+"\uFB05" => "ft"
+
+# st => st
+"\uFB06" => "st"
diff --git a/config/mpc_dev/conf/protwords.txt b/config/mpc_dev/conf/protwords.txt
new file mode 100755
index 0000000000000000000000000000000000000000..cda858149750acd3e5f7cf00c8deecb167fa6193
--- /dev/null
+++ b/config/mpc_dev/conf/protwords.txt
@@ -0,0 +1,7 @@
+#-----------------------------------------------------------------------
+# This file blocks words from being operated on by the stemmer and word delimiter.
+&amp;
+&lt;
+&gt;
+&#039;
+&quot;
diff --git a/config/mpc_dev/conf/schema.xml b/config/mpc_dev/conf/schema.xml
new file mode 100644
index 0000000000000000000000000000000000000000..e2f92fd154018ab0c3a0266e8661e39680056524
--- /dev/null
+++ b/config/mpc_dev/conf/schema.xml
@@ -0,0 +1,341 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<schema name="mpc_data" version="1.6">
+
+   <!-- If you remove this field, you must _also_ disable the update log in solrconfig.xml
+      or Solr won't start. _version_ and update log are required for SolrCloud
+   -->
+	<field name="_version_" type="long" indexed="true" stored="true" />
+   
+   <!-- points to the root document of a block of nested documents. Required for nested
+      document support, may be removed otherwise
+   -->
+	<field name="_root_" type="string" indexed="true" stored="false" />
+
+   <!-- Only remove the "id" field if you have a very good reason to. While not strictly
+     required, it is highly recommended. A <uniqueKey> is present in almost all Solr 
+     installations. See the <uniqueKey> declaration below where <uniqueKey> is set to "id".
+     Do NOT change the type and apply index-time analysis to the <uniqueKey> as it will likely 
+     make routing in SolrCloud and document replacement in general fail. Limited _query_ time
+     analysis is possible as long as the indexing process is guaranteed to index the term
+     in a compatible way. Any analysis applied to the <uniqueKey> should _not_ produce multiple
+     tokens
+   -->
+	<field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false" /> 
+
+   <!-- Dynamic field definitions allow using convention over configuration
+       for fields via the specification of patterns to match field names. 
+       EXAMPLE:  name="*_i" will match any field ending in _i (like myid_i, z_i)
+       RESTRICTION: the glob-like pattern in the name attribute must have
+       a "*" only at the start or the end.  -->
+
+	<dynamicField name="*_i" type="int" indexed="true" stored="true" />
+	<dynamicField name="*_is" type="int" indexed="true" stored="true" multiValued="true" />
+	<dynamicField name="*_s" type="string" indexed="true" stored="true" />
+	<dynamicField name="*_ss" type="string" indexed="true" stored="true" multiValued="true" />
+	<dynamicField name="*_l" type="long" indexed="true" stored="true" />
+	<dynamicField name="*_ls" type="long" indexed="true" stored="true" multiValued="true" />
+	<dynamicField name="*_t" type="text_general" indexed="true" stored="true" />
+	<dynamicField name="*_txt" type="text_general" indexed="true" stored="true" multiValued="true" />
+	<dynamicField name="*_en" type="text_en" indexed="true" stored="true" multiValued="true" />
+	<dynamicField name="*_b" type="boolean" indexed="true" stored="true" />
+	<dynamicField name="*_bs" type="boolean" indexed="true" stored="true" multiValued="true" />
+	<dynamicField name="*_f" type="float" indexed="true" stored="true" />
+	<dynamicField name="*_fs" type="float" indexed="true" stored="true" multiValued="true" />
+	<dynamicField name="*_d" type="double" indexed="true" stored="true" />
+	<dynamicField name="*_ds" type="double" indexed="true" stored="true" multiValued="true" />
+
+   <!-- Type used to index the lat and lon components for the "location" FieldType -->
+	<dynamicField name="*_coordinate" type="tdouble" indexed="true" stored="false" />
+
+	<dynamicField name="*_dt" type="date" indexed="true" stored="true" />
+	<dynamicField name="*_dts" type="date" indexed="true" stored="true" multiValued="true" />
+	<dynamicField name="*_p" type="location" indexed="true" stored="true" />
+
+   <!-- some trie-coded dynamic fields for faster range queries -->
+	<dynamicField name="*_ti" type="tint" indexed="true" stored="true" />
+	<dynamicField name="*_tl" type="tlong" indexed="true" stored="true" />
+	<dynamicField name="*_tf" type="tfloat" indexed="true" stored="true" />
+	<dynamicField name="*_td" type="tdouble" indexed="true" stored="true" />
+	<dynamicField name="*_tdt" type="tdate" indexed="true" stored="true" />
+
+	<dynamicField name="ignored_*" type="ignored" multiValued="true" />
+	<dynamicField name="*" type="ignored" multiValued="true" />
+	<dynamicField name="random_*" type="random" />
+
+ <!-- Field to use to determine and enforce document uniqueness. 
+      Unless this field is marked with required="false", it will be a required field
+   -->
+	<uniqueKey>id</uniqueKey>
+  
+    <!-- field type definitions. The "name" attribute is
+       just a label to be used by field definitions.  The "class"
+       attribute and any other attributes determine the real
+       behavior of the fieldType.
+         Class names starting with "solr" refer to java classes in a
+       standard package such as org.apache.solr.analysis
+    -->
+
+    <!-- The StrField type is not analyzed, but indexed/stored verbatim.
+       It supports doc values but in that case the field needs to be
+       single-valued and either required or have a default value.
+      -->
+	<fieldType name="string" class="solr.StrField" sortMissingLast="true" />
+
+    <!-- boolean type: "true" or "false" -->
+	<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true" />
+
+    <!-- The sortMissingLast and sortMissingFirst attributes are optional and are
+         currently supported on types that are sorted internally as strings
+         and on numeric types.
+       This includes "string","boolean", and, as of 3.5 (and 4.x),
+       int, float, long, date, double, including the "Trie" variants.
+       - If sortMissingLast="true", then a sort on this field will cause documents
+         without the field to come after documents with the field,
+         regardless of the requested sort order (asc or desc).
+       - If sortMissingFirst="true", then a sort on this field will cause documents
+         without the field to come before documents with the field,
+         regardless of the requested sort order.
+       - If sortMissingLast="false" and sortMissingFirst="false" (the default),
+         then default lucene sorting will be used which places docs without the
+         field first in an ascending sort and last in a descending sort.
+    -->    
+
+    <!--
+      Default numeric field types. For faster range queries, consider the tint/tfloat/tlong/tdouble types.
+
+      These fields support doc values, but they require the field to be
+      single-valued and either be required or have a default value.
+    -->
+	<fieldType name="int" class="solr.TrieIntField" precisionStep="0" positionIncrementGap="0" />
+	<fieldType name="float" class="solr.TrieFloatField" precisionStep="0" positionIncrementGap="0" />
+	<fieldType name="long" class="solr.TrieLongField" precisionStep="0" positionIncrementGap="0" />
+	<fieldType name="double" class="solr.TrieDoubleField" precisionStep="0" positionIncrementGap="0" />
+
+    <!--
+     Numeric field types that index each value at various levels of precision
+     to accelerate range queries when the number of values between the range
+     endpoints is large. See the javadoc for NumericRangeQuery for internal
+     implementation details.
+
+     Smaller precisionStep values (specified in bits) will lead to more tokens
+     indexed per value, slightly larger index size, and faster range queries.
+     A precisionStep of 0 disables indexing at different precision levels.
+    -->
+	<fieldType name="tint" class="solr.TrieIntField" precisionStep="8" positionIncrementGap="0" />
+	<fieldType name="tfloat" class="solr.TrieFloatField" precisionStep="8" positionIncrementGap="0" />
+	<fieldType name="tlong" class="solr.TrieLongField" precisionStep="8" positionIncrementGap="0" />
+	<fieldType name="tdouble" class="solr.TrieDoubleField" precisionStep="8" positionIncrementGap="0" />
+
+    <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
+         is a more restricted form of the canonical representation of dateTime
+         http://www.w3.org/TR/xmlschema-2/#dateTime    
+         The trailing "Z" designates UTC time and is mandatory.
+         Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z
+         All other components are mandatory.
+
+         Expressions can also be used to denote calculations that should be
+         performed relative to "NOW" to determine the value, ie...
+
+               NOW/HOUR
+                  ... Round to the start of the current hour
+               NOW-1DAY
+                  ... Exactly 1 day prior to now
+               NOW/DAY+6MONTHS+3DAYS
+                  ... 6 months and 3 days in the future from the start of
+                      the current day
+                      
+         Consult the TrieDateField javadocs for more information.
+
+         Note: For faster range queries, consider the tdate type
+      -->
+	<fieldType name="date" class="solr.TrieDateField" precisionStep="0" positionIncrementGap="0" />
+
+    <!-- A Trie based date field for faster date range queries and date faceting. -->
+	<fieldType name="tdate" class="solr.TrieDateField" precisionStep="6" positionIncrementGap="0" />
+
+
+    <!--Binary data type. The data should be sent/retrieved in as Base64 encoded Strings -->
+	<fieldType name="binary" class="solr.BinaryField" />
+
+    <!-- The "RandomSortField" is not used to store or search any
+         data.  You can declare fields of this type in your schema
+         to generate pseudo-random orderings of your docs for sorting 
+         or function purposes.  The ordering is generated based on the field
+         name and the version of the index. As long as the index version
+         remains unchanged, and the same field name is reused,
+         the ordering of the docs will be consistent.  
+         If you want different pseudo-random orderings of documents,
+         for the same version of the index, use a dynamicField and
+         change the field name in the request.
+     -->
+	<fieldType name="random" class="solr.RandomSortField" indexed="true" />
+
+    <!-- solr.TextField allows the specification of custom text analyzers
+         specified as a tokenizer and a list of token filters. Different
+         analyzers may be specified for indexing and querying.
+
+         The optional positionIncrementGap puts space between multiple fields of
+         this type on the same document, with the purpose of preventing false phrase
+         matching across fields.
+
+         For more info on customizing your analyzer chain, please see
+         http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters
+     -->
+    <!-- A text field that only splits on whitespace for exact matching of words -->
+	<dynamicField name="*_ws" type="text_ws" indexed="true" stored="true" />
+	<fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
+		<analyzer>
+			<tokenizer class="solr.WhitespaceTokenizerFactory" />
+		</analyzer>
+	</fieldType>
+
+    <!-- A general text field that has reasonable, generic
+         cross-language defaults: it tokenizes with StandardTokenizer,
+   removes stop words from case-insensitive "stopwords.txt"
+   (empty by default), and down cases.  At query time only, it
+   also applies synonyms. -->
+	<dynamicField name="*_txt_gen" type="text_general" indexed="true" stored="false" />
+	<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100">
+		<analyzer type="index">
+			<tokenizer class="solr.StandardTokenizerFactory" />
+			<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
+			<filter class="solr.LowerCaseFilterFactory" />
+		</analyzer>
+		<analyzer type="query">
+			<tokenizer class="solr.StandardTokenizerFactory" />
+			<filter class="solr.StopFilterFactory" ignoreCase="true" words="stopwords.txt" />
+			<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true" />
+			<filter class="solr.LowerCaseFilterFactory" />
+		</analyzer>
+	</fieldType>
+
+    <!-- A text field with defaults appropriate for English: it
+         tokenizes with StandardTokenizer, removes English stop words
+         (lang/stopwords_en.txt), down cases, protects words from protwords.txt, and
+         finally applies Porter's stemming.  The query time analyzer
+         also applies synonyms from synonyms.txt. -->
+    <dynamicField name="*_txt_en" type="text_en_splitting" indexed="true" stored="false" />
+	<fieldType name="text_en" class="solr.TextField" positionIncrementGap="100">
+		<analyzer type="index">
+			<tokenizer class="solr.StandardTokenizerFactory" />
+			<filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter class="solr.LowerCaseFilterFactory" />
+			<filter class="solr.EnglishPossessiveFilterFactory" />
+			<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt" />
+			<filter class="solr.PorterStemFilterFactory" />
+		</analyzer>
+		<analyzer type="query">
+			<tokenizer class="solr.StandardTokenizerFactory" />
+			<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true" />
+			<filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter class="solr.LowerCaseFilterFactory" />
+			<filter class="solr.EnglishPossessiveFilterFactory" />
+			<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt" />
+			<filter class="solr.PorterStemFilterFactory" />
+		</analyzer>
+	</fieldType>
+
+    <!-- A text field with defaults appropriate for English, plus
+   aggressive word-splitting and autophrase features enabled.
+   This field is just like text_en, except it adds
+   WordDelimiterFilter to enable splitting and matching of
+   words on case-change, alpha numeric boundaries, and
+   non-alphanumeric chars.  This means certain compound word
+   cases will work, for example query "wi fi" will match
+   document "WiFi" or "wi-fi".
+        -->
+	<dynamicField name="*_txt_en_split" type="text_en_splitting" indexed="true" stored="false" />
+	<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100"
+		autoGeneratePhraseQueries="true">
+		<analyzer type="index">
+			<tokenizer class="solr.WhitespaceTokenizerFactory" />
+        <!-- in this example, we will only use synonyms at query time
+        <filter class="solr.SynonymFilterFactory" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+        -->
+        <!-- Case insensitive stop word removal.
+        -->
+			<filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1"
+				catenateWords="1" catenateNumbers="1" catenateAll="0" splitOnCaseChange="1" />
+			<filter class="solr.LowerCaseFilterFactory" />
+			<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt" />
+			<filter class="solr.PorterStemFilterFactory" />
+		</analyzer>
+		<analyzer type="query">
+			<tokenizer class="solr.WhitespaceTokenizerFactory" />
+			<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="true" />
+			<filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1"
+				catenateWords="0" catenateNumbers="0" catenateAll="0" splitOnCaseChange="1" />
+			<filter class="solr.LowerCaseFilterFactory" />
+			<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt" />
+			<filter class="solr.PorterStemFilterFactory" />
+		</analyzer>
+	</fieldType>
+
+    <!-- Less flexible matching, but fewer false matches.  Probably not ideal for product names,
+         but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
+	<dynamicField name="*_txt_en_split_tight" type="text_en_splitting_tight" indexed="true" stored="false" />
+	<fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100"
+		autoGeneratePhraseQueries="true">
+		<analyzer>
+			<tokenizer class="solr.WhitespaceTokenizerFactory" />
+			<filter class="solr.SynonymFilterFactory" synonyms="synonyms.txt" ignoreCase="true" expand="false" />
+			<filter class="solr.StopFilterFactory" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter class="solr.WordDelimiterFilterFactory" generateWordParts="0" generateNumberParts="0"
+				catenateWords="1" catenateNumbers="1" catenateAll="0" />
+			<filter class="solr.LowerCaseFilterFactory" />
+			<filter class="solr.KeywordMarkerFilterFactory" protected="protwords.txt" />
+			<filter class="solr.EnglishMinimalStemFilterFactory" />
+        <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+             possible with WordDelimiterFilter in conjunction with stemming. -->
+			<filter class="solr.RemoveDuplicatesTokenFilterFactory" />
+		</analyzer>
+	</fieldType>
+
+    <!-- lowercases the entire field value, keeping it as a single token.  -->
+	<dynamicField name="*_s_lower" type="lowercase" indexed="true" stored="false" />
+	<fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
+		<analyzer>
+			<tokenizer class="solr.KeywordTokenizerFactory" />
+			<filter class="solr.LowerCaseFilterFactory" />
+		</analyzer>
+	</fieldType>
+
+    <!-- since fields of this type are by default not stored or indexed,
+         any data added to them will be ignored outright.  -->
+	<fieldType name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
+
+    <!-- This point type indexes the coordinates as separate fields (subFields)
+      If subFieldType is defined, it references a type, and a dynamic field
+      definition is created matching *___<typename>.  Alternately, if 
+      subFieldSuffix is defined, that is used to create the subFields.
+      Example: if subFieldType="double", then the coordinates would be
+        indexed in fields myloc_0___double,myloc_1___double.
+      Example: if subFieldSuffix="_d" then the coordinates would be indexed
+        in fields myloc_0_d,myloc_1_d
+      The subFields are an implementation detail of the fieldType, and end
+      users normally should not need to know about them.
+     -->
+	<fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d" />
+
+    <!-- A specialized field for geospatial search. If indexed, this fieldType must not be multivalued. -->
+	<fieldType name="location" class="solr.LatLonType" subFieldSuffix="_coordinate" />
+</schema>
\ No newline at end of file
diff --git a/config/mpc_dev/conf/schema_8.x.xml b/config/mpc_dev/conf/schema_8.x.xml
new file mode 100755
index 0000000000000000000000000000000000000000..31368dd47ab3af2048890c6dbff176aa50605472
--- /dev/null
+++ b/config/mpc_dev/conf/schema_8.x.xml
@@ -0,0 +1,446 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+ <!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+ 
+      http://www.apache.org/licenses/LICENSE-2.0
+ 
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+ -->
+ 
+ <!--
+ 
+  This example schema is the recommended starting point for users.
+  It should be kept correct and concise, usable out-of-the-box.
+ 
+ 
+  For more information, on how to customize this file, please see
+  http://lucene.apache.org/solr/guide/documents-fields-and-schema-design.html
+ 
+  PERFORMANCE NOTE: this schema includes many optional features and should not
+  be used for benchmarking.  To improve performance one could
+   - set stored="false" for all fields possible (esp large fields) when you
+     only need to search on the field but don't need to return the original
+     value.
+   - set indexed="false" if you don't need to search on the field, but only
+     return the field as a result of searching on other indexed fields.
+   - remove all unneeded copyField statements
+   - for best index size and searching performance, set "index" to false
+     for all general text fields, use copyField to copy them to the
+     catchall "text" field, and use that for searching.
+ -->
+
+<schema name="default-config" version="1.6">
+     <!-- attribute "name" is the name of this schema and is only used for display purposes.
+        version="x.y" is Solr's version number for the schema syntax and 
+        semantics.  It should not normally be changed by applications.
+ 
+        1.0: multiValued attribute did not exist, all fields are multiValued 
+             by nature
+        1.1: multiValued attribute introduced, false by default 
+        1.2: omitTermFreqAndPositions attribute introduced, true by default 
+             except for text fields.
+        1.3: removed optional field compress feature
+        1.4: autoGeneratePhraseQueries attribute introduced to drive QueryParser
+             behavior when a single string produces multiple tokens.  Defaults 
+             to off for version >= 1.4
+        1.5: omitNorms defaults to true for primitive field types 
+             (int, float, boolean, string...)
+        1.6: useDocValuesAsStored defaults to true.
+     -->
+ 
+     <!-- Valid attributes for fields:
+      name: mandatory - the name for the field
+      type: mandatory - the name of a field type from the 
+        fieldTypes section
+      indexed: true if this field should be indexed (searchable or sortable)
+      stored: true if this field should be retrievable
+      docValues: true if this field should have doc values. Doc Values is
+        recommended (required, if you are using *Point fields) for faceting,
+        grouping, sorting and function queries. Doc Values will make the index
+        faster to load, more NRT-friendly and more memory-efficient. 
+        They are currently only supported by StrField, UUIDField, all 
+        *PointFields, and depending on the field type, they might require
+        the field to be single-valued, be required or have a default value
+        (check the documentation of the field type you're interested in for
+        more information)
+      multiValued: true if this field may contain multiple values per document
+      omitNorms: (expert) set to true to omit the norms associated with
+        this field (this disables length normalization and index-time
+        boosting for the field, and saves some memory).  Only full-text
+        fields or fields that need an index-time boost need norms.
+        Norms are omitted for primitive (non-analyzed) types by default.
+      termVectors: [false] set to true to store the term vector for a
+        given field.
+        When using MoreLikeThis, fields used for similarity should be
+        stored for best performance.
+      termPositions: Store position information with the term vector.  
+        This will increase storage costs.
+      termOffsets: Store offset information with the term vector. This 
+        will increase storage costs.
+      required: The field is required.  It will throw an error if the
+        value does not exist
+      default: a value that should be used if no value is specified
+        when adding a document.
+     -->
+ 
+     <!-- field names should consist of alphanumeric or underscore characters only and
+       not start with a digit.  This is not currently strictly enforced,
+       but other field names will not have first class support from all components
+       and back compatibility is not guaranteed.  Names with both leading and
+       trailing underscores (e.g. _version_) are reserved.
+     -->
+ 
+     <!-- In this _default configset, only four fields are pre-declared:
+          id, _version_, and _text_ and _root_. All other fields will be type guessed and added via the
+          "add-unknown-fields-to-the-schema" update request processor chain declared in solrconfig.xml.
+          
+          Note that many dynamic fields are also defined - you can use them to specify a 
+          field's type via field naming conventions - see below.
+   
+          WARNING: The _text_ catch-all field will significantly increase your index size.
+          If you don't need it, consider removing it and the corresponding copyField directive.
+     -->
+
+	<field name="id" type="string" indexed="true" stored="true" required="true" multiValued="false" />
+     <!-- docValues are enabled by default for long type so we don't need to index the version field  -->
+	<field name="_version_" type="plong" indexed="false" stored="false" />
+ 
+     <!-- Dynamic field definitions allow using convention over configuration
+        for fields via the specification of patterns to match field names.
+        EXAMPLE:  name="*_i" will match any field ending in _i (like myid_i, z_i)
+        RESTRICTION: the glob-like pattern in the name attribute must have a "*" only at the start or the end.  -->
+
+	<dynamicField name="*_i" type="pint" indexed="true" stored="true" />
+	<dynamicField name="*_is" type="pints" indexed="true" stored="true" />
+	<dynamicField name="*_s" type="string" indexed="true" stored="true" />
+	<dynamicField name="*_ss" type="strings" indexed="true" stored="true" />
+	<dynamicField name="*_l" type="plong" indexed="true" stored="true" />
+	<dynamicField name="*_ls" type="plongs" indexed="true" stored="true" />
+	<dynamicField name="*_t" type="text_general" indexed="true" stored="true" multiValued="false" />
+	<dynamicField name="*_txt" type="text_general" indexed="true" stored="true" />
+	<dynamicField name="*_b" type="boolean" indexed="true" stored="true" />
+	<dynamicField name="*_bs" type="booleans" indexed="true" stored="true" />
+	<dynamicField name="*_f" type="pfloat" indexed="true" stored="true" />
+	<dynamicField name="*_fs" type="pfloats" indexed="true" stored="true" />
+	<dynamicField name="*_d" type="pdouble" indexed="true" stored="true" />
+	<dynamicField name="*_ds" type="pdoubles" indexed="true" stored="true" />
+	<dynamicField name="random_*" type="random" />
+	<dynamicField name="ignored_*" type="ignored" />
+
+	<dynamicField name="*_dt" type="pdate" indexed="true" stored="true" />
+	<dynamicField name="*_dts" type="pdate" indexed="true" stored="true" multiValued="true" />
+	<dynamicField name="*_p" type="location" indexed="true" stored="true" />
+	<dynamicField name="*_srpt" type="location_rpt" indexed="true" stored="true" />
+ 
+     <!-- payloaded dynamic fields -->
+	<dynamicField name="*_dpf" type="delimited_payloads_float" indexed="true" stored="true" />
+	<dynamicField name="*_dpi" type="delimited_payloads_int" indexed="true" stored="true" />
+	<dynamicField name="*_dps" type="delimited_payloads_string" indexed="true" stored="true" />
+
+	<dynamicField name="attr_*" type="text_general" indexed="true" stored="true" multiValued="true" />
+ 
+     <!-- Field to use to determine and enforce document uniqueness.
+       Unless this field is marked with required="false", it will be a required field
+     -->
+	<uniqueKey>id</uniqueKey>
+ 
+     <!-- field type definitions. The "name" attribute is
+        just a label to be used by field definitions.  The "class"
+        attribute and any other attributes determine the real
+        behavior of the fieldType.
+          Class names starting with "solr" refer to java classes in a
+        standard package such as org.apache.solr.analysis
+     -->
+ 
+     <!-- The sortMissingLast and sortMissingFirst attributes are optional and are
+          currently supported on types that are sorted internally as strings
+          and on numeric types.
+        This includes "string", "boolean", "pint", "pfloat", "plong", "pdate", "pdouble".
+        - If sortMissingLast="true", then a sort on this field will cause documents
+          without the field to come after documents with the field,
+          regardless of the requested sort order (asc or desc).
+        - If sortMissingFirst="true", then a sort on this field will cause documents
+          without the field to come before documents with the field,
+          regardless of the requested sort order.
+        - If sortMissingLast="false" and sortMissingFirst="false" (the default),
+          then default lucene sorting will be used which places docs without the
+          field first in an ascending sort and last in a descending sort.
+     -->
+ 
+     <!-- The StrField type is not analyzed, but indexed/stored verbatim. -->
+	<fieldType name="string" class="solr.StrField" sortMissingLast="true" docValues="true" />
+	<fieldType name="strings" class="solr.StrField" sortMissingLast="true" multiValued="true" docValues="true" />
+ 
+     <!-- boolean type: "true" or "false" -->
+	<fieldType name="boolean" class="solr.BoolField" sortMissingLast="true" />
+	<fieldType name="booleans" class="solr.BoolField" sortMissingLast="true" multiValued="true" />
+ 
+     <!--
+       Numeric field types that index values using KD-trees.
+       Point fields don't support FieldCache, so they must have docValues="true" if needed for sorting, faceting, functions, etc.
+     -->
+	<fieldType name="pint" class="solr.IntPointField" docValues="true" />
+	<fieldType name="pfloat" class="solr.FloatPointField" docValues="true" />
+	<fieldType name="plong" class="solr.LongPointField" docValues="true" />
+	<fieldType name="pdouble" class="solr.DoublePointField" docValues="true" />
+
+	<fieldType name="pints" class="solr.IntPointField" docValues="true" multiValued="true" />
+	<fieldType name="pfloats" class="solr.FloatPointField" docValues="true" multiValued="true" />
+	<fieldType name="plongs" class="solr.LongPointField" docValues="true" multiValued="true" />
+	<fieldType name="pdoubles" class="solr.DoublePointField" docValues="true" multiValued="true" />
+	<fieldType name="random" class="solr.RandomSortField" indexed="true" />
+ 
+     <!-- since fields of this type are by default not stored or indexed,
+        any data added to them will be ignored outright.  -->
+	<fieldType name="ignored" stored="false" indexed="false" multiValued="true" class="solr.StrField" />
+ 
+     <!-- The format for this date field is of the form 1995-12-31T23:59:59Z, and
+          is a more restricted form of the canonical representation of dateTime
+          http://www.w3.org/TR/xmlschema-2/#dateTime    
+          The trailing "Z" designates UTC time and is mandatory.
+          Optional fractional seconds are allowed: 1995-12-31T23:59:59.999Z
+          All other components are mandatory.
+ 
+          Expressions can also be used to denote calculations that should be
+          performed relative to "NOW" to determine the value, ie...
+ 
+                NOW/HOUR
+                   ... Round to the start of the current hour
+                NOW-1DAY
+                   ... Exactly 1 day prior to now
+                NOW/DAY+6MONTHS+3DAYS
+                   ... 6 months and 3 days in the future from the start of
+                       the current day
+                       
+       -->
+     <!-- KD-tree versions of date fields -->
+	<fieldType name="pdate" class="solr.DatePointField" docValues="true" />
+	<fieldType name="pdates" class="solr.DatePointField" docValues="true" multiValued="true" />
+     
+     <!--Binary data type. The data should be sent/retrieved in as Base64 encoded Strings -->
+	<fieldType name="binary" class="solr.BinaryField" />
+
+     <!-- solr.TextField allows the specification of custom text analyzers
+          specified as a tokenizer and a list of token filters. Different
+          analyzers may be specified for indexing and querying.
+ 
+          The optional positionIncrementGap puts space between multiple fields of
+          this type on the same document, with the purpose of preventing false phrase
+          matching across fields.
+ 
+          For more info on customizing your analyzer chain, please see
+          http://lucene.apache.org/solr/guide/understanding-analyzers-tokenizers-and-filters.html#understanding-analyzers-tokenizers-and-filters
+      -->
+ 
+     <!-- One can also specify an existing Analyzer class that has a
+          default constructor via the class attribute on the analyzer element.
+          Example:
+     <fieldType name="text_greek" class="solr.TextField">
+       <analyzer class="org.apache.lucene.analysis.el.GreekAnalyzer"/>
+     </fieldType>
+     -->
+ 
+     <!-- A text field that only splits on whitespace for exact matching of words -->
+	<dynamicField name="*_ws" type="text_ws" indexed="true" stored="true" />
+	<fieldType name="text_ws" class="solr.TextField" positionIncrementGap="100">
+		<analyzer>
+			<tokenizer name="whitespace" />
+		</analyzer>
+	</fieldType>
+ 
+     <!-- A general text field that has reasonable, generic
+          cross-language defaults: it tokenizes with StandardTokenizer,
+                removes stop words from case-insensitive "stopwords.txt"
+                (empty by default), and down cases.  At query time only, it
+                also applies synonyms.
+           -->
+	<dynamicField name="*_txt_gen" type="text_general" indexed="true" stored="true" />
+	<fieldType name="text_general" class="solr.TextField" positionIncrementGap="100" multiValued="true">
+		<analyzer type="index">
+			<tokenizer name="standard" />
+			<filter name="stop" ignoreCase="true" words="stopwords.txt" />
+         <!-- in this example, we will only use synonyms at query time
+         <filter name="synonymGraph" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+         <filter name="flattenGraph"/>
+         -->
+			<filter name="lowercase" />
+		</analyzer>
+		<analyzer type="query">
+			<tokenizer name="standard" />
+			<filter name="stop" ignoreCase="true" words="stopwords.txt" />
+			<filter name="synonymGraph" synonyms="synonyms.txt" ignoreCase="true" expand="true" />
+			<filter name="lowercase" />
+		</analyzer>
+	</fieldType>
+
+     <!-- A text field with defaults appropriate for English: it tokenizes with StandardTokenizer,
+          removes English stop words (lang/stopwords_en.txt), down cases, protects words from protwords.txt, and
+          finally applies Porter's stemming.  The query time analyzer also applies synonyms from synonyms.txt. -->
+	<dynamicField name="*_txt_en" type="text_en" indexed="true" stored="true" />
+	<fieldType name="text_en" class="solr.TextField" positionIncrementGap="100">
+		<analyzer type="index">
+			<tokenizer name="standard" />
+         <!-- in this example, we will only use synonyms at query time
+         <filter name="synonymGraph" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+         <filter name="flattenGraph"/>
+         -->
+         <!-- Case insensitive stop word removal.
+         -->
+			<filter name="stop" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter name="lowercase" />
+			<filter name="englishPossessive" />
+			<filter name="keywordMarker" protected="protwords.txt" />
+         <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
+         <filter name="englishMinimalStem"/>
+               -->
+			<filter name="porterStem" />
+		</analyzer>
+		<analyzer type="query">
+			<tokenizer name="standard" />
+			<filter name="synonymGraph" synonyms="synonyms.txt" ignoreCase="true" expand="true" />
+			<filter name="stop" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter name="lowercase" />
+			<filter name="englishPossessive" />
+			<filter name="keywordMarker" protected="protwords.txt" />
+         <!-- Optionally you may want to use this less aggressive stemmer instead of PorterStemFilterFactory:
+         <filter name="englishMinimalStem"/>
+               -->
+			<filter name="porterStem" />
+		</analyzer>
+	</fieldType>
+ 
+     <!-- A text field with defaults appropriate for English, plus
+          aggressive word-splitting and autophrase features enabled.
+          This field is just like text_en, except it adds
+          WordDelimiterGraphFilter to enable splitting and matching of
+          words on case-change, alpha numeric boundaries, and
+          non-alphanumeric chars.  This means certain compound word
+          cases will work, for example query "wi fi" will match
+          document "WiFi" or "wi-fi".
+     -->
+	<dynamicField name="*_txt_en_split" type="text_en_splitting" indexed="true" stored="true" />
+	<fieldType name="text_en_splitting" class="solr.TextField" positionIncrementGap="100"
+		autoGeneratePhraseQueries="true">
+		<analyzer type="index">
+			<tokenizer name="whitespace" />
+         <!-- in this example, we will only use synonyms at query time
+         <filter name="synonymGraph" synonyms="index_synonyms.txt" ignoreCase="true" expand="false"/>
+         -->
+         <!-- Case insensitive stop word removal.
+         -->
+			<filter name="stop" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter name="wordDelimiterGraph" generateWordParts="1" generateNumberParts="1" catenateWords="1"
+				catenateNumbers="1" catenateAll="0" splitOnCaseChange="1" />
+			<filter name="lowercase" />
+			<filter name="keywordMarker" protected="protwords.txt" />
+			<filter name="porterStem" />
+			<filter name="flattenGraph" />
+		</analyzer>
+		<analyzer type="query">
+			<tokenizer name="whitespace" />
+			<filter name="synonymGraph" synonyms="synonyms.txt" ignoreCase="true" expand="true" />
+			<filter name="stop" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter name="wordDelimiterGraph" generateWordParts="1" generateNumberParts="1" catenateWords="0"
+				catenateNumbers="0" catenateAll="0" splitOnCaseChange="1" />
+			<filter name="lowercase" />
+			<filter name="keywordMarker" protected="protwords.txt" />
+			<filter name="porterStem" />
+		</analyzer>
+	</fieldType>
+ 
+     <!-- Less flexible matching, but fewer false matches.  Probably not ideal for product names,
+          but may be good for SKUs.  Can insert dashes in the wrong place and still match. -->
+	<dynamicField name="*_txt_en_split_tight" type="text_en_splitting_tight" indexed="true" stored="true" />
+	<fieldType name="text_en_splitting_tight" class="solr.TextField" positionIncrementGap="100"
+		autoGeneratePhraseQueries="true">
+		<analyzer type="index">
+			<tokenizer name="whitespace" />
+			<filter name="synonymGraph" synonyms="synonyms.txt" ignoreCase="true" expand="false" />
+			<filter name="stop" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter name="wordDelimiterGraph" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+				catenateNumbers="1" catenateAll="0" />
+			<filter name="lowercase" />
+			<filter name="keywordMarker" protected="protwords.txt" />
+			<filter name="englishMinimalStem" />
+         <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+              possible with WordDelimiterGraphFilter in conjunction with stemming. -->
+			<filter name="removeDuplicates" />
+			<filter name="flattenGraph" />
+		</analyzer>
+		<analyzer type="query">
+			<tokenizer name="whitespace" />
+			<filter name="synonymGraph" synonyms="synonyms.txt" ignoreCase="true" expand="false" />
+			<filter name="stop" ignoreCase="true" words="lang/stopwords_en.txt" />
+			<filter name="wordDelimiterGraph" generateWordParts="0" generateNumberParts="0" catenateWords="1"
+				catenateNumbers="1" catenateAll="0" />
+			<filter name="lowercase" />
+			<filter name="keywordMarker" protected="protwords.txt" />
+			<filter name="englishMinimalStem" />
+         <!-- this filter can remove any duplicate tokens that appear at the same position - sometimes
+              possible with WordDelimiterGraphFilter in conjunction with stemming. -->
+			<filter name="removeDuplicates" />
+		</analyzer>
+	</fieldType>
+ 
+     <!-- lowercases the entire field value, keeping it as a single token.  -->
+	<dynamicField name="*_s_lower" type="lowercase" indexed="true" stored="true" />
+	<fieldType name="lowercase" class="solr.TextField" positionIncrementGap="100">
+		<analyzer>
+			<tokenizer name="keyword" />
+			<filter name="lowercase" />
+		</analyzer>
+	</fieldType>
+
+     <!-- This point type indexes the coordinates as separate fields (subFields)
+       If subFieldType is defined, it references a type, and a dynamic field
+       definition is created matching *___<typename>.  Alternately, if 
+       subFieldSuffix is defined, that is used to create the subFields.
+       Example: if subFieldType="double", then the coordinates would be
+         indexed in fields myloc_0___double,myloc_1___double.
+       Example: if subFieldSuffix="_d" then the coordinates would be indexed
+         in fields myloc_0_d,myloc_1_d
+       The subFields are an implementation detail of the fieldType, and end
+       users normally should not need to know about them.
+      -->
+	<dynamicField name="*_point" type="point" indexed="true" stored="true" />
+	<fieldType name="point" class="solr.PointType" dimension="2" subFieldSuffix="_d" />
+ 
+     <!-- A specialized field for geospatial search filters and distance sorting. -->
+	<fieldType name="location" class="solr.LatLonPointSpatialField" docValues="true" />
+ 
+     <!-- A geospatial field type that supports multiValued and polygon shapes.
+       For more information about this and other spatial fields see:
+       http://lucene.apache.org/solr/guide/spatial-search.html
+     -->
+	<fieldType name="location_rpt" class="solr.SpatialRecursivePrefixTreeFieldType" geo="true" distErrPct="0.025"
+		maxDistErr="0.001" distanceUnits="kilometers" />
+ 
+     <!-- Payloaded field types -->
+	<fieldType name="delimited_payloads_float" stored="false" indexed="true" class="solr.TextField">
+		<analyzer>
+			<tokenizer name="whitespace" />
+			<filter name="delimitedPayload" encoder="float" />
+		</analyzer>
+	</fieldType>
+	<fieldType name="delimited_payloads_int" stored="false" indexed="true" class="solr.TextField">
+		<analyzer>
+			<tokenizer name="whitespace" />
+			<filter name="delimitedPayload" encoder="integer" />
+		</analyzer>
+	</fieldType>
+	<fieldType name="delimited_payloads_string" stored="false" indexed="true" class="solr.TextField">
+		<analyzer>
+			<tokenizer name="whitespace" />
+			<filter name="delimitedPayload" encoder="identity" />
+		</analyzer>
+	</fieldType>
+</schema>
\ No newline at end of file
diff --git a/config/mpc_dev/conf/schema_extra_fields.xml b/config/mpc_dev/conf/schema_extra_fields.xml
new file mode 100755
index 0000000000000000000000000000000000000000..9ecd5f4fa637598bb55136694d1b7133089dd0c7
--- /dev/null
+++ b/config/mpc_dev/conf/schema_extra_fields.xml
@@ -0,0 +1,23 @@
+<fields>
+<!--
+  Adding German dynamic field types to our Solr Schema
+  If you enable this, make sure you have a folder called lang with stopwords_de.txt
+  and synonyms_de.txt in there
+  This also requires to enable the content in schema_extra_types.xml
+-->
+<!--
+   <field name="label_de" type="text_de" indexed="true" stored="true" termVectors="true" omitNorms="true"/>
+   <field name="content_de" type="text_de" indexed="true" stored="true" termVectors="true"/>
+   <field name="teaser_de" type="text_de" indexed="false" stored="true"/>
+   <field name="path_alias_de" type="text_de" indexed="true" stored="true" termVectors="true" omitNorms="true"/>
+   <field name="taxonomy_names_de" type="text_de" indexed="true" stored="false" termVectors="true" multiValued="true" omitNorms="true"/>
+   <field name="spell_de" type="text_de" indexed="true" stored="true" multiValued="true"/>
+   <copyField source="label_de" dest="spell_de"/>
+   <copyField source="content_de" dest="spell_de"/>
+   <dynamicField name="tags_de_*" type="text_de" indexed="true" stored="false" omitNorms="true"/>
+   <dynamicField name="ts_de_*" type="text_de" indexed="true" stored="true" multiValued="false" termVectors="true"/>
+   <dynamicField name="tm_de_*" type="text_de" indexed="true" stored="true" multiValued="true" termVectors="true"/>
+   <dynamicField name="tos_de_*" type="text_de" indexed="true" stored="true" multiValued="false" termVectors="true" omitNorms="true"/>
+   <dynamicField name="tom_de_*" type="text_de" indexed="true" stored="true" multiValued="true" termVectors="true" omitNorms="true"/>
+-->
+</fields>
diff --git a/config/mpc_dev/conf/schema_extra_types.xml b/config/mpc_dev/conf/schema_extra_types.xml
new file mode 100755
index 0000000000000000000000000000000000000000..e82072e2fb9235de0cf8b286f169f79d180630cb
--- /dev/null
+++ b/config/mpc_dev/conf/schema_extra_types.xml
@@ -0,0 +1,30 @@
+<types>
+<!--
+  Adding German language to our Solr Schema German
+  If you enable this, make sure you have a folder called lang with stopwords_de.txt
+  and synonyms_de.txt in there
+-->
+<!--
+    <fieldType name="text_de" class="solr.TextField" positionIncrementGap="100">
+      <analyzer type="index">
+        <charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.StopFilterFactory" words="lang/stopwords_de.txt" format="snowball" ignoreCase="true" enablePositionIncrements="true"/>
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" splitOnCaseChange="1" splitOnNumerics="1" catenateWords="1" catenateNumbers="1" catenateAll="0" protected="protwords.txt" preserveOriginal="1"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.GermanLightStemFilterFactory"/>
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      </analyzer>
+      <analyzer type="query">
+        <charFilter class="solr.MappingCharFilterFactory" mapping="mapping-ISOLatin1Accent.txt"/>
+        <tokenizer class="solr.WhitespaceTokenizerFactory"/>
+        <filter class="solr.SynonymFilterFactory" synonyms="lang/synonyms_de.txt" ignoreCase="true" expand="true"/>
+        <filter class="solr.StopFilterFactory" words="lang/stopwords_de.txt" format="snowball" ignoreCase="true" enablePositionIncrements="true"/>
+        <filter class="solr.WordDelimiterFilterFactory" generateWordParts="1" generateNumberParts="1" splitOnCaseChange="1" splitOnNumerics="1" catenateWords="0" catenateNumbers="0" catenateAll="0" protected="protwords.txt" preserveOriginal="1"/>
+        <filter class="solr.LowerCaseFilterFactory"/>
+        <filter class="solr.GermanLightStemFilterFactory"/>
+        <filter class="solr.RemoveDuplicatesTokenFilterFactory"/>
+      </analyzer>
+    </fieldType>
+-->
+</types>
diff --git a/config/mpc_dev/conf/solrconfig.xml b/config/mpc_dev/conf/solrconfig.xml
new file mode 100755
index 0000000000000000000000000000000000000000..c75f57ab6f2cf47780bfdd0c543952d72aad6d39
--- /dev/null
+++ b/config/mpc_dev/conf/solrconfig.xml
@@ -0,0 +1,1641 @@
+<?xml version="1.0" encoding="UTF-8" ?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<!--
+     For more details about configurations options that may appear in
+     this file, see http://wiki.apache.org/solr/SolrConfigXml.
+-->
+<config name="drupal-4.3-solr-4.x" >
+  <!-- In all configuration below, a prefix of "solr." for class names
+       is an alias that causes solr to search appropriate packages,
+       including org.apache.solr.(search|update|request|core|analysis)
+
+       You may also specify a fully qualified Java classname if you
+       have your own custom plugins.
+    -->
+
+  <!-- Set this to 'false' if you want solr to continue working after
+       it has encountered a severe configuration error.  In a
+       production environment, you may want solr to keep working even
+       if one handler is mis-configured.
+
+       You may also set this to false by setting the system
+       property:
+
+         -Dsolr.abortOnConfigurationError=false
+    -->
+  <abortOnConfigurationError>${solr.abortOnConfigurationError:true}</abortOnConfigurationError>
+
+  <!-- Controls what version of Lucene various components of Solr
+       adhere to.  Generally, you want to use the latest version to
+       get all bug fixes and improvements. It is highly recommended
+       that you fully re-index after changing this setting as it can
+       affect both how text is indexed and queried.
+    -->
+  <luceneMatchVersion>${solr.luceneMatchVersion:LUCENE_40}</luceneMatchVersion>
+
+  <!-- lib directives can be used to instruct Solr to load any Jars
+       identified and use them to resolve any "plugins" specified in
+       your solrconfig.xml or schema.xml (ie: Analyzers, Request
+       Handlers, etc...).
+
+       All directories and paths are resolved relative to the
+       instanceDir.
+
+       If a "./lib" directory exists in your instanceDir, all files
+       found in it are included as if you had used the following
+       syntax...
+       
+              <lib dir="./lib" />
+    -->
+
+  <!-- A dir option by itself adds any files found in the directory to
+       the classpath; this is useful for including all jars in a
+       directory.
+    -->
+  <lib dir="${solr.contrib.dir:../../../contrib}/extraction/lib" />
+  <lib dir="${solr.contrib.dir:../../../contrib}/clustering/lib/" />
+
+  <!-- The velocity library has been known to crash Solr in some
+       instances when deployed as a war file to Tomcat. Therefore all
+       references have been removed from the default configuration.
+       @see http://drupal.org/node/1612556
+  -->
+  <!-- <lib dir="../../contrib/velocity/lib" /> -->
+
+  <!-- When a regex is specified in addition to a directory, only the
+       files in that directory which completely match the regex
+       (anchored on both ends) will be included.
+    -->
+  <!--<lib dir="../../dist/" regex="apache-solr-cell-\d.*\.jar" />-->
+  <!--<lib dir="../../dist/" regex="apache-solr-clustering-\d.*\.jar" />-->
+  <!--<lib dir="../../dist/" regex="apache-solr-dataimporthandler-\d.*\.jar" />-->
+  <!--<lib dir="../../dist/" regex="apache-solr-langid-\d.*\.jar" />-->
+  <!-- <lib dir="../../dist/" regex="apache-solr-velocity-\d.*\.jar" /> -->
+
+  <!-- If a dir option (with or without a regex) is used and nothing
+       is found that matches, it will be ignored
+    -->
+  <!--<lib dir="../../contrib/clustering/lib/" />-->
+  <!--<lib dir="/total/crap/dir/ignored" />-->
+
+  <!-- an exact path can be used to specify a specific file.  This
+       will cause a serious error to be logged if it can't be loaded.
+    -->
+  <!--
+  <lib path="../a-jar-that-does-not-exist.jar" /> 
+  -->
+  
+  <!-- Data Directory
+
+       Used to specify an alternate directory to hold all index data
+       other than the default ./data under the Solr home.  If
+       replication is in use, this should match the replication
+       configuration.
+    -->
+  <!-- <dataDir>${solr.data.dir:}</dataDir> -->
+
+
+  <!-- The DirectoryFactory to use for indexes.
+       
+       solr.StandardDirectoryFactory, the default, is filesystem
+       based and tries to pick the best implementation for the current
+       JVM and platform.  One can force a particular implementation
+       via solr.MMapDirectoryFactory, solr.NIOFSDirectoryFactory, or
+       solr.SimpleFSDirectoryFactory.
+
+       solr.RAMDirectoryFactory is memory based, not
+       persistent, and doesn't work with replication.
+    -->
+  <directoryFactory name="DirectoryFactory" 
+                    class="${solr.directoryFactory:solr.StandardDirectoryFactory}"/>
+
+  <!-- Index Defaults
+
+       Values here affect all index writers and act as a default
+       unless overridden.
+
+       WARNING: See also the <mainIndex> section below for parameters
+       that override these defaults for Solr's main Lucene index.
+    -->
+  <indexConfig>
+
+    <useCompoundFile>false</useCompoundFile>
+
+    <mergeFactor>4</mergeFactor>
+    <!-- Sets the amount of RAM that may be used by Lucene indexing
+         for buffering added documents and deletions before they are
+         flushed to the Directory.  -->
+    <ramBufferSizeMB>32</ramBufferSizeMB>
+    <!-- If both ramBufferSizeMB and maxBufferedDocs is set, then
+         Lucene will flush based on whichever limit is hit first.  
+      -->
+    <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
+
+    <maxMergeDocs>2147483647</maxMergeDocs>
+    <maxFieldLength>100000</maxFieldLength>
+    <writeLockTimeout>1000</writeLockTimeout>
+
+    <!-- Expert: Merge Policy 
+
+         The Merge Policy in Lucene controls how merging is handled by
+         Lucene.  The default in Solr 3.3 is TieredMergePolicy.
+         
+         The default in 2.3 was the LogByteSizeMergePolicy;
+         previous versions used LogDocMergePolicy.
+         
+         LogByteSizeMergePolicy chooses segments to merge based on
+         their size.  The Lucene 2.2 default, LogDocMergePolicy, chose
+         when to merge based on the number of documents.
+         
+         Other implementations of MergePolicy must have a no-argument
+         constructor
+      -->
+    <mergePolicy class="org.apache.lucene.index.LogByteSizeMergePolicy"/>
+
+    <!-- Expert: Merge Scheduler
+
+         The Merge Scheduler in Lucene controls how merges are
+         performed.  The ConcurrentMergeScheduler (Lucene 2.3 default)
+         can perform merges in the background using separate threads.
+         The SerialMergeScheduler (Lucene 2.2 default) does not.
+     -->
+    <!-- 
+       <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
+       -->
+	  
+    <!-- LockFactory 
+
+         This option specifies which Lucene LockFactory implementation
+         to use.
+      
+         single = SingleInstanceLockFactory - suggested for a
+                  read-only index or when there is no possibility of
+                  another process trying to modify the index.
+         native = NativeFSLockFactory - uses OS native file locking.
+                  Do not use when multiple solr webapps in the same
+                  JVM are attempting to share a single index.
+         simple = SimpleFSLockFactory  - uses a plain file for locking
+
+         (For backwards compatibility with Solr 1.2, 'simple' is the
+         default if not specified.)
+
+         More details on the nuances of each LockFactory...
+         http://wiki.apache.org/lucene-java/AvailableLockFactories
+    -->
+    <lockType>single</lockType>
+
+    <!-- Expert: Controls how often Lucene loads terms into memory
+         Default is 128 and is likely good for most everyone.
+      -->
+    <!-- <termIndexInterval>256</termIndexInterval> -->
+
+    <!-- Unlock On Startup
+
+         If true, unlock any held write or commit locks on startup.
+         This defeats the locking mechanism that allows multiple
+         processes to safely access a lucene index, and should be used
+         with care.
+
+         This is not needed if lock type is 'none' or 'single'
+     -->
+    <unlockOnStartup>false</unlockOnStartup>
+    
+    <!-- If true, IndexReaders will be reopened (often more efficient)
+         instead of closed and then opened.
+      -->
+    <reopenReaders>true</reopenReaders>
+
+    <!-- Commit Deletion Policy
+
+         Custom deletion policies can be specified here. The class must
+         implement org.apache.lucene.index.IndexDeletionPolicy.
+
+         http://lucene.apache.org/java/2_9_1/api/all/org/apache/lucene/index/IndexDeletionPolicy.html
+
+         The standard Solr IndexDeletionPolicy implementation supports
+         deleting index commit points on number of commits, age of
+         commit point and optimized status.
+         
+         The latest commit point should always be preserved regardless
+         of the criteria.
+    -->
+    <deletionPolicy class="solr.SolrDeletionPolicy">
+      <!-- The number of commit points to be kept -->
+      <str name="maxCommitsToKeep">1</str>
+      <!-- The number of optimized commit points to be kept -->
+      <str name="maxOptimizedCommitsToKeep">0</str>
+      <!--
+          Delete all commit points once they have reached the given age.
+          Supports DateMathParser syntax e.g.
+        -->
+      <!--
+         <str name="maxCommitAge">30MINUTES</str>
+         <str name="maxCommitAge">1DAY</str>
+      -->
+    </deletionPolicy>
+
+    <!-- Lucene Infostream
+       
+         To aid in advanced debugging, Lucene provides an "InfoStream"
+         of detailed information when indexing.
+
+         Setting the value to true will instruct the underlying Lucene
+         IndexWriter to write its debugging info to the specified file.
+      -->
+     <infoStream file="INFOSTREAM.txt">false</infoStream> 
+
+  </indexConfig>
+
+  <!-- JMX
+       
+       This example enables JMX if and only if an existing MBeanServer
+       is found, use this if you want to configure JMX through JVM
+       parameters. Remove this to disable exposing Solr configuration
+       and statistics to JMX.
+
+       For more details see http://wiki.apache.org/solr/SolrJmx
+    -->
+  <!-- <jmx /> -->
+  <!-- If you want to connect to a particular server, specify the
+       agentId 
+    -->
+  <!-- <jmx agentId="myAgent" /> -->
+  <!-- If you want to start a new MBeanServer, specify the serviceUrl -->
+  <!-- <jmx serviceUrl="service:jmx:rmi:///jndi/rmi://localhost:9999/solr"/>
+    -->
+
+  <!-- The default high-performance update handler -->
+  <updateHandler class="solr.DirectUpdateHandler2">
+
+    <!-- AutoCommit
+
+         Perform a <commit/> automatically under certain conditions.
+         Instead of enabling autoCommit, consider using "commitWithin"
+         when adding documents. 
+
+         http://wiki.apache.org/solr/UpdateXmlMessages
+
+         maxDocs - Maximum number of documents to add since the last
+                   commit before automatically triggering a new commit.
+
+         maxTime - Maximum amount of time that is allowed to pass
+                   since a document was added before automatically
+                   triggering a new commit.
+      -->
+    <autoCommit>
+      <maxDocs>${solr.autoCommit.MaxDocs:10000}</maxDocs>
+      <maxTime>${solr.autoCommit.MaxTime:120000}</maxTime>
+    </autoCommit>
+
+    <!-- softAutoCommit is like autoCommit except it causes a
+         'soft' commit which only ensures that changes are visible
+         but does not ensure that data is synced to disk.  This is
+         faster and more near-realtime friendly than a hard commit.
+    -->
+    <autoSoftCommit>
+      <maxDocs>${solr.autoSoftCommit.MaxDocs:2000}</maxDocs>
+      <maxTime>${solr.autoSoftCommit.MaxTime:10000}</maxTime>
+    </autoSoftCommit>
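+    <!-- Illustrative note (not part of the stock apachesolr config): the hard and
+         soft commit thresholds above are resolved through property substitution,
+         so they can be tuned per core without editing this file. A hypothetical
+         solrcore.properties could contain, for example:
+           solr.autoCommit.MaxTime=60000
+           solr.autoSoftCommit.MaxTime=5000
+         The same names can also be passed as JVM system properties at startup. -->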
+
+    <!-- Update Related Event Listeners
+
+         Various IndexWriter related events can trigger Listeners to
+         take actions.
+
+         postCommit - fired after every commit or optimize command
+         postOptimize - fired after every optimize command
+      -->
+    <!-- The RunExecutableListener executes an external command from a
+         hook such as postCommit or postOptimize.
+         
+         exe - the name of the executable to run
+         dir - dir to use as the current working directory. (default=".")
+         wait - the calling thread waits until the executable returns. 
+                (default="true")
+         args - the arguments to pass to the program.  (default is none)
+         env - environment variables to set.  (default is none)
+      -->
+    <!-- This example shows how RunExecutableListener could be used
+         with the script based replication...
+         http://wiki.apache.org/solr/CollectionDistribution
+      -->
+    <!--
+       <listener event="postCommit" class="solr.RunExecutableListener">
+         <str name="exe">solr/bin/snapshooter</str>
+         <str name="dir">.</str>
+         <bool name="wait">true</bool>
+         <arr name="args"> <str>arg1</str> <str>arg2</str> </arr>
+         <arr name="env"> <str>MYVAR=val1</str> </arr>
+       </listener>
+      -->
+    <!-- Enables a transaction log, currently used for real-time get.
+         "dir" - the target directory for transaction logs, defaults to the
+         solr data directory.  -->
+    <updateLog>
+      <str name="dir">${solr.data.dir:}</str>
+      <!-- if you want to take control of the synchronization you may specify
+           the syncLevel as one of the following where ''flush'' is the default.
+           Fsync will reduce throughput.
+           <str name="syncLevel">flush|fsync|none</str>
+      -->
+    </updateLog>
+  </updateHandler>
+  
+  <!-- IndexReaderFactory
+
+       Use the following format to specify a custom IndexReaderFactory,
+       which allows for alternate IndexReader implementations.
+
+       ** Experimental Feature **
+
+       Please note - Using a custom IndexReaderFactory may prevent
+       certain other features from working. The API to
+       IndexReaderFactory may change without warning or may even be
+       removed from future releases if the problems cannot be
+       resolved.
+
+
+       ** Features that may not work with custom IndexReaderFactory **
+
+       The ReplicationHandler assumes a disk-resident index. Using a
+       custom IndexReader implementation may cause incompatibility
+       with ReplicationHandler and may cause replication to not work
+       correctly. See SOLR-1366 for details.
+
+    -->
+  <!--
+  <indexReaderFactory name="IndexReaderFactory" class="package.class">
+    <str name="someArg">Some Value</str>
+  </indexReaderFactory >
+  -->
+  <!-- By explicitly declaring the Factory, the termIndexDivisor can
+       be specified.
+    -->
+  <!--
+     <indexReaderFactory name="IndexReaderFactory" 
+                         class="solr.StandardIndexReaderFactory">
+       <int name="setTermIndexDivisor">12</int>
+     </indexReaderFactory >
+    -->
+
+
+  <query>
+    <!-- Max Boolean Clauses
+
+         Maximum number of clauses in each BooleanQuery,  an exception
+         is thrown if exceeded.
+
+         ** WARNING **
+         
+         This option actually modifies a global Lucene property that
+         will affect all SolrCores.  If multiple solrconfig.xml files
+         disagree on this property, the value at any given moment will
+         be based on the last SolrCore to be initialized.
+         
+      -->
+    <maxBooleanClauses>1024</maxBooleanClauses>
+
+
+    <!-- Solr Internal Query Caches
+
+         There are two implementations of cache available for Solr,
+         LRUCache, based on a synchronized LinkedHashMap, and
+         FastLRUCache, based on a ConcurrentHashMap.  
+
+         FastLRUCache has faster gets and slower puts in single
+         threaded operation and thus is generally faster than LRUCache
+         when the hit ratio of the cache is high (> 75%), and may be
+         faster under other scenarios on multi-cpu systems.
+    -->
+
+    <!-- Filter Cache
+
+         Cache used by SolrIndexSearcher for filters (DocSets),
+         unordered sets of *all* documents that match a query.  When a
+         new searcher is opened, its caches may be prepopulated or
+         "autowarmed" using data from caches in the old searcher.
+         autowarmCount is the number of items to prepopulate.  For
+         LRUCache, the autowarmed items will be the most recently
+         accessed items.
+
+         Parameters:
+           class - the SolrCache implementation to use
+               (LRUCache or FastLRUCache)
+           size - the maximum number of entries in the cache
+           initialSize - the initial capacity (number of entries) of
+               the cache.  (see java.util.HashMap)
+           autowarmCount - the number of entries to prepopulate from
+               an old cache.
+      -->
+    <filterCache class="solr.FastLRUCache"
+                 size="512"
+                 initialSize="512"
+                 autowarmCount="0"/>
+
+    <!-- Query Result Cache
+         
+         Caches results of searches - ordered lists of document ids
+         (DocList) based on a query, a sort, and the range of documents requested.  
+      -->
+    <queryResultCache class="solr.LRUCache"
+                     size="512"
+                     initialSize="512"
+                     autowarmCount="32"/>
+   
+    <!-- Document Cache
+
+         Caches Lucene Document objects (the stored fields for each
+         document).  Since Lucene internal document ids are transient,
+         this cache will not be autowarmed.  
+      -->
+    <documentCache class="solr.LRUCache"
+                   size="512"
+                   initialSize="512"
+                   autowarmCount="0"/>
+    
+    <!-- Field Value Cache
+         
+         Cache used to hold field values that are quickly accessible
+         by document id.  The fieldValueCache is created by default
+         even if not configured here.
+      -->
+    <!--
+       <fieldValueCache class="solr.FastLRUCache"
+                        size="512"
+                        autowarmCount="128"
+                        showItems="32" />
+      -->
+
+    <!-- Custom Cache
+
+         Example of a generic cache.  These caches may be accessed by
+         name through SolrIndexSearcher.getCache(), cacheLookup(), and
+         cacheInsert().  The purpose is to enable easy caching of
+         user/application level data.  The regenerator argument should
+         be specified as an implementation of solr.CacheRegenerator 
+         if autowarming is desired.  
+      -->
+    <!--
+       <cache name="myUserCache"
+              class="solr.LRUCache"
+              size="4096"
+              initialSize="1024"
+              autowarmCount="1024"
+              regenerator="com.mycompany.MyRegenerator"
+              />
+      -->
+
+
+    <!-- Lazy Field Loading
+
+         If true, stored fields that are not requested will be loaded
+         lazily.  This can result in a significant speed improvement
+         if the usual case is to not load all stored fields,
+         especially if the skipped fields are large compressed text
+         fields.
+    -->
+    <enableLazyFieldLoading>true</enableLazyFieldLoading>
+
+   <!-- Use Filter For Sorted Query
+
+        A possible optimization that attempts to use a filter to
+        satisfy a search.  If the requested sort does not include
+        score, then the filterCache will be checked for a filter
+        matching the query. If found, the filter will be used as the
+        source of document ids, and then the sort will be applied to
+        that.
+
+        For most situations, this will not be useful unless you
+        frequently get the same search repeatedly with different sort
+        options, and none of them ever use "score"
+     -->
+   <!--
+      <useFilterForSortedQuery>true</useFilterForSortedQuery>
+     -->
+
+   <!-- Result Window Size
+
+        An optimization for use with the queryResultCache.  When a search
+        is requested, a superset of the requested number of document ids
+        are collected.  For example, if a search for a particular query
+        requests matching documents 10 through 19, and queryResultWindowSize is 50,
+        then documents 0 through 49 will be collected and cached.  Any further
+        requests in that range can be satisfied via the cache.  
+     -->
+   <queryResultWindowSize>20</queryResultWindowSize>
+
+   <!-- Maximum number of documents to cache for any entry in the
+        queryResultCache. 
+     -->
+   <queryResultMaxDocsCached>200</queryResultMaxDocsCached>
+
+   <!-- Query Related Event Listeners
+
+        Various IndexSearcher related events can trigger Listeners to
+        take actions.
+
+        newSearcher - fired whenever a new searcher is being prepared
+        and there is a current searcher handling requests (aka
+        registered).  It can be used to prime certain caches to
+        prevent long request times for certain requests.
+
+        firstSearcher - fired whenever a new searcher is being
+        prepared but there is no current registered searcher to handle
+        requests or to gain autowarming data from.
+
+        
+     -->
+    <!-- QuerySenderListener takes an array of NamedList and executes a
+         local query request for each NamedList in sequence. 
+      -->
+    <listener event="newSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <!--
+           <lst><str name="q">solr</str><str name="sort">price asc</str></lst>
+           <lst><str name="q">rocks</str><str name="sort">weight asc</str></lst>
+          -->
+      </arr>
+    </listener>
+    <listener event="firstSearcher" class="solr.QuerySenderListener">
+      <arr name="queries">
+        <lst>
+          <str name="q">solr rocks</str><str name="start">0</str><str name="rows">10</str>
+        </lst>
+      </arr>
+    </listener>
+
+    <!-- Use Cold Searcher
+
+         If a search request comes in and there is no current
+         registered searcher, then immediately register the still
+         warming searcher and use it.  If "false" then all requests
+         will block until the first searcher is done warming.
+      -->
+    <useColdSearcher>false</useColdSearcher>
+
+    <!-- Max Warming Searchers
+         
+         Maximum number of searchers that may be warming in the
+         background concurrently.  An error is returned if this limit
+         is exceeded.
+
+         Recommended values are 1-2 for read-only slaves, higher for
+         masters w/o cache warming.
+      -->
+    <maxWarmingSearchers>2</maxWarmingSearchers>
+
+  </query>
+
+
+  <!-- Request Dispatcher
+
+       This section contains instructions for how the SolrDispatchFilter
+       should behave when processing requests for this SolrCore.
+
+       handleSelect affects the behavior of requests such as /select?qt=XXX
+
+       handleSelect="true" will cause the SolrDispatchFilter to process
+       the request and will result in consistent error handling and
+       formatting for all types of requests.
+
+       handleSelect="false" will cause the SolrDispatchFilter to
+       ignore "/select" requests and fallback to using the legacy
+       SolrServlet and its Solr 1.1 style error formatting
+    -->
+  <requestDispatcher handleSelect="true" >
+    <!-- Request Parsing
+
+         These settings indicate how Solr Requests may be parsed, and
+         what restrictions may be placed on the ContentStreams from
+         those requests
+
+         enableRemoteStreaming - enables use of the stream.file
+         and stream.url parameters for specifying remote streams.
+
+         multipartUploadLimitInKB - specifies the max size of
+         Multipart File Uploads that Solr will allow in a Request.
+         
+         *** WARNING ***
+         The settings below authorize Solr to fetch remote files; you
+         should make sure your system has some authentication before
+         using enableRemoteStreaming="true"
+
+      --> 
+    <requestParsers enableRemoteStreaming="true" 
+                    multipartUploadLimitInKB="2048000" />
+
+    <!-- HTTP Caching
+
+         Set HTTP caching related parameters (for proxy caches and clients).
+
+         The options below instruct Solr not to output any HTTP Caching
+         related headers
+      -->
+    <httpCaching never304="true" />
+    <!-- If you include a <cacheControl> directive, it will be used to
+         generate a Cache-Control header (as well as an Expires header
+         if the value contains "max-age=")
+         
+         By default, no Cache-Control header is generated.
+         
+         You can use the <cacheControl> option even if you have set
+         never304="true"
+      -->
+    <!--
+       <httpCaching never304="true" >
+         <cacheControl>max-age=30, public</cacheControl> 
+       </httpCaching>
+      -->
+    <!-- To enable Solr to respond with automatically generated HTTP
+         Caching headers, and to respond to Cache Validation requests
+         correctly, set the value of never304="false"
+         
+         This will cause Solr to generate Last-Modified and ETag
+         headers based on the properties of the Index.
+
+         The following options can also be specified to affect the
+         values of these headers...
+
+         lastModFrom - the default value is "openTime" which means the
+         Last-Modified value (and validation against If-Modified-Since
+         requests) will all be relative to when the current Searcher
+         was opened.  You can change it to lastModFrom="dirLastMod" if
+         you want the value to exactly correspond to when the physical
+         index was last modified.
+
+         etagSeed="..." is an option you can change to force the ETag
+         header (and validation against If-None-Match requests) to be
+         different even if the index has not changed (ie: when making
+         significant changes to your config file)
+
+         (lastModifiedFrom and etagSeed are both ignored if you use
+         the never304="true" option)
+      -->
+    <!--
+       <httpCaching lastModifiedFrom="openTime"
+                    etagSeed="Solr">
+         <cacheControl>max-age=30, public</cacheControl> 
+       </httpCaching>
+      -->
+  </requestDispatcher>
+
+  <!-- Request Handlers 
+
+       http://wiki.apache.org/solr/SolrRequestHandler
+
+       incoming queries will be dispatched to the correct handler
+       based on the path or the qt (query type) param.
+
+       Names starting with a '/' are accessed with a path equal to
+       the registered name.  Names without a leading '/' are accessed
+       with: http://host/app/[core/]select?qt=name
+
+       If a /select request is processed without a qt param
+       specified, the requestHandler that declares default="true" will
+       be used.
+       
+       If a Request Handler is declared with startup="lazy", then it will
+       not be initialized until the first request that uses it.
+
+    -->
+  <!-- SearchHandler
+
+       http://wiki.apache.org/solr/SearchHandler
+
+       For processing Search Queries, the primary Request Handler
+       provided with Solr is "SearchHandler" It delegates to a sequent
+       of SearchComponents (see below) and supports distributed
+       queries across multiple shards
+    -->
+  <!--<requestHandler name="search" class="solr.SearchHandler" default="true">-->
+    <!-- default values for query parameters can be specified, these
+         will be overridden by parameters in the request
+      -->
+     <!--<lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <int name="rows">10</int>
+     </lst>-->
+    <!-- In addition to defaults, "appends" params can be specified
+         to identify values which should be appended to the list of
+         multi-val params from the query (or the existing "defaults").
+      -->
+    <!-- In this example, the param "fq=instock:true" would be appended to
+         any query time fq params the user may specify, as a mechanism for
+         partitioning the index, independent of any user selected filtering
+         that may also be desired (perhaps as a result of faceted searching).
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "appends" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="appends">
+         <str name="fq">inStock:true</str>
+       </lst>
+      -->
+    <!-- "invariants" are a way of letting the Solr maintainer lock down
+         the options available to Solr clients.  Any params values
+         specified here are used regardless of what values may be specified
+         in either the query, the "defaults", or the "appends" params.
+
+         In this example, the facet.field and facet.query params would
+         be fixed, limiting the facets clients can use.  Faceting is
+         not turned on by default - but if the client does specify
+         facet=true in the request, these are the only facets they
+         will be able to see counts for; regardless of what other
+         facet.field or facet.query params they may specify.
+
+         NOTE: there is *absolutely* nothing a client can do to prevent these
+         "invariants" values from being used, so don't use this mechanism
+         unless you are sure you always want it.
+      -->
+    <!--
+       <lst name="invariants">
+         <str name="facet.field">cat</str>
+         <str name="facet.field">manu_exact</str>
+         <str name="facet.query">price:[* TO 500]</str>
+         <str name="facet.query">price:[500 TO *]</str>
+       </lst>
+      -->
+    <!-- If the default list of SearchComponents is not desired, that
+         list can either be overridden completely, or components can be
+         prepended or appended to the default list.  (see below)
+      -->
+    <!--
+       <arr name="components">
+         <str>nameOfCustomComponent1</str>
+         <str>nameOfCustomComponent2</str>
+       </arr>
+      -->
+    <!--</requestHandler>-->
+
+  <!-- A Robust Example
+
+       This example SearchHandler declaration shows off usage of the
+       SearchHandler with many defaults declared
+
+       Note that the same Request Handler (SearchHandler) can be
+       registered multiple times with different names (and different
+       init parameters)
+    -->
+  <!--
+  <requestHandler name="/browse" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>-->
+
+       <!-- VelocityResponseWriter settings -->
+       <!--<str name="wt">velocity</str>
+
+       <str name="v.template">browse</str>
+       <str name="v.layout">layout</str>
+       <str name="title">Solritas</str>
+
+       <str name="defType">edismax</str>
+       <str name="q.alt">*:*</str>
+       <str name="rows">10</str>
+       <str name="fl">*,score</str>
+       <str name="mlt.qf">
+         text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+       </str>
+       <str name="mlt.fl">text,features,name,sku,id,manu,cat</str>
+       <int name="mlt.count">3</int>
+
+       <str name="qf">
+          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+       </str>
+
+       <str name="facet">on</str>
+       <str name="facet.field">cat</str>
+       <str name="facet.field">manu_exact</str>
+       <str name="facet.query">ipod</str>
+       <str name="facet.query">GB</str>
+       <str name="facet.mincount">1</str>
+       <str name="facet.pivot">cat,inStock</str>
+       <str name="facet.range.other">after</str>
+       <str name="facet.range">price</str>
+       <int name="f.price.facet.range.start">0</int>
+       <int name="f.price.facet.range.end">600</int>
+       <int name="f.price.facet.range.gap">50</int>
+       <str name="facet.range">popularity</str>
+       <int name="f.popularity.facet.range.start">0</int>
+       <int name="f.popularity.facet.range.end">10</int>
+       <int name="f.popularity.facet.range.gap">3</int>
+       <str name="facet.range">manufacturedate_dt</str>
+       <str name="f.manufacturedate_dt.facet.range.start">NOW/YEAR-10YEARS</str>
+       <str name="f.manufacturedate_dt.facet.range.end">NOW</str>
+       <str name="f.manufacturedate_dt.facet.range.gap">+1YEAR</str>
+       <str name="f.manufacturedate_dt.facet.range.other">before</str>
+       <str name="f.manufacturedate_dt.facet.range.other">after</str>-->
+
+
+       <!-- Highlighting defaults -->
+       <!--<str name="hl">on</str>
+       <str name="hl.fl">text features name</str>
+       <str name="f.name.hl.fragsize">0</str>
+       <str name="f.name.hl.alternateField">name</str>
+     </lst>
+     <arr name="last-components">
+       <str>spellcheck</str>
+     </arr>-->
+     <!--
+     <str name="url-scheme">httpx</str>
+     -->
+  <!--</requestHandler>-->
+  <!-- trivia: the pinkPony requestHandler name was agreed upon between the Search API and
+    apachesolr maintainers. The decision was made during the Drupalcon Munich code sprint.
+    -->
+  <requestHandler name="pinkPony" class="solr.SearchHandler" default="true">
+    <lst name="defaults">
+      <str name="defType">edismax</str>
+      <str name="echoParams">explicit</str>
+      <bool name="omitHeader">true</bool>
+      <float name="tie">0.01</float>
+      <!-- Don't abort searches for the pinkPony request handler (set in solrcore.properties) -->
+      <int name="timeAllowed">${solr.pinkPony.timeAllowed:-1}</int>
+      <str name="q.alt">*:*</str>
+
+      <!-- By default, don't spell check -->
+      <str name="spellcheck">false</str>
+      <!-- Defaults for the spell checker when used -->
+      <str name="spellcheck.onlyMorePopular">true</str>
+      <str name="spellcheck.extendedResults">false</str>
+      <!--  The number of suggestions to return -->
+      <str name="spellcheck.count">1</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+      <str>elevator</str>
+    </arr>
+  </requestHandler>
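+  <!-- Illustrative example (not part of the stock config): because pinkPony is the
+       default handler and handleSelect="true", a plain select request such as
+         http://localhost:8983/solr/<core>/select?q=eclipse&rows=10
+       is answered by this handler; adding qt=pinkPony selects it explicitly. The
+       host, port and core name here are assumptions about your installation. -->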
+
+  <!-- The more like this handler offers many advantages over the standard handler
+     when performing moreLikeThis requests.-->
+  <requestHandler name="mlt" class="solr.MoreLikeThisHandler">
+    <lst name="defaults">
+      <str name="mlt.mintf">1</str>
+      <str name="mlt.mindf">1</str>
+      <str name="mlt.minwl">3</str>
+      <str name="mlt.maxwl">15</str>
+      <str name="mlt.maxqt">20</str>
+      <str name="mlt.match.include">false</str>
+      <!-- Abort any searches longer than 2 seconds (set in solrcore.properties) -->
+      <int name="timeAllowed">${solr.mlt.timeAllowed:2000}</int>
+    </lst>
+  </requestHandler>
+
+  <!-- A minimal query type for doing lucene queries -->
+  <requestHandler name="standard" class="solr.SearchHandler">
+     <lst name="defaults">
+       <str name="echoParams">explicit</str>
+       <bool name="omitHeader">true</bool>
+     </lst>
+  </requestHandler>
+
+  <!-- XML Update Request Handler.  
+       
+       http://wiki.apache.org/solr/UpdateXmlMessages
+
+       The canonical Request Handler for Modifying the Index through
+       commands specified using XML.
+
+       Note: Since solr1.1, requestHandlers require a valid content
+       type header if posted in the body. For example, curl now
+       requires: -H 'Content-type:text/xml; charset=utf-8'
+    -->
+  <requestHandler name="/update" 
+                  class="solr.UpdateRequestHandler">
+    <!-- See below for information on defining 
+         updateRequestProcessorChains that can be used by name 
+         on each Update Request
+      -->
+    <!--
+       <lst name="defaults">
+         <str name="update.chain">dedupe</str>
+       </lst>
+       -->
+    </requestHandler>
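+  <!-- Illustrative example (not part of the stock config): a document can be posted
+       to this handler with curl, e.g.
+         curl 'http://localhost:8983/solr/<core>/update?commit=true' \
+              -H 'Content-Type: text/xml; charset=utf-8' \
+              -d '<add><doc><field name="id">example:1</field></doc></add>'
+       The URL and the id value are assumptions; real documents must supply the
+       fields defined in schema.xml. -->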
+  <!-- Binary Update Request Handler
+       http://wiki.apache.org/solr/javabin
+    -->
+  <requestHandler name="/update/javabin" 
+                  class="solr.UpdateRequestHandler" />
+
+  <!-- CSV Update Request Handler
+       http://wiki.apache.org/solr/UpdateCSV
+    -->
+  <requestHandler name="/update/csv" 
+                  class="solr.CSVRequestHandler" 
+                  startup="lazy" />
+
+  <!-- JSON Update Request Handler
+       http://wiki.apache.org/solr/UpdateJSON
+    -->
+  <requestHandler name="/update/json" 
+                  class="solr.JsonUpdateRequestHandler" 
+                  startup="lazy" />
+
+  <!-- Solr Cell Update Request Handler
+
+       http://wiki.apache.org/solr/ExtractingRequestHandler 
+
+    -->
+  <requestHandler name="/update/extract" 
+                  startup="lazy"
+                  class="solr.extraction.ExtractingRequestHandler" >
+    <lst name="defaults">
+      <!-- All the main content goes into "text"... if you need to return
+           the extracted text or do highlighting, use a stored field. -->
+      <str name="fmap.content">text</str>
+      <str name="lowernames">true</str>
+      <str name="uprefix">ignored_</str>
+
+      <!-- capture link hrefs but ignore div attributes -->
+      <str name="captureAttr">true</str>
+      <str name="fmap.a">links</str>
+      <str name="fmap.div">ignored_</str>
+    </lst>
+  </requestHandler>
+
+  <!-- XSLT Update Request Handler
+       Transforms incoming XML with stylesheet identified by tr=
+  -->
+  <requestHandler name="/update/xslt"
+                   startup="lazy"
+                   class="solr.XsltUpdateRequestHandler"/>
+
+  <!-- Field Analysis Request Handler
+
+       RequestHandler that provides much the same functionality as
+       analysis.jsp. Provides the ability to specify multiple field
+       types and field names in the same request and outputs
+       index-time and query-time analysis for each of them.
+
+       Request parameters are:
+       analysis.fieldname - field name whose analyzers are to be used
+
+       analysis.fieldtype - field type whose analyzers are to be used
+       analysis.fieldvalue - text for index-time analysis
+       q (or analysis.q) - text for query time analysis
+       analysis.showmatch (true|false) - When set to true and when
+           query analysis is performed, the produced tokens of the
+           field value analysis will be marked as "matched" for every
+           token that is produced by the query analysis
+   -->
+  <requestHandler name="/analysis/field" 
+                  startup="lazy"
+                  class="solr.FieldAnalysisRequestHandler" />
+
+
+  <!-- Document Analysis Handler
+
+       http://wiki.apache.org/solr/AnalysisRequestHandler
+
+       An analysis handler that provides a breakdown of the analysis
+       process of provided documents. This handler expects a (single)
+       content stream with the following format:
+
+       <docs>
+         <doc>
+           <field name="id">1</field>
+           <field name="name">The Name</field>
+           <field name="text">The Text Value</field>
+         </doc>
+         <doc>...</doc>
+         <doc>...</doc>
+         ...
+       </docs>
+
+    Note: Each document must contain a field which serves as the
+    unique key. This key is used in the returned response to associate
+    an analysis breakdown to the analyzed document.
+
+    Like the FieldAnalysisRequestHandler, this handler also supports
+    query analysis by sending either an "analysis.query" or "q"
+    request parameter that holds the query text to be analyzed. It
+    also supports the "analysis.showmatch" parameter; when it is set to
+    true, all field tokens that match the query tokens will be marked
+    as a "match". 
+  -->
+  <requestHandler name="/analysis/document" 
+                  class="solr.DocumentAnalysisRequestHandler" 
+                  startup="lazy" />
+
+  <!-- Admin Handlers
+
+       Admin Handlers - This will register all the standard admin
+       RequestHandlers.  
+    -->
+  <requestHandler name="/admin/" class="solr.admin.AdminHandlers" />
+  <!-- This single handler is equivalent to the following... -->
+  <!--
+     <requestHandler name="/admin/luke"       class="solr.admin.LukeRequestHandler" />
+     <requestHandler name="/admin/system"     class="solr.admin.SystemInfoHandler" />
+     <requestHandler name="/admin/plugins"    class="solr.admin.PluginInfoHandler" />
+     <requestHandler name="/admin/threads"    class="solr.admin.ThreadDumpHandler" />
+     <requestHandler name="/admin/properties" class="solr.admin.PropertiesRequestHandler" />
+     <requestHandler name="/admin/file"       class="solr.admin.ShowFileRequestHandler" >
+    -->
+  <!-- If you wish to hide files under ${solr.home}/conf, explicitly
+       register the ShowFileRequestHandler using: 
+    -->
+  <!--
+     <requestHandler name="/admin/file" 
+                     class="solr.admin.ShowFileRequestHandler" >
+       <lst name="invariants">
+         <str name="hidden">synonyms.txt</str> 
+         <str name="hidden">anotherfile.txt</str> 
+       </lst>
+     </requestHandler>
+    -->
+
+  <!-- ping/healthcheck -->
+  <requestHandler name="/admin/ping" class="solr.PingRequestHandler">
+    <lst name="invariants">
+      <str name="qt">pinkPony</str>
+      <str name="q">solrpingquery</str>
+      <str name="omitHeader">false</str>
+    </lst>
+    <lst name="defaults">
+      <str name="echoParams">all</str>
+    </lst>
+    <!-- An optional feature of the PingRequestHandler is to configure the 
+         handler with a "healthcheckFile" which can be used to enable/disable 
+         the PingRequestHandler.
+         relative paths are resolved against the data dir 
+    -->
+    <!-- <str name="healthcheckFile">server-enabled.txt</str> -->
+  </requestHandler>
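+  <!-- Illustrative example (not part of the stock config): once the core is up, its
+       health can be checked with a request such as
+         http://localhost:8983/solr/<core>/admin/ping?wt=json
+       which reports status "OK" while the ping query above succeeds. Host, port and
+       core name are assumptions. -->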
+
+  <!-- Echo the request contents back to the client -->
+  <requestHandler name="/debug/dump" class="solr.DumpRequestHandler" >
+    <lst name="defaults">
+     <str name="echoParams">explicit</str> 
+     <str name="echoHandler">true</str>
+    </lst>
+  </requestHandler>
+  
+  <!-- Solr Replication
+
+       The SolrReplicationHandler supports replicating indexes from a
+       "master" used for indexing and "slaves" used for queries.
+
+       http://wiki.apache.org/solr/SolrReplication
+
+       In the example below, remove the <lst name="master"> section if
+       this is just a slave and remove  the <lst name="slave"> section
+       if this is just a master.
+  -->
+  <requestHandler name="/replication" class="solr.ReplicationHandler" >
+    <lst name="master">
+      <str name="enable">${solr.replication.master:false}</str>
+      <str name="replicateAfter">commit</str>
+      <str name="replicateAfter">startup</str>
+      <str name="confFiles">${solr.replication.confFiles:schema.xml,mapping-ISOLatin1Accent.txt,protwords.txt,stopwords.txt,synonyms.txt,elevate.xml}</str>
+    </lst>
+    <lst name="slave">
+      <str name="enable">${solr.replication.slave:false}</str>
+      <str name="masterUrl">${solr.replication.masterUrl:http://localhost:8983/solr}/replication</str>
+      <str name="pollInterval">${solr.replication.pollInterval:00:00:60}</str>
+    </lst>
+  </requestHandler>
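+  <!-- Illustrative note (not part of the stock config): replication is disabled on
+       both sides by default. It can be enabled per core through solrcore.properties,
+       for example on a master:
+         solr.replication.master=true
+       and on a slave:
+         solr.replication.slave=true
+         solr.replication.masterUrl=http://master-host:8983/solr/<core>
+       The master host name is an assumption. -->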
+
+  <!-- Realtime get handler, guaranteed to return the latest stored fields of
+       any document, without the need to commit or open a new searcher.  The
+       current implementation relies on the updateLog feature being enabled.
+  -->
+  <requestHandler name="/get" class="solr.RealTimeGetHandler">
+    <lst name="defaults">
+      <str name="omitHeader">true</str>
+      <str name="wt">json</str>
+      <str name="indent">true</str>
+    </lst>
+  </requestHandler>
+
+  <!-- Search Components
+
+       Search components are registered to SolrCore and used by
+       instances of SearchHandler (which can access them by name)
+
+       By default, the following components are available:
+
+       <searchComponent name="query"     class="solr.QueryComponent" />
+       <searchComponent name="facet"     class="solr.FacetComponent" />
+       <searchComponent name="mlt"       class="solr.MoreLikeThisComponent" />
+       <searchComponent name="highlight" class="solr.HighlightComponent" />
+       <searchComponent name="stats"     class="solr.StatsComponent" />
+       <searchComponent name="debug"     class="solr.DebugComponent" />
+
+       Default configuration in a requestHandler would look like:
+
+       <arr name="components">
+         <str>query</str>
+         <str>facet</str>
+         <str>mlt</str>
+         <str>highlight</str>
+         <str>stats</str>
+         <str>debug</str>
+       </arr>
+
+       If you register a searchComponent to one of the standard names, 
+       that will be used instead of the default.
+
+       To insert components before or after the 'standard' components, use:
+    
+       <arr name="first-components">
+         <str>myFirstComponentName</str>
+       </arr>
+    
+       <arr name="last-components">
+         <str>myLastComponentName</str>
+       </arr>
+
+       NOTE: The component registered with the name "debug" will
+       always be executed after the "last-components" 
+       
+     -->
+
+  <!-- A request handler for demonstrating the spellcheck component.  
+
+       NOTE: This is purely as an example.  The whole purpose of the
+       SpellCheckComponent is to hook it into the request handler that
+       handles your normal user queries so that a separate request is
+       not needed to get suggestions.
+
+       IN OTHER WORDS, THERE IS A REALLY GOOD CHANCE THE SETUP BELOW IS
+       NOT WHAT YOU WANT FOR YOUR PRODUCTION SYSTEM!
+       
+       See http://wiki.apache.org/solr/SpellCheckComponent for details
+       on the request parameters.
+    -->
+  <requestHandler name="/spell" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="spellcheck.onlyMorePopular">false</str>
+      <str name="spellcheck.extendedResults">false</str>
+      <str name="spellcheck.count">1</str>
+    </lst>
+    <arr name="last-components">
+      <str>spellcheck</str>
+    </arr>
+  </requestHandler>
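+  <!-- Illustrative example (not part of the stock config): suggestions can be
+       requested directly from this handler, e.g.
+         http://localhost:8983/solr/<core>/spell?q=eclpse&spellcheck=true
+       The host, port, core name and misspelled query term are assumptions. -->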
+
+  <!-- Term Vector Component
+
+       http://wiki.apache.org/solr/TermVectorComponent
+    -->
+  <searchComponent name="tvComponent" class="solr.TermVectorComponent"/>
+
+  <!-- A request handler for demonstrating the term vector component
+
+       This is purely as an example.
+
+       In reality you will likely want to add the component to your 
+       already specified request handlers. 
+    -->
+  <requestHandler name="tvrh" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <bool name="tv">true</bool>
+    </lst>
+    <arr name="last-components">
+      <str>tvComponent</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Clustering Component
+
+       http://wiki.apache.org/solr/ClusteringComponent
+
+       This relies on third party jars which are not included in the
+       release.  To use this component (and the "/clustering" handler),
+       those jars will need to be downloaded, and you'll need to set
+       the solr.clustering.enabled system property when running solr...
+
+          java -Dsolr.clustering.enabled=true -jar start.jar
+    -->
+  <!-- <searchComponent name="clustering"
+                   enable="${solr.clustering.enabled:false}"
+                   class="solr.clustering.ClusteringComponent" > -->
+    <!-- Declare an engine -->
+    <!--<lst name="engine">-->
+      <!-- The name, only one can be named "default" -->
+      <!--<str name="name">default</str>-->
+
+      <!-- Class name of Carrot2 clustering algorithm. 
+           
+           Currently available algorithms are:
+           
+           * org.carrot2.clustering.lingo.LingoClusteringAlgorithm
+           * org.carrot2.clustering.stc.STCClusteringAlgorithm
+           * org.carrot2.clustering.kmeans.BisectingKMeansClusteringAlgorithm
+           
+           See http://project.carrot2.org/algorithms.html for the
+           algorithm's characteristics.
+        -->
+      <!--<str name="carrot.algorithm">org.carrot2.clustering.lingo.LingoClusteringAlgorithm</str>-->
+
+      <!-- Overriding values for Carrot2 default algorithm attributes.
+
+           For a description of all available attributes, see:
+           http://download.carrot2.org/stable/manual/#chapter.components.
+           Use attribute key as name attribute of str elements
+           below. These can be further overridden for individual
+           requests by specifying attribute key as request parameter
+           name and attribute value as parameter value.
+        -->
+      <!--<str name="LingoClusteringAlgorithm.desiredClusterCountBase">20</str>-->
+      
+      <!-- Location of Carrot2 lexical resources.
+
+           A directory from which to load Carrot2-specific stop words
+           and stop labels. Absolute or relative to Solr config directory.
+           If a specific resource (e.g. stopwords.en) is present in the
+           specified dir, it will completely override the corresponding
+           default one that ships with Carrot2.
+
+           For an overview of Carrot2 lexical resources, see:
+           http://download.carrot2.org/head/manual/#chapter.lexical-resources
+        -->
+      <!--<str name="carrot.lexicalResourcesDir">clustering/carrot2</str>-->
+
+      <!-- The language to assume for the documents.
+           
+           For a list of allowed values, see:
+           http://download.carrot2.org/stable/manual/#section.attribute.lingo.MultilingualClustering.defaultLanguage
+       -->
+      <!--<str name="MultilingualClustering.defaultLanguage">ENGLISH</str>
+    </lst>
+    <lst name="engine">
+      <str name="name">stc</str>
+      <str name="carrot.algorithm">org.carrot2.clustering.stc.STCClusteringAlgorithm</str>
+    </lst>
+  </searchComponent>-->
+
+  <!-- A request handler for demonstrating the clustering component
+
+       This is purely as an example.
+
+       In reality you will likely want to add the component to your 
+       already specified request handlers. 
+    -->
+  <!--<requestHandler name="/clustering"
+                  startup="lazy"
+                  enable="${solr.clustering.enabled:false}"
+                  class="solr.SearchHandler">
+    <lst name="defaults">
+      <bool name="clustering">true</bool>
+      <str name="clustering.engine">default</str>
+      <bool name="clustering.results">true</bool>-->
+      <!-- The title field -->
+      <!--<str name="carrot.title">name</str>-->
+      <!--<str name="carrot.url">id</str>-->
+      <!-- The field to cluster on -->
+       <!--<str name="carrot.snippet">features</str>-->
+       <!-- produce summaries -->
+       <!--<bool name="carrot.produceSummary">true</bool>-->
+       <!-- the maximum number of labels per cluster -->
+       <!--<int name="carrot.numDescriptions">5</int>-->
+       <!-- produce sub clusters -->
+       <!--<bool name="carrot.outputSubClusters">false</bool>-->
+       
+       <!--<str name="defType">edismax</str>
+       <str name="qf">
+          text^0.5 features^1.0 name^1.2 sku^1.5 id^10.0 manu^1.1 cat^1.4
+       </str>
+       <str name="q.alt">*:*</str>
+       <str name="rows">10</str>
+       <str name="fl">*,score</str>
+    </lst>     
+    <arr name="last-components">
+      <str>clustering</str>
+    </arr>
+  </requestHandler>-->
+  
+  <!-- Terms Component
+
+       http://wiki.apache.org/solr/TermsComponent
+
+       A component to return terms and document frequency of those
+       terms
+    -->
+  <searchComponent name="terms" class="solr.TermsComponent"/>
+
+  <!-- A request handler for demonstrating the terms component -->
+  <requestHandler name="/terms" class="solr.SearchHandler" startup="lazy">
+     <lst name="defaults">
+      <bool name="terms">true</bool>
+    </lst>     
+    <arr name="components">
+      <str>terms</str>
+    </arr>
+  </requestHandler>
+
+
+  <!-- Query Elevation Component
+
+       http://wiki.apache.org/solr/QueryElevationComponent
+
+       a search component that enables you to configure the top
+       results for a given query regardless of the normal lucene
+       scoring.
+    -->
+  <searchComponent name="elevator" class="solr.QueryElevationComponent" >
+    <!-- pick a fieldType to analyze queries -->
+    <str name="queryFieldType">string</str>
+    <str name="config-file">elevate.xml</str>
+  </searchComponent>
+
+  <!-- A request handler for demonstrating the elevator component -->
+  <requestHandler name="/elevate" class="solr.SearchHandler" startup="lazy">
+    <lst name="defaults">
+      <str name="echoParams">explicit</str>
+    </lst>
+    <arr name="last-components">
+      <str>elevator</str>
+    </arr>
+  </requestHandler>
+
+  <!-- Highlighting Component
+
+       http://wiki.apache.org/solr/HighlightingParameters
+    -->
+  <searchComponent class="solr.HighlightComponent" name="highlight">
+    <highlighting>
+      <!-- Configure the standard fragmenter -->
+      <!-- This could most likely be commented out in the "default" case -->
+      <fragmenter name="gap" 
+                  default="true"
+                  class="solr.highlight.GapFragmenter">
+        <lst name="defaults">
+          <int name="hl.fragsize">100</int>
+        </lst>
+      </fragmenter>
+
+      <!-- A regular-expression-based fragmenter 
+           (for sentence extraction) 
+        -->
+      <fragmenter name="regex" 
+                  class="solr.highlight.RegexFragmenter">
+        <lst name="defaults">
+          <!-- slightly smaller fragsizes work better because of slop -->
+          <int name="hl.fragsize">70</int>
+          <!-- allow 50% slop on fragment sizes -->
+          <float name="hl.regex.slop">0.5</float>
+          <!-- a basic sentence pattern -->
+          <str name="hl.regex.pattern">[-\w ,/\n\&quot;&apos;]{20,200}</str>
+        </lst>
+      </fragmenter>
+
+      <!-- Configure the standard formatter -->
+      <formatter name="html" 
+                 default="true"
+                 class="solr.highlight.HtmlFormatter">
+        <lst name="defaults">
+          <str name="hl.simple.pre"><![CDATA[<strong>]]></str>
+          <str name="hl.simple.post"><![CDATA[</strong>]]></str>
+        </lst>
+      </formatter>
+
+      <!-- Configure the standard encoder -->
+      <encoder name="html" 
+               class="solr.highlight.HtmlEncoder" />
+
+      <!-- Configure the standard fragListBuilder -->
+      <fragListBuilder name="simple" 
+                       default="true"
+                       class="solr.highlight.SimpleFragListBuilder"/>
+
+      <!-- Configure the single fragListBuilder -->
+      <fragListBuilder name="single" 
+                       class="solr.highlight.SingleFragListBuilder"/>
+
+      <!-- default tag FragmentsBuilder -->
+      <fragmentsBuilder name="default" 
+                        default="true"
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <!-- 
+        <lst name="defaults">
+          <str name="hl.multiValuedSeparatorChar">/</str>
+        </lst>
+        -->
+      </fragmentsBuilder>
+
+      <!-- multi-colored tag FragmentsBuilder -->
+      <fragmentsBuilder name="colored" 
+                        class="solr.highlight.ScoreOrderFragmentsBuilder">
+        <lst name="defaults">
+          <str name="hl.tag.pre"><![CDATA[
+               <b style="background:yellow">,<b style="background:lawgreen">,
+               <b style="background:aquamarine">,<b style="background:magenta">,
+               <b style="background:palegreen">,<b style="background:coral">,
+               <b style="background:wheat">,<b style="background:khaki">,
+               <b style="background:lime">,<b style="background:deepskyblue">]]></str>
+          <str name="hl.tag.post"><![CDATA[</b>]]></str>
+        </lst>
+      </fragmentsBuilder>
+      
+      <boundaryScanner name="default" 
+                       default="true"
+                       class="solr.highlight.SimpleBoundaryScanner">
+        <lst name="defaults">
+          <str name="hl.bs.maxScan">10</str>
+          <str name="hl.bs.chars">.,!? &#9;&#10;&#13;</str>
+        </lst>
+      </boundaryScanner>
+      
+      <boundaryScanner name="breakIterator" 
+                       class="solr.highlight.BreakIteratorBoundaryScanner">
+        <lst name="defaults">
+          <!-- type should be one of CHARACTER, WORD(default), LINE and SENTENCE -->
+          <str name="hl.bs.type">WORD</str>
+          <!-- language and country are used when constructing Locale object.  -->
+          <!-- And the Locale object will be used when getting instance of BreakIterator -->
+          <str name="hl.bs.language">en</str>
+          <str name="hl.bs.country">US</str>
+        </lst>
+      </boundaryScanner>
+    </highlighting>
+  </searchComponent>
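+  <!-- Illustrative example (assumption): to return highlighted snippets using the
+       component above, standard highlighting parameters can be appended to a
+       search request, for instance (replace "field_name" with an indexed, stored
+       text field from the schema):
+       http://localhost:8983/solr/mpc_dev/select?q=ide&hl=true&hl.fl=field_name
+    -->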
+
+  <!-- Update Processors
+
+       Chains of Update Processor Factories for dealing with Update
+       Requests can be declared, and then used by name in Update
+       Request Processors
+
+       http://wiki.apache.org/solr/UpdateRequestProcessor
+
+    --> 
+  <!-- Deduplication
+
+       An example dedup update processor that creates the "id" field
+       on the fly based on the hash code of some other fields.  This
+       example has overwriteDupes set to false since we are using the
+       id field as the signatureField and Solr will maintain
+       uniqueness based on that anyway.  
+       
+    -->
+  <!--
+     <updateRequestProcessorChain name="dedupe">
+       <processor class="solr.processor.SignatureUpdateProcessorFactory">
+         <bool name="enabled">true</bool>
+         <str name="signatureField">id</str>
+         <bool name="overwriteDupes">false</bool>
+         <str name="fields">name,features,cat</str>
+         <str name="signatureClass">solr.processor.Lookup3Signature</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+
+    <!--
+       This example update chain identifies the language of the incoming
+       documents using the langid contrib. The detected language is
+       written to field language_s. No field name mapping is done.
+       The fields used for detection are text, title, subject and description,
+       making this example suitable for detecting languages from full-text
+       rich documents injected via ExtractingRequestHandler.
+       See more about langId at http://wiki.apache.org/solr/LanguageDetection
+    -->
+    <!--
+     <updateRequestProcessorChain name="langid">
+       <processor class="org.apache.solr.update.processor.TikaLanguageIdentifierUpdateProcessorFactory">
+         <str name="langid.fl">text,title,subject,description</str>
+         <str name="langid.langField">language_s</str>
+         <str name="langid.fallback">en</str>
+       </processor>
+       <processor class="solr.LogUpdateProcessorFactory" />
+       <processor class="solr.RunUpdateProcessorFactory" />
+     </updateRequestProcessorChain>
+    -->
+ 
+  <!-- Response Writers
+
+       http://wiki.apache.org/solr/QueryResponseWriter
+
+       Request responses will be written using the writer specified by
+       the 'wt' request parameter matching the name of a registered
+       writer.
+
+       The "default" writer is the default and will be used if 'wt' is
+       not specified in the request.
+    -->
+  <!-- The following response writers are implicitly configured unless
+       overridden...
+    -->
+  <!--
+     <queryResponseWriter name="xml" 
+                          default="true"
+                          class="solr.XMLResponseWriter" />
+     <queryResponseWriter name="json" class="solr.JSONResponseWriter"/>
+     <queryResponseWriter name="python" class="solr.PythonResponseWriter"/>
+     <queryResponseWriter name="ruby" class="solr.RubyResponseWriter"/>
+     <queryResponseWriter name="php" class="solr.PHPResponseWriter"/>
+     <queryResponseWriter name="phps" class="solr.PHPSerializedResponseWriter"/>
+     <queryResponseWriter name="csv" class="solr.CSVResponseWriter"/>
+    -->
+
+  <queryResponseWriter name="json" class="solr.JSONResponseWriter">
+     <!-- For ease of inspection, JSON responses are written as
+      plain text so that they are easy to read in *any* browser.
+      If you expect a MIME type of "application/json" just remove this override.
+     -->
+    <str name="content-type">text/plain; charset=UTF-8</str>
+  </queryResponseWriter>
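+  <!-- Illustrative example (assumption): the response writer is selected per
+       request with the 'wt' parameter, so the same query can be returned as
+       JSON or XML:
+       http://localhost:8983/solr/mpc_dev/select?q=*:*&wt=json
+       http://localhost:8983/solr/mpc_dev/select?q=*:*&wt=xml
+    -->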
+  
+  <!--
+     Custom response writers can be declared as needed...
+    -->
+    <!-- The solr.velocity.enabled flag is used by Solr's test cases so that this response writer is not
+         loaded (causing an error if contrib/velocity has not been built fully) -->
+    <!-- <queryResponseWriter name="velocity" class="solr.VelocityResponseWriter" enable="${solr.velocity.enabled:true}"/> -->
+  
+
+  <!-- XSLT response writer transforms the XML output by any xslt file found
+       in Solr's conf/xslt directory.  Changes to xslt files are checked for
+       every xsltCacheLifetimeSeconds.  
+    -->
+  <queryResponseWriter name="xslt" class="solr.XSLTResponseWriter">
+    <int name="xsltCacheLifetimeSeconds">5</int>
+  </queryResponseWriter>
+
+  <!-- Query Parsers
+
+       http://wiki.apache.org/solr/SolrQuerySyntax
+
+       Multiple QParserPlugins can be registered by name, and then
+       used in either the "defType" param for the QueryComponent (used
+       by SearchHandler) or in LocalParams
+    -->
+  <!-- example of registering a query parser -->
+  <!--
+     <queryParser name="myparser" class="com.mycompany.MyQParserPlugin"/>
+    -->
+
+  <!-- Function Parsers
+
+       http://wiki.apache.org/solr/FunctionQuery
+
+       Multiple ValueSourceParsers can be registered by name, and then
+       used as function names when using the "func" QParser.
+    -->
+  <!-- example of registering a custom function parser  -->
+  <!--
+     <valueSourceParser name="myfunc" 
+                        class="com.mycompany.MyValueSourceParser" />
+    -->
+
+  <!-- Legacy config for the admin interface -->
+  <admin>
+    <defaultQuery>*:*</defaultQuery>
+
+    <!-- configure a healthcheck file for servers behind a
+         loadbalancer 
+      -->
+    <!--
+       <healthcheck type="file">server-enabled</healthcheck>
+      -->
+  </admin>
+
+  <!-- Following is a dynamic way to include other components or any customized solrconfig.xml stuff, added by other contrib modules -->
+  <xi:include href="solrconfig_extra.xml" xmlns:xi="http://www.w3.org/2001/XInclude">
+    <xi:fallback>
+    <!-- Spell Check
+
+        The spell check component can return a list of alternative spelling
+        suggestions. It is normally defined in solrconfig_extra.xml; this
+        fallback definition is used when that file is not present, since the
+        component is referenced by the search handler.
+
+        http://wiki.apache.org/solr/SpellCheckComponent
+     -->
+    <searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+    <str name="queryAnalyzerFieldType">textSpell</str>
+
+    <!-- a spellchecker built from a field of the main index -->
+      <lst name="spellchecker">
+        <str name="name">default</str>
+        <str name="field">spell</str>
+        <str name="spellcheckIndexDir">spellchecker</str>
+        <str name="buildOnOptimize">true</str>
+      </lst>
+    </searchComponent>
+    </xi:fallback>
+  </xi:include>
+
+</config>
diff --git a/config/mpc_dev/conf/solrconfig_extra.xml b/config/mpc_dev/conf/solrconfig_extra.xml
new file mode 100755
index 0000000000000000000000000000000000000000..c5bc3acfb52805c4f16d8ebf5239ea6443923030
--- /dev/null
+++ b/config/mpc_dev/conf/solrconfig_extra.xml
@@ -0,0 +1,80 @@
+<!-- Spell Check
+
+    The spell check component can return a list of alternative spelling
+    suggestions.
+
+    http://wiki.apache.org/solr/SpellCheckComponent
+ -->
+<searchComponent name="spellcheck" class="solr.SpellCheckComponent">
+
+<str name="queryAnalyzerFieldType">textSpell</str>
+
+<!-- Multiple "Spell Checkers" can be declared and used by this
+     component
+  -->
+
+<!-- a spellchecker built from a field of the main index, and
+     written to disk
+  -->
+<lst name="spellchecker">
+  <str name="name">default</str>
+  <str name="field">spell</str>
+  <str name="spellcheckIndexDir">spellchecker</str>
+  <str name="buildOnOptimize">true</str>
+  <!-- uncomment this to require terms to occur in 1% of the documents in order to be included in the dictionary
+    <float name="thresholdTokenFrequency">.01</float>
+  -->
+</lst>
+
+<!--
+  Adding a German spellchecker index to our Solr index
+  This also requires enabling the corresponding content in schema_extra_types.xml and schema_extra_fields.xml
+-->
+<!--
+<lst name="spellchecker">
+  <str name="name">spellchecker_de</str>
+  <str name="field">spell_de</str>
+  <str name="spellcheckIndexDir">./spellchecker_de</str>
+  <str name="buildOnOptimize">true</str>
+</lst>
+-->
+
+<!-- a spellchecker that uses a different distance measure -->
+<!--
+   <lst name="spellchecker">
+     <str name="name">jarowinkler</str>
+     <str name="field">spell</str>
+     <str name="distanceMeasure">
+       org.apache.lucene.search.spell.JaroWinklerDistance
+     </str>
+     <str name="spellcheckIndexDir">spellcheckerJaro</str>
+   </lst>
+ -->
+
+<!-- a spellchecker that uses an alternate comparator
+
+     comparatorClass can be one of:
+      1. score (default)
+      2. freq (Frequency first, then score)
+      3. A fully qualified class name
+  -->
+<!--
+   <lst name="spellchecker">
+     <str name="name">freq</str>
+     <str name="field">lowerfilt</str>
+     <str name="spellcheckIndexDir">spellcheckerFreq</str>
+     <str name="comparatorClass">freq</str>
+     <str name="buildOnCommit">true</str>
+  -->
+
+<!-- A spellchecker that reads the list of words from a file -->
+<!--
+   <lst name="spellchecker">
+     <str name="classname">solr.FileBasedSpellChecker</str>
+     <str name="name">file</str>
+     <str name="sourceLocation">spellings.txt</str>
+     <str name="characterEncoding">UTF-8</str>
+     <str name="spellcheckIndexDir">spellcheckerFile</str>
+   </lst>
+  -->
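+<!-- Illustrative example (assumption): once this component is attached to the
+     main search handler and the "spell" field is populated, suggestions can be
+     requested with the standard spellcheck parameters, for instance:
+     http://localhost:8983/solr/mpc_dev/select?q=eclpse&spellcheck=true&spellcheck.dictionary=default
+  -->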
+</searchComponent>
diff --git a/config/mpc_dev/conf/solrcore.properties b/config/mpc_dev/conf/solrcore.properties
new file mode 100755
index 0000000000000000000000000000000000000000..b7f8f6c801408ece28505ba9b354e76f9dcebbb5
--- /dev/null
+++ b/config/mpc_dev/conf/solrcore.properties
@@ -0,0 +1,20 @@
+# Defines Solr properties for this specific core.
+solr.replication.master=false
+solr.replication.slave=false
+solr.replication.pollInterval=00:00:60
+solr.replication.masterUrl=http://localhost:8983/solr
+solr.replication.confFiles=schema.xml,mapping-ISOLatin1Accent.txt,protwords.txt,stopwords.txt,synonyms.txt,elevate.xml
+solr.mlt.timeAllowed=2000
+# You should not set your luceneMatchVersion to anything lower than your Solr
+# Version.
+solr.luceneMatchVersion=LUCENE_40
+solr.pinkPony.timeAllowed=-1
+# autoCommit after 10000 docs
+solr.autoCommit.MaxDocs=10000
+# autoCommit after 2 minutes
+solr.autoCommit.MaxTime=120000
+# autoSoftCommit after 2000 docs
+solr.autoSoftCommit.MaxDocs=2000
+# autoSoftCommit after 10 seconds
+solr.autoSoftCommit.MaxTime=10000
+solr.contrib.dir=../../../contrib
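+# Illustrative note (assumption): these values are consumed in solrconfig.xml via
+# Solr property substitution, e.g. <maxTime>${solr.autoCommit.MaxTime:15000}</maxTime>.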
diff --git a/config/mpc_dev/conf/stopwords.txt b/config/mpc_dev/conf/stopwords.txt
new file mode 100755
index 0000000000000000000000000000000000000000..d7f243e48a9f8706cf8ba5aeb3218558e716fa0b
--- /dev/null
+++ b/config/mpc_dev/conf/stopwords.txt
@@ -0,0 +1,4 @@
+# Contains words which shouldn't be indexed for fulltext fields, e.g., because
+# they're too common. For documentation of the format, see
+# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.StopFilterFactory
+# (Lines starting with a pound character # are ignored.)
diff --git a/config/mpc_dev/conf/synonyms.txt b/config/mpc_dev/conf/synonyms.txt
new file mode 100755
index 0000000000000000000000000000000000000000..7d22eea6d6ee1548e239d42bfe44515370a2a091
--- /dev/null
+++ b/config/mpc_dev/conf/synonyms.txt
@@ -0,0 +1,3 @@
+# Contains synonyms to use for your index. For the format used, see
+# http://wiki.apache.org/solr/AnalyzersTokenizersTokenFilters#solr.SynonymFilterFactory
+# (Lines starting with a pound character # are ignored.)
diff --git a/config/mpc_dev/core.properties b/config/mpc_dev/core.properties
new file mode 100644
index 0000000000000000000000000000000000000000..887d6e8980e9ce61512e12b0954cc61677ad0ccc
--- /dev/null
+++ b/config/mpc_dev/core.properties
@@ -0,0 +1,6 @@
+#Written by CorePropertiesLocator
+#Wed Jun 03 15:46:22 UTC 2020
+name=mpc_dev
+config=solrconfig.xml
+schema=schema.xml
+dataDir=data
diff --git a/pom.xml b/pom.xml
index 1be700cd66c8acc91e067bf55c7f01d42dc7bedd..7e9d153b3da0d7988a25b1d7036ddae60709e1f2 100644
--- a/pom.xml
+++ b/pom.xml
@@ -10,7 +10,7 @@
 	<properties>
 		<project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
 		<surefire-plugin.version>2.22.0</surefire-plugin.version>
-		<quarkus.version>1.3.0.Final</quarkus.version>
+		<quarkus.version>1.6.0.Final</quarkus.version>
 		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
 		<maven.compiler.source>1.8</maven.compiler.source>
 		<maven.compiler.target>1.8</maven.compiler.target>
@@ -87,11 +87,6 @@
 		</dependency>
 
 		<!-- Custom dependencies -->
-		<dependency>
-			<groupId>org.slf4j</groupId>
-			<artifactId>slf4j-log4j12</artifactId>
-			<version>1.7.25</version>
-		</dependency>
 		<dependency>
 			<groupId>org.apache.commons</groupId>
 			<artifactId>commons-lang3</artifactId>
diff --git a/src/main/java/org/eclipsefoundation/core/config/RoleAugmentor.java b/src/main/java/org/eclipsefoundation/core/config/RoleAugmentor.java
index 5a8bc4f5c39cefdcf4edf759a8db18f82cf92c3d..ed4c7249b4608eafad729b1c0eff4513b5c147be 100644
--- a/src/main/java/org/eclipsefoundation/core/config/RoleAugmentor.java
+++ b/src/main/java/org/eclipsefoundation/core/config/RoleAugmentor.java
@@ -1,7 +1,7 @@
 package org.eclipsefoundation.core.config;
 
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionStage;
+import java.util.function.Supplier;
+
 import javax.enterprise.context.ApplicationScoped;
 
 import org.eclipse.microprofile.config.inject.ConfigProperty;
@@ -10,6 +10,7 @@ import io.quarkus.security.identity.AuthenticationRequestContext;
 import io.quarkus.security.identity.SecurityIdentity;
 import io.quarkus.security.identity.SecurityIdentityAugmentor;
 import io.quarkus.security.runtime.QuarkusSecurityIdentity;
+import io.smallrye.mutiny.Uni;
 
 /**
  * Custom override for production that can be enabled to set user roles to
@@ -32,9 +33,11 @@ public class RoleAugmentor implements SecurityIdentityAugmentor {
 	}
 
 	@Override
-	public CompletionStage<SecurityIdentity> augment(SecurityIdentity identity, AuthenticationRequestContext context) {
-		// create a future to contain the original/updated role
-		CompletableFuture<SecurityIdentity> cs = new CompletableFuture<>();
+	public Uni<SecurityIdentity> augment(SecurityIdentity identity, AuthenticationRequestContext context) {
+		return context.runBlocking(build(identity));
+	}
+
+	private Supplier<SecurityIdentity> build(SecurityIdentity identity) {
 		if (overrideRole) {
 			// create a new builder and copy principal, attributes, credentials and roles
 			// from the original
@@ -45,11 +48,10 @@ public class RoleAugmentor implements SecurityIdentityAugmentor {
 			// add custom role source here
 			builder.addRole(overrideRoleName);
 			// put the updated role in the future
-			cs.complete(builder.build());
+			return builder::build;
 		} else {
 			// put the unmodified identity in the future
-			cs.complete(identity);
+			return () -> identity;
 		}
-		return cs;
 	}
 }
\ No newline at end of file
diff --git a/src/main/java/org/eclipsefoundation/core/helper/ResponseHelper.java b/src/main/java/org/eclipsefoundation/core/helper/ResponseHelper.java
index 1ca5701d4ee34234a3a1a150acc396aa79e32459..25504f6d42bc7ee38afcdc04eac811d9f4d1fc89 100644
--- a/src/main/java/org/eclipsefoundation/core/helper/ResponseHelper.java
+++ b/src/main/java/org/eclipsefoundation/core/helper/ResponseHelper.java
@@ -22,6 +22,8 @@ import javax.xml.bind.DatatypeConverter;
 
 import org.eclipsefoundation.core.model.RequestWrapper;
 import org.eclipsefoundation.core.service.CachingService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class that transforms data into a response usable for the RESTeasy
@@ -33,6 +35,7 @@ import org.eclipsefoundation.core.service.CachingService;
  */
 @ApplicationScoped
 public class ResponseHelper {
+	private static final Logger LOGGER = LoggerFactory.getLogger(ResponseHelper.class);
 
 	private static final MessageDigest DIGEST;
 	static {
@@ -68,6 +71,7 @@ public class ResponseHelper {
 			// get the TTL for the current entry
 			Optional<Long> ttl = cachingService.getExpiration(id, wrapper);
 			if (!ttl.isPresent()) {
+				LOGGER.error("TTL not present!");
 				return Response.serverError().build();
 			}
 
diff --git a/src/main/java/org/eclipsefoundation/core/model/RequestWrapper.java b/src/main/java/org/eclipsefoundation/core/model/RequestWrapper.java
index b76f300dbaa12e7a0bc0bd5fe31a59c337ffdf57..ecaf0a1fe1e13e8c0b42019c51d21e6820f7c4f3 100644
--- a/src/main/java/org/eclipsefoundation/core/model/RequestWrapper.java
+++ b/src/main/java/org/eclipsefoundation/core/model/RequestWrapper.java
@@ -6,7 +6,6 @@
  */
 package org.eclipsefoundation.core.model;
 
-import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Date;
 import java.util.HashMap;
@@ -14,7 +13,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
-import java.util.stream.Collectors;
 
 import javax.enterprise.context.RequestScoped;
 import javax.servlet.http.HttpServletRequest;
@@ -24,7 +22,9 @@ import javax.ws.rs.core.UriInfo;
 import org.apache.commons.lang3.StringUtils;
 import org.eclipsefoundation.core.namespace.DeprecatedHeader;
 import org.eclipsefoundation.core.namespace.RequestHeaderNames;
+import org.eclipsefoundation.core.namespace.UrlParameterName;
 import org.eclipsefoundation.core.request.CacheBypassFilter;
+import org.eclipsefoundation.marketplace.model.QueryParameters;
 import org.eclipsefoundation.marketplace.model.UserAgent;
 import org.jboss.resteasy.core.ResteasyContext;
 
@@ -40,7 +40,7 @@ import org.jboss.resteasy.core.ResteasyContext;
 public class RequestWrapper {
 	private static final String EMPTY_KEY_MESSAGE = "Key must not be null or blank";
 
-	private Map<String, List<String>> params;
+	private QueryParameters params;
 
 	private UriInfo uriInfo;
 	private HttpServletRequest request;
@@ -67,7 +67,7 @@ public class RequestWrapper {
 	 * @return the first value set in the parameter map for the given key, or null
 	 *         if absent.
 	 */
-	public Optional<String> getFirstParam(UrlParameterNames parameter) {
+	public Optional<String> getFirstParam(UrlParameterName parameter) {
 		if (parameter == null) {
 			throw new IllegalArgumentException(EMPTY_KEY_MESSAGE);
 		}
@@ -87,7 +87,7 @@ public class RequestWrapper {
 	 * @return the value list for the given key if it exists, or an empty collection
 	 *         if none exists.
 	 */
-	public List<String> getParams(UrlParameterNames parameter) {
+	public List<String> getParams(UrlParameterName parameter) {
 		if (parameter == null) {
 			throw new IllegalArgumentException(EMPTY_KEY_MESSAGE);
 		}
@@ -114,6 +114,21 @@ public class RequestWrapper {
 		getParams().add(key, value);
 	}
 
+	/**
+	 * Adds the given value for the given key, preserving previous values if they
+	 * exist.
+	 * 
+	 * @param key   URL parameter to add the value to, must not be null
+	 * @param value the value to add to the key
+	 */
+	public void addParam(UrlParameterName key, String value) {
+		if (key == null) {
+			throw new IllegalArgumentException(EMPTY_KEY_MESSAGE);
+		}
+		Objects.requireNonNull(value);
+		getParams().add(key.getParameterName(), value);
+	}
+
 	/**
 	 * Sets the value as the value for the given key, removing previous values if
 	 * they exist.
@@ -131,10 +146,6 @@ public class RequestWrapper {
 		addParam(key, value);
 	}
 
-	public List<UrlParameterNames> getActiveParameters() {
-		return params.asMap().keySet().stream().map(UrlParameterNames::getByParameterName).filter(Objects::nonNull)
-				.collect(Collectors.toList());
-	}
 
 	/**
 	 * Returns this QueryParams object as a Map of param values indexed by the param
diff --git a/src/main/java/org/eclipsefoundation/core/namespace/DefaultUrlParameterNames.java b/src/main/java/org/eclipsefoundation/core/namespace/DefaultUrlParameterNames.java
new file mode 100644
index 0000000000000000000000000000000000000000..e586f56d3afcfa49d79e92b10ea2cdd5545531c7
--- /dev/null
+++ b/src/main/java/org/eclipsefoundation/core/namespace/DefaultUrlParameterNames.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2019 Eclipse Foundation and others.
+ * 
+ * This program and the accompanying materials are made
+ * available under the terms of the Eclipse Public License 2.0
+ * which is available at https://www.eclipse.org/legal/epl-2.0/
+ * 
+ * SPDX-License-Identifier: EPL-2.0
+*/
+package org.eclipsefoundation.core.namespace;
+
+/**
+ * Namespace containing common URL parameters used throughout the API.
+ * 
+ * @author Martin Lowe
+ */
+public enum DefaultUrlParameterNames implements UrlParameterName {
+
+	QUERY_STRING("q"),
+	PAGE("page"),
+	LIMIT("limit"),
+	IDS("ids"),
+	ID("id");
+
+	private String parameterName;
+	private DefaultUrlParameterNames(String parameterName) {
+		this.parameterName = parameterName;
+	}
+	
+	/**
+	 * @return the URL parameter's name
+	 */
+	@Override
+	public String getParameterName() {
+		return parameterName;
+	}
+	
+	/**
+	 * Retrieves the UrlParameterName for the given name.
+	 * 
+	 * @param name the name to retrieve a URL parameter for
+	 * @return the URL parameter name if it exists, or null if no match is found
+	 */
+	public static DefaultUrlParameterNames getByParameterName(String name) {
+		for (DefaultUrlParameterNames param: values()) {
+			if (param.getParameterName().equalsIgnoreCase(name)) {
+				return param;
+			}
+		}
+		return null;
+	}
+}
diff --git a/src/main/java/org/eclipsefoundation/core/namespace/UrlParameterName.java b/src/main/java/org/eclipsefoundation/core/namespace/UrlParameterName.java
new file mode 100644
index 0000000000000000000000000000000000000000..51e28080d3cf9595539fbe3505f0b58d66919a19
--- /dev/null
+++ b/src/main/java/org/eclipsefoundation/core/namespace/UrlParameterName.java
@@ -0,0 +1,5 @@
+package org.eclipsefoundation.core.namespace;
+
+public interface UrlParameterName {
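+/**
+ * Minimal contract for enumerations of URL parameter names, allowing request
+ * helpers to accept parameters from more than one namespace (e.g.
+ * {@link DefaultUrlParameterNames} and application-specific sets).
+ */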
+	String getParameterName();
+}
diff --git a/src/main/java/org/eclipsefoundation/core/request/CacheBypassFilter.java b/src/main/java/org/eclipsefoundation/core/request/CacheBypassFilter.java
index b8f71f7d87161f8a30f504dbee062e7404538bfa..128a6738e652e12fc4c049f403eb599c513669e2 100644
--- a/src/main/java/org/eclipsefoundation/core/request/CacheBypassFilter.java
+++ b/src/main/java/org/eclipsefoundation/core/request/CacheBypassFilter.java
@@ -8,6 +8,8 @@ package org.eclipsefoundation.core.request;
 
 import java.io.IOException;
 
+import javax.enterprise.inject.Instance;
+import javax.inject.Inject;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.container.ContainerRequestContext;
@@ -15,9 +17,6 @@ import javax.ws.rs.container.ContainerRequestFilter;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.ext.Provider;
 
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
-import org.eclipsefoundation.persistence.model.SortOrder;
-
 /**
  * Checks passed parameters and if any match one of the criteria for bypassing
  * caching, an attribute will be set to the request to skip cache requests and
@@ -30,6 +29,9 @@ import org.eclipsefoundation.persistence.model.SortOrder;
 public class CacheBypassFilter implements ContainerRequestFilter {
 	public static final String ATTRIBUTE_NAME = "bypass-cache";
 
+	@Inject
+	Instance<BypassCondition> conditions;
+
 	@Context
 	HttpServletRequest request;
 
@@ -39,14 +41,10 @@ public class CacheBypassFilter implements ContainerRequestFilter {
 	@Override
 	public void filter(ContainerRequestContext requestContext) throws IOException {
 		// check for random sort order, which always bypasses cache
-		String[] sortVals = request.getParameterValues(UrlParameterNames.SORT.getParameterName());
-		if (sortVals != null) {
-			for (String sortVal : sortVals) {
-				// check if the sort order for request matches RANDOM
-				if (SortOrder.RANDOM.equals(SortOrder.getOrderFromValue(sortVal))) {
-					setBypass();
-					return;
-				}
+		for (BypassCondition cond : conditions) {
+			if (cond.matches(requestContext, request)) {
+				setBypass();
+				return;
 			}
 		}
 		request.setAttribute(ATTRIBUTE_NAME, Boolean.FALSE);
@@ -57,4 +55,24 @@ public class CacheBypassFilter implements ContainerRequestFilter {
 		// no-store should be used as cache bypass should not return
 		response.setHeader("Cache-Control", "no-store");
 	}
+
+	/**
+	 * Interface for adding a bypass condition to requests made against a given
+	 * server.
+	 * 
+	 * @author Martin Lowe
+	 *
+	 */
+	public interface BypassCondition {
+		/**
+		 * Tests the request context to check whether any data fetched for this request
+		 * should bypass the cache layer.
+		 * 
+		 * @param requestContext the current requests container context
+		 * @param request        raw servlet request containing more information about
+		 *                       the request
+		 * @return true if the request should bypass the cache, false otherwise.
+		 */
+		boolean matches(ContainerRequestContext requestContext, HttpServletRequest request);
+	}
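+	/*
+	 * Illustrative sketch (assumption, not shipped code): conditions are discovered
+	 * as CDI beans through the injected Instance<BypassCondition> above, so a new
+	 * rule could look roughly like:
+	 *
+	 *   @ApplicationScoped
+	 *   public class NoCacheHeaderCondition implements CacheBypassFilter.BypassCondition {
+	 *       @Override
+	 *       public boolean matches(ContainerRequestContext requestContext, HttpServletRequest request) {
+	 *           // hypothetical rule: bypass caching when the client sends "Cache-Control: no-cache"
+	 *           return "no-cache".equals(requestContext.getHeaderString("Cache-Control"));
+	 *       }
+	 *   }
+	 */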
 }
diff --git a/src/main/java/org/eclipsefoundation/core/service/CachingService.java b/src/main/java/org/eclipsefoundation/core/service/CachingService.java
index 815d677bb10190119f55d1d6dcbf63f88b664c23..a05c04e1c285dbb16a9e991d4475383c2ddafece 100644
--- a/src/main/java/org/eclipsefoundation/core/service/CachingService.java
+++ b/src/main/java/org/eclipsefoundation/core/service/CachingService.java
@@ -6,8 +6,6 @@
  */
 package org.eclipsefoundation.core.service;
 
-import java.util.List;
-import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
 import java.util.concurrent.Callable;
@@ -34,8 +32,7 @@ public interface CachingService<T> {
 	 * @param callable a runnable that returns an object of type T
 	 * @return the cached result
 	 */
-	Optional<T> get(String id, RequestWrapper wrapper, Map<String, List<String>> params,
-			Callable<? extends T> callable);
+	Optional<T> get(String id, RequestWrapper wrapper, Callable<? extends T> callable);
 
 	/**
 	 * Returns the expiration date in millis since epoch.
@@ -80,16 +77,14 @@ public interface CachingService<T> {
 	 * @param params  parameters to use in place of wrapper parameters when set
 	 * @return the unique cache key for the request.
 	 */
-	default String getCacheKey(String id, RequestWrapper wrapper, Map<String, List<String>> params) {
+	default String getCacheKey(String id, RequestWrapper wrapper) {
 		StringBuilder sb = new StringBuilder();
 		sb.append('[').append(wrapper.getEndpoint()).append(']');
 		sb.append("id:").append(id);
 
-		// get the used set of parameters for filtering data
-		Map<String, List<String>> actual = params == null ? wrapper.asMap() : params;
 		// join all the non-empty params to the key to create distinct entries for
 		// filtered values
-		actual.entrySet().stream().filter(e -> !e.getValue().isEmpty())
+		wrapper.asMap().entrySet().stream().filter(e -> !e.getValue().isEmpty())
 				.map(e -> e.getKey() + '=' + StringUtils.join(e.getValue(), ','))
 				.forEach(s -> sb.append('|').append(s));
 
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/Listing.java b/src/main/java/org/eclipsefoundation/marketplace/dto/Listing.java
index c79d1c22dd1ebd5ce17f53d686a2ba531593c070..d1eaccb34ef12b00a184a038cd87511804052a0c 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/Listing.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/Listing.java
@@ -32,12 +32,12 @@ import javax.persistence.PostLoad;
 import javax.persistence.Table;
 import javax.persistence.Transient;
 
-import org.eclipsefoundation.marketplace.model.SortableField;
 import org.eclipsefoundation.marketplace.namespace.DatabaseFieldNames;
 import org.eclipsefoundation.persistence.dto.BareNode;
 import org.eclipsefoundation.persistence.dto.NodeBase;
 import org.eclipsefoundation.persistence.model.SortableField;
 import org.eclipsefoundation.search.model.Indexed;
+import org.eclipsefoundation.search.namespace.IndexerTextProcessingType;
 import org.hibernate.annotations.NotFound;
 import org.hibernate.annotations.NotFoundAction;
 
@@ -50,8 +50,7 @@ import org.hibernate.annotations.NotFoundAction;
 @Table(
 	indexes = {
 		@Index(columnList="licenseType"),
-		@Index(columnList="changed"),
-		@Index(columnList="seed")
+		@Index(columnList="changed")
 	}
 )
 public class Listing extends NodeBase {
@@ -59,10 +58,10 @@ public class Listing extends NodeBase {
 	private String supportUrl;
 	private String homepageUrl;
 	@Lob
-	@Indexed
+	@Indexed(textProcessing = IndexerTextProcessingType.AGGRESSIVE)
 	private String teaser;
-	@Indexed
 	@Lob
+	@Indexed(textProcessing = IndexerTextProcessingType.AGGRESSIVE)
 	private String body;
 	private String status;
 	private String logo;
@@ -103,6 +102,7 @@ public class Listing extends NodeBase {
 	@OneToOne(mappedBy = "listing")
 	@NotFound(action = NotFoundAction.IGNORE)
 	private InstallMetrics metrics;
+	private boolean isPromotion;
 
 	/**
 	 * Default constructor, sets lists to empty lists to stop null pointers
@@ -124,8 +124,9 @@ public class Listing extends NodeBase {
 			// get recent installs
 			Calendar c = Calendar.getInstance();
 			int thisMonth = c.get(Calendar.MONTH);
+			int thisYear = c.get(Calendar.YEAR);
 			Optional<MetricPeriod> current = metrics.getPeriods().stream()
-					.filter(p -> p.getStart().toInstant().get(ChronoField.MONTH_OF_YEAR) == thisMonth).findFirst();
+					.filter(p -> { Calendar start = Calendar.getInstance(); start.setTimeInMillis(p.getStart().toInstant().toEpochMilli()); return start.get(Calendar.MONTH) == thisMonth && start.get(Calendar.YEAR) == thisYear; }).findFirst();
 			// check if we have an entry for the current month
 			if (current.isPresent()) {
 				this.installsRecent = current.get().getCount();
@@ -480,7 +481,8 @@ public class Listing extends NodeBase {
 				&& Objects.equals(this.getOrganization(), other.getOrganization()) && Objects.equals(status, other.status)
 				&& Objects.equals(supportUrl, other.supportUrl) && Objects.equals(this.getTags(), other.getTags())
 				&& Objects.equals(teaser, other.teaser) && changed == other.changed
-				&& Objects.equals(this.getVersions(), other.getVersions()) && Objects.equals(this.getScreenshots(), other.getScreenshots());
+				&& Objects.equals(this.getVersions(), other.getVersions())
+				&& Objects.equals(this.getScreenshots(), other.getScreenshots());
 	}
 
 	@Override
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/Promotion.java b/src/main/java/org/eclipsefoundation/marketplace/dto/Promotion.java
index 3339a0d56d3ad841a42b2a8d764d5f639d2e688b..9b30427374be2b2f9fa6f34f88af11c7b5d6e5bf 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/Promotion.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/Promotion.java
@@ -1,25 +1,12 @@
 package org.eclipsefoundation.marketplace.dto;
 
-public class Promotion {
+import org.eclipsefoundation.persistence.dto.BareNode;
+
+public class Promotion extends BareNode {
 
-	private String id;
 	private String listingId;
 	private int weight = 1;
 
-	/**
-	 * @return the id
-	 */
-	public String getId() {
-		return id;
-	}
-
-	/**
-	 * @param id the id to set
-	 */
-	public void setId(String id) {
-		this.id = id;
-	}
-
 	/**
 	 * @return the listingId
 	 */
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/codecs/PromotionCodec.java b/src/main/java/org/eclipsefoundation/marketplace/dto/codecs/PromotionCodec.java
deleted file mode 100644
index 5af459f4f625423ba1f87aacf2c6f6d9c6508eb7..0000000000000000000000000000000000000000
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/codecs/PromotionCodec.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/* Copyright (c) 2019 Eclipse Foundation and others.
- * This program and the accompanying materials are made available
- * under the terms of the Eclipse Public License 2.0
- * which is available at http://www.eclipse.org/legal/epl-v20.html,
- * SPDX-License-Identifier: EPL-2.0
- */
-package org.eclipsefoundation.marketplace.dto.codecs;
-
-import java.util.UUID;
-
-import org.apache.commons.lang3.StringUtils;
-import org.bson.BsonReader;
-import org.bson.BsonString;
-import org.bson.BsonValue;
-import org.bson.BsonWriter;
-import org.bson.Document;
-import org.bson.codecs.Codec;
-import org.bson.codecs.CollectibleCodec;
-import org.bson.codecs.DecoderContext;
-import org.bson.codecs.EncoderContext;
-import org.eclipsefoundation.marketplace.dto.Promotion;
-import org.eclipsefoundation.marketplace.namespace.DatabaseFieldNames;
-
-import com.mongodb.MongoClient;
-
-/**
- * Codec for reading and writing {@linkplain Promotion} objectss to database objects.
- * 
- * @author Martin Lowe
- *
- */
-public class PromotionCodec implements CollectibleCodec<Promotion> {
-	
-	private final Codec<Document> documentCodec;
-
-	/**
-	 * Creates the codec and initializes the codecs and converters needed to create
-	 * a listing from end to end.
-	 */
-	public PromotionCodec() {
-		this.documentCodec = MongoClient.getDefaultCodecRegistry().get(Document.class);
-	}
-
-	@Override
-	public void encode(BsonWriter writer, Promotion value, EncoderContext encoderContext) {
-		Document doc = new Document();
-		doc.put(DatabaseFieldNames.DOCID, value.getId());
-		doc.put(DatabaseFieldNames.LISTING_ID, value.getListingId());
-		documentCodec.encode(writer, doc, encoderContext);
-	}
-
-	@Override
-	public Class<Promotion> getEncoderClass() {
-		return Promotion.class;
-	}
-
-	@Override
-	public Promotion decode(BsonReader reader, DecoderContext decoderContext) {
-		Document value = documentCodec.decode(reader, decoderContext);
-
-		Promotion out = new Promotion();
-		out.setId(value.getString(DatabaseFieldNames.DOCID));
-		out.setListingId(value.getString(DatabaseFieldNames.LISTING_ID));
-		out.setWeight(value.getInteger(DatabaseFieldNames.PROMOTION_WEIGHTING, 1));
-		return out;
-	}
-
-	@Override
-	public Promotion generateIdIfAbsentFromDocument(Promotion document) {
-		if (!documentHasId(document)) {
-			document.setId(UUID.randomUUID().toString());
-		}
-		return document;
-	}
-
-	@Override
-	public boolean documentHasId(Promotion document) {
-		return !StringUtils.isBlank(document.getId());
-	}
-
-	@Override
-	public BsonValue getDocumentId(Promotion document) {
-		return new BsonString(document.getId());
-	}
-
-}
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/CatalogFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/CatalogFilter.java
index 13abf697322e3f7166df5c7fe10587b2b5c86a19..fdc40a473dc6da862385ec252caf2d0dba2cc6e2 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/CatalogFilter.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/CatalogFilter.java
@@ -13,10 +13,10 @@ import javax.enterprise.context.ApplicationScoped;
 import javax.inject.Inject;
 
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.marketplace.dto.Catalog;
 import org.eclipsefoundation.marketplace.namespace.DatabaseFieldNames;
 import org.eclipsefoundation.marketplace.namespace.DtoTableNames;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
 import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
 import org.eclipsefoundation.persistence.model.ParameterizedSQLStatement;
 import org.eclipsefoundation.persistence.model.ParameterizedSQLStatementBuilder;
@@ -31,13 +31,13 @@ public class CatalogFilter implements DtoFilter<Catalog> {
 
 	@Inject
 	ParameterizedSQLStatementBuilder builder;
-	
+
 	@Override
 	public ParameterizedSQLStatement getFilters(RequestWrapper wrap, boolean isRoot) {
 		ParameterizedSQLStatement stmt = builder.build(DtoTableNames.CATALOG.getTable());
 		if (isRoot) {
 			// ID check
-			Optional<String> id = wrap.getFirstParam(UrlParameterNames.ID);
+			Optional<String> id = wrap.getFirstParam(DefaultUrlParameterNames.ID);
 			if (id.isPresent()) {
 				stmt.addClause(new ParameterizedSQLStatement.Clause(
 						DtoTableNames.CATALOG.getAlias() + "." + DatabaseFieldNames.DOCID + " = ?",
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/CategoryFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/CategoryFilter.java
index 708c81c05080278ee01fddfa396a89a1a8376aeb..d64d15187368967a0d38e239bcac0fdb3b4d2c01 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/CategoryFilter.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/CategoryFilter.java
@@ -13,10 +13,10 @@ import javax.enterprise.context.ApplicationScoped;
 import javax.inject.Inject;
 
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.marketplace.dto.Category;
 import org.eclipsefoundation.marketplace.namespace.DatabaseFieldNames;
 import org.eclipsefoundation.marketplace.namespace.DtoTableNames;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
 import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
 import org.eclipsefoundation.persistence.model.ParameterizedSQLStatement;
 import org.eclipsefoundation.persistence.model.ParameterizedSQLStatementBuilder;
@@ -31,14 +31,13 @@ public class CategoryFilter implements DtoFilter<Category> {
 
 	@Inject
 	ParameterizedSQLStatementBuilder builder;
-	
 
 	@Override
 	public ParameterizedSQLStatement getFilters(RequestWrapper wrap, boolean isRoot) {
 		ParameterizedSQLStatement stmt = builder.build(DtoTableNames.CATEGORY.getTable());
 		if (isRoot) {
 			// ID check
-			Optional<String> id = wrap.getFirstParam(UrlParameterNames.ID);
+			Optional<String> id = wrap.getFirstParam(DefaultUrlParameterNames.ID);
 			if (id.isPresent()) {
 				stmt.addClause(new ParameterizedSQLStatement.Clause(
 						DtoTableNames.CATEGORY.getAlias() + "." + DatabaseFieldNames.DOCID + " = ?",
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/DtoFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/DtoFilter.java
deleted file mode 100644
index d4cedace6e3e6d1fe0282604708d03ed3ce203e7..0000000000000000000000000000000000000000
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/DtoFilter.java
+++ /dev/null
@@ -1,97 +0,0 @@
-/* Copyright (c) 2019 Eclipse Foundation and others.
- * This program and the accompanying materials are made available
- * under the terms of the Eclipse Public License 2.0
- * which is available at http://www.eclipse.org/legal/epl-v20.html,
- * SPDX-License-Identifier: EPL-2.0
- */
-package org.eclipsefoundation.marketplace.dto.filter;
-
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-
-import org.bson.conversions.Bson;
-import org.eclipsefoundation.marketplace.model.QueryParameters;
-import org.eclipsefoundation.marketplace.namespace.DatabaseFieldNames;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
-
-import com.mongodb.client.model.Aggregates;
-import com.mongodb.client.model.Filters;
-
-/**
- * Filter interface for usage when querying data.
- * 
- * @author Martin Lowe
- */
-public interface DtoFilter<T> {
-
-	/**
-	 * Retrieve filter objects for the current arguments.
-	 * 
-	 * @param params     parameters to use in filter construction
-	 * @param nestedPath current path for nesting of filters
-	 * @return list of filters for the current request, or empty if there are no
-	 *         applicable filters.
-	 */
-	default List<Bson> getFilters(QueryParameters params, String root) {
-		List<Bson> filters = new ArrayList<>();
-		// perform following checks only if there is no doc root
-		if (root == null) {
-			// ID check
-			Optional<String> id = params.getFirstIfPresent(UrlParameterNames.ID.getParameterName());
-			if (id.isPresent()) {
-				filters.add(Filters.eq(DatabaseFieldNames.DOCID, id.get()));
-			}
-		}
-		return filters;
-
-	}
-
-	/**
-	 * Retrieve aggregate filter operations for the current arguments.
-	 * 
-	 * @param params parameters to use in aggregate construction
-	 * @return list of aggregates for the current request, or empty if there are no
-	 *         applicable aggregates.
-	 */
-	default List<Bson> getAggregates(QueryParameters params) {
-		return Collections.emptyList();
-	}
-
-	/**
-	 * Returns the type of data this object will filter for.
-	 * 
-	 * @return class of object to filter
-	 */
-	Class<T> getType();
-
-	/**
-	 * Wraps each of the filters present for a given filter type in an aggregate
-	 * match operation to port filter operations into an aggregate pipeline. This is
-	 * handy when importing nested types and enabling filters.
-	 * 
-	 * @param params     parameters for the current call
-	 * @param nestedPath current path for nesting of filters
-	 * @return a list of aggregate pipeline operations representing the filters for
-	 *         the current request.
-	 */
-	default Bson wrapFiltersToAggregate(QueryParameters params, String nestedPath) {
-		List<Bson> filters = getFilters(params, nestedPath);
-		if (!filters.isEmpty()) {
-			return Aggregates.match(Filters.elemMatch(nestedPath, Filters.and(filters)));
-		}
-		return null;
-	}
-
-	/**
-	 * Whether this type of data should be restrained to a limited set, or return
-	 * all data that is found.
-	 * 
-	 * @return true if limit should be used, false otherwise.
-	 */
-	default boolean useLimit() {
-		return true;
-	}
-
-}
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ErrorReportFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ErrorReportFilter.java
index f16ca52f6c267fb3694da4b6dcf73d9e0f34d693..4c31e58238a9a93eaa0687e564a365a132278c29 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ErrorReportFilter.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ErrorReportFilter.java
@@ -6,20 +6,23 @@
  */
 package org.eclipsefoundation.marketplace.dto.filter;
 
-import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
+import java.util.UUID;
+import java.util.stream.Collectors;
 
 import javax.enterprise.context.ApplicationScoped;
+import javax.inject.Inject;
 
-import org.bson.conversions.Bson;
+import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.marketplace.dto.ErrorReport;
-import org.eclipsefoundation.marketplace.model.RequestWrapper;
 import org.eclipsefoundation.marketplace.namespace.DatabaseFieldNames;
+import org.eclipsefoundation.marketplace.namespace.DtoTableNames;
 import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
-
-import com.mongodb.client.model.Filters;
+import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
+import org.eclipsefoundation.persistence.model.ParameterizedSQLStatement;
+import org.eclipsefoundation.persistence.model.ParameterizedSQLStatementBuilder;
 
 /**
  * Filter implementation for the ErrorReport class.
@@ -31,14 +34,13 @@ public class ErrorReportFilter implements DtoFilter<ErrorReport> {
 
 	@Inject
 	ParameterizedSQLStatementBuilder builder;
-	
 
 	@Override
 	public ParameterizedSQLStatement getFilters(RequestWrapper wrap, boolean isRoot) {
 		ParameterizedSQLStatement stmt = builder.build(DtoTableNames.ERRORREPORT.getTable());
 		if (isRoot) {
 			// ID check
-			Optional<String> id = wrap.getFirstParam(UrlParameterNames.ID);
+			Optional<String> id = wrap.getFirstParam(DefaultUrlParameterNames.ID);
 			if (id.isPresent()) {
 				stmt.addClause(new ParameterizedSQLStatement.Clause(
 						DtoTableNames.ERRORREPORT.getAlias() + "." + DatabaseFieldNames.DOCID + " = ?",
@@ -46,7 +48,7 @@ public class ErrorReportFilter implements DtoFilter<ErrorReport> {
 			}
 		}
 		// IDS
-		List<String> ids = wrap.getParams(UrlParameterNames.IDS);
+		List<String> ids = wrap.getParams(DefaultUrlParameterNames.IDS);
 		if (!ids.isEmpty()) {
 			stmt.addClause(new ParameterizedSQLStatement.Clause(
 					DtoTableNames.ERRORREPORT.getAlias() + "." + DatabaseFieldNames.DOCID + " = ?",
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/InstallFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/InstallFilter.java
index 9cf0ef9822256ad1485251d48a5cfd0ea143cf23..c3f32939d4d0d9aee65a6f07152f4eb6ce399e2b 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/InstallFilter.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/InstallFilter.java
@@ -6,10 +6,6 @@
  */
 package org.eclipsefoundation.marketplace.dto.filter;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.Date;
-import java.util.List;
 import java.util.Optional;
 import java.util.UUID;
 
@@ -18,8 +14,8 @@ import javax.inject.Inject;
 
 import org.apache.commons.lang3.StringUtils;
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.marketplace.dto.Install;
-import org.eclipsefoundation.marketplace.model.RequestWrapper;
 import org.eclipsefoundation.marketplace.namespace.DatabaseFieldNames;
 import org.eclipsefoundation.marketplace.namespace.DtoTableNames;
 import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
@@ -45,7 +41,7 @@ public class InstallFilter implements DtoFilter<Install> {
 
 		if (isRoot) {
 			// ID check
-			Optional<String> id = wrap.getFirstParam(UrlParameterNames.ID);
+			Optional<String> id = wrap.getFirstParam(DefaultUrlParameterNames.ID);
 			if (id.isPresent()) {
 				stmt.addClause(new ParameterizedSQLStatement.Clause(
 						DtoTableNames.INSTALL.getAlias() + "." + DatabaseFieldNames.LISTING_ID + " = ?",
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/InstallMetricsFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/InstallMetricsFilter.java
deleted file mode 100644
index 3d8474901b063d9625417fc1982b9017739ad1ca..0000000000000000000000000000000000000000
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/InstallMetricsFilter.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2019 Eclipse Foundation and others.
- * This program and the accompanying materials are made available
- * under the terms of the Eclipse Public License 2.0
- * which is available at http://www.eclipse.org/legal/epl-v20.html,
- * SPDX-License-Identifier: EPL-2.0
- */
-package org.eclipsefoundation.marketplace.dto.filter;
-
-import javax.enterprise.context.ApplicationScoped;
-
-import org.eclipsefoundation.marketplace.dto.InstallMetrics;
-
-/**
- * Filter implementation for the {@linkplain InstallMetrics} class.
- * 
- * @author Martin Lowe
- *
- */
-@ApplicationScoped
-public class InstallMetricsFilter implements DtoFilter<InstallMetrics> {
-
-	@Override
-	public Class<InstallMetrics> getType() {
-		return InstallMetrics.class;
-	}
-
-}
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ListingFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ListingFilter.java
index b966df476b318ceab453db83772e084db0aeeb47..e805a6dac51aa24e8a4a78b9e4323943ff487577 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ListingFilter.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ListingFilter.java
@@ -6,18 +6,18 @@
  */
 package org.eclipsefoundation.marketplace.dto.filter;
 
-import java.util.ArrayList;
 import java.util.List;
 import java.util.Optional;
 import java.util.UUID;
+import java.util.stream.Collectors;
 
 import javax.enterprise.context.ApplicationScoped;
 import javax.inject.Inject;
 
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.marketplace.dto.Listing;
 import org.eclipsefoundation.marketplace.dto.ListingVersion;
-import org.eclipsefoundation.marketplace.model.RequestWrapper;
 import org.eclipsefoundation.marketplace.namespace.DatabaseFieldNames;
 import org.eclipsefoundation.marketplace.namespace.DtoTableNames;
 import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
@@ -43,7 +43,7 @@ public class ListingFilter implements DtoFilter<Listing> {
 		ParameterizedSQLStatement stmt = builder.build(DtoTableNames.LISTING.getTable());
 		if (isRoot) {
 			// ID check
-			Optional<String> id = wrap.getFirstParam(UrlParameterNames.ID);
+			Optional<String> id = wrap.getFirstParam(DefaultUrlParameterNames.ID);
 			if (id.isPresent()) {
 				stmt.addClause(new ParameterizedSQLStatement.Clause(
 						DtoTableNames.LISTING.getAlias() + "." + DatabaseFieldNames.DOCID + " = ?",
@@ -52,30 +52,21 @@ public class ListingFilter implements DtoFilter<Listing> {
 		}
 
 		// select by multiple IDs
-		List<String> ids = wrap.getParams(UrlParameterNames.IDS);
+		List<String> ids = wrap.getParams(DefaultUrlParameterNames.IDS);
 		if (!ids.isEmpty()) {
+			// convert the IDs to UUID objects
+			List<UUID> actualIDs = ids.stream().map(UUID::fromString).collect(Collectors.toList());
 			stmt.addClause(new ParameterizedSQLStatement.Clause(
-					DtoTableNames.LISTING.getAlias() + "." + DatabaseFieldNames.DOCID + " = ?", new Object[] { ids }));
+					DtoTableNames.LISTING.getAlias() + "." + DatabaseFieldNames.DOCID + " in (?)", new Object[] { actualIDs }));
 		}
 
 		// Listing license type check
-		Optional<String> licType = wrap.getFirstParam(DatabaseFieldNames.LICENSE_TYPE);
+		Optional<String> licType = wrap.getFirstParam(UrlParameterNames.LICENSE_TYPE);
 		if (licType.isPresent()) {
 			stmt.addClause(new ParameterizedSQLStatement.Clause(DtoTableNames.LISTING.getAlias() + ".licenseType = ?",
 					new Object[] { licType.get() }));
 		}
 
-		// TODO, this might need some data structure jigging
-		// select by multiple tags
-		List<String> tags = wrap.getParams(UrlParameterNames.TAGS);
-		//
-		// if (!tags.isEmpty()) {
-		// stmt.addClause(new ParameterizedSQLStatement.Clause(
-		// DtoTableNames.LISTING.getAlias() + "." + DatabaseFieldNames.DOCID + " = ?",
-		// new Object[] { ids },
-		// new JDBCType[] { JDBCType.ARRAY }));
-		// }
-
 		// retrieve the listing version filters.
 		stmt.addJoin(
 				new ParameterizedSQLStatement.Join(DtoTableNames.LISTING.getTable(), DtoTableNames.LISTING_VERSION.getTable(), "versions"));
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ListingVersionFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ListingVersionFilter.java
index 4a3e7f46e1486c6821f391826bb81a551f38c859..4868faf32d746de605cb1e80f5080a368a21f6e5 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ListingVersionFilter.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/ListingVersionFilter.java
@@ -6,9 +6,6 @@
  */
 package org.eclipsefoundation.marketplace.dto.filter;
 
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
 import java.util.Optional;
 import java.util.UUID;
 
@@ -17,6 +14,7 @@ import javax.inject.Inject;
 
 import org.apache.commons.lang3.StringUtils;
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.marketplace.dto.ListingVersion;
 import org.eclipsefoundation.marketplace.namespace.DtoTableNames;
 import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
@@ -41,7 +39,7 @@ public class ListingVersionFilter implements DtoFilter<ListingVersion> {
 		ParameterizedSQLStatement stmt = builder.build(DtoTableNames.LISTING_VERSION.getTable());
 		if (isRoot) {
 			// ID check
-			Optional<String> id = wrap.getFirstParam(UrlParameterNames.ID);
+			Optional<String> id = wrap.getFirstParam(DefaultUrlParameterNames.ID);
 			if (id.isPresent()) {
 				stmt.addClause(
 						new ParameterizedSQLStatement.Clause(DtoTableNames.LISTING_VERSION.getAlias() + ".id = ?",
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/MarketFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/MarketFilter.java
index 21fb37aba53a3e8739b6560414319b59ca0e121b..5a4fe30db6ccc33f912944b3c64ed71555f1a76e 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/MarketFilter.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/MarketFilter.java
@@ -13,6 +13,7 @@ import javax.enterprise.context.ApplicationScoped;
 import javax.inject.Inject;
 
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.marketplace.dto.Market;
 import org.eclipsefoundation.marketplace.namespace.DatabaseFieldNames;
 import org.eclipsefoundation.marketplace.namespace.DtoTableNames;
@@ -37,7 +38,7 @@ public class MarketFilter implements DtoFilter<Market> {
 		ParameterizedSQLStatement stmt = builder.build(DtoTableNames.MARKET.getTable());
 		if (isRoot) {
 			// ID check
-			Optional<String> id = wrap.getFirstParam(UrlParameterNames.ID);
+			Optional<String> id = wrap.getFirstParam(DefaultUrlParameterNames.ID);
 			if (id.isPresent()) {
 				stmt.addClause(new ParameterizedSQLStatement.Clause(
 						DtoTableNames.MARKET.getAlias() + "." + DatabaseFieldNames.DOCID + " = ?",
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/MetricPeriodFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/MetricPeriodFilter.java
index 09aed32576b3069587a1e2889941503d261417c4..dfb0b6d3d68a28fa16698ca30407a0cb7da0d1cd 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/MetricPeriodFilter.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/MetricPeriodFilter.java
@@ -6,12 +6,6 @@
  */
 package org.eclipsefoundation.marketplace.dto.filter;
 
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Optional;
-
 import javax.enterprise.context.ApplicationScoped;
 import javax.inject.Inject;
 
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/PromotionFilter.java b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/PromotionFilter.java
index d7852402e29dca1aca9cb4663d834897e3783066..c60baa41bb41727b11df676788c8606bf17374a9 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/filter/PromotionFilter.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/dto/filter/PromotionFilter.java
@@ -6,10 +6,20 @@
  */
 package org.eclipsefoundation.marketplace.dto.filter;
 
+import java.util.Optional;
+import java.util.UUID;
+
 import javax.enterprise.context.ApplicationScoped;
+import javax.inject.Inject;
 
+import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.marketplace.dto.Promotion;
-
+import org.eclipsefoundation.marketplace.namespace.DatabaseFieldNames;
+import org.eclipsefoundation.marketplace.namespace.DtoTableNames;
+import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
+import org.eclipsefoundation.persistence.model.ParameterizedSQLStatement;
+import org.eclipsefoundation.persistence.model.ParameterizedSQLStatementBuilder;
 
 /**
  * Filter implementation for the {@link Promotion} class.
@@ -20,6 +30,24 @@ import org.eclipsefoundation.marketplace.dto.Promotion;
 @ApplicationScoped
 public class PromotionFilter implements DtoFilter<Promotion> {
 
+	@Inject
+	ParameterizedSQLStatementBuilder builder;
+
+	@Override
+	public ParameterizedSQLStatement getFilters(RequestWrapper wrap, boolean isRoot) {
+		ParameterizedSQLStatement stmt = builder.build(DtoTableNames.LISTING.getTable());
+		if (isRoot) {
+			// ID check
+			Optional<String> id = wrap.getFirstParam(DefaultUrlParameterNames.ID);
+			if (id.isPresent()) {
+				stmt.addClause(new ParameterizedSQLStatement.Clause(
+						DtoTableNames.LISTING.getAlias() + "." + DatabaseFieldNames.DOCID + " = ?",
+						new Object[] { UUID.fromString(id.get()) }));
+			}
+		}
+		return stmt;
+	}
+
 	@Override
 	public Class<Promotion> getType() {
 		return Promotion.class;
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dto/providers/PromotionCodecProvider.java b/src/main/java/org/eclipsefoundation/marketplace/dto/providers/PromotionCodecProvider.java
deleted file mode 100644
index 80f26042991aab079f009dbe17db60b14bd27c95..0000000000000000000000000000000000000000
--- a/src/main/java/org/eclipsefoundation/marketplace/dto/providers/PromotionCodecProvider.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2019 Eclipse Foundation and others.
- * This program and the accompanying materials are made available
- * under the terms of the Eclipse Public License 2.0
- * which is available at http://www.eclipse.org/legal/epl-v20.html,
- * SPDX-License-Identifier: EPL-2.0
- */
-package org.eclipsefoundation.marketplace.dto.providers;
-
-import org.bson.codecs.Codec;
-import org.bson.codecs.configuration.CodecProvider;
-import org.bson.codecs.configuration.CodecRegistry;
-import org.eclipsefoundation.marketplace.dto.Promotion;
-import org.eclipsefoundation.marketplace.dto.codecs.PromotionCodec;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * Provides the {@link PromotionCodec} to MongoDB for conversions of
- * {@link Promotion} objects.
- * 
- * @author Martin Lowe
- */
-public class PromotionCodecProvider implements CodecProvider {
-	private static final Logger LOGGER = LoggerFactory.getLogger(PromotionCodecProvider.class);
-
-	@SuppressWarnings("unchecked")
-	@Override
-	public <T> Codec<T> get(Class<T> clazz, CodecRegistry registry) {
-		if (clazz == Promotion.class) {
-			LOGGER.debug("Registering custom Promotion class MongoDB codec");
-			return (Codec<T>) new PromotionCodec();
-		}
-		return null;
-	}
-}
diff --git a/src/main/java/org/eclipsefoundation/marketplace/namespace/UrlParameterNames.java b/src/main/java/org/eclipsefoundation/marketplace/namespace/UrlParameterNames.java
index 5316dbad62be0e8fe7525af6521c5da409285454..9239cc1a4780c2e8f3ab35c50e8c76d17fcf4e56 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/namespace/UrlParameterNames.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/namespace/UrlParameterNames.java
@@ -9,33 +9,55 @@
 */
 package org.eclipsefoundation.marketplace.namespace;
 
+import org.eclipsefoundation.core.namespace.UrlParameterName;
+
 /**
  * Namespace containing URL parameters used throughout the API.
  * 
  * @author Martin Lowe
  */
-public final class UrlParameterNames {
+public enum UrlParameterNames implements UrlParameterName {
+
+	SORT("sort"),
+	OS("os"),
+	ECLIPSE_VERSION("eclipse_version"),
+	JAVA_VERSION("min_java_version"),
+	TAGS("tags"),
+	MARKET_IDS("market_ids"),
+	LISTING_ID("listing_id"),
+	READ("read"),
+	FEATURE_ID("feature_id"),
+	VERSION("version"),
+	DATE_FROM("from"),
+	END("end"),
+	START("start"), 
+	LICENSE_TYPE("license_type");
 
-	public static final String QUERY_STRING = "q";
-	public static final String PAGE = "page";
-	public static final String LIMIT = "limit";
-	public static final String SORT = "sort";
-	public static final String OS = "os";
-	public static final String ECLIPSE_VERSION = "eclipse_version";
-	public static final String JAVA_VERSION = "min_java_version";
-	public static final String IDS = "ids";
-	public static final String TAGS = "tags";
-	public static final String MARKET_IDS = "market_ids";
-	public static final String ID = "id";
-	public static final String LISTING_ID = "listing_id";
-	public static final String READ = "read";
-	public static final String FEATURE_ID = "feature_id";
-	public static final String VERSION = "version";
-	public static final String DATE_FROM = "from";
-	public static final String END = "end";
-	public static final String START = "start";
-	public static final String SEED = "seed";
+	private String parameterName;
+	private UrlParameterNames(String parameterName) {
+		this.parameterName = parameterName;
+	}
+	
+	/**
+	 * @return the URL parameter's name
+	 */
+	@Override
+	public String getParameterName() {
+		return parameterName;
+	}
 	
-	private UrlParameterNames() {
+	/**
+	 * Retrieves the UrlParameterName for the given name.
+	 * 
+	 * @param name the name to retrieve a URL parameter for
+	 * @return the URL parameter name if it exists, or null if no match is found
+	 */
+	public static UrlParameterName getByParameterName(String name) {
+		for (UrlParameterNames param : values()) {
+			if (param.getParameterName().equalsIgnoreCase(name)) {
+				return param;
+			}
+		}
+		return null;
 	}
 }
diff --git a/src/main/java/org/eclipsefoundation/marketplace/request/SearchBypassCondition.java b/src/main/java/org/eclipsefoundation/marketplace/request/SearchBypassCondition.java
new file mode 100644
index 0000000000000000000000000000000000000000..93dc368e788849bd19116a01dd9c257e4ff3a5f9
--- /dev/null
+++ b/src/main/java/org/eclipsefoundation/marketplace/request/SearchBypassCondition.java
@@ -0,0 +1,25 @@
+package org.eclipsefoundation.marketplace.request;
+
+import javax.enterprise.context.ApplicationScoped;
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.container.ContainerRequestContext;
+
+import org.apache.commons.lang3.StringUtils;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
+import org.eclipsefoundation.core.request.CacheBypassFilter.BypassCondition;
+
+/**
+ * Designates that the request should bypass caching when a search query string is set.
+ * 
+ * @author Martin Lowe
+ */
+@ApplicationScoped
+public class SearchBypassCondition implements BypassCondition {
+
+	@Override
+	public boolean matches(ContainerRequestContext requestContext, HttpServletRequest request) {
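+		// bypass the cache whenever a non-blank search query string is present on the request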
+		String[] queryStringVals = request.getParameterValues(DefaultUrlParameterNames.QUERY_STRING.getParameterName());
+		return queryStringVals != null && !StringUtils.isAllBlank(queryStringVals);
+	}
+
+}
diff --git a/src/main/java/org/eclipsefoundation/marketplace/request/SortBypassCondition.java b/src/main/java/org/eclipsefoundation/marketplace/request/SortBypassCondition.java
new file mode 100644
index 0000000000000000000000000000000000000000..160ceae23518fa63c5ea9b4390a4d61d1bff8e7b
--- /dev/null
+++ b/src/main/java/org/eclipsefoundation/marketplace/request/SortBypassCondition.java
@@ -0,0 +1,33 @@
+package org.eclipsefoundation.marketplace.request;
+
+import javax.enterprise.context.ApplicationScoped;
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.container.ContainerRequestContext;
+
+import org.eclipsefoundation.core.request.CacheBypassFilter.BypassCondition;
+import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
+import org.eclipsefoundation.persistence.model.SortOrder;
+
+/**
+ * Designates that the request should bypass caching when sorting is set to random.
+ * 
+ * @author Martin Lowe
+ */
+@ApplicationScoped
+public class SortBypassCondition implements BypassCondition {
+
+	@Override
+	public boolean matches(ContainerRequestContext requestContext, HttpServletRequest request) {
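+		// random sort order yields different results on each request, so cached responses would be misleading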
+		String[] sortVals = request.getParameterValues(UrlParameterNames.SORT.getParameterName());
+		if (sortVals != null) {
+			for (String sortVal : sortVals) {
+				// check if the sort order for request matches RANDOM
+				if (SortOrder.RANDOM.equals(SortOrder.getOrderFromValue(sortVal))) {
+					return true;
+				}
+			}
+		}
+		return false;
+	}
+
+}
diff --git a/src/main/java/org/eclipsefoundation/marketplace/resource/CatalogResource.java b/src/main/java/org/eclipsefoundation/marketplace/resource/CatalogResource.java
index 51052dab55c1869c2ba31281b58ced0e7c038328..c5e82a7b45d80f84784944e59e2b664d852da60f 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/resource/CatalogResource.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/resource/CatalogResource.java
@@ -23,13 +23,12 @@ import javax.ws.rs.Path;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
 
 import org.eclipsefoundation.core.helper.ResponseHelper;
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.core.service.CachingService;
 import org.eclipsefoundation.marketplace.dto.Catalog;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
 import org.eclipsefoundation.persistence.dao.PersistenceDao;
 import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
 import org.eclipsefoundation.persistence.model.RDBMSQuery;
@@ -101,7 +100,7 @@ public class CatalogResource {
 	@GET
 	@Path("/{catalogId}")
 	public Response select(@PathParam("catalogId") String catalogId) {
-		params.addParam(UrlParameterNames.ID, catalogId);
+		params.addParam(DefaultUrlParameterNames.ID, catalogId);
 
 		RDBMSQuery<Catalog> q = new RDBMSQuery<>(params, dtoFilter);
 		// retrieve a cached version of the value for the current listing
@@ -129,7 +128,7 @@ public class CatalogResource {
 	@RolesAllowed({ "marketplace_catalog_delete", "marketplace_admin_access" })
 	@Path("/{catalogId}")
 	public Response delete(@PathParam("catalogId") String catalogId) {
-		params.addParam(UrlParameterNames.ID, catalogId);
+		params.addParam(DefaultUrlParameterNames.ID, catalogId);
 		dao.delete(new RDBMSQuery<>(params, dtoFilter));
 
 		// return the results as a response
diff --git a/src/main/java/org/eclipsefoundation/marketplace/resource/CategoryResource.java b/src/main/java/org/eclipsefoundation/marketplace/resource/CategoryResource.java
index c07ce489a94c9c609fb3a1fe81afd1b385effc9c..b7cea1a1cdb2940cd2b46be10f462a75da8427a9 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/resource/CategoryResource.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/resource/CategoryResource.java
@@ -26,9 +26,9 @@ import javax.ws.rs.core.Response;
 
 import org.eclipsefoundation.core.helper.ResponseHelper;
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.core.service.CachingService;
 import org.eclipsefoundation.marketplace.dto.Category;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
 import org.eclipsefoundation.persistence.dao.PersistenceDao;
 import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
 import org.eclipsefoundation.persistence.model.RDBMSQuery;
@@ -101,7 +101,7 @@ public class CategoryResource {
 	@GET
 	@Path("/{categoryId}")
 	public Response select(@PathParam("categoryId") String categoryId) {
-		params.addParam(UrlParameterNames.ID, categoryId);
+		params.addParam(DefaultUrlParameterNames.ID, categoryId);
 
 		RDBMSQuery<Category> q = new RDBMSQuery<>(params, dtoFilter);
 		// retrieve a cached version of the value for the current listing
@@ -129,7 +129,7 @@ public class CategoryResource {
 	@RolesAllowed({ "marketplace_category_delete", "marketplace_admin_access" })
 	@Path("/{categoryId}")
 	public Response delete(@PathParam("categoryId") String categoryId) {
-		params.addParam(UrlParameterNames.ID, categoryId);
+		params.addParam(DefaultUrlParameterNames.ID, categoryId);
 		dao.delete(new RDBMSQuery<>(params, dtoFilter));
 
 		// return the results as a response
diff --git a/src/main/java/org/eclipsefoundation/marketplace/resource/ErrorReportResource.java b/src/main/java/org/eclipsefoundation/marketplace/resource/ErrorReportResource.java
index 0cfce420b8d4e83d000f2e4b6e4410f6feb60c49..9b5a14f3ff15be89e5c201cbc9c72ef4ca7e7baf 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/resource/ErrorReportResource.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/resource/ErrorReportResource.java
@@ -23,9 +23,9 @@ import javax.ws.rs.core.Response;
 
 import org.eclipsefoundation.core.helper.ResponseHelper;
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.core.service.CachingService;
 import org.eclipsefoundation.marketplace.dto.ErrorReport;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
 import org.eclipsefoundation.persistence.dao.PersistenceDao;
 import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
 import org.eclipsefoundation.persistence.model.RDBMSQuery;
@@ -101,7 +101,7 @@ public class ErrorReportResource {
 	@GET
 	@Path("/{errorReportId}")
 	public Response select(@PathParam("errorReportId") String errorReportId) {
-		params.addParam(UrlParameterNames.ID, errorReportId);
+		params.addParam(DefaultUrlParameterNames.ID, errorReportId);
 		RDBMSQuery<ErrorReport> q = new RDBMSQuery<>(params, dtoFilter);
 		// retrieve a cached version of the value for the current ErrorReport
 		Optional<List<ErrorReport>> cachedResults = cachingService.get(errorReportId, params,
diff --git a/src/main/java/org/eclipsefoundation/marketplace/resource/InstallResource.java b/src/main/java/org/eclipsefoundation/marketplace/resource/InstallResource.java
index 824b01d1893dd32bf33796816c688b9d0b5ccb2d..b10a107fd794ea86b008687a5a6ee464db71a4ba 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/resource/InstallResource.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/resource/InstallResource.java
@@ -12,7 +12,6 @@ import java.util.Optional;
 import java.util.UUID;
 
 import javax.annotation.security.PermitAll;
-import javax.annotation.security.RolesAllowed;
 import javax.enterprise.context.RequestScoped;
 import javax.inject.Inject;
 import javax.ws.rs.Consumes;
@@ -27,6 +26,7 @@ import javax.ws.rs.core.Response.Status;
 import org.eclipsefoundation.core.helper.ResponseHelper;
 import org.eclipsefoundation.core.model.Error;
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.core.service.CachingService;
 import org.eclipsefoundation.marketplace.dto.Install;
 import org.eclipsefoundation.marketplace.dto.InstallMetrics;
@@ -67,8 +67,6 @@ public class InstallResource {
 	DtoFilter<MetricPeriod> periodFilter;
 	@Inject
 	DtoFilter<InstallMetrics> metricFilter;
-	@Inject
-	CachingService<List<InstallMetrics>> installCache;
 
 
 	// Inject 2 caching service references, as we want to cache count results.
@@ -88,7 +86,7 @@ public class InstallResource {
 	@PermitAll
 	@Path("/{listingId}")
 	public Response selectInstallCount(@PathParam("listingId") String listingId) {
-		wrapper.addParam(UrlParameterNames.ID, listingId);
+		wrapper.addParam(DefaultUrlParameterNames.ID, listingId);
 		RDBMSQuery<Install> q = new RDBMSQuery<>(wrapper, dtoFilter);
 		Optional<Long> cachedResults = countCache.get(listingId, wrapper,
 				() -> StreamHelper.awaitCompletionStage(dao.count(q)));
@@ -114,7 +112,7 @@ public class InstallResource {
 	@PermitAll
 	@Path("/{listingId}/{version}")
 	public Response selectInstallCount(@PathParam("listingId") String listingId, @PathParam("version") String version) {
-		wrapper.addParam(UrlParameterNames.ID, listingId);
+		wrapper.addParam(DefaultUrlParameterNames.ID, listingId);
 		wrapper.addParam(UrlParameterNames.VERSION, version);
 		RDBMSQuery<Install> q = new RDBMSQuery<>(wrapper, dtoFilter);
 		Optional<Long> cachedResults = countCache.get(getCompositeKey(listingId, version), wrapper,
@@ -139,7 +137,7 @@ public class InstallResource {
 	@PermitAll
 	@Path("/{listingId}/metrics")
 	public Response selectInstallMetrics(@PathParam("listingId") String listingId) {
-		wrapper.addParam(UrlParameterNames.ID, listingId);
+		wrapper.addParam(DefaultUrlParameterNames.ID, listingId);
 		RDBMSQuery<InstallMetrics> q = new RDBMSQuery<>(wrapper, metricFilter);
 		Optional<List<InstallMetrics>> cachedResults = installCache.get(listingId, wrapper,
 				() -> dao.get(q));
diff --git a/src/main/java/org/eclipsefoundation/marketplace/resource/ListingResource.java b/src/main/java/org/eclipsefoundation/marketplace/resource/ListingResource.java
index 50ac6cbb8bbedfef991c0908e5794b95ec99c120..73a25be08fc82109ae2e25e66fb421622e1452fa 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/resource/ListingResource.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/resource/ListingResource.java
@@ -29,12 +29,14 @@ import javax.ws.rs.core.Response;
 
 import org.eclipsefoundation.core.helper.ResponseHelper;
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.core.service.CachingService;
 import org.eclipsefoundation.marketplace.dto.Listing;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
+import org.eclipsefoundation.marketplace.service.PromotionService;
 import org.eclipsefoundation.persistence.dao.PersistenceDao;
 import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
 import org.eclipsefoundation.persistence.model.RDBMSQuery;
+import org.eclipsefoundation.search.service.PersistenceBackedSearchService;
 import org.jboss.resteasy.annotations.jaxrs.PathParam;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -51,10 +53,14 @@ import org.slf4j.LoggerFactory;
 public class ListingResource {
 	private static final Logger LOGGER = LoggerFactory.getLogger(ListingResource.class);
 
+	// service/access layers
 	@Inject
 	PersistenceDao dao;
 	@Inject
+	PersistenceBackedSearchService searchService;
+	@Inject
 	CachingService<List<Listing>> cachingService;
+	
 	@Inject
 	PromotionService promoService;
 
@@ -76,13 +82,20 @@ public class ListingResource {
 	@PermitAll
 	public Response select() {
 		// retrieve the possible cached object
-		Optional<List<Listing>> cachedResults = cachingService.get("all", params,
-				() -> dao.get(new RDBMSQuery<>(params, dtoFilter)));
+		Optional<String> searchTerm = params.getFirstParam(DefaultUrlParameterNames.QUERY_STRING);
+		Optional<List<Listing>> cachedResults;
+		// if there is a search term set, use it
+		if (searchTerm.isPresent()) {
+			cachedResults = cachingService.get("all", params,
+					() -> searchService.find(params, dtoFilter));
+		} else {
+			cachedResults = cachingService.get("all", params,
+					() -> dao.get(new RDBMSQuery<>(params, dtoFilter)));
+		}
 		if (!cachedResults.isPresent()) {
 			LOGGER.error("Error while retrieving cached listings");
 			return Response.serverError().build();
 		}
-
 		// return the results as a response
 		return responseBuider.build("all", params, cachedResults.get());
 	}
@@ -114,7 +127,7 @@ public class ListingResource {
 	@PermitAll
 	@Path("/{listingId}")
 	public Response select(@PathParam("listingId") String listingId) {
-		params.addParam(UrlParameterNames.ID, listingId);
+		params.addParam(DefaultUrlParameterNames.ID.getParameterName(), listingId);
 
 		// retrieve a cached version of the value for the current listing
 		Optional<List<Listing>> cachedResults = cachingService.get(listingId, params,
@@ -141,7 +154,7 @@ public class ListingResource {
 	@RolesAllowed({ "marketplace_listing_delete", "marketplace_admin_access" })
 	@Path("/{listingId}")
 	public Response delete(@PathParam("listingId") String listingId) {
-		params.addParam(UrlParameterNames.ID, listingId);
+		params.addParam(DefaultUrlParameterNames.ID.getParameterName(), listingId);
 		// delete the currently selected asset
 		dao.delete(new RDBMSQuery<>(params, dtoFilter));
 		// return the results as a response
diff --git a/src/main/java/org/eclipsefoundation/marketplace/resource/ListingVersionResource.java b/src/main/java/org/eclipsefoundation/marketplace/resource/ListingVersionResource.java
index e5637baf2848fffeddf2e5aeda4d981a36df87b0..a067a168e9cb6fdcb3d96540a42b2b8867e6ec00 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/resource/ListingVersionResource.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/resource/ListingVersionResource.java
@@ -25,9 +25,9 @@ import javax.ws.rs.core.Response;
 
 import org.eclipsefoundation.core.helper.ResponseHelper;
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.core.service.CachingService;
 import org.eclipsefoundation.marketplace.dto.ListingVersion;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
 import org.eclipsefoundation.persistence.dao.PersistenceDao;
 import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
 import org.eclipsefoundation.persistence.model.RDBMSQuery;
@@ -99,7 +99,7 @@ public class ListingVersionResource {
 	@GET
 	@Path("/{listingVersionId}")
 	public Response select(@PathParam("listingVersionId") String listingVersionId) {
-		params.addParam(UrlParameterNames.ID, listingVersionId);
+		params.addParam(DefaultUrlParameterNames.ID, listingVersionId);
 
 		// retrieve a cached version of the value for the current listing
 		Optional<List<ListingVersion>> cachedResults = cachingService.get(listingVersionId, params,
@@ -127,7 +127,7 @@ public class ListingVersionResource {
 	@RolesAllowed({ "marketplace_version_delete", "marketplace_admin_access" })
 	@Path("/{listingVersionId}")
 	public Response delete(@PathParam("listingVersionId") String listingVersionId) {
-		params.addParam(UrlParameterNames.ID, listingVersionId);
+		params.addParam(DefaultUrlParameterNames.ID, listingVersionId);
 		// delete the currently selected asset
 		dao.delete(new RDBMSQuery<>(params, dtoFilter));
 		// return the results as a response
diff --git a/src/main/java/org/eclipsefoundation/marketplace/resource/MarketResource.java b/src/main/java/org/eclipsefoundation/marketplace/resource/MarketResource.java
index c8a9d9c72422da2815dd3d7d0193cfc75f12d1e8..93187839c9adb6268d6acc2087d7f6ec8c1871c3 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/resource/MarketResource.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/resource/MarketResource.java
@@ -25,9 +25,9 @@ import javax.ws.rs.core.Response;
 
 import org.eclipsefoundation.core.helper.ResponseHelper;
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.core.service.CachingService;
 import org.eclipsefoundation.marketplace.dto.Market;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
 import org.eclipsefoundation.persistence.dao.PersistenceDao;
 import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
 import org.eclipsefoundation.persistence.model.RDBMSQuery;
@@ -98,8 +98,8 @@ public class MarketResource {
 
 	@Path("/{marketId}")
 	public Response select(@PathParam("marketId") String marketId) {
-		params.addParam(UrlParameterNames.ID, marketId);
-
+		params.addParam(DefaultUrlParameterNames.ID, marketId);
+		
 		// retrieve a cached version of the value for the current listing
 		Optional<List<Market>> cachedResults = cachingService.get(marketId, params,
 				() -> dao.get(new RDBMSQuery<>(params, dtoFilter)));
@@ -126,7 +126,7 @@ public class MarketResource {
 	@RolesAllowed({ "marketplace_market_delete", "marketplace_admin_access" })
 	@Path("/{marketId}")
 	public Response delete(@PathParam("marketId") String marketId) {
-		params.addParam(UrlParameterNames.ID, marketId);
+		params.addParam(DefaultUrlParameterNames.ID, marketId);
 		// delete the currently selected asset
 		dao.delete(new RDBMSQuery<>(params, dtoFilter));
 		// return the results as a response
diff --git a/src/main/java/org/eclipsefoundation/marketplace/resource/PromotionResource.java b/src/main/java/org/eclipsefoundation/marketplace/resource/PromotionResource.java
index 9cd8b03c25ce47f4402eaf5e777674917bda0f49..254f49b5eab141e0ce925cca479b069bd6da6f2c 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/resource/PromotionResource.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/resource/PromotionResource.java
@@ -1,7 +1,6 @@
 package org.eclipsefoundation.marketplace.resource;
 
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
 
@@ -16,24 +15,19 @@ import javax.ws.rs.Path;
 import javax.ws.rs.Produces;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
-import javax.ws.rs.core.Response.Status;
 
-import org.eclipsefoundation.marketplace.dao.MongoDao;
+import org.eclipsefoundation.core.helper.ResponseHelper;
+import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
+import org.eclipsefoundation.core.service.CachingService;
 import org.eclipsefoundation.marketplace.dto.Promotion;
-import org.eclipsefoundation.marketplace.dto.filter.DtoFilter;
-import org.eclipsefoundation.marketplace.helper.ResponseHelper;
-import org.eclipsefoundation.marketplace.helper.StreamHelper;
-import org.eclipsefoundation.marketplace.model.Error;
-import org.eclipsefoundation.marketplace.model.MongoQuery;
-import org.eclipsefoundation.marketplace.model.RequestWrapper;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
-import org.eclipsefoundation.marketplace.service.CachingService;
+import org.eclipsefoundation.persistence.dao.PersistenceDao;
+import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
+import org.eclipsefoundation.persistence.model.RDBMSQuery;
 import org.jboss.resteasy.annotations.jaxrs.PathParam;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.mongodb.client.result.DeleteResult;
-
 /**
  * Resource for interacting with promotions within the API.
  * 
@@ -48,7 +42,7 @@ public class PromotionResource {
 	private static final Logger LOGGER = LoggerFactory.getLogger(PromotionResource.class);
 
 	@Inject
-	MongoDao dao;
+	PersistenceDao dao;
 	@Inject
 	DtoFilter<Promotion> dtoFilter;
 	@Inject
@@ -68,10 +62,9 @@ public class PromotionResource {
 	@GET
 	@RolesAllowed({ "marketplace_promotion_get", "marketplace_admin_access" })
 	public Response select() {
-		MongoQuery<Promotion> q = new MongoQuery<>(params, dtoFilter);
+		RDBMSQuery<Promotion> q = new RDBMSQuery<>(params, dtoFilter);
 		// retrieve the possible cached object
-		Optional<List<Promotion>> cachedResults = cachingService.get("all", params, Collections.emptyMap(),
-				() -> StreamHelper.awaitCompletionStage(dao.get(q)));
+		Optional<List<Promotion>> cachedResults = cachingService.get("all", params, () -> dao.get(q));
 		if (!cachedResults.isPresent()) {
 			LOGGER.error("Error while retrieving cached promotions");
 			return Response.serverError().build();
@@ -91,11 +84,11 @@ public class PromotionResource {
 	@RolesAllowed({ "marketplace_promotion_put", "marketplace_admin_access" })
 	public Response putPromotion(Promotion promotion) {
 		if (promotion.getId() != null) {
-			params.addParam(UrlParameterNames.ID.getParameterName(), promotion.getId());
+			params.addParam(DefaultUrlParameterNames.ID, promotion.getId().toString());
 		}
-		MongoQuery<Promotion> q = new MongoQuery<>(params, null, dtoFilter);
+		RDBMSQuery<Promotion> q = new RDBMSQuery<>(params, dtoFilter);
 		// add the object, and await the result
-		StreamHelper.awaitCompletionStage(dao.add(q, Arrays.asList(promotion)));
+		dao.add(q, Arrays.asList(promotion));
 
 		// return the results as a response
 		return Response.ok().build();
@@ -112,12 +105,11 @@ public class PromotionResource {
 	@RolesAllowed({ "marketplace_promotion_get", "marketplace_admin_access" })
 	@Path("/{promotionId}")
 	public Response select(@PathParam("promotionId") String promotionId) {
-		params.addParam(UrlParameterNames.ID.getParameterName(), promotionId);
+		params.addParam(DefaultUrlParameterNames.ID.getParameterName(), promotionId);
 
-		MongoQuery<Promotion> q = new MongoQuery<>(params, null, dtoFilter);
+		RDBMSQuery<Promotion> q = new RDBMSQuery<>(params, dtoFilter);
 		// retrieve a cached version of the value for the current listing
-		Optional<List<Promotion>> cachedResults = cachingService.get(promotionId, params, Collections.emptyMap(),
-				() -> StreamHelper.awaitCompletionStage(dao.get(q)));
+		Optional<List<Promotion>> cachedResults = cachingService.get(promotionId, params, () -> dao.get(q));
 		if (!cachedResults.isPresent()) {
 			LOGGER.error("Error while retrieving cached listing for ID {}", promotionId);
 			return Response.serverError().build();
@@ -138,14 +130,11 @@ public class PromotionResource {
 	@RolesAllowed({ "marketplace_promotion_delete", "marketplace_admin_access" })
 	@Path("/{promotionId}")
 	public Response delete(@PathParam("promotionId") String promotionId) {
-		params.addParam(UrlParameterNames.ID.getParameterName(), promotionId);
-
-		MongoQuery<Promotion> q = new MongoQuery<>(params, null, dtoFilter);
+		params.addParam(DefaultUrlParameterNames.ID.getParameterName(), promotionId);
+		RDBMSQuery<Promotion> q = new RDBMSQuery<>(params, dtoFilter);
 		// delete the currently selected asset
-		DeleteResult result = StreamHelper.awaitCompletionStage(dao.delete(q));
-		if (result.getDeletedCount() == 0 || !result.wasAcknowledged()) {
-			return new Error(Status.NOT_FOUND, "Did not find an asset to delete for current call").asResponse();
-		}
+		dao.delete(q);
+		
 		// return the results as a response
 		return Response.ok().build();
 	}
diff --git a/src/main/java/org/eclipsefoundation/marketplace/service/PromotionService.java b/src/main/java/org/eclipsefoundation/marketplace/service/PromotionService.java
index 7d82f0947c8a0e361818183b0613a63714179779..91f51671038dddccbad1bc28a38a5e5ced69ca4f 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/service/PromotionService.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/service/PromotionService.java
@@ -2,9 +2,9 @@ package org.eclipsefoundation.marketplace.service;
 
 import java.util.List;
 
+import org.eclipsefoundation.core.model.RequestWrapper;
 import org.eclipsefoundation.marketplace.dto.Listing;
 import org.eclipsefoundation.marketplace.dto.Promotion;
-import org.eclipsefoundation.marketplace.model.RequestWrapper;
 
 /**
  * Interface for retrieving promotions within the application.
diff --git a/src/main/java/org/eclipsefoundation/marketplace/service/impl/DefaultPromotionService.java b/src/main/java/org/eclipsefoundation/marketplace/service/impl/DefaultPromotionService.java
index c8f8ab60a940bcff2e0a4e037f31dc6b91f2eafb..bea628151f3516f71a5b83ee424283e66302a790 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/service/impl/DefaultPromotionService.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/service/impl/DefaultPromotionService.java
@@ -8,23 +8,21 @@ import java.util.List;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Random;
-import java.util.stream.Collectors;
 
 import javax.enterprise.context.ApplicationScoped;
 import javax.inject.Inject;
 
 import org.eclipse.microprofile.config.inject.ConfigProperty;
-import org.eclipsefoundation.marketplace.dao.MongoDao;
+import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
+import org.eclipsefoundation.core.service.CachingService;
 import org.eclipsefoundation.marketplace.dto.Listing;
 import org.eclipsefoundation.marketplace.dto.Promotion;
-import org.eclipsefoundation.marketplace.dto.filter.DtoFilter;
-import org.eclipsefoundation.marketplace.helper.StreamHelper;
-import org.eclipsefoundation.marketplace.model.MongoQuery;
-import org.eclipsefoundation.marketplace.model.RequestWrapper;
 import org.eclipsefoundation.marketplace.namespace.MicroprofilePropertyNames;
-import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
-import org.eclipsefoundation.marketplace.service.CachingService;
 import org.eclipsefoundation.marketplace.service.PromotionService;
+import org.eclipsefoundation.persistence.dao.PersistenceDao;
+import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
+import org.eclipsefoundation.persistence.model.RDBMSQuery;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -47,7 +45,7 @@ public class DefaultPromotionService implements PromotionService {
 	int defaultWeight;
 
 	@Inject
-	MongoDao dao;
+	PersistenceDao dao;
 
 	@Inject
 	DtoFilter<Listing> listingFilter;
@@ -72,13 +70,11 @@ public class DefaultPromotionService implements PromotionService {
 		// for caching
 		Map<String, List<String>> adds = new HashMap<>();
 		adds.put("type", Arrays.asList("Listing"));
-		adds.put(UrlParameterNames.IDS.getParameterName(),
-				promos.stream().map(Promotion::getListingId).collect(Collectors.toList()));
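+		// register each promoted listing ID on the request so the listing filter retrieves only those listings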
+		promos.stream().map(Promotion::getListingId).forEach(val -> wrapper.addParam(DefaultUrlParameterNames.IDS, val));
 
-		MongoQuery<Listing> q = new MongoQuery<>(null, adds, listingFilter);
+		RDBMSQuery<Listing> q = new RDBMSQuery<>(wrapper, listingFilter);
 		// retrieve the possible cached object
-		Optional<List<Listing>> cachedResults = listingCache.get("promo|listings", wrapper, adds,
-				() -> StreamHelper.awaitCompletionStage(dao.get(q)));
+		Optional<List<Listing>> cachedResults = listingCache.get("promo|listings", wrapper, () -> dao.get(q));
 		if (!cachedResults.isPresent()) {
 			LOGGER.error("Error while retrieving cached promotion listings");
 			return Collections.emptyList();
@@ -91,10 +87,9 @@ public class DefaultPromotionService implements PromotionService {
 	@Override
 	public List<Listing> retrievePromotions(RequestWrapper wrapper, List<Listing> listings) {
 		// create an empty promo query to get all promos
-		MongoQuery<Promotion> q = new MongoQuery<>(null, Collections.emptyMap(), promotionFilter);
+		RDBMSQuery<Promotion> q = new RDBMSQuery<>(wrapper, promotionFilter);
 		// retrieve the possible cached object
-		Optional<List<Promotion>> cachedResults = promoCache.get("all|promo", wrapper, Collections.emptyMap(),
-				() -> StreamHelper.awaitCompletionStage(dao.get(q)));
+		Optional<List<Promotion>> cachedResults = promoCache.get("all|promo", wrapper, () -> dao.get(q));
 		if (!cachedResults.isPresent() || cachedResults.get().isEmpty()) {
 			LOGGER.debug("Could not find any promotions to inject, returning");
 			return listings;
@@ -158,7 +153,7 @@ public class DefaultPromotionService implements PromotionService {
 		// get a random number in the range of the total weighting
 		int rnd = r.nextInt(totalWeighting);
 		Promotion result = null;
-		for (Promotion p: promos) {
+		for (Promotion p : promos) {
 			// reduce the random number by the weight
 			rnd -= p.getWeight();
 			// check if we are in range of the current entry
diff --git a/src/main/java/org/eclipsefoundation/marketplace/service/impl/GuavaCachingService.java b/src/main/java/org/eclipsefoundation/marketplace/service/impl/GuavaCachingService.java
index df09e084bd6526f3444764037669015a2f5c2cb3..cddef8badf5ed21ef52b008f570232bdcc667d1b 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/service/impl/GuavaCachingService.java
+++ b/src/main/java/org/eclipsefoundation/marketplace/service/impl/GuavaCachingService.java
@@ -62,23 +62,18 @@ public class GuavaCachingService<T> implements CachingService<T> {
 	public void init() {
 		this.ttl = new HashMap<>();
 		// create cache with configured settings that maintains a TTL map
-		cache = CacheBuilder
-					.newBuilder()
-					.maximumSize(maxSize)
-					.expireAfterWrite(ttlWrite, TimeUnit.SECONDS)
-					.removalListener(not -> ttl.remove(not.getKey()))
-					.build();
+		cache = CacheBuilder.newBuilder().maximumSize(maxSize).expireAfterWrite(ttlWrite, TimeUnit.SECONDS)
+				.removalListener(not -> ttl.remove(not.getKey())).build();
 
 	}
 
 	@Override
-	public Optional<T> get(String id, RequestWrapper wrapper, Map<String, List<String>> params,
-			Callable<? extends T> callable) {
+	public Optional<T> get(String id, RequestWrapper wrapper, Callable<? extends T> callable) {
 		Objects.requireNonNull(id);
 		Objects.requireNonNull(wrapper);
 		Objects.requireNonNull(callable);
 
-		String cacheKey = getCacheKey(id, wrapper, params);
+		String cacheKey = getCacheKey(id, wrapper);
 		LOGGER.debug("Retrieving cache value for '{}'", cacheKey);
 		try {
 			// check if the cache is bypassed for the request
@@ -90,13 +85,14 @@ public class GuavaCachingService<T> implements CachingService<T> {
 				}
 				return Optional.of(result);
 			}
-			
+
 			// get entry, and enter a ttl as soon as it returns
 			T data = cache.get(cacheKey, callable);
 			if (data != null) {
-				ttl.putIfAbsent(cacheKey, System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(ttlWrite, TimeUnit.SECONDS));
+				ttl.putIfAbsent(cacheKey,
+						System.currentTimeMillis() + TimeUnit.MILLISECONDS.convert(ttlWrite, TimeUnit.SECONDS));
 			}
-			return Optional.of(cache.get(cacheKey, callable));
+			return Optional.of(data);
 		} catch (InvalidCacheLoadException | UncheckedExecutionException e) {
 			LOGGER.error("Error while retrieving fresh value for cachekey: {}", cacheKey, e);
 		} catch (Exception e) {
@@ -107,9 +103,10 @@ public class GuavaCachingService<T> implements CachingService<T> {
 
 	@Override
 	public Optional<Long> getExpiration(String id, RequestWrapper params) {
-		return Optional.ofNullable(ttl.get(getCacheKey(Objects.requireNonNull(id), Objects.requireNonNull(params), null)));
+		return Optional
+				.ofNullable(ttl.get(getCacheKey(Objects.requireNonNull(id), Objects.requireNonNull(params))));
 	}
-	
+
 	@Override
 	public Set<String> getCacheKeys() {
 		return cache.asMap().keySet();
@@ -129,4 +126,5 @@ public class GuavaCachingService<T> implements CachingService<T> {
 	public long getMaxAge() {
 		return ttlWrite;
 	}
+
 }
diff --git a/src/main/java/org/eclipsefoundation/marketplace/dao/impl/DefaultHibernateDao.java b/src/main/java/org/eclipsefoundation/persistence/dao/impl/DefaultHibernateDao.java
similarity index 93%
rename from src/main/java/org/eclipsefoundation/marketplace/dao/impl/DefaultHibernateDao.java
rename to src/main/java/org/eclipsefoundation/persistence/dao/impl/DefaultHibernateDao.java
index 4254d8f9ca3cabef2332d3aedc989e9d25d8583b..621734d5d292fb335f4812693f7c3761d5d9a0e8 100644
--- a/src/main/java/org/eclipsefoundation/marketplace/dao/impl/DefaultHibernateDao.java
+++ b/src/main/java/org/eclipsefoundation/persistence/dao/impl/DefaultHibernateDao.java
@@ -4,7 +4,7 @@
  * which is available at http://www.eclipse.org/legal/epl-v20.html,
  * SPDX-License-Identifier: EPL-2.0
  */
-package org.eclipsefoundation.marketplace.dao.impl;
+package org.eclipsefoundation.persistence.dao.impl;
 
 import java.util.List;
 import java.util.concurrent.CompletionStage;
@@ -14,6 +14,7 @@ import javax.inject.Inject;
 import javax.persistence.EntityManager;
 import javax.persistence.NoResultException;
 import javax.persistence.TypedQuery;
+import javax.resource.NotSupportedException;
 import javax.transaction.Transactional;
 
 import org.eclipse.microprofile.config.inject.ConfigProperty;
@@ -21,10 +22,10 @@ import org.eclipse.microprofile.health.HealthCheckResponse;
 import org.eclipse.microprofile.health.HealthCheckResponseBuilder;
 import org.eclipsefoundation.core.exception.MaintenanceException;
 import org.eclipsefoundation.persistence.dao.PersistenceDao;
-import org.eclipsefoundation.persistence.model.RDBMSQuery;
-import org.eclipsefoundation.persistence.model.ParameterizedSQLStatement.Clause;
 import org.eclipsefoundation.persistence.dto.BareNode;
-import org.eclipsefoundation.search.dao.SearchIndexDAO;
+import org.eclipsefoundation.persistence.model.ParameterizedSQLStatement.Clause;
+import org.eclipsefoundation.persistence.model.RDBMSQuery;
+import org.eclipsefoundation.search.dao.SearchIndexDao;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -40,7 +41,7 @@ public class DefaultHibernateDao implements PersistenceDao {
 	@Inject
 	EntityManager em;
 	@Inject
-	SearchIndexDAO indexDAO;
+	SearchIndexDao indexDAO;
 
 	@ConfigProperty(name = "eclipse.db.default.limit")
 	int defaultLimit;
@@ -59,7 +60,6 @@ public class DefaultHibernateDao implements PersistenceDao {
 		if (LOGGER.isDebugEnabled()) {
 			LOGGER.debug("Querying DB using the following query: {}", q);
 		}
-		LOGGER.error("SQL: {}\nParams: {}", q.getFilter().getSelectSql(), q.getFilter().getParams());
 
 		// build base query
 		TypedQuery<T> query = em.createQuery(q.getFilter().getSelectSql(), q.getDocType());
@@ -105,7 +105,7 @@ public class DefaultHibernateDao implements PersistenceDao {
 				em.persist(doc);
 			}
 		}
-		// indexDAO.createOrUpdate(q, documents);
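+		// forward the persisted documents to the search index DAO so they become searchable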
+		indexDAO.createOrUpdate(documents, q.getDocType());
 	}
 
 	@Transactional
@@ -137,7 +137,7 @@ public class DefaultHibernateDao implements PersistenceDao {
 		if (LOGGER.isDebugEnabled()) {
 			LOGGER.debug("Counting documents in DB that match the following query: {}", q);
 		}
-		throw new RuntimeException();
+		throw new UnsupportedOperationException("Count operations are not yet supported");
 	}
 
 	@Override
diff --git a/src/main/java/org/eclipsefoundation/persistence/dto/BareNode.java b/src/main/java/org/eclipsefoundation/persistence/dto/BareNode.java
index df87a8d6f03ae11c355c9e6f72d868c82c2c7c46..296d8e5c5a8e01c5d1efb22c45400bf9f218d58f 100644
--- a/src/main/java/org/eclipsefoundation/persistence/dto/BareNode.java
+++ b/src/main/java/org/eclipsefoundation/persistence/dto/BareNode.java
@@ -1,10 +1,8 @@
 package org.eclipsefoundation.persistence.dto;
 
 import java.util.Objects;
-import java.util.Random;
 import java.util.UUID;
 
-import javax.json.bind.annotation.JsonbTransient;
 import javax.persistence.Column;
 import javax.persistence.Id;
 import javax.persistence.MappedSuperclass;
@@ -13,21 +11,17 @@ import org.hibernate.id.uuid.StandardRandomStrategy;
 import org.hibernate.search.annotations.Field;
 
 /**
- * Represents a bare node with just ID for sake of persistence.
+ * Represents a bare node with just an ID and title for the sake of persistence.
  * 
  * @author Martin Lowe
  */
 @MappedSuperclass
 public abstract class BareNode {
-	private static final Random RND = new Random();
-
 	@Id
 	@Column(unique = true, nullable = false, columnDefinition = "BINARY(16)")
 	@Field
 	private UUID id;
-	@JsonbTransient
-	@Column
-	private float seed;
+	private String title;
 
 	/**
 	 * Use auto-generated value internally, rather than generator. Currently
@@ -36,7 +30,6 @@ public abstract class BareNode {
 	 */
 	public BareNode() {
 		this.id = StandardRandomStrategy.INSTANCE.generateUUID(null);
-		this.seed = RND.nextFloat();
 	}
 
 	/**
@@ -62,22 +55,22 @@ public abstract class BareNode {
 	}
 
 	/**
-	 * @return the seed
+	 * @return the title
 	 */
-	public float getSeed() {
-		return seed;
+	public String getTitle() {
+		return title;
 	}
 
 	/**
-	 * @param seed the seed to set
+	 * @param title the title to set
 	 */
-	public void setSeed(float seed) {
-		this.seed = seed;
+	public void setTitle(String title) {
+		this.title = title;
 	}
 
 	@Override
 	public int hashCode() {
-		return Objects.hash(id, seed);
+		return Objects.hash(id, title);
 	}
 
 	@Override
@@ -89,7 +82,7 @@ public abstract class BareNode {
 		if (getClass() != obj.getClass())
 			return false;
 		BareNode other = (BareNode) obj;
-		return Objects.equals(id, other.id);
+		return Objects.equals(id, other.id) && Objects.equals(title, other.title);
 	}
 
 }
diff --git a/src/main/java/org/eclipsefoundation/persistence/dto/NodeBase.java b/src/main/java/org/eclipsefoundation/persistence/dto/NodeBase.java
index 9b246f28fe4427fca82b2ad13d5388e84faf1a41..ba6a6538f8ad588068d5648709afe5da37c2e87e 100644
--- a/src/main/java/org/eclipsefoundation/persistence/dto/NodeBase.java
+++ b/src/main/java/org/eclipsefoundation/persistence/dto/NodeBase.java
@@ -19,23 +19,8 @@ import org.apache.commons.lang3.StringUtils;
  */
 @MappedSuperclass
 public abstract class NodeBase extends BareNode {
-	private String title;
 	private String url;
 
-	/**
-	 * @return the title
-	 */
-	public String getTitle() {
-		return title;
-	}
-
-	/**
-	 * @param title the title to set
-	 */
-	public void setTitle(String title) {
-		this.title = title;
-	}
-
 	/**
 	 * @return the url
 	 */
@@ -56,12 +41,12 @@ public abstract class NodeBase extends BareNode {
 	 * @return whether the current node is valid.
 	 */
 	public boolean validate() {
-		return StringUtils.isAnyEmpty(title, url);
+		return StringUtils.isAnyEmpty(url);
 	}
 
 	@Override
 	public int hashCode() {
-		return Objects.hash(super.hashCode(), title, url);
+		return Objects.hash(super.hashCode(), url);
 	}
 
 	@Override
@@ -76,6 +61,6 @@ public abstract class NodeBase extends BareNode {
 			return false;
 		}
 		NodeBase other = (NodeBase) obj;
-		return super.equals(obj) && Objects.equals(title, other.title) && Objects.equals(url, other.url);
+		return super.equals(obj) && Objects.equals(url, other.url);
 	}
 }
diff --git a/src/main/java/org/eclipsefoundation/persistence/model/RDBMSQuery.java b/src/main/java/org/eclipsefoundation/persistence/model/RDBMSQuery.java
index 76ea43f5d08af7ed99978eb3145a5254d10dd809..ff0cb7cc95876e23db1d332e3d9d86268d81e84f 100644
--- a/src/main/java/org/eclipsefoundation/persistence/model/RDBMSQuery.java
+++ b/src/main/java/org/eclipsefoundation/persistence/model/RDBMSQuery.java
@@ -11,6 +11,7 @@ import java.util.Optional;
 
 import org.apache.commons.lang3.StringUtils;
 import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
 import org.eclipsefoundation.marketplace.namespace.UrlParameterNames;
 import org.eclipsefoundation.persistence.dto.BareNode;
 import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
@@ -20,9 +21,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * Wrapper for initializing MongoDB BSON filters, sort clauses, and document
- * type when interacting with MongoDB. This should only be called from within
- * the scope of a request with a defined {@link ResourceDataType}
+ * Wrapper for initializing DB filters, sort clauses, and document
+ * type when interacting with an RDBMS.
  * 
  * @author Martin Lowe
  */
@@ -71,8 +71,7 @@ public class RDBMSQuery<T extends BareNode> {
 			}
 		}
 		// check if the page param has been set, defaulting to the first page if not set
-		Optional<String> pageOpt = wrapper.getFirstParam(UrlParameterNames.PAGE);
-		int page = 1;
+		Optional<String> pageOpt = wrapper.getFirstParam(DefaultUrlParameterNames.PAGE);
 		if (pageOpt.isPresent() && StringUtils.isNumeric(pageOpt.get())) {
 			int tmpPage = Integer.parseInt(pageOpt.get());
 			if (tmpPage > 0) {
@@ -83,14 +82,14 @@ public class RDBMSQuery<T extends BareNode> {
 	}
 
 	/**
-	 * Checks the URL parameter of {@link UrlParameterNames.LIMIT} for a numeric
+	 * Checks the URL parameter of {@link DefaultUrlParameterNames#LIMIT} for a numeric
 	 * value and returns it if present.
 	 * 
-	 * @return the value of the URL parameter {@link UrlParameterNames.LIMIT} if
+	 * @return the value of the URL parameter {@link DefaultUrlParameterNames#LIMIT} if
 	 *         present and numeric, otherwise returns -1.
 	 */
 	public int getLimit() {
-		Optional<String> limitVal = wrapper.getFirstParam(UrlParameterNames.LIMIT);
+		Optional<String> limitVal = wrapper.getFirstParam(DefaultUrlParameterNames.LIMIT);
 		if (limitVal.isPresent() && StringUtils.isNumeric(limitVal.get())) {
 			return Integer.parseInt(limitVal.get());
 		}
diff --git a/src/main/java/org/eclipsefoundation/search/dao/SearchIndexDAO.java b/src/main/java/org/eclipsefoundation/search/dao/SearchIndexDao.java
similarity index 86%
rename from src/main/java/org/eclipsefoundation/search/dao/SearchIndexDAO.java
rename to src/main/java/org/eclipsefoundation/search/dao/SearchIndexDao.java
index ab8928712a9f8f55b1a16037c1f17aafb07f910b..1ff4c5261b8fba2bdaae192934c60d5416492b29 100644
--- a/src/main/java/org/eclipsefoundation/search/dao/SearchIndexDAO.java
+++ b/src/main/java/org/eclipsefoundation/search/dao/SearchIndexDao.java
@@ -3,8 +3,7 @@ package org.eclipsefoundation.search.dao;
 import java.io.Closeable;
 import java.util.List;
 
-import org.apache.lucene.document.Document;
-import org.eclipsefoundation.persistence.model.RDBMSQuery;
+import org.apache.solr.common.SolrDocument;
 import org.eclipsefoundation.persistence.dto.BareNode;
 import org.eclipsefoundation.search.model.IndexerResponse;
 
@@ -16,7 +15,7 @@ import org.eclipsefoundation.search.model.IndexerResponse;
  * @author Martin Lowe
  *
  */
-public interface SearchIndexDAO extends Closeable {
+public interface SearchIndexDao extends Closeable {
 
 	/**
-	 * Retrieves indexed and ranked information for the given query. This
-	 * 
-	 * @param q   the current RDBMS query to get ranked indexed documents for
-	 * @return an ordered list of bare documents
+	 * Retrieves indexed and ranked information for the given search term.
+	 * 
+	 * @param searchTerm the search term used to match and rank indexed documents
+	 * @param docType    the entity type whose index should be queried
+	 * @return an ordered list of matching Solr documents
 	 */
-	<T extends BareNode> List<Document> get(RDBMSQuery<T> q);
+	<T extends BareNode> List<SolrDocument> get(String searchTerm, Class<T> docType);
 
 	/**
 	 * Update or create entries in the search indexer for the given entities.
diff --git a/src/main/java/org/eclipsefoundation/search/dao/impl/SolrIndexDAO.java b/src/main/java/org/eclipsefoundation/search/dao/impl/SolrIndexDAO.java
index bd29ea0b1751caa9618978a4351dd88a088ebb81..ebb537970de7a5cce4068dd15e1db65677241afa 100644
--- a/src/main/java/org/eclipsefoundation/search/dao/impl/SolrIndexDAO.java
+++ b/src/main/java/org/eclipsefoundation/search/dao/impl/SolrIndexDAO.java
@@ -5,21 +5,24 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
 import javax.annotation.PostConstruct;
 import javax.enterprise.context.ApplicationScoped;
 
-import org.apache.lucene.document.Document;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrClient;
+import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.client.solrj.response.UpdateResponse;
+import org.apache.solr.common.SolrDocument;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.params.SolrParams;
 import org.eclipse.microprofile.config.inject.ConfigProperty;
-import org.eclipsefoundation.persistence.model.RDBMSQuery;
 import org.eclipsefoundation.persistence.dto.BareNode;
-import org.eclipsefoundation.search.dao.SearchIndexDAO;
+import org.eclipsefoundation.search.dao.SearchIndexDao;
 import org.eclipsefoundation.search.model.IndexerResponse;
 import org.eclipsefoundation.search.model.SolrDocumentConverter;
 import org.eclipsefoundation.search.namespace.IndexerResponseStatus;
@@ -27,7 +30,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 @ApplicationScoped
-public class SolrIndexDAO implements SearchIndexDAO {
+public class SolrIndexDAO implements SearchIndexDao {
 	private static final Logger LOGGER = LoggerFactory.getLogger(SolrIndexDAO.class);
 
 	// DAO settings
@@ -43,9 +46,9 @@ public class SolrIndexDAO implements SearchIndexDAO {
 	String core;
 	@ConfigProperty(name = "eclipse.solr.timeout", defaultValue = "10000")
 	int solrTimeout;
-	@ConfigProperty(name = "eclipse.solr.queue", defaultValue = "20")
+	@ConfigProperty(name = "eclipse.solr.queue", defaultValue = "100")
 	int queueSize;
-	@ConfigProperty(name = "eclipse.solr.threads", defaultValue = "10")
+	@ConfigProperty(name = "eclipse.solr.threads", defaultValue = "25")
 	int threadCount;
 
 	// internal state members
@@ -59,9 +62,7 @@ public class SolrIndexDAO implements SearchIndexDAO {
 		} else {
 			// create solr server
 			ConcurrentUpdateSolrClient.Builder b = new ConcurrentUpdateSolrClient.Builder(solrURL + '/' + core)
-					.withConnectionTimeout(solrTimeout)
-					.withQueueSize(queueSize)
-					.withThreadCount(threadCount);
+					.withConnectionTimeout(solrTimeout).withQueueSize(queueSize).withThreadCount(threadCount);
 			this.solrServer = b.build();
 			this.converters = new HashMap<>();
 			LOGGER.debug("Started Solr server for index processing");
@@ -76,15 +77,22 @@ public class SolrIndexDAO implements SearchIndexDAO {
 	}
 
 	@Override
-	public <T extends BareNode> List<Document> get(RDBMSQuery<T> q) {
+	public <T extends BareNode> List<SolrDocument> get(String searchTerm, Class<T> docType) {
 		// check whether call should proceed
-		if (!stateCheck()) {
+		if (!stateCheck() || StringUtils.isBlank(searchTerm)) {
 			return Collections.emptyList();
 		}
-
-		// TODO Auto-generated method stub
-
-		return Collections.emptyList();
+		
+		// get the current doctype converter
+		SolrDocumentConverter<T> converter = getConverter(docType);
+		SolrParams query = converter.getBaseQuery(searchTerm);
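+		// execute the query against the configured core and return the matching documents in ranked order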
+		try {
+			QueryResponse response = solrServer.query(query);
+			return response.getResults();
+		} catch (SolrServerException | IOException e) {
+			LOGGER.error("Error while retrieving search results",e);
+			return null;
+		}
 	}
 
 	@Override
@@ -94,15 +102,20 @@ public class SolrIndexDAO implements SearchIndexDAO {
 			return IndexerResponse.getMaintenanceResponse();
 		}
 
-		// only way of setting value, so type is known and is safe
-		@SuppressWarnings("unchecked")
-		SolrDocumentConverter<T> converter = (SolrDocumentConverter<T>) converters.computeIfAbsent(docType,
-				SolrDocumentConverter::new);
+		// get the current doctype converter
+		SolrDocumentConverter<T> converter = getConverter(docType);
 
 		// convert the documents
-		List<SolrInputDocument> docs = entities.stream().map(converter::convert).collect(Collectors.toList());
+		List<SolrInputDocument> docs = entities.stream().map(converter::convert).filter(Objects::nonNull)
+				.collect(Collectors.toList());
 		// attempt to update + commit the changes
 		long now = System.currentTimeMillis();
+		if (docs.isEmpty()) {
+			LOGGER.debug("No documents to be indexed for current call (recieved {} entities)", docs.size());
+
+			// TODO: should return some empty response base line rather than maint
+			return IndexerResponse.getMaintenanceResponse();
+		}
 		try {
 			// attempting to add documents to solr core
 			solrServer.add(docs);
@@ -144,6 +157,11 @@ public class SolrIndexDAO implements SearchIndexDAO {
 		}
 	}
 
+	@SuppressWarnings("unchecked")
+	private <T extends BareNode> SolrDocumentConverter<T> getConverter(Class<T> docType) {
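+		// converters are built once per document type and reused on subsequent calls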
+		return converters.computeIfAbsent(docType, SolrDocumentConverter::new);
+	}
+
 	/**
 	 * Generate a generic indexer response object based on the Solr update response
 	 * document and a set of messages that will be set into the response depending
diff --git a/src/main/java/org/eclipsefoundation/search/helper/FieldHelper.java b/src/main/java/org/eclipsefoundation/search/helper/FieldHelper.java
new file mode 100644
index 0000000000000000000000000000000000000000..3c7c84a0961ad4eff351989202846790d684c375
--- /dev/null
+++ b/src/main/java/org/eclipsefoundation/search/helper/FieldHelper.java
@@ -0,0 +1,61 @@
+package org.eclipsefoundation.search.helper;
+
+import java.util.Date;
+import java.util.List;
+import java.util.UUID;
+
+import org.eclipsefoundation.search.model.IndexedClassDescriptor.IndexedDescriptor;
+import org.eclipsefoundation.search.namespace.IndexerTextProcessingType;
+
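+/**
+ * Helper for mapping indexed entity fields to the Solr dynamic field names used
+ * by the search index, based on the field's Java type and text processing settings.
+ */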
+public class FieldHelper {
+
+	public static String convertNameToField(IndexedDescriptor c) {
+		return convertNameToField(c, false);
+	}
+	
+	public static String convertNameToField(IndexedDescriptor c, boolean nested) {
+		String name = c.getName();
+		// on the nested pass for multi-valued fields, resolve against the declared subtype
+		Class<?> type = nested && c.getSubtype() != null ? c.getSubtype() : c.getType();
+		if (type.equals(List.class) && !nested) {
+			// complex text processing is not supported for multi-valued fields
+			if (!c.getTextProcessing().equals(IndexerTextProcessingType.NONE)) {
+				return "ignored_" + name;
+			}
+			return convertNameToField(c, true) + "s";
+		} else if (type.equals(String.class)) {
+			return name + getTextProcessingSuffix(c);
+		} else if (type.equals(UUID.class)) {
+			return name + "_s";
+		} else if (type.equals(Integer.class)) {
+			return name + "_i";
+		} else if (type.equals(Long.class)) {
+			return name + "_l";
+		} else if (type.equals(Float.class)) {
+			return name + "_f";
+		} else if (type.equals(Boolean.class)) {
+			return name + "_b";
+		} else if (type.equals(Double.class)) {
+			return name + "_d";
+		} else if (type.equals(Date.class)) {
+			return name + "_dt";
+		} else {
+			// fallback for unknown types getting indexed
+			return "ignored_" + name;
+		}
+	}
+	
+	public static String getTextProcessingSuffix(IndexedDescriptor c) {
+		switch (c.getTextProcessing()) {
+		case GENERAL:
+			return "_txt_gen";
+		case STANDARD:
+			return "_txt_en";
+		case AGGRESSIVE:
+			return "_txt_en_split";
+		default:
+			return "_s";
+		}
+	}
+
+	private FieldHelper() {
+	}
+}
diff --git a/src/main/java/org/eclipsefoundation/search/model/Indexed.java b/src/main/java/org/eclipsefoundation/search/model/Indexed.java
index e333c09354cbdece2ad6e0710cb54f0fb8aea69f..e9805efe1d507059c551feb47633565ccee78470 100644
--- a/src/main/java/org/eclipsefoundation/search/model/Indexed.java
+++ b/src/main/java/org/eclipsefoundation/search/model/Indexed.java
@@ -7,6 +7,14 @@ import static java.lang.annotation.RetentionPolicy.RUNTIME;
 import java.lang.annotation.Retention;
 import java.lang.annotation.Target;
 
+import org.eclipsefoundation.search.namespace.IndexerTextProcessingType;
+
+/**
+ * Annotation controlling how entities are indexed via the search DAO.
+ * 
+ * @author Martin Lowe
+ *
+ */
 @Retention(RUNTIME)
 @Target({ TYPE, FIELD })
 public @interface Indexed {
@@ -18,5 +26,14 @@ public @interface Indexed {
 	 * 
 	 * @return the boost value for the field, or the default value of 1
 	 */
-	int boost() default 1;
+	float boost() default 1f;
+
+	/**
+	 * Whether the value should be stored as is and returned.
+	 * 
+	 * @return true if the value should be stored on index, false otherwise.
+	 */
+	boolean stored() default false;
+	
+	IndexerTextProcessingType textProcessing() default IndexerTextProcessingType.NONE;
 }
diff --git a/src/main/java/org/eclipsefoundation/search/model/IndexedClassDescriptor.java b/src/main/java/org/eclipsefoundation/search/model/IndexedClassDescriptor.java
index 7e39115598757b57206c6c044050b129139da82c..10d52ac26e34b4e1eeee1798515aeebe2a480f75 100644
--- a/src/main/java/org/eclipsefoundation/search/model/IndexedClassDescriptor.java
+++ b/src/main/java/org/eclipsefoundation/search/model/IndexedClassDescriptor.java
@@ -5,19 +5,31 @@ import java.beans.Introspector;
 import java.beans.PropertyDescriptor;
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
+import java.lang.reflect.ParameterizedType;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Optional;
 
 import org.eclipsefoundation.persistence.dto.BareNode;
+import org.eclipsefoundation.search.namespace.IndexerTextProcessingType;
 
+/**
+ * Describes an entity type that should be indexed. This class uses reflection
+ * on the runtime class to retrieve the internal fields that have been
+ * annotated with the {@link Indexed} annotation.
+ * 
+ * @author Martin Lowe
+ *
+ * @param <T> the entity type this descriptor describes.
+ */
 public class IndexedClassDescriptor<T extends BareNode> {
 	private List<IndexedDescriptor> internal;
 
 	public IndexedClassDescriptor(Class<T> clazz) {
+		this.internal = new ArrayList<>();
 		try {
-			for (Field f : clazz.getFields()) {
+			for (Field f : clazz.getDeclaredFields()) {
 				Indexed annotation = f.getAnnotation(Indexed.class);
 				if (annotation != null) {
 					String name = f.getName();
@@ -28,8 +40,7 @@ public class IndexedClassDescriptor<T extends BareNode> {
 						throw new RuntimeException("Could not generate SolrDocumentConverter for " + clazz.getName());
 					}
 					PropertyDescriptor property = propertyOpt.get();
-					internal.add(
-							new IndexedDescriptor(property.getName(), property.getReadMethod(), annotation.boost()));
+					internal.add(new IndexedDescriptor(f, property.getReadMethod(), annotation));
 				}
 			}
 		} catch (IntrospectionException e) {
@@ -40,16 +51,74 @@ public class IndexedClassDescriptor<T extends BareNode> {
 	public List<IndexedDescriptor> getDescriptors() {
 		return new ArrayList<>(internal);
 	}
-	
+
 	public static class IndexedDescriptor {
-		String name;
-		Method getter;
-		int boost;
+		final String name;
+		final Method getter;
+		final float boost;
+		final boolean stored;
+		final IndexerTextProcessingType textProcessing;
+		final Class<?> type;
+		final Class<?> subtype;
 
-		private IndexedDescriptor(String name, Method getter, int boost) {
-			this.name = name;
+		private IndexedDescriptor(Field field, Method getter, Indexed annotation) {
+			this.name = field.getName();
 			this.getter = getter;
-			this.boost = boost;
+			this.boost = annotation.boost();
+			this.stored = annotation.stored();
+			this.textProcessing = annotation.textProcessing();
+			this.type = field.getType();
+			// if the generic type differs from the base type, the field is generic.
+			// This will not work for types nested within themselves (List in List, Map in Map, etc.)
+			if (field.getGenericType() instanceof ParameterizedType) {
+				// using reflection, gets declared type from the raw source
+				this.subtype = (Class<?>)((ParameterizedType)field.getGenericType()).getActualTypeArguments()[0];
+			} else {
+				this.subtype = null;
+			}
+		}
+
+		/**
+		 * @return the name
+		 */
+		public String getName() {
+			return name;
+		}
+
+		/**
+		 * @return the getter
+		 */
+		public Method getGetter() {
+			return getter;
+		}
+
+		/**
+		 * @return the boost
+		 */
+		public float getBoost() {
+			return boost;
+		}
+
+		/**
+		 * @return the stored
+		 */
+		public boolean isStored() {
+			return stored;
+		}
+
+		/**
+		 * @return the textProcessing
+		 */
+		public IndexerTextProcessingType getTextProcessing() {
+			return textProcessing;
+		}
+
+		public Class<?> getType() {
+			return this.type;
+		}
+		
+		public Class<?> getSubtype() {
+			return this.subtype;
 		}
 	}
 }
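
As a quick illustration of how the descriptor above is consumed, the sketch below reflects over an entity and prints the Solr field names produced by `FieldHelper`. This snippet is not part of the patch; it assumes `Listing` carries `@Indexed` fields elsewhere in this change set, and if it has none the loop simply prints nothing.

```java
import org.eclipsefoundation.marketplace.dto.Listing;
import org.eclipsefoundation.search.helper.FieldHelper;
import org.eclipsefoundation.search.model.IndexedClassDescriptor;
import org.eclipsefoundation.search.model.IndexedClassDescriptor.IndexedDescriptor;

public class DescriptorSketch {
	public static void main(String[] args) {
		// reflect over the Listing entity and print the generated Solr field names
		IndexedClassDescriptor<Listing> descriptor = new IndexedClassDescriptor<>(Listing.class);
		for (IndexedDescriptor d : descriptor.getDescriptors()) {
			System.out.printf("%s -> %s (boost %.1f, stored %b)%n", d.getName(),
					FieldHelper.convertNameToField(d), d.getBoost(), d.isStored());
		}
	}
}
```
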
diff --git a/src/main/java/org/eclipsefoundation/search/model/SolrDocumentConverter.java b/src/main/java/org/eclipsefoundation/search/model/SolrDocumentConverter.java
index c241f7393476cc82eaec865dccc4d1ebd5d0cf31..cabc53a499288d19d8d86c4631520c2b203ce59f 100644
--- a/src/main/java/org/eclipsefoundation/search/model/SolrDocumentConverter.java
+++ b/src/main/java/org/eclipsefoundation/search/model/SolrDocumentConverter.java
@@ -1,9 +1,21 @@
 package org.eclipsefoundation.search.model;
 
 import java.lang.reflect.InvocationTargetException;
+import java.util.HashMap;
+import java.util.Map;
 
+import org.apache.lucene.index.Term;
+import org.apache.lucene.queries.function.BoostedQuery;
+import org.apache.lucene.queries.function.valuesource.ConstValueSource;
+import org.apache.lucene.search.BooleanClause;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery.Builder;
+import org.apache.lucene.search.TermQuery;
 import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.params.MapSolrParams;
+import org.apache.solr.common.params.SolrParams;
 import org.eclipsefoundation.persistence.dto.BareNode;
+import org.eclipsefoundation.search.helper.FieldHelper;
 import org.eclipsefoundation.search.model.IndexedClassDescriptor.IndexedDescriptor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -20,11 +32,20 @@ public class SolrDocumentConverter<T extends BareNode> {
 	}
 
 	public SolrInputDocument convert(T entity) {
+		// don't index documents with no fields to index
+		if (internal.getDescriptors().isEmpty()) {
+			return null;
+		}
 		SolrInputDocument in = new SolrInputDocument();
 		try {
 			for (IndexedDescriptor c : internal.getDescriptors()) {
-				in.addField(c.name, c.getter.invoke(entity));
+				Object data = c.getter.invoke(entity);
+				in.addField(FieldHelper.convertNameToField(c), data);
 			}
+			// add the standard fields shared by all indexed documents
+			in.addField("id", entity.getId().toString());
+			in.addField("type_s", clazz.getName());
+			in.addField("title_s", entity.getTitle());
 		} catch (IllegalAccessException e) {
 			LOGGER.error("Could not invoke getter while converting entity of type {}", clazz.getName(), e);
 		} catch (IllegalArgumentException e) {
@@ -35,4 +56,31 @@ public class SolrDocumentConverter<T extends BareNode> {
 		}
 		return in;
 	}
+
+	public SolrParams getBaseQuery(String searchTerm) {
+		// build the text match query, where at least one value needs to match
+		Builder textMatches = new Builder();
+		// add title manually
+		textMatches.add(new TermQuery(new Term("title_s", searchTerm)), Occur.SHOULD);
+		for (IndexedDescriptor c : internal.getDescriptors()) {
+			TermQuery base = new TermQuery(new Term(FieldHelper.convertNameToField(c), searchTerm));
+			if (c.getBoost() != 1.0f) {
+				textMatches.add(
+						new BooleanClause(new BoostedQuery(base, new ConstValueSource(c.getBoost())), Occur.SHOULD));
+			} else {
+				textMatches.add(base, Occur.SHOULD);
+			}
+		}
+		// build document type + text match boolean query
+		Builder textAndTypeBuilder = new Builder();
+		textAndTypeBuilder.add(textMatches.build(), Occur.MUST);
+		textAndTypeBuilder.add(new TermQuery(new Term("type_s", clazz.getName())), Occur.MUST);
+		
+		// set up base query from the required values
+		Map<String, String> queryParamMap = new HashMap<>();
+		queryParamMap.put("q", textAndTypeBuilder.build().toString());
+		queryParamMap.put("fl", "id");
+		queryParamMap.put("df", "*");
+		return new MapSolrParams(queryParamMap);
+	}
 }
diff --git a/src/main/java/org/eclipsefoundation/search/namespace/IndexerTextProcessingType.java b/src/main/java/org/eclipsefoundation/search/namespace/IndexerTextProcessingType.java
new file mode 100644
index 0000000000000000000000000000000000000000..a8419a45ed9939083fe31d931c1228ebec2c935b
--- /dev/null
+++ b/src/main/java/org/eclipsefoundation/search/namespace/IndexerTextProcessingType.java
@@ -0,0 +1,59 @@
+package org.eclipsefoundation.search.namespace;
+
+/**
+ * Allows for the support of multiple types of text post-processing based on
+ * the requirements of the data. Generally, the more complex the processing,
+ * the more computationally expensive it is at both query and index
+ * time.
+ * 
+ * <ul>
+ * <li>
+ * <p>
+ * <strong>GENERAL:</strong>
+ * </p>
+ * <p>
+ * Uses generic cross-language processing when available. This is appropriate
+ * for fields/searches where English may not be the primary language but
+ * complex indexing and searching are still desired.
+ * </p>
+ * </li>
+ * <li>
+ * <p>
+ * <strong>STANDARD:</strong>
+ * </p>
+ * <p>
+ * Uses standard processing with English-language stopwords and protected
+ * words. This type should be used when the language of the data entered is
+ * known to be English, as it provides more accurate results and allows for
+ * query-time analysis of synonyms.
+ * </p>
+ * </li>
+ * <li>
+ * <p>
+ * <strong>AGGRESSIVE:</strong>
+ * </p>
+ * <p>
+ * Uses aggressive processing with English-language stopwords and protected
+ * words. This type of processing also splits individual words to allow
+ * matching on compound terms, using indicators of word boundaries such as
+ * changes in casing, numbers, and non-alphanumeric characters. This allows
+ * matches on brand names such as ASCIIDoc for searches like
+ * "ascii doc".
+ * </p>
+ * </li>
+ * <li>
+ * <p>
+ * <strong>NONE:</strong>
+ * </p>
+ * <p>
+ * No text processing will be done on this text; it will be indexed as is.
+ * </p>
+ * </li>
+ * </ul>
+ * 
+ * @author Martin Lowe
+ *
+ */
+public enum IndexerTextProcessingType {
+	GENERAL, STANDARD, AGGRESSIVE, NONE;
+}
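
To make the processing levels concrete, the sketch below shows how hypothetical entity fields might be annotated and which Solr dynamic fields `FieldHelper` would map them to. The entity, field names, and settings are illustrative only and not part of this change set; a real entity would extend `BareNode`.

```java
import java.util.Date;
import java.util.UUID;

import org.eclipsefoundation.search.model.Indexed;
import org.eclipsefoundation.search.namespace.IndexerTextProcessingType;

// Illustrative only: hypothetical fields, not part of this change set.
public class ExampleIndexedEntity {
	@Indexed(boost = 2.5f, textProcessing = IndexerTextProcessingType.AGGRESSIVE)
	private String name;        // mapped to dynamic field "name_txt_en_split"

	@Indexed(stored = true, textProcessing = IndexerTextProcessingType.STANDARD)
	private String teaser;      // mapped to dynamic field "teaser_txt_en"

	@Indexed(textProcessing = IndexerTextProcessingType.GENERAL)
	private String body;        // mapped to dynamic field "body_txt_gen"

	@Indexed
	private UUID marketId;      // NONE by default, mapped to "marketId_s"

	@Indexed
	private Date creationDate;  // mapped to "creationDate_dt"
}
```
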
diff --git a/src/main/java/org/eclipsefoundation/search/service/PersistenceBackedSearchService.java b/src/main/java/org/eclipsefoundation/search/service/PersistenceBackedSearchService.java
new file mode 100644
index 0000000000000000000000000000000000000000..ed249867a955bdde55b167e8ec174a85241a0700
--- /dev/null
+++ b/src/main/java/org/eclipsefoundation/search/service/PersistenceBackedSearchService.java
@@ -0,0 +1,16 @@
+package org.eclipsefoundation.search.service;
+
+import java.util.List;
+
+import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.persistence.dto.BareNode;
+import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
+
+/**
+ * Defines a service that layers search-engine ranking on top of
+ * persistence-backed retrieval of entities.
+ */
+public interface PersistenceBackedSearchService {
+
+	<T extends BareNode> List<T> find(RequestWrapper wrap, DtoFilter<T> filter);
+}
diff --git a/src/main/java/org/eclipsefoundation/search/service/impl/DefaultPersistenceBackedSearchService.java b/src/main/java/org/eclipsefoundation/search/service/impl/DefaultPersistenceBackedSearchService.java
new file mode 100644
index 0000000000000000000000000000000000000000..20b3f668f9d7db8dc5ef9f4efc7ba924fec4d73b
--- /dev/null
+++ b/src/main/java/org/eclipsefoundation/search/service/impl/DefaultPersistenceBackedSearchService.java
@@ -0,0 +1,83 @@
+package org.eclipsefoundation.search.service.impl;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
+import javax.enterprise.context.ApplicationScoped;
+import javax.inject.Inject;
+
+import org.apache.solr.common.SolrDocument;
+import org.eclipsefoundation.core.model.RequestWrapper;
+import org.eclipsefoundation.core.namespace.DefaultUrlParameterNames;
+import org.eclipsefoundation.persistence.dao.PersistenceDao;
+import org.eclipsefoundation.persistence.dto.BareNode;
+import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
+import org.eclipsefoundation.persistence.model.RDBMSQuery;
+import org.eclipsefoundation.search.dao.SearchIndexDao;
+import org.eclipsefoundation.search.service.PersistenceBackedSearchService;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Default implementation of the persistence-backed search service, using the
+ * search index to rank results and the relational DAO to retrieve them.
+ * @author Martin Lowe
+ */
+@ApplicationScoped
+public class DefaultPersistenceBackedSearchService implements PersistenceBackedSearchService {
+	private static final Logger LOGGER = LoggerFactory.getLogger(DefaultPersistenceBackedSearchService.class);
+
+	@Inject
+	SearchIndexDao searchDAO;
+	@Inject
+	PersistenceDao dbDAO;
+
+	@Override
+	public <T extends BareNode> List<T> find(RequestWrapper wrap, DtoFilter<T> filter) {
+		// get search term
+		Optional<String> searchTerm = wrap.getFirstParam(DefaultUrlParameterNames.QUERY_STRING);
+		List<String> resultsOrder = null;
+		if (searchTerm.isPresent()) {
+			// get the ranked results from search engine. Results should have docids as id
+			List<SolrDocument> rankedResults = searchDAO.get(searchTerm.get(), filter.getType());
+			// if we got results, store the ranked order and set restriction to request
+			if (rankedResults != null && !rankedResults.isEmpty()) {
+				resultsOrder = rankedResults.stream().map(d -> (String) d.getFieldValue("id"))
+						.collect(Collectors.toList());
+				// restrict id results to given IDs (if supported)
+				resultsOrder.forEach(id -> wrap.addParam("ids", id));
+			}
+		}
+		// get the results from the DB dao
+		List<T> results = dbDAO.get(new RDBMSQuery<T>(wrap, filter));
+		// if we have an order to apply
+		if (resultsOrder != null) {
+			return getMarshalledList(resultsOrder, results);
+		}
+		// if we couldn't properly search, return native order
+		return results;
+	}
+	
+	private <T extends BareNode> List<T> getMarshalledList(List<String> resultsOrder, List<T> results) {
+		// create a sized array list
+		List<T> marshalledResults = new ArrayList<>(Math.max(resultsOrder.size(), results.size()));
+		for (String id : resultsOrder) {
+			// get ID for current ordered result
+			UUID docid = UUID.fromString(id);
+			LOGGER.debug("Checking for result document with ID {}", docid);
+			// iterate through the results and add them to the marshalled results
+			for (T result : results) {
+				if (docid.equals(result.getId())) {
+					marshalledResults.add(result);
+					LOGGER.debug("Found result document with ID {}", docid);
+					break;
+				}
+			}
+		}
+		return marshalledResults;
+	}
+
+}
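
A sketch of how a JAX-RS resource might delegate to this service follows. The injected `RequestWrapper` and `DtoFilter` mirror the pattern of the existing resource classes, but the resource itself, its path, and the injection points are assumptions rather than part of this patch.

```java
import java.util.List;

import javax.inject.Inject;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.core.Response;

import org.eclipsefoundation.core.model.RequestWrapper;
import org.eclipsefoundation.marketplace.dto.Listing;
import org.eclipsefoundation.persistence.dto.filter.DtoFilter;
import org.eclipsefoundation.search.service.PersistenceBackedSearchService;

@Path("/listings")
public class ListingSearchResourceSketch {

	@Inject
	PersistenceBackedSearchService searchService;
	@Inject
	RequestWrapper wrapper;
	@Inject
	DtoFilter<Listing> dtoFilter;

	@GET
	public Response search() {
		// Solr-ranked when a query term is present on the request, DB order otherwise
		List<Listing> listings = searchService.find(wrapper, dtoFilter);
		return Response.ok(listings).build();
	}
}
```

When no query term is supplied, the service falls through to the RDBMS DAO, so existing endpoints keep their current behaviour.
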
diff --git a/src/main/resources/application.properties b/src/main/resources/application.properties
index a244642591890ba936ab5fe6ebf51a440f0860f5..78e6e2ae92e87823a00c74a18eecc16e395944b4 100644
--- a/src/main/resources/application.properties
+++ b/src/main/resources/application.properties
@@ -15,14 +15,16 @@ quarkus.datasource.min-size = 5
 quarkus.datasource.max-size = 15
 eclipse.db.default.limit=25
 eclipse.db.default.limit.max=100
-#eclipse.solr.host=http://solr.dev.docker/solr
-#eclipse.solr.core=mpc_dev
 
-quarkus.hibernate-orm.database.generation=drop-and-create
+#quarkus.hibernate-orm.database.generation=drop-and-create
 quarkus.hibernate-orm.dialect=org.eclipsefoundation.marketplace.config.CustomMariaDBDialect
-##quarkus.hibernate-orm.log.sql=true
 
 # MISC
 quarkus.resteasy.gzip.enabled=true
 quarkus.http.port=8090
-eclipse.oauth.override=true
\ No newline at end of file
+#eclipse.oauth.override=true
+
+## SOLR
+eclipse.solr.enabled=true
+eclipse.solr.host=http://192.168.1.253:32790/solr
+eclipse.solr.core=marketplace
\ No newline at end of file
diff --git a/src/test/java/org/eclipsefoundation/core/config/RoleAugmentor.java b/src/test/java/org/eclipsefoundation/core/config/MockRoleAugmentor.java
similarity index 72%
rename from src/test/java/org/eclipsefoundation/core/config/RoleAugmentor.java
rename to src/test/java/org/eclipsefoundation/core/config/MockRoleAugmentor.java
index f990118b8e65358b3e2a6c9b158b1447e762a7d8..2d3f7ecb255acec11d55d49941096226a4ca9173 100644
--- a/src/test/java/org/eclipsefoundation/core/config/RoleAugmentor.java
+++ b/src/test/java/org/eclipsefoundation/core/config/MockRoleAugmentor.java
@@ -1,14 +1,12 @@
 package org.eclipsefoundation.core.config;
 
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionStage;
-
 import javax.enterprise.context.ApplicationScoped;
 
 import io.quarkus.security.identity.AuthenticationRequestContext;
 import io.quarkus.security.identity.SecurityIdentity;
 import io.quarkus.security.identity.SecurityIdentityAugmentor;
 import io.quarkus.security.runtime.QuarkusSecurityIdentity;
+import io.smallrye.mutiny.Uni;
 
 /**
  * Custom override for test classes that ignores current login state and sets
@@ -17,7 +15,7 @@ import io.quarkus.security.runtime.QuarkusSecurityIdentity;
  * @author Martin Lowe
  */
 @ApplicationScoped
-public class RoleAugmentor implements SecurityIdentityAugmentor {
+public class MockRoleAugmentor implements SecurityIdentityAugmentor {
 
 	@Override
 	public int priority() {
@@ -25,8 +23,7 @@ public class RoleAugmentor implements SecurityIdentityAugmentor {
 	}
 
 	@Override
-	public CompletionStage<SecurityIdentity> augment(SecurityIdentity identity, AuthenticationRequestContext context) {
-
+	public Uni<SecurityIdentity> augment(SecurityIdentity identity, AuthenticationRequestContext context) {
 		// create a new builder and copy principal, attributes, credentials and roles
 		// from the original
 		QuarkusSecurityIdentity.Builder builder = QuarkusSecurityIdentity.builder()
@@ -35,10 +32,6 @@ public class RoleAugmentor implements SecurityIdentityAugmentor {
 
 		// add custom role source here
 		builder.addRole("marketplace_admin_access");
-
-		CompletableFuture<SecurityIdentity> cs = new CompletableFuture<>();
-		cs.complete(builder.build());
-
-		return cs;
+		return context.runBlocking(builder::build);
 	}
 }
\ No newline at end of file
diff --git a/src/test/java/org/eclipsefoundation/marketplace/dao/impl/MockHibernateDao.java b/src/test/java/org/eclipsefoundation/marketplace/dao/impl/MockHibernateDao.java
index 14da028af7a1fe70f42c616a7a4092b1a318b498..d0a715a411a86c72e5b7ad32e64239c4de22d050 100644
--- a/src/test/java/org/eclipsefoundation/marketplace/dao/impl/MockHibernateDao.java
+++ b/src/test/java/org/eclipsefoundation/marketplace/dao/impl/MockHibernateDao.java
@@ -20,6 +20,8 @@ import org.eclipsefoundation.marketplace.dto.ErrorReport;
 import org.eclipsefoundation.marketplace.dto.Listing;
 import org.eclipsefoundation.marketplace.dto.ListingVersion;
 import org.eclipsefoundation.marketplace.dto.Market;
+import org.eclipsefoundation.marketplace.namesace.TestUrlParameterNames;
+import org.eclipsefoundation.persistence.dao.impl.DefaultHibernateDao;
 import org.eclipsefoundation.persistence.dto.BareNode;
 import org.eclipsefoundation.persistence.model.RDBMSQuery;
 
@@ -77,7 +79,7 @@ public class MockHibernateDao extends DefaultHibernateDao {
 	@Override
 	public <T extends BareNode> List<T> get(RDBMSQuery<T> q) {
 		capturedQuery = q;
-		Optional<String> useTestData = q.getWrapper().getFirstParam("test-data-exists");
+		Optional<String> useTestData = q.getWrapper().getFirstParam(TestUrlParameterNames.TEST_DATA_EXISTS);
 		if (useTestData.isPresent() && "false".equals(useTestData.get())) {
 			return Collections.emptyList();
 		}
@@ -111,7 +113,7 @@ public class MockHibernateDao extends DefaultHibernateDao {
 	public <T extends BareNode> void delete(RDBMSQuery<T> q) {
 		capturedQuery = q;
 		// throw the same exception as the main would
-		Optional<String> useTestData = q.getWrapper().getFirstParam("test-data-exists");
+		Optional<String> useTestData = q.getWrapper().getFirstParam(TestUrlParameterNames.TEST_DATA_EXISTS);
 		if (useTestData.isPresent() && "false".equals(useTestData.get())) {
 			throw new NoResultException("Could not find any documents with given filters");
 		}
diff --git a/src/test/java/org/eclipsefoundation/marketplace/helper/JavaVersionHelperTest.java b/src/test/java/org/eclipsefoundation/marketplace/helper/JavaVersionHelperTest.java
index b32ae4d25435b072dcbf512d3e8e0007b83a4296..8b1692acd48aeea5f9ea8894c8b89aa78f85bf82 100644
--- a/src/test/java/org/eclipsefoundation/marketplace/helper/JavaVersionHelperTest.java
+++ b/src/test/java/org/eclipsefoundation/marketplace/helper/JavaVersionHelperTest.java
@@ -17,10 +17,10 @@ import io.quarkus.test.junit.QuarkusTest;
  * @author Martin Lowe
  */
 @QuarkusTest
-public class JavaVersionHelperTest {
+class JavaVersionHelperTest {
 
 	@Test
-	public void testConvertToDBSafe() {
+	void testConvertToDBSafe() {
 		Assertions.assertEquals("7", JavaVersionHelper.convertToDBSafe("1.7"));
 		Assertions.assertEquals("7", JavaVersionHelper.convertToDBSafe("1.7.0_4"));
 		Assertions.assertEquals("8", JavaVersionHelper.convertToDBSafe("1.8.45"));
@@ -31,7 +31,7 @@ public class JavaVersionHelperTest {
 		Assertions.assertEquals(null, JavaVersionHelper.convertToDBSafe(null));
 	}
 	@Test
-	public void testConvertToDisplayValue() {
+	void testConvertToDisplayValue() {
 		Assertions.assertEquals("1.7", JavaVersionHelper.convertToDisplayValue("7"));
 		Assertions.assertEquals("1.8", JavaVersionHelper.convertToDisplayValue("8"));
 		Assertions.assertEquals("9", JavaVersionHelper.convertToDisplayValue("9"));
diff --git a/src/test/java/org/eclipsefoundation/marketplace/helper/SortableHelperTest.java b/src/test/java/org/eclipsefoundation/marketplace/helper/SortableHelperTest.java
index e508c9f00a7cd3463270b2dfa3c6e5e823e05130..25630172fc3e95df7f62b1225096a3afa471e244 100644
--- a/src/test/java/org/eclipsefoundation/marketplace/helper/SortableHelperTest.java
+++ b/src/test/java/org/eclipsefoundation/marketplace/helper/SortableHelperTest.java
@@ -15,19 +15,17 @@ import org.eclipsefoundation.persistence.model.SortableField;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
 
-import io.quarkus.test.junit.DisabledOnSubstrate;
 import io.quarkus.test.junit.QuarkusTest;
 
 /**
  * @author Martin Lowe
  *
  */
-@DisabledOnSubstrate
 @QuarkusTest
-public class SortableHelperTest {
+class SortableHelperTest {
 
 	@Test
-	public void testGetSortableFieldsValidClass() {
+	void testGetSortableFieldsValidClass() {
 		List<Sortable<?>> sortables = SortableHelper.getSortableFields(CustomDocType.class);
 
 		// check that our list gets returned
@@ -43,7 +41,7 @@ public class SortableHelperTest {
 	}
 
 	@Test
-	public void testGetSortableFieldsNoAnnotations() {
+	void testGetSortableFieldsNoAnnotations() {
 		List<Sortable<?>> sortables = SortableHelper.getSortableFields(Object.class);
 
 		Assertions.assertNotNull(sortables);
@@ -51,13 +49,13 @@ public class SortableHelperTest {
 	}
 
 	@Test
-	public void testGetSortableFieldsNoClass() {
+	void testGetSortableFieldsNoClass() {
 		// this should throw as the class is required
 		Assertions.assertThrows(NullPointerException.class, () -> SortableHelper.getSortableFields(null));
 	}
 
 	@Test
-	public void testGetSortableFieldsNested() {
+	void testGetSortableFieldsNested() {
 		List<Sortable<?>> sortables = SortableHelper.getSortableFields(CustomDocType.class);
 		Optional<Sortable<?>> sOpt = sortables.stream().filter(c -> c.getName().equals("name")).findFirst();
 
@@ -71,7 +69,7 @@ public class SortableHelperTest {
 	}
 
 	@Test
-	public void testGetSortableFieldsCustomName() {
+	void testGetSortableFieldsCustomName() {
 		List<Sortable<?>> sortables = SortableHelper.getSortableFields(CustomDocType.class);
 		Optional<Sortable<?>> sOpt = sortables.stream().filter(c -> c.getName().equals("grp")).findFirst();
 
@@ -85,7 +83,7 @@ public class SortableHelperTest {
 	}
 	
 	@Test
-	public void testGetSortableFieldsCustomPath() {
+	void testGetSortableFieldsCustomPath() {
 		List<Sortable<?>> sortables = SortableHelper.getSortableFields(CustomDocType.class);
 		Optional<Sortable<?>> sOpt = sortables.stream().filter(c -> c.getName().equals("cat")).findFirst();
 
@@ -99,7 +97,7 @@ public class SortableHelperTest {
 	}
 	
 	@Test
-	public void testGetSortableFieldByName() {
+	void testGetSortableFieldByName() {
 		List<Sortable<?>> sortables = SortableHelper.getSortableFields(CustomDocType.class);
 		Optional<Sortable<?>> s = SortableHelper.getSortableFieldByName(sortables, "name");
 
@@ -108,7 +106,7 @@ public class SortableHelperTest {
 	}
 	
 	@Test
-	public void testGetSortableFieldByNameCustomName() {
+	void testGetSortableFieldByNameCustomName() {
 		List<Sortable<?>> sortables = SortableHelper.getSortableFields(CustomDocType.class);
 
 		// assert that we can find the sortable with custom name rather than the field name
@@ -117,13 +115,13 @@ public class SortableHelperTest {
 	}
 	
 	@Test
-	public void testGetSortableFieldByNameNullName() {
+	void testGetSortableFieldByNameNullName() {
 		List<Sortable<?>> sortables = SortableHelper.getSortableFields(CustomDocType.class);
 		Assertions.assertThrows(NullPointerException.class, () -> SortableHelper.getSortableFieldByName(sortables, null));
 	}
 	
 	@Test
-	public void testGetSortableFieldByNameNullSortables() {
+	void testGetSortableFieldByNameNullSortables() {
 		Assertions.assertThrows(NullPointerException.class, () -> SortableHelper.getSortableFieldByName(null, "sample"));
 	}
 
@@ -133,23 +131,23 @@ public class SortableHelperTest {
 	 * 
 	 * @author Martin Lowe
 	 */
-	public static class CustomDocType {
+	static class CustomDocType {
 		@SortableField
 		private long id;
 		@SortableField
-		public int count;
+		int count;
 		@SortableField
 		long members;
 		@SortableField
 		protected long time;
 		CustomNestedType nt;
 
-		public CustomDocType() {
+		CustomDocType() {
 			this.nt = new CustomNestedType();
 		}
 	}
 
-	public static class CustomNestedType {
+	static class CustomNestedType {
 		@SortableField
 		String name;
 		@SortableField(name = "grp")
diff --git a/src/test/java/org/eclipsefoundation/marketplace/namesace/TestUrlParameterNames.java b/src/test/java/org/eclipsefoundation/marketplace/namesace/TestUrlParameterNames.java
new file mode 100644
index 0000000000000000000000000000000000000000..ec1f7a76cd78bc4e1299355189403079996221bf
--- /dev/null
+++ b/src/test/java/org/eclipsefoundation/marketplace/namesace/TestUrlParameterNames.java
@@ -0,0 +1,22 @@
+package org.eclipsefoundation.marketplace.namesace;
+
+import org.eclipsefoundation.core.namespace.UrlParameterName;
+
+public enum TestUrlParameterNames implements UrlParameterName {
+
+	TEST_DATA_EXISTS("test-data-exists");
+	
+	private String parameterName;
+	private TestUrlParameterNames(String parameterName) {
+		this.parameterName = parameterName;
+	}
+	
+	/**
+	 * @return the URL parameters name
+	 */
+	@Override
+	public String getParameterName() {
+		return parameterName;
+	}
+
+}
diff --git a/src/test/java/org/eclipsefoundation/marketplace/resource/CatalogResourceTest.java b/src/test/java/org/eclipsefoundation/marketplace/resource/CatalogResourceTest.java
index 5de2b8deda6ccfb31018abb9e27163f077d5da29..f71ff660f51801e7e43d192f3b0b83af71d16cbf 100644
--- a/src/test/java/org/eclipsefoundation/marketplace/resource/CatalogResourceTest.java
+++ b/src/test/java/org/eclipsefoundation/marketplace/resource/CatalogResourceTest.java
@@ -25,19 +25,19 @@ import io.restassured.http.ContentType;
  * @author Martin Lowe
  */
 @QuarkusTest
-public class CatalogResourceTest {
+class CatalogResourceTest {
 
 	// explicitly use the mock DAO to avoid potential issues with standard DAO
 	@Inject
 	private MockHibernateDao dao;
 
 	@BeforeEach
-	public void cleanDao() {
+	void cleanDao() {
 		dao.init();
 	}
 
 	@Test
-	public void testCatalogs() {
+	void testCatalogs() {
 		given()
 			.when().get("/catalogs")
 				.then()
@@ -45,7 +45,7 @@ public class CatalogResourceTest {
 	}
 	
 	@Test
-	public void testCatalogIdEndpoint() {
+	void testCatalogIdEndpoint() {
 		given()
 			.when().get("/catalogs/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -53,7 +53,7 @@ public class CatalogResourceTest {
 	}
 
 	@Test
-	public void testCatalogIdEndpointNoResults() {
+	void testCatalogIdEndpointNoResults() {
 		given()
 			.param(TestHelper.DATA_EXISTS_PARAM_NAME, false)
 			.when().get("/catalogs/" + TestHelper.SAMPLE_UUID)
@@ -62,7 +62,7 @@ public class CatalogResourceTest {
 	}
 	
 	@Test
-	public void testCatalogIdEndpointInvalidUUID() {
+	void testCatalogIdEndpointInvalidUUID() {
 		given()
 			.when().get("/catalogs/invalid-uuid-string")
 				.then()
@@ -70,7 +70,7 @@ public class CatalogResourceTest {
 	}
 	
 	@Test
-	public void testDeletion() {
+	void testDeletion() {
 		given()
 			.when().delete("/catalogs/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -78,7 +78,7 @@ public class CatalogResourceTest {
 	}
 	
 	@Test
-	public void testDeletionNotExists() {
+	void testDeletionNotExists() {
 		// pass param that tells the mock service that no data should be returned for
 		// this call
 		given()
@@ -89,7 +89,7 @@ public class CatalogResourceTest {
 	}
 	
 	@Test
-	public void testDeletionInvalidUUID() {
+	void testDeletionInvalidUUID() {
 		given()
 			.when().delete("/catalogs/invalid-uuid-string")
 				.then()
@@ -97,7 +97,7 @@ public class CatalogResourceTest {
 	}
 	
 	@Test
-	public void testPutJSON() {
+	void testPutJSON() {
 		// JSON string for a catalog
 		String json = 
 			"{\n" + 
@@ -125,7 +125,7 @@ public class CatalogResourceTest {
 	}
 	
 	@Test
-	public void testPutInvalidJSON() {
+	void testPutInvalidJSON() {
 		// expect bad request response as whole object needs to be posted
 		given()
 			.body("{'id':'" + TestHelper.SAMPLE_UUID + "'}")
diff --git a/src/test/java/org/eclipsefoundation/marketplace/resource/CategoryResourceTest.java b/src/test/java/org/eclipsefoundation/marketplace/resource/CategoryResourceTest.java
index b17082846e85bcf7f28d93ba94e18dcffbc33881..2518c426851273b24102e973a5bd7f86f9792373 100644
--- a/src/test/java/org/eclipsefoundation/marketplace/resource/CategoryResourceTest.java
+++ b/src/test/java/org/eclipsefoundation/marketplace/resource/CategoryResourceTest.java
@@ -25,19 +25,19 @@ import io.restassured.http.ContentType;
  * @author Martin Lowe
  */
 @QuarkusTest
-public class CategoryResourceTest {
+class CategoryResourceTest {
 
 	// explicitly use the mock DAO to avoid potential issues with standard DAO
 	@Inject
 	private MockHibernateDao dao;
 
 	@BeforeEach
-	public void cleanDao() {
+	void cleanDao() {
 		dao.init();
 	}
 
 	@Test
-	public void testCategorys() {
+	void testCategories() {
 		given()
 			.when().get("/categories")
 				.then()
@@ -45,7 +45,7 @@ public class CategoryResourceTest {
 	}
 	
 	@Test
-	public void testCategoryIdEndpoint() {
+	void testCategoryIdEndpoint() {
 		given()
 			.when().get("/categories/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -53,7 +53,7 @@ public class CategoryResourceTest {
 	}
 
 	@Test
-	public void testCategoryIdEndpointNoResults() {
+	void testCategoryIdEndpointNoResults() {
 		given()
 			.param(TestHelper.DATA_EXISTS_PARAM_NAME, false)
 			.when().get("/categories/" + TestHelper.SAMPLE_UUID)
@@ -62,7 +62,7 @@ public class CategoryResourceTest {
 	}
 	
 	@Test
-	public void testCategoryIdEndpointInvalidUUID() {
+	void testCategoryIdEndpointInvalidUUID() {
 		given()
 			.when().get("/categories/invalid-uuid-string")
 				.then()
@@ -70,7 +70,7 @@ public class CategoryResourceTest {
 	}
 	
 	@Test
-	public void testDeletion() {
+	void testDeletion() {
 		given()
 			.when().delete("/categories/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -78,7 +78,7 @@ public class CategoryResourceTest {
 	}
 	
 	@Test
-	public void testDeletionNotExists() {
+	void testDeletionNotExists() {
 		// pass param that tells the mock service that no data should be returned for
 		// this call
 		given()
@@ -89,7 +89,7 @@ public class CategoryResourceTest {
 	}
 	
 	@Test
-	public void testDeletionInvalidUUID() {
+	void testDeletionInvalidUUID() {
 		given()
 			.when().delete("/categories/invalid-uuid-string")
 				.then()
@@ -97,7 +97,7 @@ public class CategoryResourceTest {
 	}
 	
 	// TODO this test is broken, not sure why
-	public void testPutJSON() {
+	void testPutJSON() {
 		// JSON string for a category
 		String json = 
 			"{" + 
@@ -114,7 +114,7 @@ public class CategoryResourceTest {
 	}
 	
 	@Test
-	public void testPutInvalidJSON() {
+	void testPutInvalidJSON() {
 		// expect bad request response as whole object needs to be posted
 		given()
 			.body("{'id':'" + TestHelper.SAMPLE_UUID + "'}")
diff --git a/src/test/java/org/eclipsefoundation/marketplace/resource/ErrorReportResourceTest.java b/src/test/java/org/eclipsefoundation/marketplace/resource/ErrorReportResourceTest.java
index 9f44d7a3376748c743d42b5ad3303e40557c6bbe..6e7f56d2532dc3739584a2e6f56ae8a84f501131 100644
--- a/src/test/java/org/eclipsefoundation/marketplace/resource/ErrorReportResourceTest.java
+++ b/src/test/java/org/eclipsefoundation/marketplace/resource/ErrorReportResourceTest.java
@@ -25,19 +25,19 @@ import io.restassured.http.ContentType;
  * @author Martin Lowe
  */
 @QuarkusTest
-public class ErrorReportResourceTest {
+class ErrorReportResourceTest {
 
 	// explicitly use the mock DAO to avoid potential issues with standard DAO
 	@Inject
 	private MockHibernateDao dao;
 
 	@BeforeEach
-	public void cleanDao() {
+	void cleanDao() {
 		dao.init();
 	}
 
 	@Test
-	public void testErrorReports() {
+	void testErrorReports() {
 		given()
 			.when().get("/error_reports")
 				.then()
@@ -45,7 +45,7 @@ public class ErrorReportResourceTest {
 	}
 	
 	@Test
-	public void testErrorReportIdEndpoint() {
+	void testErrorReportIdEndpoint() {
 		given()
 			.when().get("/error_reports/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -53,7 +53,7 @@ public class ErrorReportResourceTest {
 	}
 
 	@Test
-	public void testErrorReportIdEndpointNoResults() {
+	void testErrorReportIdEndpointNoResults() {
 		given()
 			.param(TestHelper.DATA_EXISTS_PARAM_NAME, false)
 			.when().get("/error_reports/" + TestHelper.SAMPLE_UUID)
@@ -62,7 +62,7 @@ public class ErrorReportResourceTest {
 	}
 	
 	@Test
-	public void testErrorReportIdEndpointInvalidUUID() {
+	void testErrorReportIdEndpointInvalidUUID() {
 		given()
 			.when().get("/error_reports/invalid-uuid-string")
 				.then()
@@ -70,7 +70,7 @@ public class ErrorReportResourceTest {
 	}
 	
 	@Test
-	public void testPostJSON() {
+	void testPostJSON() {
 		// JSON string for an error report
 		String json = 
 			"{\n" + 
@@ -93,7 +93,7 @@ public class ErrorReportResourceTest {
 	}
 	
 	@Test
-	public void testPostInvalidJSON() {
+	void testPostInvalidJSON() {
 		// expect bad request response as whole object needs to be posted
 		given()
 			.body("{'id':'" + TestHelper.SAMPLE_UUID + "'}")
diff --git a/src/test/java/org/eclipsefoundation/marketplace/resource/ListingResourceTest.java b/src/test/java/org/eclipsefoundation/marketplace/resource/ListingResourceTest.java
index cad09957e056b826e36aa70bf6e817b25afc711b..834291bdaffd22161e84b35916742b8c734a77df 100644
--- a/src/test/java/org/eclipsefoundation/marketplace/resource/ListingResourceTest.java
+++ b/src/test/java/org/eclipsefoundation/marketplace/resource/ListingResourceTest.java
@@ -25,19 +25,19 @@ import io.restassured.http.ContentType;
  * @author Martin Lowe
  */
 @QuarkusTest
-public class ListingResourceTest {
+class ListingResourceTest {
 
 	// explicitly use the mock DAO to avoid potential issues with standard DAO
 	@Inject
 	private MockHibernateDao dao;
 
 	@BeforeEach
-	public void cleanDao() {
+	void cleanDao() {
 		dao.init();
 	}
 
 	@Test
-	public void testListings() {
+	void testListings() {
 		given()
 			.when().get("/listings")
 				.then()
@@ -45,7 +45,7 @@ public class ListingResourceTest {
 	}
 	
 	@Test
-	public void testListingIdEndpoint() {
+	void testListingIdEndpoint() {
 		given()
 			.when().get("/listings/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -53,7 +53,7 @@ public class ListingResourceTest {
 	}
 
 	@Test
-	public void testListingIdEndpointNoResults() {
+	void testListingIdEndpointNoResults() {
 		given()
 			.param(TestHelper.DATA_EXISTS_PARAM_NAME, false)
 			.when().get("/listings/" + TestHelper.SAMPLE_UUID)
@@ -62,7 +62,7 @@ public class ListingResourceTest {
 	}
 	
 	@Test
-	public void testListingIdEndpointInvalidUUID() {
+	void testListingIdEndpointInvalidUUID() {
 		given()
 			.when().get("/listings/invalid-uuid-string")
 				.then()
@@ -70,7 +70,7 @@ public class ListingResourceTest {
 	}
 	
 	@Test
-	public void testDeletion() {
+	void testDeletion() {
 		given()
 			.when().delete("/listings/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -78,7 +78,7 @@ public class ListingResourceTest {
 	}
 	
 	@Test
-	public void testDeletionNotExists() {
+	void testDeletionNotExists() {
 		// pass param that tells the mock service that no data should be returned for
 		// this call
 		given()
@@ -89,7 +89,7 @@ public class ListingResourceTest {
 	}
 	
 	@Test
-	public void testDeletionInvalidUUID() {
+	void testDeletionInvalidUUID() {
 		given()
 			.when().delete("/listings/invalid-uuid-string")
 				.then()
@@ -97,7 +97,7 @@ public class ListingResourceTest {
 	}
 	
 	@Test
-	public void testPutJSON() {
+	void testPutJSON() {
 		// JSON string for a listing
 		String json = 
 			"{" + 
@@ -159,7 +159,7 @@ public class ListingResourceTest {
 	}
 	
 	@Test
-	public void testPutInvalidJSON() {
+	void testPutInvalidJSON() {
 		// expect bad request response as whole object needs to be posted
 		given()
 			.body("{'id':'" + TestHelper.SAMPLE_UUID + "'}")
diff --git a/src/test/java/org/eclipsefoundation/marketplace/resource/ListingVersionResourceTest.java b/src/test/java/org/eclipsefoundation/marketplace/resource/ListingVersionResourceTest.java
index c954b9d284c9806612c398852e04dffaad5efba1..f95b1364157626171fad7474b2b9536953ab9b7b 100644
--- a/src/test/java/org/eclipsefoundation/marketplace/resource/ListingVersionResourceTest.java
+++ b/src/test/java/org/eclipsefoundation/marketplace/resource/ListingVersionResourceTest.java
@@ -25,19 +25,19 @@ import io.restassured.http.ContentType;
  * @author Martin Lowe
  */
 @QuarkusTest
-public class ListingVersionResourceTest {
+class ListingVersionResourceTest {
 
 	// explicitly use the mock DAO to avoid potential issues with standard DAO
 	@Inject
 	private MockHibernateDao dao;
 
 	@BeforeEach
-	public void cleanDao() {
+	void cleanDao() {
 		dao.init();
 	}
 
 	@Test
-	public void testListingVersions() {
+	void testListingVersions() {
 		given()
 			.when().get("/listing_versions")
 				.then()
@@ -45,7 +45,7 @@ public class ListingVersionResourceTest {
 	}
 	
 	@Test
-	public void testListingVersionIdEndpoint() {
+	void testListingVersionIdEndpoint() {
 		given()
 			.when().get("/listing_versions/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -53,7 +53,7 @@ public class ListingVersionResourceTest {
 	}
 
 	@Test
-	public void testListingVersionIdEndpointNoResults() {
+	void testListingVersionIdEndpointNoResults() {
 		given()
 			.param(TestHelper.DATA_EXISTS_PARAM_NAME, false)
 			.when().get("/listing_versions/" + TestHelper.SAMPLE_UUID)
@@ -62,7 +62,7 @@ public class ListingVersionResourceTest {
 	}
 	
 	@Test
-	public void testListingVersionIdEndpointInvalidUUID() {
+	void testListingVersionIdEndpointInvalidUUID() {
 		given()
 			.when().get("/listing_versions/invalid-uuid-string")
 				.then()
@@ -70,7 +70,7 @@ public class ListingVersionResourceTest {
 	}
 	
 	@Test
-	public void testDeletion() {
+	void testDeletion() {
 		given()
 			.when().delete("/listing_versions/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -78,7 +78,7 @@ public class ListingVersionResourceTest {
 	}
 	
 	@Test
-	public void testDeletionNotExists() {
+	void testDeletionNotExists() {
 		// pass param that tells the mock service that no data should be returned for
 		// this call
 		given()
@@ -89,7 +89,7 @@ public class ListingVersionResourceTest {
 	}
 	
 	@Test
-	public void testDeletionInvalidUUID() {
+	void testDeletionInvalidUUID() {
 		given()
 			.when().delete("/listing_versions/invalid-uuid-string")
 				.then()
@@ -97,7 +97,7 @@ public class ListingVersionResourceTest {
 	}
 	
 	@Test
-	public void testPutJSON() {
+	void testPutJSON() {
 		// JSON string for a listing version
 		String json = 
 			"{" +
@@ -118,7 +118,7 @@ public class ListingVersionResourceTest {
 	}
 	
 	@Test
-	public void testPutInvalidJSON() {
+	void testPutInvalidJSON() {
 		// expect bad request response as whole object needs to be posted
 		given()
 			.body("{'id':'" + TestHelper.SAMPLE_UUID + "'}")
diff --git a/src/test/java/org/eclipsefoundation/marketplace/resource/MarketResourceTest.java b/src/test/java/org/eclipsefoundation/marketplace/resource/MarketResourceTest.java
index 0d449ed2c01ee8202ac7d79ea247d9c55cdbfdeb..cce47f7f485c573f5170942bb0748105adbc0441 100644
--- a/src/test/java/org/eclipsefoundation/marketplace/resource/MarketResourceTest.java
+++ b/src/test/java/org/eclipsefoundation/marketplace/resource/MarketResourceTest.java
@@ -25,19 +25,19 @@ import io.restassured.http.ContentType;
  * @author Martin Lowe
  */
 @QuarkusTest
-public class MarketResourceTest {
+class MarketResourceTest {
 
 	// explicitly use the mock DAO to avoid potential issues with standard DAO
 	@Inject
 	private MockHibernateDao dao;
 
 	@BeforeEach
-	public void cleanDao() {
+	void cleanDao() {
 		dao.init();
 	}
 
 	@Test
-	public void testMarkets() {
+	void testMarkets() {
 		given()
 			.when().get("/markets")
 				.then()
@@ -45,7 +45,7 @@ public class MarketResourceTest {
 	}
 	
 	@Test
-	public void testMarketIdEndpoint() {
+	void testMarketIdEndpoint() {
 		given()
 			.when().get("/markets/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -53,7 +53,7 @@ public class MarketResourceTest {
 	}
 
 	@Test
-	public void testMarketIdEndpointNoResults() {
+	void testMarketIdEndpointNoResults() {
 		given()
 			.param(TestHelper.DATA_EXISTS_PARAM_NAME, false)
 			.when().get("/markets/" + TestHelper.SAMPLE_UUID)
@@ -62,7 +62,7 @@ public class MarketResourceTest {
 	}
 	
 	@Test
-	public void testMarketIdEndpointInvalidUUID() {
+	void testMarketIdEndpointInvalidUUID() {
 		given()
 			.when().get("/markets/invalid-uuid-string")
 				.then()
@@ -70,7 +70,7 @@ public class MarketResourceTest {
 	}
 	
 	@Test
-	public void testDeletion() {
+	void testDeletion() {
 		given()
 			.when().delete("/markets/" + TestHelper.SAMPLE_UUID)
 				.then()
@@ -78,7 +78,7 @@ public class MarketResourceTest {
 	}
 	
 	@Test
-	public void testDeletionNotExists() {
+	void testDeletionNotExists() {
 		// pass param that tells the mock service that no data should be returned for
 		// this call
 		given()
@@ -89,7 +89,7 @@ public class MarketResourceTest {
 	}
 	
 	@Test
-	public void testDeletionInvalidUUID() {
+	void testDeletionInvalidUUID() {
 		given()
 			.when().delete("/markets/invalid-uuid-string")
 				.then()
@@ -97,7 +97,7 @@ public class MarketResourceTest {
 	}
 	
 	@Test
-	public void testPutJSON() {
+	void testPutJSON() {
 		// JSON string for a market
 		String json = 
 			"{" + 
@@ -114,7 +114,7 @@ public class MarketResourceTest {
 	}
 	
 	@Test
-	public void testPutInvalidJSON() {
+	void testPutInvalidJSON() {
 		// expect bad request response as whole object needs to be posted
 		given()
 			.body("{'id':'" + TestHelper.SAMPLE_UUID + "'}")
diff --git a/src/test/java/org/eclipsefoundation/marketplace/service/impl/GuavaCachingServiceTest.java b/src/test/java/org/eclipsefoundation/marketplace/service/impl/GuavaCachingServiceTest.java
index 9c6c68188e9123b4857e96732e97e2da995d3b6a..301cf3ee5018174d20f352daee2f425e8f70bd14 100644
--- a/src/test/java/org/eclipsefoundation/marketplace/service/impl/GuavaCachingServiceTest.java
+++ b/src/test/java/org/eclipsefoundation/marketplace/service/impl/GuavaCachingServiceTest.java
@@ -6,7 +6,6 @@
  */
 package org.eclipsefoundation.marketplace.service.impl;
 
-import java.util.Collections;
 import java.util.Optional;
 
 import javax.inject.Inject;
@@ -29,7 +28,7 @@ import io.undertow.servlet.spec.HttpServletRequestImpl;
  *
  */
 @QuarkusTest
-public class GuavaCachingServiceTest {
+class GuavaCachingServiceTest {
 
 	@Inject
 	GuavaCachingService<Object> gcs;
@@ -39,7 +38,7 @@ public class GuavaCachingServiceTest {
 	 * Clear the cache before every test
 	 */
 	@BeforeEach
-	public void pre() {
+	void pre() {
 		// inject empty objects into the Request context before creating a mock object
 		ResteasyContext.pushContext(UriInfo.class, new ResteasyUriInfo("", ""));
 
@@ -55,55 +54,55 @@ public class GuavaCachingServiceTest {
 	 * and using javax injection, which is the expected use case.
 	 */
 	@Test
-	public void testCacheInstantiation() {
+	void testCacheInstantiation() {
 		// create a manual object of cache to test instantiation of manual cache object
 		GuavaCachingService<Object> gcsManual = new GuavaCachingService<>();
 
 		// without post construct init via javax management, cache will not be properly
 		// set
-		Assertions.assertTrue(!gcsManual.get("sampleKey", sample, Collections.emptyMap(), Object::new).isPresent(),
+		Assertions.assertTrue(!gcsManual.get("sampleKey", sample, Object::new).isPresent(),
 				"Object should not be generated when there is no cache initialized");
 
 		// initialize the cache w/ configs
 		gcsManual.init();
 
 		// run a command to interact with cache
-		Assertions.assertTrue(gcsManual.get("sampleKey", sample, Collections.emptyMap(), Object::new).isPresent(),
+		Assertions.assertTrue(gcsManual.get("sampleKey", sample, Object::new).isPresent(),
 				"Object should be generated once cache is instantiated");
 
 		// test the injected cache service (which is the normal use case)
-		Assertions.assertTrue(gcs.get("sampleKey", sample, Collections.emptyMap(), Object::new).isPresent(),
+		Assertions.assertTrue(gcs.get("sampleKey", sample, Object::new).isPresent(),
 				"Object should be generated once cache is instantiated");
 	}
 
 	@Test
-	public void testGet() {
+	void testGet() {
 		Object cachableObject = new Object();
 		String key = "k";
 
 		// get the cached obj from a fresh cache
-		Optional<Object> cachedObj = gcs.get(key, sample, Collections.emptyMap(), () -> cachableObject);
+		Optional<Object> cachedObj = gcs.get(key, sample, () -> cachableObject);
 
 		Assertions.assertTrue(cachedObj.isPresent());
 		Assertions.assertEquals(cachableObject, cachedObj.get());
 	}
 
 	@Test
-	public void testGetNullCallable() {
+	void testGetNullCallable() {
 		Assertions.assertThrows(NullPointerException.class, () -> {
-			gcs.get("key", sample, Collections.emptyMap(), null);
+			gcs.get("key", sample, null);
 		});
 	}
 
 	@Test
-	public void testGetNullCallableResult() {
-		Optional<Object> emptyObj = gcs.get("failure key", sample, Collections.emptyMap(), () -> null);
+	void testGetNullCallableResult() {
+		Optional<Object> emptyObj = gcs.get("failure key", sample, () -> null);
 		Assertions.assertFalse(emptyObj.isPresent());
 	}
 
 	@Test
-	public void testGetExceptionalCallable() {
-		Optional<Object> emptyObj = gcs.get("k", sample, Collections.emptyMap(), () -> {
+	void testGetExceptionalCallable() {
+		Optional<Object> emptyObj = gcs.get("k", sample, () -> {
 			throw new IllegalStateException();
 		});
 		Assertions.assertFalse(emptyObj.isPresent());
diff --git a/src/test/resources/application.properties b/src/test/resources/application.properties
index 3e2d2e91e138136e454048273fe32598fe428481..ee61c1d62a806d648656ccf02b9cd062c76061d0 100644
--- a/src/test/resources/application.properties
+++ b/src/test/resources/application.properties
@@ -13,7 +13,6 @@ quarkus.datasource.username = root
 quarkus.datasource.password = my-secret-pw
 quarkus.datasource.min-size = 5
 quarkus.datasource.max-size = 15
-quarkus.hibernate-orm.physical-naming-strategy=org.eclipsefoundation.marketplace.config.DatabaseNamingStrategy
 eclipse.db.default.limit=25
 eclipse.db.default.limit.max=100