package com.avaje.ebean;

import com.avaje.ebeanservice.docstore.api.DocQueryRequest;
import org.jetbrains.annotations.Nullable;

import java.io.IOException;
import java.util.List;

/**
 * Document storage operations.
 */
public interface DocumentStore {

  /**
   * Update the associated document store using the result of the query.
   * <p>
   * This will execute the query against the database creating a document for each
   * bean graph and sending this to the document store.
   * </p>
   * <p>
   * Note that the select and fetch paths of the query are set for you to match the
   * document structure needed based on <code>@DocStore</code> and <code>@DocStoreEmbedded</code>
   * so what this query requires is the predicates only.
   * </p>
   * <p>
   * This query will be executed using findEach so it is safe to use a query
   * that will fetch a lot of beans. The default bulkBatchSize is used.
   * </p>
   *
   * @param query The query that selects objects to send to the document store.
   */
  <T> void indexByQuery(Query<T> query);

  /**
   * Update the associated document store index using the result of the query additionally specifying a
   * bulkBatchSize to use for sending the messages to ElasticSearch.
   *
   * @param query         The query that selects objects to send to the document store.
   * @param bulkBatchSize The batch size to use when bulk sending to the document store.
   */
  <T> void indexByQuery(Query<T> query, int bulkBatchSize);

  /**
   * Update the document store for all beans of this type.
   * <p>
   * This is the same as indexByQuery where the query has no predicates and so fetches all rows.
   * </p>
   *
   * @param beanType The type of bean whose documents are all (re)indexed.
   */
  void indexAll(Class<?> beanType);

  /**
   * Return the bean by fetching its content from the document store.
   * If the document is not found null is returned.
   * <p>
   * Typically this is called indirectly by findUnique() on the query.
   * </p>
   * <pre>{@code
   *
   * Customer customer =
   *   server.find(Customer.class)
   *     .setUseDocStore(true)
   *     .setId(42)
   *     .findUnique();
   *
   * }</pre>
   *
   * @param request The doc store query request (wraps the id based query).
   * @return The bean or null if the document was not found.
   */
  @Nullable
  <T> T find(DocQueryRequest<T> request);

  /**
   * Execute the find list query. This request is prepared to execute secondary queries.
   * <p>
   * Typically this is called indirectly by findList() on the query that has setUseDocStore(true).
   * </p>
   * <pre>{@code
   *
   * List<Customer> newCustomers =
   *   server.find(Customer.class)
   *     .setUseDocStore(true)
   *     .where().eq("status", Customer.Status.NEW)
   *     .findList();
   *
   * }</pre>
   *
   * @param request The doc store query request to execute.
   * @return The list of matching beans (empty if none match).
   */
  <T> List<T> findList(DocQueryRequest<T> request);

  /**
   * Execute the query against the document store returning the paged list.
   * <p>
   * The query should have <code>firstRow</code> or <code>maxRows</code> set prior to calling this method.
   * </p>
   * <p>
   * Typically this is called indirectly by findPagedList() on the query that has setUseDocStore(true).
   * </p>
   *
   * <pre>{@code
   *
   * PagedList<Customer> newCustomers =
   *   server.find(Customer.class)
   *     .setUseDocStore(true)
   *     .where().eq("status", Customer.Status.NEW)
   *     .setMaxRows(50)
   *     .findPagedList();
   *
   * }</pre>
   *
   * @param request The doc store query request to execute.
   * @return The page of matching beans.
   */
  <T> PagedList<T> findPagedList(DocQueryRequest<T> request);

  /**
   * Execute the query against the document store with the expectation of a large set of results
   * that are processed in a scrolling resultSet fashion.
   * <p>
   * For example, with the ElasticSearch doc store this uses SCROLL.
   * </p>
   * <p>
   * Typically this is called indirectly by findEach() on the query that has setUseDocStore(true).
   * </p>
   *
   * <pre>{@code
   *
   *  server.find(Order.class)
   *    .setUseDocStore(true)
   *    .where()... // perhaps add predicates
   *    .findEach(new QueryEachConsumer<Order>() {
   *      @Override
   *      public void accept(Order bean) {
   *        // process the bean
   *      }
   *    });
   *
   * }</pre>
   *
   * @param query    The doc store query request to execute.
   * @param consumer Consumer invoked for each matching bean.
   */
  <T> void findEach(DocQueryRequest<T> query, QueryEachConsumer<T> consumer);

  /**
   * Execute the query against the document store with the expectation of a large set of results
   * that are processed in a scrolling resultSet fashion.
   * <p>
   * Unlike findEach() this provides the opportunity to stop iterating through the large query.
   * </p>
   * <p>
   * For example, with the ElasticSearch doc store this uses SCROLL.
   * </p>
   * <p>
   * Typically this is called indirectly by findEachWhile() on the query that has setUseDocStore(true).
   * </p>
   *
   * <pre>{@code
   *
   *  server.find(Order.class)
   *    .setUseDocStore(true)
   *    .where()... // perhaps add predicates
   *    .findEachWhile(new QueryEachWhileConsumer<Order>() {
   *      @Override
   *      public void accept(Order bean) {
   *        // process the bean
   *
   *        // return true to continue, false to stop
   *        // boolean shouldContinue = ...
   *        return shouldContinue;
   *      }
   *    });
   *
   * }</pre>
   *
   * @param query    The doc store query request to execute.
   * @param consumer Consumer invoked for each matching bean; returns false to stop iteration.
   */
  <T> void findEachWhile(DocQueryRequest<T> query, QueryEachWhileConsumer<T> consumer);

  /**
   * Process the queue entries sending updates to the document store or queuing them for later processing.
   *
   * @param queueEntries The queue entries to process.
   * @return The number of entries processed.
   * @throws IOException If sending to the document store fails.
   */
  long process(List<DocStoreQueueEntry> queueEntries) throws IOException;

  /**
   * Drop the index from the document store (similar to DDL drop table).
   *
   * <pre>{@code
   *
   *   DocumentStore documentStore = server.docStore();
   *
   *   documentStore.dropIndex("product_copy");
   *
   * }</pre>
   *
   * @param indexName The name of the index to drop.
   */
  void dropIndex(String indexName);

  /**
   * Create an index given a mapping file as a resource in the classPath (similar to DDL create table).
   *
   * <pre>{@code
   *
   *   DocumentStore documentStore = server.docStore();
   *
   *   // uses product_copy.mapping.json resource
   *   // ... to define mappings for the index
   *
   *   documentStore.createIndex("product_copy", null);
   *
   * }</pre>
   *
   * @param indexName the name of the new index
   * @param alias     the alias of the index
   */
  void createIndex(String indexName, String alias);

  /**
   * Copy the index to a new index.
   * <p>
   * This copy process does not use the database but instead will copy from the source index to a destination index.
   * </p>
   *
   * <pre>{@code
   *
   *  long copyCount = documentStore.copyIndex(Product.class, "product_copy");
   *
   * }</pre>
   *
   * @param beanType The bean type of the source index
   * @param newIndex The name of the index to copy to
   * @return the number of documents copied to the new index
   */
  long copyIndex(Class<?> beanType, String newIndex);

  /**
   * Copy entries from an index to a new index but limiting to documents that have been
   * modified since the sinceEpochMillis time.
   * <p>
   * To support this the document needs to have a <code>@WhenModified</code> property.
   * </p>
   *
   * <pre>{@code
   *
   *  long copyCount = documentStore.copyIndex(Product.class, "product_copy", sinceMillis);
   *
   * }</pre>
   *
   * @param beanType         The bean type of the source index
   * @param newIndex         The name of the index to copy to
   * @param sinceEpochMillis Only copy documents modified after this epoch millis time
   * @return the number of documents copied to the new index
   */
  long copyIndex(Class<?> beanType, String newIndex, long sinceEpochMillis);

  /**
   * Copy from a source index to a new index taking only the documents
   * matching the given query.
   *
   * <pre>{@code
   *
   *  // predicates to select the source documents to copy
   *  Query<Product> query = server.find(Product.class)
   *    .where()
   *      .ge("whenModified", new Timestamp(since))
   *      .ge("name", "A")
   *      .lt("name", "D")
   *      .query();
   *
   *  // copy from the source index to "product_copy" index
   *  long copyCount = documentStore.copyIndex(query, "product_copy", 1000);
   *
   * }</pre>
   *
   * @param query         The query to select the source documents to copy
   * @param newIndex      The target index to copy the documents to
   * @param bulkBatchSize The ElasticSearch bulk batch size, if 0 uses the default.
   * @return The number of documents copied to the new index.
   */
  long copyIndex(Query<?> query, String newIndex, int bulkBatchSize);
}