Mirror of https://github.com/meilisearch/meilisearch.git (synced 2024-11-22 18:17:39 +08:00)

Commit aa50fcb1f0: Merge branch 'main' into stable

.github/ISSUE_TEMPLATE/bug_report.md (vendored, 4 changed lines)

@@ -23,8 +23,8 @@ A clear and concise description of what you expected to happen.
 **Screenshots**
 If applicable, add screenshots to help explain your problem.

-**MeiliSearch version:** [e.g. v0.20.0]
+**Meilisearch version:** [e.g. v0.20.0]

 **Additional context**
 Additional information that may be relevant to the issue.
 [e.g. architecture, device, OS, browser]

.github/ISSUE_TEMPLATE/config.yml (vendored, 2 changed lines)

@@ -6,5 +6,5 @@ contact_links:
     url: https://github.com/meilisearch/documentation/issues/new
     about: For documentation issues, open an issue or a PR in the documentation repository
   - name: Support questions & other
-    url: https://github.com/meilisearch/MeiliSearch/discussions/new
+    url: https://github.com/meilisearch/meilisearch/discussions/new
     about: For any other question, open a discussion in this repository

.github/is-latest-release.sh (vendored, 2 changed lines)

@@ -74,7 +74,7 @@ semverLT() {
 # Returns the tag of the latest stable release (in terms of semver and not of release date)
 get_latest() {
     temp_file='temp_file' # temp_file needed because the grep would start before the download is over
-    curl -s 'https://api.github.com/repos/meilisearch/MeiliSearch/releases' > "$temp_file"
+    curl -s 'https://api.github.com/repos/meilisearch/meilisearch/releases' > "$temp_file"
     releases=$(cat "$temp_file" | \
         grep -E "tag_name|draft|prerelease" \
         | tr -d ',"' | cut -d ':' -f2 | tr -d ' ')

.github/workflows/README.md (vendored, 2 changed lines)

@@ -1,4 +1,4 @@
-# GitHub Actions Workflow for MeiliSearch
+# GitHub Actions Workflow for Meilisearch

 > **Note:**


CONTRIBUTING.md

@@ -1,7 +1,10 @@
 # Contributing

-First, thank you for contributing to MeiliSearch! The goal of this document is to provide everything you need to start contributing to MeiliSearch.
+First, thank you for contributing to Meilisearch! The goal of this document is to provide everything you need to start contributing to Meilisearch.

+Remember that there are many ways to contribute other than writing code: writing [tutorials or blog posts](https://github.com/meilisearch/awesome-meilisearch), improving [the documentation](https://github.com/meilisearch/documentation), submitting [bug reports](https://github.com/meilisearch/meilisearch/issues/new?assignees=&labels=&template=bug_report.md&title=) and [feature requests](https://github.com/meilisearch/product/discussions/categories/feedback-feature-proposal)...
+
+## Table of Contents
 - [Assumptions](#assumptions)
 - [How to Contribute](#how-to-contribute)
 - [Development Workflow](#development-workflow)

@@ -10,8 +13,8 @@ First, thank you for contributing to MeiliSearch! The goal of this document is t
 ## Assumptions

 1. **You're familiar with [Github](https://github.com) and the [Pull Requests](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests)(PR) workflow.**
-2. **You've read the MeiliSearch [documentation](https://docs.meilisearch.com).**
-3. **You know about the [MeiliSearch community](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html).
+2. **You've read the Meilisearch [documentation](https://docs.meilisearch.com).**
+3. **You know about the [Meilisearch community](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html).
 Please use this for help.**

 ## How to Contribute

@@ -19,21 +22,21 @@ First, thank you for contributing to MeiliSearch! The goal of this document is t
 1. Ensure your change has an issue! Find an
    [existing issue](https://github.com/meilisearch/meilisearch/issues/) or [open a new issue](https://github.com/meilisearch/meilisearch/issues/new).
    * This is where you can get a feel if the change will be accepted or not.
-2. Once approved, [fork the MeiliSearch repository](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) in your own Github account.
+2. Once approved, [fork the Meilisearch repository](https://help.github.com/en/github/getting-started-with-github/fork-a-repo) in your own Github account.
 3. [Create a new Git branch](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-and-deleting-branches-within-your-repository)
 4. Review the [Development Workflow](#development-workflow) section that describes the steps to maintain the repository.
 5. Make your changes on your branch.
-6. [Submit the branch as a Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) pointing to the `main` branch of the MeiliSearch repository. A maintainer should comment and/or review your Pull Request within a few days. Although depending on the circumstances, it may take longer.
+6. [Submit the branch as a Pull Request](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request-from-a-fork) pointing to the `main` branch of the Meilisearch repository. A maintainer should comment and/or review your Pull Request within a few days. Although depending on the circumstances, it may take longer.

 ## Development Workflow

-### Setup and run MeiliSearch
+### Setup and run Meilisearch

 ```bash
 cargo run --release
 ```

-We recommend using the `--release` flag to test the full performance of MeiliSearch.
+We recommend using the `--release` flag to test the full performance of Meilisearch.

 ### Test


Cargo.lock (generated, 1019 changed lines)
File diff suppressed because it is too large.

Cargo.toml

@@ -5,5 +5,3 @@ members = [
     "meilisearch-lib",
     "meilisearch-auth",
 ]
-
-resolver = "2"

README.md (42 changed lines)

@@ -1,8 +1,8 @@
 <p align="center">
-  <img src="assets/logo.svg" alt="MeiliSearch" width="200" height="200" />
+  <img src="assets/logo.svg" alt="Meilisearch" width="200" height="200" />
 </p>

-<h1 align="center">MeiliSearch</h1>
+<h1 align="center">Meilisearch</h1>

 <h4 align="center">
   <a href="https://www.meilisearch.com">Website</a> |

@@ -15,17 +15,17 @@
 </h4>

 <p align="center">
-  <a href="https://github.com/meilisearch/MeiliSearch/actions"><img src="https://github.com/meilisearch/MeiliSearch/workflows/Cargo%20test/badge.svg" alt="Build Status"></a>
-  <a href="https://deps.rs/repo/github/meilisearch/MeiliSearch"><img src="https://deps.rs/repo/github/meilisearch/MeiliSearch/status.svg" alt="Dependency status"></a>
-  <a href="https://github.com/meilisearch/MeiliSearch/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-informational" alt="License"></a>
-  <a href="https://slack.meilisearch.com"><img src="https://img.shields.io/badge/slack-MeiliSearch-blue.svg?logo=slack" alt="Slack"></a>
-  <a href="https://github.com/meilisearch/MeiliSearch/discussions" alt="Discussions"><img src="https://img.shields.io/badge/github-discussions-red" /></a>
+  <a href="https://github.com/meilisearch/meilisearch/actions"><img src="https://github.com/meilisearch/meilisearch/workflows/Cargo%20test/badge.svg" alt="Build Status"></a>
+  <a href="https://deps.rs/repo/github/meilisearch/meilisearch"><img src="https://deps.rs/repo/github/meilisearch/meilisearch/status.svg" alt="Dependency status"></a>
+  <a href="https://github.com/meilisearch/meilisearch/blob/main/LICENSE"><img src="https://img.shields.io/badge/license-MIT-informational" alt="License"></a>
+  <a href="https://slack.meilisearch.com"><img src="https://img.shields.io/badge/slack-meilisearch-blue.svg?logo=slack" alt="Slack"></a>
+  <a href="https://github.com/meilisearch/meilisearch/discussions" alt="Discussions"><img src="https://img.shields.io/badge/github-discussions-red" /></a>
   <a href="https://app.bors.tech/repositories/26457"><img src="https://bors.tech/images/badge_small.svg" alt="Bors enabled"></a>
 </p>

 <p align="center">⚡ Lightning Fast, Ultra Relevant, and Typo-Tolerant Search Engine 🔍</p>

-**MeiliSearch** is a powerful, fast, open-source, easy to use and deploy search engine. Both searching and indexing are highly customizable. Features such as typo-tolerance, filters, and synonyms are provided out-of-the-box.
+**Meilisearch** is a powerful, fast, open-source, easy to use and deploy search engine. Both searching and indexing are highly customizable. Features such as typo-tolerance, filters, and synonyms are provided out-of-the-box.
 For more information about features go to [our documentation](https://docs.meilisearch.com/).

 <p align="center">

@@ -61,13 +61,13 @@ meilisearch
 docker run -p 7700:7700 -v "$(pwd)/data.ms:/data.ms" getmeili/meilisearch
 ```

-#### Announcing a cloud-hosted MeiliSearch
+#### Announcing a cloud-hosted Meilisearch

 Join the closed beta by filling out this [form](https://meilisearch.typeform.com/to/FtnzvZfh).

-#### Try MeiliSearch in our Sandbox
+#### Try Meilisearch in our Sandbox

-Create a MeiliSearch instance in [MeiliSearch Sandbox](https://sandbox.meilisearch.com/). This instance is free, and will be active for 48 hours.
+Create a Meilisearch instance in [Meilisearch Sandbox](https://sandbox.meilisearch.com/). This instance is free, and will be active for 48 hours.

 #### Run on Digital Ocean


@@ -99,8 +99,8 @@ curl -L https://install.meilisearch.com | sh
 If you have the latest stable Rust toolchain installed on your local system, clone the repository and change it to your working directory.

 ```bash
-git clone https://github.com/meilisearch/MeiliSearch.git
-cd MeiliSearch
+git clone https://github.com/meilisearch/meilisearch.git
+cd meilisearch
 cargo run --release
 ```


@@ -161,19 +161,19 @@ curl 'http://127.0.0.1:7700/indexes/movies/search?q=botman+robin&limit=2' | jq

 #### Use the Web Interface

-We also deliver an **out-of-the-box [web interface](https://github.com/meilisearch/mini-dashboard)** in which you can test MeiliSearch interactively.
+We also deliver an **out-of-the-box [web interface](https://github.com/meilisearch/mini-dashboard)** in which you can test Meilisearch interactively.

-You can access the web interface in your web browser at the root of the server. The default URL is [http://127.0.0.1:7700](http://127.0.0.1:7700). All you need to do is open your web browser and enter MeiliSearch’s address to visit it. This will lead you to a web page with a search bar that will allow you to search in the selected index.
+You can access the web interface in your web browser at the root of the server. The default URL is [http://127.0.0.1:7700](http://127.0.0.1:7700). All you need to do is open your web browser and enter Meilisearch’s address to visit it. This will lead you to a web page with a search bar that will allow you to search in the selected index.

 [See the gif above](#demo)

 ## Documentation

-Now that your MeiliSearch server is up and running, you can learn more about how to tune your search engine in [the documentation](https://docs.meilisearch.com).
+Now that your Meilisearch server is up and running, you can learn more about how to tune your search engine in [the documentation](https://docs.meilisearch.com).

 ## Contributing

-Hey! We're glad you're thinking about contributing to MeiliSearch! Feel free to pick an [issue labeled as `good first issue`](https://github.com/meilisearch/MeiliSearch/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22), and to ask any question you need. Some points might not be clear and we are available to help you!
+Hey! We're glad you're thinking about contributing to Meilisearch! Feel free to pick an [issue labeled as `good first issue`](https://github.com/meilisearch/meilisearch/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22), and to ask any question you need. Some points might not be clear and we are available to help you!

 Also, we recommend following the [CONTRIBUTING](./CONTRIBUTING.md) to create your PR.


@@ -184,8 +184,8 @@ The code in this repository is only concerned with managing multiple indexes, ha
 Search and indexation are the domain of our core engine, [`milli`](https://github.com/meilisearch/milli), while tokenization is handled by [our `tokenizer` library](https://github.com/meilisearch/tokenizer/).
 ## Telemetry

-MeiliSearch collects anonymous data regarding general usage.
-This helps us better understand developers' usage of MeiliSearch features.
+Meilisearch collects anonymous data regarding general usage.
+This helps us better understand developers' usage of Meilisearch features.

 To find out more on what information we're retrieving, please see our documentation on [Telemetry](https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html).


@@ -193,7 +193,7 @@ This program is optional, you can disable these analytics by using the `MEILI_NO

 ## Feature request

-The feature requests are not managed in this repository. Please visit our [dedicated repository](https://github.com/meilisearch/product) to see our work about the MeiliSearch product.
+The feature requests are not managed in this repository. Please visit our [dedicated repository](https://github.com/meilisearch/product) to see our work about the Meilisearch product.

 If you have a feature request or any feedback about an existing feature, please open [a discussion](https://github.com/meilisearch/product/discussions).
 Also, feel free to participate in the current discussions, we are looking forward to reading your comments.

@@ -202,4 +202,4 @@ Also, feel free to participate in the current discussions, we are looking forwar

 Please visit [this page](https://docs.meilisearch.com/learn/what_is_meilisearch/contact.html#contact-us).

-MeiliSearch is developed by [Meili](https://www.meilisearch.com), a young company. To know more about us, you can [read our blog](https://blog.meilisearch.com). Any suggestion or feedback is highly appreciated. Thank you for your support!
+Meilisearch is developed by [Meili](https://www.meilisearch.com), a young company. To know more about us, you can [read our blog](https://blog.meilisearch.com). Any suggestion or feedback is highly appreciated. Thank you for your support!

SECURITY.md

@@ -1,16 +1,16 @@
 # Security

-MeiliSearch takes the security of our software products and services seriously.
+Meilisearch takes the security of our software products and services seriously.

-If you believe you have found a security vulnerability in any MeiliSearch-owned repository, please report it to us as described below.
+If you believe you have found a security vulnerability in any Meilisearch-owned repository, please report it to us as described below.

 ## Suported versions

-As long as we are pre-v1.0, only the latest version of MeiliSearch will be supported with security updates.
+As long as we are pre-v1.0, only the latest version of Meilisearch will be supported with security updates.

 ## Reporting security issues

 ⚠️ Please do not report security vulnerabilities through public GitHub issues. ⚠️

 Instead, please kindly email us at security@meilisearch.com


assets/logo.svg

@@ -1,17 +1,19 @@
-<svg width="360" height="360" viewBox="0 0 360 360" fill="none" xmlns="http://www.w3.org/2000/svg">
-<g id="logo_main">
-<rect id="Rectangle" x="107.333" y="0.150146" width="274.315" height="274.315" rx="98.8334" transform="rotate(23 107.333 0.150146)" fill="url(#paint0_linear)"/>
-<path id="Rectangle_2" fill-rule="evenodd" clip-rule="evenodd" d="M61.3296 230.199C46.2224 194.608 38.6688 176.813 38.208 160.329C37.5286 136.025 47.0175 112.539 64.3891 95.5282C76.1718 83.9904 93.9669 76.4368 129.557 61.3296C165.147 46.2224 182.943 38.6688 199.427 38.208C223.731 37.5286 247.217 47.0175 264.228 64.3891C275.766 76.1718 283.319 93.9669 298.426 129.557C313.534 165.147 321.087 182.943 321.548 199.427C322.227 223.731 312.738 247.217 295.367 264.228C283.584 275.766 265.789 283.319 230.199 298.426C194.608 313.534 176.813 321.087 160.329 321.548C136.025 322.227 112.539 312.738 95.5282 295.367C83.9903 283.584 76.4368 265.789 61.3296 230.199Z" fill="url(#paint1_linear)"/>
-<path id="m" fill-rule="evenodd" clip-rule="evenodd" d="M219.568 130.748C242.363 130.748 259.263 147.451 259.263 174.569V229.001H227.232V179.678C227.232 166.119 220.747 159.634 210.136 159.634C205.223 159.634 200.311 161.796 195.595 167.494C195.791 169.852 195.988 172.21 195.988 174.569V229.001H164.154V179.678C164.154 166.119 157.472 159.634 147.057 159.634C142.145 159.634 137.429 161.992 132.712 168.084V229.001H100.878V133.695H132.712V139.394C139.197 133.892 145.878 130.748 156.49 130.748C168.477 130.748 178.695 135.267 185.769 143.52C195.791 134.678 205.42 130.748 219.568 130.748Z" fill="white"/>
-</g>
+<svg width="300" height="300" viewBox="0 0 300 300" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M0 237L55.426 96.7678C63.2367 77.0063 82.499 64 103.955 64H137.371L81.9447 204.232C74.1341 223.993 54.8717 237 33.4156 237H0Z" fill="url(#paint0_linear_1_898)"/>
+<path d="M81.3123 237L136.738 96.7682C144.549 77.0067 163.811 64.0004 185.267 64.0004H218.683L163.257 204.232C155.446 223.994 136.184 237 114.728 237H81.3123Z" fill="url(#paint1_linear_1_898)"/>
+<path d="M162.629 237L218.055 96.7682C225.866 77.0067 245.128 64.0004 266.584 64.0004H300L244.574 204.232C236.763 223.994 217.501 237 196.045 237H162.629Z" fill="url(#paint2_linear_1_898)"/>
 <defs>
-<linearGradient id="paint0_linear" x1="-13.6248" y1="129.208" x2="244.49" y2="403.522" gradientUnits="userSpaceOnUse">
-<stop stop-color="#E41359"/>
-<stop offset="1" stop-color="#F23C79"/>
+<linearGradient id="paint0_linear_1_898" x1="300.001" y1="50.7858" x2="1.63474" y2="221.244" gradientUnits="userSpaceOnUse">
+<stop stop-color="#FF5CAA"/>
+<stop offset="1" stop-color="#FF4E62"/>
 </linearGradient>
-<linearGradient id="paint1_linear" x1="11.0088" y1="111.65" x2="111.65" y2="348.747" gradientUnits="userSpaceOnUse">
-<stop stop-color="#24222F"/>
-<stop offset="1" stop-color="#2B2937"/>
+<linearGradient id="paint1_linear_1_898" x1="300.001" y1="50.7858" x2="1.63474" y2="221.244" gradientUnits="userSpaceOnUse">
+<stop stop-color="#FF5CAA"/>
+<stop offset="1" stop-color="#FF4E62"/>
+</linearGradient>
+<linearGradient id="paint2_linear_1_898" x1="300.001" y1="50.7858" x2="1.63474" y2="221.244" gradientUnits="userSpaceOnUse">
+<stop stop-color="#FF5CAA"/>
+<stop offset="1" stop-color="#FF4E62"/>
 </linearGradient>
 </defs>
 </svg>

(File size before: 2.0 KiB, after: 1.3 KiB.)

download-latest.sh

@@ -74,9 +74,9 @@ get_latest() {
     temp_file='temp_file' # temp_file needed because the grep would start before the download is over

     if [ -z "$GITHUB_PAT" ]; then
-        curl -s 'https://api.github.com/repos/meilisearch/MeiliSearch/releases' > "$temp_file" || return 1
+        curl -s 'https://api.github.com/repos/meilisearch/meilisearch/releases' > "$temp_file" || return 1
     else
-        curl -H "Authorization: token $GITHUB_PAT" -s 'https://api.github.com/repos/meilisearch/MeiliSearch/releases' > "$temp_file" || return 1
+        curl -H "Authorization: token $GITHUB_PAT" -s 'https://api.github.com/repos/meilisearch/meilisearch/releases' > "$temp_file" || return 1
     fi

     releases=$(cat "$temp_file" | \

@@ -161,7 +161,7 @@ get_archi() {
 }

 success_usage() {
-    printf "$GREEN%s\n$DEFAULT" "MeiliSearch $latest binary successfully downloaded as '$binary_name' file."
+    printf "$GREEN%s\n$DEFAULT" "Meilisearch $latest binary successfully downloaded as '$binary_name' file."
     echo ''
     echo 'Run it:'
     echo ' $ ./meilisearch'

@@ -170,7 +170,7 @@ success_usage() {
 }

 failure_usage() {
-    printf "$RED%s\n$DEFAULT" 'ERROR: MeiliSearch binary is not available for your OS distribution or your architecture yet.'
+    printf "$RED%s\n$DEFAULT" 'ERROR: Meilisearch binary is not available for your OS distribution or your architecture yet.'
     echo ''
     echo 'However, you can easily compile the binary from the source files.'
     echo 'Follow the steps at the page ("Source" tab): https://docs.meilisearch.com/learn/getting_started/installation.html'

@@ -181,8 +181,8 @@ latest="$(get_latest)"

 if [ "$latest" = '' ]; then
     echo ''
-    echo 'Impossible to get the latest stable version of MeiliSearch.'
-    echo 'Please let us know about this issue: https://github.com/meilisearch/MeiliSearch/issues/new/choose'
+    echo 'Impossible to get the latest stable version of Meilisearch.'
+    echo 'Please let us know about this issue: https://github.com/meilisearch/meilisearch/issues/new/choose'
     exit 1
 fi

@@ -196,7 +196,7 @@ if ! get_archi; then
     exit 1
 fi

-echo "Downloading MeiliSearch binary $latest for $os, architecture $archi..."
+echo "Downloading Meilisearch binary $latest for $os, architecture $archi..."
 case "$os" in
     'windows')
         release_file="meilisearch-$os-$archi.exe"

@@ -208,7 +208,7 @@ case "$os" in
         binary_name='meilisearch'

 esac
-link="https://github.com/meilisearch/MeiliSearch/releases/download/$latest/$release_file"
+link="https://github.com/meilisearch/meilisearch/releases/download/$latest/$release_file"
 curl -OL "$link"
 mv "$release_file" "$binary_name"
 chmod 744 "$binary_name"

meilisearch-auth/Cargo.toml

@@ -1,7 +1,7 @@
 [package]
 name = "meilisearch-auth"
 version = "0.25.0"
-edition = "2018"
+edition = "2021"

 [dependencies]
 enum-iterator = "0.7.0"

meilisearch-auth/src/store.rs

@@ -6,6 +6,7 @@ use std::convert::TryInto;
 use std::fs::create_dir_all;
 use std::path::Path;
 use std::str;
+use std::sync::Arc;

 use chrono::{DateTime, Utc};
 use heed::types::{ByteSlice, DecodeIgnore, SerdeJson};

@@ -24,11 +25,19 @@ pub type KeyId = [u8; KEY_ID_LENGTH];

 #[derive(Clone)]
 pub struct HeedAuthStore {
-    env: Env,
+    env: Arc<Env>,
     keys: Database<ByteSlice, SerdeJson<Key>>,
     action_keyid_index_expiration: Database<KeyIdActionCodec, SerdeJson<Option<DateTime<Utc>>>>,
 }

+impl Drop for HeedAuthStore {
+    fn drop(&mut self) {
+        if Arc::strong_count(&self.env) == 1 {
+            self.env.as_ref().clone().prepare_for_closing();
+        }
+    }
+}
+
 impl HeedAuthStore {
     pub fn new(path: impl AsRef<Path>) -> Result<Self> {
         let path = path.as_ref().join(AUTH_DB_PATH);

@@ -36,7 +45,7 @@ impl HeedAuthStore {
         let mut options = EnvOpenOptions::new();
         options.map_size(AUTH_STORE_SIZE); // 1GB
         options.max_dbs(2);
-        let env = options.open(path)?;
+        let env = Arc::new(options.open(path)?);
         let keys = env.create_database(Some(KEY_DB_NAME))?;
         let action_keyid_index_expiration =
             env.create_database(Some(KEY_ID_ACTION_INDEX_EXPIRATION_DB_NAME))?;
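
A note on the `Drop` implementation added above: the heed `Env` now lives behind an `Arc`, so every clone of the store shares one LMDB environment, and only the last surviving handle closes it. A minimal self-contained sketch of the pattern, with a hypothetical stand-in `Env` type in place of heed's:

```rust
use std::sync::Arc;

// Stand-in for heed::Env, purely for illustration.
#[derive(Clone)]
struct Env;

impl Env {
    // In heed, `prepare_for_closing` consumes the Env and closes the
    // underlying LMDB environment once every copy is gone.
    fn prepare_for_closing(self) {
        println!("closing LMDB environment");
    }
}

#[derive(Clone)]
struct Store {
    env: Arc<Env>,
}

impl Drop for Store {
    fn drop(&mut self) {
        // Only the holder of the last Arc triggers the close; any other
        // live clone keeps the strong count above one.
        if Arc::strong_count(&self.env) == 1 {
            self.env.as_ref().clone().prepare_for_closing();
        }
    }
}

fn main() {
    let a = Store { env: Arc::new(Env) };
    let b = a.clone();
    drop(a); // strong count is still 2 here, nothing happens
    drop(b); // last handle: prints "closing LMDB environment"
}
```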

meilisearch-error/Cargo.toml

@@ -2,11 +2,10 @@
 name = "meilisearch-error"
 version = "0.25.2"
 authors = ["marin <postma.marin@protonmail.com>"]
-edition = "2018"
+edition = "2021"

 [dependencies]
-actix-http = "=3.0.0-beta.10"
-actix-web = "4.0.0-beta.9"
+actix-web = { version = "4.0.0-beta.21", default-features = false }
 proptest = { version = "1.0.0", optional = true }
 proptest-derive = { version = "0.3.0", optional = true }
 serde = { version = "1.0.130", features = ["derive"] }

meilisearch-error/src/lib.rs

@@ -1,7 +1,6 @@
 use std::fmt;

-use actix_http::{body::Body, http::StatusCode};
-use actix_web::{self as aweb, HttpResponseBuilder};
+use actix_web::{self as aweb, http::StatusCode, HttpResponseBuilder};
 use serde::{Deserialize, Serialize};

 #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]

@@ -59,7 +58,7 @@ where
 }

 impl aweb::error::ResponseError for ResponseError {
-    fn error_response(&self) -> aweb::HttpResponse<Body> {
+    fn error_response(&self) -> aweb::HttpResponse {
         let json = serde_json::to_vec(self).unwrap();
         HttpResponseBuilder::new(self.status_code())
             .content_type("application/json")

meilisearch-http/Cargo.toml

@@ -1,7 +1,7 @@
 [package]
 authors = ["Quentin de Quelen <quentin@dequelen.me>", "Clément Renault <clement@meilisearch.com>"]
-description = "MeiliSearch HTTP server"
-edition = "2018"
+description = "Meilisearch HTTP server"
+edition = "2021"
 license = "MIT"
 name = "meilisearch-http"
 version = "0.25.2"
|
|||||||
path = "src/main.rs"
|
path = "src/main.rs"
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
actix-web-static-files = { git = "https://github.com/MarinPostma/actix-web-static-files.git", rev = "39d8006", optional = true }
|
static-files = { version = "0.2.1", optional = true }
|
||||||
anyhow = { version = "1.0.43", optional = true }
|
anyhow = { version = "1.0.43", optional = true }
|
||||||
cargo_toml = { version = "0.9", optional = true }
|
cargo_toml = { version = "0.9", optional = true }
|
||||||
hex = { version = "0.4.3", optional = true }
|
hex = { version = "0.4.3", optional = true }
|
||||||
@ -22,17 +22,15 @@ vergen = { version = "5.1.15", default-features = false, features = ["git"] }
|
|||||||
zip = { version = "0.5.13", optional = true }
|
zip = { version = "0.5.13", optional = true }
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
actix-cors = { git = "https://github.com/MarinPostma/actix-extras.git", rev = "963ac94d" }
|
actix-cors = "0.6.0-beta.8"
|
||||||
actix-web = { version = "4.0.0-beta.9", features = ["rustls"] }
|
actix-web = { version = "4.0.0-beta.21", features = ["rustls"] }
|
||||||
actix-web-static-files = { git = "https://github.com/MarinPostma/actix-web-static-files.git", rev = "39d8006", optional = true }
|
actix-web-static-files = { git = "https://github.com/robjtede/actix-web-static-files.git", rev = "ed74153", optional = true }
|
||||||
# TODO: specifying this dependency so semver doesn't bump to next beta
|
|
||||||
actix-tls = "=3.0.0-beta.5"
|
|
||||||
anyhow = { version = "1.0.43", features = ["backtrace"] }
|
anyhow = { version = "1.0.43", features = ["backtrace"] }
|
||||||
arc-swap = "1.3.2"
|
arc-swap = "1.3.2"
|
||||||
async-stream = "0.3.2"
|
async-stream = "0.3.2"
|
||||||
async-trait = "0.1.51"
|
async-trait = "0.1.51"
|
||||||
bstr = "0.2.17"
|
bstr = "0.2.17"
|
||||||
byte-unit = { version = "4.0.12", default-features = false, features = ["std"] }
|
byte-unit = { version = "4.0.12", default-features = false, features = ["std", "serde"] }
|
||||||
bytes = "1.1.0"
|
bytes = "1.1.0"
|
||||||
chrono = { version = "0.4.19", features = ["serde"] }
|
chrono = { version = "0.4.19", features = ["serde"] }
|
||||||
crossbeam-channel = "0.5.1"
|
crossbeam-channel = "0.5.1"
|
||||||
@ -60,14 +58,16 @@ platform-dirs = "0.3.0"
|
|||||||
rand = "0.8.4"
|
rand = "0.8.4"
|
||||||
rayon = "1.5.1"
|
rayon = "1.5.1"
|
||||||
regex = "1.5.4"
|
regex = "1.5.4"
|
||||||
rustls = "0.19.1"
|
rustls = "0.20.2"
|
||||||
|
rustls-pemfile = "0.2"
|
||||||
segment = { version = "0.1.2", optional = true }
|
segment = { version = "0.1.2", optional = true }
|
||||||
serde = { version = "1.0.130", features = ["derive"] }
|
serde = { version = "1.0.130", features = ["derive"] }
|
||||||
serde_json = { version = "1.0.67", features = ["preserve_order"] }
|
serde_json = { version = "1.0.67", features = ["preserve_order"] }
|
||||||
sha2 = "0.9.6"
|
sha2 = "0.9.6"
|
||||||
siphasher = "0.3.7"
|
siphasher = "0.3.7"
|
||||||
slice-group-by = "0.2.6"
|
slice-group-by = "0.2.6"
|
||||||
structopt = "0.3.25"
|
static-files = { version = "0.2.1", optional = true }
|
||||||
|
clap = { version = "3.0", features = ["derive", "env"] }
|
||||||
sysinfo = "0.20.2"
|
sysinfo = "0.20.2"
|
||||||
tar = "0.4.37"
|
tar = "0.4.37"
|
||||||
tempfile = "3.2.0"
|
tempfile = "3.2.0"
|
||||||
@ -88,6 +88,7 @@ urlencoding = "2.1.0"
|
|||||||
[features]
|
[features]
|
||||||
mini-dashboard = [
|
mini-dashboard = [
|
||||||
"actix-web-static-files",
|
"actix-web-static-files",
|
||||||
|
"static-files",
|
||||||
"anyhow",
|
"anyhow",
|
||||||
"cargo_toml",
|
"cargo_toml",
|
||||||
"hex",
|
"hex",
|
||||||
|
@ -16,11 +16,11 @@ mod mini_dashboard {
|
|||||||
use std::io::{Cursor, Read, Write};
|
use std::io::{Cursor, Read, Write};
|
||||||
use std::path::PathBuf;
|
use std::path::PathBuf;
|
||||||
|
|
||||||
use actix_web_static_files::resource_dir;
|
|
||||||
use anyhow::Context;
|
use anyhow::Context;
|
||||||
use cargo_toml::Manifest;
|
use cargo_toml::Manifest;
|
||||||
use reqwest::blocking::get;
|
use reqwest::blocking::get;
|
||||||
use sha1::{Digest, Sha1};
|
use sha1::{Digest, Sha1};
|
||||||
|
use static_files::resource_dir;
|
||||||
|
|
||||||
pub fn setup_mini_dashboard() -> anyhow::Result<()> {
|
pub fn setup_mini_dashboard() -> anyhow::Result<()> {
|
||||||
let cargo_manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
|
let cargo_manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
|
||||||
|

meilisearch-http/src/analytics/mod.rs

@@ -29,12 +29,12 @@ pub type SegmentAnalytics = segment_analytics::SegmentAnalytics;
 #[cfg(all(not(debug_assertions), feature = "analytics"))]
 pub type SearchAggregator = segment_analytics::SearchAggregator;

-/// The MeiliSearch config dir:
-/// `~/.config/MeiliSearch` on *NIX or *BSD.
+/// The Meilisearch config dir:
+/// `~/.config/Meilisearch` on *NIX or *BSD.
 /// `~/Library/ApplicationSupport` on macOS.
 /// `%APPDATA` (= `C:\Users%USERNAME%\AppData\Roaming`) on windows.
 static MEILISEARCH_CONFIG_PATH: Lazy<Option<PathBuf>> =
-    Lazy::new(|| AppDirs::new(Some("MeiliSearch"), false).map(|appdir| appdir.config_dir));
+    Lazy::new(|| AppDirs::new(Some("Meilisearch"), false).map(|appdir| appdir.config_dir));

 fn config_user_id_path(db_path: &Path) -> Option<PathBuf> {
     db_path

@@ -50,7 +50,7 @@ fn config_user_id_path(db_path: &Path) -> Option<PathBuf> {
     .map(|(filename, config_path)| config_path.join(filename.trim_start_matches('-')))
 }

-/// Look for the instance-uid in the `data.ms` or in `~/.config/MeiliSearch/path-to-db-instance-uid`
+/// Look for the instance-uid in the `data.ms` or in `~/.config/Meilisearch/path-to-db-instance-uid`
 fn find_user_id(db_path: &Path) -> Option<String> {
     fs::read_to_string(db_path.join("instance-uid"))
         .ok()

meilisearch-http/src/analytics/segment_analytics.rs

@@ -1,11 +1,12 @@
 use std::collections::{BinaryHeap, HashMap, HashSet};
 use std::fs;
-use std::path::Path;
+use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use std::time::{Duration, Instant};

 use actix_web::http::header::USER_AGENT;
 use actix_web::HttpRequest;
+use chrono::{DateTime, Utc};
 use http::header::CONTENT_TYPE;
 use meilisearch_lib::index::{SearchQuery, SearchResult};
 use meilisearch_lib::index_controller::Stats;

@@ -77,7 +78,7 @@ impl SegmentAnalytics {
         let user = User::UserId { user_id };
         let mut batcher = AutoBatcher::new(client, Batcher::new(None), SEGMENT_API_KEY.to_string());

-        // If MeiliSearch is Launched for the first time:
+        // If Meilisearch is Launched for the first time:
         // 1. Send an event Launched associated to the user `total_launch`.
         // 2. Batch an event Launched with the real instance-id and send it in one hour.
         if first_time_run {

@@ -210,10 +211,30 @@ impl Segment {
                 "server_provider": std::env::var("MEILI_SERVER_PROVIDER").ok(),
             })
         });
-        let infos = json!({
-            "env": opt.env.clone(),
-            "has_snapshot": opt.schedule_snapshot,
-        });
+        // The infos are all cli option except every option containing sensitive information.
+        // We consider an information as sensible if it contains a path, an address or a key.
+        let infos = {
+            // First we see if any sensitive fields were used.
+            let db_path = opt.db_path != PathBuf::from("./data.ms");
+            let import_dump = opt.import_dump.is_some();
+            let dumps_dir = opt.dumps_dir != PathBuf::from("dumps/");
+            let import_snapshot = opt.import_snapshot.is_some();
+            let snapshots_dir = opt.snapshot_dir != PathBuf::from("snapshots/");
+            let http_addr = opt.http_addr != "127.0.0.1:7700";
+
+            let mut infos = serde_json::to_value(opt).unwrap();
+
+            // Then we overwrite all sensitive field with a boolean representing if
+            // the feature was used or not.
+            infos["db_path"] = json!(db_path);
+            infos["import_dump"] = json!(import_dump);
+            infos["dumps_dir"] = json!(dumps_dir);
+            infos["import_snapshot"] = json!(import_snapshot);
+            infos["snapshot_dir"] = json!(snapshots_dir);
+            infos["http_addr"] = json!(http_addr);
+
+            infos
+        };

         let number_of_documents = stats
             .indexes
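
The sanitization pattern above deserves a second look: rather than hand-picking safe fields, the whole `Opt` struct is serialized with serde, and each sensitive field is then overwritten with a boolean that records only whether the option differed from its default. A self-contained sketch of the same idea, using a hypothetical two-field struct:

```rust
use serde::Serialize;
use serde_json::json;

#[derive(Serialize)]
struct Opt {
    env: String,
    db_path: String, // sensitive: may leak a filesystem layout
}

fn main() {
    let opt = Opt {
        env: "development".into(),
        db_path: "/home/user/data.ms".into(),
    };

    // Record only whether the sensitive option was changed from its default...
    let db_path_changed = opt.db_path != "./data.ms";

    // ...serialize everything...
    let mut infos = serde_json::to_value(&opt).unwrap();

    // ...then overwrite the sensitive field with that boolean.
    infos["db_path"] = json!(db_path_changed);

    assert_eq!(infos, json!({ "env": "development", "db_path": true }));
}
```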

@@ -301,6 +322,8 @@ impl Segment {

 #[derive(Default)]
 pub struct SearchAggregator {
+    timestamp: Option<DateTime<Utc>>,
+
     // context
     user_agents: HashSet<String>,

@@ -336,6 +359,8 @@ pub struct SearchAggregator {
 impl SearchAggregator {
     pub fn from_query(query: &SearchQuery, request: &HttpRequest) -> Self {
         let mut ret = Self::default();
+        ret.timestamp = Some(chrono::offset::Utc::now());
+
         ret.total_received = 1;
         ret.user_agents = extract_user_agents(request).into_iter().collect();

@@ -389,6 +414,10 @@ impl SearchAggregator {

     /// Aggregate one [SearchAggregator] into another.
     pub fn aggregate(&mut self, mut other: Self) {
+        if self.timestamp.is_none() {
+            self.timestamp = other.timestamp;
+        }
+
         // context
         for user_agent in other.user_agents.into_iter() {
             self.user_agents.insert(user_agent);

@@ -462,6 +491,7 @@ impl SearchAggregator {
         });

         Some(Track {
+            timestamp: self.timestamp,
             user: user.clone(),
             event: event_name.to_string(),
             properties,

@@ -473,6 +503,8 @@ impl SearchAggregator {

 #[derive(Default)]
 pub struct DocumentsAggregator {
+    timestamp: Option<DateTime<Utc>>,
+
     // set to true when at least one request was received
     updated: bool,

@@ -491,6 +523,7 @@ impl DocumentsAggregator {
         request: &HttpRequest,
     ) -> Self {
         let mut ret = Self::default();
+        ret.timestamp = Some(chrono::offset::Utc::now());

         ret.updated = true;
         ret.user_agents = extract_user_agents(request).into_iter().collect();

@@ -511,6 +544,10 @@ impl DocumentsAggregator {

     /// Aggregate one [DocumentsAggregator] into another.
     pub fn aggregate(&mut self, other: Self) {
+        if self.timestamp.is_none() {
+            self.timestamp = other.timestamp;
+        }
+
         self.updated |= other.updated;
         // we can't create a union because there is no `into_union` method
         for user_agent in other.user_agents.into_iter() {

@@ -537,6 +574,7 @@ impl DocumentsAggregator {
         });

         Some(Track {
+            timestamp: self.timestamp,
             user: user.clone(),
             event: event_name.to_string(),
             properties,

meilisearch-http/src/extractors/authentication/mod.rs

@@ -32,8 +32,6 @@ impl<T, D> Deref for GuardedData<T, D> {
 }

 impl<P: Policy + 'static, D: 'static + Clone> FromRequest for GuardedData<P, D> {
-    type Config = ();
-
     type Error = ResponseError;

     type Future = Ready<Result<Self, Self::Error>>;
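
Both `GuardedData` here and the `Payload` extractor below lose their `Config` associated type: the actix-web 4.0 betas removed `FromRequest::Config`, and extractor configuration is now fetched from app data inside `from_request` instead. A sketch of a custom extractor under the new trait shape (the `ClientIp` type is hypothetical):

```rust
use std::future::{ready, Ready};

use actix_web::{dev::Payload, Error, FromRequest, HttpRequest};

struct ClientIp(String);

impl FromRequest for ClientIp {
    // No `type Config` anymore: just the error and future types.
    type Error = Error;
    type Future = Ready<Result<Self, Self::Error>>;

    fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future {
        let ip = req
            .connection_info()
            .realip_remote_addr()
            .unwrap_or("unknown")
            .to_owned();
        ready(Ok(ClientIp(ip)))
    }
}
```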

meilisearch-http/src/extractors/payload.rs

@@ -28,8 +28,6 @@ impl Default for PayloadConfig {
 }

 impl FromRequest for Payload {
-    type Config = PayloadConfig;
-
     type Error = PayloadError;

     type Future = Ready<Result<Payload, Self::Error>>;

@@ -39,7 +37,7 @@ impl FromRequest for Payload {
         let limit = req
             .app_data::<PayloadConfig>()
             .map(|c| c.limit)
-            .unwrap_or(Self::Config::default().limit);
+            .unwrap_or(PayloadConfig::default().limit);
         ready(Ok(Payload {
             payload: payload.take(),
             limit,

meilisearch-http/src/lib.rs

@@ -30,11 +30,15 @@ pub fn setup_meilisearch(opt: &Opt) -> anyhow::Result<MeiliSearch> {
     meilisearch
         .set_max_index_size(opt.max_index_size.get_bytes() as usize)
         .set_max_task_store_size(opt.max_task_db_size.get_bytes() as usize)
+        // snapshot
         .set_ignore_missing_snapshot(opt.ignore_missing_snapshot)
         .set_ignore_snapshot_if_db_exists(opt.ignore_snapshot_if_db_exists)
-        .set_dump_dst(opt.dumps_dir.clone())
         .set_snapshot_interval(Duration::from_secs(opt.snapshot_interval_sec))
-        .set_snapshot_dir(opt.snapshot_dir.clone());
+        .set_snapshot_dir(opt.snapshot_dir.clone())
+        // dump
+        .set_ignore_missing_dump(opt.ignore_missing_dump)
+        .set_ignore_dump_if_db_exists(opt.ignore_dump_if_db_exists)
+        .set_dump_dst(opt.dumps_dir.clone());

     if let Some(ref path) = opt.import_snapshot {
         meilisearch.set_import_snapshot(path.clone());

@@ -90,7 +94,7 @@ pub fn configure_data(
 #[cfg(feature = "mini-dashboard")]
 pub fn dashboard(config: &mut web::ServiceConfig, enable_frontend: bool) {
     use actix_web::HttpResponse;
-    use actix_web_static_files::Resource;
+    use static_files::Resource;

     mod generated {
         include!(concat!(env!("OUT_DIR"), "/generated.rs"));

meilisearch-http/src/main.rs

@@ -2,12 +2,12 @@ use std::env;
 use std::sync::Arc;

 use actix_web::HttpServer;
+use clap::Parser;
 use meilisearch_auth::AuthController;
 use meilisearch_http::analytics;
 use meilisearch_http::analytics::Analytics;
 use meilisearch_http::{create_app, setup_meilisearch, Opt};
 use meilisearch_lib::MeiliSearch;
-use structopt::StructOpt;

 #[cfg(target_os = "linux")]
 #[global_allocator]

@@ -29,7 +29,7 @@ fn setup(opt: &Opt) -> anyhow::Result<()> {

 #[actix_web::main]
 async fn main() -> anyhow::Result<()> {
-    let opt = Opt::from_args();
+    let opt = Opt::parse();

     setup(&opt)?;

@@ -50,7 +50,7 @@ async fn main() -> anyhow::Result<()> {
     let auth_controller = AuthController::new(&opt.db_path, &opt.master_key)?;

     #[cfg(all(not(debug_assertions), feature = "analytics"))]
-    let (analytics, user) = if opt.analytics() {
+    let (analytics, user) = if !opt.no_analytics {
         analytics::SegmentAnalytics::new(&opt, &meilisearch).await
     } else {
         analytics::MockAnalytics::new(&opt)

@@ -101,14 +101,14 @@ pub fn print_launch_resume(opt: &Opt, user: &str) {
     let commit_date = option_env!("VERGEN_GIT_COMMIT_TIMESTAMP").unwrap_or("unknown");

     let ascii_name = r#"
-888b d888 d8b 888 d8b .d8888b. 888
-8888b d8888 Y8P 888 Y8P d88P Y88b 888
-88888b.d88888 888 Y88b. 888
-888Y88888P888 .d88b. 888 888 888 "Y888b. .d88b. 8888b. 888d888 .d8888b 88888b.
-888 Y888P 888 d8P Y8b 888 888 888 "Y88b. d8P Y8b "88b 888P" d88P" 888 "88b
-888 Y8P 888 88888888 888 888 888 "888 88888888 .d888888 888 888 888 888
-888 " 888 Y8b. 888 888 888 Y88b d88P Y8b. 888 888 888 Y88b. 888 888
-888 888 "Y8888 888 888 888 "Y8888P" "Y8888 "Y888888 888 "Y8888P 888 888
+888b d888 d8b 888 d8b 888
+8888b d8888 Y8P 888 Y8P 888
+88888b.d88888 888 888
+888Y88888P888 .d88b. 888 888 888 .d8888b .d88b. 8888b. 888d888 .d8888b 88888b.
+888 Y888P 888 d8P Y8b 888 888 888 88K d8P Y8b "88b 888P" d88P" 888 "88b
+888 Y8P 888 88888888 888 888 888 "Y8888b. 88888888 .d888888 888 888 888 888
+888 " 888 Y8b. 888 888 888 X88 Y8b. 888 888 888 Y88b. 888 888
+888 888 "Y8888 888 888 888 88888P' "Y8888 "Y888888 888 "Y8888P 888 888
 "#;

     eprintln!("{}", ascii_name);

@@ -125,10 +125,10 @@ pub fn print_launch_resume(opt: &Opt, user: &str) {

     #[cfg(all(not(debug_assertions), feature = "analytics"))]
     {
-        if opt.analytics() {
+        if !opt.no_analytics {
             eprintln!(
                 "
-Thank you for using MeiliSearch!
+Thank you for using Meilisearch!

 We collect anonymized analytics to improve our product and your experience. To learn more, including how to turn off analytics, visit our dedicated documentation page: https://docs.meilisearch.com/learn/what_is_meilisearch/telemetry.html

@@ -146,7 +146,7 @@ Anonymous telemetry:\t\"Enabled\""
     eprintln!();

     if opt.master_key.is_some() {
-        eprintln!("A Master Key has been set. Requests to MeiliSearch won't be authorized unless you provide an authentication key.");
+        eprintln!("A Master Key has been set. Requests to Meilisearch won't be authorized unless you provide an authentication key.");
     } else {
         eprintln!("No master key found; The server will accept unidentified requests. \
         If you need some protection in development mode, please export a key: export MEILI_MASTER_KEY=xxx");
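
The structopt-to-clap migration running through `main.rs` and the options struct below is mostly mechanical: `#[derive(StructOpt)]` becomes clap v3's `#[derive(Parser)]`, `#[structopt(...)]` attributes become `#[clap(...)]`, and `Opt::from_args()` becomes `Opt::parse()`. A minimal sketch of the new shape, trimmed to two illustrative options (requires clap 3 with the `derive` and `env` features, as declared in the Cargo.toml change above):

```rust
use clap::Parser;

#[derive(Debug, Parser)]
struct Opt {
    /// The destination where the database must be created.
    #[clap(long, env = "MEILI_DB_PATH", default_value = "./data.ms")]
    db_path: String,

    /// Do not send analytics to Meili.
    #[clap(long, env = "MEILI_NO_ANALYTICS")]
    no_analytics: bool,
}

fn main() {
    // Values come from CLI flags first, then fall back to the MEILI_* env vars.
    let opt = Opt::parse();
    println!("db_path = {}, no_analytics = {}", opt.db_path, opt.no_analytics);
}
```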

meilisearch-http/src/option.rs

@@ -4,144 +4,157 @@ use std::path::PathBuf;
 use std::sync::Arc;

 use byte_unit::Byte;
+use clap::Parser;
 use meilisearch_lib::options::IndexerOpts;
-use rustls::internal::pemfile::{certs, pkcs8_private_keys, rsa_private_keys};
 use rustls::{
-    AllowAnyAnonymousOrAuthenticatedClient, AllowAnyAuthenticatedClient, NoClientAuth,
+    server::{
+        AllowAnyAnonymousOrAuthenticatedClient, AllowAnyAuthenticatedClient,
+        ServerSessionMemoryCache,
+    },
     RootCertStore,
 };
-use structopt::StructOpt;
+use rustls_pemfile::{certs, pkcs8_private_keys, rsa_private_keys};
+use serde::Serialize;

 const POSSIBLE_ENV: [&str; 2] = ["development", "production"];

-#[derive(Debug, Clone, StructOpt)]
+#[derive(Debug, Clone, Parser, Serialize)]
 pub struct Opt {
     /// The destination where the database must be created.
-    #[structopt(long, env = "MEILI_DB_PATH", default_value = "./data.ms")]
+    #[clap(long, env = "MEILI_DB_PATH", default_value = "./data.ms")]
     pub db_path: PathBuf,

     /// The address on which the http server will listen.
-    #[structopt(long, env = "MEILI_HTTP_ADDR", default_value = "127.0.0.1:7700")]
+    #[clap(long, env = "MEILI_HTTP_ADDR", default_value = "127.0.0.1:7700")]
     pub http_addr: String,

     /// The master key allowing you to do everything on the server.
-    #[structopt(long, env = "MEILI_MASTER_KEY")]
+    #[serde(skip)]
+    #[clap(long, env = "MEILI_MASTER_KEY")]
     pub master_key: Option<String>,

     /// This environment variable must be set to `production` if you are running in production.
     /// If the server is running in development mode more logs will be displayed,
     /// and the master key can be avoided which implies that there is no security on the updates routes.
     /// This is useful to debug when integrating the engine with another service.
-    #[structopt(long, env = "MEILI_ENV", default_value = "development", possible_values = &POSSIBLE_ENV)]
+    #[clap(long, env = "MEILI_ENV", default_value = "development", possible_values = &POSSIBLE_ENV)]
     pub env: String,

     /// Do not send analytics to Meili.
     #[cfg(all(not(debug_assertions), feature = "analytics"))]
-    #[structopt(long, env = "MEILI_NO_ANALYTICS")]
-    pub no_analytics: Option<Option<bool>>,
+    #[clap(long, env = "MEILI_NO_ANALYTICS")]
+    pub no_analytics: bool,

     /// The maximum size, in bytes, of the main lmdb database directory
-    #[structopt(long, env = "MEILI_MAX_INDEX_SIZE", default_value = "100 GiB")]
+    #[clap(long, env = "MEILI_MAX_INDEX_SIZE", default_value = "100 GiB")]
     pub max_index_size: Byte,

     /// The maximum size, in bytes, of the update lmdb database directory
-    #[structopt(long, env = "MEILI_MAX_TASK_DB_SIZE", default_value = "100 GiB")]
+    #[clap(long, env = "MEILI_MAX_TASK_DB_SIZE", default_value = "100 GiB")]
     pub max_task_db_size: Byte,

     /// The maximum size, in bytes, of accepted JSON payloads
-    #[structopt(long, env = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT", default_value = "100 MB")]
+    #[clap(long, env = "MEILI_HTTP_PAYLOAD_SIZE_LIMIT", default_value = "100 MB")]
     pub http_payload_size_limit: Byte,

     /// Read server certificates from CERTFILE.
|
||||||
/// This should contain PEM-format certificates
|
/// This should contain PEM-format certificates
|
||||||
/// in the right order (the first certificate should
|
/// in the right order (the first certificate should
|
||||||
/// certify KEYFILE, the last should be a root CA).
|
/// certify KEYFILE, the last should be a root CA).
|
||||||
#[structopt(long, env = "MEILI_SSL_CERT_PATH", parse(from_os_str))]
|
#[serde(skip)]
|
||||||
|
#[clap(long, env = "MEILI_SSL_CERT_PATH", parse(from_os_str))]
|
||||||
pub ssl_cert_path: Option<PathBuf>,
|
pub ssl_cert_path: Option<PathBuf>,
|
||||||
|
|
||||||
/// Read private key from KEYFILE. This should be a RSA
|
/// Read private key from KEYFILE. This should be a RSA
|
||||||
/// private key or PKCS8-encoded private key, in PEM format.
|
/// private key or PKCS8-encoded private key, in PEM format.
|
||||||
#[structopt(long, env = "MEILI_SSL_KEY_PATH", parse(from_os_str))]
|
#[serde(skip)]
|
||||||
|
#[clap(long, env = "MEILI_SSL_KEY_PATH", parse(from_os_str))]
|
||||||
pub ssl_key_path: Option<PathBuf>,
|
pub ssl_key_path: Option<PathBuf>,
|
||||||
|
|
||||||
/// Enable client authentication, and accept certificates
|
/// Enable client authentication, and accept certificates
|
||||||
/// signed by those roots provided in CERTFILE.
|
/// signed by those roots provided in CERTFILE.
|
||||||
#[structopt(long, env = "MEILI_SSL_AUTH_PATH", parse(from_os_str))]
|
#[clap(long, env = "MEILI_SSL_AUTH_PATH", parse(from_os_str))]
|
||||||
|
#[serde(skip)]
|
||||||
pub ssl_auth_path: Option<PathBuf>,
|
pub ssl_auth_path: Option<PathBuf>,
|
||||||
|
|
||||||
/// Read DER-encoded OCSP response from OCSPFILE and staple to certificate.
|
/// Read DER-encoded OCSP response from OCSPFILE and staple to certificate.
|
||||||
/// Optional
|
/// Optional
|
||||||
#[structopt(long, env = "MEILI_SSL_OCSP_PATH", parse(from_os_str))]
|
#[serde(skip)]
|
||||||
|
#[clap(long, env = "MEILI_SSL_OCSP_PATH", parse(from_os_str))]
|
||||||
pub ssl_ocsp_path: Option<PathBuf>,
|
pub ssl_ocsp_path: Option<PathBuf>,
|
||||||
|
|
||||||
/// Send a fatal alert if the client does not complete client authentication.
|
/// Send a fatal alert if the client does not complete client authentication.
|
||||||
#[structopt(long, env = "MEILI_SSL_REQUIRE_AUTH")]
|
#[serde(skip)]
|
||||||
|
#[clap(long, env = "MEILI_SSL_REQUIRE_AUTH")]
|
||||||
pub ssl_require_auth: bool,
|
pub ssl_require_auth: bool,
|
||||||
|
|
||||||
/// SSL support session resumption
|
/// SSL support session resumption
|
||||||
#[structopt(long, env = "MEILI_SSL_RESUMPTION")]
|
#[serde(skip)]
|
||||||
|
#[clap(long, env = "MEILI_SSL_RESUMPTION")]
|
||||||
pub ssl_resumption: bool,
|
pub ssl_resumption: bool,
|
||||||
|
|
||||||
/// SSL support tickets.
|
/// SSL support tickets.
|
||||||
#[structopt(long, env = "MEILI_SSL_TICKETS")]
|
#[serde(skip)]
|
||||||
|
#[clap(long, env = "MEILI_SSL_TICKETS")]
|
||||||
pub ssl_tickets: bool,
|
pub ssl_tickets: bool,
|
||||||
|
|
||||||
/// Defines the path of the snapshot file to import.
|
/// Defines the path of the snapshot file to import.
|
||||||
/// This option will, by default, stop the process if a database already exist or if no snapshot exists at
|
/// This option will, by default, stop the process if a database already exist or if no snapshot exists at
|
||||||
/// the given path. If this option is not specified no snapshot is imported.
|
/// the given path. If this option is not specified no snapshot is imported.
|
||||||
#[structopt(long)]
|
#[clap(long)]
|
||||||
pub import_snapshot: Option<PathBuf>,
|
pub import_snapshot: Option<PathBuf>,
|
||||||
|
|
||||||
/// The engine will ignore a missing snapshot and not return an error in such case.
|
/// The engine will ignore a missing snapshot and not return an error in such case.
|
||||||
#[structopt(long, requires = "import-snapshot")]
|
#[clap(long, requires = "import-snapshot")]
|
||||||
pub ignore_missing_snapshot: bool,
|
pub ignore_missing_snapshot: bool,
|
||||||
|
|
||||||
/// The engine will skip snapshot importation and not return an error in such case.
|
/// The engine will skip snapshot importation and not return an error in such case.
|
||||||
#[structopt(long, requires = "import-snapshot")]
|
#[clap(long, requires = "import-snapshot")]
|
||||||
pub ignore_snapshot_if_db_exists: bool,
|
pub ignore_snapshot_if_db_exists: bool,
|
||||||
|
|
||||||
/// Defines the directory path where meilisearch will create snapshot each snapshot_time_gap.
|
/// Defines the directory path where meilisearch will create snapshot each snapshot_time_gap.
|
||||||
#[structopt(long, env = "MEILI_SNAPSHOT_DIR", default_value = "snapshots/")]
|
#[clap(long, env = "MEILI_SNAPSHOT_DIR", default_value = "snapshots/")]
|
||||||
pub snapshot_dir: PathBuf,
|
pub snapshot_dir: PathBuf,
|
||||||
|
|
||||||
/// Activate snapshot scheduling.
|
/// Activate snapshot scheduling.
|
||||||
#[structopt(long, env = "MEILI_SCHEDULE_SNAPSHOT")]
|
#[clap(long, env = "MEILI_SCHEDULE_SNAPSHOT")]
|
||||||
pub schedule_snapshot: bool,
|
pub schedule_snapshot: bool,
|
||||||
|
|
||||||
/// Defines time interval, in seconds, between each snapshot creation.
|
/// Defines time interval, in seconds, between each snapshot creation.
|
||||||
#[structopt(long, env = "MEILI_SNAPSHOT_INTERVAL_SEC", default_value = "86400")] // 24h
|
#[clap(long, env = "MEILI_SNAPSHOT_INTERVAL_SEC", default_value = "86400")] // 24h
|
||||||
pub snapshot_interval_sec: u64,
|
pub snapshot_interval_sec: u64,
|
||||||
|
|
||||||
/// Folder where dumps are created when the dump route is called.
|
|
||||||
#[structopt(long, env = "MEILI_DUMPS_DIR", default_value = "dumps/")]
|
|
||||||
pub dumps_dir: PathBuf,
|
|
||||||
|
|
||||||
/// Import a dump from the specified path, must be a `.dump` file.
|
/// Import a dump from the specified path, must be a `.dump` file.
|
||||||
#[structopt(long, conflicts_with = "import-snapshot")]
|
#[clap(long, conflicts_with = "import-snapshot")]
|
||||||
pub import_dump: Option<PathBuf>,
|
pub import_dump: Option<PathBuf>,
|
||||||
|
|
||||||
|
/// If the dump doesn't exists, load or create the database specified by `db-path` instead.
|
||||||
|
#[clap(long, requires = "import-dump")]
|
||||||
|
pub ignore_missing_dump: bool,
|
||||||
|
|
||||||
|
/// Ignore the dump if a database already exists, and load that database instead.
|
||||||
|
#[clap(long, requires = "import-dump")]
|
||||||
|
pub ignore_dump_if_db_exists: bool,
|
||||||
|
|
||||||
|
/// Folder where dumps are created when the dump route is called.
|
||||||
|
#[clap(long, env = "MEILI_DUMPS_DIR", default_value = "dumps/")]
|
||||||
|
pub dumps_dir: PathBuf,
|
||||||
|
|
||||||
/// Set the log level
|
/// Set the log level
|
||||||
#[structopt(long, env = "MEILI_LOG_LEVEL", default_value = "info")]
|
#[clap(long, env = "MEILI_LOG_LEVEL", default_value = "info")]
|
||||||
pub log_level: String,
|
pub log_level: String,
|
||||||
|
|
||||||
#[structopt(skip)]
|
#[serde(skip)]
|
||||||
|
#[clap(skip)]
|
||||||
pub indexer_options: IndexerOpts,
|
pub indexer_options: IndexerOpts,
|
||||||
}
|
}
|
||||||
|
|
||||||
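Editor's note: the hunk above migrates every option from structopt to the clap v3 derive API and, notably, collapses `no_analytics` from `Option<Option<bool>>` to a plain `bool`. A minimal standalone sketch of the same derive pattern, assuming clap 3 with the `derive` and `env` features (a later Cargo.toml hunk enables exactly those); `MyOpt` and the `MY_APP_*` variables are illustrative names, not Meilisearch's:

```rust
use clap::Parser;

/// Sketch of the clap v3 derive pattern used above (hypothetical names).
#[derive(Debug, Parser)]
struct MyOpt {
    /// Plain boolean flag: present => true, absent => false.
    /// With `env`, the flag can also be enabled via MY_APP_NO_ANALYTICS.
    #[clap(long, env = "MY_APP_NO_ANALYTICS")]
    no_analytics: bool,

    /// Option with a default, overridable by flag or env var.
    #[clap(long, env = "MY_APP_DB_PATH", default_value = "./data.ms")]
    db_path: std::path::PathBuf,
}

fn main() {
    let opt = MyOpt::parse();
    // The tri-state Option<Option<bool>> dance is gone: the flag is a bool.
    if !opt.no_analytics {
        println!("analytics enabled, db at {:?}", opt.db_path);
    }
}
```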
 impl Opt {
-    /// Wether analytics should be enabled or not.
-    #[cfg(all(not(debug_assertions), feature = "analytics"))]
-    pub fn analytics(&self) -> bool {
-        match self.no_analytics {
-            None => true,
-            Some(None) => false,
-            Some(Some(disabled)) => !disabled,
-        }
-    }
-
     pub fn get_ssl_config(&self) -> anyhow::Result<Option<rustls::ServerConfig>> {
         if let (Some(cert_path), Some(key_path)) = (&self.ssl_cert_path, &self.ssl_key_path) {
-            let client_auth = match &self.ssl_auth_path {
+            let config = rustls::ServerConfig::builder().with_safe_defaults();
+
+            let config = match &self.ssl_auth_path {
                 Some(auth_path) => {
                     let roots = load_certs(auth_path.to_path_buf())?;
                     let mut client_auth_roots = RootCertStore::empty();
@@ -149,30 +162,32 @@ impl Opt {
                         client_auth_roots.add(&root).unwrap();
                     }
                     if self.ssl_require_auth {
-                        AllowAnyAuthenticatedClient::new(client_auth_roots)
+                        let verifier = AllowAnyAuthenticatedClient::new(client_auth_roots);
+                        config.with_client_cert_verifier(verifier)
                     } else {
-                        AllowAnyAnonymousOrAuthenticatedClient::new(client_auth_roots)
+                        let verifier =
+                            AllowAnyAnonymousOrAuthenticatedClient::new(client_auth_roots);
+                        config.with_client_cert_verifier(verifier)
                     }
                 }
-                None => NoClientAuth::new(),
+                None => config.with_no_client_auth(),
             };

-            let mut config = rustls::ServerConfig::new(client_auth);
-            config.key_log = Arc::new(rustls::KeyLogFile::new());
-
             let certs = load_certs(cert_path.to_path_buf())?;
             let privkey = load_private_key(key_path.to_path_buf())?;
             let ocsp = load_ocsp(&self.ssl_ocsp_path)?;
-            config
-                .set_single_cert_with_ocsp_and_sct(certs, privkey, ocsp, vec![])
+            let mut config = config
+                .with_single_cert_with_ocsp_and_sct(certs, privkey, ocsp, vec![])
                 .map_err(|_| anyhow::anyhow!("bad certificates/private key"))?;

+            config.key_log = Arc::new(rustls::KeyLogFile::new());
+
             if self.ssl_resumption {
-                config.set_persistence(rustls::ServerSessionMemoryCache::new(256));
+                config.session_storage = ServerSessionMemoryCache::new(256);
             }

             if self.ssl_tickets {
-                config.ticketer = rustls::Ticketer::new();
+                config.ticketer = rustls::Ticketer::new().unwrap();
             }

             Ok(Some(config))
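Editor's note: the hunk above tracks the rustls 0.19 → 0.20 API change, where the mutable `ServerConfig::new(client_auth)` constructor was replaced by a type-state builder. A minimal sketch of the new flow, assuming certificates and key are already parsed into rustls types:

```rust
use rustls::{Certificate, PrivateKey, ServerConfig};

// Sketch of the rustls 0.20 builder flow used above.
// `certs` and `key` are assumed to be already-parsed DER data.
fn make_config(certs: Vec<Certificate>, key: PrivateKey) -> anyhow::Result<ServerConfig> {
    let config = ServerConfig::builder()
        .with_safe_defaults()   // protocol versions, cipher suites, key exchange
        .with_no_client_auth()  // or .with_client_cert_verifier(...)
        .with_single_cert(certs, key)?;
    Ok(config)
}
```

Each builder step narrows the type, so forgetting the verifier or the certificate no longer compiles, which is why the diff threads `config` through the `match` instead of mutating it afterwards.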
@@ -186,7 +201,9 @@ fn load_certs(filename: PathBuf) -> anyhow::Result<Vec<rustls::Certificate>> {
     let certfile =
         fs::File::open(filename).map_err(|_| anyhow::anyhow!("cannot open certificate file"))?;
     let mut reader = BufReader::new(certfile);
-    certs(&mut reader).map_err(|_| anyhow::anyhow!("cannot read certificate file"))
+    certs(&mut reader)
+        .map(|certs| certs.into_iter().map(rustls::Certificate).collect())
+        .map_err(|_| anyhow::anyhow!("cannot read certificate file"))
 }

 fn load_private_key(filename: PathBuf) -> anyhow::Result<rustls::PrivateKey> {
@@ -211,10 +228,10 @@ fn load_private_key(filename: PathBuf) -> anyhow::Result<rustls::PrivateKey> {

     // prefer to load pkcs8 keys
     if !pkcs8_keys.is_empty() {
-        Ok(pkcs8_keys[0].clone())
+        Ok(rustls::PrivateKey(pkcs8_keys[0].clone()))
     } else {
         assert!(!rsa_keys.is_empty());
-        Ok(rsa_keys[0].clone())
+        Ok(rustls::PrivateKey(rsa_keys[0].clone()))
     }
 }

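Editor's note: `rustls-pemfile` (which replaces the removed `rustls::internal::pemfile` module) returns raw DER byte vectors rather than rustls types, hence the explicit `rustls::Certificate`/`rustls::PrivateKey` wrapping above. A small sketch of that parsing step; the `cert.pem`/`key.pem` paths are illustrative:

```rust
use std::{fs::File, io::BufReader};

use rustls_pemfile::{certs, pkcs8_private_keys};

// Sketch: parse a PEM bundle into rustls 0.20 types.
fn parse_pem() -> std::io::Result<(Vec<rustls::Certificate>, Vec<rustls::PrivateKey>)> {
    let mut cert_reader = BufReader::new(File::open("cert.pem")?);
    // rustls-pemfile yields plain DER bytes (Vec<Vec<u8>>)...
    let der_certs = certs(&mut cert_reader)?;
    // ...which must be wrapped into the rustls newtypes by hand.
    let certs = der_certs.into_iter().map(rustls::Certificate).collect();

    let mut key_reader = BufReader::new(File::open("key.pem")?);
    let keys = pkcs8_private_keys(&mut key_reader)?
        .into_iter()
        .map(rustls::PrivateKey)
        .collect();
    Ok((certs, keys))
}
```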
@@ -117,11 +117,11 @@ impl IndexUpdateResponse {
 /// Always return a 200 with:
 /// ```json
 /// {
-///     "status": "MeiliSearch is running"
+///     "status": "Meilisearch is running"
 /// }
 /// ```
 pub async fn running() -> HttpResponse {
-    HttpResponse::Ok().json(serde_json::json!({ "status": "MeiliSearch is running" }))
+    HttpResponse::Ok().json(serde_json::json!({ "status": "Meilisearch is running" }))
 }

 async fn get_stats(
@@ -1,56 +1,61 @@
 use crate::common::Server;
 use chrono::{Duration, Utc};
-use maplit::hashmap;
+use maplit::{hashmap, hashset};
 use once_cell::sync::Lazy;
 use serde_json::{json, Value};
 use std::collections::{HashMap, HashSet};

-static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), &'static str>> =
+static AUTHORIZATIONS: Lazy<HashMap<(&'static str, &'static str), HashSet<&'static str>>> =
     Lazy::new(|| {
         hashmap! {
-            ("POST", "/indexes/products/search") => "search",
-            ("GET", "/indexes/products/search") => "search",
-            ("POST", "/indexes/products/documents") => "documents.add",
-            ("GET", "/indexes/products/documents") => "documents.get",
-            ("GET", "/indexes/products/documents/0") => "documents.get",
-            ("DELETE", "/indexes/products/documents/0") => "documents.delete",
-            ("GET", "/tasks") => "tasks.get",
-            ("GET", "/indexes/products/tasks") => "tasks.get",
-            ("GET", "/indexes/products/tasks/0") => "tasks.get",
-            ("PUT", "/indexes/products/") => "indexes.update",
-            ("GET", "/indexes/products/") => "indexes.get",
-            ("DELETE", "/indexes/products/") => "indexes.delete",
-            ("POST", "/indexes") => "indexes.create",
-            ("GET", "/indexes") => "indexes.get",
-            ("GET", "/indexes/products/settings") => "settings.get",
-            ("GET", "/indexes/products/settings/displayed-attributes") => "settings.get",
-            ("GET", "/indexes/products/settings/distinct-attribute") => "settings.get",
-            ("GET", "/indexes/products/settings/filterable-attributes") => "settings.get",
-            ("GET", "/indexes/products/settings/ranking-rules") => "settings.get",
-            ("GET", "/indexes/products/settings/searchable-attributes") => "settings.get",
-            ("GET", "/indexes/products/settings/sortable-attributes") => "settings.get",
-            ("GET", "/indexes/products/settings/stop-words") => "settings.get",
-            ("GET", "/indexes/products/settings/synonyms") => "settings.get",
-            ("DELETE", "/indexes/products/settings") => "settings.update",
-            ("POST", "/indexes/products/settings") => "settings.update",
-            ("POST", "/indexes/products/settings/displayed-attributes") => "settings.update",
-            ("POST", "/indexes/products/settings/distinct-attribute") => "settings.update",
-            ("POST", "/indexes/products/settings/filterable-attributes") => "settings.update",
-            ("POST", "/indexes/products/settings/ranking-rules") => "settings.update",
-            ("POST", "/indexes/products/settings/searchable-attributes") => "settings.update",
-            ("POST", "/indexes/products/settings/sortable-attributes") => "settings.update",
-            ("POST", "/indexes/products/settings/stop-words") => "settings.update",
-            ("POST", "/indexes/products/settings/synonyms") => "settings.update",
-            ("GET", "/indexes/products/stats") => "stats.get",
-            ("GET", "/stats") => "stats.get",
-            ("POST", "/dumps") => "dumps.create",
-            ("GET", "/dumps/0/status") => "dumps.get",
-            ("GET", "/version") => "version",
+            ("POST", "/indexes/products/search") => hashset!{"search", "*"},
+            ("GET", "/indexes/products/search") => hashset!{"search", "*"},
+            ("POST", "/indexes/products/documents") => hashset!{"documents.add", "*"},
+            ("GET", "/indexes/products/documents") => hashset!{"documents.get", "*"},
+            ("GET", "/indexes/products/documents/0") => hashset!{"documents.get", "*"},
+            ("DELETE", "/indexes/products/documents/0") => hashset!{"documents.delete", "*"},
+            ("GET", "/tasks") => hashset!{"tasks.get", "*"},
+            ("GET", "/indexes/products/tasks") => hashset!{"tasks.get", "*"},
+            ("GET", "/indexes/products/tasks/0") => hashset!{"tasks.get", "*"},
+            ("PUT", "/indexes/products/") => hashset!{"indexes.update", "*"},
+            ("GET", "/indexes/products/") => hashset!{"indexes.get", "*"},
+            ("DELETE", "/indexes/products/") => hashset!{"indexes.delete", "*"},
+            ("POST", "/indexes") => hashset!{"indexes.create", "*"},
+            ("GET", "/indexes") => hashset!{"indexes.get", "*"},
+            ("GET", "/indexes/products/settings") => hashset!{"settings.get", "*"},
+            ("GET", "/indexes/products/settings/displayed-attributes") => hashset!{"settings.get", "*"},
+            ("GET", "/indexes/products/settings/distinct-attribute") => hashset!{"settings.get", "*"},
+            ("GET", "/indexes/products/settings/filterable-attributes") => hashset!{"settings.get", "*"},
+            ("GET", "/indexes/products/settings/ranking-rules") => hashset!{"settings.get", "*"},
+            ("GET", "/indexes/products/settings/searchable-attributes") => hashset!{"settings.get", "*"},
+            ("GET", "/indexes/products/settings/sortable-attributes") => hashset!{"settings.get", "*"},
+            ("GET", "/indexes/products/settings/stop-words") => hashset!{"settings.get", "*"},
+            ("GET", "/indexes/products/settings/synonyms") => hashset!{"settings.get", "*"},
+            ("DELETE", "/indexes/products/settings") => hashset!{"settings.update", "*"},
+            ("POST", "/indexes/products/settings") => hashset!{"settings.update", "*"},
+            ("POST", "/indexes/products/settings/displayed-attributes") => hashset!{"settings.update", "*"},
+            ("POST", "/indexes/products/settings/distinct-attribute") => hashset!{"settings.update", "*"},
+            ("POST", "/indexes/products/settings/filterable-attributes") => hashset!{"settings.update", "*"},
+            ("POST", "/indexes/products/settings/ranking-rules") => hashset!{"settings.update", "*"},
+            ("POST", "/indexes/products/settings/searchable-attributes") => hashset!{"settings.update", "*"},
+            ("POST", "/indexes/products/settings/sortable-attributes") => hashset!{"settings.update", "*"},
+            ("POST", "/indexes/products/settings/stop-words") => hashset!{"settings.update", "*"},
+            ("POST", "/indexes/products/settings/synonyms") => hashset!{"settings.update", "*"},
+            ("GET", "/indexes/products/stats") => hashset!{"stats.get", "*"},
+            ("GET", "/stats") => hashset!{"stats.get", "*"},
+            ("POST", "/dumps") => hashset!{"dumps.create", "*"},
+            ("GET", "/dumps/0/status") => hashset!{"dumps.get", "*"},
+            ("GET", "/version") => hashset!{"version", "*"},
         }
     });

-static ALL_ACTIONS: Lazy<HashSet<&'static str>> =
-    Lazy::new(|| AUTHORIZATIONS.values().cloned().collect());
+static ALL_ACTIONS: Lazy<HashSet<&'static str>> = Lazy::new(|| {
+    AUTHORIZATIONS
+        .values()
+        .cloned()
+        .reduce(|l, r| l.union(&r).cloned().collect())
+        .unwrap()
+});

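Editor's note: since each map value is now a `HashSet` of actions rather than a single string, `ALL_ACTIONS` switches from a plain `collect` to a fold-by-union via `Iterator::reduce`. A self-contained sketch of that pattern with toy data instead of the test's route table:

```rust
use std::collections::HashSet;

fn main() {
    let groups: Vec<HashSet<&str>> = vec![
        ["search", "*"].into_iter().collect(),
        ["documents.add", "*"].into_iter().collect(),
    ];

    // Fold all per-route action sets into one deduplicated set.
    let all: HashSet<&str> = groups
        .into_iter()
        .reduce(|l, r| l.union(&r).cloned().collect())
        .unwrap(); // safe here: `groups` is non-empty

    assert_eq!(all.len(), 3); // {"search", "documents.add", "*"}
    println!("{all:?}");
}
```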
 static INVALID_RESPONSE: Lazy<Value> = Lazy::new(|| {
     json!({"message": "The provided API key is invalid.",
@@ -61,6 +66,7 @@ static INVALID_RESPONSE: Lazy<Value> = Lazy::new(|| {
 });

 #[actix_rt::test]
+#[cfg_attr(target_os = "windows", ignore)]
 async fn error_access_expired_key() {
     use std::{thread, time};

@@ -92,6 +98,7 @@ async fn error_access_expired_key() {
 }

 #[actix_rt::test]
+#[cfg_attr(target_os = "windows", ignore)]
 async fn error_access_unauthorized_index() {
     let mut server = Server::new_auth().await;
     server.use_api_key("MASTER_KEY");
@@ -122,6 +129,7 @@ async fn error_access_unauthorized_index() {
 }

 #[actix_rt::test]
+#[cfg_attr(target_os = "windows", ignore)]
 async fn error_access_unauthorized_action() {
     let mut server = Server::new_auth().await;
     server.use_api_key("MASTER_KEY");
@@ -144,7 +152,7 @@ async fn error_access_unauthorized_action() {

         // Patch API key letting all rights but the needed one.
         let content = json!({
-            "actions": ALL_ACTIONS.iter().cloned().filter(|a| a != action).collect::<Vec<_>>(),
+            "actions": ALL_ACTIONS.difference(action).collect::<Vec<_>>(),
         });
         let (_, code) = server.patch_api_key(&key, content).await;
         assert_eq!(code, 200);
@@ -158,6 +166,7 @@ async fn error_access_unauthorized_action() {
 }

 #[actix_rt::test]
+#[cfg_attr(target_os = "windows", ignore)]
 async fn access_authorized_restricted_index() {
     let mut server = Server::new_auth().await;
     server.use_api_key("MASTER_KEY");
@@ -175,40 +184,28 @@ async fn access_authorized_restricted_index() {
     let key = response["key"].as_str().unwrap();
     server.use_api_key(&key);

-    for ((method, route), action) in AUTHORIZATIONS.iter() {
-        // Patch API key letting only the needed action.
-        let content = json!({
-            "actions": [action],
-        });
-
-        server.use_api_key("MASTER_KEY");
-        let (_, code) = server.patch_api_key(&key, content).await;
-        assert_eq!(code, 200);
-
-        server.use_api_key(&key);
-        let (response, code) = server.dummy_request(method, route).await;
-
-        assert_ne!(response, INVALID_RESPONSE.clone());
-        assert_ne!(code, 403);
-
-        // Patch API key using action all action.
-        let content = json!({
-            "actions": ["*"],
-        });
-
-        server.use_api_key("MASTER_KEY");
-        let (_, code) = server.patch_api_key(&key, content).await;
-        assert_eq!(code, 200);
-
-        server.use_api_key(&key);
-        let (response, code) = server.dummy_request(method, route).await;
-
-        assert_ne!(response, INVALID_RESPONSE.clone());
-        assert_ne!(code, 403);
+    for ((method, route), actions) in AUTHORIZATIONS.iter() {
+        for action in actions {
+            // Patch API key letting only the needed action.
+            let content = json!({
+                "actions": [action],
+            });
+
+            server.use_api_key("MASTER_KEY");
+            let (_, code) = server.patch_api_key(&key, content).await;
+            assert_eq!(code, 200);
+
+            server.use_api_key(&key);
+            let (response, code) = server.dummy_request(method, route).await;
+
+            assert_ne!(response, INVALID_RESPONSE.clone());
+            assert_ne!(code, 403);
+        }
     }
 }

 #[actix_rt::test]
+#[cfg_attr(target_os = "windows", ignore)]
 async fn access_authorized_no_index_restriction() {
     let mut server = Server::new_auth().await;
     server.use_api_key("MASTER_KEY");
@@ -226,40 +223,28 @@ async fn access_authorized_no_index_restriction() {
     let key = response["key"].as_str().unwrap();
     server.use_api_key(&key);

-    for ((method, route), action) in AUTHORIZATIONS.iter() {
-        server.use_api_key("MASTER_KEY");
-
-        // Patch API key letting only the needed action.
-        let content = json!({
-            "actions": [action],
-        });
-        let (_, code) = server.patch_api_key(&key, content).await;
-        assert_eq!(code, 200);
-
-        server.use_api_key(&key);
-        let (response, code) = server.dummy_request(method, route).await;
-
-        assert_ne!(response, INVALID_RESPONSE.clone());
-        assert_ne!(code, 403);
-
-        // Patch API key using action all action.
-        let content = json!({
-            "actions": ["*"],
-        });
-
-        server.use_api_key("MASTER_KEY");
-        let (_, code) = server.patch_api_key(&key, content).await;
-        assert_eq!(code, 200);
-
-        server.use_api_key(&key);
-        let (response, code) = server.dummy_request(method, route).await;
-
-        assert_ne!(response, INVALID_RESPONSE.clone());
-        assert_ne!(code, 403);
+    for ((method, route), actions) in AUTHORIZATIONS.iter() {
+        for action in actions {
+            server.use_api_key("MASTER_KEY");
+
+            // Patch API key letting only the needed action.
+            let content = json!({
+                "actions": [action],
+            });
+            let (_, code) = server.patch_api_key(&key, content).await;
+            assert_eq!(code, 200);
+
+            server.use_api_key(&key);
+            let (response, code) = server.dummy_request(method, route).await;
+
+            assert_ne!(response, INVALID_RESPONSE.clone());
+            assert_ne!(code, 403);
+        }
     }
 }

 #[actix_rt::test]
+#[cfg_attr(target_os = "windows", ignore)]
 async fn access_authorized_stats_restricted_index() {
     let mut server = Server::new_auth().await;
     server.use_api_key("MASTER_KEY");
@@ -299,6 +284,7 @@ async fn access_authorized_stats_restricted_index() {
 }

 #[actix_rt::test]
+#[cfg_attr(target_os = "windows", ignore)]
 async fn access_authorized_stats_no_index_restriction() {
     let mut server = Server::new_auth().await;
     server.use_api_key("MASTER_KEY");
@@ -338,6 +324,7 @@ async fn access_authorized_stats_no_index_restriction() {
 }

 #[actix_rt::test]
+#[cfg_attr(target_os = "windows", ignore)]
 async fn list_authorized_indexes_restricted_index() {
     let mut server = Server::new_auth().await;
     server.use_api_key("MASTER_KEY");
@@ -378,6 +365,7 @@ async fn list_authorized_indexes_restricted_index() {
 }

 #[actix_rt::test]
+#[cfg_attr(target_os = "windows", ignore)]
 async fn list_authorized_indexes_no_index_restriction() {
     let mut server = Server::new_auth().await;
     server.use_api_key("MASTER_KEY");
@@ -505,7 +493,8 @@ async fn error_creating_index_without_action() {
     // create key with access on all indexes.
     let content = json!({
         "indexes": ["*"],
-        "actions": ALL_ACTIONS.iter().cloned().filter(|a| *a != "indexes.create").collect::<Vec<_>>(),
+        // Give all action but the ones allowing to create an index.
+        "actions": ALL_ACTIONS.iter().cloned().filter(|a| !AUTHORIZATIONS.get(&("POST","/indexes")).unwrap().contains(a)).collect::<Vec<_>>(),
         "expiresAt": "2050-11-13T00:00:00Z"
     });
     let (response, code) = server.add_api_key(content).await;
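Editor's note: the patched test above swaps a manual `filter` over `ALL_ACTIONS` for `HashSet::difference`, which takes the set of actions to exclude. A small sketch of `difference` with toy sets, unrelated to the test's real action names:

```rust
use std::collections::HashSet;

fn main() {
    let all: HashSet<&str> = ["search", "documents.add", "*"].into_iter().collect();
    let revoked: HashSet<&str> = ["search", "*"].into_iter().collect();

    // Elements of `all` that are not in `revoked`: the rights to keep
    // when removing exactly one capability group.
    let rest: Vec<&&str> = all.difference(&revoked).collect();
    assert_eq!(rest, vec![&"documents.add"]);
}
```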
@@ -130,7 +130,7 @@ pub fn default_settings(dir: impl AsRef<Path>) -> Opt {
     master_key: None,
     env: "development".to_owned(),
     #[cfg(all(not(debug_assertions), feature = "analytics"))]
-    no_analytics: Some(Some(true)),
+    no_analytics: true,
     max_index_size: Byte::from_unit(4.0, ByteUnit::GiB).unwrap(),
     max_task_db_size: Byte::from_unit(4.0, ByteUnit::GiB).unwrap(),
     http_payload_size_limit: Byte::from_unit(10.0, ByteUnit::MiB).unwrap(),
@@ -148,6 +148,8 @@ pub fn default_settings(dir: impl AsRef<Path>) -> Opt {
     schedule_snapshot: false,
     snapshot_interval_sec: 0,
     import_dump: None,
+    ignore_missing_dump: false,
+    ignore_dump_if_db_exists: false,
     indexer_options: IndexerOpts {
         // memory has to be unlimited because several meilisearch are running in test context.
         max_memory: MaxMemory::unlimited(),
@@ -710,20 +710,11 @@ async fn replace_document() {
 }

 #[actix_rt::test]
-async fn error_add_no_documents() {
+async fn add_no_documents() {
     let server = Server::new().await;
     let index = server.index("test");
-    let (response, code) = index.add_documents(json!([]), None).await;
-
-    let expected_response = json!({
-        "message": "The `json` payload must contain at least one document.",
-        "code": "malformed_payload",
-        "type": "invalid_request",
-        "link": "https://docs.meilisearch.com/errors#malformed_payload"
-    });
-
-    assert_eq!(response, expected_response);
-    assert_eq!(code, 400);
+    let (_response, code) = index.add_documents(json!([]), None).await;
+    assert_eq!(code, 202);
 }

 #[actix_rt::test]
@@ -43,8 +43,8 @@ async fn error_delete_unexisting_index() {
     assert_eq!(response["error"], expected_response);
 }

-#[cfg(not(windows))]
 #[actix_rt::test]
+#[cfg_attr(target_os = "windows", ignore)]
 async fn loop_delete_add_documents() {
     let server = Server::new().await;
     let index = server.index("test");
@@ -1,14 +1,12 @@
 [package]
 name = "meilisearch-lib"
 version = "0.25.2"
-edition = "2018"
-resolver = "2"
+edition = "2021"

 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

 [dependencies]
-actix-web = { version = "4.0.0-beta.9", features = ["rustls"] }
-actix-web-static-files = { git = "https://github.com/MarinPostma/actix-web-static-files.git", rev = "39d8006", optional = true }
+actix-web = { version = "4.0.0-beta.21", default-features = false }
 anyhow = { version = "1.0.43", features = ["backtrace"] }
 async-stream = "0.3.2"
 async-trait = "0.1.51"
@@ -43,7 +41,7 @@ serde = { version = "1.0.130", features = ["derive"] }
 serde_json = { version = "1.0.67", features = ["preserve_order"] }
 siphasher = "0.3.7"
 slice-group-by = "0.2.6"
-structopt = "0.3.23"
+clap = { version = "3.0", features = ["derive", "env"] }
 tar = "0.4.37"
 tempfile = "3.2.0"
 thiserror = "1.0.28"
@@ -32,8 +32,6 @@ pub enum DocumentFormatError {
         Box<dyn std::error::Error + Send + Sync + 'static>,
         PayloadType,
     ),
-    #[error("The `{0}` payload must contain at least one document.")]
-    EmptyPayload(PayloadType),
 }

 impl From<(PayloadType, milli::documents::Error)> for DocumentFormatError {
@@ -50,7 +48,6 @@ impl ErrorCode for DocumentFormatError {
         match self {
             DocumentFormatError::Internal(_) => Code::Internal,
             DocumentFormatError::MalformedPayload(_, _) => Code::MalformedPayload,
-            DocumentFormatError::EmptyPayload(_) => Code::MalformedPayload,
         }
     }
 }
@@ -63,10 +60,6 @@ pub fn read_csv(input: impl Read, writer: impl Write + Seek) -> Result<usize> {
     let builder =
         DocumentBatchBuilder::from_csv(input, writer).map_err(|e| (PayloadType::Csv, e))?;

-    if builder.len() == 0 {
-        return Err(DocumentFormatError::EmptyPayload(PayloadType::Csv));
-    }
-
     let count = builder.finish().map_err(|e| (PayloadType::Csv, e))?;

     Ok(count)
@@ -81,16 +74,17 @@ pub fn read_ndjson(input: impl Read, writer: impl Write + Seek) -> Result<usize>
     let mut buf = String::new();

     while reader.read_line(&mut buf)? > 0 {
+        // skip empty lines
+        if buf == "\n" {
+            buf.clear();
+            continue;
+        }
         builder
             .extend_from_json(Cursor::new(&buf.as_bytes()))
             .map_err(|e| (PayloadType::Ndjson, e))?;
         buf.clear();
     }

-    if builder.len() == 0 {
-        return Err(DocumentFormatError::EmptyPayload(PayloadType::Ndjson));
-    }
-
     let count = builder.finish().map_err(|e| (PayloadType::Ndjson, e))?;

     Ok(count)
@@ -104,10 +98,6 @@ pub fn read_json(input: impl Read, writer: impl Write + Seek) -> Result<usize> {
     .extend_from_json(input)
     .map_err(|e| (PayloadType::Json, e))?;

-    if builder.len() == 0 {
-        return Err(DocumentFormatError::EmptyPayload(PayloadType::Json));
-    }
-
     let count = builder.finish().map_err(|e| (PayloadType::Json, e))?;

     Ok(count)
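Editor's note: with `EmptyPayload` removed, an empty payload is now reported as `Ok(0)` and each caller decides whether that is an error (the dump loader below treats it as a normal empty index). A toy, runnable sketch of that contract, using a hypothetical counter rather than the meilisearch-lib API:

```rust
use std::io::{BufRead, BufReader, Read};

// Toy sketch: count ndjson documents, skipping blank lines, and
// return Ok(0) for an empty payload instead of a dedicated error.
fn count_ndjson_docs(input: impl Read) -> std::io::Result<usize> {
    let mut reader = BufReader::new(input);
    let mut buf = String::new();
    let mut count = 0;
    while reader.read_line(&mut buf)? > 0 {
        // skip empty lines, mirroring the hunk above
        if buf.trim().is_empty() {
            buf.clear();
            continue;
        }
        count += 1;
        buf.clear();
    }
    Ok(count) // 0 means "empty payload"; the caller chooses the policy
}

fn main() -> std::io::Result<()> {
    let n = count_ndjson_docs("{\"id\":1}\n\n{\"id\":2}\n".as_bytes())?;
    assert_eq!(n, 2);
    Ok(())
}
```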
@@ -8,7 +8,7 @@ use indexmap::IndexMap;
 use milli::documents::DocumentBatchReader;
 use serde::{Deserialize, Serialize};

-use crate::document_formats::{read_ndjson, DocumentFormatError};
+use crate::document_formats::read_ndjson;
 use crate::index::update_handler::UpdateHandler;
 use crate::index::updates::apply_settings_to_builder;

@@ -128,8 +128,8 @@ impl Index {

         let empty = match read_ndjson(reader, &mut tmp_doc_file) {
             // if there was no document in the file it's because the index was empty
+            Ok(0) => true,
             Ok(_) => false,
-            Err(DocumentFormatError::EmptyPayload(_)) => true,
             Err(e) => return Err(e.into()),
         };

@@ -877,7 +877,7 @@ mod test {
         assert_eq!(value["publication_year"], "<em>1937</em>");
     }

-    /// https://github.com/meilisearch/MeiliSearch/issues/1368
+    /// https://github.com/meilisearch/meilisearch/issues/1368
     #[test]
     fn formatted_with_highlight_emoji() {
         let stop_words = fst::Set::default();
@@ -1,4 +1,5 @@
 use std::path::Path;
+use std::sync::Arc;

 use heed::EnvOpenOptions;
 use log::info;
@@ -27,7 +28,7 @@ pub fn load_dump(
     let mut options = EnvOpenOptions::new();
     options.map_size(meta_env_size);
     options.max_dbs(100);
-    let env = options.open(&dst)?;
+    let env = Arc::new(options.open(&dst)?);

     IndexResolver::load_dump(
         src.as_ref(),
@@ -1,14 +1,16 @@
 use std::fs::File;
 use std::path::{Path, PathBuf};

+use anyhow::bail;
 use chrono::{DateTime, Utc};
-use log::{info, trace, warn};
+use log::{info, trace};
 use serde::{Deserialize, Serialize};

 pub use actor::DumpActor;
 pub use handle_impl::*;
 use meilisearch_auth::AuthController;
 pub use message::DumpMsg;
+use tempfile::TempDir;
 use tokio::fs::create_dir_all;
 use tokio::sync::oneshot;

@@ -79,6 +81,47 @@ pub enum MetadataVersion {
 }

 impl MetadataVersion {
+    pub fn load_dump(
+        self,
+        src: impl AsRef<Path>,
+        dst: impl AsRef<Path>,
+        index_db_size: usize,
+        meta_env_size: usize,
+        indexing_options: &IndexerOpts,
+    ) -> anyhow::Result<()> {
+        match self {
+            MetadataVersion::V1(_meta) => {
+                anyhow::bail!("The version 1 of the dumps is not supported anymore. You can re-export your dump from a version between 0.21 and 0.24, or start fresh from a version 0.25 onwards.")
+            }
+            MetadataVersion::V2(meta) => v2::load_dump(
+                meta,
+                src,
+                dst,
+                index_db_size,
+                meta_env_size,
+                indexing_options,
+            )?,
+            MetadataVersion::V3(meta) => v3::load_dump(
+                meta,
+                src,
+                dst,
+                index_db_size,
+                meta_env_size,
+                indexing_options,
+            )?,
+            MetadataVersion::V4(meta) => v4::load_dump(
+                meta,
+                src,
+                dst,
+                index_db_size,
+                meta_env_size,
+                indexing_options,
+            )?,
+        }
+
+        Ok(())
+    }
+
     pub fn new_v4(index_db_size: usize, update_db_size: usize) -> Self {
         let meta = Metadata::new(index_db_size, update_db_size);
         Self::V4(meta)
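Editor's note: the method above moves the version dispatch out of the free `load_dump` function and onto the enum itself, so each on-disk dump format routes to its own loader. A stripped-down sketch of that shape, with toy variants and loader bodies standing in for the real v2/v3/v4 modules:

```rust
// Sketch: version-tagged metadata dispatching to per-version loaders.
enum Metadata {
    V1,
    V2(String),
    V3(String),
}

fn load(meta: Metadata) -> anyhow::Result<()> {
    match meta {
        // Oldest format: refuse with an actionable message instead of panicking.
        Metadata::V1 => anyhow::bail!("v1 dumps are no longer supported"),
        Metadata::V2(m) => println!("loading v2 dump: {m}"),
        Metadata::V3(m) => println!("loading v3 dump: {m}"),
    }
    Ok(())
}
```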
@@ -160,10 +203,46 @@ impl DumpInfo {
 pub fn load_dump(
     dst_path: impl AsRef<Path>,
     src_path: impl AsRef<Path>,
+    ignore_dump_if_db_exists: bool,
+    ignore_missing_dump: bool,
     index_db_size: usize,
     update_db_size: usize,
     indexer_opts: &IndexerOpts,
 ) -> anyhow::Result<()> {
+    let empty_db = crate::is_empty_db(&dst_path);
+    let src_path_exists = src_path.as_ref().exists();
+
+    if empty_db && src_path_exists {
+        let (tmp_src, tmp_dst, meta) = extract_dump(&dst_path, &src_path)?;
+        meta.load_dump(
+            tmp_src.path(),
+            tmp_dst.path(),
+            index_db_size,
+            update_db_size,
+            indexer_opts,
+        )?;
+        persist_dump(&dst_path, tmp_dst)?;
+        Ok(())
+    } else if !empty_db && !ignore_dump_if_db_exists {
+        bail!(
+            "database already exists at {:?}, try to delete it or rename it",
+            dst_path
+                .as_ref()
+                .canonicalize()
+                .unwrap_or_else(|_| dst_path.as_ref().to_owned())
+        )
+    } else if !src_path_exists && !ignore_missing_dump {
+        bail!("dump doesn't exist at {:?}", src_path.as_ref())
+    } else {
+        // there is nothing to do
+        Ok(())
+    }
+}
+
+fn extract_dump(
+    dst_path: impl AsRef<Path>,
+    src_path: impl AsRef<Path>,
+) -> anyhow::Result<(TempDir, TempDir, MetadataVersion)> {
     // Setup a temp directory path in the same path as the database, to prevent cross devices
     // references.
     let temp_path = dst_path
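Editor's note: the new `load_dump` above gates the import on two booleans, which yields four cases (import, refuse because a database exists, refuse because the dump is missing, or do nothing). A compact, runnable sketch of the same decision logic in isolation; `empty_db`/`src_exists` stand in for the real filesystem checks and the error texts are placeholders:

```rust
use anyhow::bail;

// Sketch of the four-way decision in load_dump above.
fn decide(
    empty_db: bool,
    src_exists: bool,
    ignore_dump_if_db_exists: bool,
    ignore_missing_dump: bool,
) -> anyhow::Result<&'static str> {
    if empty_db && src_exists {
        Ok("import the dump")
    } else if !empty_db && !ignore_dump_if_db_exists {
        bail!("database already exists")
    } else if !src_exists && !ignore_missing_dump {
        bail!("dump doesn't exist")
    } else {
        Ok("nothing to do") // existing db kept, or missing dump ignored
    }
}

fn main() {
    assert_eq!(decide(true, true, false, false).unwrap(), "import the dump");
    assert!(decide(false, true, false, false).is_err());
    assert_eq!(decide(true, false, false, true).unwrap(), "nothing to do");
}
```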
@@ -186,7 +265,11 @@ pub fn load_dump(
     let mut meta_file = File::open(&meta_path)?;
     let meta: MetadataVersion = serde_json::from_reader(&mut meta_file)?;

-    let tmp_dst = tempfile::tempdir()?;
+    if !dst_path.as_ref().exists() {
+        std::fs::create_dir_all(dst_path.as_ref())?;
+    }
+
+    let tmp_dst = tempfile::tempdir_in(dst_path.as_ref())?;

     info!(
         "Loading dump {}, dump database version: {}, dump version: {}",
@@ -197,43 +280,37 @@ pub fn load_dump(
         meta.version()
     );

-    match meta {
-        MetadataVersion::V1(_meta) => {
-            anyhow::bail!("The version 1 of the dumps is not supported anymore. You can re-export your dump from a version between 0.21 and 0.24, or start fresh from a version 0.25 onwards.")
-        }
-        MetadataVersion::V2(meta) => v2::load_dump(
-            meta,
-            &tmp_src_path,
-            tmp_dst.path(),
-            index_db_size,
-            update_db_size,
-            indexer_opts,
-        )?,
-        MetadataVersion::V3(meta) => v3::load_dump(
-            meta,
-            &tmp_src_path,
-            tmp_dst.path(),
-            index_db_size,
-            update_db_size,
-            indexer_opts,
-        )?,
-        MetadataVersion::V4(meta) => v4::load_dump(
-            meta,
-            &tmp_src_path,
-            tmp_dst.path(),
-            index_db_size,
-            update_db_size,
-            indexer_opts,
-        )?,
-    }
-    // Persist and atomically rename the db
+    Ok((tmp_src, tmp_dst, meta))
+}
+
+fn persist_dump(dst_path: impl AsRef<Path>, tmp_dst: TempDir) -> anyhow::Result<()> {
     let persisted_dump = tmp_dst.into_path();

+    // Delete everything in the `data.ms` except the tempdir.
     if dst_path.as_ref().exists() {
-        warn!("Overwriting database at {}", dst_path.as_ref().display());
-        std::fs::remove_dir_all(&dst_path)?;
+        for file in dst_path.as_ref().read_dir().unwrap() {
+            let file = file.unwrap().path();
+            if file.file_name() == persisted_dump.file_name() {
+                continue;
+            }
+
+            if file.is_file() {
+                std::fs::remove_file(&file)?;
+            } else {
+                std::fs::remove_dir_all(&file)?;
+            }
+        }
     }

-    std::fs::rename(&persisted_dump, &dst_path)?;
+    // Move the whole content of the tempdir into the `data.ms`.
+    for file in persisted_dump.read_dir().unwrap() {
+        let file = file.unwrap().path();
+
+        std::fs::rename(&file, &dst_path.as_ref().join(file.file_name().unwrap()))?;
+    }
+
+    // Delete the empty tempdir.
+    std::fs::remove_dir_all(&persisted_dump)?;
+
     Ok(())
 }
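Editor's note: the rewritten persistence step above creates the tempdir *inside* the destination directory (`tempdir_in`) so that every `rename` stays on one filesystem, then clears the old contents and moves the new ones up one level, instead of the previous remove-all-then-rename of the whole directory. A self-contained sketch of that pattern with illustrative `dst`/`tmp` paths:

```rust
use std::fs;
use std::path::Path;

// Sketch of the persist pattern above: clear the destination except for
// the tempdir, then move the tempdir's contents up one level.
fn persist(dst: &Path, tmp: &Path) -> std::io::Result<()> {
    for entry in fs::read_dir(dst)? {
        let path = entry?.path();
        if path.file_name() == tmp.file_name() {
            continue; // keep the tempdir we are about to move out of
        }
        if path.is_file() {
            fs::remove_file(&path)?;
        } else {
            fs::remove_dir_all(&path)?;
        }
    }
    for entry in fs::read_dir(tmp)? {
        let path = entry?.path();
        // same-filesystem rename: cheap and never copies data
        fs::rename(&path, dst.join(path.file_name().unwrap()))?;
    }
    fs::remove_dir_all(tmp)
}
```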
@@ -281,6 +358,7 @@ impl DumpJob {
         AuthController::dump(&self.db_path, &temp_dump_path)?;

         let dump_path = tokio::task::spawn_blocking(move || -> Result<PathBuf> {
+            let _ = &self;
            // for now we simply copy the updates/updates_files
            // FIXME: We may copy more files than necessary, if new files are added while we are
            // performing the dump. We need a way to filter them out.
@@ -150,6 +150,8 @@ pub struct IndexControllerBuilder {
     schedule_snapshot: bool,
     dump_src: Option<PathBuf>,
     dump_dst: Option<PathBuf>,
+    ignore_dump_if_db_exists: bool,
+    ignore_missing_dump: bool,
 }

 impl IndexControllerBuilder {
@@ -186,6 +188,8 @@ impl IndexControllerBuilder {
             load_dump(
                 db_path.as_ref(),
                 src_path,
+                self.ignore_dump_if_db_exists,
+                self.ignore_missing_dump,
                 index_size,
                 task_store_size,
                 &indexer_options,
@@ -198,7 +202,7 @@ impl IndexControllerBuilder {
         options.map_size(task_store_size);
         options.max_dbs(20);

-        let meta_env = options.open(&db_path)?;
+        let meta_env = Arc::new(options.open(&db_path)?);

         let update_file_store = UpdateFileStore::new(&db_path)?;
         // Create or overwrite the version file for this DB
@@ -296,18 +300,6 @@ impl IndexControllerBuilder {
         self
     }

-    /// Set the index controller builder's dump src.
-    pub fn set_dump_src(&mut self, dump_src: PathBuf) -> &mut Self {
-        self.dump_src.replace(dump_src);
-        self
-    }
-
-    /// Set the index controller builder's dump dst.
-    pub fn set_dump_dst(&mut self, dump_dst: PathBuf) -> &mut Self {
-        self.dump_dst.replace(dump_dst);
-        self
-    }
-
     /// Set the index controller builder's import snapshot.
     pub fn set_import_snapshot(&mut self, import_snapshot: PathBuf) -> &mut Self {
         self.import_snapshot.replace(import_snapshot);
@@ -325,6 +317,30 @@ impl IndexControllerBuilder {
         self.schedule_snapshot = true;
         self
     }
+
+    /// Set the index controller builder's dump src.
+    pub fn set_dump_src(&mut self, dump_src: PathBuf) -> &mut Self {
+        self.dump_src.replace(dump_src);
+        self
+    }
+
+    /// Set the index controller builder's dump dst.
+    pub fn set_dump_dst(&mut self, dump_dst: PathBuf) -> &mut Self {
+        self.dump_dst.replace(dump_dst);
+        self
+    }
+
+    /// Set the index controller builder's ignore dump if db exists.
+    pub fn set_ignore_dump_if_db_exists(&mut self, ignore_dump_if_db_exists: bool) -> &mut Self {
+        self.ignore_dump_if_db_exists = ignore_dump_if_db_exists;
+        self
+    }
+
+    /// Set the index controller builder's ignore missing dump.
+    pub fn set_ignore_missing_dump(&mut self, ignore_missing_dump: bool) -> &mut Self {
+        self.ignore_missing_dump = ignore_missing_dump;
+        self
+    }
 }

 impl<U, I> IndexController<U, I>
@@ -1,11 +1,14 @@
 #[derive(thiserror::Error, Debug)]
 pub enum VersionFileError {
-    #[error("Version file is missing or the previous MeiliSearch engine version was below 0.24.0. Use a dump to update MeiliSearch.")]
+    #[error(
+        "Meilisearch (v{}) failed to infer the version of the database. Please consider using a dump to load your data.",
+        env!("CARGO_PKG_VERSION").to_string()
+    )]
     MissingVersionFile,
-    #[error("Version file is corrupted and thus MeiliSearch is unable to determine the version of the database.")]
+    #[error("Version file is corrupted and thus Meilisearch is unable to determine the version of the database.")]
     MalformedVersionFile,
     #[error(
-        "Expected MeiliSearch engine version: {major}.{minor}.{patch}, current engine version: {}. To update MeiliSearch use a dump.",
+        "Expected Meilisearch engine version: {major}.{minor}.{patch}, current engine version: {}. To update Meilisearch use a dump.",
         env!("CARGO_PKG_VERSION").to_string()
     )]
     VersionMismatch {
|
@@ -12,7 +12,7 @@ static VERSION_MAJOR: &str = env!("CARGO_PKG_VERSION_MAJOR");
 static VERSION_MINOR: &str = env!("CARGO_PKG_VERSION_MINOR");
 static VERSION_PATCH: &str = env!("CARGO_PKG_VERSION_PATCH");
 
-// Persists the version of the current MeiliSearch binary to a VERSION file
+// Persists the version of the current Meilisearch binary to a VERSION file
 pub fn create_version_file(db_path: &Path) -> anyhow::Result<()> {
     let version_path = db_path.join(VERSION_FILE_NAME);
     fs::write(
@@ -23,7 +23,7 @@ pub fn create_version_file(db_path: &Path) -> anyhow::Result<()> {
     Ok(())
 }
 
-// Ensures MeiliSearch version is compatible with the database, returns an error versions mismatch.
+// Ensures the Meilisearch version is compatible with the database; returns an error on version mismatch.
 pub fn check_version_file(db_path: &Path) -> anyhow::Result<()> {
     let version_path = db_path.join(VERSION_FILE_NAME);
 
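`create_version_file` writes the `{major}.{minor}.{patch}` triple into a `VERSION` file, and `check_version_file` reads it back to compare against the running binary. The commit's exact implementation is not visible in this hunk, so the following is only a plausible sketch of such a check, assuming `anyhow` is available and that a patch-level difference is acceptable:

```rust
use std::fs;
use std::path::Path;

static VERSION_MAJOR: &str = env!("CARGO_PKG_VERSION_MAJOR");
static VERSION_MINOR: &str = env!("CARGO_PKG_VERSION_MINOR");
const VERSION_FILE_NAME: &str = "VERSION";

/// A sketch of the compatibility check: the database is considered
/// compatible when the stored major and minor components match the
/// running binary; the patch component is allowed to differ.
fn check_version_file(db_path: &Path) -> anyhow::Result<()> {
    let version = fs::read_to_string(db_path.join(VERSION_FILE_NAME))?;
    let parts: Vec<&str> = version.trim().split('.').collect();
    match parts.as_slice() {
        [major, minor, _patch] if *major == VERSION_MAJOR && *minor == VERSION_MINOR => Ok(()),
        [_, _, _] => anyhow::bail!("version mismatch: database was created by v{}", version.trim()),
        _ => anyhow::bail!("malformed version file"),
    }
}
```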
@@ -2,6 +2,7 @@ use std::collections::HashSet;
 use std::fs::{create_dir_all, File};
 use std::io::{BufRead, BufReader, Write};
 use std::path::{Path, PathBuf};
+use std::sync::Arc;
 
 use heed::types::{SerdeBincode, Str};
 use heed::{CompactionOption, Database, Env};
@@ -42,12 +43,20 @@ pub struct IndexMeta {
 
 #[derive(Clone)]
 pub struct HeedMetaStore {
-    env: Env,
+    env: Arc<Env>,
     db: Database<Str, SerdeBincode<IndexMeta>>,
 }
 
+impl Drop for HeedMetaStore {
+    fn drop(&mut self) {
+        if Arc::strong_count(&self.env) == 1 {
+            self.env.as_ref().clone().prepare_for_closing();
+        }
+    }
+}
+
 impl HeedMetaStore {
-    pub fn new(env: heed::Env) -> Result<Self> {
+    pub fn new(env: Arc<heed::Env>) -> Result<Self> {
         let db = env.create_database(Some("uuids"))?;
         Ok(Self { env, db })
     }
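This `Drop` implementation is the heart of the change: several stores now share one `Arc<heed::Env>`, and whichever owner drops the last strong reference closes the LMDB environment. Below is a reduced sketch of the same ownership pattern with a plain resource type standing in for `heed::Env`, so it runs without LMDB; note that the real code above clones the `Env` out of the `Arc` because heed's `prepare_for_closing` consumes it by value:

```rust
use std::sync::Arc;

struct Resource(&'static str);

impl Resource {
    fn prepare_for_closing(&self) {
        println!("closing {}", self.0);
    }
}

struct Store {
    env: Arc<Resource>,
}

impl Drop for Store {
    fn drop(&mut self) {
        // Only the store holding the last strong reference triggers the
        // close, mirroring the `Arc::strong_count` check in the diff.
        // Inside `drop`, `self.env` still counts as one owner.
        if Arc::strong_count(&self.env) == 1 {
            self.env.prepare_for_closing();
        }
    }
}

fn main() {
    let env = Arc::new(Resource("shared env"));
    let a = Store { env: env.clone() };
    let b = Store { env: env.clone() };
    drop(env); // the original handle goes away first
    drop(a);   // strong count is still 2 here, so no close
    drop(b);   // last owner: prints "closing shared env"
}
```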
@@ -144,7 +153,7 @@ impl HeedMetaStore {
         Ok(())
     }
 
-    pub fn load_dump(src: impl AsRef<Path>, env: heed::Env) -> Result<()> {
+    pub fn load_dump(src: impl AsRef<Path>, env: Arc<heed::Env>) -> Result<()> {
         let src_indexes = src.as_ref().join(UUIDS_DB_PATH).join("data.jsonl");
         let indexes = File::open(&src_indexes)?;
         let mut indexes = BufReader::new(indexes);
@@ -4,6 +4,7 @@ pub mod meta_store;
 
 use std::convert::TryInto;
 use std::path::Path;
+use std::sync::Arc;
 
 use chrono::Utc;
 use error::{IndexResolverError, Result};
@@ -16,13 +17,11 @@ use serde::{Deserialize, Serialize};
 use tokio::task::spawn_blocking;
 use uuid::Uuid;
 
-use crate::index::update_handler::UpdateHandler;
-use crate::index::{error::Result as IndexResult, Index};
+use crate::index::{error::Result as IndexResult, update_handler::UpdateHandler, Index};
 use crate::options::IndexerOpts;
 use crate::tasks::batch::Batch;
 use crate::tasks::task::{DocumentDeletion, Job, Task, TaskContent, TaskEvent, TaskId, TaskResult};
-use crate::tasks::Pending;
-use crate::tasks::TaskPerformer;
+use crate::tasks::{Pending, TaskPerformer};
 use crate::update_file_store::UpdateFileStore;
 
 use self::meta_store::IndexMeta;
@@ -39,7 +38,7 @@ pub fn create_index_resolver(
     path: impl AsRef<Path>,
     index_size: usize,
     indexer_opts: &IndexerOpts,
-    meta_env: heed::Env,
+    meta_env: Arc<heed::Env>,
     file_store: UpdateFileStore,
 ) -> anyhow::Result<HardStateIndexResolver> {
     let uuid_store = HeedMetaStore::new(meta_env)?;
@@ -153,7 +152,7 @@ impl IndexResolver<HeedMetaStore, MapIndexStore> {
         src: impl AsRef<Path>,
         dst: impl AsRef<Path>,
         index_db_size: usize,
-        env: Env,
+        env: Arc<Env>,
         indexer_opts: &IndexerOpts,
     ) -> anyhow::Result<()> {
         HeedMetaStore::load_dump(&src, env)?;
@@ -10,6 +10,8 @@ mod snapshot;
 pub mod tasks;
 mod update_file_store;
 
+use std::path::Path;
+
 pub use index_controller::MeiliSearch;
 
 pub use milli;
@@ -33,3 +35,19 @@ impl EnvSizer for heed::Env {
             .fold(0, |acc, m| acc + m.len())
     }
 }
+
+/// Check if a db is empty. It does not provide any information on the
+/// validity of the data in it.
+/// We consider a database as non empty when it's a non empty directory.
+pub fn is_empty_db(db_path: impl AsRef<Path>) -> bool {
+    let db_path = db_path.as_ref();
+
+    if !db_path.exists() {
+        true
+    // if we encounter an error or if the db is a file, we consider the db non empty
+    } else if let Ok(dir) = db_path.read_dir() {
+        dir.count() == 0
+    } else {
+        false
+    }
+}
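To make the promised semantics concrete — a missing path counts as empty, an empty directory counts as empty, and anything else does not — here is a hypothetical test, assuming the `tempfile` crate is available:

```rust
#[cfg(test)]
mod tests {
    use super::is_empty_db;

    #[test]
    fn empty_db_semantics() {
        let dir = tempfile::tempdir().unwrap();

        // A path that does not exist yet is considered empty.
        assert!(is_empty_db(dir.path().join("missing")));

        // A directory with no entries is empty...
        assert!(is_empty_db(dir.path()));

        // ...but one containing any file is not.
        std::fs::write(dir.path().join("data.mdb"), b"").unwrap();
        assert!(!is_empty_db(dir.path()));
    }
}
```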
@@ -2,19 +2,19 @@ use core::fmt;
 use std::{ops::Deref, str::FromStr};
 
 use byte_unit::{Byte, ByteError};
+use clap::Parser;
 use milli::CompressionType;
-use structopt::StructOpt;
 use sysinfo::{RefreshKind, System, SystemExt};
 
-#[derive(Debug, Clone, StructOpt)]
+#[derive(Debug, Clone, Parser)]
 pub struct IndexerOpts {
     /// The amount of documents to skip before printing
     /// a log regarding the indexing advancement.
-    #[structopt(long, default_value = "100000")] // 100k
+    #[clap(long, default_value = "100000")] // 100k
     pub log_every_n: usize,
 
     /// Grenad max number of chunks in bytes.
-    #[structopt(long)]
+    #[clap(long)]
     pub max_nb_chunks: Option<usize>,
 
     /// The maximum amount of memory the indexer will use. It defaults to 2/3
@@ -24,22 +24,22 @@ pub struct IndexerOpts {
     /// In case the engine is unable to retrieve the available memory the engine will
     /// try to use the memory it needs but without real limit, this can lead to
     /// Out-Of-Memory issues and it is recommended to specify the amount of memory to use.
-    #[structopt(long, default_value)]
+    #[clap(long, default_value_t)]
     pub max_memory: MaxMemory,
 
     /// The name of the compression algorithm to use when compressing intermediate
     /// Grenad chunks while indexing documents.
     ///
     /// Choosing a fast algorithm will make the indexing faster but may consume more memory.
-    #[structopt(long, default_value = "snappy", possible_values = &["snappy", "zlib", "lz4", "lz4hc", "zstd"])]
+    #[clap(long, default_value = "snappy", possible_values = &["snappy", "zlib", "lz4", "lz4hc", "zstd"])]
     pub chunk_compression_type: CompressionType,
 
     /// The level of compression of the chosen algorithm.
-    #[structopt(long, requires = "chunk-compression-type")]
+    #[clap(long, requires = "chunk-compression-type")]
     pub chunk_compression_level: Option<u32>,
 
     /// Number of parallel jobs for indexing, defaults to # of CPUs.
-    #[structopt(long)]
+    #[clap(long)]
     pub indexing_jobs: Option<usize>,
 }
 
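The structopt-to-clap migration above is mechanical: `#[derive(StructOpt)]` becomes `#[derive(Parser)]`, `#[structopt(...)]` attributes become `#[clap(...)]`, and a bare `default_value` on a `Display + Default` type becomes `default_value_t`. A minimal sketch with a made-up options struct, in the clap 3 style this commit targets:

```rust
use clap::Parser;

#[derive(Debug, Parser)]
struct Opts {
    // Equivalent of the old `#[structopt(long, default_value = "100000")]`:
    // the string default is parsed through `FromStr`.
    #[clap(long, default_value = "100000")]
    log_every_n: usize,

    // `default_value_t` formats the type's `Default` value with `Display`,
    // replacing structopt's bare `default_value`.
    #[clap(long, default_value_t)]
    verbosity: u8,
}

fn main() {
    let opts = Opts::parse();
    println!("{opts:?}");
}
```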
@@ -49,7 +49,10 @@ pub fn load_snapshot(
     ignore_snapshot_if_db_exists: bool,
     ignore_missing_snapshot: bool,
 ) -> anyhow::Result<()> {
-    if !db_path.as_ref().exists() && snapshot_path.as_ref().exists() {
+    let empty_db = crate::is_empty_db(&db_path);
+    let snapshot_path_exists = snapshot_path.as_ref().exists();
+
+    if empty_db && snapshot_path_exists {
         match from_tar_gz(snapshot_path, &db_path) {
             Ok(()) => Ok(()),
             Err(e) => {
@@ -58,7 +61,7 @@ pub fn load_snapshot(
                 Err(e)
             }
         }
-    } else if db_path.as_ref().exists() && !ignore_snapshot_if_db_exists {
+    } else if !empty_db && !ignore_snapshot_if_db_exists {
         bail!(
             "database already exists at {:?}, try to delete it or rename it",
             db_path
@@ -66,14 +69,8 @@ pub fn load_snapshot(
                 .canonicalize()
                 .unwrap_or_else(|_| db_path.as_ref().to_owned())
         )
-    } else if !snapshot_path.as_ref().exists() && !ignore_missing_snapshot {
-        bail!(
-            "snapshot doesn't exist at {:?}",
-            snapshot_path
-                .as_ref()
-                .canonicalize()
-                .unwrap_or_else(|_| snapshot_path.as_ref().to_owned())
-        )
+    } else if !snapshot_path_exists && !ignore_missing_snapshot {
+        bail!("snapshot doesn't exist at {:?}", snapshot_path.as_ref())
     } else {
         Ok(())
     }
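Rewritten around `empty_db` and `snapshot_path_exists`, the four outcomes of `load_snapshot` can be read off directly: import when the database is empty and a snapshot exists, fail on a non-empty database unless that is explicitly ignored, fail on a missing snapshot unless that is explicitly ignored, and otherwise do nothing. A condensed sketch of just that decision logic, with the I/O stripped away:

```rust
/// What `load_snapshot` decides, with the actual extraction and
/// error reporting replaced by a small enum for illustration.
#[derive(Debug, PartialEq)]
enum Decision {
    Import,
    FailDbNotEmpty,
    FailSnapshotMissing,
    Skip,
}

fn decide(empty_db: bool, snapshot_exists: bool, ignore_db: bool, ignore_missing: bool) -> Decision {
    if empty_db && snapshot_exists {
        Decision::Import
    } else if !empty_db && !ignore_db {
        Decision::FailDbNotEmpty
    } else if !snapshot_exists && !ignore_missing {
        Decision::FailSnapshotMissing
    } else {
        Decision::Skip
    }
}

fn main() {
    assert_eq!(decide(true, true, false, false), Decision::Import);
    assert_eq!(decide(false, true, false, false), Decision::FailDbNotEmpty);
    assert_eq!(decide(true, false, false, true), Decision::Skip);
}
```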
@@ -32,7 +32,7 @@ pub trait TaskPerformer: Sync + Send + 'static {
     async fn finish(&self, batch: &Batch);
 }
 
-pub fn create_task_store<P>(env: heed::Env, performer: Arc<P>) -> Result<TaskStore>
+pub fn create_task_store<P>(env: Arc<heed::Env>, performer: Arc<P>) -> Result<TaskStore>
 where
     P: TaskPerformer,
 {
@@ -55,7 +55,7 @@ pub enum TaskEvent {
     },
 }
 
-/// A task represents an operation that MeiliSearch must do.
+/// A task represents an operation that Meilisearch must do.
 /// It's stored on disk and executed from the lowest to highest Task id.
 /// Every time a new task is created, it has a higher Task id than the previous one.
 /// See also `Job`.
@@ -91,7 +91,7 @@ impl Task {
 
 /// A job is like a volatile priority `Task`.
 /// It should be processed as fast as possible and is not stored on disk.
-/// This means, when MeiliSearch is closed all your unprocessed jobs will disappear.
+/// This means that when Meilisearch is closed, all your unprocessed jobs will disappear.
 #[derive(Debug, derivative::Derivative)]
 #[derivative(PartialEq)]
 pub enum Job {
@@ -114,7 +114,7 @@ impl Clone for TaskStore {
 }
 
 impl TaskStore {
-    pub fn new(env: heed::Env) -> Result<Self> {
+    pub fn new(env: Arc<heed::Env>) -> Result<Self> {
         let mut store = Store::new(env)?;
         let unfinished_tasks = store.reset_and_return_unfinished_tasks()?;
         let store = Arc::new(store);
@@ -293,7 +293,7 @@ impl TaskStore {
         Ok(())
     }
 
-    pub fn load_dump(src: impl AsRef<Path>, env: Env) -> anyhow::Result<()> {
+    pub fn load_dump(src: impl AsRef<Path>, env: Arc<Env>) -> anyhow::Result<()> {
         // create a dummy update field store, since it is not needed right now.
         let store = Self::new(env.clone())?;
 
@@ -340,7 +340,7 @@ pub mod test {
     }
 
     impl MockTaskStore {
-        pub fn new(env: heed::Env) -> Result<Self> {
+        pub fn new(env: Arc<heed::Env>) -> Result<Self> {
             Ok(Self::Real(TaskStore::new(env)?))
         }
 
@@ -432,7 +432,7 @@ pub mod test {
         }
     }
 
-    pub fn load_dump(path: impl AsRef<Path>, env: Env) -> anyhow::Result<()> {
+    pub fn load_dump(path: impl AsRef<Path>, env: Arc<Env>) -> anyhow::Result<()> {
         TaskStore::load_dump(path, env)
     }
 }
@@ -10,6 +10,7 @@ use std::convert::TryInto;
 use std::mem::size_of;
 use std::ops::Range;
 use std::result::Result as StdResult;
+use std::sync::Arc;
 
 use heed::types::{ByteSlice, OwnedType, SerdeJson, Unit};
 use heed::{BytesDecode, BytesEncode, Database, Env, RoTxn, RwTxn};
@@ -53,18 +54,26 @@ impl<'a> BytesDecode<'a> for IndexUidTaskIdCodec {
 }
 
 pub struct Store {
-    env: Env,
+    env: Arc<Env>,
     uids_task_ids: Database<IndexUidTaskIdCodec, Unit>,
     tasks: Database<OwnedType<BEU64>, SerdeJson<Task>>,
 }
 
+impl Drop for Store {
+    fn drop(&mut self) {
+        if Arc::strong_count(&self.env) == 1 {
+            self.env.as_ref().clone().prepare_for_closing();
+        }
+    }
+}
+
 impl Store {
     /// Create a new store from the specified `Path`.
     /// Be really cautious when calling this function, the returned `Store` may
     /// be in an invalid state, with dangling processing tasks.
     /// You want to patch all un-finished tasks and put them in your pending
     /// queue with the `reset_and_return_unfinished_tasks` method.
-    pub fn new(env: heed::Env) -> Result<Self> {
+    pub fn new(env: Arc<heed::Env>) -> Result<Self> {
         let uids_task_ids = env.create_database(Some(UID_TASK_IDS))?;
         let tasks = env.create_database(Some(TASKS))?;
 
@@ -78,7 +87,7 @@ impl Store {
     /// This function should be called *right after* creating the store.
     /// It puts back all unfinished updates in the `Created` state. This
     /// allows us to re-enqueue an update that didn't have the time to finish
-    /// when MeiliSearch closed.
+    /// when Meilisearch closed.
     pub fn reset_and_return_unfinished_tasks(&mut self) -> Result<BinaryHeap<Pending<TaskId>>> {
         let mut unfinished_tasks: BinaryHeap<Pending<TaskId>> = BinaryHeap::new();
 
@@ -257,10 +266,10 @@ pub mod test {
         Fake(Mocker),
     }
 
-    pub struct TmpEnv(TempDir, heed::Env);
+    pub struct TmpEnv(TempDir, Arc<heed::Env>);
 
     impl TmpEnv {
-        pub fn env(&self) -> heed::Env {
+        pub fn env(&self) -> Arc<heed::Env> {
             self.1.clone()
         }
     }
@@ -271,13 +280,13 @@ pub mod test {
         let mut options = EnvOpenOptions::new();
         options.map_size(4096 * 100000);
         options.max_dbs(1000);
-        let env = options.open(tmp.path()).unwrap();
+        let env = Arc::new(options.open(tmp.path()).unwrap());
 
         TmpEnv(tmp, env)
     }
 
     impl MockStore {
-        pub fn new(env: heed::Env) -> Result<Self> {
+        pub fn new(env: Arc<heed::Env>) -> Result<Self> {
             Ok(Self::Real(Store::new(env)?))
         }
 
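In the test helpers, the freshly opened environment is wrapped in an `Arc` immediately, so the helper hands out the same shared handle the production code now expects. The pieces from the two test hunks above reassemble into roughly this, assuming the `heed` and `tempfile` crates:

```rust
use std::sync::Arc;

use heed::EnvOpenOptions;
use tempfile::TempDir;

pub struct TmpEnv(TempDir, Arc<heed::Env>);

impl TmpEnv {
    pub fn env(&self) -> Arc<heed::Env> {
        self.1.clone()
    }
}

pub fn tmp_env() -> TmpEnv {
    let tmp = tempfile::tempdir().unwrap();

    let mut options = EnvOpenOptions::new();
    options.map_size(4096 * 100000); // ~400 MB of address space
    options.max_dbs(1000);
    let env = Arc::new(options.open(tmp.path()).unwrap());

    // Keep the TempDir alive alongside the env so the backing files
    // are not deleted while the environment is still open.
    TmpEnv(tmp, env)
}
```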