Mirror of https://github.com/meilisearch/meilisearch.git (synced 2024-11-22 10:07:40 +08:00)

Merge remote-tracking branch 'milli/main' into import-milli

Commit 0cec352d2b

1
.github/dependabot.yml
vendored
@@ -2,7 +2,6 @@

version: 2
updates:

  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
30
.github/release-draft-template.yml
vendored
Normal file
@@ -0,0 +1,30 @@
name-template: 'Milli v$RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'
exclude-labels:
  - 'skip changelog'
version-resolver:
  minor:
    labels:
      - 'DB breaking'
      - 'API breaking'
  default: patch
categories:
  - title: 'API breaking'
    label: 'API breaking'
  - title: 'DB breaking'
    label: 'DB breaking'
  - title: 'Changes'
    label: 'no breaking'
template: |
  $CHANGES

  Thanks again to $CONTRIBUTORS! 🎉
no-changes-template: 'Changes are coming soon 😎'
sort-direction: 'ascending'
replacers:
  - search: '/(?:and )?@dependabot-preview(?:\[bot\])?,?/g'
    replace: ''
  - search: '/(?:and )?@bors(?:\[bot\])?,?/g'
    replace: ''
  - search: '/(?:and )?@meili-bot,?/g'
    replace: ''
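The `replacers` above strip bot handles out of the `$CONTRIBUTORS` list that release-drafter injects into the template. A rough shell equivalent of those three patterns, for illustration only (the sample string is made up; release-drafter applies the real regexes itself):

```bash
echo "Thanks again to @some-contributor, and @bors[bot], and @dependabot-preview[bot] and @meili-bot! 🎉" \
  | sed -E 's/(and )?@dependabot-preview(\[bot\])?,?//g; s/(and )?@bors(\[bot\])?,?//g; s/(and )?@meili-bot,?//g'
```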
14
.github/workflows/enforce-label.yml
vendored
Normal file
@@ -0,0 +1,14 @@
name: Enforce PR labels

on:
  pull_request:
    types: [labeled, unlabeled, opened, edited, synchronize]

jobs:
  enforce-label:
    name: Specify breaking
    runs-on: ubuntu-latest
    steps:
      - uses: yogevbd/enforce-label-action@2.2.2
        with:
          REQUIRED_LABELS_ANY: 'no breaking,DB breaking,API breaking,skip changelog'
77
.github/workflows/manual_benchmarks.yml
vendored
Normal file
@ -0,0 +1,77 @@
|
||||
name: Benchmarks
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
dataset_name:
|
||||
description: 'The name of the dataset used to benchmark (search_songs, search_wiki, search_geo or indexing)'
|
||||
required: false
|
||||
default: 'search_songs'
|
||||
|
||||
env:
|
||||
BENCH_NAME: ${{ github.event.inputs.dataset_name }}
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
timeout-minutes: 4320 # 72h
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
# Set variables
|
||||
- name: Set current branch name
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
|
||||
id: current_branch
|
||||
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
|
||||
id: normalized_current_branch
|
||||
- name: Set shorter commit SHA
|
||||
shell: bash
|
||||
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
|
||||
id: commit_sha
|
||||
- name: Set file basename with format "dataset_branch_commitSHA"
|
||||
shell: bash
|
||||
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
|
||||
id: file
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
|
||||
run: |
|
||||
cd benchmarks
|
||||
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}
|
||||
|
||||
# Generate critcmp files
|
||||
- name: Install critcmp
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: critcmp
|
||||
- name: Export critcmp file
|
||||
run: |
|
||||
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json
|
||||
|
||||
# Upload benchmarks
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
|
||||
uses: BetaHuhn/do-spaces-action@v2
|
||||
with:
|
||||
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
|
||||
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
|
||||
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
|
||||
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
|
||||
source: ${{ steps.file.outputs.basename }}.json
|
||||
out_dir: critcmp_results
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
|
||||
echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
|
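Because this workflow is `workflow_dispatch`-driven with a `dataset_name` input, it can also be started from a terminal instead of the Actions tab; a sketch assuming the GitHub CLI (`gh`) is installed and authenticated, with an illustrative branch name:

```bash
# trigger the Benchmarks workflow on a chosen branch and dataset
gh workflow run manual_benchmarks.yml --ref my-feature-branch -f dataset_name=search_geo
# follow the run; the uploaded file name appears in the job logs
gh run watch
```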
79
.github/workflows/push_benchmarks_indexing.yml
vendored
Normal file
@ -0,0 +1,79 @@
|
||||
name: Benchmarks indexing (push)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}
|
||||
BENCH_NAME: "indexing"
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
timeout-minutes: 4320 # 72h
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
# Set variables
|
||||
- name: Set current branch name
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
|
||||
id: current_branch
|
||||
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
|
||||
id: normalized_current_branch
|
||||
- name: Set shorter commit SHA
|
||||
shell: bash
|
||||
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
|
||||
id: commit_sha
|
||||
- name: Set file basename with format "dataset_branch_commitSHA"
|
||||
shell: bash
|
||||
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
|
||||
id: file
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
|
||||
run: |
|
||||
cd benchmarks
|
||||
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}
|
||||
|
||||
# Generate critcmp files
|
||||
- name: Install critcmp
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: critcmp
|
||||
- name: Export critcmp file
|
||||
run: |
|
||||
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json
|
||||
|
||||
# Upload benchmarks
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
|
||||
uses: BetaHuhn/do-spaces-action@v2
|
||||
with:
|
||||
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
|
||||
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
|
||||
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
|
||||
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
|
||||
source: ${{ steps.file.outputs.basename }}.json
|
||||
out_dir: critcmp_results
|
||||
|
||||
# Upload benchmarks to influxdb
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
|
||||
echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
|
78
.github/workflows/push_benchmarks_search_geo.yml
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
name: Benchmarks search geo (push)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
BENCH_NAME: "search_geo"
|
||||
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
# Set variables
|
||||
- name: Set current branch name
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
|
||||
id: current_branch
|
||||
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
|
||||
id: normalized_current_branch
|
||||
- name: Set shorter commit SHA
|
||||
shell: bash
|
||||
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
|
||||
id: commit_sha
|
||||
- name: Set file basename with format "dataset_branch_commitSHA"
|
||||
shell: bash
|
||||
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
|
||||
id: file
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
|
||||
run: |
|
||||
cd benchmarks
|
||||
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}
|
||||
|
||||
# Generate critcmp files
|
||||
- name: Install critcmp
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: critcmp
|
||||
- name: Export critcmp file
|
||||
run: |
|
||||
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json
|
||||
|
||||
# Upload benchmarks
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
|
||||
uses: BetaHuhn/do-spaces-action@v2
|
||||
with:
|
||||
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
|
||||
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
|
||||
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
|
||||
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
|
||||
source: ${{ steps.file.outputs.basename }}.json
|
||||
out_dir: critcmp_results
|
||||
|
||||
# Upload benchmarks to influxdb
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
|
||||
echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
|
78
.github/workflows/push_benchmarks_search_songs.yml
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
name: Benchmarks search songs (push)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
BENCH_NAME: "search_songs"
|
||||
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
# Set variables
|
||||
- name: Set current branch name
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
|
||||
id: current_branch
|
||||
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
|
||||
id: normalized_current_branch
|
||||
- name: Set shorter commit SHA
|
||||
shell: bash
|
||||
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
|
||||
id: commit_sha
|
||||
- name: Set file basename with format "dataset_branch_commitSHA"
|
||||
shell: bash
|
||||
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
|
||||
id: file
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
|
||||
run: |
|
||||
cd benchmarks
|
||||
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}
|
||||
|
||||
# Generate critcmp files
|
||||
- name: Install critcmp
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: critcmp
|
||||
- name: Export critcmp file
|
||||
run: |
|
||||
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json
|
||||
|
||||
# Upload benchmarks
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
|
||||
uses: BetaHuhn/do-spaces-action@v2
|
||||
with:
|
||||
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
|
||||
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
|
||||
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
|
||||
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
|
||||
source: ${{ steps.file.outputs.basename }}.json
|
||||
out_dir: critcmp_results
|
||||
|
||||
# Upload benchmarks to influxdb
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
|
||||
echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
|
78
.github/workflows/push_benchmarks_search_wiki.yml
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
name: Benchmarks search wikipedia articles (push)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
BENCH_NAME: "search_wiki"
|
||||
INFLUX_TOKEN: ${{ secrets.INFLUX_TOKEN }}
|
||||
|
||||
jobs:
|
||||
benchmarks:
|
||||
name: Run and upload benchmarks
|
||||
runs-on: benchmarks
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
profile: minimal
|
||||
toolchain: stable
|
||||
override: true
|
||||
|
||||
# Set variables
|
||||
- name: Set current branch name
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/})" >> $GITHUB_OUTPUT
|
||||
id: current_branch
|
||||
- name: Set normalized current branch name # Replace `/` by `_` in branch name to avoid issues when pushing to S3
|
||||
shell: bash
|
||||
run: echo "name=$(echo ${GITHUB_REF#refs/heads/} | tr '/' '_')" >> $GITHUB_OUTPUT
|
||||
id: normalized_current_branch
|
||||
- name: Set shorter commit SHA
|
||||
shell: bash
|
||||
run: echo "short=$(echo $GITHUB_SHA | cut -c1-8)" >> $GITHUB_OUTPUT
|
||||
id: commit_sha
|
||||
- name: Set file basename with format "dataset_branch_commitSHA"
|
||||
shell: bash
|
||||
run: echo "basename=$(echo ${BENCH_NAME}_${{ steps.normalized_current_branch.outputs.name }}_${{ steps.commit_sha.outputs.short }})" >> $GITHUB_OUTPUT
|
||||
id: file
|
||||
|
||||
# Run benchmarks
|
||||
- name: Run benchmarks - Dataset ${BENCH_NAME} - Branch ${{ steps.current_branch.outputs.name }} - Commit ${{ steps.commit_sha.outputs.short }}
|
||||
run: |
|
||||
cd benchmarks
|
||||
cargo bench --bench ${BENCH_NAME} -- --save-baseline ${{ steps.file.outputs.basename }}
|
||||
|
||||
# Generate critcmp files
|
||||
- name: Install critcmp
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: critcmp
|
||||
- name: Export critcmp file
|
||||
run: |
|
||||
critcmp --export ${{ steps.file.outputs.basename }} > ${{ steps.file.outputs.basename }}.json
|
||||
|
||||
# Upload benchmarks
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to DO Spaces # DigitalOcean Spaces = S3
|
||||
uses: BetaHuhn/do-spaces-action@v2
|
||||
with:
|
||||
access_key: ${{ secrets.DO_SPACES_ACCESS_KEY }}
|
||||
secret_key: ${{ secrets.DO_SPACES_SECRET_KEY }}
|
||||
space_name: ${{ secrets.DO_SPACES_SPACE_NAME }}
|
||||
space_region: ${{ secrets.DO_SPACES_SPACE_REGION }}
|
||||
source: ${{ steps.file.outputs.basename }}.json
|
||||
out_dir: critcmp_results
|
||||
|
||||
# Upload benchmarks to influxdb
|
||||
- name: Upload ${{ steps.file.outputs.basename }}.json to influxDB
|
||||
run: telegraf --config https://eu-central-1-1.aws.cloud2.influxdata.com/api/v2/telegrafs/08b52e34a370b000 --once --debug
|
||||
|
||||
# Helper
|
||||
- name: 'README: compare with another benchmark'
|
||||
run: |
|
||||
echo "${{ steps.file.outputs.basename }}.json has just been pushed."
|
||||
echo 'How to compare this benchmark with another one?'
|
||||
echo ' - Check the available files with: ./benchmarks/scripts/list.sh'
|
||||
echo " - Run the following command: ./benchmaks/scipts/compare.sh <file-to-compare-with> ${{ steps.file.outputs.basename }}.json"
|
16
.github/workflows/release-drafter.yml
vendored
Normal file
@@ -0,0 +1,16 @@
name: Release Drafter

on:
  push:
    branches:
      - main

jobs:
  update_release_draft:
    runs-on: ubuntu-latest
    steps:
      - uses: release-drafter/release-drafter@v5
        with:
          config-name: release-draft-template.yml
        env:
          GITHUB_TOKEN: ${{ secrets.RELEASE_DRAFTER_TOKEN }}
@@ -13,7 +13,6 @@ env:
  GH_TOKEN: ${{ secrets.MEILI_BOT_GH_PAT }}

jobs:
  update-version-cargo-toml:
    name: Update version in Cargo.toml files
    runs-on: ubuntu-latest
4
.gitignore
vendored
@@ -8,9 +8,11 @@
/snapshots
/dumps

# Snapshots
## ... large
*.full.snap
## ... unreviewed
*.snap.new

# Fuzzcheck data for the facet indexing fuzz test
milli/fuzz/update::facet::incremental::fuzz::fuzz/
692
Cargo.lock
generated
File diff suppressed because it is too large
@@ -9,6 +9,12 @@ members = [
    "dump",
    "file-store",
    "permissive-json-pointer",
    "milli",
    "filter-parser",
    "flatten-serde-json",
    "json-depth-checker",
    "benchmarks",
    "cli"
]

[profile.release]
6
assets/logo-black.svg
Normal file
@@ -0,0 +1,6 @@
<svg width="277" height="236" viewBox="0 0 277 236" fill="none" xmlns="http://www.w3.org/2000/svg">
<path fill-rule="evenodd" clip-rule="evenodd" d="M213.085 190L242.907 86H276.196L246.375 190H213.085Z" fill="#494949"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M0 190L29.8215 86H63.1111L33.2896 190H0Z" fill="#494949"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M124.986 0L57.5772 235.083L60.7752 236H90.6038L158.276 0H124.986Z" fill="#494949"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M195.273 0L127.601 236H160.891L228.563 0H195.273Z" fill="#494949"/>
</svg>
After Width: | Height: | Size: 585 B |
1
benchmarks/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
benches/datasets_paths.rs
48
benchmarks/Cargo.toml
Normal file
@@ -0,0 +1,48 @@
[package]
name = "benchmarks"
version = "0.39.0"
edition = "2018"
publish = false

[dependencies]
anyhow = "1.0.65"
csv = "1.1.6"
milli = { path = "../milli", default-features = false }
mimalloc = { version = "0.1.29", default-features = false }
serde_json = { version = "1.0.85", features = ["preserve_order"] }

[dev-dependencies]
criterion = { version = "0.4.0", features = ["html_reports"] }
rand = "0.8.5"
rand_chacha = "0.3.1"
roaring = "0.10.1"

[build-dependencies]
anyhow = "1.0.65"
bytes = "1.2.1"
convert_case = "0.6.0"
flate2 = "1.0.24"
reqwest = { version = "0.11.12", features = ["blocking", "rustls-tls"], default-features = false }

[features]
default = ["milli/default"]

[[bench]]
name = "search_songs"
harness = false

[[bench]]
name = "search_wiki"
harness = false

[[bench]]
name = "search_geo"
harness = false

[[bench]]
name = "indexing"
harness = false

[[bench]]
name = "formatting"
harness = false
138
benchmarks/README.md
Normal file
@@ -0,0 +1,138 @@
Benchmarks
==========

## TOC

- [Run the benchmarks](#run-the-benchmarks)
- [Comparison between benchmarks](#comparison-between-benchmarks)
- [Datasets](#datasets)

## Run the benchmarks

### On our private server

The Meili team self-hosts its own GitHub runner to run benchmarks on our dedicated bare-metal server.

To trigger the benchmark workflow:
- Go to the `Actions` tab of this repository.
- Select the `Benchmarks` workflow on the left.
- Click on `Run workflow` in the blue banner.
- Select the branch on which you want to run the benchmarks and select the dataset you want (default: `songs`).
- Finally, click on `Run workflow`.

This GitHub workflow will run the benchmarks and push the `critcmp` report to a DigitalOcean Space (= S3).

The name of the uploaded file is displayed in the workflow.
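For reference, the workflows shown above build that file name as `<dataset>_<normalized branch>_<short commit SHA>.json`; a minimal shell sketch of the same steps (values illustrative):

```bash
branch=$(echo "${GITHUB_REF#refs/heads/}" | tr '/' '_')   # e.g. main, or feat_foo for feat/foo
short=$(echo "$GITHUB_SHA" | cut -c1-8)                   # shortened commit SHA
echo "${BENCH_NAME}_${branch}_${short}.json"               # e.g. search_songs_main_0cec352d.json
```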
_[More about critcmp](https://github.com/BurntSushi/critcmp)._

💡 To compare the just-uploaded benchmark with another one, check out the [next section](#comparison-between-benchmarks).

### On your machine

To run all the benchmarks (~5h):

```bash
cargo bench
```

To run only the `search_songs` (~1h), `search_wiki` (~3h), `search_geo` (~20m) or `indexing` (~2h) benchmark:

```bash
cargo bench --bench <dataset name>
```

By default, the datasets will be downloaded and uncompressed automatically in the target directory.<br>
If you don't want to download the datasets every time you update something on the code, you can specify a custom directory with the environment variable `MILLI_BENCH_DATASETS_PATH`:

```bash
mkdir ~/datasets
MILLI_BENCH_DATASETS_PATH=~/datasets cargo bench --bench search_songs # the four datasets are downloaded
touch build.rs
MILLI_BENCH_DATASETS_PATH=~/datasets cargo bench --bench songs # the code is compiled again but the datasets are not downloaded
```

## Comparison between benchmarks

The benchmark reports we push are generated with `critcmp`. Thus, we use `critcmp` to show the result of a benchmark, or compare results between multiple benchmarks.

We provide a script to download and display the comparison report.

Requirements:
- `grep`
- `curl`
- [`critcmp`](https://github.com/BurntSushi/critcmp)

List the available files in the DO Space:

```bash
./benchmarks/scripts/list.sh
```
```bash
songs_main_09a4321.json
songs_geosearch_24ec456.json
search_songs_main_cb45a10b.json
```

Run the comparison script:

```bash
# we get the result of ONE benchmark, this gives you an idea of how much time an operation took
./benchmarks/scripts/compare.sh son songs_geosearch_24ec456.json
# we compare two benchmarks
./benchmarks/scripts/compare.sh songs_main_09a4321.json songs_geosearch_24ec456.json
# we compare three benchmarks
./benchmarks/scripts/compare.sh songs_main_09a4321.json songs_geosearch_24ec456.json search_songs_main_cb45a10b.json
```
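Under the hood, `compare.sh` (shown at the end of this diff) downloads the listed JSON exports into `/tmp` with `curl` and hands them to `critcmp`; doing it by hand looks roughly like this (file names illustrative):

```bash
# download two exported baselines the way the script does
for f in songs_main_09a4321.json songs_geosearch_24ec456.json; do
  curl --silent "https://milli-benchmarks.fra1.digitaloceanspaces.com/critcmp_results/$f" --output "/tmp/$f"
done
# critcmp can compare exported JSON files passed as paths
critcmp /tmp/songs_main_09a4321.json /tmp/songs_geosearch_24ec456.json
```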
## Datasets

The benchmarks use the following datasets:
- `smol-songs`
- `smol-wiki`
- `movies`
- `smol-all-countries`

### Songs

`smol-songs` is a subset of the [`songs.csv` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/songs.csv.gz).

It was generated with this command:

```bash
xsv sample --seed 42 1000000 songs.csv -o smol-songs.csv
```

_[Download the generated `smol-songs` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/smol-songs.csv.gz)._

### Wiki

`smol-wiki` is a subset of the [`wikipedia-articles.csv` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/wiki-articles.csv.gz).

It was generated with the following command:

```bash
xsv sample --seed 42 500000 wiki-articles.csv -o smol-wiki-articles.csv
```

_[Download the `smol-wiki` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/smol-wiki-articles.csv.gz)._

### Movies

`movies` is a really small dataset we use as our example in the [getting started](https://docs.meilisearch.com/learn/getting_started/).

_[Download the `movies` dataset](https://docs.meilisearch.com/movies.json)._

### All Countries

`smol-all-countries` is a subset of the [`all-countries.csv` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/all-countries.csv.gz).
It has been converted to jsonlines and then edited so it matches our format for the `_geo` field.

It was generated with the following command:
```bash
bat all-countries.csv.gz | gunzip | xsv sample --seed 42 1000000 | csv2json-lite | sd '"latitude":"(.*?)","longitude":"(.*?)"' '"_geo": { "lat": $1, "lng": $2 }' | sd '\[|\]|,$' '' | gzip > smol-all-countries.jsonl.gz
```
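After that conversion, each line of `smol-all-countries.jsonl` is a standalone JSON document whose coordinates live under `_geo`, roughly `{"geonameid": 2988507, "name": "Paris", "_geo": {"lat": 48.85, "lng": 2.35}, ...}` (illustrative values; the real records keep the other GeoNames fields such as `population` and `elevation`).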

_[Download the `smol-all-countries` dataset](https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets/smol-all-countries.jsonl.gz)._
67
benchmarks/benches/formatting.rs
Normal file
@ -0,0 +1,67 @@
|
||||
use std::rc::Rc;
|
||||
|
||||
use criterion::{criterion_group, criterion_main};
|
||||
use milli::tokenizer::TokenizerBuilder;
|
||||
use milli::{FormatOptions, MatcherBuilder, MatchingWord, MatchingWords};
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
struct Conf<'a> {
|
||||
name: &'a str,
|
||||
text: &'a str,
|
||||
matching_words: MatcherBuilder<'a, Vec<u8>>,
|
||||
}
|
||||
|
||||
fn bench_formatting(c: &mut criterion::Criterion) {
|
||||
#[rustfmt::skip]
|
||||
let confs = &[
|
||||
Conf {
|
||||
name: "'the door d'",
|
||||
text: r#"He used to do the door sounds in "Star Trek" with his mouth, phssst, phssst. The MD-11 passenger and cargo doors also tend to behave like electromagnetic apertures, because the doors do not have continuous electrical contact with the door frames around the door perimeter. But Theodor said that the doors don't work."#,
|
||||
matching_words: MatcherBuilder::new(MatchingWords::new(vec![
|
||||
(vec![Rc::new(MatchingWord::new("t".to_string(), 0, false).unwrap()), Rc::new(MatchingWord::new("he".to_string(), 0, false).unwrap())], vec![0]),
|
||||
(vec![Rc::new(MatchingWord::new("the".to_string(), 0, false).unwrap())], vec![0]),
|
||||
(vec![Rc::new(MatchingWord::new("door".to_string(), 1, false).unwrap())], vec![1]),
|
||||
(vec![Rc::new(MatchingWord::new("do".to_string(), 0, false).unwrap()), Rc::new(MatchingWord::new("or".to_string(), 0, false).unwrap())], vec![0]),
|
||||
(vec![Rc::new(MatchingWord::new("thedoor".to_string(), 1, false).unwrap())], vec![0, 1]),
|
||||
(vec![Rc::new(MatchingWord::new("d".to_string(), 0, true).unwrap())], vec![2]),
|
||||
(vec![Rc::new(MatchingWord::new("thedoord".to_string(), 1, true).unwrap())], vec![0, 1, 2]),
|
||||
(vec![Rc::new(MatchingWord::new("doord".to_string(), 1, true).unwrap())], vec![1, 2]),
|
||||
]
|
||||
), TokenizerBuilder::default().build()),
|
||||
},
|
||||
];
|
||||
|
||||
let format_options = &[
|
||||
FormatOptions { highlight: false, crop: None },
|
||||
FormatOptions { highlight: true, crop: None },
|
||||
FormatOptions { highlight: false, crop: Some(10) },
|
||||
FormatOptions { highlight: true, crop: Some(10) },
|
||||
FormatOptions { highlight: false, crop: Some(20) },
|
||||
FormatOptions { highlight: true, crop: Some(20) },
|
||||
];
|
||||
|
||||
for option in format_options {
|
||||
let highlight = if option.highlight { "highlight" } else { "no-highlight" };
|
||||
|
||||
let name = match option.crop {
|
||||
Some(size) => format!("{}-crop({})", highlight, size),
|
||||
None => format!("{}-no-crop", highlight),
|
||||
};
|
||||
|
||||
let mut group = c.benchmark_group(&name);
|
||||
for conf in confs {
|
||||
group.bench_function(conf.name, |b| {
|
||||
b.iter(|| {
|
||||
let mut matcher = conf.matching_words.build(conf.text);
|
||||
matcher.format(option.clone());
|
||||
})
|
||||
});
|
||||
}
|
||||
group.finish();
|
||||
}
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_formatting);
|
||||
criterion_main!(benches);
|
1380
benchmarks/benches/indexing.rs
Normal file
File diff suppressed because it is too large
122
benchmarks/benches/search_geo.rs
Normal file
@ -0,0 +1,122 @@
|
||||
mod datasets_paths;
|
||||
mod utils;
|
||||
|
||||
use criterion::{criterion_group, criterion_main};
|
||||
use milli::update::Settings;
|
||||
use utils::Conf;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
fn base_conf(builder: &mut Settings) {
|
||||
let displayed_fields =
|
||||
["geonameid", "name", "asciiname", "alternatenames", "_geo", "population"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
builder.set_displayed_fields(displayed_fields);
|
||||
|
||||
let searchable_fields =
|
||||
["name", "alternatenames", "elevation"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_searchable_fields(searchable_fields);
|
||||
|
||||
let filterable_fields =
|
||||
["_geo", "population", "elevation"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_filterable_fields(filterable_fields);
|
||||
|
||||
let sortable_fields =
|
||||
["_geo", "population", "elevation"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_sortable_fields(sortable_fields);
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
const BASE_CONF: Conf = Conf {
|
||||
dataset: datasets_paths::SMOL_ALL_COUNTRIES,
|
||||
dataset_format: "jsonl",
|
||||
queries: &[
|
||||
"",
|
||||
],
|
||||
configure: base_conf,
|
||||
primary_key: Some("geonameid"),
|
||||
..Conf::BASE
|
||||
};
|
||||
|
||||
fn bench_geo(c: &mut criterion::Criterion) {
|
||||
#[rustfmt::skip]
|
||||
let confs = &[
|
||||
// A basic placeholder with no geo
|
||||
utils::Conf {
|
||||
group_name: "placeholder with no geo",
|
||||
..BASE_CONF
|
||||
},
|
||||
// Medium agglomeration: probably the most common use case
|
||||
utils::Conf {
|
||||
group_name: "asc sort from Lille",
|
||||
sort: Some(vec!["_geoPoint(50.62999333378238, 3.086269263384099):asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "desc sort from Lille",
|
||||
sort: Some(vec!["_geoPoint(50.62999333378238, 3.086269263384099):desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
// Big agglomeration: a lot of documents close to our point
|
||||
utils::Conf {
|
||||
group_name: "asc sort from Tokyo",
|
||||
sort: Some(vec!["_geoPoint(35.749512532692144, 139.61664952543356):asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "desc sort from Tokyo",
|
||||
sort: Some(vec!["_geoPoint(35.749512532692144, 139.61664952543356):desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
// The furthest point from any civilization
|
||||
utils::Conf {
|
||||
group_name: "asc sort from Point Nemo",
|
||||
sort: Some(vec!["_geoPoint(-48.87561645055408, -123.39275749319793):asc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "desc sort from Point Nemo",
|
||||
sort: Some(vec!["_geoPoint(-48.87561645055408, -123.39275749319793):desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
// Filters
|
||||
utils::Conf {
|
||||
group_name: "filter of 100km from Lille",
|
||||
filter: Some("_geoRadius(50.62999333378238, 3.086269263384099, 100000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "filter of 1km from Lille",
|
||||
filter: Some("_geoRadius(50.62999333378238, 3.086269263384099, 1000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "filter of 100km from Tokyo",
|
||||
filter: Some("_geoRadius(35.749512532692144, 139.61664952543356, 100000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "filter of 1km from Tokyo",
|
||||
filter: Some("_geoRadius(35.749512532692144, 139.61664952543356, 1000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "filter of 100km from Point Nemo",
|
||||
filter: Some("_geoRadius(-48.87561645055408, -123.39275749319793, 100000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "filter of 1km from Point Nemo",
|
||||
filter: Some("_geoRadius(-48.87561645055408, -123.39275749319793, 1000)"),
|
||||
..BASE_CONF
|
||||
},
|
||||
];
|
||||
|
||||
utils::run_benches(c, confs);
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_geo);
|
||||
criterion_main!(benches);
|
196
benchmarks/benches/search_songs.rs
Normal file
@ -0,0 +1,196 @@
|
||||
mod datasets_paths;
|
||||
mod utils;
|
||||
|
||||
use criterion::{criterion_group, criterion_main};
|
||||
use milli::update::Settings;
|
||||
use utils::Conf;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
fn base_conf(builder: &mut Settings) {
|
||||
let displayed_fields =
|
||||
["id", "title", "album", "artist", "genre", "country", "released", "duration"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
builder.set_displayed_fields(displayed_fields);
|
||||
|
||||
let searchable_fields = ["title", "album", "artist"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_searchable_fields(searchable_fields);
|
||||
|
||||
let faceted_fields = ["released-timestamp", "duration-float", "genre", "country", "artist"]
|
||||
.iter()
|
||||
.map(|s| s.to_string())
|
||||
.collect();
|
||||
builder.set_filterable_fields(faceted_fields);
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
const BASE_CONF: Conf = Conf {
|
||||
dataset: datasets_paths::SMOL_SONGS,
|
||||
queries: &[
|
||||
"john ", // 9097
|
||||
"david ", // 4794
|
||||
"charles ", // 1957
|
||||
"david bowie ", // 1200
|
||||
"michael jackson ", // 600
|
||||
"thelonious monk ", // 303
|
||||
"charles mingus ", // 142
|
||||
"marcus miller ", // 60
|
||||
"tamo ", // 13
|
||||
"Notstandskomitee ", // 4
|
||||
],
|
||||
configure: base_conf,
|
||||
primary_key: Some("id"),
|
||||
..Conf::BASE
|
||||
};
|
||||
|
||||
fn bench_songs(c: &mut criterion::Criterion) {
|
||||
let default_criterion: Vec<String> =
|
||||
milli::default_criteria().iter().map(|criteria| criteria.to_string()).collect();
|
||||
let default_criterion = default_criterion.iter().map(|s| s.as_str());
|
||||
let asc_default: Vec<&str> =
|
||||
std::iter::once("released-timestamp:asc").chain(default_criterion.clone()).collect();
|
||||
let desc_default: Vec<&str> =
|
||||
std::iter::once("released-timestamp:desc").chain(default_criterion.clone()).collect();
|
||||
|
||||
let basic_with_quote: Vec<String> = BASE_CONF
|
||||
.queries
|
||||
.iter()
|
||||
.map(|s| {
|
||||
s.trim().split(' ').map(|s| format!(r#""{}""#, s)).collect::<Vec<String>>().join(" ")
|
||||
})
|
||||
.collect();
|
||||
let basic_with_quote: &[&str] =
|
||||
&basic_with_quote.iter().map(|s| s.as_str()).collect::<Vec<&str>>();
|
||||
|
||||
#[rustfmt::skip]
|
||||
let confs = &[
|
||||
/* first we bench each criterion alone */
|
||||
utils::Conf {
|
||||
group_name: "proximity",
|
||||
queries: &[
|
||||
"black saint sinner lady ",
|
||||
"les dangeureuses 1960 ",
|
||||
"The Disneyland Sing-Along Chorus ",
|
||||
"Under Great Northern Lights ",
|
||||
"7000 Danses Un Jour Dans Notre Vie ",
|
||||
],
|
||||
criterion: Some(&["proximity"]),
|
||||
optional_words: false,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "typo",
|
||||
queries: &[
|
||||
"mongus ",
|
||||
"thelonius monk ",
|
||||
"Disnaylande ",
|
||||
"the white striper ",
|
||||
"indochie ",
|
||||
"indochien ",
|
||||
"klub des loopers ",
|
||||
"fear of the duck ",
|
||||
"michel depech ",
|
||||
"stromal ",
|
||||
"dire straights ",
|
||||
"Arethla Franklin ",
|
||||
],
|
||||
criterion: Some(&["typo"]),
|
||||
optional_words: false,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "words",
|
||||
queries: &[
|
||||
"the black saint and the sinner lady and the good doggo ", // four words to pop
|
||||
"les liaisons dangeureuses 1793 ", // one word to pop
|
||||
"The Disneyland Children's Sing-Alone song ", // two words to pop
|
||||
"seven nation mummy ", // one word to pop
|
||||
"7000 Danses / Le Baiser / je me trompe de mots ", // four words to pop
|
||||
"Bring Your Daughter To The Slaughter but now this is not part of the title ", // nine words to pop
|
||||
"whathavenotnsuchforth and a good amount of words to pop to match the first one ", // 13
|
||||
],
|
||||
criterion: Some(&["words"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "asc",
|
||||
criterion: Some(&["released-timestamp:desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "desc",
|
||||
criterion: Some(&["released-timestamp:desc"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
/* then we bench the asc and desc criterion on top of the default criterion */
|
||||
utils::Conf {
|
||||
group_name: "asc + default",
|
||||
criterion: Some(&asc_default[..]),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "desc + default",
|
||||
criterion: Some(&desc_default[..]),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
/* we bench the filters with the default request */
|
||||
utils::Conf {
|
||||
group_name: "basic filter: <=",
|
||||
filter: Some("released-timestamp <= 946728000"), // year 2000
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "basic filter: TO",
|
||||
filter: Some("released-timestamp 946728000 TO 1262347200"), // year 2000 to 2010
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "big filter",
|
||||
filter: Some("released-timestamp != 1262347200 AND (NOT (released-timestamp = 946728000)) AND (duration-float = 1 OR (duration-float 1.1 TO 1.5 AND released-timestamp > 315576000))"),
|
||||
..BASE_CONF
|
||||
},
|
||||
|
||||
/* then we bench some global / normal search with all the default criteria in the default
|
||||
* order */
|
||||
utils::Conf {
|
||||
group_name: "basic placeholder",
|
||||
queries: &[""],
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "basic without quote",
|
||||
queries: &BASE_CONF
|
||||
.queries
|
||||
.iter()
|
||||
.map(|s| s.trim()) // we remove the space at the end of each request
|
||||
.collect::<Vec<&str>>(),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "basic with quote",
|
||||
queries: basic_with_quote,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "prefix search",
|
||||
queries: &[
|
||||
"s", // 500k+ results
|
||||
"a", //
|
||||
"b", //
|
||||
"i", //
|
||||
"x", // only 7k results
|
||||
],
|
||||
..BASE_CONF
|
||||
},
|
||||
];
|
||||
|
||||
utils::run_benches(c, confs);
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_songs);
|
||||
criterion_main!(benches);
|
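When iterating locally on the groups defined above, criterion's command line lets you run this one bench target and filter to a single group, or save a named baseline for later comparison; a sketch (group and baseline names illustrative, the flags are the standard criterion options the workflows above also use):

```bash
cd benchmarks
# run only the "typo" group of the search_songs bench
cargo bench --bench search_songs -- typo
# save a baseline that can later be exported and compared with critcmp
cargo bench --bench search_songs -- --save-baseline my_baseline
```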
129
benchmarks/benches/search_wiki.rs
Normal file
@ -0,0 +1,129 @@
|
||||
mod datasets_paths;
|
||||
mod utils;
|
||||
|
||||
use criterion::{criterion_group, criterion_main};
|
||||
use milli::update::Settings;
|
||||
use utils::Conf;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
fn base_conf(builder: &mut Settings) {
|
||||
let displayed_fields = ["title", "body", "url"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_displayed_fields(displayed_fields);
|
||||
|
||||
let searchable_fields = ["title", "body"].iter().map(|s| s.to_string()).collect();
|
||||
builder.set_searchable_fields(searchable_fields);
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
const BASE_CONF: Conf = Conf {
|
||||
dataset: datasets_paths::SMOL_WIKI_ARTICLES,
|
||||
queries: &[
|
||||
"mingus ", // 46 candidates
|
||||
"miles davis ", // 159
|
||||
"rock and roll ", // 1007
|
||||
"machine ", // 3448
|
||||
"spain ", // 7002
|
||||
"japan ", // 10.593
|
||||
"france ", // 17.616
|
||||
"film ", // 24.959
|
||||
],
|
||||
configure: base_conf,
|
||||
..Conf::BASE
|
||||
};
|
||||
|
||||
fn bench_songs(c: &mut criterion::Criterion) {
|
||||
let basic_with_quote: Vec<String> = BASE_CONF
|
||||
.queries
|
||||
.iter()
|
||||
.map(|s| {
|
||||
s.trim().split(' ').map(|s| format!(r#""{}""#, s)).collect::<Vec<String>>().join(" ")
|
||||
})
|
||||
.collect();
|
||||
let basic_with_quote: &[&str] =
|
||||
&basic_with_quote.iter().map(|s| s.as_str()).collect::<Vec<&str>>();
|
||||
|
||||
#[rustfmt::skip]
|
||||
let confs = &[
|
||||
/* first we bench each criterion alone */
|
||||
utils::Conf {
|
||||
group_name: "proximity",
|
||||
queries: &[
|
||||
"herald sings ",
|
||||
"april paris ",
|
||||
"tea two ",
|
||||
"diesel engine ",
|
||||
],
|
||||
criterion: Some(&["proximity"]),
|
||||
optional_words: false,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "typo",
|
||||
queries: &[
|
||||
"migrosoft ",
|
||||
"linax ",
|
||||
"Disnaylande ",
|
||||
"phytogropher ",
|
||||
"nympalidea ",
|
||||
"aritmetric ",
|
||||
"the fronce ",
|
||||
"sisan ",
|
||||
],
|
||||
criterion: Some(&["typo"]),
|
||||
optional_words: false,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "words",
|
||||
queries: &[
|
||||
"the black saint and the sinner lady and the good doggo ", // four words to pop, 27 results
|
||||
"Kameya Tokujirō mingus monk ", // two words to pop, 55
|
||||
"Ulrich Hensel meilisearch milli ", // two words to pop, 306
|
||||
"Idaho Bellevue pizza ", // one word to pop, 800
|
||||
"Abraham machin ", // one word to pop, 1141
|
||||
],
|
||||
criterion: Some(&["words"]),
|
||||
..BASE_CONF
|
||||
},
|
||||
/* then we bench some global / normal search with all the default criteria in the default
|
||||
* order */
|
||||
utils::Conf {
|
||||
group_name: "basic placeholder",
|
||||
queries: &[""],
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "basic without quote",
|
||||
queries: &BASE_CONF
|
||||
.queries
|
||||
.iter()
|
||||
.map(|s| s.trim()) // we remove the space at the end of each request
|
||||
.collect::<Vec<&str>>(),
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "basic with quote",
|
||||
queries: basic_with_quote,
|
||||
..BASE_CONF
|
||||
},
|
||||
utils::Conf {
|
||||
group_name: "prefix search",
|
||||
queries: &[
|
||||
"t", // 453k results
|
||||
"c", // 405k
|
||||
"g", // 318k
|
||||
"j", // 227k
|
||||
"q", // 71k
|
||||
"x", // 17k
|
||||
],
|
||||
..BASE_CONF
|
||||
},
|
||||
];
|
||||
|
||||
utils::run_benches(c, confs);
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_songs);
|
||||
criterion_main!(benches);
|
256
benchmarks/benches/utils.rs
Normal file
@ -0,0 +1,256 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::fs::{create_dir_all, remove_dir_all, File};
|
||||
use std::io::{self, BufRead, BufReader, Cursor, Read, Seek};
|
||||
use std::num::ParseFloatError;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
|
||||
use criterion::BenchmarkId;
|
||||
use milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
|
||||
use milli::heed::EnvOpenOptions;
|
||||
use milli::update::{
|
||||
IndexDocuments, IndexDocumentsConfig, IndexDocumentsMethod, IndexerConfig, Settings,
|
||||
};
|
||||
use milli::{Criterion, Filter, Index, Object, TermsMatchingStrategy};
|
||||
use serde_json::Value;
|
||||
|
||||
pub struct Conf<'a> {
|
||||
/// where we are going to create our database.mmdb directory
|
||||
/// each benchmark will first try to delete it and then recreate it
|
||||
pub database_name: &'a str,
|
||||
/// the dataset to be used, it must be an uncompressed csv
|
||||
pub dataset: &'a str,
|
||||
/// The format of the dataset
|
||||
pub dataset_format: &'a str,
|
||||
pub group_name: &'a str,
|
||||
pub queries: &'a [&'a str],
|
||||
/// here you can change which criteria are used and in which order.
|
||||
/// - if you specify something all the base configuration will be thrown out
|
||||
/// - if you don't specify anything (None) the default configuration will be kept
|
||||
pub criterion: Option<&'a [&'a str]>,
|
||||
/// the last chance to configure your database as you want
|
||||
pub configure: fn(&mut Settings),
|
||||
pub filter: Option<&'a str>,
|
||||
pub sort: Option<Vec<&'a str>>,
|
||||
/// enable or disable the optional words on the query
|
||||
pub optional_words: bool,
|
||||
/// primary key; if it is None we'll auto-generate docids for every document
|
||||
pub primary_key: Option<&'a str>,
|
||||
}
|
||||
|
||||
impl Conf<'_> {
|
||||
pub const BASE: Self = Conf {
|
||||
database_name: "benches.mmdb",
|
||||
dataset_format: "csv",
|
||||
dataset: "",
|
||||
group_name: "",
|
||||
queries: &[],
|
||||
criterion: None,
|
||||
configure: |_| (),
|
||||
filter: None,
|
||||
sort: None,
|
||||
optional_words: true,
|
||||
primary_key: None,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn base_setup(conf: &Conf) -> Index {
|
||||
match remove_dir_all(&conf.database_name) {
|
||||
Ok(_) => (),
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => (),
|
||||
Err(e) => panic!("{}", e),
|
||||
}
|
||||
create_dir_all(&conf.database_name).unwrap();
|
||||
|
||||
let mut options = EnvOpenOptions::new();
|
||||
options.map_size(100 * 1024 * 1024 * 1024); // 100 GB
|
||||
options.max_readers(10);
|
||||
let index = Index::new(options, conf.database_name).unwrap();
|
||||
|
||||
let config = IndexerConfig::default();
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
let mut builder = Settings::new(&mut wtxn, &index, &config);
|
||||
|
||||
if let Some(primary_key) = conf.primary_key {
|
||||
builder.set_primary_key(primary_key.to_string());
|
||||
}
|
||||
|
||||
if let Some(criterion) = conf.criterion {
|
||||
builder.reset_filterable_fields();
|
||||
builder.reset_criteria();
|
||||
builder.reset_stop_words();
|
||||
|
||||
let criterion = criterion.iter().map(|s| Criterion::from_str(s).unwrap()).collect();
|
||||
builder.set_criteria(criterion);
|
||||
}
|
||||
|
||||
(conf.configure)(&mut builder);
|
||||
|
||||
builder.execute(|_| (), || false).unwrap();
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
let config = IndexerConfig::default();
|
||||
let mut wtxn = index.write_txn().unwrap();
|
||||
let indexing_config = IndexDocumentsConfig {
|
||||
autogenerate_docids: conf.primary_key.is_none(),
|
||||
update_method: IndexDocumentsMethod::ReplaceDocuments,
|
||||
..Default::default()
|
||||
};
|
||||
let builder =
|
||||
IndexDocuments::new(&mut wtxn, &index, &config, indexing_config, |_| (), || false).unwrap();
|
||||
let documents = documents_from(conf.dataset, conf.dataset_format);
|
||||
let (builder, user_error) = builder.add_documents(documents).unwrap();
|
||||
user_error.unwrap();
|
||||
builder.execute().unwrap();
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
index
|
||||
}
|
||||
|
||||
pub fn run_benches(c: &mut criterion::Criterion, confs: &[Conf]) {
|
||||
for conf in confs {
|
||||
let index = base_setup(conf);
|
||||
|
||||
let file_name = Path::new(conf.dataset).file_name().and_then(|f| f.to_str()).unwrap();
|
||||
let name = format!("{}: {}", file_name, conf.group_name);
|
||||
let mut group = c.benchmark_group(&name);
|
||||
|
||||
for &query in conf.queries {
|
||||
group.bench_with_input(BenchmarkId::from_parameter(query), &query, |b, &query| {
|
||||
b.iter(|| {
|
||||
let rtxn = index.read_txn().unwrap();
|
||||
let mut search = index.search(&rtxn);
|
||||
search.query(query).terms_matching_strategy(TermsMatchingStrategy::default());
|
||||
if let Some(filter) = conf.filter {
|
||||
let filter = Filter::from_str(filter).unwrap().unwrap();
|
||||
search.filter(filter);
|
||||
}
|
||||
if let Some(sort) = &conf.sort {
|
||||
let sort = sort.iter().map(|sort| sort.parse().unwrap()).collect();
|
||||
search.sort_criteria(sort);
|
||||
}
|
||||
let _ids = search.execute().unwrap();
|
||||
});
|
||||
});
|
||||
}
|
||||
group.finish();
|
||||
|
||||
index.prepare_for_closing().wait();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn documents_from(filename: &str, filetype: &str) -> DocumentsBatchReader<impl BufRead + Seek> {
|
||||
let reader =
|
||||
File::open(filename).expect(&format!("could not find the dataset in: {}", filename));
|
||||
let reader = BufReader::new(reader);
|
||||
let documents = match filetype {
|
||||
"csv" => documents_from_csv(reader).unwrap(),
|
||||
"json" => documents_from_json(reader).unwrap(),
|
||||
"jsonl" => documents_from_jsonl(reader).unwrap(),
|
||||
otherwise => panic!("invalid update format {:?}", otherwise),
|
||||
};
|
||||
DocumentsBatchReader::from_reader(Cursor::new(documents)).unwrap()
|
||||
}
|
||||
|
||||
fn documents_from_jsonl(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
|
||||
for result in serde_json::Deserializer::from_reader(reader).into_iter::<Object>() {
|
||||
let object = result?;
|
||||
documents.append_json_object(&object)?;
|
||||
}
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
fn documents_from_json(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
|
||||
documents.append_json_array(reader)?;
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
fn documents_from_csv(reader: impl BufRead) -> anyhow::Result<Vec<u8>> {
|
||||
let csv = csv::Reader::from_reader(reader);
|
||||
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
documents.append_csv(csv)?;
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
enum AllowedType {
|
||||
String,
|
||||
Number,
|
||||
}
|
||||
|
||||
fn parse_csv_header(header: &str) -> (String, AllowedType) {
|
||||
// if there are several separators we only split on the last one.
|
||||
match header.rsplit_once(':') {
|
||||
Some((field_name, field_type)) => match field_type {
|
||||
"string" => (field_name.to_string(), AllowedType::String),
|
||||
"number" => (field_name.to_string(), AllowedType::Number),
|
||||
// we may return an error in this case.
|
||||
_otherwise => (header.to_string(), AllowedType::String),
|
||||
},
|
||||
None => (header.to_string(), AllowedType::String),
|
||||
}
|
||||
}
|
||||
|
||||
struct CSVDocumentDeserializer<R>
|
||||
where
|
||||
R: Read,
|
||||
{
|
||||
documents: csv::StringRecordsIntoIter<R>,
|
||||
headers: Vec<(String, AllowedType)>,
|
||||
}
|
||||
|
||||
impl<R: Read> CSVDocumentDeserializer<R> {
|
||||
fn from_reader(reader: R) -> io::Result<Self> {
|
||||
let mut records = csv::Reader::from_reader(reader);
|
||||
|
||||
let headers = records.headers()?.into_iter().map(parse_csv_header).collect();
|
||||
|
||||
Ok(Self { documents: records.into_records(), headers })
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: Read> Iterator for CSVDocumentDeserializer<R> {
|
||||
type Item = anyhow::Result<Object>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
let csv_document = self.documents.next()?;
|
||||
|
||||
match csv_document {
|
||||
Ok(csv_document) => {
|
||||
let mut document = Object::new();
|
||||
|
||||
for ((field_name, field_type), value) in
|
||||
self.headers.iter().zip(csv_document.into_iter())
|
||||
{
|
||||
let parsed_value: Result<Value, ParseFloatError> = match field_type {
|
||||
AllowedType::Number => {
|
||||
value.parse::<f64>().map(Value::from).map_err(Into::into)
|
||||
}
|
||||
AllowedType::String => Ok(Value::String(value.to_string())),
|
||||
};
|
||||
|
||||
match parsed_value {
|
||||
Ok(value) => drop(document.insert(field_name.to_string(), value)),
|
||||
Err(_e) => {
|
||||
return Some(Err(anyhow::anyhow!(
|
||||
"Value '{}' is not a valid number",
|
||||
value
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Some(Ok(document))
|
||||
}
|
||||
Err(e) => Some(Err(anyhow::anyhow!("Error parsing csv document: {}", e))),
|
||||
}
|
||||
}
|
||||
}
|
115
benchmarks/build.rs
Normal file
@ -0,0 +1,115 @@
|
||||
use std::fs::File;
|
||||
use std::io::{Cursor, Read, Seek, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::{env, fs};
|
||||
|
||||
use bytes::Bytes;
|
||||
use convert_case::{Case, Casing};
|
||||
use flate2::read::GzDecoder;
|
||||
use reqwest::IntoUrl;
|
||||
|
||||
const BASE_URL: &str = "https://milli-benchmarks.fra1.digitaloceanspaces.com/datasets";
|
||||
|
||||
const DATASET_SONGS: (&str, &str) = ("smol-songs", "csv");
|
||||
const DATASET_SONGS_1_2: (&str, &str) = ("smol-songs-1_2", "csv");
|
||||
const DATASET_SONGS_3_4: (&str, &str) = ("smol-songs-3_4", "csv");
|
||||
const DATASET_SONGS_4_4: (&str, &str) = ("smol-songs-4_4", "csv");
|
||||
const DATASET_WIKI: (&str, &str) = ("smol-wiki-articles", "csv");
|
||||
const DATASET_WIKI_1_2: (&str, &str) = ("smol-wiki-articles-1_2", "csv");
|
||||
const DATASET_WIKI_3_4: (&str, &str) = ("smol-wiki-articles-3_4", "csv");
|
||||
const DATASET_WIKI_4_4: (&str, &str) = ("smol-wiki-articles-4_4", "csv");
|
||||
const DATASET_MOVIES: (&str, &str) = ("movies", "json");
|
||||
const DATASET_MOVIES_1_2: (&str, &str) = ("movies-1_2", "json");
|
||||
const DATASET_MOVIES_3_4: (&str, &str) = ("movies-3_4", "json");
|
||||
const DATASET_MOVIES_4_4: (&str, &str) = ("movies-4_4", "json");
|
||||
const DATASET_NESTED_MOVIES: (&str, &str) = ("nested_movies", "json");
|
||||
const DATASET_GEO: (&str, &str) = ("smol-all-countries", "jsonl");
|
||||
|
||||
const ALL_DATASETS: &[(&str, &str)] = &[
|
||||
DATASET_SONGS,
|
||||
DATASET_SONGS_1_2,
|
||||
DATASET_SONGS_3_4,
|
||||
DATASET_SONGS_4_4,
|
||||
DATASET_WIKI,
|
||||
DATASET_WIKI_1_2,
|
||||
DATASET_WIKI_3_4,
|
||||
DATASET_WIKI_4_4,
|
||||
DATASET_MOVIES,
|
||||
DATASET_MOVIES_1_2,
|
||||
DATASET_MOVIES_3_4,
|
||||
DATASET_MOVIES_4_4,
|
||||
DATASET_NESTED_MOVIES,
|
||||
DATASET_GEO,
|
||||
];
|
||||
|
||||
/// The name of the environment variable used to select the path
|
||||
/// of the directory containing the datasets
|
||||
const BASE_DATASETS_PATH_KEY: &str = "MILLI_BENCH_DATASETS_PATH";
|
||||
|
||||
fn main() -> anyhow::Result<()> {
|
||||
let out_dir = PathBuf::from(env::var(BASE_DATASETS_PATH_KEY).unwrap_or(env::var("OUT_DIR")?));
|
||||
|
||||
let benches_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?).join("benches");
|
||||
let mut manifest_paths_file = File::create(benches_dir.join("datasets_paths.rs"))?;
|
||||
write!(
|
||||
manifest_paths_file,
|
||||
r#"//! This file is generated by the build script.
|
||||
//! Do not modify by hand, use the build.rs file.
|
||||
#![allow(dead_code)]
|
||||
"#
|
||||
)?;
|
||||
writeln!(manifest_paths_file)?;
|
||||
|
||||
for (dataset, extension) in ALL_DATASETS {
|
||||
let out_path = out_dir.join(dataset);
|
||||
let out_file = out_path.with_extension(extension);
|
||||
|
||||
writeln!(
|
||||
&mut manifest_paths_file,
|
||||
r#"pub const {}: &str = {:?};"#,
|
||||
dataset.to_case(Case::ScreamingSnake),
|
||||
out_file.display(),
|
||||
)?;
|
||||
|
||||
if out_file.exists() {
|
||||
eprintln!(
|
||||
"The dataset {} already exists on the file system and will not be downloaded again",
|
||||
out_path.display(),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
let url = format!("{}/{}.{}.gz", BASE_URL, dataset, extension);
|
||||
eprintln!("downloading: {}", url);
|
||||
let bytes = retry(|| download_dataset(url.clone()), 10)?;
|
||||
eprintln!("{} downloaded successfully", url);
|
||||
eprintln!("uncompressing in {}", out_file.display());
|
||||
uncompress_in_file(bytes, &out_file)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn retry<Ok, Err>(fun: impl Fn() -> Result<Ok, Err>, times: usize) -> Result<Ok, Err> {
|
||||
for _ in 0..times {
|
||||
if let ok @ Ok(_) = fun() {
|
||||
return ok;
|
||||
}
|
||||
}
|
||||
fun()
|
||||
}
|
||||
|
||||
fn download_dataset<U: IntoUrl>(url: U) -> anyhow::Result<Cursor<Bytes>> {
|
||||
let bytes =
|
||||
reqwest::blocking::Client::builder().timeout(None).build()?.get(url).send()?.bytes()?;
|
||||
Ok(Cursor::new(bytes))
|
||||
}
|
||||
|
||||
fn uncompress_in_file<R: Read + Seek, P: AsRef<Path>>(bytes: R, path: P) -> anyhow::Result<()> {
|
||||
let path = path.as_ref();
|
||||
let mut gz = GzDecoder::new(bytes);
|
||||
let mut dataset = Vec::new();
|
||||
gz.read_to_end(&mut dataset)?;
|
||||
|
||||
fs::write(path, dataset)?;
|
||||
Ok(())
|
||||
}
|
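For context, a hedged sketch of how a benchmark might consume the constants written into `benches/datasets_paths.rs` (the `datasets_paths` module name and the helper function are illustrative, not part of this diff):

```
use std::fs::File;

// Generated by build.rs next to the bench files; exposes e.g. `SMOL_SONGS`
// holding the absolute path of the downloaded `smol-songs.csv`.
mod datasets_paths;

fn open_songs_dataset() -> std::io::Result<File> {
    File::open(datasets_paths::SMOL_SONGS)
}
```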
38
benchmarks/scripts/compare.sh
Executable file
@ -0,0 +1,38 @@
#!/usr/bin/env bash

# Requirements:
# - critcmp. See: https://github.com/BurntSushi/critcmp
# - curl

# Usage
# $ bash compare.sh json_file1 json_file2
# ex: bash compare.sh songs_main_09a4321.json songs_geosearch_24ec456.json

# Checking that critcmp is installed
command -v critcmp > /dev/null 2>&1
if [[ "$?" -ne 0 ]]; then
    echo 'You must install critcmp to make this script work.'
    echo 'See: https://github.com/BurntSushi/critcmp'
    echo ' $ cargo install critcmp'
    exit 1
fi

s3_url='https://milli-benchmarks.fra1.digitaloceanspaces.com/critcmp_results'

for file in $@
do
    file_s3_url="$s3_url/$file"
    file_local_path="/tmp/$file"

    if [[ ! -f $file_local_path ]]; then
        curl $file_s3_url --output $file_local_path --silent
        if [[ "$?" -ne 0 ]]; then
            echo 'curl command failed.'
            exit 1
        fi
    fi
done

path_list=$(echo " $@" | sed 's/ / \/tmp\//g')

critcmp $path_list
14
benchmarks/scripts/list.sh
Executable file
@ -0,0 +1,14 @@
#!/usr/bin/env bash

# Requirements:
# - curl
# - grep

res=$(curl -s https://milli-benchmarks.fra1.digitaloceanspaces.com | grep -o '<Key>[^<]\+' | cut -c 5- | grep critcmp_results/ | cut -c 18-)

for pattern in "$@"
do
    res=$(echo "$res" | grep $pattern)
done

echo "$res"
5
benchmarks/src/lib.rs
Normal file
@ -0,0 +1,5 @@
//! This library is only used to isolate the benchmarks
//! from the original milli library.
//!
//! It does not include functions that are useful to milli library
//! users, only to milli contributors.
23
cli/Cargo.toml
Normal file
@ -0,0 +1,23 @@
[package]
name = "cli"
version = "0.39.0"
edition = "2018"
description = "A CLI to interact with a milli index"
publish = false

[dependencies]
bimap = "0.6.2"
byte-unit = { version = "4.0.14", default-features = false, features = ["std", "serde"] }
color-eyre = "0.6.2"
csv = "1.1.6"
eyre = "0.6.8"
indicatif = "0.17.1"
milli = { path = "../milli", default-features = false }
mimalloc = { version = "0.1.29", default-features = false }
serde = "1.0.145"
serde_json = "1.0.85"
stderrlog = "0.5.3"
structopt = "0.3.26"

[features]
default = ["milli/default"]
559
cli/src/main.rs
Normal file
@ -0,0 +1,559 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt::Display;
|
||||
use std::fs::File;
|
||||
use std::io::{stdin, BufRead, BufReader, Cursor, Read, Write};
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use byte_unit::Byte;
|
||||
use eyre::Result;
|
||||
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
|
||||
use milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
|
||||
use milli::update::UpdateIndexingStep::{
|
||||
ComputeIdsAndMergeDocuments, IndexDocuments, MergeDataIntoFinalDatabase, RemapDocumentAddition,
|
||||
};
|
||||
use milli::update::{self, IndexDocumentsConfig, IndexDocumentsMethod, IndexerConfig};
|
||||
use milli::{heed, CriterionImplementationStrategy, Index, Object};
|
||||
use structopt::StructOpt;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOC: mimalloc::MiMalloc = mimalloc::MiMalloc;
|
||||
|
||||
#[derive(Debug, StructOpt)]
|
||||
#[structopt(name = "Milli CLI", about = "A simple CLI to manipulate a milli index.")]
|
||||
struct Cli {
|
||||
#[structopt(short, long, default_value = ".")]
|
||||
index_path: PathBuf,
|
||||
#[structopt(short = "s", long, default_value = "100GiB")]
|
||||
index_size: Byte,
|
||||
/// Verbose mode (-v, -vv, -vvv, etc.)
|
||||
#[structopt(short, long, parse(from_occurrences))]
|
||||
verbose: usize,
|
||||
#[structopt(subcommand)]
|
||||
subcommand: Command,
|
||||
}
|
||||
|
||||
#[derive(Debug, StructOpt)]
|
||||
enum Command {
|
||||
Documents {
|
||||
#[structopt(subcommand)]
|
||||
cmd: Documents,
|
||||
},
|
||||
Search(Search),
|
||||
Settings {
|
||||
#[structopt(subcommand)]
|
||||
cmd: Settings,
|
||||
},
|
||||
}
|
||||
|
||||
impl Performer for Command {
|
||||
fn perform(self, index: Index) -> Result<()> {
|
||||
match self {
|
||||
Command::Documents { cmd } => cmd.perform(index),
|
||||
Command::Search(cmd) => cmd.perform(index),
|
||||
Command::Settings { cmd } => cmd.perform(index),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, StructOpt)]
|
||||
enum Settings {
|
||||
Update(SettingsUpdate),
|
||||
Show,
|
||||
}
|
||||
|
||||
impl Settings {
|
||||
fn show(&self, index: Index) -> Result<()> {
|
||||
let txn = index.read_txn()?;
|
||||
let displayed_attributes = index
|
||||
.displayed_fields(&txn)?
|
||||
.map(|fields| fields.into_iter().map(String::from).collect());
|
||||
|
||||
let searchable_attributes: Option<Vec<_>> = index
|
||||
.searchable_fields(&txn)?
|
||||
.map(|fields| fields.into_iter().map(String::from).collect());
|
||||
|
||||
let filterable_attributes: Vec<_> = index.filterable_fields(&txn)?.into_iter().collect();
|
||||
|
||||
let sortable_attributes: Vec<_> = index.sortable_fields(&txn)?.into_iter().collect();
|
||||
|
||||
let criteria: Vec<_> = index.criteria(&txn)?.into_iter().map(|c| c.to_string()).collect();
|
||||
|
||||
let stop_words = index
|
||||
.stop_words(&txn)?
|
||||
.map(|stop_words| -> Result<Vec<_>> {
|
||||
Ok(stop_words.stream().into_strs()?.into_iter().collect())
|
||||
})
|
||||
.transpose()?
|
||||
.unwrap_or_else(Vec::new);
|
||||
let distinct_field = index.distinct_field(&txn)?.map(String::from);
|
||||
|
||||
// in milli each word in the synonyms map were split on their separator. Since we lost
|
||||
// this information we are going to put space between words.
|
||||
let synonyms: BTreeMap<_, Vec<_>> = index
|
||||
.synonyms(&txn)?
|
||||
.iter()
|
||||
.map(|(key, values)| {
|
||||
(key.join(" "), values.iter().map(|value| value.join(" ")).collect())
|
||||
})
|
||||
.collect();
|
||||
|
||||
let exact_attributes = index.exact_attributes(&txn)?;
|
||||
|
||||
println!(
|
||||
"displayed attributes:\n\t{}\nsearchable attributes:\n\t{}\nfilterable attributes:\n\t{}\nsortable attributes:\n\t{}\ncriterion:\n\t{}\nstop words:\n\t{}\ndistinct fields:\n\t{}\nsynonyms:\n\t{}\nexact attributes:\n\t{}\n",
|
||||
displayed_attributes.unwrap_or(vec!["*".to_owned()]).join("\n\t"),
|
||||
searchable_attributes.unwrap_or(vec!["*".to_owned()]).join("\n\t"),
|
||||
filterable_attributes.join("\n\t"),
|
||||
sortable_attributes.join("\n\t"),
|
||||
criteria.join("\n\t"),
|
||||
stop_words.join("\n\t"),
|
||||
distinct_field.unwrap_or_default(),
|
||||
synonyms.into_iter().map(|(k, v)| format!("\n\t{}:\n{:?}", k, v)).collect::<String>(),
|
||||
exact_attributes.join("\n\t"),
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Performer for Settings {
|
||||
fn perform(self, index: Index) -> Result<()> {
|
||||
match self {
|
||||
Settings::Update(update) => update.perform(index),
|
||||
Settings::Show => self.show(index),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, StructOpt)]
|
||||
enum Documents {
|
||||
Add(DocumentAddition),
|
||||
}
|
||||
|
||||
impl Performer for Documents {
|
||||
fn perform(self, index: Index) -> Result<()> {
|
||||
match self {
|
||||
Self::Add(addition) => addition.perform(index),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trait Performer {
|
||||
fn perform(self, index: Index) -> Result<()>;
|
||||
}
|
||||
|
||||
fn setup(opt: &Cli) -> Result<()> {
|
||||
color_eyre::install()?;
|
||||
stderrlog::new()
|
||||
.verbosity(opt.verbose)
|
||||
.show_level(false)
|
||||
.timestamp(stderrlog::Timestamp::Off)
|
||||
.init()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let command = Cli::from_args();
|
||||
|
||||
setup(&command)?;
|
||||
|
||||
let mut options = heed::EnvOpenOptions::new();
|
||||
options.map_size(command.index_size.get_bytes() as usize);
|
||||
let index = milli::Index::new(options, command.index_path)?;
|
||||
|
||||
command.subcommand.perform(index)?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum DocumentAdditionFormat {
|
||||
Csv,
|
||||
Json,
|
||||
Jsonl,
|
||||
}
|
||||
|
||||
impl FromStr for DocumentAdditionFormat {
|
||||
type Err = eyre::Error;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
match s {
|
||||
"csv" => Ok(Self::Csv),
|
||||
"jsonl" => Ok(Self::Jsonl),
|
||||
"json" => Ok(Self::Json),
|
||||
other => eyre::bail!("invalid format: {}", other),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, StructOpt)]
|
||||
struct DocumentAddition {
|
||||
#[structopt(short, long, default_value = "json", possible_values = &["csv", "jsonl", "json"])]
|
||||
format: DocumentAdditionFormat,
|
||||
/// Path to the update file, if not present, will read from stdin.
|
||||
#[structopt(short, long)]
|
||||
path: Option<PathBuf>,
|
||||
/// Specify the primary key.
|
||||
#[structopt(long)]
|
||||
primary: Option<String>,
|
||||
/// Whether to generate missing document ids.
|
||||
#[structopt(short, long)]
|
||||
autogen_docids: bool,
|
||||
/// Whether to update or replace the documents if they already exist.
|
||||
#[structopt(short, long)]
|
||||
update_documents: bool,
|
||||
}
|
||||
|
||||
impl Performer for DocumentAddition {
|
||||
fn perform(self, index: milli::Index) -> Result<()> {
|
||||
let reader: Box<dyn Read> = match self.path {
|
||||
Some(ref path) => {
|
||||
let file = File::open(path)?;
|
||||
Box::new(file)
|
||||
}
|
||||
None => Box::new(stdin()),
|
||||
};
|
||||
|
||||
println!("parsing documents...");
|
||||
|
||||
let reader = BufReader::new(reader);
|
||||
|
||||
let documents = match self.format {
|
||||
DocumentAdditionFormat::Csv => documents_from_csv(reader)?,
|
||||
DocumentAdditionFormat::Json => documents_from_json(reader)?,
|
||||
DocumentAdditionFormat::Jsonl => documents_from_jsonl(reader)?,
|
||||
};
|
||||
|
||||
let reader = DocumentsBatchReader::from_reader(Cursor::new(documents))?;
|
||||
|
||||
println!("Adding {} documents to the index.", reader.documents_count());
|
||||
|
||||
let mut txn = index.write_txn()?;
|
||||
let config = milli::update::IndexerConfig { log_every_n: Some(100), ..Default::default() };
|
||||
let update_method = if self.update_documents {
|
||||
IndexDocumentsMethod::UpdateDocuments
|
||||
} else {
|
||||
IndexDocumentsMethod::ReplaceDocuments
|
||||
};
|
||||
|
||||
if let Some(primary) = self.primary {
|
||||
let mut builder = update::Settings::new(&mut txn, &index, &config);
|
||||
builder.set_primary_key(primary);
|
||||
builder.execute(|_| (), || false).unwrap();
|
||||
}
|
||||
|
||||
let indexing_config = IndexDocumentsConfig {
|
||||
update_method,
|
||||
autogenerate_docids: self.autogen_docids,
|
||||
..Default::default()
|
||||
};
|
||||
let mut bars = Vec::new();
|
||||
let progesses = MultiProgress::new();
|
||||
for _ in 0..4 {
|
||||
let bar = ProgressBar::hidden();
|
||||
let bar = progesses.add(bar);
|
||||
bars.push(bar);
|
||||
}
|
||||
let addition = milli::update::IndexDocuments::new(
|
||||
&mut txn,
|
||||
&index,
|
||||
&config,
|
||||
indexing_config,
|
||||
|step| indexing_callback(step, &bars),
|
||||
|| false,
|
||||
)
|
||||
.unwrap();
|
||||
let (addition, user_error) = addition.add_documents(reader)?;
|
||||
if let Err(error) = user_error {
|
||||
return Err(error.into());
|
||||
}
|
||||
|
||||
let result = addition.execute()?;
|
||||
|
||||
txn.commit()?;
|
||||
|
||||
println!("{:?}", result);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn indexing_callback(step: milli::update::UpdateIndexingStep, bars: &[ProgressBar]) {
|
||||
let step_index = step.step();
|
||||
let bar = &bars[step_index];
|
||||
if step_index > 0 {
|
||||
let prev = &bars[step_index - 1];
|
||||
if !prev.is_finished() {
|
||||
prev.disable_steady_tick();
|
||||
prev.finish();
|
||||
}
|
||||
}
|
||||
|
||||
let style = ProgressStyle::default_bar()
|
||||
.progress_chars("##-")
|
||||
.template("[eta: {eta_precise}] {bar:40.cyan/blue} {pos:>7}/{len:7} {msg}")
|
||||
.unwrap();
|
||||
|
||||
match step {
|
||||
RemapDocumentAddition { documents_seen } => {
|
||||
bar.set_style(ProgressStyle::default_spinner());
|
||||
bar.set_message(format!("remapped {} documents so far.", documents_seen));
|
||||
}
|
||||
ComputeIdsAndMergeDocuments { documents_seen, total_documents } => {
|
||||
bar.set_style(style);
|
||||
bar.set_length(total_documents as u64);
|
||||
bar.set_message("Merging documents...");
|
||||
bar.set_position(documents_seen as u64);
|
||||
}
|
||||
IndexDocuments { documents_seen, total_documents } => {
|
||||
bar.set_style(style);
|
||||
bar.set_length(total_documents as u64);
|
||||
bar.set_message("Indexing documents...");
|
||||
bar.set_position(documents_seen as u64);
|
||||
}
|
||||
MergeDataIntoFinalDatabase { databases_seen, total_databases } => {
|
||||
bar.set_style(style);
|
||||
bar.set_length(total_databases as u64);
|
||||
bar.set_message("Merging databases...");
|
||||
bar.set_position(databases_seen as u64);
|
||||
}
|
||||
}
|
||||
bar.enable_steady_tick(Duration::from_millis(200));
|
||||
}
|
||||
|
||||
fn documents_from_jsonl(reader: impl Read) -> Result<Vec<u8>> {
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
let reader = BufReader::new(reader);
|
||||
|
||||
for result in serde_json::Deserializer::from_reader(reader).into_iter::<Object>() {
|
||||
let object = result?;
|
||||
documents.append_json_object(&object)?;
|
||||
}
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
fn documents_from_json(reader: impl Read) -> Result<Vec<u8>> {
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
|
||||
documents.append_json_array(reader)?;
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
fn documents_from_csv(reader: impl Read) -> Result<Vec<u8>> {
|
||||
let csv = csv::Reader::from_reader(reader);
|
||||
|
||||
let mut documents = DocumentsBatchBuilder::new(Vec::new());
|
||||
documents.append_csv(csv)?;
|
||||
|
||||
documents.into_inner().map_err(Into::into)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
struct SearchStrategyOption(CriterionImplementationStrategy);
|
||||
impl FromStr for SearchStrategyOption {
|
||||
type Err = String;
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
match s.to_lowercase().as_str() {
|
||||
"dynamic" => Ok(SearchStrategyOption(CriterionImplementationStrategy::Dynamic)),
|
||||
"set" => Ok(SearchStrategyOption(CriterionImplementationStrategy::OnlySetBased)),
|
||||
"iterative" => Ok(SearchStrategyOption(CriterionImplementationStrategy::OnlyIterative)),
|
||||
_ => Err("could not parse {s} as a criterion implementation strategy, available options are `dynamic`, `set`, and `iterative`".to_owned()),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl Display for SearchStrategyOption {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self.0 {
|
||||
CriterionImplementationStrategy::OnlyIterative => Display::fmt("iterative", f),
|
||||
CriterionImplementationStrategy::OnlySetBased => Display::fmt("set", f),
|
||||
CriterionImplementationStrategy::Dynamic => Display::fmt("dynamic", f),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, StructOpt)]
|
||||
struct Search {
|
||||
query: Option<String>,
|
||||
#[structopt(short, long)]
|
||||
filter: Option<String>,
|
||||
#[structopt(short, long)]
|
||||
offset: Option<usize>,
|
||||
#[structopt(short, long)]
|
||||
limit: Option<usize>,
|
||||
#[structopt(short, long, conflicts_with = "query")]
|
||||
interactive: bool,
|
||||
#[structopt(short, long)]
|
||||
strategy: Option<SearchStrategyOption>,
|
||||
}
|
||||
|
||||
impl Performer for Search {
|
||||
fn perform(self, index: milli::Index) -> Result<()> {
|
||||
if self.interactive {
|
||||
let stdin = std::io::stdin();
|
||||
let mut lines = stdin.lock().lines();
|
||||
loop {
|
||||
eprint!("> ");
|
||||
std::io::stdout().flush()?;
|
||||
match lines.next() {
|
||||
Some(Ok(line)) => {
|
||||
let now = Instant::now();
|
||||
let jsons = Self::perform_single_search(
|
||||
&index,
|
||||
&Some(line),
|
||||
&self.filter,
|
||||
&self.offset,
|
||||
&self.limit,
|
||||
&self.strategy,
|
||||
)?;
|
||||
|
||||
let time = now.elapsed();
|
||||
|
||||
let hits = serde_json::to_string_pretty(&jsons)?;
|
||||
|
||||
println!("{}", hits);
|
||||
|
||||
eprintln!("found {} results in {:.02?}", jsons.len(), time);
|
||||
}
|
||||
_ => break,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let now = Instant::now();
|
||||
let jsons = Self::perform_single_search(
|
||||
&index,
|
||||
&self.query,
|
||||
&self.filter,
|
||||
&self.offset,
|
||||
&self.limit,
|
||||
&self.strategy,
|
||||
)?;
|
||||
|
||||
let time = now.elapsed();
|
||||
|
||||
let hits = serde_json::to_string_pretty(&jsons)?;
|
||||
|
||||
println!("{}", hits);
|
||||
eprintln!("found {} results in {:.02?}", jsons.len(), time);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Search {
|
||||
fn perform_single_search(
|
||||
index: &milli::Index,
|
||||
query: &Option<String>,
|
||||
filter: &Option<String>,
|
||||
offset: &Option<usize>,
|
||||
limit: &Option<usize>,
|
||||
strategy: &Option<SearchStrategyOption>,
|
||||
) -> Result<Vec<Object>> {
|
||||
let txn = index.read_txn()?;
|
||||
let mut search = index.search(&txn);
|
||||
|
||||
if let Some(ref query) = query {
|
||||
search.query(query);
|
||||
}
|
||||
|
||||
if let Some(ref filter) = filter {
|
||||
if let Some(condition) = milli::Filter::from_str(filter)? {
|
||||
search.filter(condition);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(offset) = offset {
|
||||
search.offset(*offset);
|
||||
}
|
||||
|
||||
if let Some(limit) = limit {
|
||||
search.limit(*limit);
|
||||
}
|
||||
if let Some(strategy) = strategy {
|
||||
search.criterion_implementation_strategy(strategy.0);
|
||||
}
|
||||
|
||||
let result = search.execute()?;
|
||||
|
||||
let fields_ids_map = index.fields_ids_map(&txn)?;
|
||||
let displayed_fields =
|
||||
index.displayed_fields_ids(&txn)?.unwrap_or_else(|| fields_ids_map.ids().collect());
|
||||
let documents = index.documents(&txn, result.documents_ids)?;
|
||||
let mut jsons = Vec::new();
|
||||
for (_, obkv) in documents {
|
||||
let json = milli::obkv_to_json(&displayed_fields, &fields_ids_map, obkv)?;
|
||||
jsons.push(json);
|
||||
}
|
||||
|
||||
Ok(jsons)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, StructOpt)]
|
||||
struct SettingsUpdate {
|
||||
#[structopt(long)]
|
||||
filterable_attributes: Option<Vec<String>>,
|
||||
#[structopt(long)]
|
||||
criteria: Option<Vec<String>>,
|
||||
#[structopt(long)]
|
||||
exact_attributes: Option<Vec<String>>,
|
||||
#[structopt(long)]
|
||||
distinct_attribute: Option<String>,
|
||||
}
|
||||
|
||||
impl Performer for SettingsUpdate {
|
||||
fn perform(self, index: milli::Index) -> Result<()> {
|
||||
let mut txn = index.write_txn()?;
|
||||
|
||||
let config = IndexerConfig { log_every_n: Some(100), ..Default::default() };
|
||||
|
||||
let mut update = milli::update::Settings::new(&mut txn, &index, &config);
|
||||
|
||||
if let Some(ref filterable_attributes) = self.filterable_attributes {
|
||||
if !filterable_attributes.is_empty() {
|
||||
update.set_filterable_fields(filterable_attributes.iter().cloned().collect());
|
||||
} else {
|
||||
update.reset_filterable_fields();
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(criteria) = self.criteria {
|
||||
if !criteria.is_empty() {
|
||||
update.set_criteria(criteria.iter().map(|c| c.parse()).collect::<Result<_, _>>()?);
|
||||
} else {
|
||||
update.reset_criteria();
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(exact_attributes) = self.exact_attributes {
|
||||
if !exact_attributes.is_empty() {
|
||||
update.set_exact_attributes(exact_attributes.into_iter().collect());
|
||||
} else {
|
||||
update.reset_exact_attributes();
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(distinct_attr) = self.distinct_attribute {
|
||||
if !distinct_attr.is_empty() {
|
||||
update.set_distinct_field(distinct_attr);
|
||||
} else {
|
||||
update.reset_distinct_field();
|
||||
}
|
||||
}
|
||||
|
||||
let mut bars = Vec::new();
|
||||
let progesses = MultiProgress::new();
|
||||
for _ in 0..4 {
|
||||
let bar = ProgressBar::hidden();
|
||||
let bar = progesses.add(bar);
|
||||
bars.push(bar);
|
||||
}
|
||||
|
||||
update.execute(|step| indexing_callback(step, &bars), || false)?;
|
||||
|
||||
txn.commit()?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
13
filter-parser/Cargo.toml
Normal file
@ -0,0 +1,13 @@
[package]
name = "filter-parser"
version = "0.39.0"
edition = "2021"
description = "The parser for the Meilisearch filter syntax"
publish = false

[dependencies]
nom = "7.1.1"
nom_locate = "4.0.0"

[dev-dependencies]
insta = "1.21.0"
36
filter-parser/README.md
Normal file
@ -0,0 +1,36 @@
# Filter parser

This workspace is dedicated to the parsing of the Meilisearch filters.

Most of the code and explanations are in [`lib.rs`](./src/lib.rs), especially the BNF of the filters at the top of that file.

The parser uses [nom](https://docs.rs/nom/) to do most of its work and [nom-locate](https://docs.rs/nom_locate/) to keep track of where we were when an error is encountered.

## Cli
A simple `main` is provided to quickly test whether a filter can be parsed, without pulling in milli.
It takes one argument and tries to parse it.
```
cargo run -- 'field = value' # success
cargo run -- 'field = "doggo' # error => missing closing delimiter "
```

## Fuzz
The workspace has been fuzzed with [cargo-fuzz](https://rust-fuzz.github.io/book/cargo-fuzz.html).

### Setup
You'll need rust-nightly to execute the fuzzer.

```
cargo install cargo-fuzz
```

### Run
When the filter parser is executed by the fuzzer it triggers a stack overflow very quickly. We can avoid this problem by limiting the `max_len` of [libfuzzer](https://llvm.org/docs/LibFuzzer.html) to 500 characters.
```
cargo fuzz run parse -- -max_len=500
```

## What to do if you find a bug in the parser

- Write a test at the end of [`lib.rs`](./src/lib.rs) to ensure it never happens again.
- Add a file in [the corpus directory](./fuzz/corpus/parse/) with your filter to help the fuzzer find new bugs. Since this directory is going to be heavily polluted by the execution of the fuzzer, it is in the gitignore and you'll need to force-add your new test.
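For library users, a minimal sketch of calling the parser directly (based on the `FilterCondition::parse` signature shown in `src/lib.rs` below; the filter string is only an example):

```
use filter_parser::FilterCondition;

fn main() {
    // `parse` returns Ok(None) for an empty filter, Ok(Some(_)) on success,
    // and a positioned error on failure.
    match FilterCondition::parse("channel = Ponce AND subscribers > 1000") {
        Ok(Some(condition)) => println!("parsed: {:?}", condition),
        Ok(None) => println!("empty filter"),
        Err(error) => eprintln!("{}", error),
    }
}
```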
3
filter-parser/fuzz/.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
/corpus/
/artifacts/
/target/
25
filter-parser/fuzz/Cargo.toml
Normal file
@ -0,0 +1,25 @@
[package]
name = "filter-parser-fuzz"
version = "0.0.0"
authors = ["Automatically generated"]
publish = false
edition = "2018"

[package.metadata]
cargo-fuzz = true

[dependencies]
libfuzzer-sys = "0.4"

[dependencies.filter-parser]
path = ".."

# Prevent this from interfering with workspaces
[workspace]
members = ["."]

[[bin]]
name = "parse"
path = "fuzz_targets/parse.rs"
test = false
doc = false
1
filter-parser/fuzz/corpus/parse/test_1
Normal file
@ -0,0 +1 @@
channel = Ponce
1
filter-parser/fuzz/corpus/parse/test_10
Normal file
@ -0,0 +1 @@
channel != ponce
1
filter-parser/fuzz/corpus/parse/test_11
Normal file
@ -0,0 +1 @@
NOT channel = ponce
1
filter-parser/fuzz/corpus/parse/test_12
Normal file
@ -0,0 +1 @@
subscribers < 1000
1
filter-parser/fuzz/corpus/parse/test_13
Normal file
@ -0,0 +1 @@
subscribers > 1000
1
filter-parser/fuzz/corpus/parse/test_14
Normal file
@ -0,0 +1 @@
subscribers <= 1000
1
filter-parser/fuzz/corpus/parse/test_15
Normal file
@ -0,0 +1 @@
subscribers >= 1000
1
filter-parser/fuzz/corpus/parse/test_16
Normal file
@ -0,0 +1 @@
NOT subscribers < 1000
1
filter-parser/fuzz/corpus/parse/test_17
Normal file
@ -0,0 +1 @@
NOT subscribers > 1000
1
filter-parser/fuzz/corpus/parse/test_18
Normal file
@ -0,0 +1 @@
NOT subscribers <= 1000
1
filter-parser/fuzz/corpus/parse/test_19
Normal file
@ -0,0 +1 @@
NOT subscribers >= 1000
1
filter-parser/fuzz/corpus/parse/test_2
Normal file
@ -0,0 +1 @@
subscribers = 12
1
filter-parser/fuzz/corpus/parse/test_20
Normal file
@ -0,0 +1 @@
subscribers 100 TO 1000
1
filter-parser/fuzz/corpus/parse/test_21
Normal file
@ -0,0 +1 @@
NOT subscribers 100 TO 1000
1
filter-parser/fuzz/corpus/parse/test_22
Normal file
@ -0,0 +1 @@
_geoRadius(12, 13, 14)
1
filter-parser/fuzz/corpus/parse/test_23
Normal file
@ -0,0 +1 @@
NOT _geoRadius(12, 13, 14)
1
filter-parser/fuzz/corpus/parse/test_24
Normal file
@ -0,0 +1 @@
channel = ponce AND 'dog race' != 'bernese mountain'
1
filter-parser/fuzz/corpus/parse/test_25
Normal file
@ -0,0 +1 @@
channel = ponce OR 'dog race' != 'bernese mountain'
1
filter-parser/fuzz/corpus/parse/test_26
Normal file
@ -0,0 +1 @@
channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000
1
filter-parser/fuzz/corpus/parse/test_27
Normal file
@ -0,0 +1 @@
channel = ponce AND ( 'dog race' != 'bernese mountain' OR subscribers > 1000 )
1
filter-parser/fuzz/corpus/parse/test_28
Normal file
@ -0,0 +1 @@
(channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000) AND _geoRadius(12, 13, 14)
1
filter-parser/fuzz/corpus/parse/test_29
Normal file
@ -0,0 +1 @@
channel = Ponce = 12
1
filter-parser/fuzz/corpus/parse/test_3
Normal file
@ -0,0 +1 @@
channel = 'Mister Mv'
1
filter-parser/fuzz/corpus/parse/test_30
Normal file
@ -0,0 +1 @@
channel =
1
filter-parser/fuzz/corpus/parse/test_31
Normal file
@ -0,0 +1 @@
channel = 🐻
1
filter-parser/fuzz/corpus/parse/test_32
Normal file
@ -0,0 +1 @@
OR
1
filter-parser/fuzz/corpus/parse/test_33
Normal file
@ -0,0 +1 @@
AND
1
filter-parser/fuzz/corpus/parse/test_34
Normal file
@ -0,0 +1 @@
channel Ponce
1
filter-parser/fuzz/corpus/parse/test_35
Normal file
@ -0,0 +1 @@
channel = Ponce OR
1
filter-parser/fuzz/corpus/parse/test_36
Normal file
@ -0,0 +1 @@
_geoRadius
1
filter-parser/fuzz/corpus/parse/test_37
Normal file
@ -0,0 +1 @@
_geoRadius = 12
1
filter-parser/fuzz/corpus/parse/test_38
Normal file
@ -0,0 +1 @@
_geoPoint(12, 13, 14)
1
filter-parser/fuzz/corpus/parse/test_39
Normal file
@ -0,0 +1 @@
position <= _geoPoint(12, 13, 14)
1
filter-parser/fuzz/corpus/parse/test_4
Normal file
@ -0,0 +1 @@
channel = "Mister Mv"
1
filter-parser/fuzz/corpus/parse/test_40
Normal file
@ -0,0 +1 @@
position <= _geoRadius(12, 13, 14)
1
filter-parser/fuzz/corpus/parse/test_41
Normal file
@ -0,0 +1 @@
channel = 'ponce
1
filter-parser/fuzz/corpus/parse/test_42
Normal file
@ -0,0 +1 @@
channel = "ponce
1
filter-parser/fuzz/corpus/parse/test_43
Normal file
@ -0,0 +1 @@
channel = mv OR (followers >= 1000
1
filter-parser/fuzz/corpus/parse/test_5
Normal file
@ -0,0 +1 @@
'dog race' = Borzoi
1
filter-parser/fuzz/corpus/parse/test_6
Normal file
@ -0,0 +1 @@
"dog race" = Chusky
1
filter-parser/fuzz/corpus/parse/test_7
Normal file
@ -0,0 +1 @@
"dog race" = "Bernese Mountain"
1
filter-parser/fuzz/corpus/parse/test_8
Normal file
@ -0,0 +1 @@
'dog race' = 'Bernese Mountain'
1
filter-parser/fuzz/corpus/parse/test_9
Normal file
@ -0,0 +1 @@
"dog race" = 'Bernese Mountain'
18
filter-parser/fuzz/fuzz_targets/parse.rs
Normal file
@ -0,0 +1,18 @@
#![no_main]
use filter_parser::{ErrorKind, FilterCondition};
use libfuzzer_sys::fuzz_target;

fuzz_target!(|data: &[u8]| {
    if let Ok(s) = std::str::from_utf8(data) {
        // When we are fuzzing the parser we can get a stack overflow very easily.
        // But since this doesn't happen with a normal build we are just going to limit the fuzzer to 500 characters.
        if s.len() < 500 {
            match FilterCondition::parse(s) {
                Err(e) if matches!(e.kind(), ErrorKind::InternalError(_)) => {
                    panic!("Found an internal error: `{:?}`", e)
                }
                _ => (),
            }
        }
    }
});
67
filter-parser/src/condition.rs
Normal file
@ -0,0 +1,67 @@
//! BNF grammar:
//!
//! ```text
//! condition = value ("==" | ">" ...) value
//! to = value value TO value
//! ```

use nom::branch::alt;
use nom::bytes::complete::tag;
use nom::character::complete::multispace1;
use nom::combinator::cut;
use nom::sequence::{terminated, tuple};
use Condition::*;

use crate::{parse_value, FilterCondition, IResult, Span, Token};

#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Condition<'a> {
    GreaterThan(Token<'a>),
    GreaterThanOrEqual(Token<'a>),
    Equal(Token<'a>),
    NotEqual(Token<'a>),
    Exists,
    LowerThan(Token<'a>),
    LowerThanOrEqual(Token<'a>),
    Between { from: Token<'a>, to: Token<'a> },
}

/// condition = value ("==" | ">" ...) value
pub fn parse_condition(input: Span) -> IResult<FilterCondition> {
    let operator = alt((tag("<="), tag(">="), tag("!="), tag("<"), tag(">"), tag("=")));
    let (input, (fid, op, value)) = tuple((parse_value, operator, cut(parse_value)))(input)?;

    let condition = match *op.fragment() {
        "<=" => FilterCondition::Condition { fid, op: LowerThanOrEqual(value) },
        ">=" => FilterCondition::Condition { fid, op: GreaterThanOrEqual(value) },
        "!=" => FilterCondition::Condition { fid, op: NotEqual(value) },
        "<" => FilterCondition::Condition { fid, op: LowerThan(value) },
        ">" => FilterCondition::Condition { fid, op: GreaterThan(value) },
        "=" => FilterCondition::Condition { fid, op: Equal(value) },
        _ => unreachable!(),
    };

    Ok((input, condition))
}

/// exist = value "EXISTS"
pub fn parse_exists(input: Span) -> IResult<FilterCondition> {
    let (input, key) = terminated(parse_value, tag("EXISTS"))(input)?;

    Ok((input, FilterCondition::Condition { fid: key, op: Exists }))
}
/// exist = value "NOT" WS+ "EXISTS"
pub fn parse_not_exists(input: Span) -> IResult<FilterCondition> {
    let (input, key) = parse_value(input)?;

    let (input, _) = tuple((tag("NOT"), multispace1, tag("EXISTS")))(input)?;
    Ok((input, FilterCondition::Not(Box::new(FilterCondition::Condition { fid: key, op: Exists }))))
}

/// to = value value "TO" WS+ value
pub fn parse_to(input: Span) -> IResult<FilterCondition> {
    let (input, (key, from, _, _, to)) =
        tuple((parse_value, parse_value, tag("TO"), multispace1, cut(parse_value)))(input)?;

    Ok((input, FilterCondition::Condition { fid: key, op: Between { from, to } }))
}
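As a rough illustration of what these parsers produce (a sketch using the public API re-exported from `lib.rs`; not a test from this diff), `subscribers > 1000` maps to a `GreaterThan` condition on the `subscribers` field:

```
use filter_parser::{Condition, FilterCondition};

fn main() {
    // parse_condition is reached through FilterCondition::parse for a simple comparison.
    if let Ok(Some(FilterCondition::Condition { fid, op })) =
        FilterCondition::parse("subscribers > 1000")
    {
        assert_eq!(fid.value(), "subscribers");
        assert!(matches!(op, Condition::GreaterThan(_)));
    }
}
```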
198
filter-parser/src/error.rs
Normal file
@ -0,0 +1,198 @@
|
||||
use std::fmt::Display;
|
||||
|
||||
use nom::error::{self, ParseError};
|
||||
use nom::Parser;
|
||||
|
||||
use crate::{IResult, Span};
|
||||
|
||||
pub trait NomErrorExt<E> {
|
||||
fn is_failure(&self) -> bool;
|
||||
fn map_err<O: FnOnce(E) -> E>(self, op: O) -> nom::Err<E>;
|
||||
fn map_fail<O: FnOnce(E) -> E>(self, op: O) -> nom::Err<E>;
|
||||
}
|
||||
|
||||
impl<E> NomErrorExt<E> for nom::Err<E> {
|
||||
fn is_failure(&self) -> bool {
|
||||
matches!(self, Self::Failure(_))
|
||||
}
|
||||
|
||||
fn map_err<O: FnOnce(E) -> E>(self, op: O) -> nom::Err<E> {
|
||||
match self {
|
||||
e @ Self::Failure(_) => e,
|
||||
e => e.map(op),
|
||||
}
|
||||
}
|
||||
|
||||
fn map_fail<O: FnOnce(E) -> E>(self, op: O) -> nom::Err<E> {
|
||||
match self {
|
||||
e @ Self::Error(_) => e,
|
||||
e => e.map(op),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// cut a parser and map the error
|
||||
pub fn cut_with_err<'a, O>(
|
||||
mut parser: impl FnMut(Span<'a>) -> IResult<'a, O>,
|
||||
mut with: impl FnMut(Error<'a>) -> Error<'a>,
|
||||
) -> impl FnMut(Span<'a>) -> IResult<O> {
|
||||
move |input| match parser.parse(input) {
|
||||
Err(nom::Err::Error(e)) => Err(nom::Err::Failure(with(e))),
|
||||
rest => rest,
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Error<'a> {
|
||||
context: Span<'a>,
|
||||
kind: ErrorKind<'a>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ExpectedValueKind {
|
||||
ReservedKeyword,
|
||||
Other,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum ErrorKind<'a> {
|
||||
ReservedGeo(&'a str),
|
||||
Geo,
|
||||
MisusedGeo,
|
||||
InvalidPrimary,
|
||||
ExpectedEof,
|
||||
ExpectedValue(ExpectedValueKind),
|
||||
MalformedValue,
|
||||
InOpeningBracket,
|
||||
InClosingBracket,
|
||||
NonFiniteFloat,
|
||||
InExpectedValue(ExpectedValueKind),
|
||||
ReservedKeyword(String),
|
||||
MissingClosingDelimiter(char),
|
||||
Char(char),
|
||||
InternalError(error::ErrorKind),
|
||||
DepthLimitReached,
|
||||
External(String),
|
||||
}
|
||||
|
||||
impl<'a> Error<'a> {
|
||||
pub fn kind(&self) -> &ErrorKind<'a> {
|
||||
&self.kind
|
||||
}
|
||||
|
||||
pub fn context(&self) -> &Span<'a> {
|
||||
&self.context
|
||||
}
|
||||
|
||||
pub fn new_from_kind(context: Span<'a>, kind: ErrorKind<'a>) -> Self {
|
||||
Self { context, kind }
|
||||
}
|
||||
|
||||
pub fn new_from_external(context: Span<'a>, error: impl std::error::Error) -> Self {
|
||||
Self::new_from_kind(context, ErrorKind::External(error.to_string()))
|
||||
}
|
||||
|
||||
pub fn char(self) -> char {
|
||||
match self.kind {
|
||||
ErrorKind::Char(c) => c,
|
||||
error => panic!("Internal filter parser error: {:?}", error),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> ParseError<Span<'a>> for Error<'a> {
|
||||
fn from_error_kind(input: Span<'a>, kind: error::ErrorKind) -> Self {
|
||||
let kind = match kind {
|
||||
error::ErrorKind::Eof => ErrorKind::ExpectedEof,
|
||||
kind => ErrorKind::InternalError(kind),
|
||||
};
|
||||
Self { context: input, kind }
|
||||
}
|
||||
|
||||
fn append(_input: Span<'a>, _kind: error::ErrorKind, other: Self) -> Self {
|
||||
other
|
||||
}
|
||||
|
||||
fn from_char(input: Span<'a>, c: char) -> Self {
|
||||
Self { context: input, kind: ErrorKind::Char(c) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Display for Error<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let input = self.context.fragment();
|
||||
// When printing our error message we want to escape all `\n` to be sure we keep our format with the
|
||||
// first line being the diagnostic and the second line being the incriminated filter.
|
||||
let escaped_input = input.escape_debug();
|
||||
|
||||
match &self.kind {
|
||||
ErrorKind::ExpectedValue(_) if input.trim().is_empty() => {
|
||||
writeln!(f, "Was expecting a value but instead got nothing.")?
|
||||
}
|
||||
ErrorKind::ExpectedValue(ExpectedValueKind::ReservedKeyword) => {
|
||||
writeln!(f, "Was expecting a value but instead got `{escaped_input}`, which is a reserved keyword. To use `{escaped_input}` as a field name or a value, surround it by quotes.")?
|
||||
}
|
||||
ErrorKind::ExpectedValue(ExpectedValueKind::Other) => {
|
||||
writeln!(f, "Was expecting a value but instead got `{}`.", escaped_input)?
|
||||
}
|
||||
ErrorKind::MalformedValue => {
|
||||
writeln!(f, "Malformed value: `{}`.", escaped_input)?
|
||||
}
|
||||
ErrorKind::MissingClosingDelimiter(c) => {
|
||||
writeln!(f, "Expression `{}` is missing the following closing delimiter: `{}`.", escaped_input, c)?
|
||||
}
|
||||
ErrorKind::InvalidPrimary if input.trim().is_empty() => {
|
||||
writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` but instead got nothing.")?
|
||||
}
|
||||
ErrorKind::InvalidPrimary => {
|
||||
writeln!(f, "Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` at `{}`.", escaped_input)?
|
||||
}
|
||||
ErrorKind::ExpectedEof => {
|
||||
writeln!(f, "Found unexpected characters at the end of the filter: `{}`. You probably forgot an `OR` or an `AND` rule.", escaped_input)?
|
||||
}
|
||||
ErrorKind::Geo => {
|
||||
writeln!(f, "The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.")?
|
||||
}
|
||||
ErrorKind::ReservedGeo(name) => {
|
||||
writeln!(f, "`{}` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance) built-in rule to filter on `_geo` coordinates.", name.escape_debug())?
|
||||
}
|
||||
ErrorKind::MisusedGeo => {
|
||||
writeln!(f, "The `_geoRadius` filter is an operation and can't be used as a value.")?
|
||||
}
|
||||
ErrorKind::ReservedKeyword(word) => {
|
||||
writeln!(f, "`{word}` is a reserved keyword and thus cannot be used as a field name unless it is put inside quotes. Use \"{word}\" or \'{word}\' instead.")?
|
||||
}
|
||||
ErrorKind::InOpeningBracket => {
|
||||
writeln!(f, "Expected `[` after `IN` keyword.")?
|
||||
}
|
||||
ErrorKind::InClosingBracket => {
|
||||
writeln!(f, "Expected matching `]` after the list of field names given to `IN[`")?
|
||||
}
|
||||
ErrorKind::NonFiniteFloat => {
|
||||
writeln!(f, "Non finite floats are not supported")?
|
||||
}
|
||||
ErrorKind::InExpectedValue(ExpectedValueKind::ReservedKeyword) => {
|
||||
writeln!(f, "Expected only comma-separated field names inside `IN[..]` but instead found `{escaped_input}`, which is a keyword. To use `{escaped_input}` as a field name or a value, surround it by quotes.")?
|
||||
}
|
||||
ErrorKind::InExpectedValue(ExpectedValueKind::Other) => {
|
||||
writeln!(f, "Expected only comma-separated field names inside `IN[..]` but instead found `{escaped_input}`.")?
|
||||
}
|
||||
ErrorKind::Char(c) => {
|
||||
panic!("Tried to display a char error with `{}`", c)
|
||||
}
|
||||
ErrorKind::DepthLimitReached => writeln!(
|
||||
f,
|
||||
"The filter exceeded the maximum depth limit. Try rewriting the filter so that it contains fewer nested conditions."
|
||||
)?,
|
||||
ErrorKind::InternalError(kind) => writeln!(
|
||||
f,
|
||||
"Encountered an internal `{:?}` error while parsing your filter. Please fill an issue", kind
|
||||
)?,
|
||||
ErrorKind::External(ref error) => writeln!(f, "{}", error)?,
|
||||
}
|
||||
let base_column = self.context.get_utf8_column();
|
||||
let size = self.context.fragment().chars().count();
|
||||
|
||||
write!(f, "{}:{} {}", base_column, base_column + size, self.context.extra)
|
||||
}
|
||||
}
|
739
filter-parser/src/lib.rs
Normal file
@ -0,0 +1,739 @@
|
||||
//! BNF grammar:
|
||||
//!
|
||||
//! ```text
|
||||
//! filter = expression EOF
|
||||
//! expression = or
|
||||
//! or = and ("OR" WS+ and)*
|
||||
//! and = not ("AND" WS+ not)*
|
||||
//! not = ("NOT" WS+ not) | primary
|
||||
//! primary = (WS* "(" WS* expression WS* ")" WS*) | geoRadius | in | condition | exists | not_exists | to
|
||||
//! in = value "IN" WS* "[" value_list "]"
|
||||
//! condition = value ("=" | "!=" | ">" | ">=" | "<" | "<=") value
|
||||
//! exists = value "EXISTS"
|
||||
//! not_exists = value "NOT" WS+ "EXISTS"
|
||||
//! to = value value "TO" WS+ value
|
||||
//! value = WS* ( word | singleQuoted | doubleQuoted) WS+
|
||||
//! value_list = (value ("," value)* ","?)?
|
||||
//! singleQuoted = "'" .* all but quotes "'"
|
||||
//! doubleQuoted = "\"" .* all but double quotes "\""
|
||||
//! word = (alphanumeric | _ | - | .)+
|
||||
//! geoRadius = "_geoRadius(" WS* float WS* "," WS* float WS* "," float WS* ")"
|
||||
//! ```
|
||||
//!
|
||||
//! Other BNF grammar used to handle some specific errors:
|
||||
//! ```text
|
||||
//! geoPoint = WS* "_geoPoint(" (float ",")* ")"
|
||||
//! ```
|
||||
//!
|
||||
//! Specific errors:
|
||||
//! ================
|
||||
//! - If a user try to use a geoPoint, as a primary OR as a value we must throw an error.
|
||||
//! ```text
|
||||
//! field = _geoPoint(12, 13, 14)
|
||||
//! field < 12 AND _geoPoint(1, 2)
|
||||
//! ```
|
||||
//!
|
||||
//! - If a user try to use a geoRadius as a value we must throw an error.
|
||||
//! ```text
|
||||
//! field = _geoRadius(12, 13, 14)
|
||||
//! ```
|
||||
//!
|
||||
|
||||
mod condition;
|
||||
mod error;
|
||||
mod value;
|
||||
|
||||
use std::fmt::Debug;
|
||||
|
||||
pub use condition::{parse_condition, parse_to, Condition};
|
||||
use condition::{parse_exists, parse_not_exists};
|
||||
use error::{cut_with_err, ExpectedValueKind, NomErrorExt};
|
||||
pub use error::{Error, ErrorKind};
|
||||
use nom::branch::alt;
|
||||
use nom::bytes::complete::tag;
|
||||
use nom::character::complete::{char, multispace0};
|
||||
use nom::combinator::{cut, eof, map, opt};
|
||||
use nom::multi::{many0, separated_list1};
|
||||
use nom::number::complete::recognize_float;
|
||||
use nom::sequence::{delimited, preceded, terminated, tuple};
|
||||
use nom::Finish;
|
||||
use nom_locate::LocatedSpan;
|
||||
pub(crate) use value::parse_value;
|
||||
use value::word_exact;
|
||||
|
||||
pub type Span<'a> = LocatedSpan<&'a str, &'a str>;
|
||||
|
||||
type IResult<'a, Ret> = nom::IResult<Span<'a>, Ret, Error<'a>>;
|
||||
|
||||
const MAX_FILTER_DEPTH: usize = 200;
|
||||
|
||||
#[derive(Debug, Clone, Eq)]
|
||||
pub struct Token<'a> {
|
||||
/// The token in the original input, it should be used when possible.
|
||||
span: Span<'a>,
|
||||
/// If you need to modify the original input you can use the `value` field
|
||||
/// to store your modified input.
|
||||
value: Option<String>,
|
||||
}
|
||||
|
||||
impl<'a> PartialEq for Token<'a> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.span.fragment() == other.span.fragment()
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Token<'a> {
|
||||
pub fn new(span: Span<'a>, value: Option<String>) -> Self {
|
||||
Self { span, value }
|
||||
}
|
||||
|
||||
pub fn lexeme(&self) -> &str {
|
||||
&self.span
|
||||
}
|
||||
|
||||
pub fn value(&self) -> &str {
|
||||
self.value.as_ref().map_or(&self.span, |value| value)
|
||||
}
|
||||
|
||||
pub fn as_external_error(&self, error: impl std::error::Error) -> Error<'a> {
|
||||
Error::new_from_external(self.span, error)
|
||||
}
|
||||
|
||||
pub fn parse_finite_float(&self) -> Result<f64, Error> {
|
||||
let value: f64 = self.span.parse().map_err(|e| self.as_external_error(e))?;
|
||||
if value.is_finite() {
|
||||
Ok(value)
|
||||
} else {
|
||||
Err(Error::new_from_kind(self.span, ErrorKind::NonFiniteFloat))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> From<Span<'a>> for Token<'a> {
|
||||
fn from(span: Span<'a>) -> Self {
|
||||
Self { span, value: None }
|
||||
}
|
||||
}
|
||||
|
||||
/// Allow [Token] to be constructed from &[str]
|
||||
impl<'a> From<&'a str> for Token<'a> {
|
||||
fn from(s: &'a str) -> Self {
|
||||
Token::from(Span::new_extra(s, s))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum FilterCondition<'a> {
|
||||
Not(Box<Self>),
|
||||
Condition { fid: Token<'a>, op: Condition<'a> },
|
||||
In { fid: Token<'a>, els: Vec<Token<'a>> },
|
||||
Or(Vec<Self>),
|
||||
And(Vec<Self>),
|
||||
GeoLowerThan { point: [Token<'a>; 2], radius: Token<'a> },
|
||||
}
|
||||
|
||||
impl<'a> FilterCondition<'a> {
|
||||
/// Returns the first token found at the specified depth, `None` if no token at this depth.
|
||||
pub fn token_at_depth(&self, depth: usize) -> Option<&Token> {
|
||||
match self {
|
||||
FilterCondition::Condition { fid, .. } if depth == 0 => Some(fid),
|
||||
FilterCondition::Or(subfilters) => {
|
||||
let depth = depth.saturating_sub(1);
|
||||
for f in subfilters.iter() {
|
||||
if let Some(t) = f.token_at_depth(depth) {
|
||||
return Some(t);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
FilterCondition::And(subfilters) => {
|
||||
let depth = depth.saturating_sub(1);
|
||||
for f in subfilters.iter() {
|
||||
if let Some(t) = f.token_at_depth(depth) {
|
||||
return Some(t);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
FilterCondition::GeoLowerThan { point: [point, _], .. } if depth == 0 => Some(point),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn parse(input: &'a str) -> Result<Option<Self>, Error> {
|
||||
if input.trim().is_empty() {
|
||||
return Ok(None);
|
||||
}
|
||||
let span = Span::new_extra(input, input);
|
||||
parse_filter(span).finish().map(|(_rem, output)| Some(output))
|
||||
}
|
||||
}
|
||||
|
||||
/// remove OPTIONAL whitespaces before AND after the provided parser.
|
||||
fn ws<'a, O>(
|
||||
inner: impl FnMut(Span<'a>) -> IResult<'a, O>,
|
||||
) -> impl FnMut(Span<'a>) -> IResult<'a, O> {
|
||||
delimited(multispace0, inner, multispace0)
|
||||
}
|
||||
|
||||
/// value_list = (value ("," value)* ","?)?
|
||||
fn parse_value_list(input: Span) -> IResult<Vec<Token>> {
|
||||
let (input, first_value) = opt(parse_value)(input)?;
|
||||
if let Some(first_value) = first_value {
|
||||
let value_list_el_parser = preceded(ws(tag(",")), parse_value);
|
||||
|
||||
let (input, mut values) = many0(value_list_el_parser)(input)?;
|
||||
let (input, _) = opt(ws(tag(",")))(input)?;
|
||||
values.insert(0, first_value);
|
||||
|
||||
Ok((input, values))
|
||||
} else {
|
||||
Ok((input, vec![]))
|
||||
}
|
||||
}
|
||||
|
||||
/// "IN" WS* "[" value_list "]"
|
||||
fn parse_in_body(input: Span) -> IResult<Vec<Token>> {
|
||||
let (input, _) = ws(word_exact("IN"))(input)?;
|
||||
|
||||
// everything after `IN` can be a failure
|
||||
let (input, _) =
|
||||
cut_with_err(tag("["), |_| Error::new_from_kind(input, ErrorKind::InOpeningBracket))(
|
||||
input,
|
||||
)?;
|
||||
|
||||
let (input, content) = cut(parse_value_list)(input)?;
|
||||
|
||||
// everything after `IN` can be a failure
|
||||
let (input, _) = cut_with_err(ws(tag("]")), |_| {
|
||||
if eof::<_, ()>(input).is_ok() {
|
||||
Error::new_from_kind(input, ErrorKind::InClosingBracket)
|
||||
} else {
|
||||
let expected_value_kind = match parse_value(input) {
|
||||
Err(nom::Err::Error(e)) => match e.kind() {
|
||||
ErrorKind::ReservedKeyword(_) => ExpectedValueKind::ReservedKeyword,
|
||||
_ => ExpectedValueKind::Other,
|
||||
},
|
||||
_ => ExpectedValueKind::Other,
|
||||
};
|
||||
Error::new_from_kind(input, ErrorKind::InExpectedValue(expected_value_kind))
|
||||
}
|
||||
})(input)?;
|
||||
|
||||
Ok((input, content))
|
||||
}
|
||||
|
||||
/// in = value "IN" "[" value_list "]"
|
||||
fn parse_in(input: Span) -> IResult<FilterCondition> {
|
||||
let (input, value) = parse_value(input)?;
|
||||
let (input, content) = parse_in_body(input)?;
|
||||
|
||||
let filter = FilterCondition::In { fid: value, els: content };
|
||||
Ok((input, filter))
|
||||
}
|
||||
|
||||
/// in = value "NOT" WS* "IN" "[" value_list "]"
|
||||
fn parse_not_in(input: Span) -> IResult<FilterCondition> {
|
||||
let (input, value) = parse_value(input)?;
|
||||
let (input, _) = word_exact("NOT")(input)?;
|
||||
let (input, content) = parse_in_body(input)?;
|
||||
|
||||
let filter = FilterCondition::Not(Box::new(FilterCondition::In { fid: value, els: content }));
|
||||
Ok((input, filter))
|
||||
}
|
||||
|
||||
/// or = and ("OR" and)
|
||||
fn parse_or(input: Span, depth: usize) -> IResult<FilterCondition> {
|
||||
if depth > MAX_FILTER_DEPTH {
|
||||
return Err(nom::Err::Error(Error::new_from_kind(input, ErrorKind::DepthLimitReached)));
|
||||
}
|
||||
let (input, first_filter) = parse_and(input, depth + 1)?;
|
||||
// if we found a `OR` then we MUST find something next
|
||||
let (input, mut ors) =
|
||||
many0(preceded(ws(word_exact("OR")), cut(|input| parse_and(input, depth + 1))))(input)?;
|
||||
|
||||
let filter = if ors.is_empty() {
|
||||
first_filter
|
||||
} else {
|
||||
ors.insert(0, first_filter);
|
||||
FilterCondition::Or(ors)
|
||||
};
|
||||
|
||||
Ok((input, filter))
|
||||
}
|
||||
|
||||
/// and = not ("AND" not)*
|
||||
fn parse_and(input: Span, depth: usize) -> IResult<FilterCondition> {
|
||||
if depth > MAX_FILTER_DEPTH {
|
||||
return Err(nom::Err::Error(Error::new_from_kind(input, ErrorKind::DepthLimitReached)));
|
||||
}
|
||||
let (input, first_filter) = parse_not(input, depth + 1)?;
|
||||
// if we found a `AND` then we MUST find something next
|
||||
let (input, mut ands) =
|
||||
many0(preceded(ws(word_exact("AND")), cut(|input| parse_not(input, depth + 1))))(input)?;
|
||||
|
||||
let filter = if ands.is_empty() {
|
||||
first_filter
|
||||
} else {
|
||||
ands.insert(0, first_filter);
|
||||
FilterCondition::And(ands)
|
||||
};
|
||||
|
||||
Ok((input, filter))
|
||||
}
|
||||
|
||||
/// not = ("NOT" WS+ not) | primary
|
||||
/// We can have multiple consecutive not, eg: `NOT NOT channel = mv`.
|
||||
/// If we parse a `NOT` we MUST parse something behind.
|
||||
fn parse_not(input: Span, depth: usize) -> IResult<FilterCondition> {
|
||||
if depth > MAX_FILTER_DEPTH {
|
||||
return Err(nom::Err::Error(Error::new_from_kind(input, ErrorKind::DepthLimitReached)));
|
||||
}
|
||||
alt((
|
||||
map(
|
||||
preceded(ws(word_exact("NOT")), cut(|input| parse_not(input, depth + 1))),
|
||||
|e| match e {
|
||||
FilterCondition::Not(e) => *e,
|
||||
_ => FilterCondition::Not(Box::new(e)),
|
||||
},
|
||||
),
|
||||
|input| parse_primary(input, depth + 1),
|
||||
))(input)
|
||||
}
|
||||
|
||||
/// geoRadius = WS* "_geoRadius(float WS* "," WS* float WS* "," WS* float)
|
||||
/// If we parse `_geoRadius` we MUST parse the rest of the expression.
|
||||
fn parse_geo_radius(input: Span) -> IResult<FilterCondition> {
|
||||
// we want to allow space BEFORE the _geoRadius but not after
|
||||
let parsed = preceded(
|
||||
tuple((multispace0, word_exact("_geoRadius"))),
|
||||
// if we were able to parse `_geoRadius` and can't parse the rest of the input we return a failure
|
||||
cut(delimited(char('('), separated_list1(tag(","), ws(recognize_float)), char(')'))),
|
||||
)(input)
|
||||
.map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::Geo)));
|
||||
|
||||
let (input, args) = parsed?;
|
||||
|
||||
if args.len() != 3 {
|
||||
return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::Geo)));
|
||||
}
|
||||
|
||||
let res = FilterCondition::GeoLowerThan {
|
||||
point: [args[0].into(), args[1].into()],
|
||||
radius: args[2].into(),
|
||||
};
|
||||
Ok((input, res))
|
||||
}
|
||||
|
||||
/// geoPoint = WS* "_geoPoint(float WS* "," WS* float WS* "," WS* float)
|
||||
fn parse_geo_point(input: Span) -> IResult<FilterCondition> {
|
||||
// we want to forbid space BEFORE the _geoPoint but not after
|
||||
tuple((
|
||||
multispace0,
|
||||
tag("_geoPoint"),
|
||||
// if we were able to parse `_geoPoint` we are going to return a Failure whatever happens next.
|
||||
cut(delimited(char('('), separated_list1(tag(","), ws(recognize_float)), char(')'))),
|
||||
))(input)
|
||||
.map_err(|e| e.map(|_| Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoPoint"))))?;
|
||||
// if we succeeded we still return a `Failure` because geoPoints are not allowed
|
||||
Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::ReservedGeo("_geoPoint"))))
|
||||
}
|
||||
|
||||
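/// Helper used only for error reporting: parse a condition and, if it failed because a
/// reserved keyword was used as a value, upgrade the error into a failure so that `alt`
/// reports it instead of silently trying the remaining branches.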
fn parse_error_reserved_keyword(input: Span) -> IResult<FilterCondition> {
|
||||
match parse_condition(input) {
|
||||
Ok(result) => Ok(result),
|
||||
Err(nom::Err::Error(inner) | nom::Err::Failure(inner)) => match inner.kind() {
|
||||
ErrorKind::ExpectedValue(ExpectedValueKind::ReservedKeyword) => {
|
||||
Err(nom::Err::Failure(inner))
|
||||
}
|
||||
_ => Err(nom::Err::Error(inner)),
|
||||
},
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
/// primary = (WS* "(" WS* expression WS* ")" WS*) | geoRadius | in | not_in | condition | exists | not_exists | to
|
||||
fn parse_primary(input: Span, depth: usize) -> IResult<FilterCondition> {
|
||||
if depth > MAX_FILTER_DEPTH {
|
||||
return Err(nom::Err::Error(Error::new_from_kind(input, ErrorKind::DepthLimitReached)));
|
||||
}
|
||||
alt((
|
||||
// if we find an opening parenthesis, then we must parse an expression and find the closing parenthesis
|
||||
delimited(
|
||||
ws(char('(')),
|
||||
cut(|input| parse_expression(input, depth + 1)),
|
||||
cut_with_err(ws(char(')')), |c| {
|
||||
Error::new_from_kind(input, ErrorKind::MissingClosingDelimiter(c.char()))
|
||||
}),
|
||||
),
|
||||
parse_geo_radius,
|
||||
parse_in,
|
||||
parse_not_in,
|
||||
parse_condition,
|
||||
parse_exists,
|
||||
parse_not_exists,
|
||||
parse_to,
|
||||
// the next lines are only for error handling and are written at the end to have the least possible performance impact
|
||||
parse_geo_point,
|
||||
parse_error_reserved_keyword,
|
||||
))(input)
|
||||
// if the inner parsers did not match enough information to return an accurate error
|
||||
.map_err(|e| e.map_err(|_| Error::new_from_kind(input, ErrorKind::InvalidPrimary)))
|
||||
}
|
||||
|
||||
/// expression = or
|
||||
pub fn parse_expression(input: Span, depth: usize) -> IResult<FilterCondition> {
|
||||
parse_or(input, depth)
|
||||
}
|
||||
|
||||
/// filter = expression EOF
|
||||
pub fn parse_filter(input: Span) -> IResult<FilterCondition> {
|
||||
terminated(|input| parse_expression(input, 0), eof)(input)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use super::*;
|
||||
|
||||
/// Create a raw [Token]. You must specify the string that appears BEFORE your element, followed by your element.
|
||||
pub fn rtok<'a>(before: &'a str, value: &'a str) -> Token<'a> {
|
||||
// if the string is empty we still need to return 1 for the line number
|
||||
let lines = before.is_empty().then(|| 1).unwrap_or_else(|| before.lines().count());
|
||||
let offset = before.chars().count();
|
||||
// the extra field is not checked in the tests so we can set it to nothing
|
||||
unsafe { Span::new_from_raw_offset(offset, lines as u32, value, "") }.into()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse() {
|
||||
use FilterCondition as Fc;
|
||||
|
||||
fn p(s: &str) -> impl std::fmt::Display + '_ {
|
||||
Fc::parse(s).unwrap().unwrap()
|
||||
}
|
||||
|
||||
// Test equal
|
||||
insta::assert_display_snapshot!(p("channel = Ponce"), @"{channel} = {Ponce}");
|
||||
insta::assert_display_snapshot!(p("subscribers = 12"), @"{subscribers} = {12}");
|
||||
insta::assert_display_snapshot!(p("channel = 'Mister Mv'"), @"{channel} = {Mister Mv}");
|
||||
insta::assert_display_snapshot!(p("channel = \"Mister Mv\""), @"{channel} = {Mister Mv}");
|
||||
insta::assert_display_snapshot!(p("'dog race' = Borzoi"), @"{dog race} = {Borzoi}");
|
||||
insta::assert_display_snapshot!(p("\"dog race\" = Chusky"), @"{dog race} = {Chusky}");
|
||||
insta::assert_display_snapshot!(p("\"dog race\" = \"Bernese Mountain\""), @"{dog race} = {Bernese Mountain}");
|
||||
insta::assert_display_snapshot!(p("'dog race' = 'Bernese Mountain'"), @"{dog race} = {Bernese Mountain}");
|
||||
insta::assert_display_snapshot!(p("\"dog race\" = 'Bernese Mountain'"), @"{dog race} = {Bernese Mountain}");
|
||||
|
||||
// Test IN
|
||||
insta::assert_display_snapshot!(p("colour IN[]"), @"{colour} IN[]");
|
||||
insta::assert_display_snapshot!(p("colour IN[green]"), @"{colour} IN[{green}, ]");
|
||||
insta::assert_display_snapshot!(p("colour IN[green,]"), @"{colour} IN[{green}, ]");
|
||||
insta::assert_display_snapshot!(p("colour NOT IN[green,blue]"), @"NOT ({colour} IN[{green}, {blue}, ])");
|
||||
insta::assert_display_snapshot!(p(" colour IN [ green , blue , ]"), @"{colour} IN[{green}, {blue}, ]");
|
||||
|
||||
// Test IN + OR/AND/()
|
||||
insta::assert_display_snapshot!(p(" colour IN [green, blue] AND color = green "), @"AND[{colour} IN[{green}, {blue}, ], {color} = {green}, ]");
|
||||
insta::assert_display_snapshot!(p("NOT (colour IN [green, blue]) AND color = green "), @"AND[NOT ({colour} IN[{green}, {blue}, ]), {color} = {green}, ]");
|
||||
insta::assert_display_snapshot!(p("x = 1 OR NOT (colour IN [green, blue] OR color = green) "), @"OR[{x} = {1}, NOT (OR[{colour} IN[{green}, {blue}, ], {color} = {green}, ]), ]");
|
||||
|
||||
// Test whitespace start/end
|
||||
insta::assert_display_snapshot!(p(" colour = green "), @"{colour} = {green}");
|
||||
insta::assert_display_snapshot!(p(" (colour = green OR colour = red) "), @"OR[{colour} = {green}, {colour} = {red}, ]");
|
||||
insta::assert_display_snapshot!(p(" colour IN [green, blue] AND color = green "), @"AND[{colour} IN[{green}, {blue}, ], {color} = {green}, ]");
|
||||
insta::assert_display_snapshot!(p(" colour NOT IN [green, blue] "), @"NOT ({colour} IN[{green}, {blue}, ])");
|
||||
insta::assert_display_snapshot!(p(" colour IN [green, blue] "), @"{colour} IN[{green}, {blue}, ]");
|
||||
|
||||
// Test conditions
|
||||
insta::assert_display_snapshot!(p("channel != ponce"), @"{channel} != {ponce}");
|
||||
insta::assert_display_snapshot!(p("NOT channel = ponce"), @"NOT ({channel} = {ponce})");
|
||||
insta::assert_display_snapshot!(p("subscribers < 1000"), @"{subscribers} < {1000}");
|
||||
insta::assert_display_snapshot!(p("subscribers > 1000"), @"{subscribers} > {1000}");
|
||||
insta::assert_display_snapshot!(p("subscribers <= 1000"), @"{subscribers} <= {1000}");
|
||||
insta::assert_display_snapshot!(p("subscribers >= 1000"), @"{subscribers} >= {1000}");
|
||||
insta::assert_display_snapshot!(p("subscribers <= 1000"), @"{subscribers} <= {1000}");
|
||||
insta::assert_display_snapshot!(p("subscribers 100 TO 1000"), @"{subscribers} {100} TO {1000}");
|
||||
|
||||
// Test NOT + EXISTS
|
||||
insta::assert_display_snapshot!(p("subscribers EXISTS"), @"{subscribers} EXISTS");
|
||||
insta::assert_display_snapshot!(p("NOT subscribers < 1000"), @"NOT ({subscribers} < {1000})");
|
||||
insta::assert_display_snapshot!(p("NOT subscribers EXISTS"), @"NOT ({subscribers} EXISTS)");
|
||||
insta::assert_display_snapshot!(p("subscribers NOT EXISTS"), @"NOT ({subscribers} EXISTS)");
|
||||
insta::assert_display_snapshot!(p("NOT subscribers NOT EXISTS"), @"{subscribers} EXISTS");
|
||||
insta::assert_display_snapshot!(p("subscribers NOT EXISTS"), @"NOT ({subscribers} EXISTS)");
|
||||
insta::assert_display_snapshot!(p("NOT subscribers 100 TO 1000"), @"NOT ({subscribers} {100} TO {1000})");
|
||||
|
||||
// Test nested NOT
|
||||
insta::assert_display_snapshot!(p("NOT NOT NOT NOT x = 5"), @"{x} = {5}");
|
||||
insta::assert_display_snapshot!(p("NOT NOT (NOT NOT x = 5)"), @"{x} = {5}");
|
||||
|
||||
// Test geo radius
|
||||
insta::assert_display_snapshot!(p("_geoRadius(12, 13, 14)"), @"_geoRadius({12}, {13}, {14})");
|
||||
insta::assert_display_snapshot!(p("NOT _geoRadius(12, 13, 14)"), @"NOT (_geoRadius({12}, {13}, {14}))");
|
||||
|
||||
// Test OR + AND
|
||||
insta::assert_display_snapshot!(p("channel = ponce AND 'dog race' != 'bernese mountain'"), @"AND[{channel} = {ponce}, {dog race} != {bernese mountain}, ]");
|
||||
insta::assert_display_snapshot!(p("channel = ponce OR 'dog race' != 'bernese mountain'"), @"OR[{channel} = {ponce}, {dog race} != {bernese mountain}, ]");
|
||||
insta::assert_display_snapshot!(p("channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000"), @"OR[AND[{channel} = {ponce}, {dog race} != {bernese mountain}, ], {subscribers} > {1000}, ]");
|
||||
insta::assert_display_snapshot!(
|
||||
p("channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000 OR colour = red OR colour = blue AND size = 7"),
|
||||
@"OR[AND[{channel} = {ponce}, {dog race} != {bernese mountain}, ], {subscribers} > {1000}, {colour} = {red}, AND[{colour} = {blue}, {size} = {7}, ], ]"
|
||||
);
|
||||
|
||||
// Test parentheses
|
||||
insta::assert_display_snapshot!(p("channel = ponce AND ( 'dog race' != 'bernese mountain' OR subscribers > 1000 )"), @"AND[{channel} = {ponce}, OR[{dog race} != {bernese mountain}, {subscribers} > {1000}, ], ]");
|
||||
insta::assert_display_snapshot!(p("(channel = ponce AND 'dog race' != 'bernese mountain' OR subscribers > 1000) AND _geoRadius(12, 13, 14)"), @"AND[OR[AND[{channel} = {ponce}, {dog race} != {bernese mountain}, ], {subscribers} > {1000}, ], _geoRadius({12}, {13}, {14}), ]");
|
||||
|
||||
// Test recursion
|
||||
// This is the most that is allowed
|
||||
insta::assert_display_snapshot!(
|
||||
p("(((((((((((((((((((((((((((((((((((((((((((((((((x = 1)))))))))))))))))))))))))))))))))))))))))))))))))"),
|
||||
@"{x} = {1}"
|
||||
);
|
||||
insta::assert_display_snapshot!(
|
||||
p("NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT x = 1"),
|
||||
@"NOT ({x} = {1})"
|
||||
);
|
||||
|
||||
// Confusing keywords
|
||||
insta::assert_display_snapshot!(p(r#"NOT "OR" EXISTS AND "EXISTS" NOT EXISTS"#), @"AND[NOT ({OR} EXISTS), NOT ({EXISTS} EXISTS), ]");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn error() {
|
||||
use FilterCondition as Fc;
|
||||
|
||||
fn p(s: &str) -> impl std::fmt::Display + '_ {
|
||||
Fc::parse(s).unwrap_err().to_string()
|
||||
}
|
||||
|
||||
insta::assert_display_snapshot!(p("channel = Ponce = 12"), @r###"
|
||||
Found unexpected characters at the end of the filter: `= 12`. You probably forgot an `OR` or an `AND` rule.
|
||||
17:21 channel = Ponce = 12
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("channel = "), @r###"
|
||||
Was expecting a value but instead got nothing.
|
||||
14:14 channel =
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("channel = 🐻"), @r###"
|
||||
Was expecting a value but instead got `🐻`.
|
||||
11:12 channel = 🐻
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("channel = 🐻 AND followers < 100"), @r###"
|
||||
Was expecting a value but instead got `🐻`.
|
||||
11:12 channel = 🐻 AND followers < 100
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("'OR'"), @r###"
|
||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` at `\'OR\'`.
|
||||
1:5 'OR'
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("OR"), @r###"
|
||||
Was expecting a value but instead got `OR`, which is a reserved keyword. To use `OR` as a field name or a value, surround it by quotes.
|
||||
1:3 OR
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("channel Ponce"), @r###"
|
||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` at `channel Ponce`.
|
||||
1:14 channel Ponce
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("channel = Ponce OR"), @r###"
|
||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` but instead got nothing.
|
||||
19:19 channel = Ponce OR
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("_geoRadius"), @r###"
|
||||
The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.
|
||||
1:11 _geoRadius
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("_geoRadius = 12"), @r###"
|
||||
The `_geoRadius` filter expects three arguments: `_geoRadius(latitude, longitude, radius)`.
|
||||
1:16 _geoRadius = 12
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("_geoPoint(12, 13, 14)"), @r###"
|
||||
`_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance) built-in rule to filter on `_geo` coordinates.
|
||||
1:22 _geoPoint(12, 13, 14)
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("position <= _geoPoint(12, 13, 14)"), @r###"
|
||||
`_geoPoint` is a reserved keyword and thus can't be used as a filter expression. Use the `_geoRadius(latitude, longitude, distance) built-in rule to filter on `_geo` coordinates.
|
||||
13:34 position <= _geoPoint(12, 13, 14)
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("position <= _geoRadius(12, 13, 14)"), @r###"
|
||||
The `_geoRadius` filter is an operation and can't be used as a value.
|
||||
13:35 position <= _geoRadius(12, 13, 14)
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("channel = 'ponce"), @r###"
|
||||
Expression `\'ponce` is missing the following closing delimiter: `'`.
|
||||
11:17 channel = 'ponce
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("channel = \"ponce"), @r###"
|
||||
Expression `\"ponce` is missing the following closing delimiter: `"`.
|
||||
11:17 channel = "ponce
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("channel = mv OR (followers >= 1000"), @r###"
|
||||
Expression `(followers >= 1000` is missing the following closing delimiter: `)`.
|
||||
17:35 channel = mv OR (followers >= 1000
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("channel = mv OR followers >= 1000)"), @r###"
|
||||
Found unexpected characters at the end of the filter: `)`. You probably forgot an `OR` or an `AND` rule.
|
||||
34:35 channel = mv OR followers >= 1000)
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("colour NOT EXIST"), @r###"
|
||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` at `colour NOT EXIST`.
|
||||
1:17 colour NOT EXIST
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("subscribers 100 TO1000"), @r###"
|
||||
Was expecting an operation `=`, `!=`, `>=`, `>`, `<=`, `<`, `IN`, `NOT IN`, `TO`, `EXISTS`, `NOT EXISTS`, or `_geoRadius` at `subscribers 100 TO1000`.
|
||||
1:23 subscribers 100 TO1000
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("channel = ponce ORdog != 'bernese mountain'"), @r###"
|
||||
Found unexpected characters at the end of the filter: `ORdog != \'bernese mountain\'`. You probably forgot an `OR` or an `AND` rule.
|
||||
17:44 channel = ponce ORdog != 'bernese mountain'
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("colour IN blue, green]"), @r###"
|
||||
Expected `[` after `IN` keyword.
|
||||
11:23 colour IN blue, green]
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("colour IN [blue, green, 'blue' > 2]"), @r###"
|
||||
Expected only comma-separated field names inside `IN[..]` but instead found `> 2]`.
|
||||
32:36 colour IN [blue, green, 'blue' > 2]
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("colour IN [blue, green, AND]"), @r###"
|
||||
Expected only comma-separated field names inside `IN[..]` but instead found `AND]`.
|
||||
25:29 colour IN [blue, green, AND]
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("colour IN [blue, green"), @r###"
|
||||
Expected matching `]` after the list of field names given to `IN[`
|
||||
23:23 colour IN [blue, green
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("colour IN ['blue, green"), @r###"
|
||||
Expression `\'blue, green` is missing the following closing delimiter: `'`.
|
||||
12:24 colour IN ['blue, green
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("x = EXISTS"), @r###"
|
||||
Was expecting a value but instead got `EXISTS`, which is a reserved keyword. To use `EXISTS` as a field name or a value, surround it by quotes.
|
||||
5:11 x = EXISTS
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("AND = 8"), @r###"
|
||||
Was expecting a value but instead got `AND`, which is a reserved keyword. To use `AND` as a field name or a value, surround it by quotes.
|
||||
1:4 AND = 8
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(p("((((((((((((((((((((((((((((((((((((((((((((((((((x = 1))))))))))))))))))))))))))))))))))))))))))))))))))"), @r###"
|
||||
The filter exceeded the maximum depth limit. Try rewriting the filter so that it contains fewer nested conditions.
|
||||
51:106 ((((((((((((((((((((((((((((((((((((((((((((((((((x = 1))))))))))))))))))))))))))))))))))))))))))))))))))
|
||||
"###);
|
||||
|
||||
insta::assert_display_snapshot!(
|
||||
p("NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT x = 1"),
|
||||
@r###"
|
||||
The filter exceeded the maximum depth limit. Try rewriting the filter so that it contains fewer nested conditions.
|
||||
797:802 NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT NOT x = 1
|
||||
"###
|
||||
);
|
||||
|
||||
insta::assert_display_snapshot!(p(r#"NOT OR EXISTS AND EXISTS NOT EXISTS"#), @r###"
|
||||
Was expecting a value but instead got `OR`, which is a reserved keyword. To use `OR` as a field name or a value, surround it by quotes.
|
||||
5:7 NOT OR EXISTS AND EXISTS NOT EXISTS
|
||||
"###);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn depth() {
|
||||
let filter = FilterCondition::parse("account_ids=1 OR account_ids=2 OR account_ids=3 OR account_ids=4 OR account_ids=5 OR account_ids=6").unwrap().unwrap();
|
||||
assert!(filter.token_at_depth(1).is_some());
|
||||
assert!(filter.token_at_depth(2).is_none());
|
||||
|
||||
let filter = FilterCondition::parse("(account_ids=1 OR (account_ids=2 AND account_ids=3) OR (account_ids=4 AND account_ids=5) OR account_ids=6)").unwrap().unwrap();
|
||||
assert!(filter.token_at_depth(2).is_some());
|
||||
assert!(filter.token_at_depth(3).is_none());
|
||||
|
||||
let filter = FilterCondition::parse("account_ids=1 OR account_ids=2 AND account_ids=3 OR account_ids=4 AND account_ids=5 OR account_ids=6").unwrap().unwrap();
|
||||
assert!(filter.token_at_depth(2).is_some());
|
||||
assert!(filter.token_at_depth(3).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn token_from_str() {
|
||||
let s = "test string that should not be parsed";
|
||||
let token: Token = s.into();
|
||||
assert_eq!(token.value(), s);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> std::fmt::Display for FilterCondition<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
FilterCondition::Not(filter) => {
|
||||
write!(f, "NOT ({filter})")
|
||||
}
|
||||
FilterCondition::Condition { fid, op } => {
|
||||
write!(f, "{fid} {op}")
|
||||
}
|
||||
FilterCondition::In { fid, els } => {
|
||||
write!(f, "{fid} IN[")?;
|
||||
for el in els {
|
||||
write!(f, "{el}, ")?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
FilterCondition::Or(els) => {
|
||||
write!(f, "OR[")?;
|
||||
for el in els {
|
||||
write!(f, "{el}, ")?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
FilterCondition::And(els) => {
|
||||
write!(f, "AND[")?;
|
||||
for el in els {
|
||||
write!(f, "{el}, ")?;
|
||||
}
|
||||
write!(f, "]")
|
||||
}
|
||||
FilterCondition::GeoLowerThan { point, radius } => {
|
||||
write!(f, "_geoRadius({}, {}, {})", point[0], point[1], radius)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'a> std::fmt::Display for Condition<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Condition::GreaterThan(token) => write!(f, "> {token}"),
|
||||
Condition::GreaterThanOrEqual(token) => write!(f, ">= {token}"),
|
||||
Condition::Equal(token) => write!(f, "= {token}"),
|
||||
Condition::NotEqual(token) => write!(f, "!= {token}"),
|
||||
Condition::Exists => write!(f, "EXISTS"),
|
||||
Condition::LowerThan(token) => write!(f, "< {token}"),
|
||||
Condition::LowerThanOrEqual(token) => write!(f, "<= {token}"),
|
||||
Condition::Between { from, to } => write!(f, "{from} TO {to}"),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<'a> std::fmt::Display for Token<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{{{}}}", self.value())
|
||||
}
|
||||
}
|
16
filter-parser/src/main.rs
Normal file
16
filter-parser/src/main.rs
Normal file
@ -0,0 +1,16 @@
|
||||
fn main() {
|
||||
let input = std::env::args().nth(1).expect("You must provide a filter to test");
|
||||
|
||||
println!("Trying to execute the following filter:\n{}\n", input);
|
||||
|
||||
match filter_parser::FilterCondition::parse(&input) {
|
||||
Ok(filter) => {
|
||||
println!("✅ Valid filter");
|
||||
println!("{:#?}", filter);
|
||||
}
|
||||
Err(e) => {
|
||||
println!("❎ Invalid filter");
|
||||
println!("{}", e);
|
||||
}
|
||||
}
|
||||
}
|
341
filter-parser/src/value.rs
Normal file
341
filter-parser/src/value.rs
Normal file
@ -0,0 +1,341 @@
|
||||
use nom::branch::alt;
|
||||
use nom::bytes::complete::{take_till, take_while, take_while1};
|
||||
use nom::character::complete::{char, multispace0};
|
||||
use nom::combinator::cut;
|
||||
use nom::sequence::{delimited, terminated};
|
||||
use nom::{InputIter, InputLength, InputTake, Slice};
|
||||
|
||||
use crate::error::{ExpectedValueKind, NomErrorExt};
|
||||
use crate::{parse_geo_point, parse_geo_radius, Error, ErrorKind, IResult, Span, Token};
|
||||
|
||||
/// This function goes through all the characters in the [Span], looking for any escaped character (`\`).
/// It generates a new string with all the `\` removed from the [Span].
|
||||
fn unescape(buf: Span, char_to_escape: char) -> String {
|
||||
let to_escape = format!("\\{}", char_to_escape);
|
||||
buf.replace(&to_escape, &char_to_escape.to_string())
|
||||
}
|
||||
|
||||
/// Parse a quoted value. If it encounters an escaped quote it unescapes it.
|
||||
fn quoted_by(quote: char, input: Span) -> IResult<Token> {
|
||||
// empty fields / values are valid in json
|
||||
if input.is_empty() {
|
||||
return Ok((input.slice(input.input_len()..), input.into()));
|
||||
}
|
||||
|
||||
let mut escaped = false;
|
||||
let mut i = input.iter_indices();
|
||||
|
||||
while let Some((idx, c)) = i.next() {
|
||||
if c == quote {
|
||||
let (rem, output) = input.take_split(idx);
|
||||
return Ok((rem, Token::new(output, escaped.then(|| unescape(output, quote)))));
|
||||
} else if c == '\\' {
|
||||
if let Some((_, c)) = i.next() {
|
||||
escaped |= c == quote;
|
||||
} else {
|
||||
return Err(nom::Err::Error(Error::new_from_kind(
|
||||
input,
|
||||
ErrorKind::MalformedValue,
|
||||
)));
|
||||
}
|
||||
}
|
||||
// if it was preceded by a `\` or if it was anything else we can continue to advance
|
||||
}
|
||||
|
||||
Ok((
|
||||
input.slice(input.input_len()..),
|
||||
Token::new(input, escaped.then(|| unescape(input, quote))),
|
||||
))
|
||||
}
|
||||
|
||||
// word = (alphanumeric | _ | - | .)+ except for reserved keywords
|
||||
pub fn word_not_keyword<'a>(input: Span<'a>) -> IResult<Token<'a>> {
|
||||
let (input, word): (_, Token<'a>) =
|
||||
take_while1(is_value_component)(input).map(|(s, t)| (s, t.into()))?;
|
||||
if is_keyword(word.value()) {
|
||||
return Err(nom::Err::Error(Error::new_from_kind(
|
||||
input,
|
||||
ErrorKind::ReservedKeyword(word.value().to_owned()),
|
||||
)));
|
||||
}
|
||||
Ok((input, word))
|
||||
}
|
||||
|
||||
// word = {tag}
|
||||
pub fn word_exact<'a, 'b: 'a>(tag: &'b str) -> impl Fn(Span<'a>) -> IResult<'a, Token<'a>> {
|
||||
move |input| {
|
||||
let (input, word): (_, Token<'a>) =
|
||||
take_while1(is_value_component)(input).map(|(s, t)| (s, t.into()))?;
|
||||
if word.value() == tag {
|
||||
Ok((input, word))
|
||||
} else {
|
||||
Err(nom::Err::Error(Error::new_from_kind(
|
||||
input,
|
||||
ErrorKind::InternalError(nom::error::ErrorKind::Tag),
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// value = WS* ( word | singleQuoted | doubleQuoted) WS+
|
||||
pub fn parse_value(input: Span) -> IResult<Token> {
|
||||
// to get a better diagnostic message we strip the leading whitespace from the input right away
|
||||
let (input, _) = take_while(char::is_whitespace)(input)?;
|
||||
|
||||
// then, we want to check if the user is misusing a geo expression.
// This expression can’t finish without an error:
// we want to return that error in case of failure.
|
||||
if let Err(err) = parse_geo_point(input) {
|
||||
if err.is_failure() {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
match parse_geo_radius(input) {
|
||||
Ok(_) => return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::MisusedGeo))),
|
||||
// if we encountered a failure it means the user wrote a malformed _geoRadius filter.
// But instead of showing them how to fix their syntax we tell them this filter should not be used as a value.
|
||||
Err(e) if e.is_failure() => {
|
||||
return Err(nom::Err::Failure(Error::new_from_kind(input, ErrorKind::MisusedGeo)))
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
|
||||
// this parser is only used when an error is encountered and it parses the
// largest string possible that does not contain any “language” syntax.
// If we try to parse `name = 🦀 AND language = rust` we want to return an
// error saying we could not parse `🦀`, not that no value was found or that
// we could not parse `🦀 AND language = rust`.
// we want to remove the space before entering the alt because if we don't,
// when we create the errors from the output of the alt we have spaces everywhere
|
||||
let error_word = take_till::<_, _, Error>(is_syntax_component);
|
||||
|
||||
let (input, value) = terminated(
|
||||
alt((
|
||||
delimited(char('\''), cut(|input| quoted_by('\'', input)), cut(char('\''))),
|
||||
delimited(char('"'), cut(|input| quoted_by('"', input)), cut(char('"'))),
|
||||
word_not_keyword,
|
||||
)),
|
||||
multispace0,
|
||||
)(input)
|
||||
// if we found nothing in the alt it means the user specified something that was not recognized as a value
|
||||
.map_err(|e: nom::Err<Error>| {
|
||||
e.map_err(|error| {
|
||||
let expected_value_kind = if matches!(error.kind(), ErrorKind::ReservedKeyword(_)) {
|
||||
ExpectedValueKind::ReservedKeyword
|
||||
} else {
|
||||
ExpectedValueKind::Other
|
||||
};
|
||||
Error::new_from_kind(
|
||||
error_word(input).unwrap().1,
|
||||
ErrorKind::ExpectedValue(expected_value_kind),
|
||||
)
|
||||
})
|
||||
})
|
||||
.map_err(|e| {
|
||||
e.map_fail(|failure| {
|
||||
// if we encountered a char failure it means the user had an unmatched quote
|
||||
if matches!(failure.kind(), ErrorKind::Char(_)) {
|
||||
Error::new_from_kind(input, ErrorKind::MissingClosingDelimiter(failure.char()))
|
||||
} else {
|
||||
// otherwise we leave the failure untouched
|
||||
failure
|
||||
}
|
||||
})
|
||||
})?;
|
||||
|
||||
Ok((input, value))
|
||||
}
|
||||
|
||||
fn is_value_component(c: char) -> bool {
|
||||
c.is_alphanumeric() || ['_', '-', '.'].contains(&c)
|
||||
}
|
||||
|
||||
fn is_syntax_component(c: char) -> bool {
|
||||
c.is_whitespace() || ['(', ')', '=', '<', '>', '!'].contains(&c)
|
||||
}
|
||||
|
||||
fn is_keyword(s: &str) -> bool {
|
||||
matches!(s, "AND" | "OR" | "IN" | "NOT" | "TO" | "EXISTS" | "_geoRadius")
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod test {
|
||||
use nom::Finish;
|
||||
|
||||
use super::*;
|
||||
use crate::tests::rtok;
|
||||
|
||||
#[test]
|
||||
fn test_span() {
|
||||
let test_case = [
|
||||
("channel", rtok("", "channel")),
|
||||
(".private", rtok("", ".private")),
|
||||
("I-love-kebab", rtok("", "I-love-kebab")),
|
||||
("but_snakes_is_also_good", rtok("", "but_snakes_is_also_good")),
|
||||
("parens(", rtok("", "parens")),
|
||||
("parens)", rtok("", "parens")),
|
||||
("not!", rtok("", "not")),
|
||||
(" channel", rtok(" ", "channel")),
|
||||
("channel ", rtok("", "channel")),
|
||||
(" channel ", rtok(" ", "channel")),
|
||||
("'channel'", rtok("'", "channel")),
|
||||
("\"channel\"", rtok("\"", "channel")),
|
||||
("'cha)nnel'", rtok("'", "cha)nnel")),
|
||||
("'cha\"nnel'", rtok("'", "cha\"nnel")),
|
||||
("\"cha'nnel\"", rtok("\"", "cha'nnel")),
|
||||
("\" some spaces \"", rtok("\"", " some spaces ")),
|
||||
("\"cha'nnel\"", rtok("'", "cha'nnel")),
|
||||
("\"cha'nnel\"", rtok("'", "cha'nnel")),
|
||||
("I'm tamo", rtok("'m tamo", "I")),
|
||||
("\"I'm \\\"super\\\" tamo\"", rtok("\"", "I'm \\\"super\\\" tamo")),
|
||||
];
|
||||
|
||||
for (input, expected) in test_case {
|
||||
let input = Span::new_extra(input, input);
|
||||
let result = parse_value(input);
|
||||
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Filter `{:?}` was supposed to be parsed but failed with the following error: `{}`",
|
||||
expected,
|
||||
result.unwrap_err()
|
||||
);
|
||||
let token = result.unwrap().1;
|
||||
assert_eq!(token, expected, "Filter `{}` failed.", input);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_escape_inside_double_quote() {
|
||||
// (input, remaining, expected output token, output value)
|
||||
let test_case = [
|
||||
("aaaa", "", rtok("", "aaaa"), "aaaa"),
|
||||
(r#"aa"aa"#, r#""aa"#, rtok("", "aa"), "aa"),
|
||||
(r#"aa\"aa"#, r#""#, rtok("", r#"aa\"aa"#), r#"aa"aa"#),
|
||||
(r#"aa\\\aa"#, r#""#, rtok("", r#"aa\\\aa"#), r#"aa\\\aa"#),
|
||||
(r#"aa\\"\aa"#, r#""\aa"#, rtok("", r#"aa\\"#), r#"aa\\"#),
|
||||
(r#"aa\\\"\aa"#, r#""#, rtok("", r#"aa\\\"\aa"#), r#"aa\\"\aa"#),
|
||||
(r#"\"\""#, r#""#, rtok("", r#"\"\""#), r#""""#),
|
||||
];
|
||||
|
||||
for (input, remaining, expected_tok, expected_val) in test_case {
|
||||
let span = Span::new_extra(input, "");
|
||||
let result = quoted_by('"', span);
|
||||
assert!(result.is_ok());
|
||||
|
||||
let (rem, output) = result.unwrap();
|
||||
assert_eq!(rem.to_string(), remaining);
|
||||
assert_eq!(output, expected_tok);
|
||||
assert_eq!(output.value(), expected_val.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_unescape() {
|
||||
// double quote
|
||||
assert_eq!(
|
||||
unescape(Span::new_extra(r#"Hello \"World\""#, ""), '"'),
|
||||
r#"Hello "World""#.to_string()
|
||||
);
|
||||
assert_eq!(
|
||||
unescape(Span::new_extra(r#"Hello \\\"World\\\""#, ""), '"'),
|
||||
r#"Hello \\"World\\""#.to_string()
|
||||
);
|
||||
// simple quote
|
||||
assert_eq!(
|
||||
unescape(Span::new_extra(r#"Hello \'World\'"#, ""), '\''),
|
||||
r#"Hello 'World'"#.to_string()
|
||||
);
|
||||
assert_eq!(
|
||||
unescape(Span::new_extra(r#"Hello \\\'World\\\'"#, ""), '\''),
|
||||
r#"Hello \\'World\\'"#.to_string()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_value() {
|
||||
let test_case = [
|
||||
// (input, expected value, if a string was generated to hold the new value)
|
||||
("channel", "channel", false),
|
||||
// All the base tests; no escaped string should be generated
|
||||
(".private", ".private", false),
|
||||
("I-love-kebab", "I-love-kebab", false),
|
||||
("but_snakes_is_also_good", "but_snakes_is_also_good", false),
|
||||
("parens(", "parens", false),
|
||||
("parens)", "parens", false),
|
||||
("not!", "not", false),
|
||||
(" channel", "channel", false),
|
||||
("channel ", "channel", false),
|
||||
(" channel ", "channel", false),
|
||||
("'channel'", "channel", false),
|
||||
("\"channel\"", "channel", false),
|
||||
("'cha)nnel'", "cha)nnel", false),
|
||||
("'cha\"nnel'", "cha\"nnel", false),
|
||||
("\"cha'nnel\"", "cha'nnel", false),
|
||||
("\" some spaces \"", " some spaces ", false),
|
||||
("\"cha'nnel\"", "cha'nnel", false),
|
||||
("\"cha'nnel\"", "cha'nnel", false),
|
||||
("I'm tamo", "I", false),
|
||||
// escaped characters other than the quote
|
||||
(r#""\\""#, r#"\\"#, false),
|
||||
(r#""\\\\\\""#, r#"\\\\\\"#, false),
|
||||
(r#""aa\\aa""#, r#"aa\\aa"#, false),
|
||||
// with double quote
|
||||
(r#""Hello \"world\"""#, r#"Hello "world""#, true),
|
||||
(r#""Hello \\\"world\\\"""#, r#"Hello \\"world\\""#, true),
|
||||
(r#""I'm \"super\" tamo""#, r#"I'm "super" tamo"#, true),
|
||||
(r#""\"\"""#, r#""""#, true),
|
||||
// with simple quote
|
||||
(r#"'Hello \'world\''"#, r#"Hello 'world'"#, true),
|
||||
(r#"'Hello \\\'world\\\''"#, r#"Hello \\'world\\'"#, true),
|
||||
(r#"'I\'m "super" tamo'"#, r#"I'm "super" tamo"#, true),
|
||||
(r#"'\'\''"#, r#"''"#, true),
|
||||
];
|
||||
|
||||
for (input, expected, escaped) in test_case {
|
||||
let input = Span::new_extra(input, input);
|
||||
let result = parse_value(input);
|
||||
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"Filter `{:?}` was supposed to be parsed but failed with the following error: `{}`",
|
||||
expected,
|
||||
result.unwrap_err()
|
||||
);
|
||||
let token = result.unwrap().1;
|
||||
assert_eq!(
|
||||
token.value.is_some(),
|
||||
escaped,
|
||||
"Filter `{}` was not supposed to be escaped",
|
||||
input
|
||||
);
|
||||
assert_eq!(token.value(), expected, "Filter `{}` failed.", input);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn diagnostic() {
|
||||
let test_case = [
|
||||
("🦀", "🦀"),
|
||||
(" 🦀", "🦀"),
|
||||
("🦀 AND crab = truc", "🦀"),
|
||||
("🦀_in_name", "🦀_in_name"),
|
||||
(" (name = ...", ""),
|
||||
];
|
||||
|
||||
for (input, expected) in test_case {
|
||||
let input = Span::new_extra(input, input);
|
||||
let result = parse_value(input);
|
||||
|
||||
assert!(
|
||||
result.is_err(),
|
||||
"Filter `{}` wasn’t supposed to be parsed but it did with the following result: `{:?}`",
|
||||
expected,
|
||||
result.unwrap()
|
||||
);
|
||||
// get the inner string referenced in the error
|
||||
let value = *result.finish().unwrap_err().context().fragment();
|
||||
assert_eq!(value, expected, "Filter `{}` was supposed to fail with the following value: `{}`, but it failed with: `{}`.", input, expected, value);
|
||||
}
|
||||
}
|
||||
}
|
17
flatten-serde-json/Cargo.toml
Normal file
17
flatten-serde-json/Cargo.toml
Normal file
@ -0,0 +1,17 @@
|
||||
[package]
|
||||
name = "flatten-serde-json"
|
||||
version = "0.39.0"
|
||||
edition = "2021"
|
||||
description = "Flatten serde-json objects like elastic search"
|
||||
readme = "README.md"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
serde_json = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = { version = "0.4.0", features = ["html_reports"] }
|
||||
|
||||
[[bench]]
|
||||
name = "benchmarks"
|
||||
harness = false
|
153
flatten-serde-json/README.md
Normal file
153
flatten-serde-json/README.md
Normal file
@ -0,0 +1,153 @@
|
||||
# Flatten serde Json
|
||||
|
||||
This crate flattens [`serde_json`](https://docs.rs/serde_json/latest/serde_json/) `Object`s in a format
similar to [Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/current/nested.html).
|
||||
|
||||
## Examples
|
||||
|
||||
### There is nothing to do
|
||||
|
||||
```json
|
||||
{
|
||||
"id": "287947",
|
||||
"title": "Shazam!",
|
||||
"release_date": 1553299200,
|
||||
"genres": [
|
||||
"Action",
|
||||
"Comedy",
|
||||
"Fantasy"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Flattens to:
|
||||
```json
|
||||
{
|
||||
"id": "287947",
|
||||
"title": "Shazam!",
|
||||
"release_date": 1553299200,
|
||||
"genres": [
|
||||
"Action",
|
||||
"Comedy",
|
||||
"Fantasy"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
------------
|
||||
|
||||
### Objects
|
||||
|
||||
```json
|
||||
{
|
||||
"a": {
|
||||
"b": "c",
|
||||
"d": "e",
|
||||
"f": "g"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Flattens to:
|
||||
```json
|
||||
{
|
||||
"a.b": "c",
|
||||
"a.d": "e",
|
||||
"a.f": "g"
|
||||
}
|
||||
```
|
||||
|
||||
------------
|
||||
|
||||
### Array of objects
|
||||
|
||||
```json
|
||||
{
|
||||
"a": [
|
||||
{ "b": "c" },
|
||||
{ "b": "d" },
|
||||
{ "b": "e" },
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Flattens to:
|
||||
```json
|
||||
{
|
||||
"a.b": ["c", "d", "e"],
|
||||
}
|
||||
```
|
||||
|
||||
------------
|
||||
|
||||
### Array of objects with a normal value in the array
|
||||
|
||||
```json
|
||||
{
|
||||
"a": [
|
||||
42,
|
||||
{ "b": "c" },
|
||||
{ "b": "d" },
|
||||
{ "b": "e" },
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Flattens to:
|
||||
```json
|
||||
{
|
||||
"a": 42,
|
||||
"a.b": ["c", "d", "e"],
|
||||
}
|
||||
```
|
||||
|
||||
------------
|
||||
|
||||
### Array of objects of array of objects of ...
|
||||
|
||||
```json
|
||||
{
|
||||
"a": [
|
||||
"b",
|
||||
["c", "d"],
|
||||
{ "e": ["f", "g"] },
|
||||
[
|
||||
{ "h": "i" },
|
||||
{ "e": ["j", { "z": "y" }] },
|
||||
],
|
||||
["l"],
|
||||
"m",
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
Flattens to:
|
||||
```json
|
||||
{
|
||||
"a": ["b", "c", "d", "l", "m"],
|
||||
"a.e": ["f", "g", "j"],
|
||||
"a.h": "i",
|
||||
"a.e.z": "y",
|
||||
}
|
||||
```
|
||||
|
||||
------------
|
||||
|
||||
### Collision between a generated field name and an already existing field
|
||||
|
||||
```json
|
||||
{
|
||||
"a": {
|
||||
"b": "c",
|
||||
},
|
||||
"a.b": "d",
|
||||
}
|
||||
```
|
||||
|
||||
Flattens to:
|
||||
```json
|
||||
{
|
||||
"a.b": ["c", "d"],
|
||||
}
|
||||
```
|
||||
|
42
flatten-serde-json/benches/benchmarks.rs
Normal file
42
flatten-serde-json/benches/benchmarks.rs
Normal file
@ -0,0 +1,42 @@
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
use flatten_serde_json::flatten;
|
||||
use serde_json::json;
|
||||
|
||||
pub fn flatten_simple(c: &mut Criterion) {
|
||||
let mut input = json!({
|
||||
"a": {
|
||||
"b": "c",
|
||||
"d": "e",
|
||||
"f": "g"
|
||||
}
|
||||
});
|
||||
let object = input.as_object_mut().unwrap();
|
||||
|
||||
c.bench_with_input(BenchmarkId::new("flatten", "simple"), &object, |b, input| {
|
||||
b.iter(|| flatten(input))
|
||||
});
|
||||
}
|
||||
|
||||
pub fn flatten_complex(c: &mut Criterion) {
|
||||
let mut input = json!({
|
||||
"a": [
|
||||
"b",
|
||||
["c", "d"],
|
||||
{ "e": ["f", "g"] },
|
||||
[
|
||||
{ "h": "i" },
|
||||
{ "e": ["j", { "z": "y" }] },
|
||||
],
|
||||
["l"],
|
||||
"m",
|
||||
]
|
||||
});
|
||||
let object = input.as_object_mut().unwrap();
|
||||
|
||||
c.bench_with_input(BenchmarkId::new("flatten", "complex"), &object, |b, input| {
|
||||
b.iter(|| flatten(input))
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, flatten_simple, flatten_complex);
|
||||
criterion_main!(benches);
|
27
flatten-serde-json/fuzz/Cargo.toml
Normal file
27
flatten-serde-json/fuzz/Cargo.toml
Normal file
@ -0,0 +1,27 @@
|
||||
[package]
|
||||
name = "flatten-serde-json-fuzz"
|
||||
version = "0.0.0"
|
||||
authors = ["Automatically generated"]
|
||||
publish = false
|
||||
edition = "2018"
|
||||
|
||||
[package.metadata]
|
||||
cargo-fuzz = true
|
||||
|
||||
[dependencies]
|
||||
libfuzzer-sys = "0.4"
|
||||
arbitrary-json = "0.1.1"
|
||||
json-depth-checker = { path = "../../json-depth-checker" }
|
||||
|
||||
[dependencies.flatten-serde-json]
|
||||
path = ".."
|
||||
|
||||
# Prevent this from interfering with workspaces
|
||||
[workspace]
|
||||
members = ["."]
|
||||
|
||||
[[bin]]
|
||||
name = "flatten"
|
||||
path = "fuzz_targets/flatten.rs"
|
||||
test = false
|
||||
doc = false
|
12
flatten-serde-json/fuzz/fuzz_targets/flatten.rs
Normal file
12
flatten-serde-json/fuzz/fuzz_targets/flatten.rs
Normal file
@ -0,0 +1,12 @@
|
||||
#![no_main]
|
||||
use arbitrary_json::ArbitraryObject;
|
||||
use flatten_serde_json::flatten;
|
||||
use json_depth_checker::should_flatten_from_value;
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
|
||||
fuzz_target!(|object: ArbitraryObject| {
|
||||
let object = flatten(&object);
|
||||
if !object.is_empty() {
|
||||
assert!(object.values().any(|value| !should_flatten_from_value(value)));
|
||||
}
|
||||
});
|
305
flatten-serde-json/src/lib.rs
Normal file
305
flatten-serde-json/src/lib.rs
Normal file
@ -0,0 +1,305 @@
|
||||
#![doc = include_str!("../README.md")]
|
||||
|
||||
use serde_json::{Map, Value};
|
||||
|
||||
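/// Flatten the given JSON object into a single-level map whose keys are the
/// dot-separated paths of the original nested keys (see the README examples).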
pub fn flatten(json: &Map<String, Value>) -> Map<String, Value> {
|
||||
let mut obj = Map::new();
|
||||
let mut all_keys = vec![];
|
||||
insert_object(&mut obj, None, json, &mut all_keys);
|
||||
for key in all_keys {
|
||||
obj.entry(key).or_insert(Value::Array(vec![]));
|
||||
}
|
||||
obj
|
||||
}
|
||||
|
||||
fn insert_object(
|
||||
base_json: &mut Map<String, Value>,
|
||||
base_key: Option<&str>,
|
||||
object: &Map<String, Value>,
|
||||
all_keys: &mut Vec<String>,
|
||||
) {
|
||||
for (key, value) in object {
|
||||
let new_key = base_key.map_or_else(|| key.clone(), |base_key| format!("{base_key}.{key}"));
|
||||
all_keys.push(new_key.clone());
|
||||
if let Some(array) = value.as_array() {
|
||||
insert_array(base_json, &new_key, array, all_keys);
|
||||
} else if let Some(object) = value.as_object() {
|
||||
insert_object(base_json, Some(&new_key), object, all_keys);
|
||||
} else {
|
||||
insert_value(base_json, &new_key, value.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn insert_array(
|
||||
base_json: &mut Map<String, Value>,
|
||||
base_key: &str,
|
||||
array: &Vec<Value>,
|
||||
all_keys: &mut Vec<String>,
|
||||
) {
|
||||
for value in array {
|
||||
if let Some(object) = value.as_object() {
|
||||
insert_object(base_json, Some(base_key), object, all_keys);
|
||||
} else if let Some(sub_array) = value.as_array() {
|
||||
insert_array(base_json, base_key, sub_array, all_keys);
|
||||
} else {
|
||||
insert_value(base_json, base_key, value.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn insert_value(base_json: &mut Map<String, Value>, key: &str, to_insert: Value) {
|
||||
debug_assert!(!to_insert.is_object());
|
||||
debug_assert!(!to_insert.is_array());
|
||||
|
||||
// does the field already exist?
|
||||
if let Some(value) = base_json.get_mut(key) {
|
||||
// is it already an array
|
||||
if let Some(array) = value.as_array_mut() {
|
||||
array.push(to_insert);
|
||||
// or is there a collision
|
||||
} else {
|
||||
let value = std::mem::take(value);
|
||||
base_json[key] = Value::Array(vec![value, to_insert]);
|
||||
}
|
||||
// if it does not exist we can push the value untouched
|
||||
} else {
|
||||
base_json.insert(key.to_string(), to_insert);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use serde_json::json;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn no_flattening() {
|
||||
let mut base: Value = json!({
|
||||
"id": "287947",
|
||||
"title": "Shazam!",
|
||||
"release_date": 1553299200,
|
||||
"genres": [
|
||||
"Action",
|
||||
"Comedy",
|
||||
"Fantasy"
|
||||
]
|
||||
});
|
||||
let json = std::mem::take(base.as_object_mut().unwrap());
|
||||
let flat = flatten(&json);
|
||||
|
||||
println!(
|
||||
"got:\n{}\nexpected:\n{}\n",
|
||||
serde_json::to_string_pretty(&flat).unwrap(),
|
||||
serde_json::to_string_pretty(&json).unwrap()
|
||||
);
|
||||
|
||||
assert_eq!(flat, json);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flatten_object() {
|
||||
let mut base: Value = json!({
|
||||
"a": {
|
||||
"b": "c",
|
||||
"d": "e",
|
||||
"f": "g"
|
||||
}
|
||||
});
|
||||
let json = std::mem::take(base.as_object_mut().unwrap());
|
||||
let flat = flatten(&json);
|
||||
|
||||
assert_eq!(
|
||||
&flat,
|
||||
json!({
|
||||
"a": [],
|
||||
"a.b": "c",
|
||||
"a.d": "e",
|
||||
"a.f": "g"
|
||||
})
|
||||
.as_object()
|
||||
.unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flatten_array() {
|
||||
let mut base: Value = json!({
|
||||
"a": [
|
||||
1,
|
||||
"b",
|
||||
[],
|
||||
[{}],
|
||||
{ "b": "c" },
|
||||
{ "b": "d" },
|
||||
{ "b": "e" },
|
||||
]
|
||||
});
|
||||
let json = std::mem::take(base.as_object_mut().unwrap());
|
||||
let flat = flatten(&json);
|
||||
|
||||
assert_eq!(
|
||||
&flat,
|
||||
json!({
|
||||
"a": [1, "b"],
|
||||
"a.b": ["c", "d", "e"],
|
||||
})
|
||||
.as_object()
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
// here we must keep 42 in "a"
|
||||
let mut base: Value = json!({
|
||||
"a": [
|
||||
42,
|
||||
{ "b": "c" },
|
||||
{ "b": "d" },
|
||||
{ "b": "e" },
|
||||
]
|
||||
});
|
||||
let json = std::mem::take(base.as_object_mut().unwrap());
|
||||
let flat = flatten(&json);
|
||||
|
||||
assert_eq!(
|
||||
&flat,
|
||||
json!({
|
||||
"a": 42,
|
||||
"a.b": ["c", "d", "e"],
|
||||
})
|
||||
.as_object()
|
||||
.unwrap()
|
||||
);
|
||||
|
||||
// here we must keep null in "a"
|
||||
let mut base: Value = json!({
|
||||
"a": [
|
||||
{ "b": "c" },
|
||||
{ "b": "d" },
|
||||
{ "b": "e" },
|
||||
null,
|
||||
]
|
||||
});
|
||||
let json = std::mem::take(base.as_object_mut().unwrap());
|
||||
let flat = flatten(&json);
|
||||
|
||||
assert_eq!(
|
||||
&flat,
|
||||
json!({
|
||||
"a": null,
|
||||
"a.b": ["c", "d", "e"],
|
||||
})
|
||||
.as_object()
|
||||
.unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn collision_with_object() {
|
||||
let mut base: Value = json!({
|
||||
"a": {
|
||||
"b": "c",
|
||||
},
|
||||
"a.b": "d",
|
||||
});
|
||||
let json = std::mem::take(base.as_object_mut().unwrap());
|
||||
let flat = flatten(&json);
|
||||
|
||||
assert_eq!(
|
||||
&flat,
|
||||
json!({
|
||||
"a": [],
|
||||
"a.b": ["c", "d"],
|
||||
})
|
||||
.as_object()
|
||||
.unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn collision_with_array() {
|
||||
let mut base: Value = json!({
|
||||
"a": [
|
||||
{ "b": "c" },
|
||||
{ "b": "d", "c": "e" },
|
||||
[35],
|
||||
],
|
||||
"a.b": "f",
|
||||
});
|
||||
let json = std::mem::take(base.as_object_mut().unwrap());
|
||||
let flat = flatten(&json);
|
||||
|
||||
assert_eq!(
|
||||
&flat,
|
||||
json!({
|
||||
"a.b": ["c", "d", "f"],
|
||||
"a.c": "e",
|
||||
"a": 35,
|
||||
})
|
||||
.as_object()
|
||||
.unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flatten_nested_arrays() {
|
||||
let mut base: Value = json!({
|
||||
"a": [
|
||||
["b", "c"],
|
||||
{ "d": "e" },
|
||||
["f", "g"],
|
||||
[
|
||||
{ "h": "i" },
|
||||
{ "d": "j" },
|
||||
],
|
||||
["k", "l"],
|
||||
]
|
||||
});
|
||||
let json = std::mem::take(base.as_object_mut().unwrap());
|
||||
let flat = flatten(&json);
|
||||
|
||||
assert_eq!(
|
||||
&flat,
|
||||
json!({
|
||||
"a": ["b", "c", "f", "g", "k", "l"],
|
||||
"a.d": ["e", "j"],
|
||||
"a.h": "i",
|
||||
})
|
||||
.as_object()
|
||||
.unwrap()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flatten_nested_arrays_and_objects() {
|
||||
let mut base: Value = json!({
|
||||
"a": [
|
||||
"b",
|
||||
["c", "d"],
|
||||
{ "e": ["f", "g"] },
|
||||
[
|
||||
{ "h": "i" },
|
||||
{ "e": ["j", { "z": "y" }] },
|
||||
],
|
||||
["l"],
|
||||
"m",
|
||||
]
|
||||
});
|
||||
let json = std::mem::take(base.as_object_mut().unwrap());
|
||||
let flat = flatten(&json);
|
||||
|
||||
println!("{}", serde_json::to_string_pretty(&flat).unwrap());
|
||||
|
||||
assert_eq!(
|
||||
&flat,
|
||||
json!({
|
||||
"a": ["b", "c", "d", "l", "m"],
|
||||
"a.e": ["f", "g", "j"],
|
||||
"a.h": "i",
|
||||
"a.e.z": "y",
|
||||
})
|
||||
.as_object()
|
||||
.unwrap()
|
||||
);
|
||||
}
|
||||
}
|
11
flatten-serde-json/src/main.rs
Normal file
11
flatten-serde-json/src/main.rs
Normal file
@ -0,0 +1,11 @@
|
||||
use std::io::stdin;
|
||||
|
||||
use flatten_serde_json::flatten;
|
||||
use serde_json::{Map, Value};
|
||||
|
||||
fn main() {
|
||||
let json: Map<String, Value> = serde_json::from_reader(stdin()).unwrap();
|
||||
|
||||
let result = flatten(&json);
|
||||
println!("{}", serde_json::to_string_pretty(&result).unwrap());
|
||||
}
|
16
json-depth-checker/Cargo.toml
Normal file
16
json-depth-checker/Cargo.toml
Normal file
@ -0,0 +1,16 @@
|
||||
[package]
|
||||
name = "json-depth-checker"
|
||||
version = "0.39.0"
|
||||
edition = "2021"
|
||||
description = "A library that indicates if a JSON must be flattened"
|
||||
publish = false
|
||||
|
||||
[dependencies]
|
||||
serde_json = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
criterion = "0.4.0"
|
||||
|
||||
[[bench]]
|
||||
name = "depth"
|
||||
harness = false
|
59
json-depth-checker/benches/depth.rs
Normal file
59
json-depth-checker/benches/depth.rs
Normal file
@ -0,0 +1,59 @@
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
use json_depth_checker::should_flatten_from_unchecked_slice;
|
||||
use serde_json::json;
|
||||
|
||||
fn criterion_benchmark(c: &mut Criterion) {
|
||||
let null = serde_json::to_vec(&json!(null)).unwrap();
|
||||
let bool_true = serde_json::to_vec(&json!(true)).unwrap();
|
||||
let bool_false = serde_json::to_vec(&json!(false)).unwrap();
|
||||
let integer = serde_json::to_vec(&json!(42)).unwrap();
|
||||
let float = serde_json::to_vec(&json!(1456.258)).unwrap();
|
||||
let string = serde_json::to_vec(&json!("hello world")).unwrap();
|
||||
let object = serde_json::to_vec(&json!({ "hello": "world",})).unwrap();
|
||||
let complex_object = serde_json::to_vec(&json!({
|
||||
"doggos": [
|
||||
{ "bernard": true },
|
||||
{ "michel": 42 },
|
||||
false,
|
||||
],
|
||||
"bouvier": true,
|
||||
"caniche": null,
|
||||
}))
|
||||
.unwrap();
|
||||
let simple_array = serde_json::to_vec(&json!([
|
||||
1,
|
||||
2,
|
||||
3,
|
||||
"viva",
|
||||
"l\"algeria",
|
||||
true,
|
||||
"[array]",
|
||||
"escaped string \""
|
||||
]))
|
||||
.unwrap();
|
||||
let array_of_array = serde_json::to_vec(&json!([1, [2, [3]]])).unwrap();
|
||||
let array_of_object = serde_json::to_vec(&json!([1, [2, [3]], {}])).unwrap();
|
||||
|
||||
c.bench_function("null", |b| b.iter(|| should_flatten_from_unchecked_slice(&null)));
|
||||
c.bench_function("true", |b| b.iter(|| should_flatten_from_unchecked_slice(&bool_true)));
|
||||
c.bench_function("false", |b| b.iter(|| should_flatten_from_unchecked_slice(&bool_false)));
|
||||
c.bench_function("integer", |b| b.iter(|| should_flatten_from_unchecked_slice(&integer)));
|
||||
c.bench_function("float", |b| b.iter(|| should_flatten_from_unchecked_slice(&float)));
|
||||
c.bench_function("string", |b| b.iter(|| should_flatten_from_unchecked_slice(&string)));
|
||||
c.bench_function("object", |b| b.iter(|| should_flatten_from_unchecked_slice(&object)));
|
||||
c.bench_function("complex object", |b| {
|
||||
b.iter(|| should_flatten_from_unchecked_slice(&complex_object))
|
||||
});
|
||||
c.bench_function("simple array", |b| {
|
||||
b.iter(|| should_flatten_from_unchecked_slice(&simple_array))
|
||||
});
|
||||
c.bench_function("array of array", |b| {
|
||||
b.iter(|| should_flatten_from_unchecked_slice(&array_of_array))
|
||||
});
|
||||
c.bench_function("array of object", |b| {
|
||||
b.iter(|| should_flatten_from_unchecked_slice(&array_of_object))
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, criterion_benchmark);
|
||||
criterion_main!(benches);
|
27
json-depth-checker/fuzz/Cargo.toml
Normal file
27
json-depth-checker/fuzz/Cargo.toml
Normal file
@ -0,0 +1,27 @@
|
||||
[package]
|
||||
name = "json-depth-checker"
|
||||
version = "0.0.0"
|
||||
authors = ["Automatically generated"]
|
||||
publish = false
|
||||
edition = "2018"
|
||||
|
||||
[package.metadata]
|
||||
cargo-fuzz = true
|
||||
|
||||
[dependencies]
|
||||
libfuzzer-sys = "0.4"
|
||||
arbitrary-json = "0.1.1"
|
||||
serde_json = "1.0.79"
|
||||
|
||||
[dependencies.json-depth-checker]
|
||||
path = ".."
|
||||
|
||||
# Prevent this from interfering with workspaces
|
||||
[workspace]
|
||||
members = ["."]
|
||||
|
||||
[[bin]]
|
||||
name = "depth"
|
||||
path = "fuzz_targets/depth.rs"
|
||||
test = false
|
||||
doc = false
|
13
json-depth-checker/fuzz/fuzz_targets/depth.rs
Normal file
13
json-depth-checker/fuzz/fuzz_targets/depth.rs
Normal file
@ -0,0 +1,13 @@
|
||||
#![no_main]
|
||||
use arbitrary_json::ArbitraryValue;
|
||||
use json_depth_checker::*;
|
||||
use libfuzzer_sys::fuzz_target;
|
||||
|
||||
fuzz_target!(|value: ArbitraryValue| {
|
||||
let value = serde_json::Value::from(value);
|
||||
let left = should_flatten_from_value(&value);
|
||||
let value = serde_json::to_vec(&value).unwrap();
|
||||
let right = should_flatten_from_unchecked_slice(&value);
|
||||
|
||||
assert_eq!(left, right);
|
||||
});
|
114
json-depth-checker/src/lib.rs
Normal file
114
json-depth-checker/src/lib.rs
Normal file
@ -0,0 +1,114 @@
|
||||
use serde_json::Value;
|
||||
|
||||
/// Your JSON MUST BE valid and generated by `serde_json::to_vec` before being
/// passed to this function. This function is DUMB and FAST but makes a lot of
/// assumptions about the way `serde_json` will generate its input.
///
/// Will return `true` if the JSON contains an object, an array of arrays
/// or an array containing an object. Returns `false` for everything else.
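///
/// A minimal usage sketch relying only on the behaviour described above:
///
/// ```
/// let json = serde_json::to_vec(&serde_json::json!({ "nested": { "object": true } })).unwrap();
/// assert!(json_depth_checker::should_flatten_from_unchecked_slice(&json));
/// ```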
|
||||
pub fn should_flatten_from_unchecked_slice(json: &[u8]) -> bool {
|
||||
if json.is_empty() {
|
||||
return false;
|
||||
}
|
||||
|
||||
// since the json we receive has been generated by serde_json we know
// it doesn't contain any whitespace at the beginning, thus we can check
// directly if we're looking at an object.
|
||||
if json[0] == b'{' {
|
||||
return true;
|
||||
} else if json[0] != b'[' {
|
||||
// if the json isn't an object or an array it means it's a simple value.
|
||||
return false;
|
||||
}
|
||||
|
||||
// The array case is a little bit more complex. We are looking for a second
// `[` but we need to ensure that it doesn't appear inside a string. Thus
// we need to keep track of whether we're in a string or not.
|
||||
|
||||
// will be used when we meet a `\` to skip the next character.
|
||||
let mut skip_next = false;
|
||||
let mut in_string = false;
|
||||
|
||||
for byte in json.iter().skip(1) {
|
||||
match byte {
|
||||
// handle the backslash.
|
||||
_ if skip_next => skip_next = false,
|
||||
b'\\' => skip_next = true,
|
||||
|
||||
// handle the strings.
|
||||
byte if in_string => {
|
||||
if *byte == b'"' {
|
||||
in_string = false;
|
||||
}
|
||||
}
|
||||
b'"' => in_string = true,
|
||||
|
||||
// handle the arrays.
|
||||
b'[' => return true,
|
||||
// since we know the json is valid we don't need to ensure the
|
||||
// array is correctly closed
|
||||
|
||||
// handle the objects.
|
||||
b'{' => return true,
|
||||
|
||||
// ignore everything else
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
/// Consider using [`should_flatten_from_unchecked_slice`] when you can.
/// Will return `true` if the JSON contains an object, an array of arrays
/// or an array containing an object.
/// Returns `false` for everything else.
/// This function has been written to test [`should_flatten_from_unchecked_slice`].
|
||||
pub fn should_flatten_from_value(json: &Value) -> bool {
|
||||
match json {
|
||||
Value::Object(..) => true,
|
||||
Value::Array(array) => array.iter().any(|value| value.is_array() || value.is_object()),
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use serde_json::*;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_shouldnt_flatten() {
|
||||
let shouldnt_flatten = vec![
|
||||
json!(null),
|
||||
json!(true),
|
||||
json!(false),
|
||||
json!("a superb string"),
|
||||
json!("a string escaping other \"string\""),
|
||||
json!([null, true, false]),
|
||||
json!(["hello", "world", "!"]),
|
||||
json!(["a \"string\" escaping 'an other'", "\"[\"", "\"{\""]),
|
||||
];
|
||||
for value in shouldnt_flatten {
|
||||
assert!(!should_flatten_from_value(&value));
|
||||
let value = serde_json::to_vec(&value).unwrap();
|
||||
assert!(!should_flatten_from_unchecked_slice(&value));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_should_flatten() {
|
||||
let should_flatten = vec![
|
||||
json!({}),
|
||||
json!({ "hello": "world" }),
|
||||
json!(["hello", ["world"]]),
|
||||
json!([true, true, true, true, true, true, true, true, true, {}]),
|
||||
];
|
||||
for value in should_flatten {
|
||||
assert!(should_flatten_from_value(&value));
|
||||
let value = serde_json::to_vec(&value).unwrap();
|
||||
assert!(should_flatten_from_unchecked_slice(&value));
|
||||
}
|
||||
}
|
||||
}
|
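Outside of the fuzz target, the two entry points above can be exercised directly. A minimal sketch, assuming `json-depth-checker` is pulled in as a path dependency (as in the fuzz Cargo.toml) together with `serde_json`; the field names are illustrative only:

use json_depth_checker::{should_flatten_from_unchecked_slice, should_flatten_from_value};
use serde_json::json;

fn main() {
    // Nested object: both the checked and the unchecked paths agree it must be flattened.
    let value = json!({ "user": { "name": "kero" } });
    assert!(should_flatten_from_value(&value));
    let bytes = serde_json::to_vec(&value).unwrap();
    assert!(should_flatten_from_unchecked_slice(&bytes));

    // Flat array of scalars: no flattening needed.
    let flat = serde_json::to_vec(&json!([1, 2, 3])).unwrap();
    assert!(!should_flatten_from_unchecked_slice(&flat));
}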
80
milli/Cargo.toml
Normal file
80
milli/Cargo.toml
Normal file
@ -0,0 +1,80 @@
|
||||
[package]
|
||||
name = "milli"
|
||||
version = "0.39.0"
|
||||
authors = ["Kerollmops <clement@meilisearch.com>"]
|
||||
edition = "2018"
|
||||
|
||||
[dependencies]
|
||||
bimap = { version = "0.6.2", features = ["serde"] }
|
||||
bincode = "1.3.3"
|
||||
bstr = "1.0.1"
|
||||
byteorder = "1.4.3"
|
||||
charabia = { version = "0.7.0", default-features = false }
|
||||
concat-arrays = "0.1.2"
|
||||
crossbeam-channel = "0.5.6"
|
||||
deserr = "0.1.4"
|
||||
either = "1.8.0"
|
||||
flatten-serde-json = { path = "../flatten-serde-json" }
|
||||
fst = "0.4.7"
|
||||
fxhash = "0.2.1"
|
||||
geoutils = "0.5.1"
|
||||
grenad = { version = "0.4.3", default-features = false, features = ["tempfile"] }
|
||||
heed = { git = "https://github.com/meilisearch/heed", tag = "v0.12.4", default-features = false, features = ["lmdb", "sync-read-txn"] }
|
||||
json-depth-checker = { path = "../json-depth-checker" }
|
||||
levenshtein_automata = { version = "0.2.1", features = ["fst_automaton"] }
|
||||
memmap2 = "0.5.7"
|
||||
obkv = "0.2.0"
|
||||
once_cell = "1.15.0"
|
||||
ordered-float = "3.2.0"
|
||||
rayon = "1.5.3"
|
||||
roaring = "0.10.1"
|
||||
rstar = { version = "0.9.3", features = ["serde"] }
|
||||
serde = { version = "1.0.145", features = ["derive"] }
|
||||
serde_json = { version = "1.0.85", features = ["preserve_order"] }
|
||||
slice-group-by = "0.3.0"
|
||||
smallstr = { version = "0.3.0", features = ["serde"] }
|
||||
smallvec = "1.10.0"
|
||||
smartstring = "1.0.1"
|
||||
tempfile = "3.3.0"
|
||||
thiserror = "1.0.37"
|
||||
time = { version = "0.3.15", features = ["serde-well-known", "formatting", "parsing", "macros"] }
|
||||
uuid = { version = "1.1.2", features = ["v4"] }
|
||||
|
||||
filter-parser = { path = "../filter-parser" }
|
||||
|
||||
# documents words self-join
|
||||
itertools = "0.10.5"
|
||||
|
||||
# logging
|
||||
log = "0.4.17"
|
||||
logging_timer = "1.1.0"
|
||||
csv = "1.1.6"
|
||||
|
||||
[dev-dependencies]
|
||||
big_s = "1.0.2"
|
||||
insta = "1.21.0"
|
||||
maplit = "1.0.2"
|
||||
md5 = "0.7.0"
|
||||
rand = {version = "0.8.5", features = ["small_rng"] }
|
||||
|
||||
[target.'cfg(fuzzing)'.dev-dependencies]
|
||||
fuzzcheck = "0.12.1"
|
||||
|
||||
[features]
|
||||
default = [ "charabia/default" ]
|
||||
|
||||
# allow chinese specialized tokenization
|
||||
chinese = ["charabia/chinese"]
|
||||
|
||||
# allow hebrew specialized tokenization
|
||||
hebrew = ["charabia/hebrew"]
|
||||
|
||||
# allow japanese specialized tokenization
|
||||
japanese = ["charabia/japanese"]
|
||||
japanese-transliteration = ["charabia/japanese-transliteration"]
|
||||
|
||||
# allow korean specialized tokenization
|
||||
korean = ["charabia/korean"]
|
||||
|
||||
# allow thai specialized tokenization
|
||||
thai = ["charabia/thai"]
|
297
milli/src/asc_desc.rs
Normal file
297
milli/src/asc_desc.rs
Normal file
@ -0,0 +1,297 @@
|
||||
//! This module provides the `AscDesc` type and defines all the errors related to this type.
|
||||
|
||||
use std::fmt;
|
||||
use std::str::FromStr;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::error::is_reserved_keyword;
|
||||
use crate::{CriterionError, Error, UserError};
|
||||
|
||||
/// This error type is never supposed to be shown to the end user.
|
||||
/// You must always cast it to a sort error or a criterion error.
|
||||
#[derive(Debug)]
|
||||
pub enum AscDescError {
|
||||
InvalidLatitude,
|
||||
InvalidLongitude,
|
||||
InvalidSyntax { name: String },
|
||||
ReservedKeyword { name: String },
|
||||
}
|
||||
|
||||
impl fmt::Display for AscDescError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
Self::InvalidLatitude => {
|
||||
write!(f, "Latitude must be contained between -90 and 90 degrees.",)
|
||||
}
|
||||
Self::InvalidLongitude => {
|
||||
write!(f, "Longitude must be contained between -180 and 180 degrees.",)
|
||||
}
|
||||
Self::InvalidSyntax { name } => {
|
||||
write!(f, "Invalid syntax for the asc/desc parameter: expected expression ending by `:asc` or `:desc`, found `{}`.", name)
|
||||
}
|
||||
Self::ReservedKeyword { name } => {
|
||||
write!(
|
||||
f,
|
||||
"`{}` is a reserved keyword and thus can't be used as a asc/desc rule.",
|
||||
name
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<AscDescError> for CriterionError {
|
||||
fn from(error: AscDescError) -> Self {
|
||||
match error {
|
||||
AscDescError::InvalidLatitude | AscDescError::InvalidLongitude => {
|
||||
CriterionError::ReservedNameForSort { name: "_geoPoint".to_string() }
|
||||
}
|
||||
AscDescError::InvalidSyntax { name } => CriterionError::InvalidName { name },
|
||||
AscDescError::ReservedKeyword { name } if name.starts_with("_geoPoint") => {
|
||||
CriterionError::ReservedNameForSort { name: "_geoPoint".to_string() }
|
||||
}
|
||||
AscDescError::ReservedKeyword { name } if name.starts_with("_geoRadius") => {
|
||||
CriterionError::ReservedNameForFilter { name: "_geoRadius".to_string() }
|
||||
}
|
||||
AscDescError::ReservedKeyword { name } => CriterionError::ReservedName { name },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
|
||||
pub enum Member {
|
||||
Field(String),
|
||||
Geo([f64; 2]),
|
||||
}
|
||||
|
||||
impl FromStr for Member {
|
||||
type Err = AscDescError;
|
||||
|
||||
fn from_str(text: &str) -> Result<Member, Self::Err> {
|
||||
match text.strip_prefix("_geoPoint(").and_then(|text| text.strip_suffix(')')) {
|
||||
Some(point) => {
|
||||
let (lat, lng) = point
|
||||
.split_once(',')
|
||||
.ok_or_else(|| AscDescError::ReservedKeyword { name: text.to_string() })
|
||||
.and_then(|(lat, lng)| {
|
||||
lat.trim()
|
||||
.parse()
|
||||
.and_then(|lat| lng.trim().parse().map(|lng| (lat, lng)))
|
||||
.map_err(|_| AscDescError::ReservedKeyword { name: text.to_string() })
|
||||
})?;
|
||||
if !(-90.0..=90.0).contains(&lat) {
|
||||
return Err(AscDescError::InvalidLatitude)?;
|
||||
} else if !(-180.0..=180.0).contains(&lng) {
|
||||
return Err(AscDescError::InvalidLongitude)?;
|
||||
}
|
||||
Ok(Member::Geo([lat, lng]))
|
||||
}
|
||||
None => {
|
||||
if is_reserved_keyword(text) || text.starts_with("_geoRadius(") {
|
||||
return Err(AscDescError::ReservedKeyword { name: text.to_string() })?;
|
||||
}
|
||||
Ok(Member::Field(text.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Member {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Member::Field(name) => f.write_str(name),
|
||||
Member::Geo([lat, lng]) => write!(f, "_geoPoint({}, {})", lat, lng),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Member {
|
||||
pub fn field(&self) -> Option<&str> {
|
||||
match self {
|
||||
Member::Field(field) => Some(field),
|
||||
Member::Geo(_) => None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn geo_point(&self) -> Option<&[f64; 2]> {
|
||||
match self {
|
||||
Member::Geo(point) => Some(point),
|
||||
Member::Field(_) => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)]
|
||||
pub enum AscDesc {
|
||||
Asc(Member),
|
||||
Desc(Member),
|
||||
}
|
||||
|
||||
impl AscDesc {
|
||||
pub fn member(&self) -> &Member {
|
||||
match self {
|
||||
AscDesc::Asc(member) => member,
|
||||
AscDesc::Desc(member) => member,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn field(&self) -> Option<&str> {
|
||||
self.member().field()
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for AscDesc {
|
||||
type Err = AscDescError;
|
||||
|
||||
fn from_str(text: &str) -> Result<AscDesc, Self::Err> {
|
||||
match text.rsplit_once(':') {
|
||||
Some((left, "asc")) => Ok(AscDesc::Asc(left.parse()?)),
|
||||
Some((left, "desc")) => Ok(AscDesc::Desc(left.parse()?)),
|
||||
_ => Err(AscDescError::InvalidSyntax { name: text.to_string() }),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum SortError {
|
||||
#[error("{}", AscDescError::InvalidLatitude)]
|
||||
InvalidLatitude,
|
||||
#[error("{}", AscDescError::InvalidLongitude)]
|
||||
InvalidLongitude,
|
||||
#[error("Invalid syntax for the geo parameter: expected expression formated like \
|
||||
`_geoPoint(latitude, longitude)` and ending by `:asc` or `:desc`, found `{name}`.")]
|
||||
BadGeoPointUsage { name: String },
|
||||
#[error("Invalid syntax for the sort parameter: expected expression ending by `:asc` or `:desc`, found `{name}`.")]
|
||||
InvalidName { name: String },
|
||||
#[error("`{name}` is a reserved keyword and thus can't be used as a sort expression.")]
|
||||
ReservedName { name: String },
|
||||
#[error("`{name}` is a reserved keyword and thus can't be used as a sort expression. \
|
||||
Use the _geoPoint(latitude, longitude) built-in rule to sort on _geo field coordinates.")]
|
||||
ReservedNameForSettings { name: String },
|
||||
#[error("`{name}` is a reserved keyword and thus can't be used as a sort expression. \
|
||||
Use the _geoPoint(latitude, longitude) built-in rule to sort on _geo field coordinates.")]
|
||||
ReservedNameForFilter { name: String },
|
||||
}
|
||||
|
||||
impl From<AscDescError> for SortError {
|
||||
fn from(error: AscDescError) -> Self {
|
||||
match error {
|
||||
AscDescError::InvalidLatitude => SortError::InvalidLatitude,
|
||||
AscDescError::InvalidLongitude => SortError::InvalidLongitude,
|
||||
AscDescError::InvalidSyntax { name } => SortError::InvalidName { name },
|
||||
AscDescError::ReservedKeyword { name } if name.starts_with("_geoPoint") => {
|
||||
SortError::BadGeoPointUsage { name }
|
||||
}
|
||||
AscDescError::ReservedKeyword { name } if &name == "_geo" => {
|
||||
SortError::ReservedNameForSettings { name }
|
||||
}
|
||||
AscDescError::ReservedKeyword { name } if name.starts_with("_geoRadius") => {
|
||||
SortError::ReservedNameForFilter { name: String::from("_geoRadius") }
|
||||
}
|
||||
AscDescError::ReservedKeyword { name } => SortError::ReservedName { name },
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SortError> for Error {
|
||||
fn from(error: SortError) -> Self {
|
||||
Self::UserError(UserError::SortError(error))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use big_s::S;
|
||||
use AscDesc::*;
|
||||
use AscDescError::*;
|
||||
use Member::*;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn parse_asc_desc() {
|
||||
let valid_req = [
|
||||
("truc:asc", Asc(Field(S("truc")))),
|
||||
("bidule:desc", Desc(Field(S("bidule")))),
|
||||
("a-b:desc", Desc(Field(S("a-b")))),
|
||||
("a:b:desc", Desc(Field(S("a:b")))),
|
||||
("a12:asc", Asc(Field(S("a12")))),
|
||||
("42:asc", Asc(Field(S("42")))),
|
||||
("_geoPoint(42, 59):asc", Asc(Geo([42., 59.]))),
|
||||
("_geoPoint(42.459, 59):desc", Desc(Geo([42.459, 59.]))),
|
||||
("_geoPoint(42, 59.895):desc", Desc(Geo([42., 59.895]))),
|
||||
("_geoPoint(42, 59.895):desc", Desc(Geo([42., 59.895]))),
|
||||
("_geoPoint(90.000000000, 180):desc", Desc(Geo([90., 180.]))),
|
||||
("_geoPoint(-90, -180.0000000000):asc", Asc(Geo([-90., -180.]))),
|
||||
("_geoPoint(42.0002, 59.895):desc", Desc(Geo([42.0002, 59.895]))),
|
||||
("_geoPoint(42., 59.):desc", Desc(Geo([42., 59.]))),
|
||||
("truc(12, 13):desc", Desc(Field(S("truc(12, 13)")))),
|
||||
];
|
||||
|
||||
for (req, expected) in valid_req {
|
||||
let res = req.parse::<AscDesc>();
|
||||
assert!(
|
||||
res.is_ok(),
|
||||
"Failed to parse `{}`, was expecting `{:?}` but instead got `{:?}`",
|
||||
req,
|
||||
expected,
|
||||
res
|
||||
);
|
||||
assert_eq!(res.unwrap(), expected);
|
||||
}
|
||||
|
||||
let invalid_req = [
|
||||
("truc:machin", InvalidSyntax { name: S("truc:machin") }),
|
||||
("truc:deesc", InvalidSyntax { name: S("truc:deesc") }),
|
||||
("truc:asc:deesc", InvalidSyntax { name: S("truc:asc:deesc") }),
|
||||
("42desc", InvalidSyntax { name: S("42desc") }),
|
||||
("_geoPoint:asc", ReservedKeyword { name: S("_geoPoint") }),
|
||||
("_geoDistance:asc", ReservedKeyword { name: S("_geoDistance") }),
|
||||
("_geoPoint(42.12 , 59.598)", InvalidSyntax { name: S("_geoPoint(42.12 , 59.598)") }),
|
||||
(
|
||||
"_geoPoint(42.12 , 59.598):deesc",
|
||||
InvalidSyntax { name: S("_geoPoint(42.12 , 59.598):deesc") },
|
||||
),
|
||||
(
|
||||
"_geoPoint(42.12 , 59.598):machin",
|
||||
InvalidSyntax { name: S("_geoPoint(42.12 , 59.598):machin") },
|
||||
),
|
||||
(
|
||||
"_geoPoint(42.12 , 59.598):asc:aasc",
|
||||
InvalidSyntax { name: S("_geoPoint(42.12 , 59.598):asc:aasc") },
|
||||
),
|
||||
(
|
||||
"_geoPoint(42,12 , 59,598):desc",
|
||||
ReservedKeyword { name: S("_geoPoint(42,12 , 59,598)") },
|
||||
),
|
||||
("_geoPoint(35, 85, 75):asc", ReservedKeyword { name: S("_geoPoint(35, 85, 75)") }),
|
||||
("_geoPoint(18):asc", ReservedKeyword { name: S("_geoPoint(18)") }),
|
||||
("_geoPoint(200, 200):asc", InvalidLatitude),
|
||||
("_geoPoint(90.000001, 0):asc", InvalidLatitude),
|
||||
("_geoPoint(0, -180.000001):desc", InvalidLongitude),
|
||||
("_geoPoint(159.256, 130):asc", InvalidLatitude),
|
||||
("_geoPoint(12, -2021):desc", InvalidLongitude),
|
||||
];
|
||||
|
||||
for (req, expected_error) in invalid_req {
|
||||
let res = req.parse::<AscDesc>();
|
||||
assert!(
|
||||
res.is_err(),
|
||||
"Should no be able to parse `{}`, was expecting an error but instead got: `{:?}`",
|
||||
req,
|
||||
res,
|
||||
);
|
||||
let res = res.unwrap_err();
|
||||
assert_eq!(
|
||||
res.to_string(),
|
||||
expected_error.to_string(),
|
||||
"Bad error for input {}: got `{:?}` instead of `{:?}`",
|
||||
req,
|
||||
res,
|
||||
expected_error
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
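A short sketch of how these sort rules parse in practice, assuming `AscDesc` and `Member` are re-exported at the milli crate root (as the `use crate::{AscDesc, Member}` import in criterion.rs below suggests); the values are illustrative only:

use milli::{AscDesc, Member};

fn main() {
    // A plain field, sorted ascending.
    let rule: AscDesc = "price:asc".parse().unwrap();
    assert_eq!(rule, AscDesc::Asc(Member::Field("price".to_string())));

    // A geo point, sorted descending; latitude and longitude are range-checked.
    let rule: AscDesc = "_geoPoint(48.85, 2.35):desc".parse().unwrap();
    assert_eq!(rule.member().geo_point(), Some(&[48.85, 2.35]));
    assert!("_geoPoint(200, 0):asc".parse::<AscDesc>().is_err());

    // Anything not ending in `:asc` or `:desc` is rejected.
    assert!("price".parse::<AscDesc>().is_err());
}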
183
milli/src/criterion.rs
Normal file
183
milli/src/criterion.rs
Normal file
@ -0,0 +1,183 @@
|
||||
use std::fmt;
|
||||
use std::str::FromStr;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::{AscDesc, Member};
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum CriterionError {
|
||||
#[error("`{name}` ranking rule is invalid. Valid ranking rules are words, typo, sort, proximity, attribute, exactness and custom ranking rules.")]
|
||||
InvalidName { name: String },
|
||||
#[error("`{name}` is a reserved keyword and thus can't be used as a ranking rule")]
|
||||
ReservedName { name: String },
|
||||
#[error(
|
||||
"`{name}` is a reserved keyword and thus can't be used as a ranking rule. \
|
||||
`{name}` can only be used for sorting at search time"
|
||||
)]
|
||||
ReservedNameForSort { name: String },
|
||||
#[error(
|
||||
"`{name}` is a reserved keyword and thus can't be used as a ranking rule. \
|
||||
`{name}` can only be used for filtering at search time"
|
||||
)]
|
||||
ReservedNameForFilter { name: String },
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub enum Criterion {
|
||||
/// Sorted by decreasing number of matched query terms.
|
||||
/// Query words at the front of an attribute are considered better than those at the back.
|
||||
Words,
|
||||
/// Sorted by increasing number of typos.
|
||||
Typo,
|
||||
/// Sorted by increasing distance between matched query terms.
|
||||
Proximity,
|
||||
/// Documents with query words contained in more important
|
||||
/// attributes are considered better.
|
||||
Attribute,
|
||||
/// Dynamically sort the documents at query time. None, one, or multiple Asc/Desc sortable
|
||||
/// attributes can be used in place of this criterion at query time.
|
||||
Sort,
|
||||
/// Sorted by the similarity of the matched words with the query words.
|
||||
Exactness,
|
||||
/// Sorted by the increasing value of the field specified.
|
||||
Asc(String),
|
||||
/// Sorted by the decreasing value of the field specified.
|
||||
Desc(String),
|
||||
}
|
||||
|
||||
impl Criterion {
|
||||
/// Returns the field name parameter of this criterion.
|
||||
pub fn field_name(&self) -> Option<&str> {
|
||||
match self {
|
||||
Criterion::Asc(name) | Criterion::Desc(name) => Some(name),
|
||||
_otherwise => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for Criterion {
|
||||
type Err = CriterionError;
|
||||
|
||||
fn from_str(text: &str) -> Result<Criterion, Self::Err> {
|
||||
match text {
|
||||
"words" => Ok(Criterion::Words),
|
||||
"typo" => Ok(Criterion::Typo),
|
||||
"proximity" => Ok(Criterion::Proximity),
|
||||
"attribute" => Ok(Criterion::Attribute),
|
||||
"sort" => Ok(Criterion::Sort),
|
||||
"exactness" => Ok(Criterion::Exactness),
|
||||
text => match AscDesc::from_str(text)? {
|
||||
AscDesc::Asc(Member::Field(field)) => Ok(Criterion::Asc(field)),
|
||||
AscDesc::Desc(Member::Field(field)) => Ok(Criterion::Desc(field)),
|
||||
AscDesc::Asc(Member::Geo(_)) | AscDesc::Desc(Member::Geo(_)) => {
|
||||
Err(CriterionError::ReservedNameForSort { name: "_geoPoint".to_string() })?
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn default_criteria() -> Vec<Criterion> {
|
||||
vec![
|
||||
Criterion::Words,
|
||||
Criterion::Typo,
|
||||
Criterion::Proximity,
|
||||
Criterion::Attribute,
|
||||
Criterion::Sort,
|
||||
Criterion::Exactness,
|
||||
]
|
||||
}
|
||||
|
||||
impl fmt::Display for Criterion {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
use Criterion::*;
|
||||
|
||||
match self {
|
||||
Words => f.write_str("words"),
|
||||
Typo => f.write_str("typo"),
|
||||
Proximity => f.write_str("proximity"),
|
||||
Attribute => f.write_str("attribute"),
|
||||
Sort => f.write_str("sort"),
|
||||
Exactness => f.write_str("exactness"),
|
||||
Asc(attr) => write!(f, "{}:asc", attr),
|
||||
Desc(attr) => write!(f, "{}:desc", attr),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use big_s::S;
|
||||
use CriterionError::*;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn parse_criterion() {
|
||||
let valid_criteria = [
|
||||
("words", Criterion::Words),
|
||||
("typo", Criterion::Typo),
|
||||
("proximity", Criterion::Proximity),
|
||||
("attribute", Criterion::Attribute),
|
||||
("sort", Criterion::Sort),
|
||||
("exactness", Criterion::Exactness),
|
||||
("price:asc", Criterion::Asc(S("price"))),
|
||||
("price:desc", Criterion::Desc(S("price"))),
|
||||
("price:asc:desc", Criterion::Desc(S("price:asc"))),
|
||||
("truc:machin:desc", Criterion::Desc(S("truc:machin"))),
|
||||
("hello-world!:desc", Criterion::Desc(S("hello-world!"))),
|
||||
("it's spacy over there:asc", Criterion::Asc(S("it's spacy over there"))),
|
||||
];
|
||||
|
||||
for (input, expected) in valid_criteria {
|
||||
let res = input.parse::<Criterion>();
|
||||
assert!(
|
||||
res.is_ok(),
|
||||
"Failed to parse `{}`, was expecting `{:?}` but instead got `{:?}`",
|
||||
input,
|
||||
expected,
|
||||
res
|
||||
);
|
||||
assert_eq!(res.unwrap(), expected);
|
||||
}
|
||||
|
||||
let invalid_criteria = [
|
||||
("words suffix", InvalidName { name: S("words suffix") }),
|
||||
("prefix typo", InvalidName { name: S("prefix typo") }),
|
||||
("proximity attribute", InvalidName { name: S("proximity attribute") }),
|
||||
("price", InvalidName { name: S("price") }),
|
||||
("asc:price", InvalidName { name: S("asc:price") }),
|
||||
("price:deesc", InvalidName { name: S("price:deesc") }),
|
||||
("price:aasc", InvalidName { name: S("price:aasc") }),
|
||||
("price:asc and desc", InvalidName { name: S("price:asc and desc") }),
|
||||
("price:asc:truc", InvalidName { name: S("price:asc:truc") }),
|
||||
("_geo:asc", ReservedName { name: S("_geo") }),
|
||||
("_geoDistance:asc", ReservedName { name: S("_geoDistance") }),
|
||||
("_geoPoint:asc", ReservedNameForSort { name: S("_geoPoint") }),
|
||||
("_geoPoint(42, 75):asc", ReservedNameForSort { name: S("_geoPoint") }),
|
||||
("_geoRadius:asc", ReservedNameForFilter { name: S("_geoRadius") }),
|
||||
("_geoRadius(42, 75, 59):asc", ReservedNameForFilter { name: S("_geoRadius") }),
|
||||
];
|
||||
|
||||
for (input, expected) in invalid_criteria {
|
||||
let res = input.parse::<Criterion>();
|
||||
assert!(
|
||||
res.is_err(),
|
||||
"Should no be able to parse `{}`, was expecting an error but instead got: `{:?}`",
|
||||
input,
|
||||
res
|
||||
);
|
||||
let res = res.unwrap_err();
|
||||
assert_eq!(
|
||||
res.to_string(),
|
||||
expected.to_string(),
|
||||
"Bad error for input {}: got `{:?}` instead of `{:?}`",
|
||||
input,
|
||||
res,
|
||||
expected
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
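Likewise for ranking rules, a hypothetical sketch assuming `Criterion` and `default_criteria` are exported from the crate root:

use milli::{default_criteria, Criterion};

fn main() {
    // Built-in rules are parsed by name, custom rules as `field:asc` / `field:desc`.
    assert_eq!("typo".parse::<Criterion>().unwrap(), Criterion::Typo);
    assert_eq!("price:desc".parse::<Criterion>().unwrap(), Criterion::Desc("price".to_string()));

    // `_geoPoint(..)` is reserved for sorting at search time, so it is not a valid ranking rule.
    assert!("_geoPoint(42, 75):asc".parse::<Criterion>().is_err());

    // `Display` round-trips the textual form, e.g. for settings dumps.
    let rules: Vec<String> = default_criteria().iter().map(|c| c.to_string()).collect();
    assert_eq!(rules, ["words", "typo", "proximity", "attribute", "sort", "exactness"]);
}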
579
milli/src/documents/builder.rs
Normal file
579
milli/src/documents/builder.rs
Normal file
@ -0,0 +1,579 @@
|
||||
use std::io::{self, Write};
|
||||
|
||||
use grenad::{CompressionType, WriterBuilder};
|
||||
use serde::de::Deserializer;
|
||||
use serde_json::{to_writer, Value};
|
||||
|
||||
use super::{DocumentsBatchIndex, Error, DOCUMENTS_BATCH_INDEX_KEY};
|
||||
use crate::documents::serde_impl::DocumentVisitor;
|
||||
use crate::Object;
|
||||
|
||||
/// The `DocumentsBatchBuilder` provides a way to build a documents batch in the intermediary
|
||||
/// format used by milli.
|
||||
///
|
||||
/// The writer used by the `DocumentsBatchBuilder` can be read using a `DocumentsBatchReader`
|
||||
/// to iterate over the documents.
|
||||
///
|
||||
/// ## example:
|
||||
/// ```
|
||||
/// use serde_json::json;
|
||||
/// use milli::documents::DocumentsBatchBuilder;
|
||||
///
|
||||
/// let json = json!({ "id": 1, "name": "foo" });
|
||||
///
|
||||
/// let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
/// builder.append_json_object(json.as_object().unwrap()).unwrap();
|
||||
/// let _vector = builder.into_inner().unwrap();
|
||||
/// ```
|
||||
pub struct DocumentsBatchBuilder<W> {
|
||||
/// The inner grenad writer, the last value must always be the `DocumentsBatchIndex`.
|
||||
writer: grenad::Writer<W>,
|
||||
/// A map that creates the relation between field ids and field names.
|
||||
fields_index: DocumentsBatchIndex,
|
||||
/// The number of documents that were added to this builder,
|
||||
/// it doesn't take the primary key of the documents into account at this point.
|
||||
documents_count: u32,
|
||||
|
||||
/// A buffer to store a temporary obkv buffer and avoid reallocating.
|
||||
obkv_buffer: Vec<u8>,
|
||||
/// A buffer to serialize the values and avoid reallocating,
|
||||
/// serialized values are stored in an obkv.
|
||||
value_buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
impl<W: Write> DocumentsBatchBuilder<W> {
|
||||
pub fn new(writer: W) -> DocumentsBatchBuilder<W> {
|
||||
DocumentsBatchBuilder {
|
||||
writer: WriterBuilder::new().compression_type(CompressionType::None).build(writer),
|
||||
fields_index: DocumentsBatchIndex::default(),
|
||||
documents_count: 0,
|
||||
obkv_buffer: Vec::new(),
|
||||
value_buffer: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of documents inserted into this builder.
|
||||
pub fn documents_count(&self) -> u32 {
|
||||
self.documents_count
|
||||
}
|
||||
|
||||
/// Appends a new JSON object into the batch and updates the `DocumentsBatchIndex` accordingly.
|
||||
pub fn append_json_object(&mut self, object: &Object) -> io::Result<()> {
|
||||
// Make sure that we insert the field ids in order as the obkv writer has this requirement.
|
||||
let mut fields_ids: Vec<_> = object.keys().map(|k| self.fields_index.insert(k)).collect();
|
||||
fields_ids.sort_unstable();
|
||||
|
||||
self.obkv_buffer.clear();
|
||||
let mut writer = obkv::KvWriter::new(&mut self.obkv_buffer);
|
||||
for field_id in fields_ids {
|
||||
let key = self.fields_index.name(field_id).unwrap();
|
||||
self.value_buffer.clear();
|
||||
to_writer(&mut self.value_buffer, &object[key])?;
|
||||
writer.insert(field_id, &self.value_buffer)?;
|
||||
}
|
||||
|
||||
let internal_id = self.documents_count.to_be_bytes();
|
||||
let document_bytes = writer.into_inner()?;
|
||||
self.writer.insert(internal_id, &document_bytes)?;
|
||||
self.documents_count += 1;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Appends a new JSON array of objects into the batch and updates the `DocumentsBatchIndex` accordingly.
|
||||
pub fn append_json_array<R: io::Read>(&mut self, reader: R) -> Result<(), Error> {
|
||||
let mut de = serde_json::Deserializer::from_reader(reader);
|
||||
let mut visitor = DocumentVisitor::new(self);
|
||||
de.deserialize_any(&mut visitor)?
|
||||
}
|
||||
|
||||
/// Appends a new CSV file into the batch and updates the `DocumentsBatchIndex` accordingly.
|
||||
pub fn append_csv<R: io::Read>(&mut self, mut reader: csv::Reader<R>) -> Result<(), Error> {
|
||||
// Make sure that we insert the field ids in order as the obkv writer has this requirement.
|
||||
let mut typed_fields_ids: Vec<_> = reader
|
||||
.headers()?
|
||||
.into_iter()
|
||||
.map(parse_csv_header)
|
||||
.map(|(k, t)| (self.fields_index.insert(k), t))
|
||||
.enumerate()
|
||||
.collect();
|
||||
// Make sure that we insert the field ids in order as the obkv writer has this requirement.
|
||||
typed_fields_ids.sort_unstable_by_key(|(_, (fid, _))| *fid);
|
||||
|
||||
let mut record = csv::StringRecord::new();
|
||||
let mut line = 0;
|
||||
while reader.read_record(&mut record)? {
|
||||
// We increment here and not at the end of the while loop to take
|
||||
// the header offset into account.
|
||||
line += 1;
|
||||
|
||||
self.obkv_buffer.clear();
|
||||
let mut writer = obkv::KvWriter::new(&mut self.obkv_buffer);
|
||||
|
||||
for (i, (field_id, type_)) in typed_fields_ids.iter() {
|
||||
self.value_buffer.clear();
|
||||
|
||||
let value = &record[*i];
|
||||
match type_ {
|
||||
AllowedType::Number => {
|
||||
if value.trim().is_empty() {
|
||||
to_writer(&mut self.value_buffer, &Value::Null)?;
|
||||
} else if let Ok(integer) = value.trim().parse::<i64>() {
|
||||
to_writer(&mut self.value_buffer, &integer)?;
|
||||
} else {
|
||||
match value.trim().parse::<f64>() {
|
||||
Ok(float) => {
|
||||
to_writer(&mut self.value_buffer, &float)?;
|
||||
}
|
||||
Err(error) => {
|
||||
return Err(Error::ParseFloat {
|
||||
error,
|
||||
line,
|
||||
value: value.to_string(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
AllowedType::String => {
|
||||
if value.is_empty() {
|
||||
to_writer(&mut self.value_buffer, &Value::Null)?;
|
||||
} else {
|
||||
to_writer(&mut self.value_buffer, value)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We insert into the obkv writer the value buffer that has been filled just above.
|
||||
writer.insert(*field_id, &self.value_buffer)?;
|
||||
}
|
||||
|
||||
let internal_id = self.documents_count.to_be_bytes();
|
||||
let document_bytes = writer.into_inner()?;
|
||||
self.writer.insert(internal_id, &document_bytes)?;
|
||||
self.documents_count += 1;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Flushes the content on disk and stores the final version of the `DocumentsBatchIndex`.
|
||||
pub fn into_inner(mut self) -> io::Result<W> {
|
||||
let DocumentsBatchBuilder { mut writer, fields_index, .. } = self;
|
||||
|
||||
// We serialize and insert the `DocumentsBatchIndex` as the last key of the grenad writer.
|
||||
self.value_buffer.clear();
|
||||
to_writer(&mut self.value_buffer, &fields_index)?;
|
||||
writer.insert(DOCUMENTS_BATCH_INDEX_KEY, &self.value_buffer)?;
|
||||
|
||||
writer.into_inner()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum AllowedType {
|
||||
String,
|
||||
Number,
|
||||
}
|
||||
|
||||
fn parse_csv_header(header: &str) -> (&str, AllowedType) {
|
||||
// if there are several separators we only split on the last one.
|
||||
match header.rsplit_once(':') {
|
||||
Some((field_name, field_type)) => match field_type {
|
||||
"string" => (field_name, AllowedType::String),
|
||||
"number" => (field_name, AllowedType::Number),
|
||||
// if the pattern isn't recognized, we keep the whole field.
|
||||
_otherwise => (header, AllowedType::String),
|
||||
},
|
||||
None => (header, AllowedType::String),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::io::Cursor;
|
||||
|
||||
use serde_json::json;
|
||||
|
||||
use super::*;
|
||||
use crate::documents::{obkv_to_object, DocumentsBatchReader};
|
||||
|
||||
#[test]
|
||||
fn add_single_documents_json() {
|
||||
let json = serde_json::json!({
|
||||
"id": 1,
|
||||
"field": "hello!",
|
||||
});
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_json_object(json.as_object().unwrap()).unwrap();
|
||||
|
||||
let json = serde_json::json!({
|
||||
"blabla": false,
|
||||
"field": "hello!",
|
||||
"id": 1,
|
||||
});
|
||||
|
||||
builder.append_json_object(json.as_object().unwrap()).unwrap();
|
||||
|
||||
assert_eq!(builder.documents_count(), 2);
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
assert_eq!(index.len(), 3);
|
||||
|
||||
let document = cursor.next_document().unwrap().unwrap();
|
||||
assert_eq!(document.iter().count(), 2);
|
||||
|
||||
let document = cursor.next_document().unwrap().unwrap();
|
||||
assert_eq!(document.iter().count(), 3);
|
||||
|
||||
assert!(cursor.next_document().unwrap().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_documents_csv() {
|
||||
let csv_content = "id:number,field:string\n1,hello!\n2,blabla";
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
assert_eq!(builder.documents_count(), 2);
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
assert_eq!(index.len(), 2);
|
||||
|
||||
let document = cursor.next_document().unwrap().unwrap();
|
||||
assert_eq!(document.iter().count(), 2);
|
||||
|
||||
let document = cursor.next_document().unwrap().unwrap();
|
||||
assert_eq!(document.iter().count(), 2);
|
||||
|
||||
assert!(cursor.next_document().unwrap().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn simple_csv_document() {
|
||||
let csv_content = r#"city,country,pop
|
||||
"Boston","United States","4628910""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let val = obkv_to_object(&doc, &index).map(Value::from).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
val,
|
||||
json!({
|
||||
"city": "Boston",
|
||||
"country": "United States",
|
||||
"pop": "4628910",
|
||||
})
|
||||
);
|
||||
|
||||
assert!(cursor.next_document().unwrap().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn coma_in_field() {
|
||||
let csv_content = r#"city,country,pop
|
||||
"Boston","United, States","4628910""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let val = obkv_to_object(&doc, &index).map(Value::from).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
val,
|
||||
json!({
|
||||
"city": "Boston",
|
||||
"country": "United, States",
|
||||
"pop": "4628910",
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn quote_in_field() {
|
||||
let csv_content = r#"city,country,pop
|
||||
"Boston","United"" States","4628910""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let val = obkv_to_object(&doc, &index).map(Value::from).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
val,
|
||||
json!({
|
||||
"city": "Boston",
|
||||
"country": "United\" States",
|
||||
"pop": "4628910",
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn integer_in_field() {
|
||||
let csv_content = r#"city,country,pop:number
|
||||
"Boston","United States","4628910""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let val = obkv_to_object(&doc, &index).map(Value::from).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
val,
|
||||
json!({
|
||||
"city": "Boston",
|
||||
"country": "United States",
|
||||
"pop": 4628910,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn integer_as_id() {
|
||||
let csv_content = r#""id:number","title:string","comment:string"
|
||||
"1239","Pride and Prejudice","A great book""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let val = obkv_to_object(&doc, &index).map(Value::from).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
val,
|
||||
json!({
|
||||
"id": 1239,
|
||||
"title": "Pride and Prejudice",
|
||||
"comment": "A great book",
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn float_in_field() {
|
||||
let csv_content = r#"city,country,pop:number
|
||||
"Boston","United States","4628910.01""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let val = obkv_to_object(&doc, &index).map(Value::from).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
val,
|
||||
json!({
|
||||
"city": "Boston",
|
||||
"country": "United States",
|
||||
"pop": 4628910.01,
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn several_colon_in_header() {
|
||||
let csv_content = r#"city:love:string,country:state,pop
|
||||
"Boston","United States","4628910""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let val = obkv_to_object(&doc, &index).map(Value::from).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
val,
|
||||
json!({
|
||||
"city:love": "Boston",
|
||||
"country:state": "United States",
|
||||
"pop": "4628910",
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ending_by_colon_in_header() {
|
||||
let csv_content = r#"city:,country,pop
|
||||
"Boston","United States","4628910""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let val = obkv_to_object(&doc, &index).map(Value::from).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
val,
|
||||
json!({
|
||||
"city:": "Boston",
|
||||
"country": "United States",
|
||||
"pop": "4628910",
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn starting_by_colon_in_header() {
|
||||
let csv_content = r#":city,country,pop
|
||||
"Boston","United States","4628910""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let val = obkv_to_object(&doc, &index).map(Value::from).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
val,
|
||||
json!({
|
||||
":city": "Boston",
|
||||
"country": "United States",
|
||||
"pop": "4628910",
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[ignore]
|
||||
#[test]
|
||||
fn starting_by_colon_in_header2() {
|
||||
let csv_content = r#":string,country,pop
|
||||
"Boston","United States","4628910""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, _) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
assert!(cursor.next_document().is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn double_colon_in_header() {
|
||||
let csv_content = r#"city::string,country,pop
|
||||
"Boston","United States","4628910""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let val = obkv_to_object(&doc, &index).map(Value::from).unwrap();
|
||||
|
||||
assert_eq!(
|
||||
val,
|
||||
json!({
|
||||
"city:": "Boston",
|
||||
"country": "United States",
|
||||
"pop": "4628910",
|
||||
})
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bad_type_in_header() {
|
||||
let csv_content = r#"city,country:number,pop
|
||||
"Boston","United States","4628910""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
assert!(builder.append_csv(csv).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bad_column_count1() {
|
||||
let csv_content = r#"city,country,pop
|
||||
"Boston","United States","4628910", "too much
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content"#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
assert!(builder.append_csv(csv).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn bad_column_count2() {
|
||||
let csv_content = r#"city,country,pop
|
||||
"Boston","United States""#;
|
||||
let csv = csv::Reader::from_reader(Cursor::new(csv_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
assert!(builder.append_csv(csv).is_err());
|
||||
}
|
||||
}
|
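All of the tests above follow the same build-then-read round trip; a condensed sketch of that pattern, assuming the `csv` crate and the `milli::documents` exports shown in this diff:

use std::io::Cursor;

use milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};

fn main() {
    // Typed CSV headers: `name:string` or `name:number`; untyped headers default to string.
    let csv_content = "id:number,title:string\n1,Pride and Prejudice\n2,Emma";
    let csv = csv::Reader::from_reader(Cursor::new(csv_content));

    let mut builder = DocumentsBatchBuilder::new(Vec::new());
    builder.append_csv(csv).unwrap();
    assert_eq!(builder.documents_count(), 2);
    let vector = builder.into_inner().unwrap();

    // The buffer written by the builder is read back with a cursor plus the fields index.
    let (mut cursor, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
        .unwrap()
        .into_cursor_and_fields_index();
    assert_eq!(index.len(), 2);
    while let Some(document) = cursor.next_document().unwrap() {
        assert_eq!(document.iter().count(), 2);
    }
}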
109
milli/src/documents/enriched.rs
Normal file
109
milli/src/documents/enriched.rs
Normal file
@ -0,0 +1,109 @@
|
||||
use std::fs::File;
|
||||
use std::{io, str};
|
||||
|
||||
use obkv::KvReader;
|
||||
|
||||
use super::{
|
||||
DocumentsBatchCursor, DocumentsBatchCursorError, DocumentsBatchIndex, DocumentsBatchReader,
|
||||
Error,
|
||||
};
|
||||
use crate::update::DocumentId;
|
||||
use crate::FieldId;
|
||||
|
||||
/// The `EnrichedDocumentsBatchReader` provides a way to iterate over documents that have
|
||||
/// been created with a `DocumentsBatchBuilder` and, for the enriched data,
|
||||
/// a simple `grenad::Reader<File>`.
|
||||
///
|
||||
/// The documents are returned in the form of `obkv::Reader` where each field is identified with a
|
||||
/// `FieldId`. The mapping between the field ids and the field names is done thanks to the index.
|
||||
pub struct EnrichedDocumentsBatchReader<R> {
|
||||
documents: DocumentsBatchReader<R>,
|
||||
primary_key: String,
|
||||
external_ids: grenad::ReaderCursor<File>,
|
||||
}
|
||||
|
||||
impl<R: io::Read + io::Seek> EnrichedDocumentsBatchReader<R> {
|
||||
pub fn new(
|
||||
documents: DocumentsBatchReader<R>,
|
||||
primary_key: String,
|
||||
external_ids: grenad::Reader<File>,
|
||||
) -> Result<Self, Error> {
|
||||
if documents.documents_count() as u64 == external_ids.len() {
|
||||
Ok(EnrichedDocumentsBatchReader {
|
||||
documents,
|
||||
primary_key,
|
||||
external_ids: external_ids.into_cursor()?,
|
||||
})
|
||||
} else {
|
||||
Err(Error::InvalidEnrichedData)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn documents_count(&self) -> u32 {
|
||||
self.documents.documents_count()
|
||||
}
|
||||
|
||||
pub fn primary_key(&self) -> &str {
|
||||
&self.primary_key
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.documents.is_empty()
|
||||
}
|
||||
|
||||
pub fn documents_batch_index(&self) -> &DocumentsBatchIndex {
|
||||
self.documents.documents_batch_index()
|
||||
}
|
||||
|
||||
/// This method returns a forward cursor over the enriched documents.
|
||||
pub fn into_cursor_and_fields_index(
|
||||
self,
|
||||
) -> (EnrichedDocumentsBatchCursor<R>, DocumentsBatchIndex) {
|
||||
let EnrichedDocumentsBatchReader { documents, primary_key, mut external_ids } = self;
|
||||
let (documents, fields_index) = documents.into_cursor_and_fields_index();
|
||||
external_ids.reset();
|
||||
(EnrichedDocumentsBatchCursor { documents, primary_key, external_ids }, fields_index)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EnrichedDocument<'a> {
|
||||
pub document: KvReader<'a, FieldId>,
|
||||
pub document_id: DocumentId,
|
||||
}
|
||||
|
||||
pub struct EnrichedDocumentsBatchCursor<R> {
|
||||
documents: DocumentsBatchCursor<R>,
|
||||
primary_key: String,
|
||||
external_ids: grenad::ReaderCursor<File>,
|
||||
}
|
||||
|
||||
impl<R> EnrichedDocumentsBatchCursor<R> {
|
||||
pub fn primary_key(&self) -> &str {
|
||||
&self.primary_key
|
||||
}
|
||||
/// Resets the cursor to be able to read from the start again.
|
||||
pub fn reset(&mut self) {
|
||||
self.documents.reset();
|
||||
self.external_ids.reset();
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: io::Read + io::Seek> EnrichedDocumentsBatchCursor<R> {
|
||||
/// Returns the next document, starting from the first one. Subsequent calls to
|
||||
/// `next_enriched_document` advance the document reader until all the documents have been read.
|
||||
pub fn next_enriched_document(
|
||||
&mut self,
|
||||
) -> Result<Option<EnrichedDocument>, DocumentsBatchCursorError> {
|
||||
let document = self.documents.next_document()?;
|
||||
let document_id = match self.external_ids.move_on_next()? {
|
||||
Some((_, bytes)) => serde_json::from_slice(bytes).map(Some)?,
|
||||
None => None,
|
||||
};
|
||||
|
||||
match document.zip(document_id) {
|
||||
Some((document, document_id)) => Ok(Some(EnrichedDocument { document, document_id })),
|
||||
None => Ok(None),
|
||||
}
|
||||
}
|
||||
}
|
292
milli/src/documents/mod.rs
Normal file
292
milli/src/documents/mod.rs
Normal file
@ -0,0 +1,292 @@
|
||||
mod builder;
|
||||
mod enriched;
|
||||
mod reader;
|
||||
mod serde_impl;
|
||||
|
||||
use std::fmt::{self, Debug};
|
||||
use std::io;
|
||||
use std::str::Utf8Error;
|
||||
|
||||
use bimap::BiHashMap;
|
||||
pub use builder::DocumentsBatchBuilder;
|
||||
pub use enriched::{EnrichedDocument, EnrichedDocumentsBatchCursor, EnrichedDocumentsBatchReader};
|
||||
use obkv::KvReader;
|
||||
pub use reader::{DocumentsBatchCursor, DocumentsBatchCursorError, DocumentsBatchReader};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::error::{FieldIdMapMissingEntry, InternalError};
|
||||
use crate::{FieldId, Object, Result};
|
||||
|
||||
/// The key that is used to store the `DocumentsBatchIndex` data structure,
|
||||
/// it is the absolute last key of the list.
|
||||
const DOCUMENTS_BATCH_INDEX_KEY: [u8; 8] = u64::MAX.to_be_bytes();
|
||||
|
||||
/// Helper function to convert an obkv reader into a JSON object.
|
||||
pub fn obkv_to_object(obkv: &KvReader<FieldId>, index: &DocumentsBatchIndex) -> Result<Object> {
|
||||
obkv.iter()
|
||||
.map(|(field_id, value)| {
|
||||
let field_name = index
|
||||
.name(field_id)
|
||||
.ok_or(FieldIdMapMissingEntry::FieldId { field_id, process: "obkv_to_object" })?;
|
||||
let value = serde_json::from_slice(value).map_err(InternalError::SerdeJson)?;
|
||||
Ok((field_name.to_string(), value))
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// A bidirectional map that links field ids to their name in a document batch.
|
||||
#[derive(Default, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct DocumentsBatchIndex(pub BiHashMap<FieldId, String>);
|
||||
|
||||
impl DocumentsBatchIndex {
|
||||
/// Inserts the field in the map, or returns its field id if it already exists.
|
||||
pub fn insert(&mut self, field: &str) -> FieldId {
|
||||
match self.0.get_by_right(field) {
|
||||
Some(field_id) => *field_id,
|
||||
None => {
|
||||
let field_id = self.0.len() as FieldId;
|
||||
self.0.insert(field_id, field.to_string());
|
||||
field_id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0.is_empty()
|
||||
}
|
||||
|
||||
pub fn len(&self) -> usize {
|
||||
self.0.len()
|
||||
}
|
||||
|
||||
pub fn iter(&self) -> bimap::hash::Iter<FieldId, String> {
|
||||
self.0.iter()
|
||||
}
|
||||
|
||||
pub fn name(&self, id: FieldId) -> Option<&str> {
|
||||
self.0.get_by_left(&id).map(AsRef::as_ref)
|
||||
}
|
||||
|
||||
pub fn id(&self, name: &str) -> Option<FieldId> {
|
||||
self.0.get_by_right(name).cloned()
|
||||
}
|
||||
|
||||
pub fn recreate_json(&self, document: &obkv::KvReaderU16) -> Result<Object> {
|
||||
let mut map = Object::new();
|
||||
|
||||
for (k, v) in document.iter() {
|
||||
// TODO: TAMO: update the error type
|
||||
let key =
|
||||
self.0.get_by_left(&k).ok_or(crate::error::InternalError::DatabaseClosing)?.clone();
|
||||
let value = serde_json::from_slice::<serde_json::Value>(v)
|
||||
.map_err(crate::error::InternalError::SerdeJson)?;
|
||||
map.insert(key, value);
|
||||
}
|
||||
|
||||
Ok(map)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
ParseFloat { error: std::num::ParseFloatError, line: usize, value: String },
|
||||
InvalidDocumentFormat,
|
||||
InvalidEnrichedData,
|
||||
InvalidUtf8(Utf8Error),
|
||||
Csv(csv::Error),
|
||||
Json(serde_json::Error),
|
||||
Serialize(serde_json::Error),
|
||||
Grenad(grenad::Error),
|
||||
Io(io::Error),
|
||||
}
|
||||
|
||||
impl From<csv::Error> for Error {
|
||||
fn from(e: csv::Error) -> Self {
|
||||
Self::Csv(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<io::Error> for Error {
|
||||
fn from(other: io::Error) -> Self {
|
||||
Self::Io(other)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<serde_json::Error> for Error {
|
||||
fn from(other: serde_json::Error) -> Self {
|
||||
Self::Json(other)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<grenad::Error> for Error {
|
||||
fn from(other: grenad::Error) -> Self {
|
||||
Self::Grenad(other)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Utf8Error> for Error {
|
||||
fn from(other: Utf8Error) -> Self {
|
||||
Self::InvalidUtf8(other)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Error::ParseFloat { error, line, value } => {
|
||||
write!(f, "Error parsing number {:?} at line {}: {}", value, line, error)
|
||||
}
|
||||
Error::InvalidDocumentFormat => {
|
||||
f.write_str("Invalid document addition format, missing the documents batch index.")
|
||||
}
|
||||
Error::InvalidEnrichedData => f.write_str("Invalid enriched data."),
|
||||
Error::InvalidUtf8(e) => write!(f, "{}", e),
|
||||
Error::Io(e) => write!(f, "{}", e),
|
||||
Error::Serialize(e) => write!(f, "{}", e),
|
||||
Error::Grenad(e) => write!(f, "{}", e),
|
||||
Error::Csv(e) => write!(f, "{}", e),
|
||||
Error::Json(e) => write!(f, "{}", e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for Error {}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn objects_from_json_value(json: serde_json::Value) -> Vec<crate::Object> {
|
||||
let documents = match json {
|
||||
object @ serde_json::Value::Object(_) => vec![object],
|
||||
serde_json::Value::Array(objects) => objects,
|
||||
invalid => {
|
||||
panic!("an array of objects must be specified, {:#?} is not an array", invalid)
|
||||
}
|
||||
};
|
||||
let mut objects = vec![];
|
||||
for document in documents {
|
||||
let object = match document {
|
||||
serde_json::Value::Object(object) => object,
|
||||
invalid => panic!("an object must be specified, {:#?} is not an object", invalid),
|
||||
};
|
||||
objects.push(object);
|
||||
}
|
||||
objects
|
||||
}
|
||||
|
||||
/// Macro used to generate documents, with the same syntax as `serde_json::json`
|
||||
#[cfg(test)]
|
||||
macro_rules! documents {
|
||||
($data:tt) => {{
|
||||
let documents = serde_json::json!($data);
|
||||
let documents = $crate::documents::objects_from_json_value(documents);
|
||||
$crate::documents::documents_batch_reader_from_objects(documents)
|
||||
}};
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn documents_batch_reader_from_objects(
|
||||
objects: impl IntoIterator<Item = Object>,
|
||||
) -> DocumentsBatchReader<std::io::Cursor<Vec<u8>>> {
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
for object in objects {
|
||||
builder.append_json_object(&object).unwrap();
|
||||
}
|
||||
let vector = builder.into_inner().unwrap();
|
||||
DocumentsBatchReader::from_reader(std::io::Cursor::new(vector)).unwrap()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use std::io::Cursor;
|
||||
|
||||
use serde_json::{json, Value};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn create_documents_no_errors() {
|
||||
let value = json!({
|
||||
"number": 1,
|
||||
"string": "this is a field",
|
||||
"array": ["an", "array"],
|
||||
"object": {
|
||||
"key": "value",
|
||||
},
|
||||
"bool": true
|
||||
});
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_json_object(value.as_object().unwrap()).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut documents, index) = DocumentsBatchReader::from_reader(Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
|
||||
assert_eq!(index.iter().count(), 5);
|
||||
let reader = documents.next_document().unwrap().unwrap();
|
||||
assert_eq!(reader.iter().count(), 5);
|
||||
assert!(documents.next_document().unwrap().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_multiple_documents() {
|
||||
let doc1 = json!({
|
||||
"bool": true,
|
||||
});
|
||||
let doc2 = json!({
|
||||
"toto": false,
|
||||
});
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_json_object(doc1.as_object().unwrap()).unwrap();
|
||||
builder.append_json_object(doc2.as_object().unwrap()).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
let (mut documents, index) = DocumentsBatchReader::from_reader(io::Cursor::new(vector))
|
||||
.unwrap()
|
||||
.into_cursor_and_fields_index();
|
||||
assert_eq!(index.iter().count(), 2);
|
||||
let reader = documents.next_document().unwrap().unwrap();
|
||||
assert_eq!(reader.iter().count(), 1);
|
||||
assert!(documents.next_document().unwrap().is_some());
|
||||
assert!(documents.next_document().unwrap().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_nested() {
|
||||
let docs_reader = documents!([{
|
||||
"hello": {
|
||||
"toto": ["hello"]
|
||||
}
|
||||
}]);
|
||||
|
||||
let (mut cursor, _) = docs_reader.into_cursor_and_fields_index();
|
||||
let doc = cursor.next_document().unwrap().unwrap();
|
||||
let nested: Value = serde_json::from_slice(doc.get(0).unwrap()).unwrap();
|
||||
assert_eq!(nested, json!({ "toto": ["hello"] }));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn out_of_order_json_fields() {
|
||||
let _documents = documents!([
|
||||
{"id": 1,"b": 0},
|
||||
{"id": 2,"a": 0,"b": 0},
|
||||
]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn out_of_order_csv_fields() {
|
||||
let csv1_content = "id:number,b\n1,0";
|
||||
let csv1 = csv::Reader::from_reader(Cursor::new(csv1_content));
|
||||
|
||||
let csv2_content = "id:number,a,b\n2,0,0";
|
||||
let csv2 = csv::Reader::from_reader(Cursor::new(csv2_content));
|
||||
|
||||
let mut builder = DocumentsBatchBuilder::new(Vec::new());
|
||||
builder.append_csv(csv1).unwrap();
|
||||
builder.append_csv(csv2).unwrap();
|
||||
let vector = builder.into_inner().unwrap();
|
||||
|
||||
DocumentsBatchReader::from_reader(Cursor::new(vector)).unwrap();
|
||||
}
|
||||
}
|
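A tiny sketch of the `DocumentsBatchIndex` bidirectional mapping described above, assuming `FieldId` is a plain numeric id as elsewhere in this diff:

use milli::documents::DocumentsBatchIndex;

fn main() {
    let mut index = DocumentsBatchIndex::default();
    let title_id = index.insert("title");
    let pop_id = index.insert("pop");

    // Re-inserting an existing name returns the id that was already allocated.
    assert_eq!(index.insert("title"), title_id);

    // The mapping can be queried in both directions.
    assert_eq!(index.name(pop_id), Some("pop"));
    assert_eq!(index.id("title"), Some(title_id));
    assert_eq!(index.len(), 2);
}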
Some files were not shown because too many files have changed in this diff.