pytests: merge benchmark and test crates

David Hewitt 2022-02-05 09:39:56 +00:00
parent abd7eafafb
commit 8b47f4f120
44 changed files with 132 additions and 201 deletions

View File

@@ -83,10 +83,8 @@ jobs:
- name: Run benchmarks
run: |
cd pytests/pyo3-benchmarks
pip install -r requirements-dev.txt
pip install .
pytest --benchmark-json ../../output.json --benchmark-enable
pip install nox
nox -f pytests/noxfile.py -s bench -- --benchmark-json $(pwd)/output.json
- name: Store benchmark result
uses: rhysd/github-action-benchmark@v1
with:

View File

@@ -130,10 +130,10 @@ harness = false
[workspace]
members = [
"pyo3-ffi",
"pyo3-build-config",
"pyo3-macros",
"pyo3-macros-backend",
"pytests/pyo3-benchmarks",
"pytests/pyo3-pytests",
"pytests",
"examples",
"xtask"
]

View File

@@ -115,7 +115,7 @@ First, there are Rust-based benchmarks located in the `benches` subdirectory. As
cargo +nightly bench
Second, there is a Python-based benchmark contained in the `pyo3-benchmarks` example. You can read more about it [here](examples/pyo3-benchmarks).
Second, there is a Python-based benchmark contained in the `pytests` subdirectory. You can read more about it [here](pytests).
## Sponsor this project

View File

@@ -14,37 +14,22 @@ test: lint test_py
test_py:
@for example in examples/*/noxfile.py; do echo "-- Running nox for $$example --"; nox -f $$example/noxfile.py || exit 1; echo ""; done
@for package in pytests/*/noxfile.py; do echo "-- Running nox for $$package --"; nox -f $$package/noxfile.py || exit 1; echo ""; done
echo "-- Running nox for pytests/noxfile.py --";
nox -f pytests/noxfile.py || exit 1;
fmt_py:
black . --check
fmt_rust:
cargo fmt --all -- --check
for package in pytests/*/; do cargo fmt --manifest-path $$package/Cargo.toml -- --check || exit 1; done
fmt: fmt_rust fmt_py
@true
coverage:
# cargo llvm-cov clean --workspace
# cargo llvm-cov $(COVERAGE_PACKAGES) --no-report
# cargo llvm-cov $(COVERAGE_PACKAGES) --no-report --features abi3
# cargo llvm-cov $(COVERAGE_PACKAGES) --no-report --features $(ALL_ADDITIVE_FEATURES)
# cargo llvm-cov $(COVERAGE_PACKAGES) --no-report --features abi3 $(ALL_ADDITIVE_FEATURES)
bash -c "\
set -a\
source <(cargo llvm-cov show-env)\
make test_py\
"
cargo llvm-cov $(COVERAGE_PACKAGES) --no-run --summary-only
clippy:
cargo clippy --features="$(ALL_ADDITIVE_FEATURES)" --all-targets --workspace -- -Dwarnings
cargo clippy --features="abi3 $(ALL_ADDITIVE_FEATURES)" --all-targets --workspace -- -Dwarnings
for example in examples/*/; do cargo clippy --manifest-path $$example/Cargo.toml -- -Dwarnings || exit 1; done
for package in pytests/*/; do cargo clippy --manifest-path $$package/Cargo.toml -- -Dwarnings || exit 1; done
lint: fmt clippy
@true

View File

@@ -6,10 +6,10 @@ description = "Python-based tests for PyO3"
edition = "2018"
[dependencies]
pyo3 = { path = "../../", features = ["extension-module"] }
pyo3 = { path = "../", features = ["extension-module"] }
[build-dependencies]
pyo3-build-config = { path = "../../pyo3-build-config" }
pyo3-build-config = { path = "../pyo3-build-config" }
[lib]
name = "pyo3_pytests"

View File

@@ -1,10 +1,34 @@
# PyO3 Python tests
# pyo3-pytests
These crates are a collection of test extension modules built with PyO3. They are all tested using `nox` in PyO3's CI.
An extension module built using PyO3, used to test and benchmark PyO3 from Python.
Below is a brief description of each of these:
## Testing
| Example | Description |
| ------- | ----------- |
| `pyo3-benchmarks` | A project containing some benchmarks of PyO3 functionality called from Python. |
| `pyo3-pytests` | A project containing some tests of PyO3 functionality called from Python. |
This package is intended to be built using `maturin`. Once built, you can run the tests using `pytest`:
```shell
pip install maturin
maturin develop
pytest
```
Alternatively, install nox and run the tests inside an isolated environment:
```shell
nox
```
## Running benchmarks
You can install the module in your Python environment and then run the benchmarks with pytest:
```shell
pip install .
pytest --benchmark-enable
```
Or with nox:
```shell
nox -s bench
```
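As an illustration of what these benchmarks look like, a minimal pytest-benchmark test compares a Rust-backed callable against a pure-Python equivalent. A sketch based on the benchmark tests later in this commit (`pyfunctions.none` is one of the functions the module exposes):

```python
from pyo3_pytests import pyfunctions


def none_py():
    # Pure-Python baseline for the Rust-backed pyfunctions.none.
    return None


def test_none_rs(benchmark):
    # Check the two implementations agree, then time the Rust one.
    assert pyfunctions.none() == none_py()
    benchmark(pyfunctions.none)
```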

View File

@@ -15,4 +15,4 @@ def test(session):
def bench(session):
session.install("-rrequirements-dev.txt")
session.install(".")
session.run("pytest", "--benchmark-enable", *session.posargs)
session.run("pytest", "--benchmark-enable", "--benchmark-only", *session.posargs)
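Read together with the `session.install` lines above, the full `bench` session presumably looks like this after the change (a reconstruction; the `@nox.session` decorator is not visible in the hunk). The new `--benchmark-only` flag makes the session skip the ordinary tests and run only the benchmarks:

```python
import nox


@nox.session
def bench(session):
    session.install("-rrequirements-dev.txt")
    session.install(".")
    # --benchmark-enable turns benchmarks on; --benchmark-only skips
    # the plain test functions so this session runs benchmarks exclusively.
    session.run("pytest", "--benchmark-enable", "--benchmark-only", *session.posargs)
```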

View File

@@ -1,16 +0,0 @@
[package]
authors = ["PyO3 Authors"]
name = "pyo3-benchmarks"
version = "0.1.0"
description = "Python-based benchmarks for various PyO3 functionality"
edition = "2018"
[dependencies]
[dependencies.pyo3]
path = "../../"
features = ["extension-module"]
[lib]
name = "pyo3_benchmarks"
crate-type = ["cdylib"]

View File

@@ -1,3 +0,0 @@
include Cargo.toml
recursive-include src *
recursive-include tests

View File

@@ -1,18 +0,0 @@
# pyo3-benchmarks
This extension module contains benchmarks for pieces of PyO3's API accessible from Python.
## Running the benchmarks
You can install the module in your Python environment and then run the benchmarks with pytest:
```shell
pip install .
pytest --benchmark-enable
```
Or with nox:
```shell
nox -s bench
```

View File

@@ -1,4 +0,0 @@
pytest>=3.5.0
setuptools_rust~=1.0.0
pytest-benchmark~=3.2
pip>=21.3

View File

@@ -1,19 +0,0 @@
# pyo3-pytests
An extension module built using PyO3, used to test PyO3 from Python.
## Testing
This package is intended to be built using `maturin`. Once built, you can run the tests using `pytest`:
```shell
pip install maturin
maturin develop
pytest
```
Alternatively, install nox and run the tests inside an isolated environment:
```shell
nox
```

View File

@@ -1,9 +0,0 @@
import nox
@nox.session
def python(session):
session.install("-rrequirements-dev.txt")
session.install("maturin")
session.run_always("maturin", "develop")
session.run("pytest")

View File

@@ -1,3 +0,0 @@
[build-system]
requires = ["maturin>=0.12,<0.13"]
build-backend = "maturin"

View File

@@ -1,4 +0,0 @@
hypothesis>=3.55
pytest>=3.5.0
psutil>=5.6
pip>=21.3

View File

@@ -1,34 +0,0 @@
use pyo3::class::iter::{IterNextOutput, PyIterProtocol};
use pyo3::prelude::*;
/// This is for demonstrating how to return a value from __next__
#[pyclass]
struct PyClassIter {
count: usize,
}
#[pymethods]
impl PyClassIter {
#[new]
pub fn new() -> Self {
PyClassIter { count: 0 }
}
}
#[pyproto]
impl PyIterProtocol for PyClassIter {
fn __next__(mut slf: PyRefMut<Self>) -> IterNextOutput<usize, &'static str> {
if slf.count < 5 {
slf.count += 1;
IterNextOutput::Yield(slf.count)
} else {
IterNextOutput::Return("Ended")
}
}
}
#[pymodule]
pub fn pyclass_iter(_py: Python, m: &PyModule) -> PyResult<()> {
m.add_class::<PyClassIter>()?;
Ok(())
}

View File

@@ -0,0 +1,4 @@
hypothesis>=3.55
pytest>=6.0
pytest-benchmark>=3.4
psutil>=5.6

View File

@@ -9,7 +9,8 @@ pub mod misc;
pub mod objstore;
pub mod othermod;
pub mod path;
pub mod pyclass_iter;
pub mod pyclasses;
pub mod pyfunctions;
pub mod subclassing;
#[pymodule]
@@ -23,7 +24,8 @@ fn pyo3_pytests(py: Python, m: &PyModule) -> PyResult<()> {
m.add_wrapped(wrap_pymodule!(objstore::objstore))?;
m.add_wrapped(wrap_pymodule!(othermod::othermod))?;
m.add_wrapped(wrap_pymodule!(path::path))?;
m.add_wrapped(wrap_pymodule!(pyclass_iter::pyclass_iter))?;
m.add_wrapped(wrap_pymodule!(pyclasses::pyclasses))?;
m.add_wrapped(wrap_pymodule!(pyfunctions::pyfunctions))?;
m.add_wrapped(wrap_pymodule!(subclassing::subclassing))?;
// Inserting to sys.modules allows importing submodules nicely from Python
@@ -38,7 +40,8 @@ fn pyo3_pytests(py: Python, m: &PyModule) -> PyResult<()> {
sys_modules.set_item("pyo3_pytests.objstore", m.getattr("objstore")?)?;
sys_modules.set_item("pyo3_pytests.othermod", m.getattr("othermod")?)?;
sys_modules.set_item("pyo3_pytests.path", m.getattr("path")?)?;
sys_modules.set_item("pyo3_pytests.pyclass_iter", m.getattr("pyclass_iter")?)?;
sys_modules.set_item("pyo3_pytests.pyclasses", m.getattr("pyclasses")?)?;
sys_modules.set_item("pyo3_pytests.pyfunctions", m.getattr("pyfunctions")?)?;
sys_modules.set_item("pyo3_pytests.subclassing", m.getattr("subclassing")?)?;
Ok(())
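The `sys.modules` insertion this hunk extends is what makes the submodules importable with regular `import` syntax; without it, `pyclasses` would only be reachable as an attribute of the parent module. A hypothetical usage sketch:

```python
# Works because of the sys.modules entry set up in the pymodule above;
# a plain attribute would not support this import form.
import pyo3_pytests.pyclasses
from pyo3_pytests.pyclasses import PyClassIter

it = PyClassIter()
assert next(it) == 1
```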

pytests/src/pyclasses.rs Normal file
View File

@@ -0,0 +1,43 @@
use pyo3::iter::IterNextOutput;
use pyo3::prelude::*;
#[pyclass]
struct EmptyClass {}
#[pymethods]
impl EmptyClass {
#[new]
fn new() -> Self {
EmptyClass {}
}
}
/// This is for demonstrating how to return a value from __next__
#[pyclass]
struct PyClassIter {
count: usize,
}
#[pymethods]
impl PyClassIter {
#[new]
pub fn new() -> Self {
PyClassIter { count: 0 }
}
fn __next__(&mut self) -> IterNextOutput<usize, &'static str> {
if self.count < 5 {
self.count += 1;
IterNextOutput::Yield(self.count)
} else {
IterNextOutput::Return("Ended")
}
}
}
#[pymodule]
pub fn pyclasses(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_class::<EmptyClass>()?;
m.add_class::<PyClassIter>()?;
Ok(())
}
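`IterNextOutput::Return("Ended")` ends the iteration by raising `StopIteration` with `"Ended"` as its value. A hypothetical test sketching that behaviour from Python (the in-tree test later in this commit only checks the first three yields):

```python
import pytest
from pyo3_pytests import pyclasses


def test_iter_exhaustion():
    it = pyclasses.PyClassIter()
    assert [next(it) for _ in range(5)] == [1, 2, 3, 4, 5]
    # The Rust IterNextOutput::Return value becomes StopIteration's value.
    with pytest.raises(StopIteration) as excinfo:
        next(it)
    assert excinfo.value.value == "Ended"
```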

View File

@@ -54,25 +54,13 @@ fn args_kwargs<'a>(
(args, kwargs)
}
#[pyclass]
struct EmptyClass {}
#[pymethods]
impl EmptyClass {
#[new]
fn new() -> Self {
EmptyClass {}
}
}
#[pymodule]
fn pyo3_benchmarks(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
pub fn pyfunctions(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
m.add_function(wrap_pyfunction!(none, m)?)?;
m.add_function(wrap_pyfunction!(simple, m)?)?;
m.add_function(wrap_pyfunction!(simple_args, m)?)?;
m.add_function(wrap_pyfunction!(simple_kwargs, m)?)?;
m.add_function(wrap_pyfunction!(simple_args_kwargs, m)?)?;
m.add_function(wrap_pyfunction!(args_kwargs, m)?)?;
m.add_class::<EmptyClass>()?;
Ok(())
}

View File

@@ -1,9 +1,21 @@
import pytest
from pyo3_pytests import pyclass_iter
from pyo3_pytests import pyclasses
def test_empty_class_init(benchmark):
benchmark(pyclasses.EmptyClass)
class EmptyClassPy:
pass
def test_empty_class_init_py(benchmark):
benchmark(EmptyClassPy)
def test_iter():
i = pyclass_iter.PyClassIter()
i = pyclasses.PyClassIter()
assert next(i) == 1
assert next(i) == 2
assert next(i) == 3

View File

@@ -1,4 +1,4 @@
import pyo3_benchmarks
from pyo3_pytests import pyfunctions
def none_py():
@@ -10,10 +10,10 @@ def test_none_py(benchmark):
def test_none_rs(benchmark):
rust = pyo3_benchmarks.none()
rust = pyfunctions.none()
py = none_py()
assert rust == py
benchmark(pyo3_benchmarks.none)
benchmark(pyfunctions.none)
def simple_py(a, b="bar", *, c=None):
@@ -25,10 +25,10 @@ def test_simple_py(benchmark):
def test_simple_rs(benchmark):
rust = pyo3_benchmarks.simple(1, "foo", c={1: 2})
rust = pyfunctions.simple(1, "foo", c={1: 2})
py = simple_py(1, "foo", c={1: 2})
assert rust == py
benchmark(pyo3_benchmarks.simple, 1, "foo", c={1: 2})
benchmark(pyfunctions.simple, 1, "foo", c={1: 2})
def simple_args_py(a, b="bar", *args, c=None):
@@ -40,10 +40,10 @@ def test_simple_args_py(benchmark):
def test_simple_args_rs(benchmark):
rust = pyo3_benchmarks.simple_args(1, "foo", 4, 5, 6, c={1: 2})
rust = pyfunctions.simple_args(1, "foo", 4, 5, 6, c={1: 2})
py = simple_args_py(1, "foo", 4, 5, 6, c={1: 2})
assert rust == py
benchmark(pyo3_benchmarks.simple_args, 1, "foo", 4, 5, 6, c={1: 2})
benchmark(pyfunctions.simple_args, 1, "foo", 4, 5, 6, c={1: 2})
def simple_kwargs_py(a, b="bar", c=None, **kwargs):
@@ -55,10 +55,10 @@ def test_simple_kwargs_py(benchmark):
def test_simple_kwargs_rs(benchmark):
rust = pyo3_benchmarks.simple_kwargs(1, "foo", c={1: 2}, bar=4, foo=10)
rust = pyfunctions.simple_kwargs(1, "foo", c={1: 2}, bar=4, foo=10)
py = simple_kwargs_py(1, "foo", c={1: 2}, bar=4, foo=10)
assert rust == py
benchmark(pyo3_benchmarks.simple_kwargs, 1, "foo", c={1: 2}, bar=4, foo=10)
benchmark(pyfunctions.simple_kwargs, 1, "foo", c={1: 2}, bar=4, foo=10)
def simple_args_kwargs_py(a, b="bar", *args, c=None, **kwargs):
@@ -70,10 +70,10 @@ def test_simple_args_kwargs_py(benchmark):
def test_simple_args_kwargs_rs(benchmark):
rust = pyo3_benchmarks.simple_args_kwargs(1, "foo", "baz", bar=4, foo=10)
rust = pyfunctions.simple_args_kwargs(1, "foo", "baz", bar=4, foo=10)
py = simple_args_kwargs_py(1, "foo", "baz", bar=4, foo=10)
assert rust == py
benchmark(pyo3_benchmarks.simple_args_kwargs, 1, "foo", "baz", bar=4, foo=10)
benchmark(pyfunctions.simple_args_kwargs, 1, "foo", "baz", bar=4, foo=10)
def args_kwargs_py(*args, **kwargs):
@@ -85,19 +85,7 @@ def test_args_kwargs_py(benchmark):
def test_args_kwargs_rs(benchmark):
rust = pyo3_benchmarks.args_kwargs(1, "foo", {1: 2}, bar=4, foo=10)
rust = pyfunctions.args_kwargs(1, "foo", {1: 2}, bar=4, foo=10)
py = args_kwargs_py(1, "foo", {1: 2}, bar=4, foo=10)
assert rust == py
benchmark(pyo3_benchmarks.args_kwargs, 1, "foo", {1: 2}, a=4, foo=10)
def test_empty_class_init(benchmark):
benchmark(pyo3_benchmarks.EmptyClass)
class EmptyClassPy:
pass
def test_empty_class_init_py(benchmark):
benchmark(EmptyClassPy)
benchmark(pyfunctions.args_kwargs, 1, "foo", {1: 2}, a=4, foo=10)

View File

@@ -41,7 +41,7 @@ use crate::{ffi, IntoPy, IntoPyPointer, PyClass, PyObject, Python};
/// # Python::with_gil(|py| {
/// # let inst = Py::new(py, Iter { count: 0 }).unwrap();
/// # pyo3::py_run!(py, inst, "assert next(inst) == 1");
/// # }); // test of StopIteration is done in examples/pyo3-pytests/pyclass_iter.rs
/// # }); // test of StopIteration is done in pytests/src/pyclasses.rs
/// ```
#[allow(unused_variables)]
pub trait PyIterProtocol<'p>: PyClass {

View File

@@ -1,5 +1,5 @@
use anyhow::{ensure, Context, Result};
use std::{collections::HashMap, process::Command};
use std::{collections::HashMap, path::Path, process::Command};
use structopt::StructOpt;
#[derive(StructOpt)]
@@ -116,16 +116,12 @@ fn llvm_cov_command(args: &[&str]) -> Command {
fn run_python_tests<'a>(
env: impl IntoIterator<Item = (&'a String, &'a String)> + Copy,
) -> Result<()> {
for entry in std::fs::read_dir("pytests")? {
let path = entry?.path();
if path.is_dir() && path.join("noxfile.py").exists() {
run(Command::new("nox")
.arg("--non-interactive")
.arg("-f")
.arg(path.join("noxfile.py"))
.arg(Path::new("pytests").join("noxfile.py"))
.envs(env))?;
}
}
for entry in std::fs::read_dir("examples")? {
let path = entry?.path();
if path.is_dir() && path.join("noxfile.py").exists() {