Fixing doctests
coreylowman committed Oct 25, 2023
Parent: 341ee78, commit: 929602e
Showing 94 changed files with 184 additions and 194 deletions.
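
Every hunk below makes the same one-line fix: a doctest's hidden `# use` import switches from the umbrella `dfdx` crate to `dfdx_core`. Rustdoc compiles each doc example as a small standalone crate that links the crate being documented under its own name, so inside `dfdx-core` the path `dfdx::...` does not resolve and the doctests fail to build. A minimal sketch of what the doctest harness effectively compiles after the fix; the shape and assertion here are illustrative, not taken from any single hunk:

```rust
// Roughly the program rustdoc builds for one of these doc examples.
// Hidden `#`-prefixed doctest lines are compiled and run too, which is
// why the import must name `dfdx_core` rather than `dfdx`.
use dfdx_core::prelude::*;

fn main() {
    // Construct the device the tensor will be stored on.
    let dev: Cpu = Default::default();
    // A 1d tensor with 3 f32 elements, filled with zeros.
    let t: Tensor<Rank1<3>, f32, _> = dev.zeros();
    assert_eq!(t.array(), [0.0; 3]);
}
```

Assuming the package name matches its directory, the fix can be verified with `cargo test --doc -p dfdx-core`.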
4 changes: 2 additions & 2 deletions dfdx-core/src/data/arange.rs
@@ -11,15 +11,15 @@ pub trait Arange<E: Dtype>: Storage<E> + ZerosTensor<E> + TensorFromVec<E> {
///
/// Const sized tensor:
/// ```rust
-/// # use dfdx::{prelude::*, data::Arange};
+/// # use dfdx_core::{prelude::*, data::Arange};
/// # let dev: Cpu = Default::default();
/// let t: Tensor<Rank1<5>, f32, _> = dev.arange(Const::<5>);
/// assert_eq!(t.array(), [0.0, 1.0, 2.0, 3.0, 4.0]);
/// ```
///
/// Runtime sized tensor:
/// ```rust
-/// # use dfdx::{prelude::*, data::Arange};
+/// # use dfdx_core::{prelude::*, data::Arange};
/// # let dev: Cpu = Default::default();
/// let t: Tensor<(usize, ), f32, _> = dev.arange(5);
/// assert_eq!(t.as_vec(), [0.0, 1.0, 2.0, 3.0, 4.0]);
6 changes: 3 additions & 3 deletions dfdx-core/src/data/batch.rs
@@ -80,14 +80,14 @@ pub trait IteratorBatchExt: Iterator {
///
/// Const batches:
/// ```rust
-/// # use dfdx::{prelude::*, data::IteratorBatchExt};
+/// # use dfdx_core::{prelude::*, data::IteratorBatchExt};
/// let items: Vec<[usize; 5]> = (0..12).batch_exact(Const::<5>).collect();
/// assert_eq!(&items, &[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]);
/// ```
///
/// Runtime batches:
/// ```rust
-/// # use dfdx::{prelude::*, data::IteratorBatchExt};
+/// # use dfdx_core::{prelude::*, data::IteratorBatchExt};
/// let items: Vec<Vec<usize>> = (0..12).batch_exact(5).collect();
/// assert_eq!(&items, &[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]);
/// ```
@@ -104,7 +104,7 @@ pub trait IteratorBatchExt: Iterator {
///
/// Example:
/// ```rust
-/// # use dfdx::{prelude::*, data::IteratorBatchExt};
+/// # use dfdx_core::{prelude::*, data::IteratorBatchExt};
/// let items: Vec<Vec<usize>> = (0..12).batch_with_last(5).collect();
/// assert_eq!(&items, &[vec![0, 1, 2, 3, 4], vec![5, 6, 7, 8, 9], vec![10, 11]]);
/// ```
2 changes: 1 addition & 1 deletion dfdx-core/src/data/collate.rs
@@ -93,7 +93,7 @@ pub trait IteratorCollateExt: Iterator {
///
/// Example implementations:
/// ```rust
-/// # use dfdx::data::IteratorCollateExt;
+/// # use dfdx_core::data::IteratorCollateExt;
/// let data = [[('a', 'b'); 10], [('c', 'd'); 10], [('e', 'f'); 10]];
/// // we use collate to transform each batch:
/// let mut iter = data.into_iter().collate();
8 changes: 4 additions & 4 deletions dfdx-core/src/data/one_hot_encode.rs
@@ -16,7 +16,7 @@ pub trait OneHotEncode<E: Dtype>: Storage<E> + ZerosTensor<E> + TensorFromVec<E>
///
/// Const class labels and const n:
/// ```rust
-/// # use dfdx::{prelude::*, data::OneHotEncode};
+/// # use dfdx_core::{prelude::*, data::OneHotEncode};
/// # let dev: Cpu = Default::default();
/// let class_labels = [0, 1, 2, 1, 1];
/// let probs: Tensor<Rank2<5, 3>, f32, _> = dev.one_hot_encode(Const::<3>, class_labels);
@@ -31,7 +31,7 @@ pub trait OneHotEncode<E: Dtype>: Storage<E> + ZerosTensor<E> + TensorFromVec<E>
///
/// Runtime class labels and const n:
/// ```rust
-/// # use dfdx::{prelude::*, data::OneHotEncode};
+/// # use dfdx_core::{prelude::*, data::OneHotEncode};
/// # let dev: Cpu = Default::default();
/// let class_labels = [0, 1, 2, 1, 1];
/// let probs: Tensor<(Const<5>, usize), f32, _> = dev.one_hot_encode(3, class_labels);
@@ -46,7 +46,7 @@ pub trait OneHotEncode<E: Dtype>: Storage<E> + ZerosTensor<E> + TensorFromVec<E>
///
/// Const class labels and runtime n:
/// ```rust
-/// # use dfdx::{prelude::*, data::OneHotEncode};
+/// # use dfdx_core::{prelude::*, data::OneHotEncode};
/// # let dev: Cpu = Default::default();
/// let class_labels = std::vec![0, 1, 2, 1, 1];
/// let probs: Tensor<(usize, Const<3>), f32, _> = dev.one_hot_encode(Const, class_labels);
@@ -61,7 +61,7 @@ pub trait OneHotEncode<E: Dtype>: Storage<E> + ZerosTensor<E> + TensorFromVec<E>
///
/// Runtime both:
/// ```rust
-/// # use dfdx::{prelude::*, data::OneHotEncode};
+/// # use dfdx_core::{prelude::*, data::OneHotEncode};
/// # let dev: Cpu = Default::default();
/// let class_labels = std::vec![0, 1, 2, 1, 1];
/// let probs: Tensor<(usize, usize), f32, _> = dev.one_hot_encode(3, class_labels);
2 changes: 1 addition & 1 deletion dfdx-core/src/data/stack.rs
@@ -29,7 +29,7 @@ pub trait IteratorStackExt: Iterator {
///
/// Example implementations:
/// ```rust
-/// # use dfdx::{data::IteratorStackExt, prelude::*};
+/// # use dfdx_core::{data::IteratorStackExt, prelude::*};
/// # let dev: Cpu = Default::default();
/// let a: Tensor<Rank1<3>, f32, _> = dev.zeros();
/// let data = [[a.clone(), a.clone(), a]];
2 changes: 1 addition & 1 deletion dfdx-core/src/lib.rs
@@ -67,7 +67,7 @@
//! Here's how you might use a device:
//!
//! ```rust
-//! # use dfdx::prelude::*;
+//! # use dfdx_core::prelude::*;
//! let dev: Cpu = Default::default();
//! let t: Tensor<Rank2<2, 3>, f32, _> = dev.zeros();
//! ```
2 changes: 1 addition & 1 deletion dfdx-core/src/shapes/mod.rs
@@ -2,7 +2,7 @@
//!
//! Example shapes:
//! ```rust
-//! # use dfdx::shapes::*;
+//! # use dfdx_core::shapes::*;
//! let _: Rank3<2, 3, 4> = Default::default();
//! let _: (Const<2>, Const<3>) = Default::default();
//! let _: (usize, Const<4>) = (3, Const);
16 changes: 8 additions & 8 deletions dfdx-core/src/tensor/mod.rs
@@ -15,13 +15,13 @@
//! In order to do anything with tensors, you first need to construct the device that they will be stored on:
//!
//! ```rust
-//! # use dfdx::prelude::*;
+//! # use dfdx_core::prelude::*;
//! let dev: Cpu = Default::default();
//! let dev: Cpu = Cpu::seed_from_u64(0);
//! ```
//!
//! ```ignore
-//! # use dfdx::prelude::*;
+//! # use dfdx_core::prelude::*;
//! let dev: Cuda = Default::default();
//! let dev: Cuda = Cuda::seed_from_u64(1234);
//! let dev: Cuda = Cuda::try_build(0, 1234).unwrap();
@@ -34,7 +34,7 @@
//! See [TensorFrom] & [TensorFromVec].
//!
//! ```rust
-//! # use dfdx::prelude::*;
+//! # use dfdx_core::prelude::*;
//! # let dev: Cpu = Default::default();
//! let _ = dev.tensor([1.0, 2.0, 3.0]);
//! let _ = dev.tensor_from_vec(vec![1.0, 2.0, 3.0], (3, ));
@@ -45,7 +45,7 @@
//! See [ZerosTensor] and [OnesTensor].
//!
//! ```rust
-//! # use dfdx::prelude::*;
+//! # use dfdx_core::prelude::*;
//! # let dev: Cpu = Default::default();
//! let _: Tensor<Rank1<5>,f32 , _> = dev.zeros();
//! let _: Tensor<Rank2<3, 2>, f32, _> = dev.ones();
@@ -56,7 +56,7 @@
//! See [SampleTensor]
//!
//! ```rust
-//! # use dfdx::prelude::*;
+//! # use dfdx_core::prelude::*;
//! # let dev: Cpu = Default::default();
//! let _: Tensor<Rank1<5>, f32, _> = dev.sample_uniform();
//! let _: Tensor<Rank2<3, 5>, f32, _> = dev.sample_normal();
@@ -70,7 +70,7 @@
//! You can use [Tensor::copy_from] and [Tensor::copy_into] to copy data into a tensor:
//!
//! ```rust
-//! # use dfdx::prelude::*;
+//! # use dfdx_core::prelude::*;
//! # let dev: Cpu = Default::default();
//! let mut a: Tensor<Rank1<1000>, f32, _> = dev.zeros();
//! let buf: Vec<f32> = vec![1.0; 1000];
@@ -90,7 +90,7 @@
//! with them directly.
//!
//! ```rust
-//! # use dfdx::prelude::*;
+//! # use dfdx_core::prelude::*;
//! # let dev: Cpu = Default::default();
//! let t: Tensor<Rank2<2, 3>, f32, _> = dev.zeros();
//! let t: [[f32; 3]; 2] = t.array();
@@ -105,7 +105,7 @@
//! Note that these two methods are only present for tensors without a tape already.
//!
//! ```rust
-//! # use dfdx::prelude::*;
+//! # use dfdx_core::prelude::*;
//! # let dev: Cpu = Default::default();
//! let t: Tensor<Rank1<5>,f32, _> = dev.zeros();
//! let mut grads = Gradients::leaky();
22 changes: 11 additions & 11 deletions dfdx-core/src/tensor/storage_traits.rs
@@ -104,7 +104,7 @@ impl<S: Shape, E, D: CopySlice<E>, T> Tensor<S, E, D, T> {
/// Copy *physical* data from a slice - **panics** if there are not enough elements in the slice.
///
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let data = [1.0, 2.0, 3.0, 4.0];
/// let mut t: Tensor<Rank2<2, 2>, f32, _> = dev.zeros();
@@ -118,7 +118,7 @@ impl<S: Shape, E, D: CopySlice<E>, T> Tensor<S, E, D, T> {
/// Copy *physical* data into a slice - **panics** if there are not enough elements in the tensor.
///
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let t: Tensor<Rank2<2, 2>, f32, _> = dev.tensor([[1.0, 2.0], [3.0, 4.0]]);
/// let mut data = [0.0; 4];
@@ -134,7 +134,7 @@ impl<S: Shape, E, D: CopySlice<E>, T> Tensor<S, E, D, T> {
pub trait ZerosTensor<E>: Storage<E> {
/// Creates a tensor filled with zeros.
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a: Tensor<Rank2<2, 3>, f32, _> = dev.zeros();
/// ```
@@ -151,14 +151,14 @@ pub trait ZerosTensor<E>: Storage<E> {
///
/// Given a shape directly:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a: Tensor<(usize, Const<3>), f32, _> = dev.zeros_like(&(5, Const));
/// ```
///
/// Given another tensor:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a: Tensor<Rank2<2, 3>, f32, _> = dev.zeros();
/// let b: Tensor<Rank2<2, 3>, f32, _> = dev.zeros_like(&a);
@@ -179,7 +179,7 @@ pub trait ZeroFillStorage<E>: Storage<E> {
pub trait OnesTensor<E>: Storage<E> {
/// Creates a tensor filled with ones.
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a: Tensor<Rank2<2, 3>, f32, _> = dev.ones();
/// ```
@@ -196,14 +196,14 @@ pub trait OnesTensor<E>: Storage<E> {
///
/// Given a shape directly:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a: Tensor<(usize, Const<3>), f32, _> = dev.ones_like(&(5, Const));
/// ```
///
/// Given another tensor:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a: Tensor<Rank2<2, 3>, f32, _> = dev.ones();
/// let b: Tensor<_, f32, _> = dev.ones_like(&a);
@@ -231,7 +231,7 @@ pub trait TriangleTensor<E>: Storage<E> {
/// ## Examples
///
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a: Tensor<Rank2<3, 3>, f32, _> = dev.upper_tri(1.0, None);
/// assert_eq!(a.array(),
@@ -297,7 +297,7 @@ pub trait TriangleTensor<E>: Storage<E> {
/// ## Examples
///
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a: Tensor<Rank2<3, 3>, f32, _> = dev.lower_tri(1.0, None);
/// assert_eq!(a.array(),
@@ -480,7 +480,7 @@ impl<S: Shape, E, D: Storage<E>, T> Tensor<S, E, D, T> {
pub trait TensorFrom<Src, S: Shape, E>: Storage<E> {
/// Create a tensor from rust data
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let _: Tensor<Rank2<2, 3>, f32, Cpu> = dev.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]);
/// let _: Tensor<Rank2<2, 3>, f32, Cpu> = dev.tensor(vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
6 changes: 3 additions & 3 deletions dfdx-core/src/tensor/tensor_impls.rs
@@ -17,7 +17,7 @@ use std::sync::Arc;
///
/// Examples:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// // A 1d tensor with 1000 f32 elements, stored on the Cpu
/// type A = Tensor<Rank1<1000>, f32, Cpu>;
@@ -116,7 +116,7 @@ impl<S: Shape, E, D: Storage<E>, T> Tensor<S, E, D, T> {
pub trait PutTape<T> {
type Output;
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a: Tensor<Rank2<2, 3>, f32, _, NoneTape> = dev.zeros();
/// let a: Tensor<Rank2<2, 3>, f32, _, OwnedTape<f32, Cpu>> = a.put_tape(Default::default());
@@ -146,7 +146,7 @@ pub trait SplitTape {
type NoTape: Clone + PutTape<Self::Tape, Output = Self>;
/// Splits tape off of self
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// # let grads = Gradients::leaky();
/// let a: Tensor<Rank1<5>, f32, _, OwnedTape<f32, _>> = dev.zeros().traced(grads);
2 changes: 1 addition & 1 deletion dfdx-core/src/tensor_ops/abs/mod.rs
@@ -16,7 +16,7 @@ pub struct AbsKernelOp;
///
/// Examples:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let t = dev.tensor([-1.0, 0.0, 1.0, 2.0]);
/// let r = t.abs();
2 changes: 1 addition & 1 deletion dfdx-core/src/tensor_ops/accurate_gelu/mod.rs
@@ -28,7 +28,7 @@ pub struct AccurateGeLUKernelOp;
///
/// Examples:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let t = dev.tensor([-1.0, 0.0, 1.0, 2.0]);
/// let r = t.accurate_gelu();
2 changes: 1 addition & 1 deletion dfdx-core/src/tensor_ops/adam/mod.rs
@@ -14,7 +14,7 @@ use super::WeightDecay;
///
/// Changing all default parameters:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// AdamConfig {
/// lr: 1e-2,
/// betas: [0.1, 0.2],
4 changes: 2 additions & 2 deletions dfdx-core/src/tensor_ops/add/mod.rs
@@ -23,7 +23,7 @@ pub struct ScalarAddKernelOp<E> {
///
/// Example:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a = dev.tensor([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]);
/// let r = a + dev.ones();
@@ -32,7 +32,7 @@ pub struct ScalarAddKernelOp<E> {
///
/// Adding a scalar:
/// ```rust
-/// # use dfdx::prelude::*;
+/// # use dfdx_core::prelude::*;
/// # let dev: Cpu = Default::default();
/// let a = dev.tensor([[1.0, 2.0, 3.0], [-1.0, -2.0, -3.0]]);
/// let r = a + 1.0;
(Diff truncated: the remaining changed files are not shown.)
