#![allow(clippy::unwrap_used)]

// About 1 GiB of image data: 16_384 × 16_384 pixels × 4 channels × 1 byte.
const IMAGE_DIMENSION: u64 = 16_384;
const IMAGE_CHANNELS: u64 = 4;

// How many times we log the image.
// Each time with a single pixel changed.
const NUM_LOG_CALLS: usize = 4;

fn prepare() -> Vec<u8> {
    re_tracing::profile_function!();

    vec![0u8; (IMAGE_DIMENSION * IMAGE_DIMENSION * IMAGE_CHANNELS) as usize]

    // Skip filling with non-zero values; doing so adds a bit too much extra overhead.
    // The skipped version would look roughly like this:
    // let mut i = 0;
    // let mut image = Vec::new();
    // image.resize_with(
    //     (IMAGE_DIMENSION * IMAGE_DIMENSION * IMAGE_CHANNELS) as usize,
    //     || {
    //         i += 1;
    //         i as u8
    //     },
    // );
    // image
}

fn execute(mut raw_image_data: Vec<u8>) -> anyhow::Result<()> {
    re_tracing::profile_function!();

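    // Log into an in-memory sink so the measurement covers serialization and
    // ingestion into the recording stream, not file or network I/O.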
    let (rec, _storage) =
        rerun::RecordingStreamBuilder::new("rerun_example_benchmark_").memory()?;

    for i in 0..NUM_LOG_CALLS {
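        // Bump one byte (a channel of the first pixel) so every log call
        // carries slightly different data.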
        raw_image_data[i] += 1;

        rec.log(
            "test_image",
            &rerun::Image::from_rgba32(
                // TODO(andreas): We have to copy the image every time since the tensor buffer wants to
                // take ownership of it.
                // Note that even though our example here is *very* contrived, it's likely that a user
                // will want to keep their image, so this copy is definitely part of our API overhead!
                raw_image_data.clone(),
                [IMAGE_DIMENSION as _, IMAGE_DIMENSION as _],
            ),
        )?;
    }

    Ok(())
}

/// Benchmark logging a single large (~1 GiB) image, `NUM_LOG_CALLS` times with one pixel changed each time.
pub fn run() -> anyhow::Result<()> {
    re_tracing::profile_function!();
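    // `black_box` keeps the compiler from constant-folding or eliding the
    // prepared buffer, so `execute` sees it as opaque input.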
    let input = std::hint::black_box(prepare());
    execute(input)
}
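
// A minimal sketch of how `run()` could be exercised on its own, e.g. when
// profiling locally. This test module is an illustrative assumption, not part
// of the original benchmark harness, and is `#[ignore]`d because the benchmark
// allocates roughly 1 GiB.
#[cfg(test)]
mod tests {
    #[test]
    #[ignore = "allocates ~1 GiB; run explicitly with `cargo test -- --ignored`"]
    fn log_large_image_benchmark() {
        super::run().unwrap();
    }
}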