From 45f6578bd80f12284cf19cb1d3a395cc73fe14e0 Mon Sep 17 00:00:00 2001
From: Jeremy Maitin-Shepard
Date: Mon, 8 Jan 2024 21:14:10 -0800
Subject: [PATCH] docs: fix URL references for src/neuroglancer -> src rename

---
 README.md                                  | 28 +++++++++++-----------
 python/ext/src/compress_segmentation.h     |  2 +-
 python/neuroglancer/segment_colors.py      |  6 ++---
 src/annotation/user_layer.ts               |  2 +-
 src/datasource/precomputed/volume.md       |  4 ++--
 src/gpu_hash/README.md                     |  2 +-
 src/image_user_layer.ts                    |  2 +-
 src/mesh/frontend.ts                       |  2 +-
 src/single_mesh_user_layer.ts              |  2 +-
 src/ui/segmentation_display_options_tab.ts |  2 +-
 testdata/generate_npy_examples.py          |  2 +-
 11 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/README.md b/README.md
index 9d0ba5555..d426fa068 100644
--- a/README.md
+++ b/README.md
@@ -36,15 +36,15 @@ The four-pane view consists of 3 orthogonal cross-sectional views as well as a 3
 Neuroglancer itself is purely a client-side program, but it depends on data being accessible via HTTP in a suitable format. It is designed to easily support many different data sources, and there is existing support for the following data APIs/formats:
 
-- [Neuroglancer precomputed format](src/neuroglancer/datasource/precomputed)
-- [N5](src/neuroglancer/datasource/n5)
-- [Zarr v2/v3](src/neuroglancer/datasource/zarr)
+- [Neuroglancer precomputed format](src/datasource/precomputed)
+- [N5](src/datasource/n5)
+- [Zarr v2/v3](src/datasource/zarr)
 - [Python in-memory volumes](python/README.md) (with automatic mesh generation)
 - BOSS
 - DVID
 - Render
 - Single NIfTI files
-- [Deep Zoom images](src/neuroglancer/datasource/deepzoom)
+- [Deep Zoom images](src/datasource/deepzoom)
 
 # Supported browsers
 
@@ -55,7 +55,7 @@ Neuroglancer itself is purely a client-side program, but it depends on data bein
 # Keyboard and mouse bindings
 
 For the complete set of bindings, see
-[src/neuroglancer/ui/default_input_event_bindings.ts](src/neuroglancer/ui/default_input_event_bindings.ts),
+[src/ui/default_input_event_bindings.ts](src/ui/default_input_event_bindings.ts),
 or within Neuroglancer, press `h` or click on the button labeled `?` in the upper right corner.
 
 - Click on a layer name to toggle its visibility.
@@ -64,7 +64,7 @@ or within Neuroglancer, press `h` or click on the button labeled `?` in the uppe
 - Hover over a segmentation layer name to see the current list of objects shown and to access the opacity sliders.
-- Hover over an image layer name to access the opacity slider and the text editor for modifying the [rendering code](src/neuroglancer/sliceview/image_layer_rendering.md).
+- Hover over an image layer name to access the opacity slider and the text editor for modifying the [rendering code](src/sliceview/image_layer_rendering.md).
 
 # Troubleshooting
 
@@ -111,11 +111,11 @@ The "frontend" UI thread handles user actions and rendering, while the "backend"
 
 # Documentation Index
 
-- [Image Layer Rendering](src/neuroglancer/sliceview/image_layer_rendering.md)
-- [Cross-sectional view implementation architecture](src/neuroglancer/sliceview/README.md)
-- [Compressed segmentation format](src/neuroglancer/sliceview/compressed_segmentation/README.md)
-- [Data chunk management](src/neuroglancer/chunk_manager/)
-- [On-GPU hashing](src/neuroglancer/gpu_hash/)
+- [Image Layer Rendering](src/sliceview/image_layer_rendering.md)
+- [Cross-sectional view implementation architecture](src/sliceview/README.md)
+- [Compressed segmentation format](src/sliceview/compressed_segmentation/README.md)
+- [Data chunk management](src/chunk_manager/)
+- [On-GPU hashing](src/gpu_hash/)
 
 # Building
 
@@ -154,7 +154,7 @@ https://github.com/creationix/nvm
 
    For example,
 
-   `npm test -- --pattern='src/neuroglancer/util/uint64*'`
+   `npm test -- --pattern='src/util/uint64*'`
 
 6. See [package.json](package.json) for other commands available.
 
@@ -171,11 +171,11 @@ There is a Google Group/mailing list for discussion related to Neuroglancer:
   Docker deployment of the [Neuroglancer Python integration](python/README.md).
 - [FZJ-INM1-BDA/neuroglancer-scripts](https://github.com/FZJ-INM1-BDA/neuroglancer-scripts) -
   Scripts for converting the [BigBrain](https://bigbrain.loris.ca) dataset to the
-  Neuroglancer [precomputed data format](src/neuroglancer/datasource/precomputed), which may serve
+  Neuroglancer [precomputed data format](src/datasource/precomputed), which may serve
   as a useful example for converting other datasets.
 - [BigArrays.jl](https://github.com/seung-lab/BigArrays.jl) - Julia interface of neuroglancer precomputed data format.
 - [cloudvolume](https://github.com/seung-lab/cloud-volume) - Python interface of neuroglancer precomputed data format.
-- [multiresolution-mesh-creator](https://github.com/janelia-cosem/multiresolution-mesh-creator) - Python tool for creating [multi-resolution meshes](https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/meshes.md#multi-resolution-mesh-format) from single resolution - or multiscale - meshes.
+- [multiresolution-mesh-creator](https://github.com/janelia-cosem/multiresolution-mesh-creator) - Python tool for creating [multi-resolution meshes](https://github.com/google/neuroglancer/blob/master/src/datasource/precomputed/meshes.md#multi-resolution-mesh-format) from single resolution - or multiscale - meshes.
 - [Igneous](https://github.com/seung-lab/igneous) - Python pipeline for scalable meshing, skeletonizing, downsampling, and management of large 3D images focusing on Neuroglancer Precomputed format.
 
 # Contributing

diff --git a/python/ext/src/compress_segmentation.h b/python/ext/src/compress_segmentation.h
index e436060c1..67453296d 100644
--- a/python/ext/src/compress_segmentation.h
+++ b/python/ext/src/compress_segmentation.h
@@ -15,7 +15,7 @@
  */
 
 // Implements encoding into the compressed segmentation format described at
-// https://github.com/google/neuroglancer/tree/master/src/neuroglancer/sliceview/compressed_segmentation.
+// https://github.com/google/neuroglancer/tree/master/src/sliceview/compressed_segmentation.
 //
 // Only uint32 and uint64 volumes are supported.
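The header comment above points at the compressed segmentation format. As a rough illustration of why that format suits segmentation data, the sketch below shows the per-block palette idea: each block stores its distinct segment IDs once, plus a small bit-packed index per voxel. The 8x8x8 block size, the helper name `block_palette_stats`, and the set of index widths are assumptions made for illustration, not the actual wire layout.

```python
import math
import numpy as np

def block_palette_stats(block: np.ndarray):
    # Each block stores its distinct IDs once (a "lookup table") plus a
    # small index per voxel; few distinct IDs per block => few bits/voxel.
    palette = np.unique(block)
    if len(palette) <= 1:
        bits = 0  # a constant block needs no per-voxel indices at all
    else:
        needed = math.ceil(math.log2(len(palette)))
        bits = next(b for b in (1, 2, 4, 8, 16, 32) if b >= needed)
    return palette, bits

block = np.zeros((8, 8, 8), dtype=np.uint64)  # assumed 8x8x8 block size
block[:4] = 42  # two distinct IDs in this block
palette, bits = block_palette_stats(block)
print(len(palette), bits)  # -> 2 1  (1 bit per voxel instead of 64)
```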
diff --git a/python/neuroglancer/segment_colors.py b/python/neuroglancer/segment_colors.py
index 2751b77c1..3c51e6595 100644
--- a/python/neuroglancer/segment_colors.py
+++ b/python/neuroglancer/segment_colors.py
@@ -17,7 +17,7 @@
 
 def hash_function(state, value):
     """Python implementation of hashCombine() function
-    in src/neuroglancer/gpu_hash/hash_function.ts,
+    in src/gpu_hash/hash_function.ts,
     a modified murmur hash
     """
     k1 = 0xCC9E2D51
@@ -34,7 +34,7 @@ def hash_function(state, value):
 
 def hsv_to_rgb(h, s, v):
     """Convert H,S,V values to RGB values.
-    Python implementation of hsvToRgb in src/neuroglancer/util/colorspace.ts"""
+    Python implementation of hsvToRgb in src/util/colorspace.ts"""
     h *= 6
     hue_index = math.floor(h)
     remainder = h - hue_index
@@ -59,7 +59,7 @@ def hsv_to_rgb(h, s, v):
 def pack_color(rgb_vec):
     """Returns an integer formed by concatenating the channels
     of the input color vector.
-    Python implementation of packColor in src/neuroglancer/util/color.ts
+    Python implementation of packColor in src/util/color.ts
     """
     result = 0
     for i in range(len(rgb_vec)):

diff --git a/src/annotation/user_layer.ts b/src/annotation/user_layer.ts
index e67b863b4..edc9c1fc5 100644
--- a/src/annotation/user_layer.ts
+++ b/src/annotation/user_layer.ts
@@ -799,7 +799,7 @@ class RenderingOptionsTab extends Tab {
     topRow.appendChild(
       makeHelpButton({
         title: "Documentation on annotation rendering",
-        href: "https://github.com/google/neuroglancer/blob/master/src/neuroglancer/annotation/rendering.md",
+        href: "https://github.com/google/neuroglancer/blob/master/src/annotation/rendering.md",
       }),
     );
     element.appendChild(topRow);

diff --git a/src/datasource/precomputed/volume.md b/src/datasource/precomputed/volume.md
index f17715fbd..46b1a54c8 100644
--- a/src/datasource/precomputed/volume.md
+++ b/src/datasource/precomputed/volume.md
@@ -24,7 +24,7 @@ The root value must be a JSON object with the following members:
   permits automatically detecting paths to volumes, meshes, and skeletons.
 - `"type"`: One of `"image"` or `"segmentation"`, specifying the type of the volume.
 - `"data_type"`: A string value equal (case-insensitively) to the name of one of the supported
-  `DataType` values specified in [data_type.ts](/src/neuroglancer/util/data_type.ts). May be one of
+  `DataType` values specified in [data_type.ts](/src/util/data_type.ts). May be one of
   `"uint8"`, `"uint16"`, `"uint32"`, `"uint64"`, or `"float32"`. `"float32"` should only be
   specified for `"image"` volumes.
 - `"num_channels"`: An integer value specifying the number of channels in the volume. Must be `1`
@@ -192,7 +192,7 @@ of the image corresponds to the flattened `[x, y, z]` Fortran-order representati
 ### compressed_segmentation chunk encoding
 
 The subvolume data for the chunk is encoded using the multi-channel
-[compressed segmentation format](/src/neuroglancer/sliceview/compressed_segmentation). The
+[compressed segmentation format](/src/sliceview/compressed_segmentation). The
 `"data_type"` must be either `"uint32"` or `"uint64"`. The compression block size is specified by
 the `"compressed_segmentation_block_size"` property in the `info` JSON file.
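To make the `info` members quoted in the volume.md hunk above concrete, here is a minimal hypothetical fragment, written as a Python dict. Only the members discussed in the hunk are shown; the values, and the shape of the omitted `"scales"` entries, are illustrative assumptions.

```python
# Hypothetical minimal `info` fragment for a compressed segmentation volume.
info = {
    "type": "segmentation",
    "data_type": "uint64",   # must be uint32 or uint64 for this encoding
    "num_channels": 1,
    # Each entry of "scales" (omitted here) would carry, among other things:
    #   "encoding": "compressed_segmentation",
    #   "compressed_segmentation_block_size": [8, 8, 8],
}
```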
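The `hash_function` docstring in the segment_colors.py hunk above calls it a modified murmur hash. The standalone sketch below shows what a murmur-style combine step looks like, assuming the standard MurmurHash3 mixing constants and 32-bit masking; the `hash_combine` name and the two-halves usage for a uint64 segment ID are illustrative assumptions, and the shipped function may differ in detail.

```python
def hash_combine(state: int, value: int) -> int:
    # Murmur3-style mixing; every step is masked back to 32 bits.
    k1, k2 = 0xCC9E2D51, 0x1B873593
    value = (value * k1) & 0xFFFFFFFF
    value = ((value << 15) | (value >> 17)) & 0xFFFFFFFF  # rotate left 15
    value = (value * k2) & 0xFFFFFFFF
    state = (state ^ value) & 0xFFFFFFFF
    state = ((state << 13) | (state >> 19)) & 0xFFFFFFFF  # rotate left 13
    return (state * 5 + 0xE6546B64) & 0xFFFFFFFF

# A uint64 segment ID can be fed through in two 32-bit halves
# (an assumed usage pattern, not the exact Neuroglancer pipeline):
segment_id = 0x0123456789ABCDEF
h = hash_combine(0, segment_id & 0xFFFFFFFF)
h = hash_combine(h, segment_id >> 32)
print(hex(h))
```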
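Similarly, `pack_color`'s docstring says it concatenates the channels of the input color vector into one integer. A self-contained sketch of that packing, assuming 8 bits per channel with the first channel in the highest byte (`pack_color_sketch` is a hypothetical stand-in, not the shipped function):

```python
def pack_color_sketch(rgb_vec):
    # Concatenate channels: 8 bits each, first channel ends up highest.
    result = 0
    for channel in rgb_vec:
        result = (result << 8) | min(255, max(0, round(channel * 255)))
    return result

# Full red, half green, no blue -> 0xFF8000.
assert pack_color_sketch([1.0, 0.5, 0.0]) == 0xFF8000
```

For instance, `hsv_to_rgb(0.0, 1.0, 1.0)` is pure red, `(1.0, 0.0, 0.0)`, which packs to `0xFF0000` under this scheme.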
diff --git a/src/gpu_hash/README.md b/src/gpu_hash/README.md
index be2d1d3d4..96a2b7978 100644
--- a/src/gpu_hash/README.md
+++ b/src/gpu_hash/README.md
@@ -1,6 +1,6 @@
 This directory implements hash functions, hash sets, and hash maps that support efficient use from
 both JavaScript and from WebGL shaders running on the GPU.  These are used to
-perform [pseudo-random color mapping of uint64 object IDs](/src/neuroglancer/segment_color.ts),
+perform [pseudo-random color mapping of uint64 object IDs](/src/segment_color.ts),
 highlighting of a set of object ids, and equivalence maps over object ids.
 
 The design is heavily constrained by the limitations of WebGL 1.0 (OpenGL ES Shading Language 1.0)
 shaders:

diff --git a/src/image_user_layer.ts b/src/image_user_layer.ts
index 8b64fb20f..8e1544a0d 100644
--- a/src/image_user_layer.ts
+++ b/src/image_user_layer.ts
@@ -500,7 +500,7 @@ class RenderingOptionsTab extends Tab {
     topRow.appendChild(
       makeHelpButton({
         title: "Documentation on image layer rendering",
-        href: "https://github.com/google/neuroglancer/blob/master/src/neuroglancer/sliceview/image_layer_rendering.md",
+        href: "https://github.com/google/neuroglancer/blob/master/src/sliceview/image_layer_rendering.md",
       }),
     );

diff --git a/src/mesh/frontend.ts b/src/mesh/frontend.ts
index 9bce833db..18ff98189 100644
--- a/src/mesh/frontend.ts
+++ b/src/mesh/frontend.ts
@@ -79,7 +79,7 @@ const tempMat3 = mat3.create();
 
 // To validate the octrees and to determine the multiscale fragment responsible for each framebuffer
 // location, set `DEBUG_MULTISCALE_FRAGMENTS=true` and also set `DEBUG_PICKING=true` in
-// `src/neuroglancer/object_picking.ts`.
+// `src/object_picking.ts`.
 const DEBUG_MULTISCALE_FRAGMENTS = false;
 
 function copyMeshDataToGpu(

diff --git a/src/single_mesh_user_layer.ts b/src/single_mesh_user_layer.ts
index 48c8ee937..ef50916c0 100644
--- a/src/single_mesh_user_layer.ts
+++ b/src/single_mesh_user_layer.ts
@@ -216,7 +216,7 @@ class DisplayOptionsTab extends Tab {
     topRow.appendChild(
      makeHelpButton({
         title: "Documentation on single mesh layer rendering",
-        href: "https://github.com/google/neuroglancer/blob/master/src/neuroglancer/sliceview/image_layer_rendering.md",
+        href: "https://github.com/google/neuroglancer/blob/master/src/sliceview/image_layer_rendering.md",
       }),
     );

diff --git a/src/ui/segmentation_display_options_tab.ts b/src/ui/segmentation_display_options_tab.ts
index 855fdd5bf..62ec572cb 100644
--- a/src/ui/segmentation_display_options_tab.ts
+++ b/src/ui/segmentation_display_options_tab.ts
@@ -93,7 +93,7 @@ export class DisplayOptionsTab extends Tab {
     topRow.appendChild(
       makeHelpButton({
         title: "Documentation on skeleton rendering",
-        href: "https://github.com/google/neuroglancer/blob/master/src/neuroglancer/sliceview/image_layer_rendering.md",
+        href: "https://github.com/google/neuroglancer/blob/master/src/sliceview/image_layer_rendering.md",
       }),
     );
     parent.appendChild(topRow);

diff --git a/testdata/generate_npy_examples.py b/testdata/generate_npy_examples.py
index 265f4afe4..387395d8c 100755
--- a/testdata/generate_npy_examples.py
+++ b/testdata/generate_npy_examples.py
@@ -15,7 +15,7 @@
 # limitations under the License.
 
 # This file generates npy_test.*.{npy,json} files for use by
-# src/neuroglancer/util/npy.spec.ts.
+# src/util/npy.spec.ts.
 #
 # This should be run from within the testdata/ directory.
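For reference, the test-data generator touched at the end of the patch pairs each `.npy` file with a JSON copy of the same values so that the TypeScript decoder test (`src/util/npy.spec.ts`) can check its parsing against known data. A hypothetical miniature version of that pattern, with the file names and JSON keys invented for illustration:

```python
import json
import numpy as np

# Save an array in .npy form plus a JSON copy of the same values, so a
# decoder test can parse the .npy file and compare against the JSON.
a = np.arange(24, dtype=np.uint16).reshape(2, 3, 4)
np.save("npy_test.uint16.npy", a)
with open("npy_test.uint16.json", "w") as f:
    json.dump(
        {"dataType": "uint16", "shape": list(a.shape), "data": a.ravel().tolist()},
        f,
    )
```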