diff --git a/ci/build_docs.sh b/ci/build_docs.sh
index 46a6005b8dc..938a2b55541 100755
--- a/ci/build_docs.sh
+++ b/ci/build_docs.sh
@@ -52,13 +52,6 @@ rapids-mamba-retry install \
 
 export RAPIDS_DOCS_DIR="$(mktemp -d)"
 
-# for PROJECT in libwholegraph; do
-#   rapids-logger "Download ${PROJECT} xml_tar"
-#   TMP_DIR=$(mktemp -d)
-#   export XML_DIR_${PROJECT^^}="$TMP_DIR"
-#   curl "https://d1664dvumjb44w.cloudfront.net/${PROJECT}/xml_tar/${RAPIDS_VERSION_NUMBER}/xml.tar.gz" | tar -xzf - -C "${TMP_DIR}"
-# done
-
 rapids-logger "Build CPP docs"
 pushd cpp/doxygen
 doxygen Doxyfile
@@ -67,14 +60,4 @@ mkdir -p "${RAPIDS_DOCS_DIR}/libcugraph/xml_tar"
 tar -czf "${RAPIDS_DOCS_DIR}/libcugraph/xml_tar"/xml.tar.gz -C xml .
 popd
 
-rapids-logger "Build Python docs"
-pushd docs/cugraph
-# Ensure cugraph is importable, since sphinx does not report details about this
-# type of failure well.
-python -c "import cugraph; print(f'Using cugraph: {cugraph}')"
-sphinx-build -b dirhtml source _html
-mkdir -p "${RAPIDS_DOCS_DIR}/cugraph/html"
-mv _html/* "${RAPIDS_DOCS_DIR}/cugraph/html"
-popd
-
 rapids-upload-docs
diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml
index 9de2c764924..143759b60f0 100644
--- a/conda/environments/all_cuda-118_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -8,6 +8,7 @@ channels:
 - nvidia
 dependencies:
 - aiohttp
+- breathe>=4.35.0
 - c-compiler
 - certifi
 - cmake>=3.26.4,!=3.30.0
@@ -72,6 +73,4 @@ dependencies:
 - ucx-proc=*=gpu
 - ucx-py==0.42.*,>=0.0.0a0
 - wheel
-- pip:
-  - breathe>=4.35.0
 name: all_cuda-118_arch-x86_64
diff --git a/conda/environments/all_cuda-125_arch-x86_64.yaml b/conda/environments/all_cuda-125_arch-x86_64.yaml
index 9538dc2f36b..bd59b1f3506 100644
--- a/conda/environments/all_cuda-125_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-125_arch-x86_64.yaml
@@ -8,6 +8,7 @@ channels:
 - nvidia
 dependencies:
 - aiohttp
+- breathe>=4.35.0
 - c-compiler
 - certifi
 - cmake>=3.26.4,!=3.30.0
@@ -77,6 +78,4 @@ dependencies:
 - ucx-proc=*=gpu
 - ucx-py==0.42.*,>=0.0.0a0
 - wheel
-- pip:
-  - breathe>=4.35.0
 name: all_cuda-125_arch-x86_64
diff --git a/cpp/CMakeLists.txt b/cpp/CMakeLists.txt
index 8ecab358dd0..ad30b3769d7 100644
--- a/cpp/CMakeLists.txt
+++ b/cpp/CMakeLists.txt
@@ -488,6 +488,7 @@ target_link_libraries(cugraph
         ${COMPILED_RAFT_LIB}
         cuco::cuco
         rmm::rmm_logger_impl
+        raft::raft_logger_impl
 )
 
 ################################################################################
diff --git a/cpp/tests/CMakeLists.txt b/cpp/tests/CMakeLists.txt
index 44963f91515..4cc9f2d94bf 100644
--- a/cpp/tests/CMakeLists.txt
+++ b/cpp/tests/CMakeLists.txt
@@ -169,6 +169,7 @@ function(ConfigureTest CMAKE_TEST_NAME)
       cugraphtestutil
       GTest::gtest
       GTest::gtest_main
+      test_logger_impls
   )
   set_target_properties(
     ${CMAKE_TEST_NAME}
@@ -199,6 +200,7 @@ function(ConfigureTestMG CMAKE_TEST_NAME)
       GTest::gtest_main
       NCCL::NCCL
       MPI::MPI_CXX
+      test_logger_impls
   )
   set_target_properties(
     ${CMAKE_TEST_NAME}
@@ -250,6 +252,7 @@ function(ConfigureCTest CMAKE_TEST_NAME)
       cugraph_c_testutil
       GTest::gtest
       GTest::gtest_main
+      test_logger_impls
   )
   set_target_properties(
     ${CMAKE_TEST_NAME}
@@ -282,6 +285,7 @@ function(ConfigureCTestMG CMAKE_TEST_NAME)
       GTest::gtest_main
       NCCL::NCCL
       MPI::MPI_CXX
+      test_logger_impls
   )
   set_target_properties(
     ${CMAKE_TEST_NAME}
@@ -310,6 +314,10 @@ function(ConfigureCTestMG CMAKE_TEST_NAME)
 endfunction()
 
+add_library(test_logger_impls OBJECT)
+target_link_libraries(test_logger_impls PRIVATE raft::raft_logger_impl)
+
+
 
 ###################################################################################################
 ### test sources ##################################################################################
 ###################################################################################################
diff --git a/dependencies.yaml b/dependencies.yaml
index 7a3c86f3e45..7b9d99f05de 100755
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -368,6 +368,7 @@ dependencies:
     common:
       - output_types: [conda]
         packages:
+          - breathe>=4.35.0
           - doxygen
           - graphviz
           - ipython
@@ -379,9 +380,6 @@ dependencies:
           - sphinx-markdown-tables
           - sphinx
           - sphinxcontrib-websupport
-          - pip:
-              # Need new enough breathe
-              - breathe>=4.35.0
   py_version:
     specific:
       - output_types: [conda]
diff --git a/docs/cugraph/Makefile b/docs/cugraph/Makefile
deleted file mode 100644
index ac16367ef52..00000000000
--- a/docs/cugraph/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    = "-v"
-SPHINXBUILD   = sphinx-build
-SPHINXPROJ    = cugraph
-SOURCEDIR     = source
-BUILDDIR      = build
-IMGDIR        = images
-
-gen_doc_dirs = build source/api_docs/api
-# Put it first so that "make" without argument is like "make help".
-help:
-	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile clean
-
-# clean to remove all the generated documentation files in build and source
-clean:
-	rm -rf $(gen_doc_dirs)
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
-	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/cugraph/README.md b/docs/cugraph/README.md
deleted file mode 100644
index 970a03a040c..00000000000
--- a/docs/cugraph/README.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Building Documentation
-
-All prerequisite for building docs are in the cugraph development conda environment.
-[See build instructions](../../SOURCEBUILD.md) on how to create the development conda environment
-
-## Steps to follow:
-
-In order to build the docs, we need the conda dev environment from cugraph and we need to build cugraph from source.
-
-1. Create a conda env and build cugraph from source. The dependencies to build rapids from source are installed in that conda environment, and then rapids is built and installed into the same environment.
-
-2. Once cugraph is built from source, navigate to `../docs/cugraph/`. If you have your documentation written and want to turn it into HTML, run makefile:
-
-
-```bash
-# most be in the /docs/cugraph directory
-make html
-```
-
-This should run Sphinx in your shell, and outputs to `build/html/index.html`
-
-
-## View docs web page by opening HTML in browser:
-
-First navigate to `/build/html/` folder, and then run the following command:
-
-```bash
-python -m http.server
-```
-Then, navigate a web browser to the IP address or hostname of the host machine at port 8000:
-
-```
-https://:8000
-```
-Now you can check if your docs edits formatted correctly, and read well.
diff --git a/docs/cugraph/make.bat b/docs/cugraph/make.bat deleted file mode 100644 index 807e0de8507..00000000000 --- a/docs/cugraph/make.bat +++ /dev/null @@ -1,36 +0,0 @@ -@ECHO OFF - -pushd %~dp0 - -REM Command file for Sphinx documentation - -if "%SPHINXBUILD%" == "" ( - set SPHINXBUILD=sphinx-build -) -set SOURCEDIR=source -set BUILDDIR=build -set SPHINXPROJ=cuGraph - -if "%1" == "" goto help - -%SPHINXBUILD% >NUL 2>NUL -if errorlevel 9009 ( - echo. - echo.The 'sphinx-build' command was not found. Make sure you have Sphinx - echo.installed, then set the SPHINXBUILD environment variable to point - echo.to the full path of the 'sphinx-build' executable. Alternatively you - echo.may add the Sphinx directory to PATH. - echo. - echo.If you don't have Sphinx installed, grab it from - echo.http://sphinx-doc.org/ - exit /b 1 -) - -%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% -goto end - -:help -%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% - -:end -popd diff --git a/docs/cugraph/source/_static/EMPTY b/docs/cugraph/source/_static/EMPTY deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/docs/cugraph/source/_static/bc_benchmark.png b/docs/cugraph/source/_static/bc_benchmark.png deleted file mode 100644 index 9e385c97e99..00000000000 Binary files a/docs/cugraph/source/_static/bc_benchmark.png and /dev/null differ diff --git a/docs/cugraph/source/_static/colab.png b/docs/cugraph/source/_static/colab.png deleted file mode 100644 index c4c3f5b46e1..00000000000 Binary files a/docs/cugraph/source/_static/colab.png and /dev/null differ diff --git a/docs/cugraph/source/_static/nxcg-execution-diagram.jpg b/docs/cugraph/source/_static/nxcg-execution-diagram.jpg deleted file mode 100644 index 48136289af9..00000000000 Binary files a/docs/cugraph/source/_static/nxcg-execution-diagram.jpg and /dev/null differ diff --git a/docs/cugraph/source/_static/references.css b/docs/cugraph/source/_static/references.css deleted file mode 100644 index d1f647233a8..00000000000 --- a/docs/cugraph/source/_static/references.css +++ /dev/null @@ -1,23 +0,0 @@ - -/* Fix references to not look like parameters */ -dl.citation > dt.label { - display: unset !important; - float: left !important; - border: unset !important; - background: unset !important; - padding: unset !important; - margin: unset !important; - font-size: unset !important; - line-height: unset !important; - padding-right: 0.5rem !important; -} - -/* Add opening bracket */ -dl.citation > dt.label > span::before { - content: "["; -} - -/* Add closing bracket */ -dl.citation > dt.label > span::after { - content: "]"; -} diff --git a/docs/cugraph/source/api_docs/cugraph-dgl/cugraph_dgl.rst b/docs/cugraph/source/api_docs/cugraph-dgl/cugraph_dgl.rst deleted file mode 100644 index 4ffecd8d042..00000000000 --- a/docs/cugraph/source/api_docs/cugraph-dgl/cugraph_dgl.rst +++ /dev/null @@ -1,15 +0,0 @@ -~~~~~~~~~~~~~~~~~~~~~~~~~~ -cugraph-dgl API Reference -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -cugraph-dgl - -.. currentmodule:: cugraph_dgl - -Methods -------- -.. 
autosummary:: - :toctree: ../api/cugraph-dgl/ - - convert.cugraph_storage_from_heterograph - cugraph_storage.CuGraphStorage diff --git a/docs/cugraph/source/api_docs/cugraph-pyg/cugraph_pyg.rst b/docs/cugraph/source/api_docs/cugraph-pyg/cugraph_pyg.rst deleted file mode 100644 index d2b1d124ccb..00000000000 --- a/docs/cugraph/source/api_docs/cugraph-pyg/cugraph_pyg.rst +++ /dev/null @@ -1,43 +0,0 @@ -~~~~~~~~~~~~~~~~~~~~~~~~~ -cugraph-pyg API Reference -~~~~~~~~~~~~~~~~~~~~~~~~~ - -cugraph-pyg - -.. currentmodule:: cugraph_pyg - -Graph Storage -------------- -.. autosummary:: - :toctree: ../api/cugraph-pyg/ - - cugraph_pyg.data.dask_graph_store.DaskGraphStore - cugraph_pyg.data.graph_store.GraphStore - -Feature Storage ---------------- -.. autosummary:: - :toctree: ../api/cugraph-pyg/ - - cugraph_pyg.data.feature_store.TensorDictFeatureStore - cugraph_pyg.data.feature_store.WholeFeatureStore - -Data Loaders ------------- -.. autosummary:: - :toctree: ../api/cugraph-pyg/ - - cugraph_pyg.loader.dask_node_loader.DaskNeighborLoader - cugraph_pyg.loader.dask_node_loader.BulkSampleLoader - cugraph_pyg.loader.node_loader.NodeLoader - cugraph_pyg.loader.neighbor_loader.NeighborLoader - -Samplers --------- -.. autosummary:: - :toctree: ../api/cugraph-pyg/ - - cugraph_pyg.sampler.sampler.BaseSampler - cugraph_pyg.sampler.sampler.SampleReader - cugraph_pyg.sampler.sampler.HomogeneousSampleReader - cugraph_pyg.sampler.sampler.SampleIterator diff --git a/docs/cugraph/source/api_docs/cugraph/centrality.rst b/docs/cugraph/source/api_docs/cugraph/centrality.rst deleted file mode 100644 index 344c95195b7..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/centrality.rst +++ /dev/null @@ -1,71 +0,0 @@ -========== -Centrality -========== -.. currentmodule:: cugraph - - - -Betweenness Centrality ----------------------- -single-GPU -^^^^^^^^^^ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.centrality.betweenness_centrality - cugraph.centrality.edge_betweenness_centrality - -multi-GPU -^^^^^^^^^^ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.dask.centrality.betweenness_centrality - - - -Katz Centrality ---------------- -single-GPU -^^^^^^^^^^ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.centrality.katz_centrality - -multi-GPU -^^^^^^^^^^ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.dask.centrality.katz_centrality.katz_centrality - - -Degree Centrality ------------------ -single-GPU -^^^^^^^^^^ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.centrality.degree_centrality - -multi-GPU -^^^^^^^^^^ - - -Eigenvector Centrality ----------------------- -single-GPU -^^^^^^^^^^ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.centrality.eigenvector_centrality - -multi-GPU -^^^^^^^^^^ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.dask.centrality.eigenvector_centrality.eigenvector_centrality diff --git a/docs/cugraph/source/api_docs/cugraph/community.rst b/docs/cugraph/source/api_docs/cugraph/community.rst deleted file mode 100644 index acbaa086f9a..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/community.rst +++ /dev/null @@ -1,80 +0,0 @@ -========= -Community -========= -.. currentmodule:: cugraph - - - -EgoNet ------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.batched_ego_graphs - cugraph.ego_graph - -Ensemble clustering for graphs (ECG) ------------------------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.ecg - - -K-Truss -------- -.. 
autosummary:: - :toctree: ../api/cugraph/ - - cugraph.k_truss - cugraph.ktruss_subgraph - -Leiden ------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.leiden - - -Louvain -------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.louvain - - -Louvain (MG) ------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.dask.community.louvain.louvain - -Spectral Clustering -------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.analyzeClustering_edge_cut - cugraph.analyzeClustering_modularity - cugraph.analyzeClustering_ratio_cut - cugraph.spectralBalancedCutClustering - cugraph.spectralModularityMaximizationClustering - - -Subgraph Extraction -------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.subgraph - - -Triangle Counting ------------------ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.triangle_count diff --git a/docs/cugraph/source/api_docs/cugraph/components.rst b/docs/cugraph/source/api_docs/cugraph/components.rst deleted file mode 100644 index e61291fccf0..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/components.rst +++ /dev/null @@ -1,22 +0,0 @@ -========== -Components -========== -.. currentmodule:: cugraph - - -Connected Components --------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.connected_components - cugraph.strongly_connected_components - cugraph.weakly_connected_components - - -Connected Components (MG) -------------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.dask.components.connectivity.weakly_connected_components diff --git a/docs/cugraph/source/api_docs/cugraph/cores.rst b/docs/cugraph/source/api_docs/cugraph/cores.rst deleted file mode 100644 index 9d274d1c484..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/cores.rst +++ /dev/null @@ -1,21 +0,0 @@ -===== -Cores -===== -.. currentmodule:: cugraph - - - -Core Number ------------ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.core_number - - -K-Core ------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.k_core diff --git a/docs/cugraph/source/api_docs/cugraph/dask-cugraph.rst b/docs/cugraph/source/api_docs/cugraph/dask-cugraph.rst deleted file mode 100644 index f5132dd658c..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/dask-cugraph.rst +++ /dev/null @@ -1,78 +0,0 @@ -~~~~~~~~~~~~~~~~~~~~~~ -Multi-GPU with cuGraph -~~~~~~~~~~~~~~~~~~~~~~ - -cuGraph supports multi-GPU leveraging `Dask `_. Dask is a flexible library for parallel computing in Python which makes scaling out your workflow smooth and simple. cuGraph also uses other Dask-based RAPIDS projects such as `dask-cuda `_. - -Distributed graph analytics -=========================== - -The current solution is able to scale across multiple GPUs on multiple machines. Distributing the graph and computation lets you analyze datasets far larger than a single GPU’s memory. - -With cuGraph and Dask, whether you’re using a single NVIDIA GPU or multiple nodes, your RAPIDS workflow will run smoothly, intelligently distributing the workload across the available resources. - -If your graph comfortably fits in memory on a single GPU, you would want to use the single-GPU version of cuGraph. If you want to distribute your workflow across multiple GPUs and have more data than you can fit in memory on a single GPU, you would want to use cuGraph's multi-GPU features. - -Example -======== - -.. 
code-block:: python - - import dask_cudf - from dask.distributed import Client - from dask_cuda import LocalCUDACluster - - import cugraph - import cugraph.dask as dask_cugraph - import cugraph.dask.comms.comms as Comms - from cugraph.generators.rmat import rmat - - input_data_path = "input_data.csv" - - # cluster initialization - cluster = LocalCUDACluster() - client = Client(cluster) - Comms.initialize(p2p=True) - - # helper function to generate random input data - input_data = rmat( - scale=5, - num_edges=400, - a=0.30, - b=0.65, - c=0.05, - seed=456, - clip_and_flip=False, - scramble_vertex_ids=False, - create_using=None, - ) - input_data.to_csv(input_data_path, index=False) - - # helper function to set the reader chunk size to automatically get one partition per GPU - chunksize = dask_cugraph.get_chunksize(input_data_path) - - # multi-GPU CSV reader - e_list = dask_cudf.read_csv( - input_data_path, - blocksize=chunksize, - names=['src', 'dst'], - dtype=['int32', 'int32'], - ) - - # create graph from input data - G = cugraph.Graph(directed=True) - G.from_dask_cudf_edgelist(e_list, source='src', destination='dst') - - # run PageRank - pr_df = dask_cugraph.pagerank(G, tol=1e-4) - - # need to call compute to generate results - pr_df.compute() - - # cluster clean up - Comms.destroy() - client.close() - cluster.close() - - -| diff --git a/docs/cugraph/source/api_docs/cugraph/generators.rst b/docs/cugraph/source/api_docs/cugraph/generators.rst deleted file mode 100644 index f5180a172cd..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/generators.rst +++ /dev/null @@ -1,13 +0,0 @@ -========== -Generators -========== -.. currentmodule:: cugraph - - - -RMAT ----- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.generators.rmat diff --git a/docs/cugraph/source/api_docs/cugraph/graph_implementation.rst b/docs/cugraph/source/api_docs/cugraph/graph_implementation.rst deleted file mode 100644 index ae14306ce27..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/graph_implementation.rst +++ /dev/null @@ -1,34 +0,0 @@ -==================== -Graph Implementation -==================== -.. currentmodule:: cugraph.structure.graph_implementation.simpleGraphImpl - - -Graph Implementation --------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - view_edge_list - delete_edge_list - view_adj_list - view_transposed_adj_list - delete_adj_list - - enable_batch - - get_two_hop_neighbors - number_of_vertices - number_of_nodes - number_of_edges - in_degree - out_degree - degree - degrees - has_edge - has_node - has_self_loop - edges - nodes - neighbors - vertex_column_size diff --git a/docs/cugraph/source/api_docs/cugraph/helper_functions.rst b/docs/cugraph/source/api_docs/cugraph/helper_functions.rst deleted file mode 100644 index e7091c50c44..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/helper_functions.rst +++ /dev/null @@ -1,22 +0,0 @@ -======================== -DASK MG Helper functions -======================== -.. currentmodule:: cugraph - - -Methods -------- -.. 
autosummary:: - :toctree: ../api/cugraph/ - - cugraph.dask.comms.comms.initialize - cugraph.dask.comms.comms.destroy - cugraph.dask.comms.comms.is_initialized - cugraph.dask.comms.comms.get_comms - cugraph.dask.comms.comms.get_workers - cugraph.dask.comms.comms.get_session_id - cugraph.dask.comms.comms.get_2D_partition - cugraph.dask.comms.comms.get_default_handle - cugraph.dask.comms.comms.get_handle - cugraph.dask.comms.comms.get_worker_id - cugraph.dask.common.read_utils.get_chunksize diff --git a/docs/cugraph/source/api_docs/cugraph/index.rst b/docs/cugraph/source/api_docs/cugraph/index.rst deleted file mode 100644 index 20b63d50ae6..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/index.rst +++ /dev/null @@ -1,25 +0,0 @@ -~~~~~~~~~~~~~~~~~~~~~ -cugraph API Reference -~~~~~~~~~~~~~~~~~~~~~ - -.. toctree:: - :maxdepth: 2 - :caption: API Documentation - - structure - graph_implementation - property_graph - centrality - community - components - cores - layout - linear_assignment - link_analysis - link_prediction - sampling - traversal - tree - generators - helper_functions - dask-cugraph.rst diff --git a/docs/cugraph/source/api_docs/cugraph/layout.rst b/docs/cugraph/source/api_docs/cugraph/layout.rst deleted file mode 100644 index d416676a62e..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/layout.rst +++ /dev/null @@ -1,12 +0,0 @@ -====== -Layout -====== -.. currentmodule:: cugraph - - -Force Atlas 2 -------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.force_atlas2 diff --git a/docs/cugraph/source/api_docs/cugraph/linear_assignment.rst b/docs/cugraph/source/api_docs/cugraph/linear_assignment.rst deleted file mode 100644 index e0b0b4d11bd..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/linear_assignment.rst +++ /dev/null @@ -1,13 +0,0 @@ -================= -Linear Assignment -================= -.. currentmodule:: cugraph - - -Hungarian ---------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.hungarian - cugraph.dense_hungarian diff --git a/docs/cugraph/source/api_docs/cugraph/link_analysis.rst b/docs/cugraph/source/api_docs/cugraph/link_analysis.rst deleted file mode 100644 index 698880c1b9e..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/link_analysis.rst +++ /dev/null @@ -1,34 +0,0 @@ -============= -Link Analysis -============= -.. currentmodule:: cugraph - - -HITS ----- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.hits - -HITS (MG) ---------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.dask.link_analysis.hits.hits - - -Pagerank --------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.pagerank - -Pagerank (MG) -------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.dask.link_analysis.pagerank.pagerank diff --git a/docs/cugraph/source/api_docs/cugraph/link_prediction.rst b/docs/cugraph/source/api_docs/cugraph/link_prediction.rst deleted file mode 100644 index 3d2f9562e32..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/link_prediction.rst +++ /dev/null @@ -1,31 +0,0 @@ -=============== -Link Prediction -=============== -.. currentmodule:: cugraph - - -Jaccard Coefficient -------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.jaccard - cugraph.jaccard_coefficient - - -Overlap Coefficient -------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.overlap - cugraph.overlap_coefficient - - -Sorensen Coefficient --------------------- -.. 
autosummary:: - :toctree: ../api/cugraph/ - - cugraph.sorensen - cugraph.sorensen_coefficient diff --git a/docs/cugraph/source/api_docs/cugraph/property_graph.rst b/docs/cugraph/source/api_docs/cugraph/property_graph.rst deleted file mode 100644 index 672aa7dae2d..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/property_graph.rst +++ /dev/null @@ -1,29 +0,0 @@ -==================== -Property Graph -==================== -.. currentmodule:: cugraph.experimental - - -Property Graph -------------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - PropertySelection - PropertyGraph - PropertyGraph.add_edge_data - PropertyGraph.add_vertex_data - PropertyGraph.annotate_dataframe - PropertyGraph.edge_props_to_graph - PropertyGraph.extract_subgraph - PropertyGraph.get_edge_data - PropertyGraph.get_num_edges - PropertyGraph.get_num_vertices - PropertyGraph.get_vertex_data - PropertyGraph.get_vertices - PropertyGraph.has_duplicate_edges - PropertyGraph.is_multigraph - PropertyGraph.renumber_edges_by_type - PropertyGraph.renumber_vertices_by_type - PropertyGraph.select_edges - PropertyGraph.select_vertices diff --git a/docs/cugraph/source/api_docs/cugraph/sampling.rst b/docs/cugraph/source/api_docs/cugraph/sampling.rst deleted file mode 100644 index 52004a5b1cc..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/sampling.rst +++ /dev/null @@ -1,22 +0,0 @@ -======== -Sampling -======== -.. currentmodule:: cugraph - - - -Random Walks ------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.random_walks - cugraph.ego_graph - cugraph.uniform_neighbor_sample - -Node2Vec ---------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.node2vec diff --git a/docs/cugraph/source/api_docs/cugraph/structure.rst b/docs/cugraph/source/api_docs/cugraph/structure.rst deleted file mode 100644 index 6369e1bb3fd..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/structure.rst +++ /dev/null @@ -1,99 +0,0 @@ -============= -Graph Classes -============= -.. currentmodule:: cugraph - -Constructors ------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - Graph - MultiGraph - - -Adding Data ------------ -.. autosummary:: - :toctree: ../api/cugraph/ - - Graph.from_cudf_adjlist - Graph.from_cudf_edgelist - Graph.from_dask_cudf_edgelist - Graph.from_pandas_adjacency - Graph.from_pandas_edgelist - Graph.from_numpy_array - Graph.from_numpy_matrix - Graph.add_internal_vertex_id - Graph.add_nodes_from - Graph.clear - Graph.unrenumber - -Checks ------- -.. autosummary:: - :toctree: ../api/cugraph/ - - Graph.has_isolated_vertices - Graph.is_bipartite - Graph.is_directed - Graph.is_multigraph - Graph.is_multipartite - Graph.is_renumbered - Graph.is_weighted - Graph.lookup_internal_vertex_id - Graph.to_directed - Graph.to_undirected - - -Symmetrize ----------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.symmetrize - cugraph.symmetrize_ddf - cugraph.symmetrize_df - - -Conversion from Other Formats ------------------------------ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.from_adjlist - cugraph.from_cudf_edgelist - cugraph.from_edgelist - cugraph.from_numpy_array - cugraph.from_numpy_matrix - cugraph.from_pandas_adjacency - cugraph.from_pandas_edgelist - cugraph.to_numpy_array - cugraph.to_numpy_matrix - cugraph.to_pandas_adjacency - cugraph.to_pandas_edgelist - -NumberMap ------------------------------ -.. 
autosummary:: - :toctree: ../api/cugraph/ - - cugraph.structure.NumberMap - cugraph.structure.NumberMap.from_internal_vertex_id - cugraph.structure.NumberMap.to_internal_vertex_id - cugraph.structure.NumberMap.add_internal_vertex_id - cugraph.structure.NumberMap.compute_vals - cugraph.structure.NumberMap.compute_vals_types - cugraph.structure.NumberMap.generate_unused_column_name - cugraph.structure.NumberMap.renumber - cugraph.structure.NumberMap.renumber_and_segment - cugraph.structure.NumberMap.set_renumbered_col_names - cugraph.structure.NumberMap.unrenumber - cugraph.structure.NumberMap.vertex_column_size - -Other ------------------------------ -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.hypergraph diff --git a/docs/cugraph/source/api_docs/cugraph/traversal.rst b/docs/cugraph/source/api_docs/cugraph/traversal.rst deleted file mode 100644 index 31296f3b850..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/traversal.rst +++ /dev/null @@ -1,37 +0,0 @@ -========= -Traversal -========= -.. currentmodule:: cugraph - - -Breadth-first-search --------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.bfs - cugraph.bfs_edges - -Breadth-first-search (MG) -------------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.dask.traversal.bfs.bfs - -Single-source-shortest-path ---------------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.filter_unreachable - cugraph.shortest_path - cugraph.shortest_path_length - cugraph.sssp - -Single-source-shortest-path (MG) --------------------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.dask.traversal.sssp.sssp diff --git a/docs/cugraph/source/api_docs/cugraph/tree.rst b/docs/cugraph/source/api_docs/cugraph/tree.rst deleted file mode 100644 index d8a89046aa3..00000000000 --- a/docs/cugraph/source/api_docs/cugraph/tree.rst +++ /dev/null @@ -1,20 +0,0 @@ -==== -Tree -==== -.. currentmodule:: cugraph - - -Minimum Spanning Tree ---------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.tree.minimum_spanning_tree.minimum_spanning_tree - - -Maximum Spanning Tree ---------------------- -.. autosummary:: - :toctree: ../api/cugraph/ - - cugraph.tree.minimum_spanning_tree.maximum_spanning_tree diff --git a/docs/cugraph/source/api_docs/cugraph_c/centrality.rst b/docs/cugraph/source/api_docs/cugraph_c/centrality.rst deleted file mode 100644 index 3bea608fd5a..00000000000 --- a/docs/cugraph/source/api_docs/cugraph_c/centrality.rst +++ /dev/null @@ -1,50 +0,0 @@ -Centrality -========== - -PageRank --------- -.. doxygenfunction:: cugraph_pagerank - :project: libcugraph - -.. doxygenfunction:: cugraph_pagerank_allow_nonconvergence - :project: libcugraph - -Personalized PageRank ---------------------- -.. doxygenfunction:: cugraph_personalized_pagerank - :project: libcugraph - -.. doxygenfunction:: cugraph_personalized_pagerank_allow_nonconvergence - :project: libcugraph - -Eigenvector Centrality ----------------------- -.. doxygenfunction:: cugraph_eigenvector_centrality - :project: libcugraph - -Katz Centrality ---------------- -.. doxygenfunction:: cugraph_katz_centrality - :project: libcugraph - -Betweenness Centrality ----------------------- -.. doxygenfunction:: cugraph_betweenness_centrality - :project: libcugraph - -Edge Betweenness Centrality ---------------------------- -.. doxygenfunction:: cugraph_edge_betweenness_centrality - :project: libcugraph - -HITS Centrality ---------------- -.. 
doxygenfunction:: cugraph_hits - :project: libcugraph - -Centrality Support Functions ----------------------------- - .. doxygengroup:: centrality - :project: libcugraph - :members: - :content-only: diff --git a/docs/cugraph/source/api_docs/cugraph_c/community.rst b/docs/cugraph/source/api_docs/cugraph_c/community.rst deleted file mode 100644 index 6b500a972a7..00000000000 --- a/docs/cugraph/source/api_docs/cugraph_c/community.rst +++ /dev/null @@ -1,56 +0,0 @@ -Community -========= - -Triangle Counting ------------------ -.. doxygenfunction:: cugraph_triangle_count - :project: libcugraph - -Louvain -------- -.. doxygenfunction:: cugraph_louvain - :project: libcugraph - -Leiden ------- -.. doxygenfunction:: cugraph_leiden - :project: libcugraph - -ECG ---- -.. doxygenfunction:: cugraph_ecg - :project: libcugraph - -Extract Egonet --------------- -.. doxygenfunction:: cugraph_extract_ego - :project: libcugraph - -Balanced Cut ------------- -.. doxygenfunction:: cugraph_balanced_cut_clustering - :project: libcugraph - -Spectral Clustering - Modularity Maximization ---------------------------------------------- -.. doxygenfunction:: cugraph_spectral_modularity_maximization - :project: libcugraph - -.. doxygenfunction:: cugraph_analyze_clustering_modularity - :project: libcugraph - -Spectral Clustering - Edge Cut ------------------------------- -.. doxygenfunction:: cugraph_analyze_clustering_edge_cut - :project: libcugraph - -.. doxygenfunction:: cugraph_analyze_clustering_ratio_cut - :project: libcugraph - - -Community Support Functions ---------------------------- - .. doxygengroup:: community - :project: libcugraph - :members: - :content-only: diff --git a/docs/cugraph/source/api_docs/cugraph_c/core.rst b/docs/cugraph/source/api_docs/cugraph_c/core.rst deleted file mode 100644 index 34456c65e43..00000000000 --- a/docs/cugraph/source/api_docs/cugraph_c/core.rst +++ /dev/null @@ -1,21 +0,0 @@ -Core -==== - - -Core Number ------------ -.. doxygenfunction:: cugraph_core_number - :project: libcugraph - -K-Core ------- -.. doxygenfunction:: cugraph_k_core - :project: libcugraph - - -Core Support Functions ----------------------- - .. doxygengroup:: core - :project: libcugraph - :members: - :content-only: diff --git a/docs/cugraph/source/api_docs/cugraph_c/index.rst b/docs/cugraph/source/api_docs/cugraph_c/index.rst deleted file mode 100644 index 3dd37dbc374..00000000000 --- a/docs/cugraph/source/api_docs/cugraph_c/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -=========================== -cuGraph C API documentation -=========================== - - -.. toctree:: - :maxdepth: 3 - :caption: API Documentation - - centrality.rst - community.rst - core.rst - labeling.rst - sampling.rst - similarity.rst - traversal.rst diff --git a/docs/cugraph/source/api_docs/cugraph_c/labeling.rst b/docs/cugraph/source/api_docs/cugraph_c/labeling.rst deleted file mode 100644 index 2b709ebd343..00000000000 --- a/docs/cugraph/source/api_docs/cugraph_c/labeling.rst +++ /dev/null @@ -1,20 +0,0 @@ -Components -========== - - -Weakly Connected Components ---------------------------- -.. doxygenfunction:: cugraph_weakly_connected_components - :project: libcugraph - -Strongly Connected Components ------------------------------ -.. doxygenfunction:: cugraph_strongly_connected_components - :project: libcugraph - -Labeling Support Functions --------------------------- - .. 
doxygengroup:: labeling - :project: libcugraph - :members: - :content-only: diff --git a/docs/cugraph/source/api_docs/cugraph_c/sampling.rst b/docs/cugraph/source/api_docs/cugraph_c/sampling.rst deleted file mode 100644 index 3d5af713c33..00000000000 --- a/docs/cugraph/source/api_docs/cugraph_c/sampling.rst +++ /dev/null @@ -1,34 +0,0 @@ -Sampling -======== - -Uniform Random Walks --------------------- -.. doxygenfunction:: cugraph_uniform_random_walks - :project: libcugraph - -Biased Random Walks -------------------- -.. doxygenfunction:: cugraph_biased_random_walks - :project: libcugraph - -Random Walks via Node2Vec -------------------------- -.. doxygenfunction:: cugraph_node2vec_random_walks - :project: libcugraph - -Node2Vec --------- -.. doxygenfunction:: cugraph_node2vec - :project: libcugraph - -Uniform Neighbor Sampling -------------------------- -.. doxygenfunction:: cugraph_uniform_neighbor_sample - :project: libcugraph - -Sampling Support Functions --------------------------- -.. doxygengroup:: samplingC - :project: libcugraph - :members: - :content-only: diff --git a/docs/cugraph/source/api_docs/cugraph_c/similarity.rst b/docs/cugraph/source/api_docs/cugraph_c/similarity.rst deleted file mode 100644 index 75735925e4d..00000000000 --- a/docs/cugraph/source/api_docs/cugraph_c/similarity.rst +++ /dev/null @@ -1,25 +0,0 @@ -Similarity -========== - - -Jaccard -------- -.. doxygenfunction:: cugraph_jaccard_coefficients - :project: libcugraph - -Sorensen --------- -.. doxygenfunction:: cugraph_sorensen_coefficients - :project: libcugraph - -Overlap -------- -.. doxygenfunction:: cugraph_overlap_coefficients - :project: libcugraph - -Similarty Support Functions ---------------------------- -.. doxygengroup:: similarity - :project: libcugraph - :members: - :content-only: diff --git a/docs/cugraph/source/api_docs/cugraph_c/traversal.rst b/docs/cugraph/source/api_docs/cugraph_c/traversal.rst deleted file mode 100644 index bde30f4fa6e..00000000000 --- a/docs/cugraph/source/api_docs/cugraph_c/traversal.rst +++ /dev/null @@ -1,30 +0,0 @@ -Traversal -========== - - -Breadth First Search (BFS) --------------------------- -.. doxygenfunction:: cugraph_bfs - :project: libcugraph - -Single-Source Shortest-Path (SSSP) ----------------------------------- -.. doxygenfunction:: cugraph_sssp - :project: libcugraph - -Path Extraction ---------------- -.. doxygenfunction:: cugraph_extract_paths - :project: libcugraph - -Extract Max Path Length ------------------------ -.. doxygenfunction:: cugraph_extract_paths_result_get_max_path_length - :project: libcugraph - -Traversal Support Functions ---------------------------- -.. doxygengroup:: traversal - :project: libcugraph - :members: - :content-only: diff --git a/docs/cugraph/source/api_docs/index.rst b/docs/cugraph/source/api_docs/index.rst deleted file mode 100644 index ccb7aacfeb5..00000000000 --- a/docs/cugraph/source/api_docs/index.rst +++ /dev/null @@ -1,36 +0,0 @@ -API Reference -============= - -This page provides a list of all publicly accessible Python modules with in the Graph collection - -Core Graph API Documentation ----------------------------- - -.. toctree:: - :maxdepth: 3 - :caption: Core Graph API Documentation - - cugraph/index.rst - plc/pylibcugraph.rst - cugraph_c/index.rst - cugraph_cpp/index.rst - -Graph Neural Networks API Documentation ---------------------------------------- - -.. toctree:: - :maxdepth: 3 - :caption: Graph Neural Networks API Documentation - - cugraph-dgl/cugraph_dgl.rst - cugraph-pyg/cugraph_pyg.rst - .. 
wholegraph/index.rst - -Additional Graph Packages API Documentation ----------------------------------- - -.. toctree:: - :maxdepth: 3 - :caption: Additional Graph Packages API Documentation - - service/index.rst diff --git a/docs/cugraph/source/api_docs/plc/pylibcugraph.rst b/docs/cugraph/source/api_docs/plc/pylibcugraph.rst deleted file mode 100644 index 7ebdd67e923..00000000000 --- a/docs/cugraph/source/api_docs/plc/pylibcugraph.rst +++ /dev/null @@ -1,22 +0,0 @@ -~~~~~~~~~~~~~~~~~~~~~~~~~~ -pylibcugraph API reference -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -pylibcugraph - -.. currentmodule:: pylibcugraph - -Methods -------- -.. autosummary:: - :toctree: ../api/plc/ - - pylibcugraph.eigenvector_centrality - pylibcugraph.katz_centrality - pylibcugraph.strongly_connected_components - pylibcugraph.weakly_connected_components - pylibcugraph.pagerank - pylibcugraph.hits - pylibcugraph.node2vec - pylibcugraph.bfs - pylibcugraph.sssp diff --git a/docs/cugraph/source/api_docs/service/cugraph_service_client.rst b/docs/cugraph/source/api_docs/service/cugraph_service_client.rst deleted file mode 100644 index 7e344d326f7..00000000000 --- a/docs/cugraph/source/api_docs/service/cugraph_service_client.rst +++ /dev/null @@ -1,20 +0,0 @@ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -cugraph-service-client API Reference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -cugraph-service - -.. currentmodule:: cugraph-service - -.. autosummary:: - :toctree: ../api/service/ - -.. cugraph_service_client.client.RunAsyncioThread - cugraph_service_client.client.run_async - cugraph_service_client.client.DeviceArrayAllocator - cugraph_service_client.client.CugraphServiceClient - cugraph_service_client.remote_graph_utils - cugraph_service_client.remote_graph.RemoteGraph - cugraph_service_client.types.UnionWrapper - cugraph_service_client.types.ValueWrapper - cugraph_service_client.types.GraphVertexEdgeIDWrapper diff --git a/docs/cugraph/source/api_docs/service/cugraph_service_server.rst b/docs/cugraph/source/api_docs/service/cugraph_service_server.rst deleted file mode 100644 index 09ca8360b6c..00000000000 --- a/docs/cugraph/source/api_docs/service/cugraph_service_server.rst +++ /dev/null @@ -1,14 +0,0 @@ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -cugraph-service-server API Reference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -cugraph-service - -.. currentmodule:: cugraph-service - -.. autosummary:: - :toctree: ../api/service/ - -.. cugraph_service_server.cugraph_handler.call_algo - cugraph_service_server.cugraph_handler.ExtensionServerFacade - cugraph_service_server.cugraph_handler.CugraphHandler diff --git a/docs/cugraph/source/api_docs/service/index.rst b/docs/cugraph/source/api_docs/service/index.rst deleted file mode 100644 index ca251e475d4..00000000000 --- a/docs/cugraph/source/api_docs/service/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -cugraph-service API Reference -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. toctree:: - :maxdepth: 2 - :caption: cugraph-service API Documentation - - cugraph_service_client - cugraph_service_server diff --git a/docs/cugraph/source/api_docs/wholegraph/index.rst b/docs/cugraph/source/api_docs/wholegraph/index.rst deleted file mode 100644 index 80e231d4610..00000000000 --- a/docs/cugraph/source/api_docs/wholegraph/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -WholeGraph API reference -======================== - -This page provides WholeGraph API reference - -.. 
toctree:: - :maxdepth: 2 - :caption: WholeGraph API Documentation - - libwholegraph/index.rst - pylibwholegraph/index.rst diff --git a/docs/cugraph/source/api_docs/wholegraph/libwholegraph/index.rst b/docs/cugraph/source/api_docs/wholegraph/libwholegraph/index.rst deleted file mode 100644 index 4ef68abef2d..00000000000 --- a/docs/cugraph/source/api_docs/wholegraph/libwholegraph/index.rst +++ /dev/null @@ -1,228 +0,0 @@ -===================== -libwholegraph API doc -===================== - -Doxygen WholeGraph C API documentation --------------------------------------- -For doxygen documentation, please refer to `Doxygen Documentation <../../doxygen_docs/libwholegraph/html/index.html>`_ - -WholeGraph C API documentation ------------------------------- - -Library Level APIs -++++++++++++++++++ - -.. doxygenenum:: wholememory_error_code_t - :project: libwholegraph -.. doxygenfunction:: wholememory_init - :project: libwholegraph -.. doxygenfunction:: wholememory_finalize - :project: libwholegraph -.. doxygenfunction:: fork_get_device_count - :project: libwholegraph - -WholeMemory Communicator APIs -+++++++++++++++++++++++++++++ - -.. doxygentypedef:: wholememory_comm_t - :project: libwholegraph -.. doxygenstruct:: wholememory_unique_id_t - :project: libwholegraph -.. doxygenfunction:: wholememory_create_unique_id - :project: libwholegraph -.. doxygenfunction:: wholememory_create_communicator - :project: libwholegraph -.. doxygenfunction:: wholememory_destroy_communicator - :project: libwholegraph -.. doxygenfunction:: wholememory_communicator_get_rank - :project: libwholegraph -.. doxygenfunction:: wholememory_communicator_get_size - :project: libwholegraph -.. doxygenfunction:: wholememory_communicator_barrier - :project: libwholegraph - -WholeMemoryHandle APIs -++++++++++++++++++++++ - -.. doxygenenum:: wholememory_memory_type_t - :project: libwholegraph -.. doxygenenum:: wholememory_memory_location_t - :project: libwholegraph -.. doxygentypedef:: wholememory_handle_t - :project: libwholegraph -.. doxygenstruct:: wholememory_gref_t - :project: libwholegraph -.. doxygenfunction:: wholememory_malloc - :project: libwholegraph -.. doxygenfunction:: wholememory_free - :project: libwholegraph -.. doxygenfunction:: wholememory_get_communicator - :project: libwholegraph -.. doxygenfunction:: wholememory_get_memory_type - :project: libwholegraph -.. doxygenfunction:: wholememory_get_memory_location - :project: libwholegraph -.. doxygenfunction:: wholememory_get_total_size - :project: libwholegraph -.. doxygenfunction:: wholememory_get_data_granularity - :project: libwholegraph -.. doxygenfunction:: wholememory_get_local_memory - :project: libwholegraph -.. doxygenfunction:: wholememory_get_rank_memory - :project: libwholegraph -.. doxygenfunction:: wholememory_get_global_pointer - :project: libwholegraph -.. doxygenfunction:: wholememory_get_global_reference - :project: libwholegraph -.. doxygenfunction:: wholememory_determine_partition_plan - :project: libwholegraph -.. doxygenfunction:: wholememory_determine_entry_partition_plan - :project: libwholegraph -.. doxygenfunction:: wholememory_get_partition_plan - :project: libwholegraph -.. doxygenfunction:: wholememory_load_from_file - :project: libwholegraph -.. doxygenfunction:: wholememory_store_to_file - :project: libwholegraph - -WholeMemoryTensor APIs -++++++++++++++++++++++ - -.. doxygenenum:: wholememory_dtype_t - :project: libwholegraph -.. doxygenstruct:: wholememory_array_description_t - :project: libwholegraph -.. 
doxygenstruct:: wholememory_matrix_description_t - :project: libwholegraph -.. doxygenstruct:: wholememory_tensor_description_t - :project: libwholegraph -.. doxygentypedef:: wholememory_tensor_t - :project: libwholegraph -.. doxygenfunction:: wholememory_dtype_get_element_size - :project: libwholegraph -.. doxygenfunction:: wholememory_dtype_is_floating_number - :project: libwholegraph -.. doxygenfunction:: wholememory_dtype_is_integer_number - :project: libwholegraph -.. doxygenfunction:: wholememory_create_array_desc - :project: libwholegraph -.. doxygenfunction:: wholememory_create_matrix_desc - :project: libwholegraph -.. doxygenfunction:: wholememory_initialize_tensor_desc - :project: libwholegraph -.. doxygenfunction:: wholememory_copy_array_desc_to_matrix - :project: libwholegraph -.. doxygenfunction:: wholememory_copy_array_desc_to_tensor - :project: libwholegraph -.. doxygenfunction:: wholememory_copy_matrix_desc_to_tensor - :project: libwholegraph -.. doxygenfunction:: wholememory_convert_tensor_desc_to_array - :project: libwholegraph -.. doxygenfunction:: wholememory_convert_tensor_desc_to_matrix - :project: libwholegraph -.. doxygenfunction:: wholememory_get_memory_element_count_from_array - :project: libwholegraph -.. doxygenfunction:: wholememory_get_memory_size_from_array - :project: libwholegraph -.. doxygenfunction:: wholememory_get_memory_element_count_from_matrix - :project: libwholegraph -.. doxygenfunction:: wholememory_get_memory_size_from_matrix - :project: libwholegraph -.. doxygenfunction:: wholememory_get_memory_element_count_from_tensor - :project: libwholegraph -.. doxygenfunction:: wholememory_get_memory_size_from_tensor - :project: libwholegraph -.. doxygenfunction:: wholememory_unsqueeze_tensor - :project: libwholegraph -.. doxygenfunction:: wholememory_create_tensor - :project: libwholegraph -.. doxygenfunction:: wholememory_destroy_tensor - :project: libwholegraph -.. doxygenfunction:: wholememory_make_tensor_from_pointer - :project: libwholegraph -.. doxygenfunction:: wholememory_make_tensor_from_handle - :project: libwholegraph -.. doxygenfunction:: wholememory_tensor_has_handle - :project: libwholegraph -.. doxygenfunction:: wholememory_tensor_get_memory_handle - :project: libwholegraph -.. doxygenfunction:: wholememory_tensor_get_tensor_description - :project: libwholegraph -.. doxygenfunction:: wholememory_tensor_get_global_reference - :project: libwholegraph -.. doxygenfunction:: wholememory_tensor_map_local_tensor - :project: libwholegraph -.. doxygenfunction:: wholememory_tensor_get_data_pointer - :project: libwholegraph -.. doxygenfunction:: wholememory_tensor_get_entry_per_partition - :project: libwholegraph -.. doxygenfunction:: wholememory_tensor_get_subtensor - :project: libwholegraph -.. doxygenfunction:: wholememory_tensor_get_root - :project: libwholegraph - -Ops on WholeMemory Tensors -++++++++++++++++++++++++++ - -.. doxygenfunction:: wholememory_gather - :project: libwholegraph -.. doxygenfunction:: wholememory_scatter - :project: libwholegraph - -WholeTensorEmbedding APIs -+++++++++++++++++++++++++ - -.. doxygentypedef:: wholememory_embedding_cache_policy_t - :project: libwholegraph -.. doxygentypedef:: wholememory_embedding_optimizer_t - :project: libwholegraph -.. doxygentypedef:: wholememory_embedding_t - :project: libwholegraph -.. doxygenenum:: wholememory_access_type_t - :project: libwholegraph -.. doxygenenum:: wholememory_optimizer_type_t - :project: libwholegraph -.. 
doxygenfunction:: wholememory_create_embedding_optimizer - :project: libwholegraph -.. doxygenfunction:: wholememory_optimizer_set_parameter - :project: libwholegraph -.. doxygenfunction:: wholememory_destroy_embedding_optimizer - :project: libwholegraph -.. doxygenfunction:: wholememory_create_embedding_cache_policy - :project: libwholegraph -.. doxygenfunction:: wholememory_destroy_embedding_cache_policy - :project: libwholegraph -.. doxygenfunction:: wholememory_create_embedding - :project: libwholegraph -.. doxygenfunction:: wholememory_destroy_embedding - :project: libwholegraph -.. doxygenfunction:: wholememory_embedding_get_embedding_tensor - :project: libwholegraph -.. doxygenfunction:: wholememory_embedding_gather - :project: libwholegraph -.. doxygenfunction:: wholememory_embedding_gather_gradient_apply - :project: libwholegraph -.. doxygenfunction:: wholememory_embedding_get_optimizer_state_names - :project: libwholegraph -.. doxygenfunction:: wholememory_embedding_get_optimizer_state - :project: libwholegraph -.. doxygenfunction:: wholememory_embedding_writeback_cache - :project: libwholegraph -.. doxygenfunction:: wholememory_embedding_drop_all_cache - :project: libwholegraph - -Ops on graphs stored in WholeMemory -+++++++++++++++++++++++++++++++++++ - -.. doxygenfunction:: wholegraph_csr_unweighted_sample_without_replacement - :project: libwholegraph -.. doxygenfunction:: wholegraph_csr_weighted_sample_without_replacement - :project: libwholegraph - -Miscellaneous Ops for graph -+++++++++++++++++++++++++++ - -.. doxygenfunction:: graph_append_unique - :project: libwholegraph -.. doxygenfunction:: csr_add_self_loop - :project: libwholegraph diff --git a/docs/cugraph/source/api_docs/wholegraph/pylibwholegraph/index.rst b/docs/cugraph/source/api_docs/wholegraph/pylibwholegraph/index.rst deleted file mode 100644 index 67aab00acef..00000000000 --- a/docs/cugraph/source/api_docs/wholegraph/pylibwholegraph/index.rst +++ /dev/null @@ -1,38 +0,0 @@ -======================= -pylibwholegraph API doc -======================= - -.. currentmodule:: pylibwholegraph - -APIs ----- -.. 
autosummary:: - :toctree: ../../api/wg - - torch.initialize.init_torch_env - torch.initialize.init_torch_env_and_create_wm_comm - torch.initialize.finalize - torch.comm.WholeMemoryCommunicator - torch.comm.set_world_info - torch.comm.create_group_communicator - torch.comm.destroy_communicator - torch.comm.get_global_communicator - torch.comm.get_local_node_communicator - torch.comm.get_local_device_communicator - torch.tensor.WholeMemoryTensor - torch.tensor.create_wholememory_tensor - torch.tensor.create_wholememory_tensor_from_filelist - torch.tensor.destroy_wholememory_tensor - torch.embedding.WholeMemoryOptimizer - torch.embedding.create_wholememory_optimizer - torch.embedding.destroy_wholememory_optimizer - torch.embedding.WholeMemoryCachePolicy - torch.embedding.create_wholememory_cache_policy - torch.embedding.create_builtin_cache_policy - torch.embedding.destroy_wholememory_cache_policy - torch.embedding.WholeMemoryEmbedding - torch.embedding.create_embedding - torch.embedding.create_embedding_from_filelist - torch.embedding.destroy_embedding - torch.embedding.WholeMemoryEmbeddingModule - torch.graph_structure.GraphStructure diff --git a/docs/cugraph/source/basics/index.md b/docs/cugraph/source/basics/index.md deleted file mode 100644 index 36aad5166bc..00000000000 --- a/docs/cugraph/source/basics/index.md +++ /dev/null @@ -1,67 +0,0 @@ -# cuGraph Introduction - -The Data Scientist has a collection of techniques within their -proverbial toolbox. Data engineering, statistical analysis, and -machine learning are among the most commonly known. However, there -are numerous cases where the focus of the analysis is on the -relationship between data elements. In those cases, the data is best -represented as a graph. Graph analysis, also called network analysis, -is a collection of algorithms for answering questions posed against -graph data. Graph analysis is not new. - -The first graph problem was posed by Euler in 1736, the [Seven Bridges of -Konigsberg](https://en.wikipedia.org/wiki/Seven_Bridges_of_K%C3%B6nigsberg), -and laid the foundation for the mathematical field of graph theory. -The application of graph analysis covers a wide variety of fields, including -marketing, biology, physics, computer science, sociology, and cyber to name a few. - -RAPIDS cuGraph is a library of graph algorithms that seamlessly integrates -into the RAPIDS data science ecosystem and allows the data scientist to easily -call graph algorithms using data stored in a GPU DataFrame, NetworkX Graphs, or even -CuPy or SciPy sparse Matrix. - -## Vision - -The vision of RAPIDS cuGraph is to ___make graph analysis ubiquitous to the -point that users just think in terms of analysis and not technologies or -frameworks___. This is a goal that many of us on the cuGraph team have been -working on for almost twenty years. Many of the early attempts focused on -solving one problem or using one technique. Those early attempts worked for -the initial goal but tended to break as the scope changed (e.g., shifting -to solving a dynamic graph problem with a static graph solution). The limiting -factors usually came down to compute power, ease-of-use, or choosing a data -structure that was not suited for all problems. NVIDIA GPUs, CUDA, and RAPIDS -have totally changed the paradigm and the goal of an accelerated unified graph -analytic library is now possible. - -The compute power of the latest NVIDIA GPUs (RAPIDS supports Pascal and later -GPU architectures) make graph analytics 1000x faster on average over NetworkX. 
-Moreover, the internal memory speed within a GPU allows cuGraph to rapidly -switch the data structure to best suit the needs of the analytic rather than -being restricted to a single data structure. cuGraph is working with several -frameworks for both static and dynamic graph data structures so that we always -have a solution to any graph problem. Since Python has emerged as the de facto -language for data science, allowing interactivity and the ability to run graph -analytics in Python makes cuGraph familiar and approachable. RAPIDS wraps all -the graph analytic goodness mentioned above with the ability to perform -high-speed ETL, statistics, and machine learning. To make things even better, -RAPIDS and DASK allows cuGraph to scale to multiple GPUs to support -multi-billion edge graphs. - -## Terminology - -cuGraph is a collection of GPU accelerated graph algorithms and graph utility -functions. The application of graph analysis covers a lot of areas. -For Example: -* [Network Science](https://en.wikipedia.org/wiki/Network_science) -* [Complex Network](https://en.wikipedia.org/wiki/Complex_network) -* [Graph Theory](https://en.wikipedia.org/wiki/Graph_theory) -* [Social Network Analysis](https://en.wikipedia.org/wiki/Social_network_analysis) - -cuGraph does not favor one field over another. Our developers span the -breadth of fields with the focus being to produce the best graph library -possible. However, each field has its own argot (jargon) for describing the -graph (or network). In our documentation, we try to be consistent. In Python -documentation we will mostly use the terms __Node__ and __Edge__ to better -match NetworkX preferred term use, as well as other Python-based tools. At -the CUDA/C layer, we favor the mathematical terms of __Vertex__ and __Edge__. diff --git a/docs/cugraph/source/conf.py b/docs/cugraph/source/conf.py deleted file mode 100644 index 6573349aaec..00000000000 --- a/docs/cugraph/source/conf.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright (c) 2018-2024, NVIDIA CORPORATION. -# -# pygdf documentation build configuration file, created by -# sphinx-quickstart on Wed May 3 10:59:22 2017. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys - -from packaging.version import Version - -import cugraph - -# If extensions (or modules to document with autodoc) are in another -# directory, add these directories to sys.path here. If the directory -# is relative to the documentation root, use os.path.abspath to make it -# absolute, like shown here. -sys.path.insert(0, os.path.abspath('sphinxext')) - -from github_link import make_linkcode_resolve # noqa - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. 
-extensions = [ - "breathe", - "sphinx.ext.intersphinx", - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "numpydoc", - "sphinx_markdown_tables", - 'sphinx.ext.doctest', - 'sphinx.ext.linkcode', - "IPython.sphinxext.ipython_console_highlighting", - "IPython.sphinxext.ipython_directive", - "nbsphinx", - "recommonmark", - "sphinx_copybutton", -] - - -ipython_mplbackend = 'str' - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# -# source_suffix = ['.rst', '.md'] -source_suffix = {".rst": "restructuredtext", ".md": "markdown"} - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = 'cugraph' -copyright = '2019-2023, NVIDIA Corporation' -author = 'NVIDIA Corporation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -CUGRAPH_VERSION = Version(cugraph.__version__) -# The short X.Y version. -version = f"{CUGRAPH_VERSION.major:02}.{CUGRAPH_VERSION.minor:02}" -# The full version, including alpha/beta/rc tags. -release = f"{CUGRAPH_VERSION.major:02}.{CUGRAPH_VERSION.minor:02}.{CUGRAPH_VERSION.micro:02}" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = 'en' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = [] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# - -html_theme = 'pydata_sphinx_theme' - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -html_theme_options = { - "external_links": [], - "github_url": "https://github.com/rapidsai/cugraph", - "twitter_url": "https://twitter.com/rapidsai", - "show_toc_level": 1, - "navbar_align": "right", -} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - - -# -- Options for HTMLHelp output ------------------------------------------ - -# Output file base name for HTML help builder. -htmlhelp_basename = 'cugraphdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. 
- # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'cugraph.tex', 'cugraph Documentation', - 'NVIDIA Corporation', 'manual'), -] - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'cugraph', 'cugraph Documentation', - [author], 1) -] - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'cugraph', 'cugraph Documentation', - author, 'cugraph', 'GPU-accelerated graph analysis.', - 'Miscellaneous'), -] - -# Connect docs in other projects -intersphinx_mapping = { - "networkx": ( - "https://networkx.org/documentation/stable/", - "https://networkx.org/documentation/stable/objects.inv", - ), - "python": ( - "https://docs.python.org/3", - "https://docs.python.org/3/objects.inv", - ), -} - -# Config numpydoc -numpydoc_show_inherited_class_members = False -numpydoc_class_members_toctree = False - - -def setup(app): - app.add_css_file("https://docs.rapids.ai/assets/css/custom.css") - app.add_js_file("https://docs.rapids.ai/assets/js/custom.js", loading_method="defer") - app.add_css_file("references.css") - - -source_suffix = ['.rst', '.md'] - -# The following is used by sphinx.ext.linkcode to provide links to github -linkcode_resolve = make_linkcode_resolve( - "https://github.com/rapidsai/cugraph/blob/{revision}/python/{path}#L{lineno}" -) - -breathe_projects = { - 'libcugraph': os.environ['XML_DIR_LIBCUGRAPH'], - # 'libwholegraph': os.environ['XML_DIR_LIBWHOLEGRAPH'] -} - -breathe_default_project = "libcugraph" diff --git a/docs/cugraph/source/dev_resources/API.rst b/docs/cugraph/source/dev_resources/API.rst deleted file mode 100644 index e32315d2fb9..00000000000 --- a/docs/cugraph/source/dev_resources/API.rst +++ /dev/null @@ -1,5 +0,0 @@ -=== -API -=== - -https://docs.rapids.ai/api/cugraph/nightly/api_docs/index.html diff --git a/docs/cugraph/source/dev_resources/index.rst b/docs/cugraph/source/dev_resources/index.rst deleted file mode 100644 index fc2c4f4780f..00000000000 --- a/docs/cugraph/source/dev_resources/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -=================== -Developer Resources -=================== - - -.. toctree:: - :maxdepth: 3 - - https://docs.rapids.ai/maintainers - API.rst diff --git a/docs/cugraph/source/graph_support/DGL_support.md b/docs/cugraph/source/graph_support/DGL_support.md deleted file mode 100644 index 7d32a9efe37..00000000000 --- a/docs/cugraph/source/graph_support/DGL_support.md +++ /dev/null @@ -1,64 +0,0 @@ -# cugraph_dgl - -## Description - -[RAPIDS](https://rapids.ai) cugraph_dgl provides a duck-typed version of the [DGLGraph](https://docs.dgl.ai/api/python/dgl.DGLGraph.html#dgl.DGLGraph) class, which uses cugraph for storing graph structure and node/edge feature data. Using cugraph as the backend allows DGL users to access a collection of GPU accelerated algorithms for graph analytics, such as centrality computation and community detection. 
- -## Conda - -Install and update cugraph-dgl and the required dependencies using the command: - -```shell -# CUDA 11 -conda install -c rapidsai -c pytorch -c conda-forge -c nvidia -c dglteam/label/th23_cu118 cugraph-dgl - -# CUDA 12 -conda install -c rapidsai -c pytorch -c conda-forge -c nvidia -c dglteam/label/th23_cu121 cugraph-dgl -``` - -## Build from Source - -### Create the conda development environment -``` -conda env create -n cugraph_dgl_dev --file conda/environments/all_cuda-125_arch-x86_64.yaml -``` - -### Install in editable mode -``` -pip install -e . -``` - -### Run tests - -``` -pytest tests/* -``` - - -## Usage -```diff - -from cugraph_dgl.convert import cugraph_storage_from_heterograph -cugraph_g = cugraph_storage_from_heterograph(dgl_g) - -sampler = dgl.dataloading.NeighborSampler( - [15, 10, 5], prefetch_node_feats=['feat'], prefetch_labels=['label']) - -train_dataloader = dgl.dataloading.DataLoader( -cugraph_g, -train_idx, -sampler, -device=device, -batch_size=1024, -shuffle=True, -drop_last=False, -num_workers=0) -``` - -___ -Copyright (c) 2023, NVIDIA CORPORATION. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -___ diff --git a/docs/cugraph/source/graph_support/PyG_support.md b/docs/cugraph/source/graph_support/PyG_support.md deleted file mode 100644 index b57ce7fcc6f..00000000000 --- a/docs/cugraph/source/graph_support/PyG_support.md +++ /dev/null @@ -1,3 +0,0 @@ -# cugraph_pyg - -[RAPIDS](https://rapids.ai) cugraph_pyg enables the ability to use cugraph graph storage and sampling with PyTorch Geometric (PyG). PyG users will have access to cuGraph through the PyG GraphStore, FeatureStore, and Sampler interfaces. diff --git a/docs/cugraph/source/graph_support/algorithms.md b/docs/cugraph/source/graph_support/algorithms.md deleted file mode 100644 index 2aac61325e0..00000000000 --- a/docs/cugraph/source/graph_support/algorithms.md +++ /dev/null @@ -1,95 +0,0 @@ -# List of Supported and Planned Algorithms - -## Supported Graph - -| Type | Description | -| ---------- | ----------------------------------------------------------- | -| Graph | A directed or undirected Graph (use directed={True, False}) | -| Multigraph | A Graph with multiple edges between a vertex pair | -| | | - -ALL Algorithms support Graphs and MultiGraph (directed and undirected) - ---- - -
- -# Supported Algorithms - -_Italic_ algorithms are planned for future releases. - -Note: Multi-GPU, or MG, includes support for Multi-Node Multi-GPU (also called MNMG). - -| Category | Notebooks | Scale | Notes | -| ----------------- | ---------------------------------- | ------------------- | --------------------------------------------------------------- | -| [Centrality](./algorithms/Centrality.html ) | [Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Centrality.ipynb) | | | -| | [Katz](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Katz.ipynb) | __Multi-GPU__ | | -| | [Betweenness Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Betweenness.ipynb) | __Multi-GPU__ | MG as of 23.06 | -| | [Edge Betweenness Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Betweenness.ipynb) | __Multi-GPU__ | MG as of 23.08 | -| | [Eigenvector Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Eigenvector.ipynb) | __Multi-GPU__ | | -| | [Degree Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Degree.ipynb) | __Multi-GPU__ | Python only | -| Community | | | | -| | [Leiden](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/Louvain.ipynb) | __Multi-GPU__ | MG as of 23.06 | -| | [Louvain](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/Louvain.ipynb) | __Multi-GPU__ | | -| | [Ensemble Clustering for Graphs](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/ECG.ipynb) | Single-GPU | MG planned for 24.02 | -| | [Spectral-Clustering - Balanced Cut](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/Spectral-Clustering.ipynb) | Single-GPU | | -| | [Spectral-Clustering - Modularity](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/Spectral-Clustering.ipynb) | Single-GPU | | -| | [Subgraph Extraction](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/Subgraph-Extraction.ipyn) | Single-GPU | | -| | [Triangle Counting](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/Triangle-Counting.ipynb) | __Multi-GPU__ | | -| | [K-Truss](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/ktruss.ipynb) | Single-GPU | MG planned for 2024 | -| Components | | | | -| | [Weakly Connected Components](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/components/ConnectedComponents.ipynb) | __Multi-GPU__ | | -| | [Strongly Connected Components](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/components/ConnectedComponents.ipynb) | Single-GPU | | -| Core | | | | -| | [K-Core](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/cores/kcore.ipynb) | __Multi-GPU__ | | -| | [Core Number](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/cores/core-number.ipynb) | __Multi-GPU__ | | -| _Flow_ | | | | -| | _MaxFlow_ | --- | | -| _Influence_ | | | | -| | _Influence Maximization_ | --- | | -| Layout | | | | -| | [Force Atlas 2](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/layout/Force-Atlas2.ipynb) | Single-GPU | | -| Linear Assignment | | | | -| | [Hungarian](https://docs.rapids.ai/api/cugraph/nightly/api_docs/cugraph/linear_assignment/#hungarian) | Single-GPU | 
[README](./algorithms/cpp_algorithms/linear_cpp.html) | -| Link Analysis | | | | -| | [Pagerank](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_analysis/Pagerank.ipynb) | __Multi-GPU__ | [C++ README](./algorithms/cpp_algorithms/centrality_cpp.html#Pagerank) | -| | [Personal Pagerank](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_analysis/Pagerank.ipynb) | __Multi-GPU__ | [C++ README](./algorithms/cpp_algorithms/centrality_cpp.html#Personalized-Pagerank) | -| | [HITS](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_analysis/HITS.ipynb) | __Multi-GPU__ | | -| [Link Prediction](algorithms/Similarity.html) | | | | -| | [Jaccard Similarity](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_prediction/Jaccard-Similarity.ipynb) | __Multi-GPU__ | Directed graph only | -| | [Weighted Jaccard Similarity](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_prediction/Jaccard-Similarity.ipynb) | Single-GPU | | -| | [Overlap Similarity](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_prediction/Overlap-Similarity.ipynb) | **Multi-GPU** | | -| | [Sorensen Coefficient](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_prediction/Sorensen_coefficient.ipynb) | __Multi-GPU__ | MG is unweighted only | -| | _Local Clustering Coefficient_ | --- | | -| Sampling | | | | -| | [Uniform Random Walks RW](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/sampling/RandomWalk.ipynb) | __Multi-GPU__ | | -| | *Biased Random Walks (RW)* | --- | | -| | Egonet | __Multi-GPU__ | | -| | Node2Vec | __Multi-GPU__ | | -| | Neighborhood sampling | __Multi-GPU__ | | -| Traversal | | | | -| | Breadth First Search (BFS) | __Multi-GPU__ | [C++ README](algorithms/cpp_algorithms/traversal_cpp.html#BFS) | -| | Single Source Shortest Path (SSSP) | __Multi-GPU__ | [C++ README](algorithms/cpp_algorithms/traversal_cpp.html#SSSP) | -| | _ASSP / APSP_ | --- | | -| Tree | | | | -| | Minimum Spanning Tree | Single-GPU | | -| | Maximum Spanning Tree | Single-GPU | | -| Other | | | | -| | Renumbering | __Multi-GPU__ | multiple columns, any data type | -| | Symmetrize | __Multi-GPU__ | | -| | Path Extraction | | Extract paths from BFS/SSP results in parallel | -| | Two Hop Neighbors | __Multi-GPU__ | | -| Data Generator | | | | -| | RMAT | __Multi-GPU__ | | -| | _Barabasi-Albert_ | --- | | -| | | | | - -
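As a quick illustration of how the algorithms in the table above are invoked from Python, here is a minimal sketch using the built-in karate dataset that appears elsewhere in these docs; it assumes a working cuGraph install, and return-value details may vary by release.

```python
import cugraph
from cugraph.datasets import karate

# Load the built-in Zachary Karate Club graph
G = karate.get_graph()

# Link analysis: PageRank returns a DataFrame of per-vertex scores
pagerank_df = cugraph.pagerank(G)
print(pagerank_df.sort_values(by="pagerank", ascending=False).head(3))

# Community detection: Louvain returns vertex-to-partition assignments
# and the modularity score of the final clustering
parts_df, modularity = cugraph.louvain(G)
print(f"Louvain modularity: {modularity}")
```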

- -___ -Copyright (c) 2019 - 2023, NVIDIA CORPORATION. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -___ diff --git a/docs/cugraph/source/graph_support/algorithms/Centrality.md b/docs/cugraph/source/graph_support/algorithms/Centrality.md deleted file mode 100644 index 8119e655236..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/Centrality.md +++ /dev/null @@ -1,40 +0,0 @@ - -# cuGraph Centrality Notebooks - - - -The RAPIDS cuGraph Centrality folder contains a collection of Jupyter Notebooks that demonstrate algorithms to identify and quantify the importance of vertices to the structure of the graph. In the diagram above, the highlighted vertices are highly important and are likely answers to questions like: - -* Which vertices have the highest degree (most direct links) ? -* Which vertices are on the most efficient paths through the graph? -* Which vertices connect the most important vertices to each other? - -But which vertices are most important? The answer depends on which measure/algorithm is run. Manipulation of the data before or after the graph analytic is not covered here. Extended, more problem focused, notebooks are being created and available https://github.com/rapidsai/notebooks-extended - -## Summary - -|Algorithm |Notebooks Containing |Description | -| --------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -|[Degree Centrality](./degree_centrality.md)| [Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Centrality.ipynb), [Degree](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Degree.ipynb) |Measure based on counting direct connections for each vertex| -|[Betweenness Centrality](./betweenness_centrality.md)| [Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Centrality.ipynb), [Betweenness](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Betweenness.ipynb) |Number of shortest paths through the vertex| -|[Eigenvector Centrality](./eigenvector_centrality.md)|[Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Centrality.ipynb), [Eigenvector](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Eigenvector.ipynb)|Measure of connectivity to other important vertices (which also have high connectivity) often referred to as the influence measure of a vertex| -|[Katz Centrality](./katz_centrality.md)|[Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Centrality.ipynb), [Katz](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Katz.ipynb) |Similar to Eigenvector but has tweaks to measure more weakly connected graph | -|Pagerank|[Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Centrality.ipynb), [Pagerank](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_analysis/Pagerank.ipynb) 
|Classified as both a link analysis and centrality measure by quantifying incoming links from central vertices. |
-
-[System Requirements](https://github.com/rapidsai/cugraph/blob/main/notebooks/README.md#requirements)
-
-
-
-| Author Credit | Date | Update | cuGraph Version | Test Hardware |
-| --------------|------------|------------------|-----------------|----------------|
-| Brad Rees | 04/19/2021 | created | 0.19 | GV100, CUDA 11.0
-| Don Acosta | 07/05/2022 | tested / updated | 22.08 nightly | DGX Tesla V100 CUDA 11.5
-
-## Copyright
-
-Copyright (c) 2019 - 2023, NVIDIA CORPORATION.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-___
diff --git a/docs/cugraph/source/graph_support/algorithms/Similarity.md b/docs/cugraph/source/graph_support/algorithms/Similarity.md deleted file mode 100644 index 96adc25ea69..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/Similarity.md +++ /dev/null @@ -1,38 +0,0 @@
-
-# cuGraph Similarity Notebooks
-
-The RAPIDS cuGraph Similarity folder contains a collection of Jupyter Notebooks that demonstrate algorithms to quantify the similarity between pairs of vertices in the graph.
-Results of Similarity algorithms are often used to answer questions like:
-* Could two vertices be duplicates or aliases of the same actor?
-* Can we predict missing edges based on the similarity between two nodes?
-* Are there multiple similar communities within the graph?
-* Can I create recommendations based on the similarity between vertices in the graph?
-
-
-Manipulation of the data before or after the graph analytic is not covered here. Extended, more problem-focused notebooks are being created and are available at https://github.com/rapidsai/notebooks-extended
-
-## Summary
-
-|Algorithm |Notebooks Containing |Description |
-| --------------- | ------------------------------------------------------------ | ------------------------------------------------------------ |
-|[Jaccard Similarity](./jaccard_similarity.html)| [Jaccard Similarity](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_prediction/Jaccard-Similarity.ipynb) ||
-|[Overlap Similarity](./overlap_similarity.html)| [Overlap Similarity](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_prediction/Overlap-Similarity.ipynb) ||
-|[Sorensen](./sorensen_coefficient.html)|[Sorensen Similarity](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_prediction/Sorensen_coefficient.ipynb)||
-|Personal Pagerank|[Pagerank](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_analysis/Pagerank.ipynb) ||
-
-
-[System Requirements](../../README.md#requirements)
-
-| Author Credit | Date | Update | cuGraph Version | Test Hardware |
-| --------------|------------|------------------|-----------------|----------------|
-| Brad Rees | 04/19/2021 | created | 0.19 | GV100, CUDA 11.0
-| Don Acosta | 07/05/2022 | tested / updated | 22.08 nightly | DGX Tesla V100 CUDA 11.5
-
-## Copyright
-
-Copyright (c) 2019 - 2023, NVIDIA CORPORATION.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-___
diff --git a/docs/cugraph/source/graph_support/algorithms/betweenness_centrality.md b/docs/cugraph/source/graph_support/algorithms/betweenness_centrality.md deleted file mode 100644 index 89e5e0bdb92..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/betweenness_centrality.md +++ /dev/null @@ -1,42 +0,0 @@
-# Betweenness Centrality (BC)
-
-Betweenness centrality is a measure of relative importance based on the number of shortest paths that pass through each vertex or over each edge. High betweenness centrality vertices have a greater number of shortest paths crossing through them. Likewise, high centrality edges have more shortest paths that pass over the edge.
-
-See [Betweenness on Wikipedia](https://en.wikipedia.org/wiki/Betweenness_centrality) for more details on the algorithm.
-
-The betweenness centrality of a node 𝑣 is the sum of the fraction of all-pairs shortest paths that pass through 𝑣:
-
-$c_B(v) =\sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}$
-
-
-## When to use BC
-Betweenness centrality is often used to answer questions like:
-* Which vertices are most influential in the network?
-* What are the bridge vertices in a network?
-* How robust/redundant is the network?
-* In a social network analysis, betweenness centrality can be used to identify roles in an organization.
-
-## When not to use BC
-Betweenness centrality is less useful in certain circumstances:
-* Large graphs may require approximating betweenness centrality because the computational cost grows with graph size.
-* Disconnected networks, or networks with many isolated components, limit the value of betweenness centrality.
-* Betweenness centrality is more costly and less useful in weighted graphs.
-* In networks with hierarchical structure, BC might not accurately reflect true influence.
-* Networks with multiple edge types often require a separate method of measuring influence for each edge type.
-
-
-## How computationally expensive is BC?
-While cuGraph's parallelism mitigates run time, [Big O notation](https://en.wikipedia.org/wiki/Big_O_notation) is still the standard to compare algorithm costs.
-* The cost is O(V(E+V)) for a non-weighted graph and O(V(E+V)log(V)) for a weighted graph.
-* A breadth-first search is done to determine shortest paths between all nodes prior to calculating BC.
-
-## Sample benchmarks
-Coming Soon
-
-___
-Copyright (c) 2023, NVIDIA CORPORATION.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-___
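As a practical illustration of the cost trade-off above, the following minimal sketch shows how betweenness centrality might be called from the cuGraph Python API on the built-in karate dataset used elsewhere in these docs. The `k` sampling argument and the `betweenness_centrality` result column reflect the Python API as we understand it and may differ between releases.

```python
import cugraph
from cugraph.datasets import karate

# Small built-in graph; exact BC is cheap here
G = karate.get_graph()

# Exact betweenness centrality over all vertices
bc_df = cugraph.betweenness_centrality(G)

# Approximate BC estimated from a sample of source vertices, which is the
# usual way to keep the O(V(E+V)) cost manageable on large graphs
approx_bc_df = cugraph.betweenness_centrality(G, k=16)

print(bc_df.sort_values(by="betweenness_centrality", ascending=False).head(3))
```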
diff --git a/docs/cugraph/source/graph_support/algorithms/cpp_algorithms/centrality_cpp.md b/docs/cugraph/source/graph_support/algorithms/cpp_algorithms/centrality_cpp.md deleted file mode 100644 index b3f7ac17d1a..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/cpp_algorithms/centrality_cpp.md +++ /dev/null @@ -1,81 +0,0 @@
-# Centrality algorithms
-cuGraph Pagerank is implemented using our graph primitive library.
-
-## Pagerank
-
-The unit test code is the best place to search for examples on calling pagerank.
-
- * [SG Implementation](https://github.com/rapidsai/cugraph/blob/main/cpp/tests/link_analysis/pagerank_test.cpp)
 * [MG Implementation](https://github.com/rapidsai/cugraph/blob/main/cpp/tests/link_analysis/mg_pagerank_test.cpp)
-
-## Simple pagerank
-
-The example assumes that you create an SG or MG graph somehow. The caller must create the pageranks vector in device memory and pass the raw pointer to that vector into the pagerank function.
-
-```cpp
-#include <cugraph/algorithms.hpp>
-...
-using vertex_t = int32_t;        // or int64_t, whichever is appropriate
-using weight_t = float;          // or double, whichever is appropriate
-using result_t = weight_t;       // could specify float or double also
-raft::handle_t handle;           // Must be configured if MG
-auto graph_view = graph.view();  // assumes you have created a graph somehow
-
-result_t constexpr alpha{0.85};
-result_t constexpr epsilon{1e-6};
-
-rmm::device_uvector<result_t> pageranks_v(graph_view.number_of_vertices(), handle.get_stream());
-
-// pagerank optionally supports three additional parameters:
-// max_iterations - maximum number of iterations, if pagerank doesn't converge by
-//                  then we abort
-// has_initial_guess - if true, values in the pagerank array when the call is initiated
-//                     will be used as the initial pagerank values. These values will
-//                     be normalized before use. If false (the default), the values
-//                     in the pagerank array will be set to 1/num_vertices before
-//                     starting the computation.
-// do_expensive_check - perform extensive validation of the input data before
-//                      executing algorithm. Off by default. Note: turning this on
-//                      is expensive
-cugraph::pagerank(handle, graph_view, nullptr, nullptr, nullptr, vertex_t{0},
-                  pageranks_v.data(), alpha, epsilon);
-```
-
-## Personalized Pagerank
-
-The example assumes that you create an SG or MG graph somehow. The caller must create the pageranks vector in device memory and pass the raw pointer to that vector into the pagerank function. Additionally, the caller must create personalization_vertices and personalization_values vectors in device memory, populate them, and pass in the raw pointers to those vectors.
-
-```cpp
-#include <cugraph/algorithms.hpp>
-...
-using vertex_t = int32_t;        // or int64_t, whichever is appropriate
-using weight_t = float;          // or double, whichever is appropriate
-using result_t = weight_t;       // could specify float or double also
-raft::handle_t handle;           // Must be configured if MG
-auto graph_view = graph.view();  // assumes you have created a graph somehow
-vertex_t number_of_personalization_vertices;  // Provided by caller
-
-result_t constexpr alpha{0.85};
-result_t constexpr epsilon{1e-6};
-
-rmm::device_uvector<result_t> pageranks_v(graph_view.number_of_vertices(), handle.get_stream());
-rmm::device_uvector<vertex_t> personalization_vertices(number_of_personalization_vertices, handle.get_stream());
-rmm::device_uvector<result_t> personalization_values(number_of_personalization_vertices, handle.get_stream());
-
-// Populate personalization_vertices, personalization_values with user provided data
-
-// pagerank optionally supports three additional parameters:
-// max_iterations - maximum number of iterations, if pagerank doesn't converge by
-//                  then we abort
-// has_initial_guess - if true, values in the pagerank array when the call is initiated
-//                     will be used as the initial pagerank values. These values will
-//                     be normalized before use. If false (the default), the values
-//                     in the pagerank array will be set to 1/num_vertices before
-//                     starting the computation.
-// do_expensive_check - perform extensive validation of the input data before
-//                      executing algorithm. Off by default. Note: turning this on
-//                      is expensive
-cugraph::pagerank(handle, graph_view, nullptr, personalization_vertices.data(),
-                  personalization_values.data(), number_of_personalization_vertices,
-                  pageranks_v.data(), alpha, epsilon);
-```
diff --git a/docs/cugraph/source/graph_support/algorithms/cpp_algorithms/linear_cpp.md b/docs/cugraph/source/graph_support/algorithms/cpp_algorithms/linear_cpp.md deleted file mode 100644 index 8af4a5042f6..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/cpp_algorithms/linear_cpp.md +++ /dev/null @@ -1,37 +0,0 @@
-# LAP
-
-Implementation of the ***O(n^3) Alternating Tree Variant*** of the Hungarian Algorithm on NVIDIA CUDA-enabled GPUs.
-
-This implementation solves a batch of ***k*** **Linear Assignment Problems (LAP)**, each with an ***n x n*** matrix of single-precision floating point cost values. At optimality, the algorithm produces an assignment with ***minimum*** cost.
-
-The API can be used to query optimal primal and dual costs, the optimal assignment vector, and the optimal row/column dual vectors for each subproblem in the batch.
-
-cuGraph exposes the Hungarian algorithm; the actual implementation is contained in the RAFT library, which contains common tools and kernels shared between cuGraph and cuML.
-
-The following parameters can be used to tune the performance of the algorithm:
-
-1. epsilon: (in raft/lap/lap_kernels.cuh) This parameter controls the tolerance on the floating point precision. Setting this too small will result in increased solution time because the algorithm will search for precise solutions. Setting it too high may cause some inaccuracies.
-
-2. BLOCKDIMX, BLOCKDIMY: (in raft/lap/lap_functions.cuh) These parameters control threads_per_block to be used along the given dimension. Set these according to the device specifications and occupancy calculation.
-
-***This library is licensed under Apache License 2.0. Please cite our paper if this library helps you in your research.***
-
-- Harvard citation style
-
-  Date, K. and Nagi, R., 2016. GPU-accelerated Hungarian algorithms for the Linear Assignment Problem.
Parallel Computing, 57, pp.52-72.
-
-- BibTeX citation block to be used in a LaTeX bibliography file:
-
-```
-@article{date2016gpu,
-  title={GPU-accelerated Hungarian algorithms for the Linear Assignment Problem},
-  author={Date, Ketan and Nagi, Rakesh},
-  journal={Parallel Computing},
-  volume={57},
-  pages={52--72},
-  year={2016},
-  publisher={Elsevier}
-}
-```
-
-The paper is available online on [ScienceDirect](https://www.sciencedirect.com/science/article/abs/pii/S016781911630045X).
diff --git a/docs/cugraph/source/graph_support/algorithms/cpp_algorithms/traversal_cpp.md b/docs/cugraph/source/graph_support/algorithms/cpp_algorithms/traversal_cpp.md deleted file mode 100644 index 6480d885a38..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/cpp_algorithms/traversal_cpp.md +++ /dev/null @@ -1,56 +0,0 @@
-# Traversal
-cuGraph traversal algorithms are contained in this directory.
-
-## SSSP
-
-The unit test code is the best place to search for examples on calling SSSP.
-
- * [SG Implementation](https://github.com/rapidsai/cugraph/blob/main/cpp/tests/traversal/sssp_test.cpp)
 * [MG Implementation](https://github.com/rapidsai/cugraph/blob/main/cpp/tests/traversal/mg_sssp_test.cpp)
-
-## Simple SSSP
-
-The example assumes that you create an SG or MG graph somehow. The caller must create the distances and predecessors vectors in device memory and pass the raw pointers to those vectors into the SSSP function.
-
-```cpp
-#include <cugraph/algorithms.hpp>
-...
-using vertex_t = int32_t;        // or int64_t, whichever is appropriate
-using weight_t = float;          // or double, whichever is appropriate
-using result_t = weight_t;       // could specify float or double also
-raft::handle_t handle;           // Must be configured if MG
-auto graph_view = graph.view();  // assumes you have created a graph somehow
-vertex_t source;                 // Initialized by user
-
-rmm::device_uvector<weight_t> distances_v(graph_view.number_of_vertices(), handle.get_stream());
-rmm::device_uvector<vertex_t> predecessors_v(graph_view.number_of_vertices(), handle.get_stream());
-
-cugraph::sssp(handle, graph_view, distances_v.begin(), predecessors_v.begin(), source, std::numeric_limits<weight_t>::max(), false);
-```
-
-## BFS
-
-The unit test code is the best place to search for examples on calling BFS.
-
- * [SG Implementation](https://github.com/rapidsai/cugraph/blob/main/cpp/tests/traversal/bfs_test.cpp)
 * [MG Implementation](https://github.com/rapidsai/cugraph/blob/main/cpp/tests/traversal/mg_bfs_test.cpp)
-
-## Simple BFS
-
-The example assumes that you create an SG or MG graph somehow. The caller must create the distances and predecessors vectors in device memory and pass the raw pointers to those vectors into the BFS function.
-
-```cpp
-#include <cugraph/algorithms.hpp>
-...
-using vertex_t = int32_t;        // or int64_t, whichever is appropriate
-using weight_t = float;          // or double, whichever is appropriate
-using result_t = weight_t;       // could specify float or double also
-raft::handle_t handle;           // Must be configured if MG
-auto graph_view = graph.view();  // assumes you have created a graph somehow
-vertex_t source;                 // Initialized by user
-
-rmm::device_uvector<vertex_t> distances_v(graph_view.number_of_vertices(), handle.get_stream());
-rmm::device_uvector<vertex_t> predecessors_v(graph_view.number_of_vertices(), handle.get_stream());
-
-cugraph::bfs(handle, graph_view, distances_v.begin(), predecessors_v.begin(), source, false, std::numeric_limits<vertex_t>::max(), false);
-```
diff --git a/docs/cugraph/source/graph_support/algorithms/degree_centrality.md b/docs/cugraph/source/graph_support/algorithms/degree_centrality.md deleted file mode 100644 index 9253c665fb0..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/degree_centrality.md +++ /dev/null @@ -1,32 +0,0 @@
-# Degree Centrality
-Degree centrality is the simplest measure of relative importance, based on counting the connections to each vertex. Vertices with the most connections are the most central by this measure.
-
-See [Degree Centrality on Wikipedia](https://en.wikipedia.org/wiki/Degree_centrality) for more details on the algorithm.
-
-The degree centrality of a vertex 𝑣 is the number of edges incident on that vertex:
-
-$C_D(v) = \deg(v)$
-
-## When to use Degree Centrality
-* When you need a quick identification of important nodes in simply structured data.
-* In cases like collaboration networks where all links have equal importance.
-* In many biological and transportation networks, the sheer number of connections is important for identifying critical nodes, whether they are proteins or airports.
-* In huge graphs, degree centrality is the quickest centrality measure to compute.
-
-## When not to use Degree Centrality
-* When weights, edge direction, or edge types matter.
-* Graphs with self loops.
-* Multi-graphs (graphs with multiple edges between the same two nodes).
-* In general, degree centrality falls short in most cases where the data is complex or nuanced.
-
-## How computationally expensive is it?
-While cuGraph's parallelism mitigates run time, [Big O notation](https://en.wikipedia.org/wiki/Big_O_notation) is still the standard to compare algorithm costs.
-
-The cost of Degree Centrality is O(n) where n is the number of nodes.
-___
-Copyright (c) 2023, NVIDIA CORPORATION.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-___
diff --git a/docs/cugraph/source/graph_support/algorithms/eigenvector_centrality.md b/docs/cugraph/source/graph_support/algorithms/eigenvector_centrality.md deleted file mode 100644 index 8a9c7c7c767..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/eigenvector_centrality.md +++ /dev/null @@ -1,41 +0,0 @@
-# Eigenvector Centrality
-
-Eigenvector centrality computes the centrality for a vertex based on the
-centrality of its neighbors.
The eigenvector centrality of a vertex measures influence within the graph by taking into account the vertex's connections to other highly connected vertices.
-
-
-See [Eigenvector Centrality on Wikipedia](https://en.wikipedia.org/wiki/Eigenvector_centrality) for more details on the algorithm.
-
-The eigenvector centrality for node i is the
-i-th element of the vector x defined by the eigenvector equation:
-
-$x_i = \frac{1}{\lambda} \sum_{j \in M(i)} x_j$
-
-where M(i) is the set of neighbors of node i and λ is a constant.
-
-[Learn more about Eigenvector Centrality](https://www.sci.unich.it/~francesc/teaching/network/eigenvector.html)
-
-## When to use Eigenvector Centrality
-* When the quality and quantity of edges matter; in other words, when connections to other high-degree nodes are important.
-* To calculate influence in nuanced networks like social and financial networks.
-
-## When not to use Eigenvector Centrality
-* In graphs with many disconnected groups.
-* In graphs containing many distinct and different communities.
-* In networks with negative weights.
-* In huge networks, eigenvector centrality can become computationally infeasible on single-threaded systems.
-
-
-## How computationally expensive is it?
-While cuGraph's parallelism mitigates run time, [Big O notation](https://en.wikipedia.org/wiki/Big_O_notation) is still the standard to compare algorithm costs.
-
-The cost is O(VE), where V is the number of vertices (nodes) and E is the number of edges.
-
-___
-Copyright (c) 2023, NVIDIA CORPORATION.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-___
diff --git a/docs/cugraph/source/graph_support/algorithms/jaccard_similarity.md b/docs/cugraph/source/graph_support/algorithms/jaccard_similarity.md deleted file mode 100644 index dde98d71ea3..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/jaccard_similarity.md +++ /dev/null @@ -1,34 +0,0 @@
-# Jaccard Similarity
-
-The Jaccard similarity between two sets is defined as the ratio of the volume of their intersection divided by the volume of their union.
-
-The Jaccard Similarity can then be defined as
-
-Jaccard similarity coefficient = $\frac{|A \cap B|}{|A \cup B|}$
-
-In graphs, the sets refer to the set of connected nodes or the neighborhood of nodes A and B.
-
-[Learn more about Jaccard Similarity](https://en.wikipedia.org/wiki/Jaccard_index)
-
-## When to use Jaccard Similarity
-* You want to find whether two nodes in a graph are in similar communities.
-* You want to compare the structure of two graphs.
-* You have a set of graphs and want to classify them as particular types.
-
-## When not to use Jaccard Similarity
-* In directed graphs.
-* In very large sparse graphs.
-* Graphs with large disparities in node degrees.
-
-## How computationally expensive is it?
-While cuGraph's parallelism mitigates run cost, [Big O notation](https://en.wikipedia.org/wiki/Big_O_notation) is still the standard to compare algorithm costs.
-
-The cost of calculating the Jaccard Similarity for a graph is O(d * n) where d is the average degree of the nodes and n is the number of nodes.
-
-___
-Copyright (c) 2023, NVIDIA CORPORATION.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-___
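For readers working from the Python API, here is a minimal sketch of computing Jaccard scores with cuGraph on the built-in karate dataset used elsewhere in these docs. The default vertex-pair selection and the `jaccard_coeff` column name are our assumptions about the Python API and may vary by release.

```python
import cugraph
from cugraph.datasets import karate

G = karate.get_graph()

# Compute Jaccard similarity scores; with no vertex pairs supplied, cuGraph
# scores pairs of vertices that share at least one neighbor
jaccard_df = cugraph.jaccard(G)

# Most similar vertex pairs first
print(jaccard_df.sort_values(by="jaccard_coeff", ascending=False).head(3))
```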
diff --git a/docs/cugraph/source/graph_support/algorithms/katz_centrality.md b/docs/cugraph/source/graph_support/algorithms/katz_centrality.md deleted file mode 100644 index 69b5d6b27b9..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/katz_centrality.md +++ /dev/null @@ -1,28 +0,0 @@
-# Katz Centrality
-
-Katz centrality is a measure of the relative importance of a vertex within the graph, based on measuring the influence across the total number of walks between vertex pairs. Katz is similar to Eigenvector centrality. The main difference is that Katz also takes into account indirect relationships. The Katz calculation includes a user-controlled attenuation variable that controls the weight of indirect relationships. Otherwise it shares many of the advantages and disadvantages of Eigenvector centrality.
-
-$C_{katz}(i) = \sum_{k=1}^{\infty} \sum_{j=1}^{n} \alpha ^k(A^k)_{ji}$
-
-See [Katz on Wikipedia](https://en.wikipedia.org/wiki/Katz_centrality) for more details on the algorithm.
-
-## When to use Katz Centrality
-* In disconnected graphs.
-* In sparse graphs.
-* In graphs with multi-hop propagation, like the spread of innovation.
-
-## When not to use Katz Centrality
-* In graphs with heavy cyclical dependencies (feedback loops), Katz centrality might not converge, preventing usable results.
-* When a graph contains multiple distinct influence factors, Katz can blur them together.
-* Katz is very expensive, so its use on large graphs depends on cuGraph's parallelism to be viable.
-
-## How computationally expensive is it?
-Katz centrality has several stages with costs that add up as the graph gets larger. The overall cost is often O(n^2) to O(n^3) where n is the number of nodes.
-
-___
-Copyright (c) 2023, NVIDIA CORPORATION.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-___
diff --git a/docs/cugraph/source/graph_support/algorithms/overlap_similarity.md b/docs/cugraph/source/graph_support/algorithms/overlap_similarity.md deleted file mode 100644 index d9f9f681ea2..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/overlap_similarity.md +++ /dev/null @@ -1,32 +0,0 @@
-# Overlap Similarity
-
-The Overlap Coefficient, also known as the Szymkiewicz–Simpson coefficient, between two sets is defined as the ratio of the volume of their intersection divided by the volume of the smaller set.
-The Overlap Coefficient can be defined as
-
-$overlap(A,B) = \frac{|A \cap B|}{min(|A|,|B|)}$
-
-[Learn more about Overlap Similarity](https://en.wikipedia.org/wiki/Overlap_coefficient)
-
-## When to use Overlap Similarity
-* You want to find similarity based on shared neighbors instead of the sets as a whole.
-* You want to partition a graph into non-overlapping clusters.
-* You want to compare subgraphs within a graph.
-
-## When not to use Overlap Similarity
-* You are trying to compare graphs of extremely different sizes.
-* In overly sparse or dense graphs, overlap similarity can miss relationships or give false positives, respectively.
-* In directed graphs, there are better algorithms to use.
-
-
-## How computationally expensive is it?
-While cuGraph's parallelism mitigates run time, [Big O notation](https://en.wikipedia.org/wiki/Big_O_notation) is still the standard to compare algorithm costs.
-
-The cost to compute overlap similarity is O(n*d) where n is the number of nodes and d is the average degree of the nodes.
-
-___
-Copyright (c) 2023, NVIDIA CORPORATION.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-___
diff --git a/docs/cugraph/source/graph_support/algorithms/sorensen_coefficient.md b/docs/cugraph/source/graph_support/algorithms/sorensen_coefficient.md deleted file mode 100644 index 67d981815cf..00000000000 --- a/docs/cugraph/source/graph_support/algorithms/sorensen_coefficient.md +++ /dev/null @@ -1,34 +0,0 @@
-# Sørensen Coefficient
-
-The Sørensen Coefficient, also called the Sørensen-Dice similarity coefficient, quantifies the similarity and overlap between two samples.
-
-It is defined as two times the size of the set intersection divided by the sum of the sizes of the two sets. The value ranges from 0 to 1.
-
-Sørensen coefficient = $\left(2 * |A \cap B| \right) \over \left(|A| + |B| \right)$
-
-In graphs, the sets refer to the set of connected nodes or the neighborhood of nodes A and B.
-
-[Learn more about Sørensen Coefficient](https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient)
-
-## When to use Sørensen Coefficient
-* When you want to compare nodes with vastly different-sized neighborhoods.
-* When the intersection of the node neighborhoods is more important than the overall similarity.
-
-
-## When not to use Sørensen Coefficient
-* In directed graphs.
-* Comparing graphs with different underlying data relationships.
-* In weighted graphs; while cuGraph does have a weighted Sørensen implementation, the algorithm did not originally use weights.
-
-## How computationally expensive is it?
-While cuGraph's parallelism mitigates run cost, [Big O notation](https://en.wikipedia.org/wiki/Big_O_notation) is still the standard to compare algorithm execution time.
-The cost to run is O(n * m), where n is the number of nodes in the graph and m is the number of groups to test.
-
-___
-Copyright (c) 2023, NVIDIA CORPORATION.
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -___ diff --git a/docs/cugraph/source/graph_support/compatibility.rst b/docs/cugraph/source/graph_support/compatibility.rst deleted file mode 100644 index ddb3f1d5fe5..00000000000 --- a/docs/cugraph/source/graph_support/compatibility.rst +++ /dev/null @@ -1,8 +0,0 @@ -Compatibility -============= - - -.. toctree:: - :maxdepth: 3 - -Compatibility document coming soon diff --git a/docs/cugraph/source/graph_support/cugraph_service.rst b/docs/cugraph/source/graph_support/cugraph_service.rst deleted file mode 100644 index 658f9e731df..00000000000 --- a/docs/cugraph/source/graph_support/cugraph_service.rst +++ /dev/null @@ -1,9 +0,0 @@ -=============== -CuGraph Service -=============== - - -.. toctree:: - :maxdepth: 3 - -Cugraph Service for remote access to a server-based cuGraph(https://github.com/rapidsai/cugraph/blob/branch-23.04/python/cugraph-service/README.md) diff --git a/docs/cugraph/source/graph_support/datastores.rst b/docs/cugraph/source/graph_support/datastores.rst deleted file mode 100644 index 50d8461e7fe..00000000000 --- a/docs/cugraph/source/graph_support/datastores.rst +++ /dev/null @@ -1,8 +0,0 @@ -Data Stores -=========== -.. toctree:: - :maxdepth: 3 - - property_graph.md - knowledge_stores.md - feature_stores.md diff --git a/docs/cugraph/source/graph_support/feature_stores.md b/docs/cugraph/source/graph_support/feature_stores.md deleted file mode 100644 index ef9358c4cf8..00000000000 --- a/docs/cugraph/source/graph_support/feature_stores.md +++ /dev/null @@ -1,3 +0,0 @@ -# Feature Store - -Coming Soon diff --git a/docs/cugraph/source/graph_support/gnn_support.rst b/docs/cugraph/source/graph_support/gnn_support.rst deleted file mode 100644 index 71586621608..00000000000 --- a/docs/cugraph/source/graph_support/gnn_support.rst +++ /dev/null @@ -1,11 +0,0 @@ -============================ -Graph Neural Network Support -============================ - - -.. toctree:: - :maxdepth: 2 - - PyG_support.md - DGL_support.md - wholegraph_support.md diff --git a/docs/cugraph/source/graph_support/graph_algorithms.rst b/docs/cugraph/source/graph_support/graph_algorithms.rst deleted file mode 100644 index 38dd8ccc25b..00000000000 --- a/docs/cugraph/source/graph_support/graph_algorithms.rst +++ /dev/null @@ -1,8 +0,0 @@ -========== -Algorithms -========== - -.. toctree:: - :maxdepth: 3 - - algorithms.md diff --git a/docs/cugraph/source/graph_support/index.rst b/docs/cugraph/source/graph_support/index.rst deleted file mode 100644 index 67aba74288b..00000000000 --- a/docs/cugraph/source/graph_support/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -============= -Graph Support -============= - - -.. 
toctree:: - :maxdepth: 3 - - graph_algorithms.rst - compatibility.rst - gnn_support.rst - datastores.rst - cugraph_service.rst diff --git a/docs/cugraph/source/graph_support/knowledge_stores.md b/docs/cugraph/source/graph_support/knowledge_stores.md deleted file mode 100644 index 1749eb2b4c1..00000000000 --- a/docs/cugraph/source/graph_support/knowledge_stores.md +++ /dev/null @@ -1,3 +0,0 @@ -# Knowledge Store - -Coming Soon diff --git a/docs/cugraph/source/graph_support/pg_example.png b/docs/cugraph/source/graph_support/pg_example.png deleted file mode 100644 index 5ce8a0f2054..00000000000 Binary files a/docs/cugraph/source/graph_support/pg_example.png and /dev/null differ diff --git a/docs/cugraph/source/graph_support/property_graph.md b/docs/cugraph/source/graph_support/property_graph.md deleted file mode 100644 index 94d170c18df..00000000000 --- a/docs/cugraph/source/graph_support/property_graph.md +++ /dev/null @@ -1,50 +0,0 @@ -# Property Graph - -Part of [RAPIDS](https://rapids.ai) cuGraph, Property Graph allows all the great benefits of cuGraph to be applied to property-rich datasets stored in a graph structure. A Property Graph is really a data model rather than a type of graph. Within the cuGraph ecosystem, a Property Graph is a meta-graph that can encapsulate and instantiate all the other graph types. That view stems from property graphs being originally created for database systems. Conceptually a Property Graph can be viewed as a property rich structure that can be projected onto any graph types. The Dataversity, has a good definition of [Property Graph](https://www.dataversity.net/what-is-a-property-graph) which contains definitions from a collection of resources. - -![Sample Property Graph](../images/pg_example.png) - - -Property Graph enables: - -* Multiple edge and node types as seen in the Property Graph API -* Subgraph extractions based on properties and/or edge and node types as seen below. -* Storage of properties either within the graph structure on gpu or using GNN-centric storage extensions on host storage. -* Adding additional properties, nodes and edges into the property graph to store derived data like analytic results. -* Client access managed by a remote server allowing shared access and remote operations using [CuGraph Service](./cugraph_service.md). - -This is an example of using the cuGraph Property Graph in a two stage analysis. - -``` -import cudf -import cugraph -from cugraph.experimental import PropertyGraph - -# Import a built-in dataset -from cugraph.datasets import karate - -# Read edgelist data into a DataFrame, load into PropertyGraph as edge data. -# Create a graph using the imported Dataset object -graph = cugraph.Graph(directed=False) -G = karate.get_graph(create_using=graph,fetch=True) - -df = G.edgelist.edgelist_df -pG = PropertyGraph() -pG. add_edge_data(df, vertex_col_names=("src", "dst")) - -# Run Louvain to get the partition number for each vertex. -# Set resolution accordingly to identify two primary partitions. -(partition_info, _) = cugraph.louvain(pG.extract_subgraph(create_using=graph), resolution=0.6) - -# Add the partition numbers back to the Property Graph as vertex properties -pG.add_vertex_data(partition_info, vertex_col_name="vertex") - -# Use the partition properties to extract a Graph for each partition. -G0 = pG.extract_subgraph(selection=pG.select_vertices("partition == 0")) -G1 = pG.extract_subgraph(selection=pG. select_vertices("partition == 1")) -# Run pagerank on each graph, print results. 
-pageranks0 = cugraph.pagerank(G0) -pageranks1 = cugraph.pagerank(G1) -print(pageranks0.sort_values (by="pagerank", ascending=False).head(3)) -print(pageranks1.sort_values (by="pagerank", ascending=False).head(3)) -``` diff --git a/docs/cugraph/source/graph_support/wholegraph_support.md b/docs/cugraph/source/graph_support/wholegraph_support.md deleted file mode 100644 index d1c5eaf7254..00000000000 --- a/docs/cugraph/source/graph_support/wholegraph_support.md +++ /dev/null @@ -1,4 +0,0 @@ -# WholeGraph - -[RAPIDS](https://rapids.ai) [WholeGraph](https://github.com/rapidsai/wholegraph) is designed to help train large-scale Graph Neural Networks(GNN). -Please see [WholeGraph Introduction](https://github.com/rapidsai/wholegraph/blob/main/README.md) for more details diff --git a/docs/cugraph/source/images/Nx_Cg_1.png b/docs/cugraph/source/images/Nx_Cg_1.png deleted file mode 100644 index 6d29f76ad4a..00000000000 Binary files a/docs/cugraph/source/images/Nx_Cg_1.png and /dev/null differ diff --git a/docs/cugraph/source/images/Nx_Cg_2.png b/docs/cugraph/source/images/Nx_Cg_2.png deleted file mode 100644 index f8f68538668..00000000000 Binary files a/docs/cugraph/source/images/Nx_Cg_2.png and /dev/null differ diff --git a/docs/cugraph/source/images/Stack2.png b/docs/cugraph/source/images/Stack2.png deleted file mode 100644 index 132e85c9d15..00000000000 Binary files a/docs/cugraph/source/images/Stack2.png and /dev/null differ diff --git a/docs/cugraph/source/images/ancestors.png b/docs/cugraph/source/images/ancestors.png deleted file mode 100644 index 37b8e7933a8..00000000000 Binary files a/docs/cugraph/source/images/ancestors.png and /dev/null differ diff --git a/docs/cugraph/source/images/bfs_tree.png b/docs/cugraph/source/images/bfs_tree.png deleted file mode 100644 index 5bca39ca3bf..00000000000 Binary files a/docs/cugraph/source/images/bfs_tree.png and /dev/null differ diff --git a/docs/cugraph/source/images/conn_component.png b/docs/cugraph/source/images/conn_component.png deleted file mode 100644 index b7db09657c8..00000000000 Binary files a/docs/cugraph/source/images/conn_component.png and /dev/null differ diff --git a/docs/cugraph/source/images/cugraph_logo_2.png b/docs/cugraph/source/images/cugraph_logo_2.png deleted file mode 100644 index 62dd79c4b98..00000000000 Binary files a/docs/cugraph/source/images/cugraph_logo_2.png and /dev/null differ diff --git a/docs/cugraph/source/images/descendents.png b/docs/cugraph/source/images/descendents.png deleted file mode 100644 index 8afc38b4ef4..00000000000 Binary files a/docs/cugraph/source/images/descendents.png and /dev/null differ diff --git a/docs/cugraph/source/images/k_truss.png b/docs/cugraph/source/images/k_truss.png deleted file mode 100644 index 78a1978d103..00000000000 Binary files a/docs/cugraph/source/images/k_truss.png and /dev/null differ diff --git a/docs/cugraph/source/images/katz.png b/docs/cugraph/source/images/katz.png deleted file mode 100644 index 9f2303a21e3..00000000000 Binary files a/docs/cugraph/source/images/katz.png and /dev/null differ diff --git a/docs/cugraph/source/images/pagerank.png b/docs/cugraph/source/images/pagerank.png deleted file mode 100644 index 193c0a8bbd1..00000000000 Binary files a/docs/cugraph/source/images/pagerank.png and /dev/null differ diff --git a/docs/cugraph/source/images/pg_example.png b/docs/cugraph/source/images/pg_example.png deleted file mode 100644 index 5ce8a0f2054..00000000000 Binary files a/docs/cugraph/source/images/pg_example.png and /dev/null differ diff --git 
a/docs/cugraph/source/images/sssp.png b/docs/cugraph/source/images/sssp.png deleted file mode 100644 index 2c9dfc36852..00000000000 Binary files a/docs/cugraph/source/images/sssp.png and /dev/null differ diff --git a/docs/cugraph/source/images/wcc.png b/docs/cugraph/source/images/wcc.png deleted file mode 100644 index 2d27a3f675c..00000000000 Binary files a/docs/cugraph/source/images/wcc.png and /dev/null differ diff --git a/docs/cugraph/source/images/zachary_graph_centrality.png b/docs/cugraph/source/images/zachary_graph_centrality.png deleted file mode 100644 index 54a91314d26..00000000000 Binary files a/docs/cugraph/source/images/zachary_graph_centrality.png and /dev/null differ diff --git a/docs/cugraph/source/index.rst b/docs/cugraph/source/index.rst deleted file mode 100644 index 0db1860b2b9..00000000000 --- a/docs/cugraph/source/index.rst +++ /dev/null @@ -1,100 +0,0 @@ -RAPIDS Graph documentation -========================== - -.. image:: images/cugraph_logo_2.png - :width: 600 - - -~~~~~~~~~~~~ -Introduction -~~~~~~~~~~~~ -cuGraph is a library of graph algorithms that seamlessly integrates into the -RAPIDS data science ecosystem and allows the data scientist to easily call -graph algorithms using data stored in cuDF/Pandas DataFrames or CuPy/SciPy -sparse matrices. - ---------------------------- -cuGraph Using NetworkX Code ---------------------------- - -cuGraph is now available as a NetworkX backend using `nx-cugraph `_. -Our major integration effort with NetworkX offers NetworkX users a **zero code change** option to accelerate -their existing NetworkX code using an NVIDIA GPU and cuGraph. - -Check out `zero code change accelerated NetworkX `_. If you would like to continue using standard cuGraph, then continue down below. - ----------------------------- -Getting started with cuGraph ----------------------------- - -Required hardware/software for cuGraph and `RAPIDS `_ - * NVIDIA GPU, Volta architecture or later, with `compute capability 7.0+ `_ - * CUDA 11.2-11.8, 12.0-12.5 - * Python version 3.10, 3.11, or 3.12 - -++++++++++++ -Installation -++++++++++++ - -Please see the latest `RAPIDS System Requirements documentation `_. - -This includes several ways to set up cuGraph - -* From Unix - - * `Conda `_ - * `Docker `_ - * `pip `_ - - -**Note: Windows use of RAPIDS depends on prior installation of** `WSL2 `_. - -* From Windows - - * `Conda `_ - * `Docker `_ - * `pip `_ - - Cugraph API Example - - .. code-block:: python - - import cugraph - import cudf - - # Create an instance of the popular Zachary Karate Club graph - from cugraph.datasets import karate - G = karate.get_graph() - - # Call cugraph.degree_centrality - vertex_bc = cugraph.degree_centrality(G) - - There are several resources containing cuGraph examples, the cuGraph `notebook repository `_ has many examples of loading graph data and running algorithms in Jupyter notebooks. - The cuGraph `test code `_ contains script examples of setting up and calling cuGraph algorithms. - - A simple example of `testing the degree centrality algorithm `_ is a good place to start. There are also `multi-GPU examples `_ with larger data sets as well. - ----- - -~~~~~~~~~~~~~~~~~ -Table of Contents -~~~~~~~~~~~~~~~~~ - -.. 
toctree:: - :maxdepth: 2 - - basics/index - nx_cugraph/index - installation/index - tutorials/index - graph_support/index - wholegraph/index - references/index - api_docs/index - -~~~~~~~~~~~~~~~~~~ -Indices and tables -~~~~~~~~~~~~~~~~~~ - -* :ref:`genindex` -* :ref:`search` diff --git a/docs/cugraph/source/installation/getting_cugraph.md b/docs/cugraph/source/installation/getting_cugraph.md deleted file mode 100644 index 0c553acf964..00000000000 --- a/docs/cugraph/source/installation/getting_cugraph.md +++ /dev/null @@ -1,65 +0,0 @@ - -# Getting cuGraph Packages - -Start by reading the [RAPIDS Installation guide](https://docs.rapids.ai/install) -and check out the [RAPIDS install selector](https://rapids.ai/start.html) for a pick list of install options. - - -There are 4 ways to get cuGraph packages: -1. [Quick start with Docker Repo](#docker) -2. [Conda Installation](#conda) -3. [Pip Installation](#pip) -4. [Build from Source](./source_build.md) - - -<br>
- -## Docker -The RAPIDS Docker containers contain all RAPIDS packages, including all of the cuGraph packages, as well as all required supporting packages. To download a RAPIDS container, please see the [Docker Hub page for rapidsai/base](https://hub.docker.com/r/rapidsai/base), choosing a tag based on the NVIDIA CUDA version you're running. Also, the [rapidsai/notebooks](https://hub.docker.com/r/rapidsai/notebooks) container provides a ready-to-run Docker container with example notebooks and data, showcasing how you can utilize all of the RAPIDS libraries: cuDF, cuML, and cuGraph. - -<br>
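As a concrete illustration of the paragraph above, the sketch below pulls and starts a `rapidsai/base` container; the tag shown is only an assumed example, so pick a real tag from the Docker Hub page that matches your CUDA and Python versions.

```bash
# Example only -- the tag below is an assumption; choose an actual tag from
# https://hub.docker.com/r/rapidsai/base for your CUDA/Python combination.
docker pull rapidsai/base:24.10-cuda12.5-py3.12

# --gpus all exposes the host's NVIDIA GPUs to the container
# (requires the NVIDIA Container Toolkit on the host).
docker run --gpus all --rm -it rapidsai/base:24.10-cuda12.5-py3.12
```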
- - -## Conda -It is easy to install cuGraph using conda. You can get a minimal conda installation with [miniforge](https://github.com/conda-forge/miniforge). - -cuGraph Conda packages - * cugraph - this will also install: - * pylibcugraph - * libcugraph - * cugraph-service-client - * cugraph-service-server - * cugraph-dgl - * cugraph-pyg - * nx-cugraph - -Replace the package name in the example below with the one you want to install. - - -Install and update cuGraph using the conda command: - -```bash -conda install -c rapidsai -c conda-forge -c nvidia cugraph cuda-version=12.0 -``` - -Alternatively, use `cuda-version=11.8` for packages supporting CUDA 11. - -Note: This conda installation only applies to Linux and Python versions 3.10/3.11/3.12. - -<br>
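As a hedged example of using the command above, one common workflow is to install cuGraph into a dedicated environment instead of the base environment; the environment name `cugraph_env` is arbitrary.

```bash
# Create a fresh environment and install cuGraph into it, using the same
# channels and cuda-version pin as the install command above.
conda create -n cugraph_env -c rapidsai -c conda-forge -c nvidia cugraph cuda-version=12.0

# Activate it before using cuGraph.
conda activate cugraph_env
```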
- -## PIP -cuGraph, and all of RAPIDS, is available via pip. - -``` -pip install cugraph-cu12 --extra-index-url=https://pypi.nvidia.com -``` - -Replace `-cu12` with `-cu11` for packages supporting CUDA 11. - -Also available: - * cugraph-dgl-cu12 - * cugraph-pyg-cu12 - * nx-cugraph-cu12 - -
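Whichever install path you choose, a quick import check is a simple way to confirm the package landed correctly; this is just a minimal sanity check, not an official verification step.

```bash
# Should print the installed cuGraph version without raising an ImportError.
python -c "import cugraph; print(cugraph.__version__)"
```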
diff --git a/docs/cugraph/source/installation/index.rst b/docs/cugraph/source/installation/index.rst deleted file mode 100644 index 8ad12c5895b..00000000000 --- a/docs/cugraph/source/installation/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -============ -Installation -============ - - -.. toctree:: - :maxdepth: 3 - - getting_cugraph - source_build diff --git a/docs/cugraph/source/installation/source_build.md b/docs/cugraph/source/installation/source_build.md deleted file mode 100644 index 243a62e5c81..00000000000 --- a/docs/cugraph/source/installation/source_build.md +++ /dev/null @@ -1,222 +0,0 @@ -# Building from Source - -These instructions are tested on supported versions/distributions of Linux, -CUDA, and Python - See [RAPIDS Getting Started](https://rapids.ai/start.html) -for the list of supported environments. Other environments _might be_ -compatible, but are not currently tested. - -## Prerequisites - -__Compilers:__ -* `gcc` version 9.3+ -* `nvcc` version 11.5+ - -__CUDA:__ -* CUDA 11.8+ -* NVIDIA GPU, Volta architecture or later, with [compute capability](https://developer.nvidia.com/cuda-gpus) 7.0+ - -Further details and download links for these prerequisites are available on the -[RAPIDS System Requirements page](https://docs.rapids.ai/install#system-req). - -## Setting up the development environment - -### Clone the repository: -```bash -CUGRAPH_HOME=$(pwd)/cugraph -git clone https://github.com/rapidsai/cugraph.git $CUGRAPH_HOME -cd $CUGRAPH_HOME -``` - -### Create the conda environment - -Using conda is the easiest way to install both the build and runtime -dependencies for cugraph. While it is possible to build and run cugraph without -conda, the required packages occasionally change, making it difficult to -document here. The best way to see the current dependencies needed for a build -and run environment is to examine the list of packages in the [conda -environment YAML -files](https://github.com/rapidsai/cugraph/blob/main/conda/environments). - -```bash -# for CUDA 11.x -conda env create --name cugraph_dev --file $CUGRAPH_HOME/conda/environments/all_cuda-118_arch-x86_64.yaml - -# for CUDA 12.x -conda env create --name cugraph_dev --file $CUGRAPH_HOME/conda/environments/all_cuda-125_arch-x86_64.yaml - - -# activate the environment -conda activate cugraph_dev - -# to deactivate an environment -conda deactivate -``` - -The environment can be updated as cugraph adds/removes/updates its dependencies. To do so, run: - -```bash -# for CUDA 11.x -conda env update --name cugraph_dev --file $CUGRAPH_HOME/conda/environments/all_cuda-118_arch-x86_64.yaml -conda activate cugraph_dev - -# for CUDA 12.x -conda env update --name cugraph_dev --file $CUGRAPH_HOME/conda/environments/all_cuda-125_arch-x86_64.yaml -conda activate cugraph_dev - - - -``` - -### Build and Install - -#### Build and install using `build.sh` -Using the `build.sh` script, located in the `$CUGRAPH_HOME` directory, is the -recommended way to build and install the cugraph libraries. By default, -`build.sh` will build and install a predefined set of targets -(packages/libraries), but can also accept a list of targets to build. - -For example, to build only the cugraph C++ library (`libcugraph`) and the -high-level python library (`cugraph`) without building the C++ test binaries, -run this command: - -```bash -$ cd $CUGRAPH_HOME -$ ./build.sh libcugraph pylibcugraph cugraph --skip_cpp_tests -``` - -There are several other options available on the build script for advanced -users. 
Refer to the output of `--help` for details. - -Note that libraries will be installed to the location set in `$PREFIX` if set -(i.e. `export PREFIX=/install/path`), otherwise to `$CONDA_PREFIX`. - -#### Updating the RAFT branch - -`libcugraph` uses the [RAFT](https://github.com/rapidsai/raft) library and -there are times when it might be desirable to build against a different RAFT -branch, such as when working on new features that might span both RAFT and -cuGraph. - -For local development, the `CPM_raft_SOURCE=` option can -be passed to the `cmake` command to enable `libcugraph` to use the local RAFT -branch. The `build.sh` script calls `cmake` to build the C/C++ targets, but -developers can call `cmake` directly in order to pass it options like those -described here. Refer to the `build.sh` script to see how to call `cmake` and -other commands directly. - -To have CI test a `cugraph` pull request against a different RAFT branch, -modify the bottom of the `cpp/cmake/thirdparty/get_raft.cmake` file as follows: - -```cmake -# Change pinned tag and fork here to test a commit in CI -# To use a different RAFT locally, set the CMake variable -# RPM_raft_SOURCE=/path/to/local/raft -find_and_configure_raft(VERSION ${CUGRAPH_MIN_VERSION_raft} - FORK - PINNED_TAG - - # When PINNED_TAG above doesn't match cugraph, - # force local raft clone in build directory - # even if it's already installed. - CLONE_ON_PIN ON - ) -``` - -When the above change is pushed to a pull request, the continuous integration -servers will use the specified RAFT branch to run the cuGraph tests. After the -changes in the RAFT branch are merged to the release branch, remember to revert -the `get_raft.cmake` file back to the original cuGraph branch. - - -## Run tests - -If you already have the datasets: - - ```bash - export RAPIDS_DATASET_ROOT_DIR= - ``` - If you do not have the datasets: - - ```bash - cd $CUGRAPH_HOME/datasets - source get_test_data.sh #This takes about 10 minutes and downloads 1GB data (>5 GB uncompressed) - ``` - -Run either the C++ or the Python tests with datasets - - - **Python tests with datasets** - - - ```bash - pip install python-louvain #some tests require this package to run - cd $CUGRAPH_HOME - cd python - pytest - ``` - - **C++ stand alone tests** - - From the build directory : - - ```bash - # Run the cugraph tests - cd $CUGRAPH_HOME - cd cpp/build - gtests/GDFGRAPH_TEST # this is an executable file - ``` - - **C++ tests with larger datasets** - - - - Run the C++ tests on large input: - - ```bash - cd $CUGRAPH_HOME/cpp/build - #test one particular analytics (eg. pagerank) - gtests/PAGERANK_TEST - #test everything - make test - ``` - -Note: This conda installation only applies to Linux and Python versions 3.10, 3.11, and 3.12. - -### (OPTIONAL) Set environment variable on activation - -It is possible to configure the conda environment to set environment variables -on activation. Providing instructions to set PATH to include the CUDA toolkit -bin directory and LD_LIBRARY_PATH to include the CUDA lib64 directory will be -helpful. 
- -```bash -cd ~/anaconda3/envs/cugraph_dev - -mkdir -p ./etc/conda/activate.d -mkdir -p ./etc/conda/deactivate.d -touch ./etc/conda/activate.d/env_vars.sh -touch ./etc/conda/deactivate.d/env_vars.sh -``` - -Next the env_vars.sh file needs to be edited - -```bash -vi ./etc/conda/activate.d/env_vars.sh - -#!/bin/bash -export PATH=/usr/local/cuda-11.0/bin:$PATH # or cuda-11.1 if using CUDA 11.1 and cuda-11.2 if using CUDA 11.2, respectively -export LD_LIBRARY_PATH=/usr/local/cuda-11.0/lib64:$LD_LIBRARY_PATH # or cuda-11.1 if using CUDA 11.1 and cuda-11.2 if using CUDA 11.2, respectively -``` - -``` -vi ./etc/conda/deactivate.d/env_vars.sh - -#!/bin/bash -unset PATH -unset LD_LIBRARY_PATH -``` - -## Creating documentation - -Python API documentation can be generated from _./docs/cugraph directory_. Or -through using "./build.sh docs" - -## Attribution -Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md diff --git a/docs/cugraph/source/nx_cugraph/benchmarks.md b/docs/cugraph/source/nx_cugraph/benchmarks.md deleted file mode 100644 index 9e0718159fa..00000000000 --- a/docs/cugraph/source/nx_cugraph/benchmarks.md +++ /dev/null @@ -1,26 +0,0 @@ -# Benchmarks - -## NetworkX vs. nx-cugraph -We ran several commonly used graph algorithms on both `networkx` and `nx-cugraph`. Here are the results - - -
- -![bench-image](../_static/bc_benchmark.png) - -
Results from running this Benchmark
-
- -## Reproducing Benchmarks - -Below are the steps to reproduce the results on your own. - -1. Clone the latest - -2. Follow the instructions to build and activate an environment - -4. Install the latest `nx-cugraph` by following the [Installation Guide](installation.md) - -5. Follow the instructions written in the README [here](https://github.com/rapidsai/nx-cugraph/blob/HEAD/benchmarks/nx-cugraph/pytest-based/README.md) diff --git a/docs/cugraph/source/nx_cugraph/how-it-works.md b/docs/cugraph/source/nx_cugraph/how-it-works.md deleted file mode 100644 index 0061b0445de..00000000000 --- a/docs/cugraph/source/nx_cugraph/how-it-works.md +++ /dev/null @@ -1,113 +0,0 @@ -# How it Works - -NetworkX has the ability to **dispatch function calls to separately-installed third-party backends**. - -NetworkX backends let users experience improved performance and/or additional functionality without changing their NetworkX Python code. Examples include backends that provide algorithm acceleration using GPUs, parallel processing, graph database integration, and more. - -While NetworkX is a pure-Python implementation, backends may be written to use other libraries and even specialized hardware. `nx-cugraph` is a NetworkX backend that uses RAPIDS cuGraph and NVIDIA GPUs to significantly improve NetworkX performance. - -![nxcg-execution-flow](../_static/nxcg-execution-diagram.jpg) - -## Enabling nx-cugraph - -It is recommended to use `networkx>=3.4` for optimal zero code change performance, but `nx-cugraph` will also work with `networkx 3.2+`. - -NetworkX will use `nx-cugraph` as the backend if any of the following are used: - -### `NX_CUGRAPH_AUTOCONFIG` environment variable. - -The `NX_CUGRAPH_AUTOCONFIG` environment variable can be used to configure NetworkX for full zero code change acceleration using `nx-cugraph`. If a NetworkX function is called that `nx-cugraph` supports, NetworkX will redirect the function call to `nx-cugraph` automatically, or fall back to either another backend if enabled or the default NetworkX implementation. See the [NetworkX documentation on backends](https://networkx.org/documentation/stable/reference/backends.html) for configuring NetworkX manually. - -``` -bash> NX_CUGRAPH_AUTOCONFIG=True python my_networkx_script.py -``` - -### `backend=` keyword argument - -To explicitly specify a particular backend for an API, use the `backend=` -keyword argument. This argument takes precedence over the -`NX_CUGRAPH_AUTOCONFIG` environment variable. This requires anyone -running code that uses the `backend=` keyword argument to have the specified -backend installed. - -Example: -```python -nx.betweenness_centrality(cit_patents_graph, k=k, backend="cugraph") -``` - -### Type-based dispatching - -NetworkX also supports automatically dispatching to backends associated with -specific graph types. Like the `backend=` keyword argument example above, this -requires the user to write code for a specific backend, and therefore requires -the backend to be installed, but has the advantage of ensuring a particular -behavior without the potential for runtime conversions. - -To use type-based dispatching with `nx-cugraph`, the user must import the backend -directly in their code to access the utilities provided to create a Graph -instance specifically for the `nx-cugraph` backend. - -Example: -```python -import networkx as nx -import nx_cugraph as nxcg - -G = nx.Graph() - -# populate the graph -# ... 
- -nxcg_G = nxcg.from_networkx(G) # conversion happens once here -nx.betweenness_centrality(nxcg_G, k=1000) # nxcg Graph type causes cugraph backend - # to be used, no conversion necessary -``` - -## Command Line Example - ---- - -Create `bc_demo.ipy` and paste the code below. - -```python -import pandas as pd -import networkx as nx - -url = "https://data.rapids.ai/cugraph/datasets/cit-Patents.csv" -df = pd.read_csv(url, sep=" ", names=["src", "dst"], dtype="int32") -G = nx.from_pandas_edgelist(df, source="src", target="dst") - -%time result = nx.betweenness_centrality(G, k=10) -``` -Run the command: -``` -user@machine:/# ipython bc_demo.ipy - -CPU times: user 7min 36s, sys: 5.22 s, total: 7min 41s -Wall time: 7min 41s -``` - -You will observe a run time of approximately 7 minutes...more or less depending on your CPU. - -Run the command again, this time specifying cugraph as the NetworkX backend. -```bash -user@machine:/# NX_CUGRAPH_AUTOCONFIG=True ipython bc_demo.ipy - -CPU times: user 4.14 s, sys: 1.13 s, total: 5.27 s -Wall time: 5.32 s -``` -This run will be much faster, typically around 5 seconds depending on your GPU. - -
- -*Note, the examples above were run using the following specs*: - -    *NetworkX 3.4*
-    *nx-cugraph 24.10*
-    *CPU: Intel(R) Xeon(R) Gold 6128 CPU @ 3.40GHz 45GB RAM*
-    *GPU: NVIDIA Quadro RTX 8000 80GB RAM*
- -
- ---- - -The latest list of algorithms supported by `nx-cugraph` can be found in [GitHub](https://github.com/rapidsai/nx-cugraph/blob/HEAD/README.md#supported-algorithms), or in the [Supported Algorithms Section](supported-algorithms.md). diff --git a/docs/cugraph/source/nx_cugraph/index.rst b/docs/cugraph/source/nx_cugraph/index.rst deleted file mode 100644 index 0eb8907b397..00000000000 --- a/docs/cugraph/source/nx_cugraph/index.rst +++ /dev/null @@ -1,66 +0,0 @@ -nx-cugraph ------------ - -``nx-cugraph`` is a NetworkX backend that provides **GPU acceleration** to many popular NetworkX algorithms. - -By simply `installing and enabling nx-cugraph `_, users can see significant speedup on workflows where performance is hindered by the default NetworkX implementation. - -Users can have GPU-based, large-scale performance **without** changing their familiar and easy-to-use NetworkX code. - -.. centered:: Timed result from running the following code snippet (called ``demo.ipy``, showing NetworkX with vs. without ``nx-cugraph``) - -.. code-block:: python - - import pandas as pd - import networkx as nx - - url = "https://data.rapids.ai/cugraph/datasets/cit-Patents.csv" - df = pd.read_csv(url, sep=" ", names=["src", "dst"], dtype="int32") - G = nx.from_pandas_edgelist(df, source="src", target="dst") - - %time result = nx.betweenness_centrality(G, k=10) - - -:: - - user@machine:/# ipython demo.ipy - CPU times: user 7min 36s, sys: 5.22 s, total: 7min 41s - Wall time: 7min 41s - - -:: - - user@machine:/# NX_CUGRAPH_AUTOCONFIG=True ipython demo.ipy - CPU times: user 4.14 s, sys: 1.13 s, total: 5.27 s - Wall time: 5.32 s - - -.. figure:: ../_static/colab.png - :width: 200px - :target: https://nvda.ws/4drM4re - - Try it on Google Colab! - - -+--------------------------------------------------------------------------------------------------------+ -| **Zero Code Change Acceleration** | -| | -| Just set the environment variable ``NX_CUGRAPH_AUTOCONFIG=True`` to enable ``nx-cugraph`` in NetworkX. | -+--------------------------------------------------------------------------------------------------------+ -| **Run the same code on CPU or GPU** | -| | -| Nothing changes, not even your ``import`` statements, when going from CPU to GPU. | -+--------------------------------------------------------------------------------------------------------+ - - -``nx-cugraph`` is now Generally Available (GA) as part of the ``RAPIDS`` package. See `RAPIDS -Quick Start `_ to get up-and-running with ``nx-cugraph``. - -.. toctree:: - :maxdepth: 1 - :caption: Contents: - - how-it-works - installation - supported-algorithms - benchmarks diff --git a/docs/cugraph/source/nx_cugraph/installation.md b/docs/cugraph/source/nx_cugraph/installation.md deleted file mode 100644 index 9675306c47b..00000000000 --- a/docs/cugraph/source/nx_cugraph/installation.md +++ /dev/null @@ -1,50 +0,0 @@ -# Installing nx-cugraph - -This guide describes how to install ``nx-cugraph`` and use it in your workflows. 
- - -## System Requirements - -`nx-cugraph` requires the following: - - - **Volta architecture or later NVIDIA GPU, with [compute capability](https://developer.nvidia.com/cuda-gpus) 7.0+** - - **[CUDA](https://docs.nvidia.com/cuda/index.html) 11.2, 11.4, 11.5, 11.8, 12.0, 12.2, or 12.5** - - **Python >= 3.10** - - **[NetworkX](https://networkx.org/documentation/stable/install.html#) >= 3.2 (version 3.4 or higher recommended)** - -More details about system requirements can be found in the [RAPIDS System Requirements Documentation](https://docs.rapids.ai/install#system-req). - -## Installing Packages - -Read the [RAPIDS Quick Start Guide](https://docs.rapids.ai/install) to learn more about installing all RAPIDS libraries. - -`nx-cugraph` can be installed using conda or pip. It is included in the RAPIDS metapackage, or can be installed separately. - -### Conda -**Nightly version** -```bash -conda install -c rapidsai-nightly -c conda-forge -c nvidia nx-cugraph -``` - -**Stable version** -```bash -conda install -c rapidsai -c conda-forge -c nvidia nx-cugraph -``` - -### pip -**Nightly version** -```bash -pip install nx-cugraph-cu11 --extra-index-url https://pypi.anaconda.org/rapidsai-wheels-nightly/simple -``` - -**Stable version** -```bash -pip install nx-cugraph-cu11 --extra-index-url https://pypi.nvidia.com -``` - -
- -**Note:** - - The `pip install` examples above are for CUDA 11. To install for CUDA 12, replace `-cu11` with `-cu12` - -
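A minimal sketch for verifying the install, assuming the packages above are in the active environment: it imports `nx_cugraph` to confirm the package is present and then dispatches a single call through the `backend=` keyword described in the How it Works page.

```python
# Minimal sanity check (assumes networkx and nx-cugraph are installed).
import networkx as nx
import nx_cugraph  # noqa: F401  -- confirms the package is importable

G = nx.karate_club_graph()

# Explicitly dispatch one algorithm to the cugraph backend.
scores = nx.betweenness_centrality(G, backend="cugraph")
print(sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:3])
```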
diff --git a/docs/cugraph/source/nx_cugraph/nx_cugraph.md b/docs/cugraph/source/nx_cugraph/nx_cugraph.md deleted file mode 100644 index 900362a6e2b..00000000000 --- a/docs/cugraph/source/nx_cugraph/nx_cugraph.md +++ /dev/null @@ -1,16 +0,0 @@ -### nx_cugraph - - -`nx-cugraph` is a [networkX backend]() that accelerates many popular NetworkX functions using cuGraph and NVIDIA GPUs. -Users simply [install and enable nx-cugraph](installation.md) to experience GPU speedups. - -Lets look at some examples of algorithm speedups comparing CPU based NetworkX to dispatched versions run on GPU with nx_cugraph. - -![Ancestors](../images/ancestors.png) -![BFS Tree](../images/bfs_tree.png) -![Connected Components](../images/conn_component.png) -![Descendents](../images/descendents.png) -![Katz](../images/katz.png) -![Pagerank](../images/pagerank.png) -![Single Source Shortest Path](../images/sssp.png) -![Weakly Connected Components](../images/wcc.png) diff --git a/docs/cugraph/source/nx_cugraph/supported-algorithms.rst b/docs/cugraph/source/nx_cugraph/supported-algorithms.rst deleted file mode 100644 index 8dc3c5ba2a7..00000000000 --- a/docs/cugraph/source/nx_cugraph/supported-algorithms.rst +++ /dev/null @@ -1,356 +0,0 @@ -Supported Algorithms -===================== - -The nx-cugraph backend to NetworkX connects -`pylibcugraph `_ (cuGraph's low-level Python -interface to its CUDA-based graph analytics library) and -`CuPy `_ (a GPU-accelerated array library) to NetworkX's -familiar and easy-to-use API. - -Below is the list of algorithms that are currently supported in nx-cugraph. - - -Algorithms ----------- - -+-----------------------------+ -| **Centrality** | -+=============================+ -| betweenness_centrality | -+-----------------------------+ -| edge_betweenness_centrality | -+-----------------------------+ -| degree_centrality | -+-----------------------------+ -| in_degree_centrality | -+-----------------------------+ -| out_degree_centrality | -+-----------------------------+ -| eigenvector_centrality | -+-----------------------------+ -| katz_centrality | -+-----------------------------+ - -+---------------------+ -| **Cluster** | -+=====================+ -| average_clustering | -+---------------------+ -| clustering | -+---------------------+ -| transitivity | -+---------------------+ -| triangles | -+---------------------+ - -+--------------------------+ -| **Community** | -+==========================+ -| louvain_communities | -+--------------------------+ - -+--------------------------+ -| **Bipartite** | -+==========================+ -| betweenness_centrality | -| complete_bipartite_graph | -+--------------------------+ - -+------------------------------------+ -| **Components** | -+====================================+ -| connected_components | -+------------------------------------+ -| is_connected | -+------------------------------------+ -| node_connected_component | -+------------------------------------+ -| number_connected_components | -+------------------------------------+ -| weakly_connected | -+------------------------------------+ -| is_weakly_connected | -+------------------------------------+ -| number_weakly_connected_components | -+------------------------------------+ -| weakly_connected_components | -+------------------------------------+ - -+-------------+ -| **Core** | -+=============+ -| core_number | -+-------------+ -| k_truss | -+-------------+ - -+-------------+ -| **DAG** | -+=============+ -| ancestors | -+-------------+ -| descendants | -+-------------+ - 
-+--------------------+ -| **Isolate** | -+====================+ -| is_isolate | -+--------------------+ -| isolates | -+--------------------+ -| number_of_isolates | -+--------------------+ - -+-------------------+ -| **Link analysis** | -+===================+ -| hits | -+-------------------+ -| pagerank | -+-------------------+ - -+----------------+ -| **Operators** | -+================+ -| complement | -+----------------+ -| reverse | -+----------------+ - -+----------------------+ -| **Reciprocity** | -+======================+ -| overall_reciprocity | -+----------------------+ -| reciprocity | -+----------------------+ - -+---------------------------------------+ -| **Shortest Paths** | -+=======================================+ -| has_path | -+---------------------------------------+ -| shortest_path | -+---------------------------------------+ -| shortest_path_length | -+---------------------------------------+ -| all_pairs_shortest_path | -+---------------------------------------+ -| all_pairs_shortest_path_length | -+---------------------------------------+ -| bidirectional_shortest_path | -+---------------------------------------+ -| single_source_shortest_path | -+---------------------------------------+ -| single_source_shortest_path_length | -+---------------------------------------+ -| single_target_shortest_path | -+---------------------------------------+ -| single_target_shortest_path_length | -+---------------------------------------+ -| all_pairs_bellman_ford_path | -+---------------------------------------+ -| all_pairs_bellman_ford_path_length | -+---------------------------------------+ -| all_pairs_dijkstra | -+---------------------------------------+ -| all_pairs_dijkstra_path | -+---------------------------------------+ -| all_pairs_dijkstra_path_length | -+---------------------------------------+ -| bellman_ford_path | -+---------------------------------------+ -| bellman_ford_path_length | -+---------------------------------------+ -| dijkstra_path | -+---------------------------------------+ -| dijkstra_path_length | -+---------------------------------------+ -| single_source_bellman_ford | -+---------------------------------------+ -| single_source_bellman_ford_path | -+---------------------------------------+ -| single_source_bellman_ford_path_length| -+---------------------------------------+ -| single_source_dijkstra | -+---------------------------------------+ -| single_source_dijkstra_path | -+---------------------------------------+ -| single_source_dijkstra_path_length | -+---------------------------------------+ - -+---------------------------+ -| **Traversal** | -+===========================+ -| bfs_edges | -+---------------------------+ -| bfs_layers | -+---------------------------+ -| bfs_predecessors | -+---------------------------+ -| bfs_successors | -+---------------------------+ -| bfs_tree | -+---------------------------+ -| descendants_at_distance | -+---------------------------+ -| generic_bfs_edges | -+---------------------------+ - -+---------------------+ -| **Tree** | -+=====================+ -| is_arborescence | -+---------------------+ -| is_branching | -+---------------------+ -| is_forest | -+---------------------+ -| is_tree | -+---------------------+ - - -Utilities -------- - -+-------------------------+ -| **Classes** | -+=========================+ -| is_negatively_weighted | -+-------------------------+ - -+----------------------+ -| **Convert** | -+======================+ -| from_dict_of_lists | -+----------------------+ -| 
to_dict_of_lists | -+----------------------+ - -+--------------------------+ -| **Convert Matrix** | -+==========================+ -| from_pandas_edgelist | -+--------------------------+ -| from_scipy_sparse_array | -+--------------------------+ - -+-----------------------------------+ -| **Relabel** | -+===================================+ -| convert_node_labels_to_integers | -+-----------------------------------+ -| relabel_nodes | -+-----------------------------------+ - -Generators ------------- - -+-------------------------------+ -| **Classic** | -+===============================+ -| barbell_graph | -+-------------------------------+ -| circular_ladder_graph | -+-------------------------------+ -| complete_graph | -+-------------------------------+ -| complete_multipartite_graph | -+-------------------------------+ -| cycle_graph | -+-------------------------------+ -| empty_graph | -+-------------------------------+ -| ladder_graph | -+-------------------------------+ -| lollipop_graph | -+-------------------------------+ -| null_graph | -+-------------------------------+ -| path_graph | -+-------------------------------+ -| star_graph | -+-------------------------------+ -| tadpole_graph | -+-------------------------------+ -| trivial_graph | -+-------------------------------+ -| turan_graph | -+-------------------------------+ -| wheel_graph | -+-------------------------------+ - -+-----------------+ -| **Classic** | -+=================+ -| caveman_graph | -+-----------------+ - -+------------+ -| **Ego** | -+============+ -| ego_graph | -+------------+ - -+------------------------------+ -| **small** | -+==============================+ -| bull_graph | -+------------------------------+ -| chvatal_graph | -+------------------------------+ -| cubical_graph | -+------------------------------+ -| desargues_graph | -+------------------------------+ -| diamond_graph | -+------------------------------+ -| dodecahedral_graph | -+------------------------------+ -| frucht_graph | -+------------------------------+ -| heawood_graph | -+------------------------------+ -| house_graph | -+------------------------------+ -| house_x_graph | -+------------------------------+ -| icosahedral_graph | -+------------------------------+ -| krackhardt_kite_graph | -+------------------------------+ -| moebius_kantor_graph | -+------------------------------+ -| octahedral_graph | -+------------------------------+ -| pappus_graph | -+------------------------------+ -| petersen_graph | -+------------------------------+ -| sedgewick_maze_graph | -+------------------------------+ -| tetrahedral_graph | -+------------------------------+ -| truncated_cube_graph | -+------------------------------+ -| truncated_tetrahedron_graph | -+------------------------------+ -| tutte_graph | -+------------------------------+ - -+-------------------------------+ -| **Social** | -+===============================+ -| davis_southern_women_graph | -+-------------------------------+ -| florentine_families_graph | -+-------------------------------+ -| karate_club_graph | -+-------------------------------+ -| les_miserables_graph | -+-------------------------------+ - - -To request nx-cugraph backend support for a NetworkX API that is not listed -above, visit the `nx-cugraph GitHub repo `_. 
diff --git a/docs/cugraph/source/references/cugraph_ref.md b/docs/cugraph/source/references/cugraph_ref.md deleted file mode 100644 index 845436a60f2..00000000000 --- a/docs/cugraph/source/references/cugraph_ref.md +++ /dev/null @@ -1,45 +0,0 @@ -# References - -## Architecture - -2-D Data Partitioning - -- Kang, S., Fender, A., Eaton, J., & Rees, B. (2020, September) *Computing PageRank Scores of Web Crawl Data Using DGX A100 Clusters*. In 2020 IEEE High Performance Extreme Computing Conference (HPEC) (pp. 1-4). IEEE. - -- S. Kang, J. Nke and B. Rees, (2022 September) *Analyzing Multi-trillion Edge Graphs on Large GPU Clusters: A Case Study with PageRank*, In 2022 IEEE High Performance Extreme Computing Conference (HPEC), Waltham, MA, USA, 2022, pp. 1-7, doi: 10.1109/HPEC55821.2022.9926341. - -

- -## Algorithms - -### Betweenness Centrality -- Brandes, U. (2001). *A faster algorithm for betweenness centrality*. Journal of mathematical sociology, 25(2), 163-177. -- Brandes, U. (2008). *On variants of shortest-path betweenness centrality and their generic computation*. Social Networks, 30(2), 136-145. -- McLaughlin, A., & Bader, D. A. (2018). *Accelerating GPU betweenness centrality*. Communications of the ACM, 61(8), 85-92. - -### Katz -- Katz, L. (1953). *A new status index derived from sociometric analysis*. Psychometrika, 18(1), 39-43. -- Foster, K.C., Muth, S.Q., Potterat, J.J. et al. *A faster Katz status score algorithm*. Computational & Mathematical Organization Theory (2001) 7: 275. - -### K-Truss -- J. Cohen, *Trusses: Cohesive subgraphs for social network analysis* National security agency technical report, 2008 -- O. Green, J. Fox, E. Kim, F. Busato, et al. *Quickly Finding a Truss in a Haystack* IEEE High Performance Extreme Computing Conference (HPEC), 2017 https://doi.org/10.1109/HPEC.2017.8091038 -- O. Green, P. Yalamanchili, L.M. Munguia, *Fast Triangle Counting on GPU* Irregular Applications: Architectures and Algorithms (IA3), 2014 - -### Hungarian Algorithm -- Date, K., & Nagi, R. (2016). GPU-accelerated Hungarian algorithms for the Linear Assignment Problem. Parallel Computing, 57, 52-72. - - -### Leiden -- Traag, V. A., Waltman, L., & Van Eck, N. J. (2019). *From Louvain to Leiden: guaranteeing well-connected communities*. Scientific reports, 9(1), 1-12. - -### Louvain -- VD Blondel, J-L Guillaume, R Lambiotte and E Lefebvre. *Fast unfolding of community hierarchies in large networks*. J Stat Mech P10008 (2008) - -

- -## Other Papers -- Hricik, T., Bader, D., & Green, O. (2020, September). *Using RAPIDS AI to Accelerate Graph Data Science Workflows*. In 2020 IEEE High Performance Extreme Computing Conference (HPEC) (pp. 1-4). IEEE. - - -

diff --git a/docs/cugraph/source/references/datasets.md b/docs/cugraph/source/references/datasets.md deleted file mode 100644 index 35234de87c6..00000000000 --- a/docs/cugraph/source/references/datasets.md +++ /dev/null @@ -1,21 +0,0 @@ -# Data Sets - -karate - - W. W. Zachary, *An information flow model for conflict and fission in small groups*, Journal of Anthropological Research 33, 452-473 (1977). -dolphins - - D. Lusseau, K. Schneider, O. J. Boisseau, P. Haase, E. Slooten, and S. M. Dawson, - *The bottlenose dolphin community of Doubtful Sound features a large proportion of long-lasting associations*, - Behavioral Ecology and Sociobiology 54, 396-405 (2003). -netscience - - M. E. J. Newman, - *Finding community structure in networks using the eigenvectors of matrices*, - Preprint physics/0605087 (2006). -email-Eu-core - - Hao Yin, Austin R. Benson, Jure Leskovec, and David F. Gleich. - *Local Higher-order Graph Clustering.* - In Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. 2017. - - J. Leskovec, J. Kleinberg and C. Faloutsos. - *Graph Evolution: Densification and Shrinking Diameters*. - ACM Transactions on Knowledge Discovery from Data (ACM TKDD), 1(1), 2007. http://www.cs.cmu.edu/~jure/pubs/powergrowth-tkdd.pdf -polbooks - - V. Krebs, unpublished, http://www.orgnet.com/. diff --git a/docs/cugraph/source/references/index.rst b/docs/cugraph/source/references/index.rst deleted file mode 100644 index 9ea51a08356..00000000000 --- a/docs/cugraph/source/references/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -========== -References -========== - - -.. toctree:: - :maxdepth: 3 - - cugraph_ref.md - datasets.md - licenses.md diff --git a/docs/cugraph/source/references/licenses.md b/docs/cugraph/source/references/licenses.md deleted file mode 100644 index b95905d9f2f..00000000000 --- a/docs/cugraph/source/references/licenses.md +++ /dev/null @@ -1,208 +0,0 @@ -# License - -Most of the Graph code is open-sourced and developed under the Apache 2.0 licnese. -The cugraph-ops code is closed sourced and developed under a NVIDIA copyright - - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2018 NVIDIA CORPORATION - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/docs/cugraph/source/releases/index.rst b/docs/cugraph/source/releases/index.rst deleted file mode 100644 index cbd22324b9b..00000000000 --- a/docs/cugraph/source/releases/index.rst +++ /dev/null @@ -1,5 +0,0 @@ -======== -Releases -======== - -https://github.com/rapidsai/cugraph/blob/main/CHANGELOG.md diff --git a/docs/cugraph/source/sphinxext/github_link.py b/docs/cugraph/source/sphinxext/github_link.py deleted file mode 100644 index cc28dc6e897..00000000000 --- a/docs/cugraph/source/sphinxext/github_link.py +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) 2019-2023, NVIDIA CORPORATION. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# NOTE: -# This contains code with copyright by the scikit-learn project, subject to the -# license in /thirdparty/LICENSES/LICENSE.scikit_learn - -import inspect -import re -import subprocess -from functools import partial -from operator import attrgetter - -orig = inspect.isfunction - - -# See https://opendreamkit.org/2017/06/09/CythonSphinx/ -def isfunction(obj): - - orig_val = orig(obj) - - new_val = hasattr(type(obj), "__code__") - - if (orig_val != new_val): - return new_val - - return orig_val - - -inspect.isfunction = isfunction - -REVISION_CMD = 'git rev-parse --short HEAD' - -source_regex = re.compile(r"^File: (.*?) \(starting at line ([0-9]*?)\)$", - re.MULTILINE) - - -def _get_git_revision(): - try: - revision = subprocess.check_output(REVISION_CMD.split()).strip() - except (subprocess.CalledProcessError, OSError): - print('Failed to execute git to get revision') - return None - return revision.decode('utf-8') - - -def _linkcode_resolve(domain, info, url_fmt, revision): - """Determine a link to online source for a class/method/function - - This is called by sphinx.ext.linkcode - - An example with a long-untouched module that everyone has - >>> _linkcode_resolve('py', {'module': 'tty', - ... 'fullname': 'setraw'}, - ... package='tty', - ... url_fmt='http://hg.python.org/cpython/file/' - ... '{revision}/Lib/{package}/{path}#L{lineno}', - ... revision='xxxx') - 'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18' - """ - - if revision is None: - return - if domain != 'py': - return - if not info.get('module') or not info.get('fullname'): - return - - class_name = info['fullname'].split('.')[0] - module = __import__(info['module'], fromlist=[class_name]) - obj = attrgetter(info['fullname'])(module) - - # Unwrap the object to get the correct source - # file in case that is wrapped by a decorator - obj = inspect.unwrap(obj) - - fn: str = None - lineno: str = None - - obj_module = inspect.getmodule(obj) - if not obj_module: - print(f"could not infer source code link for: {info}") - return - module_name = obj_module.__name__.split('.')[0] - - module_dir_dict = { - "cugraph_dgl": "cugraph-dgl", - "cugraph_pyg": "cugraph-pyg", - "cugraph_service_client": "cugraph-service/client", - "cugraph_service_server": "cugraph-service/server", - "cugraph": "cugraph", - "pylibcugraph": "pylibcugraph", - } - module_dir = module_dir_dict.get(module_name) - if not module_dir: - print(f"no source path directory set for {module_name}") - return - - obj_path = "/".join(obj_module.__name__.split(".")[1:]) - obj_file_ext = obj_module.__file__.split('.')[-1] - source_ext = "pyx" if obj_file_ext == "so" else "py" - fn = f"{module_dir}/{module_name}/{obj_path}.{source_ext}" - - # Get the line number if we need it. (Can work without it) - if (lineno is None): - try: - lineno = inspect.getsourcelines(obj)[1] - except Exception: - - # Can happen if its a cyfunction. 
See if it has `__code__` - if (hasattr(obj, "__code__")): - lineno = obj.__code__.co_firstlineno - else: - lineno = '' - return url_fmt.format(revision=revision, - path=fn, - lineno=lineno) - - -def make_linkcode_resolve(url_fmt): - """Returns a linkcode_resolve function for the given URL format - - revision is a git commit reference (hash or name) - - url_fmt is along the lines of ('https://github.com/USER/PROJECT/' - 'blob/{revision}/{package}/' - '{path}#L{lineno}') - """ - revision = _get_git_revision() - return partial(_linkcode_resolve, - revision=revision, - url_fmt=url_fmt) diff --git a/docs/cugraph/source/tutorials/basic_cugraph.md b/docs/cugraph/source/tutorials/basic_cugraph.md deleted file mode 100644 index a0c9ad576b2..00000000000 --- a/docs/cugraph/source/tutorials/basic_cugraph.md +++ /dev/null @@ -1,38 +0,0 @@ -# Getting started with cuGraph - -## Required hardware/software - -CuGraph is part of [Rapids](https://docs.rapids.ai/user-guide) and has the following system requirements: - * NVIDIA GPU, Volta architecture or later, with [compute capability](https://developer.nvidia.com/cuda-gpus) 7.0+ - * CUDA 11.2, 11.4, 11.5, 11.8, 12.0, 12.2, or 12.5 - * Python version 3.10, 3.11, or 3.12 - * NetworkX >= version 3.3 or newer in order to use use [NetworkX Configs](https://networkx.org/documentation/stable/reference/backends.html#module-networkx.utils.configs) **This is required for use of nx-cuGraph, [see below](#cugraph-using-networkx-code).** - -## Installation -The latest RAPIDS System Requirements documentation is located [here](https://docs.rapids.ai/install#system-req). - -This includes several ways to set up cuGraph -* From Unix - * [Conda](https://docs.rapids.ai/install#wsl-conda) - * [Docker](https://docs.rapids.ai/install#wsl-docker) - * [pip](https://docs.rapids.ai/install#wsl-pip) - -* In windows you must install [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install) and then choose one of the following: - * [Conda](https://docs.rapids.ai/install#wsl-conda) - * [Docker](https://docs.rapids.ai/install#wsl-docker) - * [pip](https://docs.rapids.ai/install#wsl-pip) - -* Build From Source - -To build from source, check each RAPIDS GitHub README for set up and build instructions. Further links are provided in the [selector tool](https://docs.rapids.ai/install#selector). If additional help is needed reach out on our [Slack Channel](https://rapids-goai.slack.com/archives/C5E06F4DC). - -## CuGraph Using NetworkX Code -While the steps above are required to use the full suite of cuGraph graph analytics, cuGraph is now supported as a NetworkX backend using [nx-cugraph](https://docs.rapids.ai/api/cugraph/nightly/nx_cugraph/nx_cugraph/). -Nx-cugraph offers those with existing NetworkX code, a **zero code change** option with a growing list of supported algorithms. - - -## Cugraph API Example -Coming soon ! - - -Until then, [the cuGraph notebook repository](https://github.com/rapidsai/cugraph/blob/main/notebooks/README.md) has many examples of loading graph data and running algorithms in Jupyter notebooks. The [cuGraph test code](https://github.com/rapidsai/cugraph/tree/main/python/cugraph/cugraph/tests) gives examples of python scripts settng up and calling cuGraph algorithms. A simple example of [testing the degree centrality algorithm](https://github.com/rapidsai/cugraph/blob/main/python/cugraph/cugraph/tests/centrality/test_degree_centrality.py) is a good place to start. 
Some of these examples show [multi-GPU tests/examples with larger data sets](https://github.com/rapidsai/cugraph/blob/main/python/cugraph/cugraph/tests/centrality/test_degree_centrality_mg.py) as well. diff --git a/docs/cugraph/source/tutorials/community_resources.md b/docs/cugraph/source/tutorials/community_resources.md deleted file mode 100644 index 975f11965de..00000000000 --- a/docs/cugraph/source/tutorials/community_resources.md +++ /dev/null @@ -1,4 +0,0 @@ -# Commmunity Resources -[Rapids Community Repository](https://github.com/rapidsai-community/notebooks-contrib) -[RAPIDS Containers on Docker Hub](https://catalog.ngc.nvidia.com/containers) -[RAPIDS PyTorch Container in Docker](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pyg) diff --git a/docs/cugraph/source/tutorials/cugraph_blogs.rst b/docs/cugraph/source/tutorials/cugraph_blogs.rst deleted file mode 100644 index 57fa011ab59..00000000000 --- a/docs/cugraph/source/tutorials/cugraph_blogs.rst +++ /dev/null @@ -1,90 +0,0 @@ - -cuGraph Blogs and Presentations -************************************************ - -The RAPIDS team blogs at https://medium.com/rapids-ai, and many of -these blog posts provide deeper dives into features from cuGraph. -Here, we've selected just a few that are of particular interest to cuGraph users: - - -Blogs & Conferences -==================== -2024 ------- -Coming Soon - -2023 ------- - * `Intro to Graph Neural Networks with cuGraph-DGL `_ - * `GTC 2023 Ask the Experts Q&A `_ - * `Accelerating NetworkX on NVIDIA GPUs for High Performance Graph Analytics `_ - * `Introduction to Graph Neural Networks with NVIDIA cuGraph-DGL `_ - * `Supercharge Graph Analytics at Scale with GPU-CPU Fusion for 100x Performance `_ -2022 ------- - * `GTC: State of cuGraph (video & slides) `_ - * `GTC: Scaling and Validating Louvain in cuGraph against Massive Graphs (video & slides) `_ - * `KDD Tutorial on Accelerated GNN Training with DGL/PyG and cuGraph `_ - -2021 ------- - * `GTC 21 - State of RAPIDS cuGraph and what's comming next `_ - -2020 ------- - * `Status of RAPIDS cuGraph — Refactoring Code And Rethinking Graphs `_ - * `Tackling Large Graphs with RAPIDS cuGraph and CUDA Unified Memory on GPUs `_ - * `RAPIDS cuGraph adds NetworkX and DiGraph Compatibility `_ - * `Large Graph Visualization with RAPIDS cuGraph `_ - * `GTC 20 Fall - cuGraph Goes Big `_ - -2019 -------- - * `RAPIDS cuGraph `_ - * `RAPIDS cuGraph — The vision and journey to version 1.0 and beyond `_ - * `RAPIDS cuGraph : multi-GPU PageRank `_ - * `Similarity in graphs: Jaccard versus the Overlap Coefficient `_ - * `GTC19 Spring - Accelerating Graph Algorithms with RAPIDS `_ - * `GTC19 Fall - Multi-Node Multi-GPU Machine Learning and Graph Analytics with RAPIDS `_ - -2018 -------- - * `GTC18 Fall - RAPIDS: Benchmarking Graph Analytics on the DGX-2 `_ - - - -Media -=============== - * `Nvidia Rapids cuGraph: Making graph analysis ubiquitous `_ - * `RAPIDS cuGraph – Accelerating all your Graph needs `_ - -Academic Papers -=============== - - * Seunghwa Kang, Chuck Hastings, Joe Eaton, Brad Rees `cuGraph C++ primitives: vertex/edge-centric building blocks for parallel graph computing `_ - - * Alex Fender, Brad Rees, Joe Eaton (2022) `Massive Graph Analytics `_ Bader, D. (Editor) CRC Press - - * S Kang, A. Fender, J. Eaton, B. Rees. `Computing PageRank Scores of Web Crawl Data Using DGX A100 Clusters `_. In IEEE HPEC, Sep. 2020 - - * Hricik, T., Bader, D., & Green, O. (2020, September). `Using RAPIDS AI to accelerate graph data science workflows `_. 
In 2020 IEEE High Performance Extreme Computing Conference (HPEC) (pp. 1-4). IEEE. - - * Richardson, B., Rees, B., Drabas, T., Oldridge, E., Bader, D. A., & Allen, R. (2020, August). Accelerating and Expanding End-to-End Data Science Workflows with DL/ML Interoperability Using RAPIDS. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (pp. 3503-3504). - - * A Gondhalekar, P Sathre, W Feng `Hybrid CPU-GPU Implementation of Edge-Connected Jaccard Similarity in Graph Datasets `_ - - -Other Blogs -======================== -* `4 graph algorithms on steroids for data scientists with cugraph `_ -* `Where should I walk `_ -* `Where really are the parking spots? `_ -* `Accelerating Single Cell Genomic Analysis using RAPIDS `_ -* `Running Large-Scale Graph Analytics with Memgraph and NVIDIA cuGraph Algorithms `_ -* `Dev Blog Repost: Similarity in Graphs: Jaccard Versus the Overlap Coefficient `_ - -RAPIDS Event Notebooks -====================== -* `KDD 2022 Notebook that demonstates using cuDF for ETL/data cleaning and XGBoost for training a fraud predection model. `_ -* `SciPy 22 Notebook comparing cuGraph to NetworkX `_ -* `KDD 2020 Tutorial Notebooks - Accelerating and Expanding End-to-End Data Science Workflows with DL/ML Interoperability Using RAPIDS `_ diff --git a/docs/cugraph/source/tutorials/cugraph_notebooks.md b/docs/cugraph/source/tutorials/cugraph_notebooks.md deleted file mode 100644 index 6d7840dc3c4..00000000000 --- a/docs/cugraph/source/tutorials/cugraph_notebooks.md +++ /dev/null @@ -1,72 +0,0 @@ -# cuGraph Notebooks - -![GraphAnalyticsFigure](https://github.com/rapidsai/cugraph/tree/main/img/GraphAnalyticsFigure.jpg) - -This repository contains a collection of Jupyter Notebooks that outline how to run various cuGraph analytics. The notebooks do not address a complete data science problem. The notebooks are simply examples of how to run the graph analytics. Manipulation of the data before or after the graph analytic is not covered here. 
Extended, more problem-focused notebooks are being created and are available at https://github.com/rapidsai/notebooks-extended - -## Summary - -| Folder | Notebook | Description | -| --------------- | ------------------------------------------------------------ | ------------------------------------------------------------ | -| Centrality | | | -| | [Centrality](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Centrality.ipynb) | Compute and compare multiple (currently 5) centrality scores | -| | [Katz](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Katz.ipynb) | Compute the Katz centrality for every vertex | -| | [Betweenness](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Betweenness.ipynb) | Compute both Edge and Vertex Betweenness centrality | -| | [Degree](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Degree.ipynb) | Compute Degree Centrality for each vertex | -| | [Eigenvector](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/centrality/Eigenvector.ipynb) | Compute Eigenvector Centrality for every vertex | -| Community | | | -| | [Louvain and Leiden](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/Louvain.ipynb) | Identify clusters in a graph using both the Louvain and Leiden algorithms | -| | [ECG](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/ECG.ipynb) | Identify clusters in a graph using the Ensemble Clustering for Graphs (ECG) algorithm | -| | [K-Truss](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/ktruss.ipynb) | Extracts the K-Truss cluster | -| | [Spectral-Clustering](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/Spectral-Clustering.ipynb) | Identify clusters in a graph using Spectral Clustering with both<br>
- Balanced Cut
- Modularity | -| | [Subgraph Extraction](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/Subgraph-Extraction.ipynb) | Compute a subgraph of the existing graph including only the specified vertices | -| | [Triangle Counting](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/community/Triangle-Counting.ipynb) | Count the number of triangles in a graph | -| Components | | | -| | [Connected Components](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/components/ConnectedComponents.ipynb) | Find weakly and strongly connected components in a graph | -| Core | | | -| | [K-Core](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/cores/kcore.ipynb) | Extracts the K-core cluster | -| | [Core Number](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/cores/core-number.ipynb) | Compute the Core number for each vertex in a graph | -| Layout | | | -| | [Force-Atlas2](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/layout/Force-Atlas2.ipynb) | A large graph visualization achieved with cuGraph. | -| Link Analysis | | | -| | [Pagerank](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_analysis/Pagerank.ipynb) | Compute the PageRank of every vertex in a graph | -| | [HITS](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_analysis/HITS.ipynb) | Compute the HITS Hub and Authority scores for every vertex in a graph | -| Link Prediction | | | -| | [Jaccard Similarity](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_prediction/Jaccard-Similarity.ipynb) | Compute vertex similarity scores using both:<br>
- Jaccard Similarity
- Weighted Jaccard | -| | [Overlap Similarity](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/link_prediction/Overlap-Similarity.ipynb) | Compute vertex similarity scores using the Overlap Coefficient | -| Sampling | | | -| | [Random Walk](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/sampling/RandomWalk.ipynb) | Compute random walks for a varying number of seeds and path lengths | -| Traversal | | | -| | [BFS](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/traversal/BFS.ipynb) | Compute the Breadth First Search path from a starting vertex to every other vertex in a graph | -| | [SSSP](https://github.com/rapidsai/cugraph/blob/main/notebooks/algorithms/traversal/SSSP.ipynb) | Single Source Shortest Path - compute the shortest path from a starting vertex to every other vertex | -| Structure | | | -| | [Renumbering](algorithms/structure/Renumber.ipynb)<br>
[Renumbering 2](algorithms/structure/Renumber-2.ipynb) | Renumber the vertex IDs in a graph (two sample notebooks) | -| | [Symmetrize](algorithms/structure/Symmetrize.ipynb) | Symmetrize the edges in a graph | - - -## RAPIDS notebooks -Visit the main RAPIDS [notebooks](https://github.com/rapidsai/cugraph/blob/main/notebooks/) repo for a listing of all notebooks across all RAPIDS libraries. - -## Requirements - -Running the example in these notebooks requires: - -* The latest version of RAPIDS with cuGraph. - * Download via Docker, Conda (See [__Getting Started__](https://rapids.ai/start.html)) - -* cuGraph is dependent on the latest version of cuDF. Please install all components of RAPIDS -* Python 3.10+ -* A system with an NVIDIA GPU: Volta architecture or newer -* CUDA 11.4+ - -## Copyright - -Copyright (c) 2019-2023, NVIDIA CORPORATION. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - ---- diff --git a/docs/cugraph/source/tutorials/how_to_guides.md b/docs/cugraph/source/tutorials/how_to_guides.md deleted file mode 100644 index 998957afea1..00000000000 --- a/docs/cugraph/source/tutorials/how_to_guides.md +++ /dev/null @@ -1,9 +0,0 @@ -# How To Guides -- [Basic use of cuGraph](./basic_cugraph.md) -- Property graph with analytic flow -- GNN – model building -- cuGraph Service – client/server setup and use (ucx) -- MNMG Graph – dask, rmm basics and examples -- Pylibcugraph – why and how -- Cugraph for C, C++ users -- Use of nvidia-smi with cugraph diff --git a/docs/cugraph/source/tutorials/index.rst b/docs/cugraph/source/tutorials/index.rst deleted file mode 100644 index 525fbe4f545..00000000000 --- a/docs/cugraph/source/tutorials/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -========= -Tutorials -========= - - -.. toctree:: - :maxdepth: 3 - - how_to_guides.md - cugraph_blogs.rst - community_resources.md - cugraph_notebooks.md diff --git a/docs/cugraph/source/wholegraph/basics/index.rst b/docs/cugraph/source/wholegraph/basics/index.rst deleted file mode 100644 index 429fe35d601..00000000000 --- a/docs/cugraph/source/wholegraph/basics/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -====== -Basics -====== - - -.. toctree:: - :maxdepth: 2 - - wholegraph_intro - wholememory_intro - wholememory_implementation_details diff --git a/docs/cugraph/source/wholegraph/basics/wholegraph_intro.md b/docs/cugraph/source/wholegraph/basics/wholegraph_intro.md deleted file mode 100644 index 360f8e0e36b..00000000000 --- a/docs/cugraph/source/wholegraph/basics/wholegraph_intro.md +++ /dev/null @@ -1,135 +0,0 @@ -# WholeGraph Introduction -WholeGraph helps train large-scale Graph Neural Networks(GNN). -WholeGraph provides underlying storage structure called WholeMemory. -WholeMemory is a Tensor like storage and provides multi-GPU support. -It is optimized for NVLink systems like DGX A100 servers. -By working together with cuGraph, cuGraph-Ops, cuGraph-DGL, cuGraph-PyG, and upstream DGL and PyG, -it will be easy to build GNN applications. - -## WholeMemory -WholeMemory can be regarded as a whole view of GPU memory. 
-WholeMemory exposes a handle of the memory instance no matter how the underlying data is stored across multiple GPUs. -WholeMemory assumes that separate process is used to control each GPU. - -### WholeMemory Basics -To define WholeMemory, we need to specify the following: - -#### 1. Specify the set of GPU to handle the Memory - -Since WholeMemory is owned by a set of GPUs, you must specify the set of GPUs. -This is done by creating [WholeMemory Communicator](#wholememory-communicator) and specifying the WholeMemory Communicator when creating WholeMemory. - -#### 2. Specify the location of the memory - -Although WholeMemory is owned by a set of GPUs, the memory itself can be located in host memory or in device memory. -The location of the memory need to be specified, two types of locations can be specified. - -- **Host memory**: will use pinned host memory as underlying storage. -- **Device memory**: will use GPU device memory as underlying storage. - -#### 3. Specify the address mapping mode of the memory - -As WholeMemory is owned by multiple GPUs, each GPU will access the whole memory space, so we need address mapping. -There are three types of address mapping modes (also known as WholeMemory types), they are: - -- **Continuous**: All memory from each GPU will be mapped into a single continuous memory address space for each GPU. - In this mode, each GPU can directly access the whole memory using a single pointer and offset, just like using normal - device memory. Software will see no difference. Hardware peer to peer access will handle the underlying communication. - -- **Chunked**: Memory from each GPU will be mapped into different memory chunks, one chunk for each GPU. - In this mode, direct access is also supported, but not using a single pointer. Software will see the chunked memory. - However, an abstract layer may help to hide this. - -- **Distributed**: Memory from other GPUs are not mapped into current GPU, so no direct access is supported. - To access memory of other GPU, explicit communication is needed. - -To learn more details about WholeMemory locations and WholeMemory types, please refer to -[WholeMemory Implementation Details](wholememory_implementation_details.md) - -### WholeMemory Communicator -WholeMemory Communicator has two main purpose: - -- **Defines a set of GPUs which works together on WholeMemory.** WholeMemory Communicator is created by all GPUs that - wants to work together. A WholeMemory Communicator can be reused as long as the GPU set needed is the same. -- **Provides underlying communication channel needed by WholeMemory.** WholeMemory may need commuincator between GPUs - during the WholeMemory creation and some OPs on some types of WholeMemory. - -To Create WholeMemory Communicator, a WholeMemory Unique ID needs to be created first, it is usually created by the first GPU in the set of GPUs, and then broadcasted to all GPUs that want to work together. Then all GPUs in this communicator -will call WholeMemory Communicator creation function using this WholeMemory Unique ID, and the rank of current GPU as -well as all GPU count. - -### WholeMemory Granularity -As underlying storage may be partitioned into multiple GPUs physically, this is usually not wanted inside one single -user data block. To help on this, when creating WholeMemory, the granularity of data can be specified. Then the -WholeMemory is considered as multiple block of the same granularity and will not get split inside the granularity. 
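As a rough illustration of how granularity interacts with partitioning, the sketch below computes the contiguous element range a rank would own if blocks are distributed evenly and never split. This is a standalone sketch under an assumed split rule, not the pylibwholegraph API; the real library may balance blocks across ranks differently.

```python
import math

def block_range_for_rank(total_elements, granularity, rank, world_size):
    """Illustrative only (not the pylibwholegraph API): contiguous element range a
    rank would own if blocks of `granularity` elements are spread evenly across
    ranks and a block is never split."""
    assert total_elements % granularity == 0, "size must be a multiple of the granularity"
    total_blocks = total_elements // granularity
    blocks_per_rank = math.ceil(total_blocks / world_size)  # assumed split rule
    first_block = min(rank * blocks_per_rank, total_blocks)
    last_block = min(first_block + blocks_per_rank, total_blocks)
    return first_block * granularity, last_block * granularity  # [start, stop) in elements

# Example: 15 blocks of 128 elements over 4 GPUs -> ranks own 4, 4, 4 and 3 blocks.
for rank in range(4):
    print(rank, block_range_for_rank(15 * 128, 128, rank, 4))
```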
- -### WholeMemory Mapping -As WholeMemory provides a whole view of memory to GPU, to access WholeMemory, mapping is usually needed. -Different types of WholeMemory have different mapping methods supported as their names. -Some mappings supported include -- All the WholeMemory types support mapping the memory range that local GPU is responsible for. - That is, each rank can directly access "Local" memory in all types of WholeMemory. - Here "Local" memory doesn't have to be on current GPU's memory, it can be on host memory or even maybe on other GPU, - but it is guaranteed to be directly accessed by current GPU. -- Chunked and Continuous WholeMemory also support Chunked mapping. That is, memory of all GPUs can be mapped into - current GPU, one continuous chunk for one GPU. Each chunk can be directly accessed by current GPU. But the memory of - different chunks are not guaranteed to be continuous. -- Continuous WholeMemory can be mapped into continuous memory space. That is, memory of all GPUs are mapped into a - single range of virtual memory, accessing to different position of this memory will physically access to different - GPUs. This mapping will be handled by hardware (CPU pagetable or GPU pagetable). - -### Operations on WholeMemory -There are some operations that can be performed on WholeMemory. They are based on the mapping of WholeMemory. -#### Local Operation -As all WholeMemory supports mapping of local memory, so operation on local memory is supported. The operation can be -either read or write. Just use it as GPU memory of current device is OK. -#### Load and Store -To facilitate file operation, Load / Store WholeMemory from file or to file is supported. WholeMemory uses raw binary -file format for disk operation. For Load, the input file can be a single file or a list of files, if it is a list, they -will be logically concatenated together and then loaded. For store, each GPU stores its local memory to file, producing -a list of files. -#### Gather and Scatter -WholeMemory also supports Gather / Scatter operation, usually they operate on a -[WholeMemory Tensor](#wholememory-tensor). - -### WholeMemory Tensor -Compared to PyTorch, WholeMemory is like PyTorch Storage while a WholeMemory Tensor is like a PyTorch Tensor. -For now, WholeMemory supports only 1D and 2D tensors, or arrays and matrices. Only first dimension is partitioned. - -### WholeMemory Embedding -WholeMemory Embedding is just like a 2D WholeMemory Tensor, with two features added. They support cache and sparse -optimizers. -#### Cache Support -To create WholeMemory Embedding with a cache, WholeMemory CachePolicy needs to be be created first. WholeMemoryCachePolicy can be created with following fields: -- **WholeMemory Communicator**: WholeMemory CachePolicy also needs WholeMemory Communicator. - WholeMemory Communicator defines the set of GPUs that cache all the Embedding. - It can be the same as the WholeMemory Communicator used to create WholeMemory Embedding. -- **WholeMemory type**: WholeMemory CachePolicy uses WholeMemory type to specify the WholeMemory type of cache. -- **WholeMemory location**: WholeMemory CachePolicy use WholeMemory location to specify the location of the cache. -- **Access type**: Access type can be readonly or readwrite. -- **Cache ratio**: Specify how much memory the cache will use. This ratio is computed for each GPU set that caches the - whole embedding. 
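For a rough sense of what the cache ratio means in practice, here is a back-of-the-envelope sizing sketch. It assumes one plausible reading of the description above (the cached fraction of the whole embedding is split across the caching GPU set); the numbers and variable names are illustrative and are not pylibwholegraph calls.

```python
# Hypothetical sizing example only; plain arithmetic, not an API call.
embedding_rows = 100_000_000     # rows in the embedding table
embedding_dim = 256              # values per row
bytes_per_value = 4              # float32
total_bytes = embedding_rows * embedding_dim * bytes_per_value  # ~95 GiB of raw embeddings

cache_gpus = 8                   # GPUs in the cache policy's communicator
cache_ratio = 0.2                # fraction of the embedding kept in cache

# Assumed interpretation: the cached fraction is spread across the caching GPU set.
per_gpu_cache_bytes = cache_ratio * total_bytes / cache_gpus
print(f"{per_gpu_cache_bytes / 2**30:.1f} GiB of cache per GPU")  # ~2.4 GiB
```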
- -The two most commonly used caches are: -- **Device cached host memory**: When the WholeMemory Communicator for Cache Policy is the same as the WholeMemory - Communicator used to create WholeMemory Embedding, it means that the cache has same GPU set as WholeMemory Embedding. - So each GPU just caches its own part of raw Embedding. - Most commonly, when raw WholeMemory Embedding is located on host memory, and the cache is on device - memory, each GPU just caches its own part of host memory. -- **Local cached global memory**: The WholeMemory Communicator of WholeMemory CachePolicy can also be a subset of the - WholeMemory Communicator of WholeMemory Embedding. In this case, the subset of GPUs together cache all the embeddings. - Normally, when raw WholeMemory Embedding is partitioned on different machine nodes, and we - want to cache some embeddings in local machine or local GPU, then the subset of GPU can be all the GPUs in the local - machine. For local cached global memory, only readonly is supported. - -#### WholeMemory Embedding Sparse Optimizer -Another feature of WholeMemory Embedding is that WholeMemory Embedding supports embedding training. -To efficiently train large embedding tables, a sparse optimizer is needed. -WholeMemory Embedding Sparse Optimizer can run on a cached or noncached WholeMemory Embedding. -Currently supported optimizers include SGD, Adam, RMSProp and AdaGrad. - -## Graph Structure -Graph structure in WholeGraph is also based on WholeMemory. -In WholeGraph, graph is stored in [CSR format](https://en.wikipedia.org/wiki/Sparse_matrix#Compressed_sparse_row_(CSR,_CRS_or_Yale_format)). -Both ROW_INDEX (noted as `csr_row_ptr`) and COL_INDEX (notated as `csr_col_ind`) are stored in a -WholeMemory Tensor. So loading Graph Structure can use [WholeMemory Tensor Loading mechanism](#load-and-store). diff --git a/docs/cugraph/source/wholegraph/basics/wholememory_implementation_details.md b/docs/cugraph/source/wholegraph/basics/wholememory_implementation_details.md deleted file mode 100644 index 634539cd27e..00000000000 --- a/docs/cugraph/source/wholegraph/basics/wholememory_implementation_details.md +++ /dev/null @@ -1,58 +0,0 @@ -# WholeMemory Implementation Details -As described in [WholeMemory Introduction](wholegraph_intro.md), there are two WholeMemory location and three -WholeMemory types. So there will be total six WholeMemory. - -| Type | CONTINUOUS | CONTINUOUS | CHUNKED | CHUNKED | DISTRIBUTED | DISTRIBUTED | -|:-------------:|:-----------:|:----------:|:---------:|:---------:|:-----------:|:-----------:| -| Location | DEVICE | HOST | DEVICE | HOST | DEVICE | HOST | -| Allocated by | EACH | FIRST | EACH | FIRST | EACH | EACH | -| Allocate API | Driver | Host | Runtime | Host | Runtime | Runtime | -| IPC Mapping | Unix fd | mmap | cudaIpc | mmap | No IPC map | No IPC map | - -For "Continuous" and "Chunked" types of WholeMemory, all memory is mapped to each GPU, -so these two types are all "Mapped" WholeMemory, in contrast to "Distributed" WholeMemory where all are not mapped. - -## WholeMemory Layout -Since the underlying memory of a single WholeMemory object may be on multiple GPU devices, the WholeGraph library will -partition data into these GPU devices. -The partition method guarantees that each GPU can access one continuous part of the entire memory. -Here "can access" means can directly access from CUDA kernels, but the memory doesn't have to be physically on that GPU. 
-For example,it can be on host memory or other GPU's device memory that can be access using P2P. -In that case the stored data has its own granularity that shouldn't be split. Data granularity can be specified while -creating WholeMemory. Then each data granularity can be considered as a block of data. - -The follow figure shows the layout of 15 data block over 4 GPUs. -![WholeMemory Layout](../imgs/general_wholememory.png) - -For WholeMemory Tensors, they can be 1D or 2D tensors. -For 1D tensor, data granularity is one element. For 2D tensor, data granularity is its 1D tensor. -The layout will be like this: -![WholeMemory Tensor Layout](../imgs/wholememory_tensor.png) - -## WholeMemory Allocation -As there are six types of WholeMemory, the allocation process of each type are as follows: - -### Device Continuous WholeMemory -For Device Continuous WholeMemory, first a range of virtual address space is reserved in each GPU, which covers the -entire memory range. Then a part of pyhsical memory is allocated in each GPU, as shown in the following figure. -![Device Continuous WholeMemory Allocation Step 1](../imgs/device_continuous_wholememory_step1.png) -After that, each GPU gathers all the memory handles from all GPUs, and maps them to the reserved address space. -![Device Continuous WholeMemory Allocation Step 2](../imgs/device_continuous_wholememory_step2.png) - -### Device Chunked WholeMemory -For Device Chunked WholeMemory, first each GPU allocates its own part of memory using CUDA runtime API, this will create -both a virtual address space and physical memory for its own memory. -![Device Chunked WholeMemory Allocation Step 1](../imgs/device_chunked_wholememory_step1.png) -Each GPU gathers the Ipc handle of memory from all other GPUs, and maps that into its own virtual address space. -![Device Chunked WholeMemory Allocation Step 2](../imgs/device_chunked_wholememory_step2.png) - -### Host Mapped WholeMemory -For Host, Continuous and Chunked are using the same method. First, rank and allocate the host physical and share that to all -ranks. -![Host Mapped WholeMemory Allocation Step 1](../imgs/host_mapped_wholememory_step1.png) -Then each rank registers that host memory to GPU address space. -![Host Mapped WholeMemory Allocation Step 2](../imgs/host_mapped_wholememory_step2.png) - -### Distributed WholeMemory -For Distributed WholeMemory, each GPU just malloc its own part of memory, no need to share to other GPUs. -![Distributed WholeMemory Allocation](../imgs/distributed_wholememory.png) diff --git a/docs/cugraph/source/wholegraph/basics/wholememory_intro.md b/docs/cugraph/source/wholegraph/basics/wholememory_intro.md deleted file mode 100644 index 7209da9471c..00000000000 --- a/docs/cugraph/source/wholegraph/basics/wholememory_intro.md +++ /dev/null @@ -1,123 +0,0 @@ -## WholeMemory -WholeMemory can be regarded as a whole view of GPU memory. -WholeMemory exposes a handle to the memory instance no matter how the underlying data is stored across multiple GPUs. -WholeMemory assumes that a separate process is used to control each GPU. - -### WholeMemory Basics -To define WholeMemory, we need to specify the following: - -#### 1. Specify the set of GPU to handle the Memory - -As WholeMemory is owned by a set of GPUs, so the set of GPUs need to be specified. -This is done by creating [WholeMemory Communicator](#wholememory-communicator) and specify the WholeMemory Communicator -when creating WholeMemory. - -#### 2. 
Specify the location of the memory - -Although WholeMemory is owned by a set of GPUs, the memory itself can be located on host memory or on device memory. -So the location of the memory needs to be specified. Two types of location can be specified. - -- **Host memory**: will use pinned host memory as underlying storage. -- **Device memory**: will use GPU device memory as underlying storage. - -#### 3. Specify the address mapping mode of the memory - -As WholeMemory is owned by multiple GPUs, each GPU will access the whole memory space, so we need address mapping. -There are three types of address mapping modes (also known as WholeMemory types), they are: - -- **Continuous**: All memory from each GPU will be mapped into a single continuous memory address space for each GPU. - In this mode, each GPU can directly access the whole memory using a single pointer and offset, just like using normal - device memory. Software will see no difference. Hardware peer-to-peer access will handle the underlying communication. - -- **Chunked**: Memory from each GPU will be mapped into different memory chunks, one chunk for each GPU. - In this mode, direct access is also supported, but not using a single pointer. Software will see the chunked memory. - However, an abstract layer can hide this. - -- **Distributed**: Memory from other GPUs is not mapped into current GPU, so no direct access is supported. - To access memory of another GPU, explicit communication is needed. - -If you would like to know more details about WholeMemory locations and WholeMemory types, please refer to -[WholeMemory Implementation Details](wholememory_implementation_details.md) - -### WholeMemory Communicator -WholeMemory Communicator has two main purpose: - -- **Defines a set of GPUs which works together on WholeMemory.** WholeMemory Communicator is created by all GPUs that - wants to work together. A WholeMemory Communicator can be reused as long as the GPU set needed is the same. -- **Provides underlying communication channel needed by WholeMemory.** WholeMemory may need commuincator between GPUs - during the WholeMemory creation and some OPs on some types of WholeMemory. - -To Create WholeMemory Communicator, a WholeMemory Unique ID need to be created first, it is usually created by the first -GPU in the set of GPUs, and then broadcasted to all GPUs that want to work together. Then all GPUs in this communicator -will call WholeMemory Communicator creation function using this WholeMemory Unique ID, and the rank of current GPU as -well as all GPU count. - -### WholeMemory Granularity -As underlying storage may be physically partitioned into multiple GPUs, it is usually not wanted inside one single -user data block. To help with this, when creating WholeMemory, the granularity of data can be specified. Therefore -WholeMemory is considered as multiple blocks of the same granularity and will not get split inside the granularity. - -### WholeMemory Mapping -Since WholeMemory provides a whole view of memory to GPU, mapping is usually needed to access WholeMemory. -Different types of WholeMemory have different mapping methods supported as their names. -Some mappings supported include: -- All the WholeMemory types support mapping the memory range that local GPU is responsible for. - That is, each rank can directly access "Local" memory in all types of WholeMemory. 
- Here "Local" memory doesn't have to be on current GPU's memory, it can be on host memory or even maybe on other GPU, - but it is guaranteed to be directly accessed by current GPU. -- Chunked and Continuous WholeMemory also support Chunked mapping. That is, memory of all GPUs can be mapped into - current GPU, one continuous chunk for one GPU. Each chunk can be directly accessed by current GPU. But the memory of - different chunks are not guaranteed to be continuous. -- Continuous WholeMemory can be mapped into continuous memory space. That is, memory of all GPUs are mapped into a - single range of virtual memory, accessing different positions of this memory will physically access different - GPUs. This mapping will be handled by hardware (CPU pagetable or GPU pagetable). - -### Operations on WholeMemory -There are some operations that can be performed on WholeMemory. They are based on the mapping of WholeMemory. -#### Local Operation -As all WholeMemory supports mapping of local memory, so operation on local memory is supported. The operation can be -either read or write. Just use it as GPU memory of current device is OK. -#### Load / Store -To facilitate file operation, Load / Store WholeMemory from file or to file is supported. WholeMemory use raw binary -file format for disk operation. For Load, the input file can be single file or a list of files, if it is a list, they -will be logically concatenated together and then loaded. For store, each GPU stores its local memory to file, producing -a list of files. -#### Gather / Scatter -WholeMemory also supports Gather / Scatter operations, usually they operate on a -[WholeMemory Tensor](#wholememory-tensor). - -### WholeMemory Tensor -Compared to PyTorch, WholeMemory is like PyTorch Storage while WholeMemory Tensor is like PyTorch Tensor. -For now, WholeMemory supports only 1D and 2D tensor, or array and matrix. Only first dimension is partitioned. - -### WholeMemory Embedding -WholeMemory Embedding is just like 2D WholeMemory Tensor, with cache support and sparse optimizer support added. -#### Cache Support -WholeMemory Embedding supports cache. To create WholeMemory Embedding with cache, WholeMemory CachePolicy need first be -created. WholeMemoryCachePolicy can be created with following fields: -- **WholeMemory Communicator**: WholeMemory CachePolicy also need WholeMemory Communicator. - This WholeMemory Communicator defines the set of GPUs that cache the all the Embedding. - It can be the same as the WholeMemory Communicator used to create WholeMemory Embedding. -- **WholeMemory type**: WholeMemory CachePolicy uses WholeMemory type to specify the WholeMemory type of the cache. -- **WholeMemory location**: WholeMemory CachePolicy uses WholeMemory location to specify the location of the cache. -- **Access type**: Access type can be readonly or readwrite. -- **Cache ratio**: Specify how much memory the cache will use. This ratio is computed for each GPU set that caches the - whole embedding. - -There are two most commonly used caches. They are: -- **Device cached host memory**: When the WholeMemory Communicator for Cache Policy is the same as the WholeMemory - Communicator used to create WholeMemory Embedding, it means that cache has the same GPU set as WholeMemory Embedding. - So each GPU just cache its own part of raw Embedding. - Normally, when raw WholeMemory Embedding is located on host memory, and the cache is on device - memory, each GPU just caches its own part of host memory. 
-- **Local cached global memory**: The WholeMemory Communicator of WholeMemory CachePolicy can also be a subset of the - WholeMemory Communicator of WholeMemory Embedding. In this case, the subset of GPUs together cache all the embeddings. - Typically, raw WholeMemory Embedding is partitioned on different machine nodes, and we - want to cache some embeddings in local machine or local GPU, then the subset of GPUs can be all the GPUs on the local - machine. For local cached global memory supports just readonly. - -#### WholeMemory Embedding Sparse Optimizer -Another feature of WholeMemory Embedding is that WholeMemory Embedding supports embedding training. -To efficiently train large embedding tables, a sparse optimizer is needed. -The WholeMemory Embedding Sparse Optimizer can run on cached or non-cached WholeMemory Embedding. -Currently supported optimizers include SGD, Adam, RMSProp and AdaGrad. diff --git a/docs/cugraph/source/wholegraph/imgs/device_chunked_wholememory_step1.png b/docs/cugraph/source/wholegraph/imgs/device_chunked_wholememory_step1.png deleted file mode 100644 index b8a0447e6fb..00000000000 Binary files a/docs/cugraph/source/wholegraph/imgs/device_chunked_wholememory_step1.png and /dev/null differ diff --git a/docs/cugraph/source/wholegraph/imgs/device_chunked_wholememory_step2.png b/docs/cugraph/source/wholegraph/imgs/device_chunked_wholememory_step2.png deleted file mode 100644 index 8b203ce2246..00000000000 Binary files a/docs/cugraph/source/wholegraph/imgs/device_chunked_wholememory_step2.png and /dev/null differ diff --git a/docs/cugraph/source/wholegraph/imgs/device_continuous_wholememory_step1.png b/docs/cugraph/source/wholegraph/imgs/device_continuous_wholememory_step1.png deleted file mode 100644 index 46ecd1f14e7..00000000000 Binary files a/docs/cugraph/source/wholegraph/imgs/device_continuous_wholememory_step1.png and /dev/null differ diff --git a/docs/cugraph/source/wholegraph/imgs/device_continuous_wholememory_step2.png b/docs/cugraph/source/wholegraph/imgs/device_continuous_wholememory_step2.png deleted file mode 100644 index b773b1ef6e9..00000000000 Binary files a/docs/cugraph/source/wholegraph/imgs/device_continuous_wholememory_step2.png and /dev/null differ diff --git a/docs/cugraph/source/wholegraph/imgs/distributed_wholememory.png b/docs/cugraph/source/wholegraph/imgs/distributed_wholememory.png deleted file mode 100644 index e6bbe9f13e9..00000000000 Binary files a/docs/cugraph/source/wholegraph/imgs/distributed_wholememory.png and /dev/null differ diff --git a/docs/cugraph/source/wholegraph/imgs/general_wholememory.png b/docs/cugraph/source/wholegraph/imgs/general_wholememory.png deleted file mode 100644 index 3ece02b007b..00000000000 Binary files a/docs/cugraph/source/wholegraph/imgs/general_wholememory.png and /dev/null differ diff --git a/docs/cugraph/source/wholegraph/imgs/host_mapped_wholememory_step1.png b/docs/cugraph/source/wholegraph/imgs/host_mapped_wholememory_step1.png deleted file mode 100644 index aad8caf0d07..00000000000 Binary files a/docs/cugraph/source/wholegraph/imgs/host_mapped_wholememory_step1.png and /dev/null differ diff --git a/docs/cugraph/source/wholegraph/imgs/host_mapped_wholememory_step2.png b/docs/cugraph/source/wholegraph/imgs/host_mapped_wholememory_step2.png deleted file mode 100644 index 20597f3e515..00000000000 Binary files a/docs/cugraph/source/wholegraph/imgs/host_mapped_wholememory_step2.png and /dev/null differ diff --git a/docs/cugraph/source/wholegraph/imgs/wholememory_tensor.png 
b/docs/cugraph/source/wholegraph/imgs/wholememory_tensor.png deleted file mode 100644 index e725d6c28ed..00000000000 Binary files a/docs/cugraph/source/wholegraph/imgs/wholememory_tensor.png and /dev/null differ diff --git a/docs/cugraph/source/wholegraph/index.rst b/docs/cugraph/source/wholegraph/index.rst deleted file mode 100644 index bb2281b1351..00000000000 --- a/docs/cugraph/source/wholegraph/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -WholeGraph -========== -RAPIDS WholeGraph has following package: - -* pylibwholegraph: shared memory-based GPU-accelerated GNN training - - -.. toctree:: - :maxdepth: 2 - :caption: Contents: - - basics/index - installation/index diff --git a/docs/cugraph/source/wholegraph/installation/container.md b/docs/cugraph/source/wholegraph/installation/container.md deleted file mode 100644 index 4068ead27b2..00000000000 --- a/docs/cugraph/source/wholegraph/installation/container.md +++ /dev/null @@ -1,30 +0,0 @@ -# Build Container for WholeGraph -To run WholeGraph or build WholeGraph from source, set up the environment first. -We recommend using Docker images. -For example, to build the WholeGraph base image from the NGC pytorch 22.10 image, you can follow `Dockerfile`: -```dockerfile -FROM nvcr.io/nvidia/pytorch:22.10-py3 - -RUN apt update && DEBIAN_FRONTEND=noninteractive apt install -y lsb-core software-properties-common wget libspdlog-dev - -#RUN remove old cmake to update -RUN conda remove --force -y cmake -RUN rm -rf /usr/local/bin/cmake && rm -rf /usr/local/lib/cmake && rm -rf /usr/lib/cmake - -RUN apt-key adv --fetch-keys https://apt.kitware.com/keys/kitware-archive-latest.asc && \ - export LSB_CODENAME=$(lsb_release -cs) && \ - apt-add-repository -y "deb https://apt.kitware.com/ubuntu/ ${LSB_CODENAME} main" && \ - apt update && apt install -y cmake - -# update py for pytest -RUN pip3 install -U py -RUN pip3 install Cython setuputils3 scikit-build nanobind pytest-forked pytest -``` - -To run GNN applications, you may also need DGL and/or PyG libraries to run the GNN layers. -You may refer to [DGL](https://www.dgl.ai/pages/start.html) or [PyG](https://pytorch-geometric.readthedocs.io/en/latest/notes/installation.html) -For example, to install DGL, you may need to add: - -```dockerfile -RUN pip3 install dgl -f https://data.dgl.ai/wheels/torch-2.3/cu118/repo.html -``` diff --git a/docs/cugraph/source/wholegraph/installation/getting_wholegraph.md b/docs/cugraph/source/wholegraph/installation/getting_wholegraph.md deleted file mode 100644 index 80c666d6593..00000000000 --- a/docs/cugraph/source/wholegraph/installation/getting_wholegraph.md +++ /dev/null @@ -1,48 +0,0 @@ - -# Getting the WholeGraph Packages - -Start by reading the [RAPIDS Instalation guide](https://docs.rapids.ai/install) -and checkout the [RAPIDS install selector](https://rapids.ai/start.html) for a pick list of install options. - - -There are 4 ways to get WholeGraph packages: -1. [Quick start with Docker Repo](#docker) -2. [Conda Installation](#conda) -3. [Pip Installation](#pip) -4. [Build from Source](./source_build.md) - - -
- -## Docker -The RAPIDS Docker containers (as of Release 23.10) contain all RAPIDS packages, including WholeGraph, as well as all required supporting packages. To download a container, please see the [Docker Repository](https://hub.docker.com/r/rapidsai/rapidsai/), choosing a tag based on the NVIDIA CUDA version you’re running. This provides a ready to run Docker container with example notebooks and data, showcasing how you can utilize all of the RAPIDS libraries. - -
- - -## Conda -It is easy to install WholeGraph using conda. You can get a minimal conda installation with [miniforge](https://github.com/conda-forge/miniforge). - -The WholeGraph conda packages are: - * libwholegraph - * pylibwholegraph - -Replace the package name in the example below with the one you want to install. - -Install and update WholeGraph using the conda command: - -```bash -conda install -c rapidsai -c conda-forge -c nvidia pylibwholegraph cudatoolkit=11.8 -``` - -<br>
- -## PIP -WholeGraph, like the rest of RAPIDS, is available via pip. - -```bash -pip install pylibwholegraph-cu11 --extra-index-url=https://pypi.nvidia.com -``` - -<br>
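Whichever install path you use, a quick sanity check is to confirm that the Python package (named as in the Conda section above) imports cleanly:

```python
# Minimal post-install check; the import succeeding is the main point.
import pylibwholegraph
print("pylibwholegraph imported OK")
```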
diff --git a/docs/cugraph/source/wholegraph/installation/index.rst b/docs/cugraph/source/wholegraph/installation/index.rst deleted file mode 100644 index 09f1cb44a24..00000000000 --- a/docs/cugraph/source/wholegraph/installation/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -Installation -============ - -.. toctree:: - :maxdepth: 2 - - getting_wholegraph - container - source_build diff --git a/docs/cugraph/source/wholegraph/installation/source_build.md b/docs/cugraph/source/wholegraph/installation/source_build.md deleted file mode 100644 index 7213cbfb096..00000000000 --- a/docs/cugraph/source/wholegraph/installation/source_build.md +++ /dev/null @@ -1,186 +0,0 @@ -# Building from Source - -The following instructions are for users wishing to build wholegraph from source code. These instructions are tested on supported distributions of Linux,CUDA, -and Python - See [RAPIDS Getting Started](https://rapids.ai/start.html) for a list of supported environments. -Other operating systems _might be_ compatible, but are not currently tested. - -The wholegraph package includes both a C/C++ CUDA portion and a python portion. Both libraries need to be installed in order for cuGraph to operate correctly. -The C/C++ CUDA library is `libwholegraph` and the python library is `pylibwholegraph`. - -## Prerequisites - -__Compiler__: -* `gcc` version 11.0+ -* `nvcc` version 11.0+ -* `cmake` version 3.26.4+ - -__CUDA__: -* CUDA 11.8+ -* Volta architecture or better - -You can obtain CUDA from [https://developer.nvidia.com/cuda-downloads](https://developer.nvidia.com/cuda-downloads). - -__Other Packages__: -* ninja -* nccl -* cython -* setuputils3 -* scikit-learn -* scikit-build-core -* nanobind>=0.2.0 - -## Building wholegraph -To install wholegraph from source, ensure the dependencies are met. - -### Clone Repo and Configure Conda Environment -__GIT clone a version of the repository__ - - ```bash - # Set the location to wholegraph in an environment variable WHOLEGRAPH_HOME - export WHOLEGRAPH_HOME=$(pwd)/wholegraph - - # Download the wholegraph repo - if you have a forked version, use that path here instead - git clone https://github.com/rapidsai/wholegraph.git $WHOLEGRAPH_HOME - - cd $WHOLEGRAPH_HOME - ``` - -__Create the conda development environment__ - -```bash -# create the conda environment (assuming in base `wholegraph` directory) - -# for CUDA 11.x -conda env create --name wholegraph_dev --file conda/environments/all_cuda-118_arch-x86_64.yaml - -# activate the environment -conda activate wholegraph_dev - -# to deactivate an environment -conda deactivate -``` - - - The environment can be updated as development includes/changes the dependencies. To do so, run: - - -```bash - -# Where XXX is the CUDA version -conda env update --name wholegraph_dev --file conda/environments/all_cuda-XXX_arch-x86_64.yaml - -conda activate wholegraph_dev -``` - - -### Build and Install Using the `build.sh` Script -Using the `build.sh` script make compiling and installing wholegraph a -breeze. To build and install, simply do: - -```bash -$ cd $WHOLEGRAPH_HOME -$ ./build.sh clean -$ ./build.sh libwholegraph -$ ./build.sh pylibwholegraph -``` - -There are several other options available on the build script for advanced users. -`build.sh` options: -```bash -build.sh [ ...] [ ...] - where is: - clean - remove all existing build artifacts and configuration (start over). - uninstall - uninstall libwholegraph and pylibwholegraph from a prior build/install (see also -n) - libwholegraph - build the libwholegraph C++ library. 
- pylibwholegraph - build the pylibwholegraph Python package. - tests - build the C++ (OPG) tests. - benchmarks - build benchmarks. - docs - build the docs - and is: - -v - verbose build mode - -g - build for debug - -n - no install step - --allgpuarch - build for all supported GPU architectures - --cmake-args=\\\"\\\" - add arbitrary CMake arguments to any cmake call - --compile-cmd - only output compile commands (invoke CMake without build) - --clean - clean an individual target (note: to do a complete rebuild, use the clean target described above) - -h | --h[elp] - print this text - - default action (no args) is to build and install 'libwholegraph' then 'pylibwholegraph' targets - -examples: -$ ./build.sh clean # remove prior build artifacts (start over) -$ ./build.sh - -# make parallelism options can also be defined: Example build jobs using 4 threads (make -j4) -$ PARALLEL_LEVEL=4 ./build.sh libwholegraph - -Note that the libraries will be installed to the location set in `$PREFIX` if set (i.e. `export PREFIX=/install/path`), otherwise to `$CONDA_PREFIX`. -``` - - -## Building each section independently -### Build and Install the C++/CUDA `libwholegraph` Library -CMake depends on the `nvcc` executable being on your path or defined in `$CUDACXX`. - -This project uses cmake for building the C/C++ library. To configure cmake, run: - - ```bash - # Set the location to wholegraph in an environment variable WHOLEGRAPH_HOME - export WHOLEGRAPH_HOME=$(pwd)/wholegraph - - cd $WHOLEGRAPH_HOME - cd cpp # enter cpp directory - mkdir build # create build directory - cd build # enter the build directory - cmake .. -DCMAKE_INSTALL_PREFIX=$CONDA_PREFIX - - # now build the code - make -j # "-j" starts multiple threads - make install # install the libraries - ``` -The default installation locations are `$CMAKE_INSTALL_PREFIX/lib` and `$CMAKE_INSTALL_PREFIX/include/wholegraph` respectively. - -### Building and installing the Python package - -Build and Install the Python packages to your Python path: - -```bash -cd $WHOLEGRAPH_HOME -cd python -cd pylibwholegraph -python setup.py build_ext --inplace -python setup.py install # install pylibwholegraph -``` - -## Run tests - -Run either the C++ or the Python tests with datasets - - - **Python tests with datasets** - - ```bash - cd $WHOLEGRAPH_HOME - cd python - pytest - ``` - - - **C++ stand alone tests** - - From the build directory : - - ```bash - # Run the tests - cd $WHOLEGRAPH_HOME - cd cpp/build - gtests/PARALLEL_UTILS_TESTS # this is an executable file - ``` - - -Note: This conda installation only applies to Linux and Python versions 3.10, 3.11, and 3.12. - -## Creating documentation - -Python API documentation can be generated from _./docs/wholegraph directory_. Or through using "./build.sh docs" - -## Attribution -Portions adopted from https://github.com/pytorch/pytorch/blob/master/CONTRIBUTING.md