Added sonar exclusions/inclusions for tests #11006
GitHub Actions / JUnit Test Report
failed
Sep 6, 2023 in 0s
7558 tests run, 4786 passed, 2770 skipped, 2 failed.
Annotations
Check failure on line 1697 in deeplake/core/vectorstore/test_deeplake_vectorstore.py
github-actions / JUnit Test Report
test_deeplake_vectorstore.test_multiple_embeddings
Failed: Timeout >300.0s
Raw output
local_path = './hub_pytest/test_deeplake_vectorstore/test_multiple_embeddings'

    @pytest.mark.slow
    def test_multiple_embeddings(local_path):
        vector_store = DeepLakeVectorStore(
            path=local_path,
            overwrite=True,
            tensor_params=[
                {
                    "name": "text",
                    "htype": "text",
                },
                {
                    "name": "embedding_1",
                    "htype": "embedding",
                },
                {
                    "name": "embedding_2",
                    "htype": "embedding",
                },
            ],
        )
        with pytest.raises(AssertionError):
            vector_store.add(
                text=texts,
                embedding_function=[embedding_fn, embedding_fn],
                embedding_data=[texts],
                embedding_tensor=["embedding_1", "embedding_2"],
            )
        with pytest.raises(AssertionError):
            vector_store.add(
                text=texts,
                embedding_function=[embedding_fn, embedding_fn],
                embedding_data=[texts, texts],
                embedding_tensor=["embedding_1"],
            )
        with pytest.raises(AssertionError):
            vector_store.add(
                text=texts,
                embedding_function=[embedding_fn],
                embedding_data=[texts, texts],
                embedding_tensor=["embedding_1", "embedding_2"],
            )
        vector_store.add(
            text=texts,
            embedding_function=[embedding_fn, embedding_fn],
            embedding_data=[texts, texts],
            embedding_tensor=["embedding_1", "embedding_2"],
        )
        vector_store.add(
            text=texts, embedding_1=(embedding_fn, texts), embedding_2=(embedding_fn, texts)
        )
        vector_store.add(
            text=texts,
            embedding_function=embedding_fn,
            embedding_data=[texts, texts],
            embedding_tensor=["embedding_1", "embedding_2"],
        )
        # test with initial embedding function
        vector_store.embedding_function = embedding_fn
        vector_store.add(
            text=texts,
            embedding_data=[texts, texts],
            embedding_tensor=["embedding_1", "embedding_2"],
        )
        number_of_data = 1000
        _texts, embeddings, ids, metadatas, _ = utils.create_data(
            number_of_data=number_of_data, embedding_dim=EMBEDDING_DIM
        )
        vector_store.add(
            text=25 * _texts,
            embedding_function=[embedding_fn3, embedding_fn3],
            embedding_data=[25 * _texts, 25 * _texts],
            embedding_tensor=["embedding_1", "embedding_2"],
        )
>       vector_store.add(
            text=25 * _texts,
            embedding_1=(embedding_fn3, 25 * _texts),
            embedding_2=(embedding_fn3, 25 * _texts),
        )
deeplake/core/vectorstore/test_deeplake_vectorstore.py:1697:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
deeplake/core/vectorstore/deeplake_vectorstore.py:277: in add
    dataset_utils.extend_or_ingest_dataset(
deeplake/core/vectorstore/vector_search/dataset/dataset.py:460: in extend_or_ingest_dataset
    extend(
deeplake/core/vectorstore/vector_search/dataset/dataset.py:445: in extend
    dataset.extend(processed_tensors)
deeplake/core/dataset/dataset.py:3131: in extend
    self.append(
deeplake/util/invalid_view_op.py:22: in inner
    return callable(x, *args, **kwargs)
deeplake/core/dataset/dataset.py:3173: in append
    self._append_or_extend(
deeplake/core/dataset/dataset.py:3047: in _append_or_extend
    tensor.append(v)
deeplake/util/invalid_view_op.py:22: in inner
    return callable(x, *args, **kwargs)
deeplake/core/tensor.py:404: in append
    self.extend([sample], progressbar=False)
deeplake/util/invalid_view_op.py:22: in inner
    return callable(x, *args, **kwargs)
deeplake/core/tensor.py:316: in extend
    self.chunk_engine.extend(
deeplake/core/chunk_engine.py:1080: in extend
    self._extend(samples, progressbar, pg_callback=pg_callback)
deeplake/core/chunk_engine.py:1015: in _extend
    start_chunk=self.last_appended_chunk(allow_copy=False),
deeplake/core/chunk_engine.py:555: in last_appended_chunk
    chunk = self.get_chunk(chunk_key)
deeplake/core/chunk_engine.py:577: in get_chunk
    if not partial_chunk_bytes and isinstance(chunk.data_bytes, PartialReader):
deeplake/core/chunk/chunk_compressed_chunk.py:571: in data_bytes
    self._compress()
deeplake/core/chunk/chunk_compressed_chunk.py:562: in _compress
    self._data_bytes = compress_bytes(self.decompressed_bytes, self.compression)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
buffer = b'0163c264-4c33-11ee-af92-7715898d59320163c265-4c33-11ee-af92-7715898d59320163c266-4c33-11ee-af92-7715898d59320163c267...15898d593257c12881-4c33-11ee-af92-7715898d593257c12882-4c33-11ee-af92-7715898d593257c12883-4c33-11ee-af92-7715898d5932'
compression = 'lz4'

    def compress_bytes(
        buffer: Union[bytes, memoryview], compression: Optional[str]
    ) -> bytes:
        if not buffer:
            return b""
        if compression == "lz4":
            if not buffer:
                return b""
>           return numcodecs.lz4.compress(buffer)
E           Failed: Timeout >300.0s

deeplake/core/compression.py:162: Failed
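The timeout fires inside numcodecs' LZ4 codec while re-compressing an id-tensor chunk made of concatenated uuid strings. As a rough reproduction sketch (not the project's test code; it assumes only the numcodecs package built with LZ4 support, and the payload below is an illustrative stand-in for the real chunk bytes), the failing frame boils down to a single call:

import uuid

import numcodecs.lz4

# Illustrative stand-in for the buffer shown in the traceback: a long run of
# concatenated uuid strings, like the id tensor's chunk payload.
payload = "".join(str(uuid.uuid1()) for _ in range(25_000)).encode()

# This is the call that exceeded the 300 s pytest timeout in the failing run;
# on a healthy machine it should return in well under a second.
compressed = numcodecs.lz4.compress(payload)
assert len(compressed) < len(payload)  # lz4 compresses repetitive data well

If this completes quickly in isolation, the hang is more plausibly runner contention or chunk bookkeeping upstream of the codec than LZ4 itself.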
Check failure on line 344 in deeplake/api/tests/test_link.py
github-actions / JUnit Test Report
test_link.test_video[False-True]
Failed: Timeout >300.0s
Raw output
request = <FixtureRequest for <Function test_video[False-True]>>
local_ds_generator = <function local_ds_generator.<locals>.generate_local_ds at 0x7f43a62b1750>
create_shape_tensor = True, verify = False

    @pytest.mark.slow
    @pytest.mark.parametrize("create_shape_tensor", [True, False])
    @pytest.mark.parametrize("verify", [True, False])
    @pytest.mark.skipif(
        os.name == "nt" and sys.version_info < (3, 7), reason="requires python 3.7 or above"
    )
    def test_video(request, local_ds_generator, create_shape_tensor, verify):
        local_ds = local_ds_generator()
        with local_ds as ds:
            ds.add_creds_key("ENV")
            ds.populate_creds("ENV", from_environment=True)
            ds.create_tensor(
                "linked_videos",
                htype="link[video]",
                sample_compression="mp4",
                create_shape_tensor=create_shape_tensor,
                verify=verify,
            )
            for _ in range(3):
                sample = deeplake.link(
                    "https://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerJoyrides.mp4"
                )
                ds.linked_videos.append(sample)
            assert len(ds.linked_videos) == 3
            for i in range(3):
                assert ds.linked_videos[i].shape == (361, 720, 1280, 3)
>               assert ds.linked_videos[i][:5].numpy().shape == (5, 720, 1280, 3)
deeplake/api/tests/test_link.py:344:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
deeplake/core/tensor.py:788: in numpy
    ret = self.chunk_engine.numpy(
deeplake/core/chunk_engine.py:1806: in numpy
    return (self._sequence_numpy if self.is_sequence else self._numpy)(
deeplake/core/chunk_engine.py:2003: in _numpy
    sample = self.get_single_sample(
deeplake/core/chunk_engine.py:1945: in get_single_sample
    sample = self.get_non_tiled_sample(
deeplake/core/chunk_engine.py:1896: in get_non_tiled_sample
    return self.get_video_sample(
deeplake/core/linked_chunk_engine.py:143: in get_video_sample
    video_sample = _decompress_video(
deeplake/core/compression.py:975: in _decompress_video
    for frame in packet.decode():
av/packet.pyx:87: in av.packet.Packet.decode
    ???
av/stream.pyx:172: in av.stream.Stream.decode
    ???
av/codec/context.pyx:521: in av.codec.context.CodecContext.decode
    ???
av/codec/context.pyx:285: in av.codec.context.CodecContext.open
    ???
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = av.Dictionary({}), other = {}, kwds = {}

>       def update(self, other=(), /, **kwds):
E       Failed: Timeout >300.0s

/opt/hostedtoolcache/Python/3.10.12/x64/lib/python3.10/_collections_abc.py:991: Failed
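Here the timeout interrupts PyAV while it is opening the codec context to decode the linked mp4. A standalone sketch of the same decode path (assuming only the av package and network access to the public sample URL taken from the test; it bypasses Deep Lake's linked-tensor machinery entirely):

import av

URL = (
    "https://commondatastorage.googleapis.com/gtv-videos-bucket/"
    "sample/ForBiggerJoyrides.mp4"
)

frames = []
with av.open(URL) as container:
    stream = container.streams.video[0]
    for packet in container.demux(stream):
        # packet.decode() is where the failing run stalled while the
        # codec context was being opened for the video stream.
        for frame in packet.decode():
            frames.append(frame.to_ndarray(format="rgb24"))
        if len(frames) >= 5:
            break

# The first five frames should match the shape asserted in the test.
assert frames[0].shape == (720, 1280, 3)
assert len(frames) >= 5

If this snippet also stalls on the runner, the failure points at codec initialization or network fetch of the remote sample rather than at Deep Lake's chunk engine.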