Skip to content

Commit c54c654

Browse files
cyyever authored and pytorchmergebot committed
Enable Leak Sanitizer
1 parent 95cb42c commit c54c654

File tree

5 files changed

+37
-7
lines changed

5 files changed

+37
-7
lines changed

.ci/pytorch/test.sh

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -216,11 +216,13 @@ fi
216216
# if you're not careful. Check this if you made some changes and the
217217
# ASAN test is not working
218218
if [[ "$BUILD_ENVIRONMENT" == *asan* ]]; then
219-
export ASAN_OPTIONS=detect_leaks=0:symbolize=1:detect_stack_use_after_return=true:strict_init_order=true:detect_odr_violation=1:detect_container_overflow=0:check_initialization_order=true:debug=true
219+
export ASAN_OPTIONS=detect_leaks=1:symbolize=1:detect_stack_use_after_return=true:strict_init_order=true:detect_odr_violation=1:detect_container_overflow=0:check_initialization_order=true:debug=true:fast_unwind_on_malloc=1
220220
if [[ "$BUILD_ENVIRONMENT" == *cuda* ]]; then
221221
export ASAN_OPTIONS="${ASAN_OPTIONS}:protect_shadow_gap=0"
222222
fi
223223
export UBSAN_OPTIONS=print_stacktrace=1:suppressions=$PWD/ubsan.supp
224+
# Suppress some hard to solve indirect leaks
225+
export LSAN_OPTIONS="suppressions=$PWD/lsan.supp"
224226
export PYTORCH_TEST_WITH_ASAN=1
225227
export PYTORCH_TEST_WITH_UBSAN=1
226228
# TODO: Figure out how to avoid hard-coding these paths

lsan.supp

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
leak:pybind11::cpp_function
2+
leak:PyMem_RawMalloc
3+
leak:unicode_resize
4+
leak:PyObject_Malloc
5+
leak:PyByteArray_Resize
6+
leak:numpy
7+
leak:list_append
8+
leak:unicodeobject
9+
leak:obmalloc
10+
leak:gcmodule
11+
leak:listobject
12+
leak:bytesobject
13+
leak:PyThread_allocate_lock
14+
leak:sccache
15+
leak:rustc-1.61.0
16+
leak:gcc/x86_64-linux-gnu/11
17+
leak:x86_64-linux-gnu-gcc-11
18+
leak:libbfd
19+
leak:x86_64-linux-gnu-ld.bfd
20+
leak:git
21+
leak:libio
22+
leak:unknown module
23+
leak:g++
24+
leak:conda-linux-gnu-ld
25+
leak:crypto
26+
leak:torch::detail::(anonymous namespace)::get_set_cached_attr
27+
leak:torch::jit::tensorexpr::TensorExprKernel::preAllocIntermediateBufs
28+
leak:optree
29+
leak:python
30+
leak:torch::tensors::initialize_aten_types

torch/csrc/Module.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -235,7 +235,7 @@ static PyObject* THPModule_initExtension(
235235
END_HANDLE_TH_ERRORS
236236
}
237237

238-
// The idea behind these two functions is to make it easy to test if we are
238+
// The idea behind these functions is to make it easy to test if we are
239239
// built with ASAN: they're designed not to crash if ASAN is not enabled, but
240240
// to trigger ASAN if it is enabled. This lets us run a "canary" tests which
241241
// checks if our build environment is misconfigured.

torch/csrc/autograd/VariableTypeManual.cpp

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -456,7 +456,7 @@ static Tensor detach(c10::DispatchKeySet ks, const Tensor& self) {
456456
// NB: we can't make detach() a normal view operator because the codegen
457457
// generates allow_tensor_metadata_change = True for them. In the future we
458458
// should have an option for this in the codegen.
459-
auto result = as_view(
459+
return as_view(
460460
/* base */ self,
461461
/* output */ out,
462462
/* is_bw_differentiable */ false,
@@ -465,8 +465,6 @@ static Tensor detach(c10::DispatchKeySet ks, const Tensor& self) {
465465
/* rev_view_func */ nullptr,
466466
/* creation_meta */ CreationMeta::DEFAULT,
467467
/*allow_tensor_metadata_change=*/false);
468-
469-
return result;
470468
}
471469

472470
static Tensor _fw_primal(

torch/csrc/autograd/variable.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -844,7 +844,7 @@ inline Variable make_variable_non_differentiable_view(
844844
/*version_counter=*/impl::version_counter(base),
845845
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
846846
data_impl_copy->set_autograd_meta(nullptr);
847-
return Variable(data_impl_copy);
847+
return Variable(std::move(data_impl_copy));
848848
}
849849
return Variable();
850850
}
@@ -903,7 +903,7 @@ inline Variable make_variable(
903903
/*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
904904
data_impl_copy->set_autograd_meta(std::make_unique<AutogradMeta>(
905905
data_impl_copy.get(), false, std::move(gradient_edge)));
906-
return Variable(data_impl_copy);
906+
return Variable(std::move(data_impl_copy));
907907
}
908908
return Variable();
909909
}

0 commit comments

Comments (0)