[CI] clang-format C++ code and add to linting #2244

Merged: 27 commits, Jan 9, 2025
3 changes: 3 additions & 0 deletions .clang-format
@@ -0,0 +1,3 @@
---
DisableFormat: true
...
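The new root-level `.clang-format` sets `DisableFormat: true`, which presumably serves as a catch-all so that files not covered by a more specific configuration are left alone by the formatter. A minimal sketch of how that could be checked locally, assuming clang-format is on the PATH and the snippet is run from the repository root:

```python
# Minimal sketch (assumed setup): pipe a deliberately messy snippet through
# clang-format from the repository root; with DisableFormat: true in the
# nearest .clang-format, the output should come back identical to the input.
import shutil
import subprocess

clang_format = shutil.which("clang-format")
messy = "int   main( ){return 0 ;}\n"

result = subprocess.run(
    [clang_format, "-style=file", "-assume-filename=check.cpp"],
    input=messy,
    capture_output=True,
    text=True,
    check=True,
)
assert result.stdout == messy  # DisableFormat leaves the input untouched
```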
4 changes: 4 additions & 0 deletions .pre-commit-config.yaml
@@ -25,3 +25,7 @@ repos:
hooks:
- id: isort
language_version: python3.10
- repo: https://github.com/pre-commit/mirrors-clang-format
rev: v14.0.6
hooks:
- id: clang-format
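With the hook registered, the formatter can be run locally the same way the linting job exercises it in CI. An illustrative one-liner, driven from Python to match the project's other tooling scripts (this assumes `pre-commit` is installed in the environment):

```python
# Illustrative only: run just the new clang-format hook over the whole tree.
import subprocess

subprocess.run(["pre-commit", "run", "clang-format", "--all-files"], check=True)
```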
10 changes: 9 additions & 1 deletion generator/gen_daal4py.py
@@ -1220,9 +1220,17 @@ def gen_daal4py(dalroot, outdir, version, warn_all=False, no_dist=False, no_stre
algo_path = jp(head_path, "algorithms")
rmtree(head_path, ignore_errors=True)
copytree(orig_path, head_path)
formatfile = jp("src", ".clang-format")
for dirpath, dirnames, filenames in os.walk(algo_path):
for filename in filenames:
call([shutil.which("clang-format"), "-i", jp(dirpath, filename)])
call(
[
shutil.which("clang-format"),
"-i",
jp(dirpath, filename),
"-style=file:" + formatfile,
]
)
iface = cython_interface(algo_path)
iface.read()
print("Generating sources...")
84 changes: 44 additions & 40 deletions onedal/basic_statistics/basic_statistics.cpp
@@ -114,9 +114,9 @@ auto get_onedal_result_options(const py::dict& params) {
struct params2desc {
template <typename Float, typename Method, typename Task>
auto operator()(const py::dict& params) {
auto desc = dal::basic_statistics::descriptor<Float,
Method, dal::basic_statistics::task::compute>()
.set_result_options(get_onedal_result_options(params));
auto desc =
dal::basic_statistics::descriptor<Float, Method, dal::basic_statistics::task::compute>()
.set_result_options(get_onedal_result_options(params));
return desc;
}
};
@@ -126,54 +126,53 @@ struct params2desc_incremental {
template <typename Float, typename Method, typename Task>
auto operator()(const py::dict& params) {
auto desc = dal::basic_statistics::descriptor<Float,
dal::basic_statistics::method::dense, dal::basic_statistics::task::compute>()
.set_result_options(get_onedal_result_options(params));
dal::basic_statistics::method::dense,
dal::basic_statistics::task::compute>()
.set_result_options(get_onedal_result_options(params));
return desc;
}
};

template <typename Policy, typename Task>
void init_compute_ops(py::module& m) {
m.def("compute", [](
const Policy& policy,
const py::dict& params,
const table& data,
const table& weights) {
m.def(
"compute",
[](const Policy& policy, const py::dict& params, const table& data, const table& weights) {
using namespace dal::basic_statistics;
using input_t = compute_input<Task>;

compute_ops ops(policy, input_t{ data, weights }, params2desc{});
return fptype2t{ method2t{ Task{}, ops } }(params);
}
);
});
}


template <typename Policy, typename Task>
void init_partial_compute_ops(py::module& m) {
using prev_result_t = dal::basic_statistics::partial_compute_result<Task>;
m.def("partial_compute", [](
const Policy& policy,
const py::dict& params,
const prev_result_t& prev,
const table& data,
const table& weights) {
using namespace dal::basic_statistics;
using input_t = partial_compute_input<Task>;
partial_compute_ops ops(policy, input_t{ prev, data, weights }, params2desc_incremental{});
return fptype2t{ method2t{ Task{}, ops } }(params);
}
);
m.def("partial_compute",
[](const Policy& policy,
const py::dict& params,
const prev_result_t& prev,
const table& data,
const table& weights) {
using namespace dal::basic_statistics;
using input_t = partial_compute_input<Task>;
partial_compute_ops ops(policy,
input_t{ prev, data, weights },
params2desc_incremental{});
return fptype2t{ method2t{ Task{}, ops } }(params);
});
}

template <typename Policy, typename Task>
void init_finalize_compute_ops(pybind11::module_& m) {
using namespace dal::basic_statistics;
using input_t = partial_compute_result<Task>;
m.def("finalize_compute", [](const Policy& policy, const pybind11::dict& params, const input_t& data) {
finalize_compute_ops ops(policy, data, params2desc_incremental{});
return fptype2t{ method2t{ Task{}, ops } }(params);
});
m.def("finalize_compute",
[](const Policy& policy, const pybind11::dict& params, const input_t& data) {
finalize_compute_ops ops(policy, data, params2desc_incremental{});
return fptype2t{ method2t{ Task{}, ops } }(params);
});
}

template <typename Task>
@@ -216,23 +215,28 @@ void init_partial_compute_result(py::module_& m) {
py::cast<py::object>(convert_to_pyobject(res.get_partial_max())),
py::cast<py::object>(convert_to_pyobject(res.get_partial_sum())),
py::cast<py::object>(convert_to_pyobject(res.get_partial_sum_squares())),
py::cast<py::object>(convert_to_pyobject(res.get_partial_sum_squares_centered()))
);
py::cast<py::object>(
convert_to_pyobject(res.get_partial_sum_squares_centered())));
},
[](py::tuple t) {
if (t.size() != 6)
throw std::runtime_error("Invalid state!");
result_t res;
if (py::cast<int>(t[0].attr("size")) != 0) res.set_partial_n_rows(convert_to_table(t[0]));
if (py::cast<int>(t[1].attr("size")) != 0) res.set_partial_min(convert_to_table(t[1]));
if (py::cast<int>(t[2].attr("size")) != 0) res.set_partial_max(convert_to_table(t[2]));
if (py::cast<int>(t[3].attr("size")) != 0) res.set_partial_sum(convert_to_table(t[3]));
if (py::cast<int>(t[4].attr("size")) != 0) res.set_partial_sum_squares(convert_to_table(t[4]));
if (py::cast<int>(t[5].attr("size")) != 0) res.set_partial_sum_squares_centered(convert_to_table(t[5]));

if (py::cast<int>(t[0].attr("size")) != 0)
res.set_partial_n_rows(convert_to_table(t[0]));
if (py::cast<int>(t[1].attr("size")) != 0)
res.set_partial_min(convert_to_table(t[1]));
if (py::cast<int>(t[2].attr("size")) != 0)
res.set_partial_max(convert_to_table(t[2]));
if (py::cast<int>(t[3].attr("size")) != 0)
res.set_partial_sum(convert_to_table(t[3]));
if (py::cast<int>(t[4].attr("size")) != 0)
res.set_partial_sum_squares(convert_to_table(t[4]));
if (py::cast<int>(t[5].attr("size")) != 0)
res.set_partial_sum_squares_centered(convert_to_table(t[5]));

return res;
}
));
}));
}

ONEDAL_PY_DECLARE_INSTANTIATOR(init_compute_result);
20 changes: 9 additions & 11 deletions onedal/cluster/dbscan.cpp
@@ -100,17 +100,15 @@ struct params2desc {

template <typename Policy, typename Task>
void init_compute_ops(py::module_& m) {
m.def("compute",
[](const Policy& policy,
const py::dict& params,
const table& data,
const table& weights) {
using namespace dbscan;
using input_t = compute_input<Task>;

compute_ops ops(policy, input_t{ data, weights }, params2desc{});
return fptype2t{ method2t{ Task{}, ops } }(params);
});
m.def(
"compute",
[](const Policy& policy, const py::dict& params, const table& data, const table& weights) {
using namespace dbscan;
using input_t = compute_input<Task>;

compute_ops ops(policy, input_t{ data, weights }, params2desc{});
return fptype2t{ method2t{ Task{}, ops } }(params);
});
}

template <typename Task>
15 changes: 7 additions & 8 deletions onedal/cluster/kmeans_common.cpp
@@ -23,13 +23,11 @@

#include "onedal/common/pybind11_helpers.hpp"

namespace oneapi::dal::python{
namespace oneapi::dal::python {

namespace kmeans {

bool is_same_clustering(const dal::table& left,
const dal::table& right,
std::int64_t n_clusters) {
bool is_same_clustering(const dal::table& left, const dal::table& right, std::int64_t n_clusters) {
if (!left.has_data() || !right.has_data())
throw std::invalid_argument("Empty input table");

@@ -39,15 +37,16 @@ bool is_same_clustering(const dal::table& left,
if (left.get_column_count() > 1 || right.get_column_count() > 1)
throw std::length_error("Too many columns in input table");

const auto l_arr = l_acc.pull({0, -1});
const auto r_arr = r_acc.pull({0, -1});
const auto l_arr = l_acc.pull({ 0, -1 });
const auto r_arr = r_acc.pull({ 0, -1 });

if (n_clusters < 1)
throw std::invalid_argument("Invalid number of clusters");

constexpr std::int32_t minus_one = -1;
auto map = dal::array<std::int32_t>::full( //
n_clusters, minus_one);
n_clusters,
minus_one);

auto* const m_ptr = map.get_mutable_data();

@@ -85,4 +84,4 @@ ONEDAL_PY_INIT_MODULE(kmeans_common) {
sub.def("_is_same_clustering", &kmeans::is_same_clustering);
} // ONEDAL_PY_INIT_MODULE(kmeans_common)

} // namespace oneapi::dal::python::kmeans
} // namespace oneapi::dal::python
12 changes: 6 additions & 6 deletions onedal/common.hpp
@@ -16,12 +16,12 @@

#pragma once

#define OVERFLOW_CHECK_BY_ADDING(type, op1, op2) \
{ \
volatile type r = (op1) + (op2); \
r -= (op1); \
if (!(r == (op2))) \
throw std::runtime_error("Integer overflow by adding"); \
#define OVERFLOW_CHECK_BY_ADDING(type, op1, op2) \
{ \
volatile type r = (op1) + (op2); \
r -= (op1); \
if (!(r == (op2))) \
throw std::runtime_error("Integer overflow by adding"); \
}

#define OVERFLOW_CHECK_BY_MULTIPLICATION(type, op1, op2) \