Commit 6bd9731

updated precommit check, fixed some bugs and cleaned up code

bernadettekb committed Dec 19, 2023
1 parent d134451 commit 6bd9731
Showing 45 changed files with 233 additions and 637 deletions.
26 changes: 15 additions & 11 deletions pyproject.toml
@@ -12,14 +12,14 @@ authors = [
{name = "Naoki Yokoyama", email = "[email protected]"},
]
readme = "README.md"
-requires-python = ">=3.9"
+requires-python = ">=3.10"
dependencies = [
"torch >= 1.10.1",
"numpy >= 1.22.4",
"flask >= 2.3.2",
"seaborn >= 0.12.2", # required by yolov7
"open3d >= 0.17.0",
"transformers == 4.26.0", # higher versions break BLIP-2
"transformers >= 4.28.1", # higher versions than 4.26.0 "break" BLIP-2 but need 4.28.1 for integration
"salesforce-lavis >= 1.0.2", # for BLIP-2
"frontier_exploration @ git+https://github.com/naokiyokoyama/frontier_exploration.git",
"mobile_sam @ git+https://github.com/ChaoningZhang/MobileSAM.git",
@@ -49,8 +49,8 @@ reality = [
"Homepage" = "theaiinstitute.com"
"GitHub" = "https://github.com/bdaiinstitute/vlfm"

-[tool.setuptools]
-packages = ["vlfm", "config"]
+[tool.setuptools.packages.find]
+where = ["vlfm"]

[tool.ruff]
# Enable pycodestyle (`E`), Pyflakes (`F`), and import sorting (`I`)
@@ -92,8 +92,8 @@ line-length = 120
# Allow unused variables when underscore-prefixed.
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"

-# Assume Python 3.9.
-target-version = "py39"
+# Assume Python 3.10
+target-version = "py310"

[tool.ruff.per-file-ignores]
"__init__.py" = ["F401"]
@@ -103,8 +103,8 @@ target-version = "py39"
max-complexity = 10

[tool.black]
-line-length = 88
-target-version = ['py39']
+line-length = 120
+target-version = ['py310']
include = '\.pyi?$'
# `extend-exclude` is not honored when `black` is passed a file path explicitly,
# as is typical when `black` is invoked via `pre-commit`.
@@ -116,9 +116,9 @@ force-exclude = '''

preview = true

+[tool.coverage.run]
+relative_files = true


# mypy configuration
[tool.mypy]
python_version = "3.9"
python_version = "3.10"
disallow_untyped_defs = true
ignore_missing_imports = true
explicit_package_bases = true
@@ -127,5 +131,5 @@ strict_equality = true
warn_unreachable = true
warn_redundant_casts = true
no_implicit_optional = true
-files = ['vlfm']
-exclude = '^(docker|.*external|.*thirdparty|.*install|.*build|.*_experimental)/'
+files = ['vlfm','test','scripts']
+exclude = "^(docker|.*external|.*thirdparty|.*install|.*build|.*_experimental|.*_pb2.py|.*_pb2_grpc.py)"
26 changes: 7 additions & 19 deletions scripts/parse_jsons.py
@@ -46,9 +46,7 @@ def calculate_frequencies(failure_causes: List[str]) -> None:
for cause, count in counter.most_common():
percentage = (count / total) * 100
# Add each row to the table
-table.add_row(
-[cause.replace("did_not_fail", "succeeded!"), count, f"{percentage:.2f}%"]
-)
+table.add_row([cause.replace("did_not_fail", "succeeded!"), count, f"{percentage:.2f}%"])

print(table)

@@ -60,10 +58,7 @@ def calculate_avg_performance(stats: List[Dict[str, Any]]) -> None:
Args:
stats (List[Dict[str, Any]]): A list of stats for each episode.
"""
-success, spl, soft_spl = [
-[episode.get(k, -1) for episode in stats]
-for k in ["success", "spl", "soft_spl"]
-]
+success, spl, soft_spl = [[episode.get(k, -1) for episode in stats] for k in ["success", "spl", "soft_spl"]]

# Create a table with headers
table = PrettyTable(["Metric", "Average"])
@@ -101,28 +96,23 @@ def calculate_avg_fail_per_category(stats: List[Dict[str, Any]]) -> None:
table = PrettyTable(["Category", "Average Failure Rate"])

# Add each row to the table
-for category, stats in sorted(
+for category, c_stats in sorted(
category_stats.items(),
key=lambda x: x[1]["fail_count"],
reverse=True,
):
-avg_failure_rate = (stats["fail_count"] / stats["total_count"]) * 100
+avg_failure_rate = (c_stats["fail_count"] / c_stats["total_count"]) * 100
table.add_row(
[
category,
-(
-f"{avg_failure_rate:.2f}% ({stats['fail_count']}/"
-f"{stats['total_count']})"
-),
+f"{avg_failure_rate:.2f}% ({c_stats['fail_count']}/{c_stats['total_count']})",
]
)

print(table)


-def calculate_avg_fail_rate_per_category(
-stats: List[Dict[str, Any]], failure_cause: str
-) -> None:
+def calculate_avg_fail_rate_per_category(stats: List[Dict[str, Any]], failure_cause: str) -> None:
"""
For each possible "target_object", count the number of times the agent failed due to
the given failure cause. Then, sum the counts across all categories and use it to
@@ -147,9 +137,7 @@ def calculate_avg_fail_rate_per_category(
table = PrettyTable(["Category", f"% Occurrence for {failure_cause}"])

# Sort the categories by their failure count in descending order
-sorted_categories = sorted(
-category_to_fail_count.items(), key=lambda x: x[1], reverse=True
-)
+sorted_categories = sorted(category_to_fail_count.items(), key=lambda x: x[1], reverse=True)

# Add each row to the table
for category, count in sorted_categories:
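
Note: all of the reporting helpers in this file consume a list of per-episode stat dictionaries. A minimal driver sketch, assuming stats are stored one JSON object per file; the directory name and the "failure_cause" key are assumptions inferred from the code above:

    # Hypothetical usage of the helpers in scripts/parse_jsons.py.
    import glob
    import json
    from typing import Any, Dict, List

    def load_stats(results_dir: str) -> List[Dict[str, Any]]:
        stats: List[Dict[str, Any]] = []
        for path in sorted(glob.glob(f"{results_dir}/*.json")):
            with open(path) as f:
                stats.append(json.load(f))
        return stats

    stats = load_stats("results")  # assumed layout: results/*.json
    calculate_frequencies([ep.get("failure_cause", "did_not_fail") for ep in stats])
    calculate_avg_performance(stats)
    calculate_avg_fail_per_category(stats)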
2 changes: 1 addition & 1 deletion test/test_setup.py
@@ -7,7 +7,7 @@
from vlfm.utils.generate_dummy_policy import save_dummy_policy


-def test_load_and_save_config():
+def test_load_and_save_config() -> None:
if not os.path.exists("data"):
os.makedirs("data")

2 changes: 1 addition & 1 deletion test/test_visualization.py
@@ -7,7 +7,7 @@
from vlfm.utils.visualization import generate_text_image


-def test_visualization():
+def test_visualization() -> None:
if not os.path.exists("build"):
os.makedirs("build")

17 changes: 4 additions & 13 deletions vlfm/mapping/base_map.py
@@ -12,9 +12,7 @@ class BaseMap:
_last_camera_yaw: float = 0.0
_map_dtype: np.dtype = np.dtype(np.float32)

-def __init__(
-self, size: int = 1000, pixels_per_meter: int = 20, *args: Any, **kwargs: Any
-):
+def __init__(self, size: int = 1000, pixels_per_meter: int = 20, *args: Any, **kwargs: Any):
"""
Args:
size: The size of the map in pixels.
@@ -23,16 +21,12 @@ def __init__(
self.size = size
self._map = np.zeros((size, size), dtype=self._map_dtype)
self._episode_pixel_origin = np.array([size // 2, size // 2])
-self._traj_vis = TrajectoryVisualizer(
-self._episode_pixel_origin, self.pixels_per_meter
-)
+self._traj_vis = TrajectoryVisualizer(self._episode_pixel_origin, self.pixels_per_meter)

def reset(self) -> None:
self._map.fill(0)
self._camera_positions = []
-self._traj_vis = TrajectoryVisualizer(
-self._episode_pixel_origin, self.pixels_per_meter
-)
+self._traj_vis = TrajectoryVisualizer(self._episode_pixel_origin, self.pixels_per_meter)

def update_agent_traj(self, robot_xy: np.ndarray, robot_heading: float) -> None:
self._camera_positions.append(robot_xy)
@@ -47,10 +41,7 @@ def _xy_to_px(self, points: np.ndarray) -> np.ndarray:
Returns:
The array of (x, y) pixel coordinates.
"""
-px = (
-np.rint(points[:, ::-1] * self.pixels_per_meter)
-+ self._episode_pixel_origin
-)
+px = np.rint(points[:, ::-1] * self.pixels_per_meter) + self._episode_pixel_origin
px[:, 0] = self._map.shape[0] - px[:, 0]
return px.astype(int)

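
Note: `_xy_to_px` converts metric (x, y) coordinates to pixel indices by swapping to (row, column) order, scaling by `pixels_per_meter`, offsetting by the episode origin, and flipping the row axis. A standalone sketch of the same arithmetic with illustrative values:

    import numpy as np

    size = 1000  # map is size x size pixels
    pixels_per_meter = 20
    origin = np.array([size // 2, size // 2])  # episode starts at the map center

    def xy_to_px(points: np.ndarray) -> np.ndarray:
        # Swap (x, y) -> (y, x), scale to pixels, shift to the episode origin.
        px = np.rint(points[:, ::-1] * pixels_per_meter) + origin
        # Flip the row axis so +y in the world points toward the top of the image.
        px[:, 0] = size - px[:, 0]
        return px.astype(int)

    print(xy_to_px(np.array([[0.0, 0.0]])))  # origin -> [[500 500]]
    print(xy_to_px(np.array([[0.0, 1.0]])))  # 1 m along +y -> [[480 500]]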
13 changes: 3 additions & 10 deletions vlfm/mapping/frontier_map.py
@@ -22,9 +22,7 @@ def __init__(self, encoding_type: str = "cosine"):
def reset(self) -> None:
self.frontiers = []

-def update(
-self, frontier_locations: List[np.ndarray], curr_image: np.ndarray, text: str
-) -> None:
+def update(self, frontier_locations: List[np.ndarray], curr_image: np.ndarray, text: str) -> None:
"""
Takes in a list of frontier coordinates and the current image observation from
the robot. Any stored frontiers that are not present in the given list are
@@ -41,19 +39,14 @@ def update(
self.frontiers = [
frontier
for frontier in self.frontiers
-if any(
-np.array_equal(frontier.xyz, location)
-for location in frontier_locations
-)
+if any(np.array_equal(frontier.xyz, location) for location in frontier_locations)
]

# Add any frontiers that are not already stored. Set their image field to the
# given image.
cosine = None
for location in frontier_locations:
-if not any(
-np.array_equal(frontier.xyz, location) for frontier in self.frontiers
-):
+if not any(np.array_equal(frontier.xyz, location) for frontier in self.frontiers):
if cosine is None:
cosine = self._encode(curr_image, text)
self.frontiers.append(Frontier(location, cosine))
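
Note: `update` is a set reconciliation keyed on exact coordinate equality, with the image/text encoding computed lazily at most once per call. A stripped-down sketch of the same pattern, with a placeholder score standing in for the real `_encode` output (whose internals are not shown in this diff):

    # Minimal sketch of the reconciliation logic in FrontierMap.update.
    from dataclasses import dataclass
    from typing import List
    import numpy as np

    @dataclass
    class Frontier:
        xyz: np.ndarray
        cosine: float

    frontiers: List[Frontier] = []

    def update(frontier_locations: List[np.ndarray], score: float) -> None:
        global frontiers
        # Keep only stored frontiers that reappear in the incoming list.
        frontiers = [
            f for f in frontiers
            if any(np.array_equal(f.xyz, loc) for loc in frontier_locations)
        ]
        # Add unseen locations; all new frontiers this step share one score.
        for loc in frontier_locations:
            if not any(np.array_equal(f.xyz, loc) for f in frontiers):
                frontiers.append(Frontier(loc, score))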
41 changes: 10 additions & 31 deletions vlfm/mapping/object_point_cloud_map.py
@@ -41,9 +41,7 @@ def update_map(
fy: float,
) -> None:
"""Updates the object map with the latest information from the agent."""
-local_cloud = self._extract_object_cloud(
-depth_img, object_mask, min_depth, max_depth, fx, fy
-)
+local_cloud = self._extract_object_cloud(depth_img, object_mask, min_depth, max_depth, fx, fy)
if len(local_cloud) == 0:
return

@@ -72,15 +70,11 @@ def update_map(
return

if object_name in self.clouds:
-self.clouds[object_name] = np.concatenate(
-(self.clouds[object_name], global_cloud), axis=0
-)
+self.clouds[object_name] = np.concatenate((self.clouds[object_name], global_cloud), axis=0)
else:
self.clouds[object_name] = global_cloud

-def get_best_object(
-self, target_class: str, curr_position: np.ndarray
-) -> np.ndarray:
+def get_best_object(self, target_class: str, curr_position: np.ndarray) -> np.ndarray:
target_cloud = self.get_target_cloud(target_class)

closest_point_2d = self._get_closest_point(target_cloud, curr_position)[:2]
@@ -96,10 +90,7 @@ def get_best_object(
if delta_dist < 0.1:
# closest point is only slightly different
return self.last_target_coord
-elif (
-delta_dist < 0.5
-and np.linalg.norm(curr_position - closest_point_2d) > 2.0
-):
+elif delta_dist < 0.5 and np.linalg.norm(curr_position - closest_point_2d) > 2.0:
# closest point is a little different, but the agent is too far for
# the difference to matter much
return self.last_target_coord
Expand All @@ -108,9 +99,7 @@ def get_best_object(

return self.last_target_coord

-def update_explored(
-self, tf_camera_to_episodic: np.ndarray, max_depth: float, cone_fov: float
-) -> None:
+def update_explored(self, tf_camera_to_episodic: np.ndarray, max_depth: float, cone_fov: float) -> None:
"""
This method will remove all point clouds in self.clouds that were originally
detected to be out-of-range, but are now within range. This is just a heuristic
@@ -140,9 +129,7 @@ def update_explored(
# Detection was originally within range
continue
# Remove all points from self.clouds[obj] that have the same range_id
-self.clouds[obj] = self.clouds[obj][
-self.clouds[obj][..., -1] != range_id
-]
+self.clouds[obj] = self.clouds[obj][self.clouds[obj][..., -1] != range_id]

def get_target_cloud(self, target_class: str) -> np.ndarray:
target_cloud = self.clouds[target_class].copy()
@@ -163,9 +150,7 @@ def _extract_object_cloud(
fy: float,
) -> np.ndarray:
final_mask = object_mask * 255
-final_mask = cv2.erode(  # type: ignore
-final_mask, None, iterations=self._erosion_size
-)
+final_mask = cv2.erode(final_mask, None, iterations=self._erosion_size)  # type: ignore

valid_depth = depth.copy()
valid_depth[valid_depth == 0] = 1 # set all holes (0) to just be far (1)
@@ -177,15 +162,11 @@ def _extract_object_cloud(

return cloud

-def _get_closest_point(
-self, cloud: np.ndarray, curr_position: np.ndarray
-) -> np.ndarray:
+def _get_closest_point(self, cloud: np.ndarray, curr_position: np.ndarray) -> np.ndarray:
ndim = curr_position.shape[0]
if self.use_dbscan:
# Return the point that is closest to curr_position, which is 2D
-closest_point = cloud[
-np.argmin(np.linalg.norm(cloud[:, :ndim] - curr_position, axis=1))
-]
+closest_point = cloud[np.argmin(np.linalg.norm(cloud[:, :ndim] - curr_position, axis=1))]
else:
# Calculate the Euclidean distance from each point to the reference point
if ndim == 2:
@@ -208,9 +189,7 @@ def _get_closest_point(
return closest_point


-def open3d_dbscan_filtering(
-points: np.ndarray, eps: float = 0.2, min_points: int = 100
-) -> np.ndarray:
+def open3d_dbscan_filtering(points: np.ndarray, eps: float = 0.2, min_points: int = 100) -> np.ndarray:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)

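
Note: the diff cuts off right after the points are handed to Open3D. The usual continuation for this kind of filter is to run DBSCAN and keep only the dominant cluster, discarding noise; a hedged sketch of that pattern follows (an assumption about the shape of the logic, not necessarily the exact body of `open3d_dbscan_filtering`):

    import numpy as np
    import open3d as o3d

    def dbscan_largest_cluster(points: np.ndarray, eps: float = 0.2, min_points: int = 100) -> np.ndarray:
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(points)
        # Label each point with its cluster id; -1 marks noise.
        labels = np.array(pcd.cluster_dbscan(eps=eps, min_points=min_points))
        if labels.size == 0 or labels.max() < 0:
            return np.empty((0, 3))  # no points, or everything was noise
        # Keep only the most populous cluster.
        best = np.bincount(labels[labels >= 0]).argmax()
        return points[labels == best]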
20 changes: 5 additions & 15 deletions vlfm/mapping/obstacle_map.py
@@ -92,12 +92,8 @@ def update_map(
scaled_depth = filled_depth * (max_depth - min_depth) + min_depth
mask = scaled_depth < max_depth
point_cloud_camera_frame = get_point_cloud(scaled_depth, mask, fx, fy)
-point_cloud_episodic_frame = transform_points(
-tf_camera_to_episodic, point_cloud_camera_frame
-)
-obstacle_cloud = filter_points_by_height(
-point_cloud_episodic_frame, self._min_height, self._max_height
-)
+point_cloud_episodic_frame = transform_points(tf_camera_to_episodic, point_cloud_camera_frame)
+obstacle_cloud = filter_points_by_height(point_cloud_episodic_frame, self._min_height, self._max_height)

# Populate topdown map with obstacle locations
xy_points = obstacle_cloud[:, :2]
@@ -126,9 +122,7 @@ def update_map(
fov=np.rad2deg(topdown_fov),
max_line_len=max_depth * self.pixels_per_meter,
)
-new_explored_area = cv2.dilate(
-new_explored_area, np.ones((3, 3), np.uint8), iterations=1
-)
+new_explored_area = cv2.dilate(new_explored_area, np.ones((3, 3), np.uint8), iterations=1)
self.explored_area[new_explored_area > 0] = 1
self.explored_area[self._navigable_map == 0] = 0
contours, _ = cv2.findContours(
@@ -140,9 +134,7 @@
min_dist = np.inf
best_idx = 0
for idx, cnt in enumerate(contours):
-dist = cv2.pointPolygonTest(
-cnt, tuple([int(i) for i in agent_pixel_location]), True
-)
+dist = cv2.pointPolygonTest(cnt, tuple([int(i) for i in agent_pixel_location]), True)
if dist >= 0:
best_idx = idx
break
@@ -201,7 +193,5 @@ def visualize(self) -> np.ndarray:
return vis_img


-def filter_points_by_height(
-points: np.ndarray, min_height: float, max_height: float
-) -> np.ndarray:
+def filter_points_by_height(points: np.ndarray, min_height: float, max_height: float) -> np.ndarray:
return points[(points[:, 2] >= min_height) & (points[:, 2] <= max_height)]
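
Note: `filter_points_by_height` is a one-line band-pass on the z column of an (N, 3) cloud. A quick illustrative check (the height bounds here are made-up values, not the repo's configured ones):

    import numpy as np

    points = np.array([
        [1.0, 2.0, 0.05],  # below the band: dropped
        [1.5, 2.5, 0.50],  # inside the band: kept
        [2.0, 3.0, 1.80],  # above the band: dropped
    ])
    print(filter_points_by_height(points, min_height=0.15, max_height=0.88))
    # -> [[1.5 2.5 0.5]]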
