Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
102 changes: 101 additions & 1 deletion opendm/align.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import dataclasses
import pdal
import numpy as np
from numpy.lib import recfunctions as rfn
import rasterio
from rasterio.crs import CRS
from opendm.utils import double_quote
Expand Down Expand Up @@ -144,4 +145,103 @@ def transform_obj(input_obj, a_matrix, geo_offset, output_obj):
vt = (a_matrix.dot((v + g_off)) - g_off)[:3]
fout.write("v " + " ".join(map(str, list(vt))) + '\n')
else:
fout.write(line)
fout.write(line)


def _upsert_dimension(arrays, name, values, dtype=None):
values = np.asarray(values, dtype=dtype)
if name in arrays.dtype.names:
arrays[name] = values
return arrays
return rfn.append_fields(arrays, name, values, dtypes=dtype, usemask=False)


def _normalize_point_cloud_dimensions(arrays):
    """Mirror lower-case dimensions onto the capitalized names LAS writers
    expect, and derive a uint8 UserData field from the "views" dimension.

    Existing capitalized fields are left untouched; only missing ones are
    copied from their lower-case counterparts.
    """
    canonical = {
        "x": "X",
        "y": "Y",
        "z": "Z",
        "red": "Red",
        "green": "Green",
        "blue": "Blue",
        "classification": "Classification",
    }

    for source, target in canonical.items():
        fields = arrays.dtype.names
        if source in fields and target not in fields:
            arrays = _upsert_dimension(arrays, target, arrays[source])

    fields = arrays.dtype.names
    if "views" in fields and "UserData" not in fields:
        # LAS UserData is a single byte, so clamp view counts to [0, 255].
        user_data = np.clip(arrays["views"], 0, 255).astype(np.uint8)
        arrays = _upsert_dimension(arrays, "UserData", user_data, np.uint8)

    return arrays


def transform_point_cloud_geocoords(
    input_point_cloud,
    output_point_cloud,
    point_transformer,
    a_srs,
    offset_x,
    offset_y,
    las_scale,
    gcp_geojson_zip=None,
):
    """Read a point cloud with PDAL, transform its XYZ coordinates and
    write the result as a compressed LAS file.

    :param point_transformer: callable mapping an (N, 3) array of local
        coordinates to an (N, 3) array of projected coordinates
    :param a_srs: spatial reference assigned to the output file
    :param gcp_geojson_zip: optional path to a GCP zip embedded as a VLR
    :raises RuntimeError: when the input yields no arrays or lacks XYZ
    """
    reader = pdal.Pipeline(json.dumps([input_point_cloud]))
    reader.execute()

    if not reader.arrays:
        raise RuntimeError("Point cloud pipeline did not return any arrays")

    points = _normalize_point_cloud_dimensions(reader.arrays[0].copy())

    missing_xyz = any(dim not in points.dtype.names for dim in ("X", "Y", "Z"))
    if missing_xyz:
        raise RuntimeError("Point cloud is missing XYZ dimensions")

    xyz = point_transformer(np.column_stack((points["X"], points["Y"], points["Z"])))
    points["X"] = xyz[:, 0]
    points["Y"] = xyz[:, 1]
    points["Z"] = xyz[:, 2]

    las_writer = {
        "type": "writers.las",
        "filename": output_point_cloud,
        "compression": "lazperf",
        "a_srs": a_srs,
        "offset_x": offset_x,
        "offset_y": offset_y,
        "offset_z": 0,
        "scale_x": las_scale,
        "scale_y": las_scale,
        "scale_z": las_scale,
        "extra_dims": "all",
    }

    if gcp_geojson_zip is not None:
        # Embed the GCP archive as a LAS VLR so downstream tools can
        # recover the control points from the file itself.
        las_writer["vlrs"] = [
            {
                "filename": gcp_geojson_zip.replace(os.sep, "/"),
                "user_id": "ODM",
                "record_id": 2,
                "description": "Ground Control Points (zip)",
            }
        ]

    pdal.Pipeline(json.dumps({"pipeline": [las_writer]}), arrays=[points]).execute()


def transform_obj_geocoords(input_obj, point_transformer, output_obj):
    """Rewrite an OBJ file, transforming every vertex ("v ") line.

    Non-vertex lines (faces, normals, comments, ...) are copied through
    unchanged.

    :param input_obj: path to the source .obj file
    :param point_transformer: callable mapping a float coordinate array
        (one vertex) to its transformed coordinates
    :param output_obj: path of the .obj file to write
    """
    with open(input_obj, "r") as fin:
        with open(output_obj, "w") as fout:
            for line in fin:
                if line.startswith("v "):
                    # np.fromstring is deprecated for text parsing; split the
                    # whitespace-separated coordinates explicitly instead.
                    v = np.array(line.split()[1:], dtype=float)
                    vt = point_transformer(v)
                    fout.write("v " + " ".join(map(str, list(vt))) + "\n")
                else:
                    fout.write(line)
46 changes: 39 additions & 7 deletions opendm/osfm.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,17 +23,53 @@
from opendm.multispectral import get_photos_by_band
from opendm.gpu import has_popsift_and_can_handle_texsize, has_gpu
from opensfm import multiview, exif
from opensfm.actions.export_geocoords import _transform

class OSFMContext:
def __init__(self, opensfm_project_path):
    # Root directory of the OpenSfM project this context operates on.
    self.opensfm_project_path = opensfm_project_path
    # proj4 string -> (reference, transformer) pairs, filled lazily by
    # _get_geocoords_context so the dataset is only loaded once per CRS.
    self._geocoords_cache = {}

def run(self, command):
    """Invoke the bundled OpenSfM binary with *command* on this project."""
    osfm_bin = os.path.join(context.opensfm_path, 'bin', 'opensfm')
    cmd = '"%s" %s "%s"' % (osfm_bin, command, self.opensfm_project_path)
    system.run(cmd)

def _get_geocoords_context(self, proj4):
    """Return the (reference, transformer) pair for *proj4*.

    The OpenSfM reference and the WGS84 -> proj4 transformer are built
    on first use and memoized per CRS on the instance.
    """
    cached = self._geocoords_cache.get(proj4)
    if cached is None:
        ds = DataSet(self.opensfm_project_path)
        transformer = pyproj.Transformer.from_proj(
            CRS.from_epsg(4326), CRS.from_user_input(proj4)
        )
        cached = (ds.load_reference(), transformer)
        self._geocoords_cache[proj4] = cached
    return cached

def transform_local_points_to_proj(self, points, proj4):
reference, projection = self._get_geocoords_context(proj4)
points = np.asarray(points, dtype=float)
squeeze = points.ndim == 1
points = np.atleast_2d(points)

latitudes, longitudes, altitudes = reference.to_lla(
points[:, 0], points[:, 1], points[:, 2]
)
eastings, northings = projection.transform(latitudes, longitudes)
transformed = np.column_stack((eastings, northings, altitudes))
return transformed[0] if squeeze else transformed

def transform_local_point_to_proj(self, point, proj4):
return self.transform_local_points_to_proj(point, proj4)

def transform_local_points_to_geocoords(self, points, proj4, offset_x=0.0, offset_y=0.0):
transformed = np.asarray(self.transform_local_points_to_proj(points, proj4), dtype=float)
squeeze = transformed.ndim == 1
transformed = np.atleast_2d(transformed).copy()
transformed[:, 0] -= offset_x
transformed[:, 1] -= offset_y
return transformed[0] if squeeze else transformed

def transform_local_point_to_geocoords(self, point, proj4, offset_x=0.0, offset_y=0.0):
return self.transform_local_points_to_geocoords(point, proj4, offset_x, offset_y)

def is_reconstruction_done(self):
tracks_file = os.path.join(self.opensfm_project_path, 'tracks.csv')
reconstruction_file = os.path.join(self.opensfm_project_path, 'reconstruction.json')
Expand Down Expand Up @@ -617,13 +653,9 @@ def ground_control_points(self, proj4):
if not gcps_stats:
return []

ds = DataSet(self.opensfm_project_path)
reference = ds.load_reference()
projection = pyproj.Proj(proj4)

result = []
for gcp in gcps_stats:
geocoords = _transform(gcp['coordinates'], reference, projection)
geocoords = self.transform_local_point_to_proj(gcp['coordinates'], proj4)
result.append({
'id': gcp['id'],
'observations': gcp['observations'],
Expand Down Expand Up @@ -805,4 +837,4 @@ def is_submodel(opensfm_root):

return (len(parts) >= 2 and parts[-2][:9] == "submodel_") or \
os.path.isfile(os.path.join(opensfm_root, "split_merge_stop_at_reconstruction.txt")) or \
os.path.isfile(os.path.join(opensfm_root, "features", "empty"))
os.path.isfile(os.path.join(opensfm_root, "features", "empty"))
24 changes: 17 additions & 7 deletions opendm/shots.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def get_origin(shot):
"""The origin of the pose in world coordinates."""
return -get_rotation_matrix(np.array(shot['rotation'])).T.dot(np.array(shot['translation']))

def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset=None, pseudo_geotiff=None, a_matrix=None):
def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset=None, pseudo_geotiff=None, shot_transformer=None, a_matrix=None):
"""
Extract shots from OpenSfM's reconstruction.json
"""
Expand Down Expand Up @@ -84,14 +84,24 @@ def get_geojson_shots_from_opensfm(reconstruction_file, utm_srs=None, utm_offset

translation = origin
else:
rotation = shot['rotation']

# Just add UTM offset
origin = get_origin(shot)
rotation = shot['rotation']

utm_coords = [origin[0] + utm_offset[0],
origin[1] + utm_offset[1],
origin[2]]
if shot_transformer is not None:
eps = 1e-3
utm_coords = np.array(shot_transformer(origin), dtype=float)
px = np.array(shot_transformer(origin + [eps, 0, 0]), dtype=float)
py = np.array(shot_transformer(origin + [0, eps, 0]), dtype=float)
pz = np.array(shot_transformer(origin + [0, 0, eps]), dtype=float)
J = np.column_stack(((px - utm_coords) / eps, (py - utm_coords) / eps, (pz - utm_coords) / eps))
rotation_matrix = get_rotation_matrix(np.array(shot["rotation"]))
rotation = matrix_to_rotation(np.dot(rotation_matrix, np.linalg.inv(J)))
else:
utm_coords = [
origin[0] + utm_offset[0],
origin[1] + utm_offset[1],
origin[2],
]

if a_matrix is not None:
rotation = list(np.array(rotation).dot(a_matrix[:3,:3]))
Expand Down
2 changes: 1 addition & 1 deletion opendm/types.py
Original file line number Diff line number Diff line change
Expand Up @@ -350,6 +350,7 @@ def __init__(self, root_path, gcp_file = None, geo_file = None, align_file = Non
self.opensfm_reconstruction = os.path.join(self.opensfm, 'reconstruction.json')
self.opensfm_reconstruction_nvm = os.path.join(self.opensfm, 'undistorted/reconstruction.nvm')
self.opensfm_geocoords_reconstruction = os.path.join(self.opensfm, 'reconstruction.geocoords.json')
self.opensfm_geocoords_transformation = os.path.join(self.opensfm, 'geocoords_transformation.txt')
self.opensfm_topocentric_reconstruction = os.path.join(self.opensfm, 'reconstruction.topocentric.json')

# OpenMVS
Expand Down Expand Up @@ -493,4 +494,3 @@ def last_stage(self):

def process(self, args, outputs):
    """Run this stage's work; subclasses must override this method."""
    raise NotImplementedError

84 changes: 58 additions & 26 deletions stages/odm_georeferencing.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@
from opendm.multispectral import get_primary_band_name
from opendm.osfm import OSFMContext
from opendm.boundary import as_polygon, export_to_bounds_files
from opendm.align import compute_alignment_matrix, transform_point_cloud, transform_obj
from opendm.align import compute_alignment_matrix, transform_obj, transform_obj_geocoords, transform_point_cloud, transform_point_cloud_geocoords
from opendm.utils import np_to_json

class ODMGeoreferencingStage(types.ODM_Stage):
Expand Down Expand Up @@ -115,17 +115,22 @@ def process(self, args, outputs):
log.ODM_WARNING("GCPs could not be loaded for writing to %s" % gcp_export_file)

if not io.file_exists(tree.odm_georeferencing_model_laz) or self.rerun():
cmd = f'pdal translate -i "{tree.filtered_point_cloud}" -o \"{tree.odm_georeferencing_model_laz}\"'
stages = ["ferry"]
params = [
'--filters.ferry.dimensions="views => UserData"'
]
point_cloud_transformer = None
obj_transformer = None

if reconstruction.is_georeferenced():
log.ODM_INFO("Georeferencing point cloud")

stages.append("transformation")
utmoffset = reconstruction.georef.utm_offset()
octx = OSFMContext(tree.opensfm)
point_cloud_transformer = lambda points: octx.transform_local_points_to_proj(
points,
reconstruction.georef.proj4(),
)
obj_transformer = lambda points: octx.transform_local_points_to_geocoords(
points,
reconstruction.georef.proj4(),
reconstruction.georef.utm_east_offset,
reconstruction.georef.utm_north_offset,
)

# Establish appropriate las scale for export
las_scale = 0.001
Expand All @@ -148,27 +153,24 @@ def powerr(r):
else:
log.ODM_INFO("No point_cloud_stats.json found. Using default las scale: %s" % las_scale)

params += [
f'--filters.transformation.matrix="1 0 0 {utmoffset[0]} 0 1 0 {utmoffset[1]} 0 0 1 0 0 0 0 1"',
f'--writers.las.offset_x={reconstruction.georef.utm_east_offset}' ,
f'--writers.las.offset_y={reconstruction.georef.utm_north_offset}',
f'--writers.las.scale_x={las_scale}',
f'--writers.las.scale_y={las_scale}',
f'--writers.las.scale_z={las_scale}',
'--writers.las.offset_z=0',
f'--writers.las.a_srs="{reconstruction.georef.proj4()}"' # HOBU this should maybe be WKT
]

gcp_vlr_file = None
if reconstruction.has_gcp() and io.file_exists(gcp_geojson_zip_export_file):
if os.path.getsize(gcp_geojson_zip_export_file) <= 65535:
log.ODM_INFO("Embedding GCP info in point cloud")
params += [
'--writers.las.vlrs="{\\\"filename\\\": \\\"%s\\\", \\\"user_id\\\": \\\"ODM\\\", \\\"record_id\\\": 2, \\\"description\\\": \\\"Ground Control Points (zip)\\\"}"' % gcp_geojson_zip_export_file.replace(os.sep, "/")
]
gcp_vlr_file = gcp_geojson_zip_export_file
else:
log.ODM_WARNING("Cannot embed GCP info in point cloud, %s is too large" % gcp_geojson_zip_export_file)

system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params))
transform_point_cloud_geocoords(
tree.filtered_point_cloud,
tree.odm_georeferencing_model_laz,
point_cloud_transformer,
reconstruction.georef.proj4(),
reconstruction.georef.utm_east_offset,
reconstruction.georef.utm_north_offset,
las_scale,
gcp_vlr_file,
)

self.update_progress(50)

Expand Down Expand Up @@ -202,13 +204,44 @@ def powerr(r):
export_to_bounds_files(outputs['boundary'], reconstruction.get_proj_srs(), bounds_json, bounds_gpkg)
else:
log.ODM_INFO("Converting point cloud (non-georeferenced)")
system.run(cmd + ' ' + ' '.join(stages) + ' ' + ' '.join(params))
system.run(
'pdal translate -i "{}" -o "{}" ferry --filters.ferry.dimensions="views => UserData"'.format(
tree.filtered_point_cloud,
tree.odm_georeferencing_model_laz,
)
)


stats_dir = tree.path("opensfm", "stats", "codem")
if os.path.exists(stats_dir) and self.rerun():
shutil.rmtree(stats_dir)

def georeference_textured_model(obj):
    # Georeference a textured OBJ in place, keeping the untransformed
    # model as a *_local sibling. No-op without a georeference/transformer.
    if not reconstruction.is_georeferenced() or obj_transformer is None:
        return
    if os.path.isfile(obj):
        unaligned_obj = io.related_file_path(obj, postfix="_local")
        # On re-runs a *_local copy from a previous pass may exist;
        # restore it first so the model is never transformed twice.
        if os.path.isfile(unaligned_obj):
            os.rename(unaligned_obj, obj)
        os.rename(obj, unaligned_obj)
        try:
            transform_obj_geocoords(unaligned_obj, obj_transformer, obj)
            log.ODM_INFO("Georeferenced %s" % obj)
        except Exception as e:
            log.ODM_WARNING("Cannot georeference textured model: %s" % str(e))
            # Transformation failed: put the original model back in place.
            os.rename(unaligned_obj, obj)

for texturing in [tree.odm_texturing, tree.odm_25dtexturing]:
if reconstruction.multi_camera:
primary = get_primary_band_name(reconstruction.multi_camera, args.primary_band)
for band in reconstruction.multi_camera:
subdir = "" if band['name'] == primary else band['name'].lower()
obj = os.path.join(texturing, subdir, "odm_textured_model_geo.obj")
georeference_textured_model(obj)
else:
obj = os.path.join(texturing, "odm_textured_model_geo.obj")
georeference_textured_model(obj)

if tree.odm_align_file is not None:
alignment_file_exists = io.file_exists(tree.odm_georeferencing_alignment_matrix)

Expand Down Expand Up @@ -280,4 +313,3 @@ def transform_textured_model(obj):
os.remove(tree.filtered_point_cloud)



13 changes: 12 additions & 1 deletion stages/odm_report.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,12 +51,23 @@ def process(self, args, outputs):
if reconstruction.is_georeferenced():
# Check if alignment has been performed (we need to transform our shots if so)
a_matrix = None
octx = OSFMContext(tree.opensfm)
shot_transformer = lambda origin: octx.transform_local_point_to_proj(
origin,
reconstruction.georef.proj4(),
)
if io.file_exists(tree.odm_georeferencing_alignment_matrix):
with open(tree.odm_georeferencing_alignment_matrix, 'r') as f:
a_matrix = np_from_json(f.read())
log.ODM_INFO("Aligning shots to %s" % a_matrix)

shots = get_geojson_shots_from_opensfm(tree.opensfm_reconstruction, utm_srs=reconstruction.get_proj_srs(), utm_offset=reconstruction.georef.utm_offset(), a_matrix=a_matrix)
shots = get_geojson_shots_from_opensfm(
tree.opensfm_topocentric_reconstruction,
utm_srs=reconstruction.get_proj_srs(),
utm_offset=reconstruction.georef.utm_offset(),
shot_transformer=shot_transformer,
a_matrix=a_matrix,
)
else:
# Pseudo geo
shots = get_geojson_shots_from_opensfm(tree.opensfm_reconstruction, pseudo_geotiff=tree.odm_orthophoto_tif)
Expand Down
Loading