import gradio as gr
import kornia as K
import kornia.feature as KF
import torch
import matplotlib
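# Use the non-interactive Agg backend so figures can be rendered server-side.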
matplotlib.use("Agg")
import numpy as np
from plot_utils import plot_images, plot_lines, plot_color_line_matches
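
# Pretrained SOLD2 line segment detector/descriptor and a RANSAC estimator that
# fits a homography from line segment correspondences.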
sold2 = KF.SOLD2(pretrained=True, config=None)
ransac = K.geometry.RANSAC(model_type="homography_from_linesegments", inl_th=3.0)
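

# Run the full demo pipeline: detect line segments in both images with SOLD2,
# match them, and return a matplotlib figure with the chosen visualization.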
def infer(img1, img2, line_style: str):
    # Convert the input images to float tensors in [0, 1] and to grayscale,
    # then batch them so SOLD2 processes both in a single forward pass.
    torch_img1 = K.image_to_tensor(img1).float() / 255.0
    torch_img2 = K.image_to_tensor(img2).float() / 255.0
    torch_img1_gray = K.color.rgb_to_grayscale(torch_img1)
    torch_img2_gray = K.color.rgb_to_grayscale(torch_img2)
    imgs = torch.stack([torch_img1_gray, torch_img2_gray])
    # Detect line segments and compute dense descriptors for both images.
    with torch.inference_mode():
        outputs = sold2(imgs)
    line_seg1 = outputs["line_segments"][0]
    line_seg2 = outputs["line_segments"][1]
    desc1 = outputs["dense_desc"][0]
    desc2 = outputs["dense_desc"][1]

    # Match segments between the two images; entries equal to -1 mean a segment
    # in image 1 has no match in image 2.
    with torch.inference_mode():
        matches = sold2.match(line_seg1, line_seg2, desc1[None], desc2[None])
    valid_matches = matches != -1
    match_indices = matches[valid_matches]
    matched_lines1 = line_seg1[valid_matches]
    matched_lines2 = line_seg2[match_indices]
    # Plot both images with their detected lines, then overlay the selected
    # visualization on top.
    imgs_to_plot = [K.tensor_to_image(torch_img1), K.tensor_to_image(torch_img2)]
    fig = plot_images(
        imgs_to_plot, ["Image 1 - detected lines", "Image 2 - detected lines"]
    )
    if line_style == "Line Matches":
        lines_to_plot = [line_seg1.numpy(), line_seg2.numpy()]
        plot_lines(lines_to_plot, fig, ps=3, lw=2, indices={0, 1})
    elif line_style == "Color Line Matches":
        plot_color_line_matches([matched_lines1, matched_lines2], fig, lw=2)
    elif line_style == "Line Segment Homography Warping":
        _, _, img1_warp_to2 = get_homography_values(
            matched_lines1, matched_lines2, torch_img1
        )
        fig = plot_images(
            [K.tensor_to_image(torch_img2), K.tensor_to_image(img1_warp_to2)],
            ["Image 2", "Image 1 warped to 2"],
        )
    elif line_style == "Matched Lines for Homography Warping":
        _, correspondence_mask, _ = get_homography_values(
            matched_lines1, matched_lines2, torch_img1
        )
        plot_color_line_matches(
            [matched_lines1[correspondence_mask], matched_lines2[correspondence_mask]],
            fig,
            lw=2,
        )
    return fig
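

# Fit a homography to the matched line segments with RANSAC and warp image 1
# into the frame of image 2. The segment endpoints are flipped so their
# coordinate order matches what the RANSAC estimator expects.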
def get_homography_values(matched_lines1, matched_lines2, torch_img1):
    H_ransac, correspondence_mask = ransac(
        matched_lines1.flip(dims=(2,)), matched_lines2.flip(dims=(2,))
    )
    img1_warp_to2 = K.geometry.warp_perspective(
        torch_img1[None], H_ransac[None], (torch_img1.shape[1:])
    )
    return H_ransac, correspondence_mask, img1_warp_to2
description = """In this space you can try out Line Detection and Segment Matching with the Kornia library as seen in [this tutorial](https://kornia.github.io/tutorials/#category=Line%20matching).
Just upload two images of a scene with different view points, choose an option for output and run the demo.
"""
Iface = gr.Interface(
    fn=infer,
    inputs=[
        gr.components.Image(),
        gr.components.Image(),
        gr.components.Dropdown(
            [
                "Line Matches",
                "Color Line Matches",
                "Line Segment Homography Warping",
                "Matched Lines for Homography Warping",
            ],
            value="Line Matches",
            label="Options",
        ),
    ],
    outputs=gr.components.Plot(),
    examples=[["terrace0.JPG", "terrace1.JPG", "Line Matches"]],
    title="Line Segment Matching with Kornia",
    description=description,
).launch()