vobecant
committed on
Commit
·
d37387f
1
Parent(s):
d05fd36
Initial commit.
Browse files
app.py
CHANGED
|
@@ -172,7 +172,46 @@ def predict(input_img):
|
|
| 172 |
|
| 173 |
title = "Drive&Segment"
|
| 174 |
description = 'Gradio Demo accompanying paper "Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation"\nBecause of the CPU-only inference, it might take up to 20s for large images.\nRight now, it uses the Segmenter model trained on nuScenes and with a simplified inference scheme (for the sake of speed).'
|
| 175 |
-
article = "<p style='text-align: center'><a href='https://vobecant.github.io/DriveAndSegment/' target='_blank'>Project Page</a> | <a href='https://github.com/vobecant/DriveAndSegment' target='_blank'>Github</a></p>"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 176 |
examples = [ #'examples/img5.jpeg',
|
| 177 |
'examples/100.jpeg',
|
| 178 |
'examples/39076.jpeg',
|
|
|
|
| 172 |
|
| 173 |
title = "Drive&Segment"
|
| 174 |
description = 'Gradio Demo accompanying paper "Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation"\nBecause of the CPU-only inference, it might take up to 20s for large images.\nRight now, it uses the Segmenter model trained on nuScenes and with a simplified inference scheme (for the sake of speed).'
|
| 175 |
+
# article = "<p style='text-align: center'><a href='https://vobecant.github.io/DriveAndSegment/' target='_blank'>Project Page</a> | <a href='https://github.com/vobecant/DriveAndSegment' target='_blank'>Github</a></p>"
|
| 176 |
+
article="""
|
| 177 |
+
<h1 align="center">🚙📷 Drive&Segment: Unsupervised Semantic Segmentation of Urban Scenes via Cross-modal Distillation</h1>
|
| 178 |
+
|
| 179 |
+
<h2 align="center">
|
| 180 |
+
<a href="https://vobecant.github.io/DriveAndSegment">project page</a> |
|
| 181 |
+
<a href="#">arXiv</a> |
|
| 182 |
+
<a href="https://huggingface.co/spaces/vobecant/DaS">Gradio</a> |
|
| 183 |
+
<a href="https://colab.research.google.com/drive/126tBVYbt1s0STyv8DKhmLoHKpvWcv33H?usp=sharing">Colab</a> |
|
| 184 |
+
<a href="https://www.youtube.com/watch?v=B9LK-Fxu7ao">video</a>
|
| 185 |
+
</h2>
|
| 186 |
+
|
| 187 |
+
## 💫 Highlights
|
| 188 |
+
|
| 189 |
+
- 🚫🔬 **Unsupervised semantic segmentation:** Drive&Segment proposes learning semantic segmentation in urban scenes without any manual annotation, just from
|
| 190 |
+
the raw non-curated data collected by cars equipped with 📷 cameras and 💥 LiDAR sensors.
|
| 191 |
+
- 📷💥 **Multi-modal training:** During training, our method takes 📷 images and 💥 LiDAR scans as input, and
|
| 192 |
+
learns a semantic segmentation model *without using manual annotations*.
|
| 193 |
+
- 📷 **Image-only inference:** At inference time, Drive&Segment takes *only images* as input.
|
| 194 |
+
- 🏆 **State-of-the-art performance:** Our best single model based on Segmenter architecture achieves **21.8%** in mIoU on
|
| 195 |
+
Cityscapes (without any fine-tuning).
|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
|
| 199 |
+
## 📺 Examples
|
| 200 |
+
|
| 201 |
+
### **Pseudo** segmentation.
|
| 202 |
+
|
| 203 |
+
Example of **pseudo** segmentation.
|
| 204 |
+
|
| 205 |
+

|
| 206 |
+
|
| 207 |
+
### Cityscapes segmentation.
|
| 208 |
+
|
| 209 |
+
Two examples of pseudo segmentation mapped to the 19 ground-truth classes of the Cityscapes dataset by using the Hungarian
|
| 210 |
+
algorithm.
|
| 211 |
+
|
| 212 |
+

|
| 213 |
+

|
| 214 |
+
"""
|
| 215 |
examples = [ #'examples/img5.jpeg',
|
| 216 |
'examples/100.jpeg',
|
| 217 |
'examples/39076.jpeg',
|